diff --git a/container/container.go b/container/container.go
index 3e8a370241..11814b7719 100644
--- a/container/container.go
+++ b/container/container.go
@@ -15,7 +15,7 @@ import (
 	"syscall"
 	"time"
 
-	"github.com/containerd/containerd"
+	"github.com/containerd/containerd/cio"
 	containertypes "github.com/docker/docker/api/types/container"
 	mounttypes "github.com/docker/docker/api/types/mount"
 	networktypes "github.com/docker/docker/api/types/network"
@@ -1004,7 +1004,7 @@ func (container *Container) CloseStreams() error {
 }
 
 // InitializeStdio is called by libcontainerd to connect the stdio.
-func (container *Container) InitializeStdio(iop *libcontainerd.IOPipe) (containerd.IO, error) {
+func (container *Container) InitializeStdio(iop *libcontainerd.IOPipe) (cio.IO, error) {
 	if err := container.startLogging(); err != nil {
 		container.Reset(false)
 		return nil, err
@@ -1020,7 +1020,7 @@ func (container *Container) InitializeStdio(iop *libcontainerd.IOPipe) (containe
 		}
 	}
 
-	return &cio{IO: iop, sc: container.StreamConfig}, nil
+	return &rio{IO: iop, sc: container.StreamConfig}, nil
 }
 
 // SecretMountPath returns the path of the secret mount for the container
@@ -1078,19 +1078,19 @@ func (container *Container) CreateDaemonEnvironment(tty bool, linkedEnv []string
 	return env
 }
 
-type cio struct {
-	containerd.IO
+type rio struct {
+	cio.IO
 
 	sc *stream.Config
 }
 
-func (i *cio) Close() error {
+func (i *rio) Close() error {
 	i.IO.Close()
 
 	return i.sc.CloseStreams()
 }
 
-func (i *cio) Wait() {
+func (i *rio) Wait() {
 	i.sc.Wait()
 
 	i.IO.Wait()
diff --git a/daemon/exec/exec.go b/daemon/exec/exec.go
index 42aaa86b7b..193d32f022 100644
--- a/daemon/exec/exec.go
+++ b/daemon/exec/exec.go
@@ -4,7 +4,7 @@ import (
 	"runtime"
 	"sync"
 
-	"github.com/containerd/containerd"
+	"github.com/containerd/containerd/cio"
 	"github.com/docker/docker/container/stream"
 	"github.com/docker/docker/libcontainerd"
 	"github.com/docker/docker/pkg/stringid"
@@ -43,26 +43,26 @@ func NewConfig() *Config {
 	}
 }
 
-type cio struct {
-	containerd.IO
+type rio struct {
+	cio.IO
 
 	sc *stream.Config
 }
 
-func (i *cio) Close() error {
+func (i *rio) Close() error {
 	i.IO.Close()
 
 	return i.sc.CloseStreams()
 }
 
-func (i *cio) Wait() {
+func (i *rio) Wait() {
 	i.sc.Wait()
 
 	i.IO.Wait()
 }
 
 // InitializeStdio is called by libcontainerd to connect the stdio.
-func (c *Config) InitializeStdio(iop *libcontainerd.IOPipe) (containerd.IO, error) {
+func (c *Config) InitializeStdio(iop *libcontainerd.IOPipe) (cio.IO, error) {
 	c.StreamConfig.CopyToPipe(iop)
 
 	if c.StreamConfig.Stdin() == nil && !c.Tty && runtime.GOOS == "windows" {
@@ -73,7 +73,7 @@ func (c *Config) InitializeStdio(iop *libcontainerd.IOPipe) (containerd.IO, erro
 		}
 	}
 
-	return &cio{IO: iop, sc: c.StreamConfig}, nil
+	return &rio{IO: iop, sc: c.StreamConfig}, nil
 }
 
 // CloseStreams closes the stdio streams for the exec
diff --git a/daemon/start_unix.go b/daemon/start_unix.go
index a8402bb303..119b0a9240 100644
--- a/daemon/start_unix.go
+++ b/daemon/start_unix.go
@@ -7,7 +7,7 @@ import (
 	"os/exec"
 	"path/filepath"
 
-	"github.com/containerd/containerd/linux/runcopts"
+	"github.com/containerd/containerd/linux/runctypes"
 	"github.com/docker/docker/container"
 	"github.com/pkg/errors"
 )
@@ -42,7 +42,7 @@ func (daemon *Daemon) getLibcontainerdCreateOptions(container *container.Contain
 	if err != nil {
 		return nil, err
 	}
-	opts := &runcopts.RuncOptions{
+	opts := &runctypes.RuncOptions{
 		Runtime: path,
 		RuntimeRoot: filepath.Join(daemon.configStore.ExecRoot,
 			fmt.Sprintf("runtime-%s", container.HostConfig.Runtime)),
diff --git a/hack/dockerfile/binaries-commits b/hack/dockerfile/binaries-commits
index 1a20352aa0..1b38076e88 100644
--- a/hack/dockerfile/binaries-commits
+++ b/hack/dockerfile/binaries-commits
@@ -4,7 +4,7 @@ TOMLV_COMMIT=9baf8a8a9f2ed20a8e54160840c492f937eeaf9a
 
 # When updating RUNC_COMMIT, also update runc in vendor.conf accordingly
 RUNC_COMMIT=b2567b37d7b75eb4cf325b77297b140ea686ce8f
-CONTAINERD_COMMIT=v1.0.0-beta.3
+CONTAINERD_COMMIT=6bff39c643886dfa3d546e83a90a527b64ddeacf
 TINI_COMMIT=949e6facb77383876aeff8a6944dde66b3089574
 LIBNETWORK_COMMIT=7b2b1feb1de4817d522cc372af149ff48d25028e
 VNDR_COMMIT=a6e196d8b4b0cbbdc29aebdb20c59ac6926bb384
diff --git a/hack/dockerfile/install-binaries.sh b/hack/dockerfile/install-binaries.sh
index e97385ebcc..160ff1a1da 100755
--- a/hack/dockerfile/install-binaries.sh
+++ b/hack/dockerfile/install-binaries.sh
@@ -48,7 +48,7 @@ install_containerd_static() {
 	git checkout -q "$CONTAINERD_COMMIT"
 	(
 		export GOPATH
-		make EXTRA_FLAGS="-buildmode pie" EXTRA_LDFLAGS="-extldflags \\\"-fno-PIC -static\\\""
+		make BUILDTAGS='static_build' EXTRA_FLAGS="-buildmode pie" EXTRA_LDFLAGS='-extldflags "-fno-PIC -static"'
 	)
 	cp bin/containerd /usr/local/bin/docker-containerd
 	cp bin/containerd-shim /usr/local/bin/docker-containerd-shim
diff --git a/libcontainerd/client_daemon.go b/libcontainerd/client_daemon.go
index f1b5f011f8..5c26bc257e 100644
--- a/libcontainerd/client_daemon.go
+++ b/libcontainerd/client_daemon.go
@@ -21,12 +21,14 @@ import (
 	"google.golang.org/grpc/status"
 
 	"github.com/containerd/containerd"
+	"github.com/containerd/containerd/api/events"
 	eventsapi "github.com/containerd/containerd/api/services/events/v1"
 	"github.com/containerd/containerd/api/types"
 	"github.com/containerd/containerd/archive"
+	"github.com/containerd/containerd/cio"
 	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/images"
-	"github.com/containerd/containerd/linux/runcopts"
+	"github.com/containerd/containerd/linux/runctypes"
 	"github.com/containerd/typeurl"
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/opencontainers/image-spec/specs-go/v1"
@@ -70,7 +72,7 @@ func (c *client) Restore(ctx context.Context, id string, attachStdio StdioCallba
 	c.Lock()
 	defer c.Unlock()
 
-	var cio containerd.IO
+	var rio cio.IO
 	defer func() {
 		err = wrapError(err)
 	}()
@@ -81,20 +83,20 @@ func (c *client) Restore(ctx context.Context, id string, attachStdio StdioCallba
 	}
 
 	defer func() {
-		if err != nil && cio != nil {
-			cio.Cancel()
-			cio.Close()
+		if err != nil && rio != nil {
+			rio.Cancel()
+			rio.Close()
 		}
 	}()
 
-	t, err := ctr.Task(ctx, func(fifos *containerd.FIFOSet) (containerd.IO, error) {
+	t, err := ctr.Task(ctx, func(fifos *cio.FIFOSet) (cio.IO, error) {
 		io, err := newIOPipe(fifos)
 		if err != nil {
 			return nil, err
 		}
 
-		cio, err = attachStdio(io)
-		return cio, err
+		rio, err = attachStdio(io)
+		return rio, err
 	})
 	if err != nil && !strings.Contains(err.Error(), "no running task found") {
 		return false, -1, err
@@ -168,7 +170,7 @@ func (c *client) Start(ctx context.Context, id, checkpointDir string, withStdin
 	var (
 		cp             *types.Descriptor
 		t              containerd.Task
-		cio            containerd.IO
+		rio            cio.IO
 		err            error
 		stdinCloseSync = make(chan struct{})
 	)
@@ -203,14 +205,14 @@ func (c *client) Start(ctx context.Context, id, checkpointDir string, withStdin
 	}
 	uid, gid := getSpecUser(spec)
 	t, err = ctr.ctr.NewTask(ctx,
-		func(id string) (containerd.IO, error) {
+		func(id string) (cio.IO, error) {
 			fifos := newFIFOSet(ctr.bundleDir, id, InitProcessName, withStdin, spec.Process.Terminal)
-			cio, err = c.createIO(fifos, id, InitProcessName, stdinCloseSync, attachStdio)
-			return cio, err
+			rio, err = c.createIO(fifos, id, InitProcessName, stdinCloseSync, attachStdio)
+			return rio, err
 		},
 		func(_ context.Context, _ *containerd.Client, info *containerd.TaskInfo) error {
 			info.Checkpoint = cp
-			info.Options = &runcopts.CreateOptions{
+			info.Options = &runctypes.CreateOptions{
 				IoUid: uint32(uid),
 				IoGid: uint32(gid),
 			}
@@ -218,9 +220,9 @@ func (c *client) Start(ctx context.Context, id, checkpointDir string, withStdin
 		})
 	if err != nil {
 		close(stdinCloseSync)
-		if cio != nil {
-			cio.Cancel()
-			cio.Close()
+		if rio != nil {
+			rio.Cancel()
+			rio.Close()
 		}
 		return -1, err
 	}
@@ -259,7 +261,7 @@ func (c *client) Exec(ctx context.Context, containerID, processID string, spec *
 	var (
 		p              containerd.Process
-		cio            containerd.IO
+		rio            cio.IO
 		err            error
 		stdinCloseSync = make(chan struct{})
 	)
 
@@ -268,23 +270,23 @@ func (c *client) Exec(ctx context.Context, containerID, processID string, spec *
 
 	defer func() {
 		if err != nil {
-			if cio != nil {
-				cio.Cancel()
-				cio.Close()
+			if rio != nil {
+				rio.Cancel()
+				rio.Close()
 			}
 			rmFIFOSet(fifos)
 		}
 	}()
 
-	p, err = ctr.task.Exec(ctx, processID, spec, func(id string) (containerd.IO, error) {
-		cio, err = c.createIO(fifos, containerID, processID, stdinCloseSync, attachStdio)
-		return cio, err
+	p, err = ctr.task.Exec(ctx, processID, spec, func(id string) (cio.IO, error) {
+		rio, err = c.createIO(fifos, containerID, processID, stdinCloseSync, attachStdio)
+		return rio, err
 	})
 	if err != nil {
 		close(stdinCloseSync)
-		if cio != nil {
-			cio.Cancel()
-			cio.Close()
+		if rio != nil {
+			rio.Cancel()
+			rio.Close()
 		}
 		return -1, err
 	}
@@ -569,7 +571,7 @@ func (c *client) getProcess(containerID, processID string) (containerd.Process,
 
 // createIO creates the io to be used by a process
 // This needs to get a pointer to interface as upon closure the process may not have yet been registered
-func (c *client) createIO(fifos *containerd.FIFOSet, containerID, processID string, stdinCloseSync chan struct{}, attachStdio StdioCallback) (containerd.IO, error) {
+func (c *client) createIO(fifos *cio.FIFOSet, containerID, processID string, stdinCloseSync chan struct{}, attachStdio StdioCallback) (cio.IO, error) {
 	io, err := newIOPipe(fifos)
 	if err != nil {
 		return nil, err
@@ -601,12 +603,12 @@ func (c *client) createIO(fifos *containerd.FIFOSet, containerID, processID stri
 		})
 	}
 
-	cio, err := attachStdio(io)
+	rio, err := attachStdio(io)
 	if err != nil {
 		io.Cancel()
 		io.Close()
 	}
-	return cio, err
+	return rio, err
 }
 
 func (c *client) processEvent(ctr *container, et EventType, ei EventInfo) {
@@ -710,21 +712,21 @@ func (c *client) processEventStream(ctx context.Context) {
 			c.logger.WithField("topic", ev.Topic).Debug("event")
 
 			switch t := v.(type) {
-			case *eventsapi.TaskCreate:
+			case *events.TaskCreate:
 				et = EventCreate
 				ei = EventInfo{
 					ContainerID: t.ContainerID,
 					ProcessID:   t.ContainerID,
 					Pid:         t.Pid,
 				}
-			case *eventsapi.TaskStart:
+			case *events.TaskStart:
 				et = EventStart
 				ei = EventInfo{
 					ContainerID: t.ContainerID,
 					ProcessID:   t.ContainerID,
 					Pid:         t.Pid,
 				}
-			case *eventsapi.TaskExit:
+			case *events.TaskExit:
 				et = EventExit
 				ei = EventInfo{
 					ContainerID: t.ContainerID,
@@ -733,32 +735,32 @@ func (c *client) processEventStream(ctx context.Context) {
 					ExitCode:    t.ExitStatus,
 					ExitedAt:    t.ExitedAt,
 				}
-			case *eventsapi.TaskOOM:
+			case *events.TaskOOM:
 				et = EventOOM
 				ei = EventInfo{
 					ContainerID: t.ContainerID,
 					OOMKilled:   true,
 				}
 				oomKilled = true
-			case *eventsapi.TaskExecAdded:
+			case *events.TaskExecAdded:
 				et = EventExecAdded
 				ei = EventInfo{
 					ContainerID: t.ContainerID,
 					ProcessID:   t.ExecID,
 				}
-			case *eventsapi.TaskExecStarted:
+			case *events.TaskExecStarted:
 				et = EventExecStarted
 				ei = EventInfo{
 					ContainerID: t.ContainerID,
 					ProcessID:   t.ExecID,
 					Pid:         t.Pid,
 				}
-			case *eventsapi.TaskPaused:
+			case *events.TaskPaused:
 				et = EventPaused
 				ei = EventInfo{
 					ContainerID: t.ContainerID,
 				}
-			case *eventsapi.TaskResumed:
+			case *events.TaskResumed:
 				et = EventResumed
 				ei = EventInfo{
 					ContainerID: t.ContainerID,
diff --git a/libcontainerd/client_daemon_linux.go b/libcontainerd/client_daemon_linux.go
index 14966f00b1..9a98fbdf13 100644
--- a/libcontainerd/client_daemon_linux.go
+++ b/libcontainerd/client_daemon_linux.go
@@ -8,6 +8,7 @@ import (
 	"strings"
 
 	"github.com/containerd/containerd"
+	"github.com/containerd/containerd/cio"
 	"github.com/docker/docker/pkg/idtools"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/sirupsen/logrus"
@@ -79,8 +80,8 @@ func prepareBundleDir(bundleDir string, ociSpec *specs.Spec) (string, error) {
 	return p, nil
 }
 
-func newFIFOSet(bundleDir, containerID, processID string, withStdin, withTerminal bool) *containerd.FIFOSet {
-	fifos := &containerd.FIFOSet{
+func newFIFOSet(bundleDir, containerID, processID string, withStdin, withTerminal bool) *cio.FIFOSet {
+	fifos := &cio.FIFOSet{
 		Terminal: withTerminal,
 		Out:      filepath.Join(bundleDir, processID+"-stdout"),
 	}
@@ -96,7 +97,7 @@ func newFIFOSet(bundleDir, containerID, processID string, withStdin, withTermina
 	return fifos
 }
 
-func rmFIFOSet(fset *containerd.FIFOSet) {
+func rmFIFOSet(fset *cio.FIFOSet) {
 	for _, fn := range []string{fset.Out, fset.In, fset.Err} {
 		if fn != "" {
 			if err := os.RemoveAll(fn); err != nil {
diff --git a/libcontainerd/client_daemon_windows.go b/libcontainerd/client_daemon_windows.go
index 9bb5d86f44..10309cd1f4 100644
--- a/libcontainerd/client_daemon_windows.go
+++ b/libcontainerd/client_daemon_windows.go
@@ -3,7 +3,7 @@ package libcontainerd
 import (
 	"fmt"
 
-	"github.com/containerd/containerd"
+	"github.com/containerd/containerd/cio"
 	"github.com/containerd/containerd/windows/hcsshimtypes"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
@@ -35,8 +35,8 @@ func pipeName(containerID, processID, name string) string {
 	return fmt.Sprintf(`\\.\pipe\containerd-%s-%s-%s`, containerID, processID, name)
 }
 
-func newFIFOSet(bundleDir, containerID, processID string, withStdin, withTerminal bool) *containerd.FIFOSet {
-	fifos := &containerd.FIFOSet{
+func newFIFOSet(bundleDir, containerID, processID string, withStdin, withTerminal bool) *cio.FIFOSet {
+	fifos := &cio.FIFOSet{
 		Terminal: withTerminal,
 		Out:      pipeName(containerID, processID, "stdout"),
 	}
diff --git a/libcontainerd/io.go b/libcontainerd/io.go
index 2c4af58ce9..25a894b078 100644
--- a/libcontainerd/io.go
+++ b/libcontainerd/io.go
@@ -1,9 +1,9 @@
 package libcontainerd
 
-import "github.com/containerd/containerd"
+import "github.com/containerd/containerd/cio"
 
 // Config returns the containerd.IOConfig of this pipe set
-func (p *IOPipe) Config() containerd.IOConfig {
+func (p *IOPipe) Config() cio.Config {
 	return p.config
 }
 
diff --git a/libcontainerd/io_unix.go b/libcontainerd/io_unix.go
index 0c08b20136..8e597914ee 100644
--- a/libcontainerd/io_unix.go
+++ b/libcontainerd/io_unix.go
@@ -7,12 +7,12 @@ import (
 	"io"
 	"syscall"
 
-	"github.com/containerd/containerd"
+	"github.com/containerd/containerd/cio"
 	"github.com/containerd/fifo"
 	"github.com/pkg/errors"
 )
 
-func newIOPipe(fifos *containerd.FIFOSet) (*IOPipe, error) {
+func newIOPipe(fifos *cio.FIFOSet) (*IOPipe, error) {
 	var (
 		err         error
 		ctx, cancel = context.WithCancel(context.Background())
@@ -20,7 +20,7 @@ func newIOPipe(fifos *containerd.FIFOSet) (*IOPipe, error) {
 		iop         = &IOPipe{
 			Terminal: fifos.Terminal,
 			cancel:   cancel,
-			config: containerd.IOConfig{
+			config: cio.Config{
 				Terminal: fifos.Terminal,
 				Stdin:    fifos.In,
 				Stdout:   fifos.Out,
diff --git a/libcontainerd/io_windows.go b/libcontainerd/io_windows.go
index 312bdbd8cf..f2e5a93fe1 100644
--- a/libcontainerd/io_windows.go
+++ b/libcontainerd/io_windows.go
@@ -7,7 +7,7 @@ import (
 	"sync"
 
 	winio "github.com/Microsoft/go-winio"
-	"github.com/containerd/containerd"
+	"github.com/containerd/containerd/cio"
 	"github.com/pkg/errors"
 )
 
@@ -90,7 +90,7 @@ func (wp *winpipe) Close() error {
 	}
 }
 
-func newIOPipe(fifos *containerd.FIFOSet) (*IOPipe, error) {
+func newIOPipe(fifos *cio.FIFOSet) (*IOPipe, error) {
 	var (
 		err         error
 		ctx, cancel = context.WithCancel(context.Background())
@@ -98,7 +98,7 @@ func newIOPipe(fifos *containerd.FIFOSet) (*IOPipe, error) {
 		iop         = &IOPipe{
 			Terminal: fifos.Terminal,
 			cancel:   cancel,
-			config: containerd.IOConfig{
+			config: cio.Config{
 				Terminal: fifos.Terminal,
 				Stdin:    fifos.In,
 				Stdout:   fifos.Out,
diff --git a/libcontainerd/remote_daemon_options_linux.go b/libcontainerd/remote_daemon_options_linux.go
index 1e5a98124a..7376c06119 100644
--- a/libcontainerd/remote_daemon_options_linux.go
+++ b/libcontainerd/remote_daemon_options_linux.go
@@ -27,7 +27,7 @@ type subreaper bool
 
 func (s subreaper) Apply(r Remote) error {
 	if remote, ok := r.(*remote); ok {
-		remote.Subreaper = bool(s)
+		remote.NoSubreaper = !bool(s)
 		return nil
 	}
 	return fmt.Errorf("WithSubreaper option not supported for this remote")
diff --git a/libcontainerd/types.go b/libcontainerd/types.go
index 9eede43a49..346fd241f1 100644
--- a/libcontainerd/types.go
+++ b/libcontainerd/types.go
@@ -6,6 +6,7 @@ import (
 	"time"
 
 	"github.com/containerd/containerd"
+	"github.com/containerd/containerd/cio"
 	"github.com/opencontainers/runtime-spec/specs-go"
 )
 
@@ -106,7 +107,7 @@ type Client interface {
 }
 
 // StdioCallback is called to connect a container or process stdio.
-type StdioCallback func(*IOPipe) (containerd.IO, error)
+type StdioCallback func(*IOPipe) (cio.IO, error)
 
 // IOPipe contains the stdio streams.
 type IOPipe struct {
@@ -116,7 +117,7 @@ type IOPipe struct {
 	Terminal bool // Whether stderr is connected on Windows
 
 	cancel context.CancelFunc
-	config containerd.IOConfig
+	config cio.Config
 }
 
 // ServerVersion contains version information as retrieved from the
diff --git a/plugin/executor/containerd/containerd.go b/plugin/executor/containerd/containerd.go
index d93b8b75ec..98394679d5 100644
--- a/plugin/executor/containerd/containerd.go
+++ b/plugin/executor/containerd/containerd.go
@@ -6,8 +6,8 @@ import (
 	"path/filepath"
 	"sync"
 
-	"github.com/containerd/containerd"
-	"github.com/containerd/containerd/linux/runcopts"
+	"github.com/containerd/containerd/cio"
+	"github.com/containerd/containerd/linux/runctypes"
 	"github.com/docker/docker/api/errdefs"
 	"github.com/docker/docker/libcontainerd"
 	"github.com/opencontainers/runtime-spec/specs-go"
@@ -46,7 +46,7 @@ type Executor struct {
 
 // Create creates a new container
 func (e *Executor) Create(id string, spec specs.Spec, stdout, stderr io.WriteCloser) error {
-	opts := runcopts.RuncOptions{
+	opts := runctypes.RuncOptions{
 		RuntimeRoot: filepath.Join(e.rootDir, "runtime-root"),
 	}
 	ctx := context.Background()
@@ -110,37 +110,37 @@ func (e *Executor) ProcessEvent(id string, et libcontainerd.EventType, ei libcon
 	return nil
 }
 
-type cio struct {
-	containerd.IO
+type rio struct {
+	cio.IO
 
 	wg sync.WaitGroup
 }
 
-func (c *cio) Wait() {
+func (c *rio) Wait() {
 	c.wg.Wait()
 	c.IO.Wait()
 }
 
 func attachStreamsFunc(stdout, stderr io.WriteCloser) libcontainerd.StdioCallback {
-	return func(iop *libcontainerd.IOPipe) (containerd.IO, error) {
+	return func(iop *libcontainerd.IOPipe) (cio.IO, error) {
 		if iop.Stdin != nil {
 			iop.Stdin.Close()
 			// closing stdin shouldn't be needed here, it should never be open
 			panic("plugin stdin shouldn't have been created!")
 		}
 
-		cio := &cio{IO: iop}
-		cio.wg.Add(2)
+		rio := &rio{IO: iop}
+		rio.wg.Add(2)
 
 		go func() {
 			io.Copy(stdout, iop.Stdout)
 			stdout.Close()
-			cio.wg.Done()
+			rio.wg.Done()
 		}()
 
 		go func() {
 			io.Copy(stderr, iop.Stderr)
 			stderr.Close()
-			cio.wg.Done()
+			rio.wg.Done()
 		}()
-		return cio, nil
+		return rio, nil
 	}
 }
diff --git a/vendor.conf b/vendor.conf
index 10aa633c97..84945ec155 100644
--- a/vendor.conf
+++ b/vendor.conf
@@ -103,14 +103,15 @@ github.com/googleapis/gax-go da06d194a00e19ce00d9011a13931c3f6f6887c7
 google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
 
 # containerd
-github.com/containerd/containerd v1.0.0-beta.3
+github.com/containerd/containerd 6bff39c643886dfa3d546e83a90a527b64ddeacf
 github.com/containerd/fifo fbfb6a11ec671efbe94ad1c12c2e98773f19e1e6
 github.com/containerd/continuity 35d55c5e8dd23b32037d56cf97174aff3efdfa83
-github.com/containerd/cgroups f7dd103d3e4e696aa67152f6b4ddd1779a3455a9
+github.com/containerd/cgroups 29da22c6171a4316169f9205ab6c49f59b5b852f
 github.com/containerd/console 84eeaae905fa414d03e07bcd6c8d3f19e7cf180e
 github.com/containerd/go-runc ed1cbe1fc31f5fb2359d3a54b6330d1a097858b7
 github.com/containerd/typeurl f6943554a7e7e88b3c14aad190bf05932da84788
-github.com/dmcgowan/go-tar 2e2c51242e8993c50445dab7c03c8e7febddd0cf
+github.com/dmcgowan/go-tar go1.10
+github.com/stevvooe/ttrpc 8c92e22ce0c492875ccaac3ab06143a77d8ed0c1
 
 # cluster
 github.com/docker/swarmkit de950a7ed842c7b7e47e9451cde9bf8f96031894
diff --git a/vendor/github.com/containerd/cgroups/cgroup.go b/vendor/github.com/containerd/cgroups/cgroup.go
index d9fa75f311..d1c36bde37 100644
--- a/vendor/github.com/containerd/cgroups/cgroup.go
+++ b/vendor/github.com/containerd/cgroups/cgroup.go
@@ -310,7 +310,8 @@ func (c *cgroup) Thaw() error {
 }
 
 // OOMEventFD returns the memory cgroup's out of memory event fd that triggers
-// when processes inside the cgroup receive an oom event
+// when processes inside the cgroup receive an oom event. Returns
+// ErrMemoryNotSupported if memory cgroups is not supported.
 func (c *cgroup) OOMEventFD() (uintptr, error) {
 	c.mu.Lock()
 	defer c.mu.Unlock()
diff --git a/vendor/github.com/containerd/containerd/README.md b/vendor/github.com/containerd/containerd/README.md
index 3832446b2f..6e59def117 100644
--- a/vendor/github.com/containerd/containerd/README.md
+++ b/vendor/github.com/containerd/containerd/README.md
@@ -78,7 +78,7 @@ containerd fully supports the OCI runtime specification for running containers.
 You can specify options when creating a container about how to modify the specification.
 
 ```go
-redis, err := client.NewContainer(context, "redis-master", containerd.WithNewSpec(containerd.WithImageConfig(image)))
+redis, err := client.NewContainer(context, "redis-master", containerd.WithNewSpec(oci.WithImageConfig(image)))
 ```
 
 ### Root Filesystems
@@ -92,7 +92,7 @@ image, err := client.Pull(context, "docker.io/library/redis:latest", containerd.
 // allocate a new RW root filesystem for a container based on the image
 redis, err := client.NewContainer(context, "redis-master",
 	containerd.WithNewSnapshot("redis-rootfs", image),
-	containerd.WithNewSpec(containerd.WithImageConfig(image)),
+	containerd.WithNewSpec(oci.WithImageConfig(image)),
 )
 
@@ -101,7 +101,7 @@ for i := 0; i < 10; i++ {
 	id := fmt.Sprintf("id-%s", i)
 	container, err := client.NewContainer(ctx, id,
 		containerd.WithNewSnapshotView(id, image),
-		containerd.WithNewSpec(containerd.WithImageConfig(image)),
+		containerd.WithNewSpec(oci.WithImageConfig(image)),
 	)
 }
 ```
@@ -158,11 +158,11 @@ To build the daemon and `ctr` simple test client, the following build system dep
 * Protoc 3.x compiler and headers (download at the [Google protobuf releases page](https://github.com/google/protobuf/releases))
 * Btrfs headers and libraries for your distribution. Note that building the btrfs driver can be disabled via build tag removing this dependency.
 
-For proper results, install the `protoc` release into `/usr/local` on your build system. For example, the following commands will download and install the 3.1.0 release for a 64-bit Linux host:
+For proper results, install the `protoc` release into `/usr/local` on your build system. For example, the following commands will download and install the 3.5.0 release for a 64-bit Linux host:
 
 ```
-$ wget -c https://github.com/google/protobuf/releases/download/v3.1.0/protoc-3.1.0-linux-x86_64.zip
-$ sudo unzip protoc-3.1.0-linux-x86_64.zip -d /usr/local
+$ wget -c https://github.com/google/protobuf/releases/download/v3.5.0/protoc-3.5.0-linux-x86_64.zip
+$ sudo unzip protoc-3.5.0-linux-x86_64.zip -d /usr/local
 ```
 
 With the required dependencies installed, the `Makefile` target named **binaries** will compile the `ctr` and `containerd` binaries and place them in the `bin/` directory. Using `sudo make install` will place the binaries in `/usr/local/bin`. When making any changes to the gRPC API, `make generate` will use the installed `protoc` compiler to regenerate the API generated code packages.
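Editorial note, not part of the patch: the recurring change in the Go files above is the rename of the local wrapper type from `cio` to `rio`, needed because the identifier now collides with the imported `github.com/containerd/containerd/cio` package. The sketch below illustrates that wrapper pattern in isolation; the local `IO` interface and `streamConfig` type are hypothetical stand-ins for `cio.IO` and `stream.Config` (only `Close` and `Wait`, the two methods the wrapper touches, are modeled) so that the snippet compiles and runs on its own.

```go
package main

import (
	"fmt"
	"sync"
)

// IO stands in for the subset of cio.IO that the wrapper relies on.
type IO interface {
	Close() error
	Wait()
}

// streamConfig is a placeholder for stream.Config: it owns the daemon-side
// stream copiers that must be drained and closed alongside the containerd pipes.
type streamConfig struct {
	wg sync.WaitGroup
}

func (s *streamConfig) Wait()               { s.wg.Wait() }
func (s *streamConfig) CloseStreams() error { return nil }

// rio mirrors the wrapper added in container/container.go and
// daemon/exec/exec.go: it embeds the containerd IO and ties its lifecycle
// to the daemon's stream config.
type rio struct {
	IO
	sc *streamConfig
}

// Close shuts the containerd pipes first, then the daemon streams.
func (r *rio) Close() error {
	r.IO.Close()
	return r.sc.CloseStreams()
}

// Wait blocks until the daemon copiers finish, then until the pipes drain.
func (r *rio) Wait() {
	r.sc.Wait()
	r.IO.Wait()
}

// nopIO is a trivial IO implementation so the example is self-contained.
type nopIO struct{}

func (nopIO) Close() error { return nil }
func (nopIO) Wait()        {}

func main() {
	w := &rio{IO: nopIO{}, sc: &streamConfig{}}
	w.Wait()
	fmt.Println("close:", w.Close())
}
```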
diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/container.pb.go b/vendor/github.com/containerd/containerd/api/events/container.pb.go similarity index 79% rename from vendor/github.com/containerd/containerd/api/services/events/v1/container.pb.go rename to vendor/github.com/containerd/containerd/api/events/container.pb.go index 420aba0369..b05a402bb0 100644 --- a/vendor/github.com/containerd/containerd/api/services/events/v1/container.pb.go +++ b/vendor/github.com/containerd/containerd/api/events/container.pb.go @@ -1,28 +1,22 @@ -// Code generated by protoc-gen-gogo. -// source: github.com/containerd/containerd/api/services/events/v1/container.proto -// DO NOT EDIT! +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/containerd/containerd/api/events/container.proto /* Package events is a generated protocol buffer package. It is generated from these files: - github.com/containerd/containerd/api/services/events/v1/container.proto - github.com/containerd/containerd/api/services/events/v1/content.proto - github.com/containerd/containerd/api/services/events/v1/events.proto - github.com/containerd/containerd/api/services/events/v1/image.proto - github.com/containerd/containerd/api/services/events/v1/namespace.proto - github.com/containerd/containerd/api/services/events/v1/snapshot.proto - github.com/containerd/containerd/api/services/events/v1/task.proto + github.com/containerd/containerd/api/events/container.proto + github.com/containerd/containerd/api/events/content.proto + github.com/containerd/containerd/api/events/image.proto + github.com/containerd/containerd/api/events/namespace.proto + github.com/containerd/containerd/api/events/snapshot.proto + github.com/containerd/containerd/api/events/task.proto It has these top-level messages: ContainerCreate ContainerUpdate ContainerDelete ContentDelete - PublishRequest - ForwardRequest - SubscribeRequest - Envelope ImageCreate ImageUpdate ImageDelete @@ -49,9 +43,10 @@ package events import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" -import _ "github.com/gogo/protobuf/gogoproto" -import google_protobuf1 "github.com/gogo/protobuf/types" -import _ "github.com/containerd/containerd/protobuf/plugin" +import google_protobuf "github.com/gogo/protobuf/types" + +// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" +// skipping weak import containerd_plugin "github.com/containerd/containerd/protobuf/plugin" import github_com_containerd_typeurl "github.com/containerd/typeurl" @@ -83,8 +78,8 @@ func (*ContainerCreate) ProtoMessage() {} func (*ContainerCreate) Descriptor() ([]byte, []int) { return fileDescriptorContainer, []int{0} } type ContainerCreate_Runtime struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Options *google_protobuf1.Any `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Options *google_protobuf.Any `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` } func (m *ContainerCreate_Runtime) Reset() { *m = ContainerCreate_Runtime{} } @@ -113,10 +108,10 @@ func (*ContainerDelete) ProtoMessage() {} func (*ContainerDelete) Descriptor() ([]byte, []int) { return fileDescriptorContainer, []int{2} } func init() { - proto.RegisterType((*ContainerCreate)(nil), "containerd.services.events.v1.ContainerCreate") - proto.RegisterType((*ContainerCreate_Runtime)(nil), "containerd.services.events.v1.ContainerCreate.Runtime") 
- proto.RegisterType((*ContainerUpdate)(nil), "containerd.services.events.v1.ContainerUpdate") - proto.RegisterType((*ContainerDelete)(nil), "containerd.services.events.v1.ContainerDelete") + proto.RegisterType((*ContainerCreate)(nil), "containerd.events.ContainerCreate") + proto.RegisterType((*ContainerCreate_Runtime)(nil), "containerd.events.ContainerCreate.Runtime") + proto.RegisterType((*ContainerUpdate)(nil), "containerd.events.ContainerUpdate") + proto.RegisterType((*ContainerDelete)(nil), "containerd.events.ContainerDelete") } // Field returns the value for the given fieldpath as a string, if defined. @@ -364,24 +359,6 @@ func (m *ContainerDelete) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Container(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Container(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintContainer(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -490,7 +467,7 @@ func (this *ContainerCreate_Runtime) String() string { } s := strings.Join([]string{`&ContainerCreate_Runtime{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "google_protobuf1.Any", 1) + `,`, + `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "google_protobuf.Any", 1) + `,`, `}`, }, "") return s @@ -762,7 +739,7 @@ func (m *ContainerCreate_Runtime) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Options == nil { - m.Options = &google_protobuf1.Any{} + m.Options = &google_protobuf.Any{} } if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -902,51 +879,14 @@ func (m *ContainerUpdate) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainer - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainer - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthContainer - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Labels == nil { m.Labels = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowContainer @@ -956,41 +896,80 @@ func (m *ContainerUpdate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << 
shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainer + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthContainer + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthContainer + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipContainer(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContainer + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthContainer - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Labels[mapkey] = mapvalue - } else { - var mapvalue string - m.Labels[mapkey] = mapvalue } + m.Labels[mapkey] = mapvalue iNdEx = postIndex case 4: if wireType != 2 { @@ -1227,36 +1206,35 @@ var ( ) func init() { - proto.RegisterFile("github.com/containerd/containerd/api/services/events/v1/container.proto", fileDescriptorContainer) + proto.RegisterFile("github.com/containerd/containerd/api/events/container.proto", fileDescriptorContainer) } var fileDescriptorContainer = []byte{ - // 427 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x52, 0x5d, 0x8b, 0xd4, 0x30, - 0x14, 0xdd, 0x74, 0xd6, 0x19, 0x4c, 0x05, 0x25, 0x0c, 0x52, 0x07, 0xac, 0xe3, 0x3c, 0x8d, 0x2f, - 0x09, 0x3b, 0x82, 0xe8, 0x0a, 0xa2, 0xbb, 0x2b, 0x22, 0x2a, 0x48, 0xc0, 0x17, 0x11, 0x24, 0x9d, - 0xde, 0xed, 0x04, 0xdb, 0xa4, 0xb4, 0x69, 0xa1, 0x6f, 0xfe, 0x14, 0x7f, 0xce, 0x3e, 0xfa, 0xe8, - 0x93, 0xec, 0xf6, 0x1f, 0x08, 0xfe, 0x00, 0x69, 0x32, 0xdd, 0x2d, 0x82, 0x9f, 0x6f, 0xe7, 0xe6, - 0x9e, 0x73, 0xef, 0x39, 0xb7, 0xc5, 0xcf, 0x12, 0x69, 0x36, 0x55, 0x44, 0xd7, 0x3a, 0x63, 0x6b, - 0xad, 0x8c, 0x90, 0x0a, 0x8a, 0x78, 0x08, 0x45, 0x2e, 0x59, 0x09, 0x45, 0x2d, 0xd7, 0x50, 0x32, - 0xa8, 0x41, 0x99, 0x92, 0xd5, 0x7b, 0x17, 0x0c, 0x9a, 0x17, 0xda, 0x68, 0x72, 0xf3, 0x42, 0x42, - 0x7b, 0x3a, 0x75, 
0x74, 0x5a, 0xef, 0xcd, 0xa6, 0x89, 0x4e, 0xb4, 0x65, 0xb2, 0x0e, 0x39, 0xd1, - 0xec, 0x46, 0xa2, 0x75, 0x92, 0x02, 0xb3, 0x55, 0x54, 0x1d, 0x33, 0xa1, 0x9a, 0x6d, 0xeb, 0xf1, - 0x1f, 0x8d, 0x9d, 0x8b, 0xf2, 0xb4, 0x4a, 0xa4, 0x62, 0xc7, 0x12, 0xd2, 0x38, 0x17, 0x66, 0xe3, - 0x26, 0x2c, 0x4e, 0x11, 0xbe, 0x7a, 0xd8, 0xd3, 0x0f, 0x0b, 0x10, 0x06, 0xc8, 0x75, 0xec, 0xc9, - 0x38, 0x40, 0x73, 0xb4, 0xbc, 0x7c, 0x30, 0x6e, 0xbf, 0xde, 0xf2, 0x9e, 0x1f, 0x71, 0x4f, 0xc6, - 0x64, 0x8a, 0x2f, 0xc9, 0x4c, 0x24, 0x10, 0x78, 0x5d, 0x8b, 0xbb, 0x82, 0xbc, 0xc6, 0x93, 0xa2, - 0x52, 0x46, 0x66, 0x10, 0x8c, 0xe6, 0x68, 0xe9, 0xaf, 0xee, 0xd1, 0xdf, 0xa6, 0xa4, 0x3f, 0xad, - 0xa3, 0xdc, 0xa9, 0x79, 0x3f, 0x66, 0xf6, 0x0a, 0x4f, 0xb6, 0x6f, 0x84, 0xe0, 0x5d, 0x25, 0x32, - 0x70, 0x66, 0xb8, 0xc5, 0x84, 0xe2, 0x89, 0xce, 0x8d, 0xd4, 0xaa, 0xb4, 0x46, 0xfc, 0xd5, 0x94, - 0xba, 0x0b, 0xd1, 0x3e, 0x2c, 0x7d, 0xa2, 0x1a, 0xde, 0x93, 0x16, 0xdf, 0x86, 0x11, 0xdf, 0xe4, - 0xf1, 0xbf, 0x47, 0xe4, 0x78, 0x9c, 0x8a, 0x08, 0xd2, 0x32, 0x18, 0xcd, 0x47, 0x4b, 0x7f, 0xb5, - 0xff, 0xb7, 0x09, 0xdd, 0x36, 0xfa, 0xd2, 0x8a, 0x9f, 0x2a, 0x53, 0x34, 0x7c, 0x3b, 0x89, 0xdc, - 0xc6, 0x57, 0x4a, 0x25, 0xf2, 0x72, 0xa3, 0xcd, 0xfb, 0x0f, 0xd0, 0x04, 0xbb, 0x76, 0xa1, 0xdf, - 0xbf, 0xbd, 0x80, 0x66, 0xf6, 0x00, 0xfb, 0x03, 0x25, 0xb9, 0x86, 0x47, 0x1d, 0xd1, 0x9d, 0xa2, - 0x83, 0x9d, 0xdb, 0x5a, 0xa4, 0xd5, 0xb9, 0x5b, 0x5b, 0xec, 0x7b, 0xf7, 0xd1, 0xe2, 0xce, 0x20, - 0xf2, 0x11, 0xa4, 0xf0, 0xeb, 0xc8, 0x07, 0xef, 0x4e, 0xce, 0xc2, 0x9d, 0x2f, 0x67, 0xe1, 0xce, - 0xc7, 0x36, 0x44, 0x27, 0x6d, 0x88, 0x3e, 0xb7, 0x21, 0x3a, 0x6d, 0x43, 0xf4, 0xe9, 0x7b, 0x88, - 0xde, 0x3e, 0xfa, 0xcf, 0x5f, 0xff, 0xa1, 0x43, 0xd1, 0xd8, 0x7e, 0x93, 0xbb, 0x3f, 0x02, 0x00, - 0x00, 0xff, 0xff, 0x26, 0x0d, 0x55, 0x1c, 0x43, 0x03, 0x00, 0x00, + // 413 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x92, 0xc1, 0x0a, 0xd3, 0x30, + 0x18, 0xc7, 0x97, 0x76, 0x6e, 0x98, 0x0a, 0x6a, 0x18, 0x52, 0x7b, 0xa8, 0x73, 0xa7, 0xe9, 0x21, + 0x85, 0x7a, 0x51, 0x77, 0xd1, 0x6d, 0x0a, 0xa2, 0x82, 0x14, 0x84, 0xe1, 0x45, 0xd2, 0x35, 0xeb, + 0x82, 0x6d, 0x52, 0xda, 0x74, 0xd0, 0x9b, 0x8f, 0xe2, 0xe3, 0xec, 0xe8, 0xc1, 0x83, 0x27, 0x71, + 0x05, 0xdf, 0xc0, 0x07, 0x90, 0x26, 0xeb, 0x56, 0x14, 0x95, 0x9d, 0xfa, 0xcf, 0xd7, 0xff, 0x3f, + 0xdf, 0xf7, 0xfb, 0x08, 0x9c, 0xc5, 0x4c, 0x6e, 0xcb, 0x10, 0xaf, 0x45, 0xea, 0xad, 0x05, 0x97, + 0x84, 0x71, 0x9a, 0x47, 0x5d, 0x49, 0x32, 0xe6, 0xd1, 0x1d, 0xe5, 0xb2, 0x38, 0x57, 0x71, 0x96, + 0x0b, 0x29, 0xd0, 0xcd, 0xb3, 0x0d, 0x6b, 0x8b, 0x73, 0x3b, 0x16, 0x22, 0x4e, 0xa8, 0xa7, 0x0c, + 0x61, 0xb9, 0xf1, 0x08, 0xaf, 0xb4, 0xdb, 0x19, 0xc5, 0x22, 0x16, 0x4a, 0x7a, 0x8d, 0x3a, 0x56, + 0x9f, 0xfc, 0x77, 0x80, 0xd3, 0x55, 0x59, 0x52, 0xc6, 0x8c, 0x7b, 0x1b, 0x46, 0x93, 0x28, 0x23, + 0x72, 0xab, 0x6f, 0x98, 0x7c, 0x01, 0xf0, 0xfa, 0xa2, 0xb5, 0x2f, 0x72, 0x4a, 0x24, 0x45, 0xb7, + 0xa0, 0xc1, 0x22, 0x1b, 0x8c, 0xc1, 0xf4, 0xea, 0x7c, 0x50, 0x7f, 0xbb, 0x63, 0xbc, 0x58, 0x06, + 0x06, 0x8b, 0xd0, 0x08, 0x5e, 0x61, 0x29, 0x89, 0xa9, 0x6d, 0x34, 0xbf, 0x02, 0x7d, 0x40, 0x4b, + 0x38, 0xcc, 0x4b, 0x2e, 0x59, 0x4a, 0x6d, 0x73, 0x0c, 0xa6, 0x96, 0x7f, 0x1f, 0xff, 0x41, 0x86, + 0x7f, 0x6b, 0x81, 0x03, 0x9d, 0x08, 0xda, 0xa8, 0xf3, 0x1a, 0x0e, 0x8f, 0x35, 0x84, 0x60, 0x9f, + 0x93, 0x94, 0xea, 0x01, 0x02, 0xa5, 0x11, 0x86, 0x43, 0x91, 0x49, 0x26, 0x78, 0xa1, 0x9a, 0x5b, + 0xfe, 0x08, 0xeb, 0x5d, 0xe1, 0x16, 0x10, 0x3f, 0xe5, 0x55, 0xd0, 0x9a, 0x26, 0x3f, 0xba, 0x58, + 0x6f, 0xb3, 0xe8, 0x72, 
0xac, 0xe7, 0x70, 0x90, 0x90, 0x90, 0x26, 0x85, 0x6d, 0x8e, 0xcd, 0xa9, + 0xe5, 0xe3, 0x7f, 0x51, 0xe9, 0x0e, 0xf8, 0x95, 0x0a, 0x3c, 0xe3, 0x32, 0xaf, 0x82, 0x63, 0x1a, + 0xdd, 0x85, 0xd7, 0x0a, 0x4e, 0xb2, 0x62, 0x2b, 0xe4, 0xfb, 0x0f, 0xb4, 0xb2, 0xfb, 0xaa, 0x89, + 0xd5, 0xd6, 0x5e, 0xd2, 0xca, 0x79, 0x04, 0xad, 0x4e, 0x12, 0xdd, 0x80, 0x66, 0x63, 0xd4, 0xf8, + 0x8d, 0x6c, 0x26, 0xdc, 0x91, 0xa4, 0x3c, 0x4d, 0xa8, 0x0e, 0x8f, 0x8d, 0x87, 0x60, 0x72, 0xaf, + 0x83, 0xb9, 0xa4, 0x09, 0xfd, 0x3b, 0xe6, 0xfc, 0xcd, 0xfe, 0xe0, 0xf6, 0xbe, 0x1e, 0xdc, 0xde, + 0xc7, 0xda, 0x05, 0xfb, 0xda, 0x05, 0x9f, 0x6b, 0x17, 0x7c, 0xaf, 0x5d, 0xf0, 0xe9, 0xa7, 0x0b, + 0xde, 0xf9, 0x17, 0x3c, 0xe5, 0x99, 0xfe, 0xac, 0xc0, 0xca, 0x08, 0x07, 0x6a, 0xff, 0x0f, 0x7e, + 0x05, 0x00, 0x00, 0xff, 0xff, 0xf5, 0x09, 0xe0, 0xd6, 0x0b, 0x03, 0x00, 0x00, } diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/container.proto b/vendor/github.com/containerd/containerd/api/events/container.proto similarity index 65% rename from vendor/github.com/containerd/containerd/api/services/events/v1/container.proto rename to vendor/github.com/containerd/containerd/api/events/container.proto index ca8acb8b9b..13aa5848c0 100644 --- a/vendor/github.com/containerd/containerd/api/services/events/v1/container.proto +++ b/vendor/github.com/containerd/containerd/api/events/container.proto @@ -1,12 +1,12 @@ syntax = "proto3"; -package containerd.services.events.v1; +package containerd.events; -import "gogoproto/gogo.proto"; import "google/protobuf/any.proto"; -import "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; +import weak "gogoproto/gogo.proto"; +import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; -option go_package = "github.com/containerd/containerd/api/services/events/v1;events"; +option go_package = "github.com/containerd/containerd/api/events;events"; option (containerd.plugin.fieldpath_all) = true; message ContainerCreate { diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/content.pb.go b/vendor/github.com/containerd/containerd/api/events/content.pb.go similarity index 73% rename from vendor/github.com/containerd/containerd/api/services/events/v1/content.pb.go rename to vendor/github.com/containerd/containerd/api/events/content.pb.go index 6fc5d61b6a..87648d1938 100644 --- a/vendor/github.com/containerd/containerd/api/services/events/v1/content.pb.go +++ b/vendor/github.com/containerd/containerd/api/events/content.pb.go @@ -1,14 +1,14 @@ -// Code generated by protoc-gen-gogo. -// source: github.com/containerd/containerd/api/services/events/v1/content.proto -// DO NOT EDIT! +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: github.com/containerd/containerd/api/events/content.proto package events import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" -import _ "github.com/gogo/protobuf/gogoproto" -import _ "github.com/containerd/containerd/protobuf/plugin" + +// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" +// skipping weak import containerd_plugin "github.com/containerd/containerd/protobuf/plugin" import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest" @@ -31,7 +31,7 @@ func (*ContentDelete) ProtoMessage() {} func (*ContentDelete) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{0} } func init() { - proto.RegisterType((*ContentDelete)(nil), "containerd.services.events.v1.ContentDelete") + proto.RegisterType((*ContentDelete)(nil), "containerd.events.ContentDelete") } // Field returns the value for the given fieldpath as a string, if defined. @@ -71,24 +71,6 @@ func (m *ContentDelete) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Content(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Content(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintContent(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -324,25 +306,24 @@ var ( ) func init() { - proto.RegisterFile("github.com/containerd/containerd/api/services/events/v1/content.proto", fileDescriptorContent) + proto.RegisterFile("github.com/containerd/containerd/api/events/content.proto", fileDescriptorContent) } var fileDescriptorContent = []byte{ - // 242 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4d, 0xcf, 0x2c, 0xc9, + // 228 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, - 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x17, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb, - 0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0x97, 0x19, 0x82, 0x55, 0xa4, 0xe6, 0x95, 0xe8, 0x15, - 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0xc9, 0x22, 0x34, 0xe8, 0xc1, 0x14, 0xeb, 0x41, 0x14, 0xeb, 0x95, - 0x19, 0x4a, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x55, 0xea, 0x83, 0x58, 0x10, 0x4d, 0x52, 0x0e, - 0x04, 0xed, 0x06, 0xab, 0x4b, 0x2a, 0x4d, 0xd3, 0x2f, 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0xd3, 0x4f, - 0xcb, 0x4c, 0xcd, 0x49, 0x29, 0x48, 0x2c, 0xc9, 0x80, 0x98, 0xa0, 0x14, 0xcd, 0xc5, 0xeb, 0x0c, - 0x71, 0x87, 0x4b, 0x6a, 0x4e, 0x6a, 0x49, 0xaa, 0x90, 0x17, 0x17, 0x5b, 0x4a, 0x66, 0x7a, 0x6a, - 0x71, 0x89, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0xa7, 0x93, 0xd1, 0x89, 0x7b, 0xf2, 0x0c, 0xb7, 0xee, - 0xc9, 0x6b, 0x21, 0x59, 0x95, 0x5f, 0x90, 0x9a, 0x07, 0xb7, 0xa3, 0x58, 0x3f, 0x3d, 0x5f, 0x17, - 0xa2, 0x45, 0xcf, 0x05, 0x4c, 0x05, 0x41, 0x4d, 0x70, 0x8a, 0x39, 0xf1, 0x50, 0x8e, 0xe1, 0xc6, - 0x43, 0x39, 0x86, 0x86, 0x47, 0x72, 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, - 0xe0, 0x91, 0x1c, 0xe3, 0x82, 0x2f, 0x72, 0x8c, 0x51, 
0x76, 0x64, 0x06, 0x9c, 0x35, 0x84, 0x95, - 0xc4, 0x06, 0xf6, 0x81, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x51, 0xce, 0xec, 0x89, 0x81, 0x01, - 0x00, 0x00, + 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0x83, 0x45, 0x53, + 0xf3, 0x4a, 0xf4, 0x0a, 0x8a, 0xf2, 0x4b, 0xf2, 0x85, 0x04, 0x11, 0x8a, 0xf4, 0x20, 0x0a, 0xa4, + 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0xb2, 0xfa, 0x20, 0x16, 0x44, 0xa1, 0x94, 0x03, 0x41, 0x3b, + 0xc0, 0xea, 0x92, 0x4a, 0xd3, 0xf4, 0x0b, 0x72, 0x4a, 0xd3, 0x33, 0xf3, 0xf4, 0xd3, 0x32, 0x53, + 0x73, 0x52, 0x0a, 0x12, 0x4b, 0x32, 0x20, 0x26, 0x28, 0x45, 0x73, 0xf1, 0x3a, 0x43, 0xec, 0x76, + 0x49, 0xcd, 0x49, 0x2d, 0x49, 0x15, 0xf2, 0xe2, 0x62, 0x4b, 0xc9, 0x4c, 0x4f, 0x2d, 0x2e, 0x91, + 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x74, 0x32, 0x3a, 0x71, 0x4f, 0x9e, 0xe1, 0xd6, 0x3d, 0x79, 0x2d, + 0x24, 0xab, 0xf2, 0x0b, 0x52, 0xf3, 0xe0, 0x76, 0x14, 0xeb, 0xa7, 0xe7, 0xeb, 0x42, 0xb4, 0xe8, + 0xb9, 0x80, 0xa9, 0x20, 0xa8, 0x09, 0x4e, 0x01, 0x27, 0x1e, 0xca, 0x31, 0xdc, 0x78, 0x28, 0xc7, + 0xd0, 0xf0, 0x48, 0x8e, 0xf1, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, + 0x63, 0x5c, 0xf0, 0x45, 0x8e, 0x31, 0xca, 0x88, 0x84, 0x00, 0xb2, 0x86, 0x50, 0x11, 0x0c, 0x11, + 0x8c, 0x49, 0x6c, 0x60, 0x97, 0x1b, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x4b, 0x78, 0x99, 0xee, + 0x61, 0x01, 0x00, 0x00, } diff --git a/vendor/github.com/containerd/containerd/api/events/content.proto b/vendor/github.com/containerd/containerd/api/events/content.proto new file mode 100644 index 0000000000..aba50716f2 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/events/content.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; + +package containerd.events; + +import weak "gogoproto/gogo.proto"; +import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; + +option go_package = "github.com/containerd/containerd/api/events;events"; +option (containerd.plugin.fieldpath_all) = true; + +message ContentDelete { + string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; +} diff --git a/vendor/github.com/containerd/containerd/api/events/doc.go b/vendor/github.com/containerd/containerd/api/events/doc.go new file mode 100644 index 0000000000..ac1e83fb75 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/events/doc.go @@ -0,0 +1,3 @@ +// Package events has protobuf types for various events that are used in +// containerd. +package events diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/image.pb.go b/vendor/github.com/containerd/containerd/api/events/image.pb.go similarity index 74% rename from vendor/github.com/containerd/containerd/api/services/events/v1/image.pb.go rename to vendor/github.com/containerd/containerd/api/events/image.pb.go index 0349e7cb1d..ff3c6f69e7 100644 --- a/vendor/github.com/containerd/containerd/api/services/events/v1/image.pb.go +++ b/vendor/github.com/containerd/containerd/api/events/image.pb.go @@ -1,13 +1,13 @@ -// Code generated by protoc-gen-gogo. -// source: github.com/containerd/containerd/api/services/events/v1/image.proto -// DO NOT EDIT! +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: github.com/containerd/containerd/api/events/image.proto package events import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" -import _ "github.com/containerd/containerd/protobuf/plugin" + +// skipping weak import containerd_plugin "github.com/containerd/containerd/protobuf/plugin" import strings "strings" import reflect "reflect" @@ -215,24 +215,6 @@ func (m *ImageDelete) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Image(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Image(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintImage(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -445,51 +427,14 @@ func (m *ImageCreate) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthImage - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Labels == nil { m.Labels = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowImage @@ -499,41 +444,80 @@ func (m *ImageCreate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImage + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowImage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthImage + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var 
stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowImage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthImage + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipImage(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthImage + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthImage - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Labels[mapkey] = mapvalue - } else { - var mapvalue string - m.Labels[mapkey] = mapvalue } + m.Labels[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -640,51 +624,14 @@ func (m *ImageUpdate) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthImage - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Labels == nil { m.Labels = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowImage @@ -694,41 +641,80 @@ func (m *ImageUpdate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImage + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowImage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthImage + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := 
dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowImage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthImage + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipImage(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthImage + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthImage - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Labels[mapkey] = mapvalue - } else { - var mapvalue string - m.Labels[mapkey] = mapvalue } + m.Labels[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -936,28 +922,28 @@ var ( ) func init() { - proto.RegisterFile("github.com/containerd/containerd/api/services/events/v1/image.proto", fileDescriptorImage) + proto.RegisterFile("github.com/containerd/containerd/api/events/image.proto", fileDescriptorImage) } var fileDescriptorImage = []byte{ - // 296 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4e, 0xcf, 0x2c, 0xc9, + // 292 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x4f, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, - 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x17, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb, - 0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0x97, 0x19, 0xea, 0x67, 0xe6, 0x26, 0xa6, 0xa7, 0xea, - 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0xc9, 0x22, 0x94, 0xeb, 0xc1, 0x94, 0xea, 0x81, 0x15, 0x14, - 0xeb, 0x95, 0x19, 0x4a, 0x39, 0x10, 0xb4, 0x03, 0x6c, 0x4c, 0x52, 0x69, 0x9a, 0x7e, 0x41, 0x4e, - 0x69, 0x7a, 0x66, 0x9e, 0x7e, 0x5a, 0x66, 0x6a, 0x4e, 0x4a, 0x41, 0x62, 0x49, 0x06, 0xc4, 0x02, - 0xa5, 0x35, 0x8c, 0x5c, 0xdc, 0x9e, 0x20, 0xf3, 0x9c, 0x8b, 0x52, 0x13, 0x4b, 0x52, 0x85, 0x84, - 0xb8, 0x58, 0xf2, 0x12, 0x73, 0x53, 0x25, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, 0xc0, 0x6c, 0x21, - 0x3f, 0x2e, 0xb6, 0x9c, 0xc4, 0xa4, 0xd4, 0x9c, 0x62, 0x09, 0x26, 0x05, 0x66, 0x0d, 0x6e, 0x23, - 0x33, 0x3d, 0xbc, 0xae, 0xd2, 0x43, 0x32, 0x4f, 0xcf, 0x07, 0xac, 0xd1, 0x35, 0xaf, 0xa4, 0xa8, - 0x32, 0x08, 0x6a, 0x8a, 0x94, 0x25, 0x17, 0x37, 0x92, 0xb0, 0x90, 0x00, 0x17, 0x73, 0x76, 0x6a, - 0x25, 0xd4, 0x46, 0x10, 0x53, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, 0x34, 0x55, 0x82, 0x09, - 0x2c, 0x06, 0xe1, 0x58, 0x31, 0x59, 0x30, 0x22, 0x9c, 0x1b, 0x5a, 0x90, 0x42, 0x55, 0xe7, 0x42, - 0xcc, 0xa3, 0xb6, 0x73, 0x15, 0xa1, 0xae, 0x75, 0x49, 0xcd, 0x49, 0xc5, 0xee, 0x5a, 0xa7, 0x98, - 0x13, 0x0f, 
0xe5, 0x18, 0x6e, 0x3c, 0x94, 0x63, 0x68, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, - 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x2e, 0xf8, 0x22, 0xc7, 0x18, 0x65, 0x47, - 0x66, 0x22, 0xb2, 0x86, 0xb0, 0x92, 0xd8, 0xc0, 0xb1, 0x6c, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, - 0x44, 0x99, 0x59, 0x31, 0x8d, 0x02, 0x00, 0x00, + 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0x67, 0xe6, + 0x26, 0xa6, 0xa7, 0xea, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0xc9, 0x22, 0x94, 0xe8, 0x15, 0xa7, + 0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb, 0x81, 0x15, 0x14, 0xeb, 0x95, 0x19, 0x4a, 0x39, 0x10, + 0x34, 0x17, 0x6c, 0x4c, 0x52, 0x69, 0x9a, 0x7e, 0x41, 0x4e, 0x69, 0x7a, 0x66, 0x9e, 0x7e, 0x5a, + 0x66, 0x6a, 0x4e, 0x4a, 0x41, 0x62, 0x49, 0x06, 0xc4, 0x02, 0xa5, 0x35, 0x8c, 0x5c, 0xdc, 0x9e, + 0x20, 0xf3, 0x9c, 0x8b, 0x52, 0x13, 0x4b, 0x52, 0x85, 0x84, 0xb8, 0x58, 0xf2, 0x12, 0x73, 0x53, + 0x25, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, 0xc0, 0x6c, 0x21, 0x3f, 0x2e, 0xb6, 0x9c, 0xc4, 0xa4, + 0xd4, 0x9c, 0x62, 0x09, 0x26, 0x05, 0x66, 0x0d, 0x6e, 0x23, 0x33, 0x3d, 0xbc, 0xae, 0xd2, 0x43, + 0x32, 0x4f, 0xcf, 0x07, 0xac, 0xd1, 0x35, 0xaf, 0xa4, 0xa8, 0x32, 0x08, 0x6a, 0x8a, 0x94, 0x25, + 0x17, 0x37, 0x92, 0xb0, 0x90, 0x00, 0x17, 0x73, 0x76, 0x6a, 0x25, 0xd4, 0x46, 0x10, 0x53, 0x48, + 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, 0x34, 0x55, 0x82, 0x09, 0x2c, 0x06, 0xe1, 0x58, 0x31, 0x59, + 0x30, 0x22, 0x9c, 0x1b, 0x5a, 0x90, 0x42, 0x55, 0xe7, 0x42, 0xcc, 0xa3, 0xb6, 0x73, 0x15, 0xa1, + 0xae, 0x75, 0x49, 0xcd, 0x49, 0xc5, 0xee, 0x5a, 0xa7, 0x80, 0x13, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, + 0x94, 0x63, 0x68, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, + 0x1e, 0xc9, 0x31, 0x2e, 0xf8, 0x22, 0xc7, 0x18, 0x65, 0x44, 0x42, 0xc2, 0xb1, 0x86, 0x50, 0x11, + 0x0c, 0x49, 0x6c, 0xe0, 0xb8, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x41, 0x80, 0x92, 0x17, + 0x77, 0x02, 0x00, 0x00, } diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/image.proto b/vendor/github.com/containerd/containerd/api/events/image.proto similarity index 65% rename from vendor/github.com/containerd/containerd/api/services/events/v1/image.proto rename to vendor/github.com/containerd/containerd/api/events/image.proto index cbab0bbd4d..470c3a2fa4 100644 --- a/vendor/github.com/containerd/containerd/api/services/events/v1/image.proto +++ b/vendor/github.com/containerd/containerd/api/events/image.proto @@ -2,9 +2,9 @@ syntax = "proto3"; package containerd.services.images.v1; -import "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; +import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; -option go_package = "github.com/containerd/containerd/api/services/events/v1;events"; +option go_package = "github.com/containerd/containerd/api/events;events"; option (containerd.plugin.fieldpath_all) = true; message ImageCreate { diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/namespace.pb.go b/vendor/github.com/containerd/containerd/api/events/namespace.pb.go similarity index 73% rename from vendor/github.com/containerd/containerd/api/services/events/v1/namespace.pb.go rename to vendor/github.com/containerd/containerd/api/events/namespace.pb.go index c2aabe461f..f9a3e27779 100644 --- a/vendor/github.com/containerd/containerd/api/services/events/v1/namespace.pb.go +++ b/vendor/github.com/containerd/containerd/api/events/namespace.pb.go @@ -1,14 +1,14 @@ -// Code generated by protoc-gen-gogo. 
-// source: github.com/containerd/containerd/api/services/events/v1/namespace.proto -// DO NOT EDIT! +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/containerd/containerd/api/events/namespace.proto package events import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" -import _ "github.com/gogo/protobuf/gogoproto" -import _ "github.com/containerd/containerd/protobuf/plugin" + +// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" +// skipping weak import containerd_plugin "github.com/containerd/containerd/protobuf/plugin" import strings "strings" import reflect "reflect" @@ -48,9 +48,9 @@ func (*NamespaceDelete) ProtoMessage() {} func (*NamespaceDelete) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{2} } func init() { - proto.RegisterType((*NamespaceCreate)(nil), "containerd.services.events.v1.NamespaceCreate") - proto.RegisterType((*NamespaceUpdate)(nil), "containerd.services.events.v1.NamespaceUpdate") - proto.RegisterType((*NamespaceDelete)(nil), "containerd.services.events.v1.NamespaceDelete") + proto.RegisterType((*NamespaceCreate)(nil), "containerd.events.NamespaceCreate") + proto.RegisterType((*NamespaceUpdate)(nil), "containerd.events.NamespaceUpdate") + proto.RegisterType((*NamespaceDelete)(nil), "containerd.events.NamespaceDelete") } // Field returns the value for the given fieldpath as a string, if defined. @@ -216,24 +216,6 @@ func (m *NamespaceDelete) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Namespace(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Namespace(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintNamespace(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -446,51 +428,14 @@ func (m *NamespaceCreate) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthNamespace - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Labels == nil { m.Labels = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowNamespace @@ -500,41 +445,80 @@ 
func (m *NamespaceCreate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNamespace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthNamespace + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNamespace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthNamespace + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipNamespace(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNamespace + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthNamespace - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Labels[mapkey] = mapvalue - } else { - var mapvalue string - m.Labels[mapkey] = mapvalue } + m.Labels[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -641,51 +625,14 @@ func (m *NamespaceUpdate) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthNamespace - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Labels == nil { m.Labels = 
make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowNamespace @@ -695,41 +642,80 @@ func (m *NamespaceUpdate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNamespace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthNamespace + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNamespace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthNamespace + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipNamespace(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNamespace + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthNamespace - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Labels[mapkey] = mapvalue - } else { - var mapvalue string - m.Labels[mapkey] = mapvalue } + m.Labels[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -937,29 +923,28 @@ var ( ) func init() { - proto.RegisterFile("github.com/containerd/containerd/api/services/events/v1/namespace.proto", fileDescriptorNamespace) + proto.RegisterFile("github.com/containerd/containerd/api/events/namespace.proto", fileDescriptorNamespace) } var fileDescriptorNamespace = []byte{ - // 310 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4f, 0xcf, 0x2c, 0xc9, + // 296 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, - 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x17, 0xa7, 
0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb, - 0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0x97, 0x19, 0xea, 0xe7, 0x25, 0xe6, 0xa6, 0x16, 0x17, - 0x24, 0x26, 0xa7, 0xea, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0xc9, 0x22, 0xb4, 0xe8, 0xc1, 0x94, - 0xeb, 0x41, 0x94, 0xeb, 0x95, 0x19, 0x4a, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x55, 0xea, 0x83, - 0x58, 0x10, 0x4d, 0x52, 0x0e, 0x04, 0x6d, 0x07, 0xab, 0x4b, 0x2a, 0x4d, 0xd3, 0x2f, 0xc8, 0x29, - 0x4d, 0xcf, 0xcc, 0xd3, 0x4f, 0xcb, 0x4c, 0xcd, 0x49, 0x29, 0x48, 0x2c, 0xc9, 0x80, 0x98, 0xa0, - 0xb4, 0x85, 0x91, 0x8b, 0xdf, 0x0f, 0xe6, 0x14, 0xe7, 0xa2, 0xd4, 0xc4, 0x92, 0x54, 0x21, 0x21, - 0x2e, 0x16, 0x90, 0xeb, 0x24, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, 0xc0, 0x6c, 0xa1, 0x20, 0x2e, - 0xb6, 0x9c, 0xc4, 0xa4, 0xd4, 0x9c, 0x62, 0x09, 0x26, 0x05, 0x66, 0x0d, 0x6e, 0x23, 0x2b, 0x3d, - 0xbc, 0xee, 0xd5, 0x43, 0x33, 0x53, 0xcf, 0x07, 0xac, 0xd9, 0x35, 0xaf, 0xa4, 0xa8, 0x32, 0x08, - 0x6a, 0x92, 0x94, 0x25, 0x17, 0x37, 0x92, 0xb0, 0x90, 0x00, 0x17, 0x73, 0x76, 0x6a, 0x25, 0xd4, - 0x56, 0x10, 0x53, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, 0x34, 0x55, 0x82, 0x09, 0x2c, 0x06, - 0xe1, 0x58, 0x31, 0x59, 0x30, 0xa2, 0x3a, 0x3b, 0xb4, 0x20, 0x85, 0xea, 0xce, 0x86, 0x98, 0x49, - 0x6d, 0x67, 0xab, 0x22, 0xb9, 0xda, 0x25, 0x35, 0x27, 0x15, 0xbb, 0xab, 0x9d, 0x62, 0x4e, 0x3c, - 0x94, 0x63, 0xb8, 0xf1, 0x50, 0x8e, 0xa1, 0xe1, 0x91, 0x1c, 0xe3, 0x89, 0x47, 0x72, 0x8c, 0x17, - 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0xb8, 0xe0, 0x8b, 0x1c, 0x63, 0x94, 0x1d, 0x99, 0x49, - 0xce, 0x1a, 0xc2, 0x4a, 0x62, 0x03, 0xc7, 0xbc, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x58, 0x7e, - 0x6c, 0xc6, 0xbb, 0x02, 0x00, 0x00, + 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0xe7, 0x25, + 0xe6, 0xa6, 0x16, 0x17, 0x24, 0x26, 0xa7, 0xea, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x22, + 0x94, 0xe9, 0x41, 0x94, 0x48, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x65, 0xf5, 0x41, 0x2c, 0x88, + 0x42, 0x29, 0x07, 0x82, 0xb6, 0x80, 0xd5, 0x25, 0x95, 0xa6, 0xe9, 0x17, 0xe4, 0x94, 0xa6, 0x67, + 0xe6, 0xe9, 0xa7, 0x65, 0xa6, 0xe6, 0xa4, 0x14, 0x24, 0x96, 0x64, 0x40, 0x4c, 0x50, 0x5a, 0xc1, + 0xc8, 0xc5, 0xef, 0x07, 0xb3, 0xde, 0xb9, 0x28, 0x35, 0xb1, 0x24, 0x55, 0x48, 0x88, 0x8b, 0x05, + 0xe4, 0x22, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xce, 0x20, 0x30, 0x5b, 0xc8, 0x8d, 0x8b, 0x2d, 0x27, + 0x31, 0x29, 0x35, 0xa7, 0x58, 0x82, 0x49, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x4f, 0x0f, 0xc3, 0x8d, + 0x7a, 0x68, 0xe6, 0xe8, 0xf9, 0x80, 0x35, 0xb8, 0xe6, 0x95, 0x14, 0x55, 0x06, 0x41, 0x75, 0x4b, + 0x59, 0x72, 0x71, 0x23, 0x09, 0x0b, 0x09, 0x70, 0x31, 0x67, 0xa7, 0x56, 0x42, 0x6d, 0x02, 0x31, + 0x85, 0x44, 0xb8, 0x58, 0xcb, 0x12, 0x73, 0x4a, 0x53, 0x25, 0x98, 0xc0, 0x62, 0x10, 0x8e, 0x15, + 0x93, 0x05, 0x23, 0xaa, 0x53, 0x43, 0x0b, 0x52, 0xa8, 0xe2, 0x54, 0x88, 0x39, 0xd4, 0x76, 0xaa, + 0x2a, 0x92, 0x4b, 0x5d, 0x52, 0x73, 0x52, 0xb1, 0xbb, 0xd4, 0x29, 0xe0, 0xc4, 0x43, 0x39, 0x86, + 0x1b, 0x0f, 0xe5, 0x18, 0x1a, 0x1e, 0xc9, 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, + 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x0b, 0xbe, 0xc8, 0x31, 0x46, 0x19, 0x91, 0x90, 0x84, 0xac, 0x21, + 0x54, 0x04, 0x43, 0x04, 0x63, 0x12, 0x1b, 0x38, 0x66, 0x8d, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, + 0x00, 0x50, 0x87, 0x59, 0x83, 0x02, 0x00, 0x00, } diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/namespace.proto b/vendor/github.com/containerd/containerd/api/events/namespace.proto similarity index 54% rename from vendor/github.com/containerd/containerd/api/services/events/v1/namespace.proto rename to 
vendor/github.com/containerd/containerd/api/events/namespace.proto index 2d6be2b4da..45deae79a4 100644 --- a/vendor/github.com/containerd/containerd/api/services/events/v1/namespace.proto +++ b/vendor/github.com/containerd/containerd/api/events/namespace.proto @@ -1,11 +1,11 @@ syntax = "proto3"; -package containerd.services.events.v1; +package containerd.events; -import "gogoproto/gogo.proto"; -import "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; +import weak "gogoproto/gogo.proto"; +import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; -option go_package = "github.com/containerd/containerd/api/services/events/v1;events"; +option go_package = "github.com/containerd/containerd/api/events;events"; option (containerd.plugin.fieldpath_all) = true; message NamespaceCreate { diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/snapshot.pb.go b/vendor/github.com/containerd/containerd/api/events/snapshot.pb.go similarity index 85% rename from vendor/github.com/containerd/containerd/api/services/events/v1/snapshot.pb.go rename to vendor/github.com/containerd/containerd/api/events/snapshot.pb.go index 265d47a276..e1f8f5c588 100644 --- a/vendor/github.com/containerd/containerd/api/services/events/v1/snapshot.pb.go +++ b/vendor/github.com/containerd/containerd/api/events/snapshot.pb.go @@ -1,13 +1,13 @@ -// Code generated by protoc-gen-gogo. -// source: github.com/containerd/containerd/api/services/events/v1/snapshot.proto -// DO NOT EDIT! +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/containerd/containerd/api/events/snapshot.proto package events import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" -import _ "github.com/containerd/containerd/protobuf/plugin" + +// skipping weak import containerd_plugin "github.com/containerd/containerd/protobuf/plugin" import strings "strings" import reflect "reflect" @@ -46,9 +46,9 @@ func (*SnapshotRemove) ProtoMessage() {} func (*SnapshotRemove) Descriptor() ([]byte, []int) { return fileDescriptorSnapshot, []int{2} } func init() { - proto.RegisterType((*SnapshotPrepare)(nil), "containerd.services.events.v1.SnapshotPrepare") - proto.RegisterType((*SnapshotCommit)(nil), "containerd.services.events.v1.SnapshotCommit") - proto.RegisterType((*SnapshotRemove)(nil), "containerd.services.events.v1.SnapshotRemove") + proto.RegisterType((*SnapshotPrepare)(nil), "containerd.events.SnapshotPrepare") + proto.RegisterType((*SnapshotCommit)(nil), "containerd.events.SnapshotCommit") + proto.RegisterType((*SnapshotRemove)(nil), "containerd.events.SnapshotRemove") } // Field returns the value for the given fieldpath as a string, if defined. 
@@ -180,24 +180,6 @@ func (m *SnapshotRemove) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Snapshot(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Snapshot(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintSnapshot(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -699,25 +681,24 @@ var ( ) func init() { - proto.RegisterFile("github.com/containerd/containerd/api/services/events/v1/snapshot.proto", fileDescriptorSnapshot) + proto.RegisterFile("github.com/containerd/containerd/api/events/snapshot.proto", fileDescriptorSnapshot) } var fileDescriptorSnapshot = []byte{ - // 252 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4b, 0xcf, 0x2c, 0xc9, + // 235 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4a, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, - 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x17, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb, - 0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0x97, 0x19, 0xea, 0x17, 0xe7, 0x25, 0x16, 0x14, 0x67, - 0xe4, 0x97, 0xe8, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0xc9, 0x22, 0x74, 0xe8, 0xc1, 0x54, 0xeb, - 0x41, 0x54, 0xeb, 0x95, 0x19, 0x4a, 0x39, 0x10, 0xb4, 0x06, 0x6c, 0x4c, 0x52, 0x69, 0x9a, 0x7e, - 0x41, 0x4e, 0x69, 0x7a, 0x66, 0x9e, 0x7e, 0x5a, 0x66, 0x6a, 0x4e, 0x4a, 0x41, 0x62, 0x49, 0x06, - 0xc4, 0x02, 0x25, 0x6b, 0x2e, 0xfe, 0x60, 0xa8, 0x95, 0x01, 0x45, 0xa9, 0x05, 0x89, 0x45, 0xa9, - 0x42, 0x02, 0x5c, 0xcc, 0xd9, 0xa9, 0x95, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x20, 0xa6, - 0x90, 0x18, 0x17, 0x1b, 0x48, 0x26, 0xaf, 0x44, 0x82, 0x09, 0x2c, 0x08, 0xe5, 0x29, 0x99, 0x71, - 0xf1, 0xc1, 0x34, 0x3b, 0xe7, 0xe7, 0xe6, 0x66, 0x96, 0x60, 0xd1, 0x2b, 0xc4, 0xc5, 0x92, 0x97, - 0x98, 0x9b, 0x0a, 0xd5, 0x09, 0x66, 0x2b, 0x29, 0x21, 0xf4, 0x05, 0xa5, 0xe6, 0xe6, 0x97, 0x61, - 0xb1, 0xd3, 0x29, 0xe6, 0xc4, 0x43, 0x39, 0x86, 0x1b, 0x0f, 0xe5, 0x18, 0x1a, 0x1e, 0xc9, 0x31, - 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x0b, 0xbe, 0xc8, - 0x31, 0x46, 0xd9, 0x91, 0x19, 0xbe, 0xd6, 0x10, 0x56, 0x12, 0x1b, 0xd8, 0xf7, 0xc6, 0x80, 0x00, - 0x00, 0x00, 0xff, 0xff, 0x3a, 0x82, 0x7a, 0xa7, 0xa8, 0x01, 0x00, 0x00, + 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0x17, 0xe7, + 0x25, 0x16, 0x14, 0x67, 0xe4, 0x97, 0xe8, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x22, 0x54, + 0xe9, 0x41, 0x54, 0x48, 0x39, 0x10, 0x34, 0x0e, 0xac, 0x35, 0xa9, 0x34, 0x4d, 0xbf, 0x20, 0xa7, + 0x34, 0x3d, 0x33, 0x4f, 0x3f, 0x2d, 0x33, 0x35, 0x27, 0xa5, 0x20, 0xb1, 0x24, 0x03, 0x62, 0xa8, + 0x92, 0x35, 0x17, 0x7f, 0x30, 0xd4, 0x9a, 0x80, 0xa2, 0xd4, 0x82, 0xc4, 0xa2, 0x54, 0x21, 0x01, + 0x2e, 0xe6, 0xec, 0xd4, 0x4a, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xce, 0x20, 0x10, 0x53, 0x48, 0x8c, + 0x8b, 0x0d, 0x24, 0x93, 0x57, 0x22, 0xc1, 0x04, 0x16, 0x84, 0xf2, 0x94, 0xcc, 0xb8, 0xf8, 0x60, + 0x9a, 0x9d, 0xf3, 0x73, 
0x73, 0x33, 0x4b, 0xb0, 0xe8, 0x15, 0xe2, 0x62, 0xc9, 0x4b, 0xcc, 0x4d, + 0x85, 0xea, 0x04, 0xb3, 0x95, 0x94, 0x10, 0xfa, 0x82, 0x52, 0x73, 0xf3, 0xcb, 0xb0, 0xd8, 0xe9, + 0x14, 0x70, 0xe2, 0xa1, 0x1c, 0xc3, 0x8d, 0x87, 0x72, 0x0c, 0x0d, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, + 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x05, 0x5f, 0xe4, 0x18, 0xa3, + 0x8c, 0x48, 0x08, 0x47, 0x6b, 0x08, 0x15, 0xc1, 0x90, 0xc4, 0x06, 0xf6, 0xb3, 0x31, 0x20, 0x00, + 0x00, 0xff, 0xff, 0x69, 0x66, 0xa9, 0x2a, 0x86, 0x01, 0x00, 0x00, } diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/snapshot.proto b/vendor/github.com/containerd/containerd/api/events/snapshot.proto similarity index 55% rename from vendor/github.com/containerd/containerd/api/services/events/v1/snapshot.proto rename to vendor/github.com/containerd/containerd/api/events/snapshot.proto index b6af0ea4bc..425eeec8e0 100644 --- a/vendor/github.com/containerd/containerd/api/services/events/v1/snapshot.proto +++ b/vendor/github.com/containerd/containerd/api/events/snapshot.proto @@ -1,10 +1,10 @@ syntax = "proto3"; -package containerd.services.events.v1; +package containerd.events; -import "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; +import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; -option go_package = "github.com/containerd/containerd/api/services/events/v1;events"; +option go_package = "github.com/containerd/containerd/api/events;events"; option (containerd.plugin.fieldpath_all) = true; message SnapshotPrepare { diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/task.pb.go b/vendor/github.com/containerd/containerd/api/events/task.pb.go similarity index 90% rename from vendor/github.com/containerd/containerd/api/services/events/v1/task.pb.go rename to vendor/github.com/containerd/containerd/api/events/task.pb.go index 97faa38086..e91e79965c 100644 --- a/vendor/github.com/containerd/containerd/api/services/events/v1/task.pb.go +++ b/vendor/github.com/containerd/containerd/api/events/task.pb.go @@ -1,16 +1,17 @@ -// Code generated by protoc-gen-gogo. -// source: github.com/containerd/containerd/api/services/events/v1/task.proto -// DO NOT EDIT! +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: github.com/containerd/containerd/api/events/task.proto package events import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" -import _ "github.com/gogo/protobuf/gogoproto" + +// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" import _ "github.com/gogo/protobuf/types" import containerd_types "github.com/containerd/containerd/api/types" -import _ "github.com/containerd/containerd/protobuf/plugin" + +// skipping weak import containerd_plugin "github.com/containerd/containerd/protobuf/plugin" import time "time" @@ -136,17 +137,17 @@ func (*TaskCheckpointed) ProtoMessage() {} func (*TaskCheckpointed) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{10} } func init() { - proto.RegisterType((*TaskCreate)(nil), "containerd.services.events.v1.TaskCreate") - proto.RegisterType((*TaskStart)(nil), "containerd.services.events.v1.TaskStart") - proto.RegisterType((*TaskDelete)(nil), "containerd.services.events.v1.TaskDelete") - proto.RegisterType((*TaskIO)(nil), "containerd.services.events.v1.TaskIO") - proto.RegisterType((*TaskExit)(nil), "containerd.services.events.v1.TaskExit") - proto.RegisterType((*TaskOOM)(nil), "containerd.services.events.v1.TaskOOM") - proto.RegisterType((*TaskExecAdded)(nil), "containerd.services.events.v1.TaskExecAdded") - proto.RegisterType((*TaskExecStarted)(nil), "containerd.services.events.v1.TaskExecStarted") - proto.RegisterType((*TaskPaused)(nil), "containerd.services.events.v1.TaskPaused") - proto.RegisterType((*TaskResumed)(nil), "containerd.services.events.v1.TaskResumed") - proto.RegisterType((*TaskCheckpointed)(nil), "containerd.services.events.v1.TaskCheckpointed") + proto.RegisterType((*TaskCreate)(nil), "containerd.events.TaskCreate") + proto.RegisterType((*TaskStart)(nil), "containerd.events.TaskStart") + proto.RegisterType((*TaskDelete)(nil), "containerd.events.TaskDelete") + proto.RegisterType((*TaskIO)(nil), "containerd.events.TaskIO") + proto.RegisterType((*TaskExit)(nil), "containerd.events.TaskExit") + proto.RegisterType((*TaskOOM)(nil), "containerd.events.TaskOOM") + proto.RegisterType((*TaskExecAdded)(nil), "containerd.events.TaskExecAdded") + proto.RegisterType((*TaskExecStarted)(nil), "containerd.events.TaskExecStarted") + proto.RegisterType((*TaskPaused)(nil), "containerd.events.TaskPaused") + proto.RegisterType((*TaskResumed)(nil), "containerd.events.TaskResumed") + proto.RegisterType((*TaskCheckpointed)(nil), "containerd.events.TaskCheckpointed") } // Field returns the value for the given fieldpath as a string, if defined. 
@@ -737,24 +738,6 @@ func (m *TaskCheckpointed) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Task(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Task(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintTask(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -991,7 +974,7 @@ func (this *TaskDelete) String() string { `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, `ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`, - `ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "google_protobuf3.Timestamp", 1), `&`, ``, 1) + `,`, + `ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "google_protobuf2.Timestamp", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1018,7 +1001,7 @@ func (this *TaskExit) String() string { `ID:` + fmt.Sprintf("%v", this.ID) + `,`, `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, `ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`, - `ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "google_protobuf3.Timestamp", 1), `&`, ``, 1) + `,`, + `ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "google_protobuf2.Timestamp", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2579,50 +2562,49 @@ var ( ) func init() { - proto.RegisterFile("github.com/containerd/containerd/api/services/events/v1/task.proto", fileDescriptorTask) + proto.RegisterFile("github.com/containerd/containerd/api/events/task.proto", fileDescriptorTask) } var fileDescriptorTask = []byte{ - // 648 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x95, 0xc1, 0x6e, 0xd3, 0x40, - 0x10, 0x86, 0x6b, 0xa7, 0x75, 0x93, 0x0d, 0x55, 0x2b, 0xab, 0x82, 0x28, 0x12, 0x76, 0x64, 0x84, - 0x94, 0x93, 0xad, 0x16, 0x89, 0x0b, 0x2a, 0x6a, 0xd2, 0x70, 0xc8, 0xa1, 0x0a, 0xb8, 0x3d, 0x21, - 0xa4, 0xc8, 0xb1, 0x27, 0xc9, 0xd2, 0xc4, 0x6b, 0x79, 0xc7, 0x51, 0x91, 0x38, 0xf0, 0x08, 0x3c, - 0x02, 0x4f, 0xc1, 0x33, 0xf4, 0xc0, 0x81, 0x23, 0xa7, 0x40, 0xfd, 0x0c, 0x9c, 0x38, 0xa1, 0xf5, - 0x3a, 0x6e, 0xa1, 0xa2, 0x20, 0x4b, 0xdc, 0x76, 0xc6, 0x33, 0xff, 0xcc, 0x7c, 0x3b, 0xd9, 0x90, - 0xee, 0x84, 0xe2, 0x34, 0x19, 0xd9, 0x3e, 0x9b, 0x3b, 0x3e, 0x0b, 0xd1, 0xa3, 0x21, 0xc4, 0xc1, - 0xf5, 0xa3, 0x17, 0x51, 0x87, 0x43, 0xbc, 0xa0, 0x3e, 0x70, 0x07, 0x16, 0x10, 0x22, 0x77, 0x16, - 0x7b, 0x0e, 0x7a, 0xfc, 0xcc, 0x8e, 0x62, 0x86, 0x4c, 0xbf, 0x7f, 0x15, 0x6d, 0xaf, 0x22, 0x6d, - 0x19, 0x69, 0x2f, 0xf6, 0x9a, 0xbb, 0x13, 0x36, 0x61, 0x59, 0xa4, 0x23, 0x4e, 0x32, 0xa9, 0x69, - 0x4e, 0x18, 0x9b, 0xcc, 0xc0, 0xc9, 0xac, 0x51, 0x32, 0x76, 0x90, 0xce, 0x81, 0xa3, 0x37, 0x8f, - 0xf2, 0x80, 0xc7, 0xff, 0xd4, 0x19, 0xbe, 0x89, 0x80, 0x3b, 0x73, 0x96, 0x84, 0x98, 0xe7, 0x1d, - 0xfe, 0x35, 0xaf, 0x28, 0x19, 0xcd, 0x92, 0x09, 0x0d, 0x9d, 0x31, 0x85, 0x59, 0x10, 0x79, 0x38, - 0x95, 0x0a, 0xd6, 0x0f, 0x85, 0x90, 0x53, 0x8f, 0x9f, 0x1d, 0xc5, 0xe0, 0x21, 0xe8, 0xfb, 0xe4, - 0x4e, 0x91, 0x3c, 0xa4, 0x41, 
0x43, 0x69, 0x29, 0xed, 0x5a, 0x77, 0x3b, 0x5d, 0x9a, 0xf5, 0xa3, - 0x95, 0xbf, 0xdf, 0x73, 0xeb, 0x45, 0x50, 0x3f, 0xd0, 0xef, 0x12, 0x6d, 0x94, 0x84, 0xc1, 0x0c, - 0x1a, 0xaa, 0x88, 0x76, 0x73, 0x4b, 0x77, 0x88, 0x16, 0x33, 0x86, 0x63, 0xde, 0xa8, 0xb4, 0x2a, - 0xed, 0xfa, 0xfe, 0x3d, 0xfb, 0x1a, 0xbb, 0x6c, 0x16, 0xfb, 0x58, 0xcc, 0xe2, 0xe6, 0x61, 0xfa, - 0x01, 0x51, 0x29, 0x6b, 0xac, 0xb7, 0x94, 0x76, 0x7d, 0xff, 0xa1, 0x7d, 0x2b, 0x68, 0x5b, 0xf4, - 0xdc, 0x1f, 0x74, 0xb5, 0x74, 0x69, 0xaa, 0xfd, 0x81, 0xab, 0x52, 0xa6, 0x1b, 0x84, 0xf8, 0x53, - 0xf0, 0xcf, 0x22, 0x46, 0x43, 0x6c, 0x6c, 0x64, 0xbd, 0x5c, 0xf3, 0xe8, 0x3b, 0xa4, 0x12, 0xd1, - 0xa0, 0xa1, 0xb5, 0x94, 0xf6, 0x96, 0x2b, 0x8e, 0xd6, 0x0b, 0x52, 0x13, 0x3a, 0x27, 0xe8, 0xc5, - 0x58, 0x6a, 0xf4, 0x5c, 0x52, 0xbd, 0x92, 0xfc, 0x98, 0xf3, 0xec, 0xc1, 0x0c, 0x4a, 0xf2, 0xbc, - 0x21, 0xaa, 0x9b, 0xa4, 0x0e, 0xe7, 0x14, 0x87, 0x1c, 0x3d, 0x4c, 0x04, 0x4e, 0xf1, 0x85, 0x08, - 0xd7, 0x49, 0xe6, 0xd1, 0x3b, 0xa4, 0x26, 0x2c, 0x08, 0x86, 0x1e, 0xe6, 0x00, 0x9b, 0xb6, 0x5c, - 0x3a, 0x7b, 0xb5, 0x01, 0xf6, 0xe9, 0x6a, 0xe9, 0xba, 0xd5, 0x8b, 0xa5, 0xb9, 0xf6, 0xfe, 0xab, - 0xa9, 0xb8, 0x55, 0x99, 0xd6, 0x41, 0xeb, 0x35, 0xd1, 0x24, 0x53, 0x7d, 0x97, 0x6c, 0x70, 0x0c, - 0x68, 0x28, 0x9b, 0x75, 0xa5, 0x21, 0x6e, 0x99, 0x63, 0xc0, 0x12, 0x5c, 0xdd, 0xb2, 0xb4, 0x72, - 0x3f, 0xc4, 0x71, 0xd6, 0x96, 0xf4, 0x43, 0x1c, 0xeb, 0x4d, 0x52, 0x45, 0x88, 0xe7, 0x34, 0xf4, - 0x66, 0x59, 0x47, 0x55, 0xb7, 0xb0, 0xad, 0x4f, 0x0a, 0xa9, 0x8a, 0x62, 0xcf, 0xce, 0x29, 0x96, - 0x5c, 0x39, 0x35, 0x27, 0x54, 0xcb, 0x57, 0xa0, 0xe7, 0xaa, 0xb4, 0x40, 0x57, 0xf9, 0x23, 0xba, - 0xf5, 0xdb, 0xd1, 0x6d, 0x94, 0x42, 0x77, 0x40, 0x36, 0xc5, 0x34, 0x83, 0xc1, 0x71, 0x99, 0x61, - 0xac, 0x29, 0xd9, 0x92, 0x30, 0xc0, 0xef, 0x04, 0x01, 0x04, 0xa5, 0x88, 0x3c, 0x20, 0x9b, 0x70, - 0x0e, 0xfe, 0xb0, 0xc0, 0x42, 0xd2, 0xa5, 0xa9, 0x09, 0xcd, 0x7e, 0xcf, 0xd5, 0xc4, 0xa7, 0x7e, - 0x60, 0xbd, 0x25, 0xdb, 0xab, 0x4a, 0xd9, 0xce, 0xff, 0xc7, 0x5a, 0x37, 0xaf, 0xc2, 0x3a, 0x94, - 0xbf, 0x8c, 0xe7, 0x5e, 0xc2, 0xcb, 0x15, 0xb6, 0x3a, 0xa4, 0x2e, 0x14, 0x5c, 0xe0, 0xc9, 0xbc, - 0xa4, 0xc4, 0x98, 0xec, 0x64, 0xcf, 0x5d, 0xf1, 0x2c, 0x94, 0x64, 0xf0, 0xeb, 0x63, 0xa3, 0xfe, - 0xfe, 0xd8, 0x74, 0x5f, 0x5d, 0x5c, 0x1a, 0x6b, 0x5f, 0x2e, 0x8d, 0xb5, 0x77, 0xa9, 0xa1, 0x5c, - 0xa4, 0x86, 0xf2, 0x39, 0x35, 0x94, 0x6f, 0xa9, 0xa1, 0x7c, 0xf8, 0x6e, 0x28, 0x2f, 0x9f, 0x96, - 0xfc, 0x27, 0x7a, 0x22, 0x4f, 0x23, 0x2d, 0xdb, 0xcc, 0x47, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, - 0x76, 0xdf, 0xe7, 0xaa, 0xd2, 0x06, 0x00, 0x00, + // 637 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x95, 0xcd, 0x6e, 0xd3, 0x40, + 0x10, 0xc7, 0x63, 0xa7, 0x75, 0x93, 0x09, 0x55, 0x8b, 0x55, 0x41, 0xc8, 0xc1, 0x8e, 0xcc, 0x25, + 0x27, 0x5b, 0x04, 0x89, 0x0b, 0x42, 0x6a, 0xd2, 0x70, 0xc8, 0xa1, 0x4a, 0x71, 0x7b, 0xa8, 0xb8, + 0x44, 0x4e, 0x76, 0x93, 0x2c, 0x8d, 0xbd, 0x96, 0x3d, 0x46, 0x45, 0xe2, 0xc0, 0x23, 0xf0, 0x08, + 0x3c, 0x05, 0xcf, 0xd0, 0x03, 0x07, 0x8e, 0x9c, 0x02, 0xf5, 0x03, 0x70, 0xe2, 0x01, 0xd0, 0x7a, + 0x1d, 0xb7, 0x50, 0xf1, 0x65, 0x89, 0x53, 0x76, 0x66, 0x67, 0xff, 0x33, 0xf3, 0xdb, 0xc9, 0x1a, + 0x1e, 0xcd, 0x19, 0x2e, 0x92, 0x89, 0x3d, 0xe5, 0xbe, 0x33, 0xe5, 0x01, 0x7a, 0x2c, 0xa0, 0x11, + 0xb9, 0xbe, 0xf4, 0x42, 0xe6, 0xd0, 0x97, 0x34, 0xc0, 0xd8, 0x41, 0x2f, 0x3e, 0xb3, 0xc3, 0x88, + 0x23, 0xd7, 0x6f, 0x5f, 0x45, 0xd8, 0x72, 0xb7, 0xb5, 0x37, 0xe7, 0x73, 0x9e, 0xed, 0x3a, 0x62, + 0x25, 0x03, 0x5b, 0xe6, 0x9c, 0xf3, 0xf9, 0x92, 0x3a, 
0x99, 0x35, 0x49, 0x66, 0x0e, 0x32, 0x9f, + 0xc6, 0xe8, 0xf9, 0x61, 0x1e, 0xf0, 0x77, 0x15, 0xe0, 0xab, 0x90, 0xc6, 0x8e, 0xcf, 0x93, 0x00, + 0xf3, 0x73, 0xfb, 0x7f, 0x3c, 0x57, 0xa4, 0x0c, 0x97, 0xc9, 0x9c, 0x05, 0xce, 0x8c, 0xd1, 0x25, + 0x09, 0x3d, 0x5c, 0x48, 0x05, 0xeb, 0xab, 0x02, 0x70, 0xe2, 0xc5, 0x67, 0x07, 0x11, 0xf5, 0x90, + 0xea, 0x5d, 0xb8, 0x55, 0x1c, 0x1e, 0x33, 0xd2, 0x54, 0xda, 0x4a, 0xa7, 0xde, 0xdf, 0x49, 0x57, + 0x66, 0xe3, 0x60, 0xed, 0x1f, 0x0e, 0xdc, 0x46, 0x11, 0x34, 0x24, 0xfa, 0x1d, 0xd0, 0x26, 0x49, + 0x40, 0x96, 0xb4, 0xa9, 0x8a, 0x68, 0x37, 0xb7, 0x74, 0x07, 0xb4, 0x88, 0x73, 0x9c, 0xc5, 0xcd, + 0x6a, 0xbb, 0xda, 0x69, 0x74, 0xef, 0xda, 0xd7, 0x78, 0x65, 0xbd, 0xd8, 0x87, 0xa2, 0x17, 0x37, + 0x0f, 0xd3, 0x1f, 0x80, 0xca, 0x78, 0x73, 0xa3, 0xad, 0x74, 0x1a, 0xdd, 0x7b, 0xf6, 0x0d, 0xb8, + 0xb6, 0xa8, 0x73, 0x38, 0xea, 0x6b, 0xe9, 0xca, 0x54, 0x87, 0x23, 0x57, 0x65, 0x5c, 0x37, 0x00, + 0xa6, 0x0b, 0x3a, 0x3d, 0x0b, 0x39, 0x0b, 0xb0, 0xb9, 0x99, 0xe5, 0xbf, 0xe6, 0xd1, 0x77, 0xa1, + 0x1a, 0x32, 0xd2, 0xd4, 0xda, 0x4a, 0x67, 0xdb, 0x15, 0x4b, 0xeb, 0x19, 0xd4, 0x85, 0xce, 0x31, + 0x7a, 0x11, 0x96, 0x6a, 0x37, 0x97, 0x54, 0xaf, 0x24, 0xdf, 0xe7, 0x0c, 0x07, 0x74, 0x49, 0x4b, + 0x32, 0xbc, 0x21, 0xaa, 0x9b, 0xd0, 0xa0, 0xe7, 0x0c, 0xc7, 0x31, 0x7a, 0x98, 0x08, 0x84, 0x62, + 0x07, 0x84, 0xeb, 0x38, 0xf3, 0xe8, 0x3d, 0xa8, 0x0b, 0x8b, 0x92, 0xb1, 0x87, 0x39, 0xb4, 0x96, + 0x2d, 0x07, 0xcd, 0x5e, 0xdf, 0xba, 0x7d, 0xb2, 0x1e, 0xb4, 0x7e, 0xed, 0x62, 0x65, 0x56, 0xde, + 0x7e, 0x36, 0x15, 0xb7, 0x26, 0x8f, 0xf5, 0xd0, 0x7a, 0x01, 0x9a, 0x64, 0xaa, 0xef, 0xc1, 0x66, + 0x8c, 0x84, 0x05, 0xb2, 0x58, 0x57, 0x1a, 0xe2, 0x66, 0x63, 0x24, 0x3c, 0xc1, 0xf5, 0xcd, 0x4a, + 0x2b, 0xf7, 0xd3, 0x28, 0xca, 0xca, 0x92, 0x7e, 0x1a, 0x45, 0x7a, 0x0b, 0x6a, 0x48, 0x23, 0x9f, + 0x05, 0xde, 0x32, 0xab, 0xa8, 0xe6, 0x16, 0xb6, 0xf5, 0x41, 0x81, 0x9a, 0x48, 0xf6, 0xf4, 0x9c, + 0x61, 0xc9, 0x31, 0x53, 0x73, 0x42, 0xf5, 0x7c, 0x04, 0x06, 0xae, 0xca, 0x0a, 0x74, 0xd5, 0x5f, + 0xa2, 0xdb, 0xf8, 0x3d, 0xba, 0xcd, 0x52, 0xe8, 0x9e, 0xc0, 0x96, 0xe8, 0x66, 0x34, 0x3a, 0x2c, + 0xd3, 0x8c, 0xb5, 0x80, 0x6d, 0x09, 0x83, 0x4e, 0x7b, 0x84, 0x50, 0x52, 0x8a, 0xc8, 0x7d, 0xd8, + 0xa2, 0xe7, 0x74, 0x3a, 0x2e, 0xb0, 0x40, 0xba, 0x32, 0x35, 0xa1, 0x39, 0x1c, 0xb8, 0x9a, 0xd8, + 0x1a, 0x12, 0xeb, 0x35, 0xec, 0xac, 0x33, 0x65, 0x33, 0xff, 0x1f, 0x73, 0xdd, 0xbc, 0x0a, 0x6b, + 0x5f, 0xfe, 0x33, 0x8e, 0xbc, 0x24, 0x2e, 0x97, 0xd8, 0xea, 0x41, 0x43, 0x28, 0xb8, 0x34, 0x4e, + 0xfc, 0x92, 0x12, 0x33, 0xd8, 0xcd, 0x9e, 0xb8, 0xe2, 0x59, 0x28, 0xc9, 0xe0, 0xc7, 0xc7, 0x46, + 0xfd, 0xf9, 0xb1, 0xe9, 0x1f, 0x5d, 0x5c, 0x1a, 0x95, 0x4f, 0x97, 0x46, 0xe5, 0x4d, 0x6a, 0x28, + 0x17, 0xa9, 0xa1, 0x7c, 0x4c, 0x0d, 0xe5, 0x4b, 0x6a, 0x28, 0xef, 0xbe, 0x19, 0xca, 0xf3, 0xee, + 0x3f, 0x7c, 0x65, 0x1e, 0xcb, 0x9f, 0xd3, 0xca, 0x69, 0x75, 0xa2, 0x65, 0x13, 0xf9, 0xf0, 0x7b, + 0x00, 0x00, 0x00, 0xff, 0xff, 0x07, 0x69, 0x62, 0x9d, 0xa6, 0x06, 0x00, 0x00, } diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/task.proto b/vendor/github.com/containerd/containerd/api/events/task.proto similarity index 85% rename from vendor/github.com/containerd/containerd/api/services/events/v1/task.proto rename to vendor/github.com/containerd/containerd/api/events/task.proto index 79f87a6c5c..d69921365d 100644 --- a/vendor/github.com/containerd/containerd/api/services/events/v1/task.proto +++ b/vendor/github.com/containerd/containerd/api/events/task.proto @@ -1,13 +1,13 @@ syntax = "proto3"; -package 
containerd.services.events.v1; +package containerd.events; -import "gogoproto/gogo.proto"; +import weak "gogoproto/gogo.proto"; import "google/protobuf/timestamp.proto"; import "github.com/containerd/containerd/api/types/mount.proto"; -import "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; +import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; -option go_package = "github.com/containerd/containerd/api/services/events/v1;events"; +option go_package = "github.com/containerd/containerd/api/events;events"; option (containerd.plugin.fieldpath_all) = true; message TaskCreate { diff --git a/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.pb.go b/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.pb.go index 90d3099031..17cd36d36e 100644 --- a/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/containerd/containerd/api/services/containers/v1/containers.proto -// DO NOT EDIT! /* Package containers is a generated protocol buffer package. @@ -25,9 +24,10 @@ package containers import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" -import _ "github.com/gogo/protobuf/gogoproto" + +// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" import google_protobuf1 "github.com/gogo/protobuf/types" -import google_protobuf2 "github.com/golang/protobuf/ptypes/empty" +import google_protobuf2 "github.com/gogo/protobuf/types" import google_protobuf3 "github.com/gogo/protobuf/types" import _ "github.com/gogo/protobuf/types" @@ -844,24 +844,6 @@ func (m *DeleteContainerRequest) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Containers(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Containers(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintContainers(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -1270,51 +1252,14 @@ func (m *Container) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthContainers - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := 
string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Labels == nil { m.Labels = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowContainers @@ -1324,41 +1269,80 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthContainers + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthContainers + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipContainers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContainers + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthContainers - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Labels[mapkey] = mapvalue - } else { - var mapvalue string - m.Labels[mapkey] = mapvalue } + m.Labels[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -1599,51 +1583,14 @@ func (m *Container) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if 
b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthContainers - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Extensions == nil { m.Extensions = make(map[string]google_protobuf1.Any) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + mapvalue := &google_protobuf1.Any{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowContainers @@ -1653,46 +1600,85 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthContainers + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthContainers + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthContainers + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &google_protobuf1.Any{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipContainers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContainers + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthContainers - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthContainers - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &google_protobuf1.Any{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Extensions[mapkey] = *mapvalue - } else { - var mapvalue google_protobuf1.Any - m.Extensions[mapkey] = mapvalue } + m.Extensions[mapkey] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -2689,53 +2675,53 @@ func init() { var fileDescriptorContainers = []byte{ // 776 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcd, 0x72, 0x12, 0x5b, - 0x10, 0xce, 0x00, 0x81, 0xd0, 0xdc, 0xaa, 0x7b, 0xeb, 0x5c, 0x2e, 0x77, 0x1c, 
0xab, 0x80, 0xb0, - 0xa2, 0x2c, 0x1d, 0x0c, 0x5a, 0x9a, 0x1f, 0x37, 0x21, 0x7f, 0x65, 0x99, 0x58, 0xa9, 0x51, 0x37, - 0xba, 0x88, 0x03, 0x74, 0xc8, 0xc8, 0xfc, 0x39, 0xe7, 0x40, 0x49, 0xb9, 0xd0, 0x47, 0x70, 0xe7, - 0x23, 0xf8, 0x2a, 0x59, 0xba, 0x74, 0x15, 0x13, 0x9e, 0xc4, 0x9a, 0x33, 0x33, 0xcc, 0x04, 0x06, - 0x85, 0x68, 0x76, 0xa7, 0x39, 0xfd, 0x7d, 0xfd, 0xf1, 0x75, 0xf7, 0x01, 0xd8, 0xef, 0x68, 0xec, - 0xa4, 0xd7, 0x94, 0x5b, 0x96, 0x51, 0x6b, 0x59, 0x26, 0x53, 0x35, 0x13, 0x9d, 0x76, 0xf4, 0xa8, - 0xda, 0x5a, 0x8d, 0xa2, 0xd3, 0xd7, 0x5a, 0x48, 0xc3, 0xcf, 0x69, 0xad, 0xbf, 0x12, 0x89, 0x64, - 0xdb, 0xb1, 0x98, 0x45, 0x96, 0x43, 0x9c, 0x1c, 0x60, 0xe4, 0x48, 0x56, 0x7f, 0x45, 0xca, 0x77, - 0xac, 0x8e, 0xc5, 0xb3, 0x6b, 0xee, 0xc9, 0x03, 0x4a, 0x37, 0x3a, 0x96, 0xd5, 0xd1, 0xb1, 0xc6, - 0xa3, 0x66, 0xef, 0xb8, 0xa6, 0x9a, 0x03, 0xff, 0xea, 0xe6, 0xf8, 0x15, 0x1a, 0x36, 0x0b, 0x2e, - 0xcb, 0xe3, 0x97, 0xc7, 0x1a, 0xea, 0xed, 0x23, 0x43, 0xa5, 0x5d, 0x3f, 0xa3, 0x34, 0x9e, 0xc1, - 0x34, 0x03, 0x29, 0x53, 0x0d, 0xdb, 0x4b, 0xa8, 0x7c, 0x4e, 0x43, 0x76, 0x2b, 0x90, 0x48, 0x0a, - 0x90, 0xd0, 0xda, 0xa2, 0x50, 0x16, 0xaa, 0xd9, 0x46, 0x7a, 0x78, 0x56, 0x4a, 0x3c, 0xde, 0x56, - 0x12, 0x5a, 0x9b, 0x1c, 0x42, 0x5a, 0x57, 0x9b, 0xa8, 0x53, 0x31, 0x51, 0x4e, 0x56, 0x73, 0xf5, - 0x55, 0xf9, 0x97, 0x5f, 0x55, 0x1e, 0xb1, 0xca, 0xfb, 0x1c, 0xba, 0x63, 0x32, 0x67, 0xa0, 0xf8, - 0x3c, 0x24, 0x0f, 0x8b, 0x9a, 0xa1, 0x76, 0x50, 0x4c, 0xba, 0xc5, 0x14, 0x2f, 0x20, 0x4f, 0x21, - 0xe3, 0xf4, 0x4c, 0x57, 0xa3, 0x98, 0x2a, 0x0b, 0xd5, 0x5c, 0xfd, 0xfe, 0x5c, 0x85, 0x14, 0x0f, - 0xab, 0x04, 0x24, 0xa4, 0x0a, 0x29, 0x6a, 0x63, 0x4b, 0x5c, 0xe4, 0x64, 0x79, 0xd9, 0x73, 0x43, - 0x0e, 0xdc, 0x90, 0x37, 0xcd, 0x81, 0xc2, 0x33, 0x48, 0x19, 0x72, 0xd4, 0x54, 0x6d, 0x7a, 0x62, - 0x31, 0x86, 0x8e, 0x98, 0xe6, 0xaa, 0xa2, 0x1f, 0x91, 0x65, 0xf8, 0x2b, 0x08, 0x8f, 0xba, 0x38, - 0x10, 0x33, 0x97, 0x53, 0x9e, 0xe0, 0x80, 0x6c, 0x01, 0xb4, 0x1c, 0x54, 0x19, 0xb6, 0x8f, 0x54, - 0x26, 0x2e, 0xf1, 0xa2, 0xd2, 0x44, 0xd1, 0xe7, 0x41, 0x0b, 0x1a, 0x4b, 0xa7, 0x67, 0xa5, 0x85, - 0x4f, 0xdf, 0x4b, 0x82, 0x92, 0xf5, 0x71, 0x9b, 0xcc, 0x25, 0xe9, 0xd9, 0xed, 0x80, 0x24, 0x3b, - 0x0f, 0x89, 0x8f, 0xdb, 0x64, 0xa4, 0x09, 0x80, 0xef, 0x18, 0x9a, 0x54, 0xb3, 0x4c, 0x2a, 0x02, - 0x6f, 0xda, 0xa3, 0xb9, 0xbc, 0xdc, 0x19, 0xc1, 0x79, 0xe3, 0x1a, 0x29, 0xb7, 0x8c, 0x12, 0x61, - 0x95, 0xd6, 0x20, 0x17, 0xe9, 0x2c, 0xf9, 0x07, 0x92, 0xae, 0x2d, 0x7c, 0x78, 0x14, 0xf7, 0xe8, - 0xf6, 0xb8, 0xaf, 0xea, 0x3d, 0x14, 0x13, 0x5e, 0x8f, 0x79, 0xb0, 0x9e, 0x58, 0x15, 0xa4, 0x03, - 0xc8, 0xf8, 0xbd, 0x22, 0x04, 0x52, 0xa6, 0x6a, 0xa0, 0x8f, 0xe3, 0x67, 0x22, 0x43, 0xc6, 0xb2, - 0x19, 0x97, 0x9e, 0xf8, 0x49, 0xe7, 0x82, 0x24, 0xe9, 0x19, 0xfc, 0x3d, 0x26, 0x37, 0x46, 0xcd, - 0xad, 0xa8, 0x9a, 0x69, 0x94, 0xa1, 0xc6, 0xca, 0x1d, 0xf8, 0x77, 0x0f, 0xd9, 0xc8, 0x10, 0x05, - 0xdf, 0xf6, 0x90, 0xb2, 0x69, 0x2b, 0x52, 0x39, 0x81, 0xfc, 0xe5, 0x74, 0x6a, 0x5b, 0x26, 0x45, - 0x72, 0x08, 0xd9, 0x91, 0xc5, 0x1c, 0x96, 0xab, 0xdf, 0x9e, 0xa7, 0x11, 0xbe, 0xf1, 0x21, 0x49, - 0x65, 0x05, 0xfe, 0xdb, 0xd7, 0x68, 0x58, 0x8a, 0x06, 0xd2, 0x44, 0xc8, 0x1c, 0x6b, 0x3a, 0x43, - 0x87, 0x8a, 0x42, 0x39, 0x59, 0xcd, 0x2a, 0x41, 0x58, 0xd1, 0xa1, 0x30, 0x0e, 0xf1, 0xe5, 0x29, - 0x00, 0x61, 0x61, 0x0e, 0xbb, 0x9a, 0xbe, 0x08, 0x4b, 0xe5, 0x0d, 0x14, 0xb6, 0xf8, 0x38, 0x4f, - 0x98, 0xf7, 0xe7, 0xcd, 0xe8, 0xc2, 0xff, 0x13, 0xb5, 0xae, 0xcd, 0xf9, 0x2f, 0x02, 0x14, 0x5e, - 0xf0, 0x1d, 0xbb, 0xfe, 0x6f, 0x46, 0x36, 0x20, 0xe7, 0xed, 0x33, 0x7f, 0xcf, 0xfd, 0xa9, 0x9d, - 0x7c, 
0x08, 0x76, 0xdd, 0x27, 0xff, 0x40, 0xa5, 0x5d, 0xc5, 0x7f, 0x36, 0xdc, 0xb3, 0x6b, 0xcb, - 0x84, 0xd0, 0x6b, 0xb3, 0xe5, 0x2e, 0x14, 0xb6, 0x51, 0xc7, 0x18, 0x57, 0xa6, 0x2c, 0x4b, 0xfd, - 0x3c, 0x05, 0x10, 0x0e, 0x23, 0xe9, 0x43, 0x72, 0x0f, 0x19, 0x79, 0x30, 0x83, 0x8c, 0x98, 0x95, - 0x94, 0x1e, 0xce, 0x8d, 0xf3, 0xad, 0x78, 0x0f, 0x29, 0x77, 0x2d, 0xc8, 0x2c, 0x3f, 0x67, 0xb1, - 0x2b, 0x27, 0xad, 0x5d, 0x01, 0xe9, 0x17, 0xff, 0x00, 0x69, 0x6f, 0x72, 0xc9, 0x2c, 0x24, 0xf1, - 0x0b, 0x25, 0xad, 0x5f, 0x05, 0x1a, 0x0a, 0xf0, 0x66, 0x64, 0x26, 0x01, 0xf1, 0x73, 0x3f, 0x93, - 0x80, 0x69, 0x93, 0xf8, 0x0a, 0xd2, 0xde, 0xdc, 0xcc, 0x24, 0x20, 0x7e, 0xc4, 0xa4, 0xc2, 0xc4, - 0x46, 0xec, 0xb8, 0xff, 0x90, 0x1a, 0xaf, 0x4f, 0x2f, 0x8a, 0x0b, 0xdf, 0x2e, 0x8a, 0x0b, 0x1f, - 0x87, 0x45, 0xe1, 0x74, 0x58, 0x14, 0xbe, 0x0e, 0x8b, 0xc2, 0xf9, 0xb0, 0x28, 0xbc, 0xdc, 0xfd, - 0x8d, 0x3f, 0x7d, 0x1b, 0x61, 0xd4, 0x4c, 0xf3, 0x8a, 0xf7, 0x7e, 0x04, 0x00, 0x00, 0xff, 0xff, - 0x17, 0x73, 0xba, 0x43, 0x45, 0x0a, 0x00, 0x00, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcd, 0x72, 0xd2, 0x50, + 0x14, 0x26, 0x81, 0x86, 0x72, 0x70, 0x46, 0xe7, 0x8a, 0x18, 0xe3, 0x0c, 0x50, 0x56, 0x8c, 0xa3, + 0xc1, 0xa2, 0xa3, 0xfd, 0x71, 0x53, 0xfa, 0x37, 0x8e, 0xad, 0xd3, 0x89, 0x3a, 0xe3, 0xe8, 0xa2, + 0x06, 0xb8, 0xa5, 0x91, 0xfc, 0x99, 0x7b, 0x61, 0x64, 0x5c, 0xe8, 0x23, 0xb8, 0xf3, 0x11, 0x7c, + 0x95, 0x2e, 0x5d, 0xba, 0xaa, 0x2d, 0x4f, 0xe2, 0xe4, 0x26, 0x21, 0x29, 0x04, 0x85, 0x2a, 0xbb, + 0x7b, 0xb8, 0xe7, 0xfb, 0xce, 0xc7, 0x77, 0xce, 0xb9, 0x00, 0x7b, 0x6d, 0x8d, 0x1e, 0x77, 0x1b, + 0x72, 0xd3, 0x32, 0xaa, 0x4d, 0xcb, 0xa4, 0xaa, 0x66, 0x62, 0xa7, 0x15, 0x3d, 0xaa, 0xb6, 0x56, + 0x25, 0xd8, 0xe9, 0x69, 0x4d, 0x4c, 0xc2, 0xcf, 0x49, 0xb5, 0xb7, 0x1c, 0x89, 0x64, 0xdb, 0xb1, + 0xa8, 0x85, 0x96, 0x42, 0x9c, 0x1c, 0x60, 0xe4, 0x48, 0x56, 0x6f, 0x59, 0xca, 0xb5, 0xad, 0xb6, + 0xc5, 0xb2, 0xab, 0xee, 0xc9, 0x03, 0x4a, 0xb7, 0xda, 0x96, 0xd5, 0xd6, 0x71, 0x95, 0x45, 0x8d, + 0xee, 0x51, 0x55, 0x35, 0xfb, 0xfe, 0xd5, 0xed, 0xd1, 0x2b, 0x6c, 0xd8, 0x34, 0xb8, 0x2c, 0x8d, + 0x5e, 0x1e, 0x69, 0x58, 0x6f, 0x1d, 0x1a, 0x2a, 0xe9, 0xf8, 0x19, 0xc5, 0xd1, 0x0c, 0xaa, 0x19, + 0x98, 0x50, 0xd5, 0xb0, 0xbd, 0x84, 0xf2, 0x37, 0x01, 0x32, 0x9b, 0x81, 0x44, 0x94, 0x07, 0x5e, + 0x6b, 0x89, 0x5c, 0x89, 0xab, 0x64, 0xea, 0xc2, 0xe0, 0xb4, 0xc8, 0x3f, 0xdd, 0x52, 0x78, 0xad, + 0x85, 0x0e, 0x40, 0xd0, 0xd5, 0x06, 0xd6, 0x89, 0xc8, 0x97, 0x92, 0x95, 0x6c, 0x6d, 0x45, 0xfe, + 0xeb, 0x57, 0x95, 0x87, 0xac, 0xf2, 0x1e, 0x83, 0x6e, 0x9b, 0xd4, 0xe9, 0x2b, 0x3e, 0x0f, 0xca, + 0xc1, 0x82, 0x66, 0xa8, 0x6d, 0x2c, 0x26, 0xdd, 0x62, 0x8a, 0x17, 0xa0, 0xe7, 0x90, 0x76, 0xba, + 0xa6, 0xab, 0x51, 0x4c, 0x95, 0xb8, 0x4a, 0xb6, 0xf6, 0x70, 0xa6, 0x42, 0x8a, 0x87, 0x55, 0x02, + 0x12, 0x54, 0x81, 0x14, 0xb1, 0x71, 0x53, 0x5c, 0x60, 0x64, 0x39, 0xd9, 0x73, 0x43, 0x0e, 0xdc, + 0x90, 0x37, 0xcc, 0xbe, 0xc2, 0x32, 0x50, 0x09, 0xb2, 0xc4, 0x54, 0x6d, 0x72, 0x6c, 0x51, 0x8a, + 0x1d, 0x51, 0x60, 0xaa, 0xa2, 0x1f, 0xa1, 0x25, 0xb8, 0x12, 0x84, 0x87, 0x1d, 0xdc, 0x17, 0xd3, + 0x17, 0x53, 0x9e, 0xe1, 0x3e, 0xda, 0x04, 0x68, 0x3a, 0x58, 0xa5, 0xb8, 0x75, 0xa8, 0x52, 0x71, + 0x91, 0x15, 0x95, 0xc6, 0x8a, 0xbe, 0x0c, 0x5a, 0x50, 0x5f, 0x3c, 0x39, 0x2d, 0x26, 0xbe, 0xfe, + 0x2a, 0x72, 0x4a, 0xc6, 0xc7, 0x6d, 0x50, 0x97, 0xa4, 0x6b, 0xb7, 0x02, 0x92, 0xcc, 0x2c, 0x24, + 0x3e, 0x6e, 0x83, 0xa2, 0x06, 0x00, 0xfe, 0x48, 0xb1, 0x49, 0x34, 0xcb, 0x24, 0x22, 0xb0, 0xa6, + 0x3d, 0x99, 0xc9, 0xcb, 0xed, 0x21, 0x9c, 0x35, 0xae, 0x9e, 0x72, 0xcb, 0x28, 
0x11, 0x56, 0x69, + 0x15, 0xb2, 0x91, 0xce, 0xa2, 0x6b, 0x90, 0x74, 0x6d, 0x61, 0xc3, 0xa3, 0xb8, 0x47, 0xb7, 0xc7, + 0x3d, 0x55, 0xef, 0x62, 0x91, 0xf7, 0x7a, 0xcc, 0x82, 0x35, 0x7e, 0x85, 0x93, 0xf6, 0x21, 0xed, + 0xf7, 0x0a, 0x21, 0x48, 0x99, 0xaa, 0x81, 0x7d, 0x1c, 0x3b, 0x23, 0x19, 0xd2, 0x96, 0x4d, 0x99, + 0x74, 0xfe, 0x0f, 0x9d, 0x0b, 0x92, 0xa4, 0x17, 0x70, 0x75, 0x44, 0x6e, 0x8c, 0x9a, 0x3b, 0x51, + 0x35, 0x93, 0x28, 0x43, 0x8d, 0xe5, 0x7b, 0x70, 0x7d, 0x17, 0xd3, 0xa1, 0x21, 0x0a, 0xfe, 0xd0, + 0xc5, 0x84, 0x4e, 0x5a, 0x91, 0xf2, 0x31, 0xe4, 0x2e, 0xa6, 0x13, 0xdb, 0x32, 0x09, 0x46, 0x07, + 0x90, 0x19, 0x5a, 0xcc, 0x60, 0xd9, 0xda, 0xdd, 0x59, 0x1a, 0xe1, 0x1b, 0x1f, 0x92, 0x94, 0x97, + 0xe1, 0xc6, 0x9e, 0x46, 0xc2, 0x52, 0x24, 0x90, 0x26, 0x42, 0xfa, 0x48, 0xd3, 0x29, 0x76, 0x88, + 0xc8, 0x95, 0x92, 0x95, 0x8c, 0x12, 0x84, 0x65, 0x1d, 0xf2, 0xa3, 0x10, 0x5f, 0x9e, 0x02, 0x10, + 0x16, 0x66, 0xb0, 0xcb, 0xe9, 0x8b, 0xb0, 0x94, 0xdf, 0x43, 0x7e, 0x93, 0x8d, 0xf3, 0x98, 0x79, + 0xff, 0xdf, 0x8c, 0x0e, 0xdc, 0x1c, 0xab, 0x35, 0x37, 0xe7, 0xbf, 0x73, 0x90, 0x7f, 0xc5, 0x76, + 0x6c, 0xfe, 0xdf, 0x0c, 0xad, 0x43, 0xd6, 0xdb, 0x67, 0xf6, 0x9e, 0xfb, 0x53, 0x3b, 0xfe, 0x10, + 0xec, 0xb8, 0x4f, 0xfe, 0xbe, 0x4a, 0x3a, 0x8a, 0xff, 0x6c, 0xb8, 0x67, 0xd7, 0x96, 0x31, 0xa1, + 0x73, 0xb3, 0xe5, 0x3e, 0xe4, 0xb7, 0xb0, 0x8e, 0x63, 0x5c, 0x99, 0xb0, 0x2c, 0xb5, 0xb3, 0x14, + 0x40, 0x38, 0x8c, 0xa8, 0x07, 0xc9, 0x5d, 0x4c, 0xd1, 0xa3, 0x29, 0x64, 0xc4, 0xac, 0xa4, 0xf4, + 0x78, 0x66, 0x9c, 0x6f, 0xc5, 0x27, 0x48, 0xb9, 0x6b, 0x81, 0xa6, 0xf9, 0x39, 0x8b, 0x5d, 0x39, + 0x69, 0xf5, 0x12, 0x48, 0xbf, 0xf8, 0x67, 0x10, 0xbc, 0xc9, 0x45, 0xd3, 0x90, 0xc4, 0x2f, 0x94, + 0xb4, 0x76, 0x19, 0x68, 0x28, 0xc0, 0x9b, 0x91, 0xa9, 0x04, 0xc4, 0xcf, 0xfd, 0x54, 0x02, 0x26, + 0x4d, 0xe2, 0x5b, 0x10, 0xbc, 0xb9, 0x99, 0x4a, 0x40, 0xfc, 0x88, 0x49, 0xf9, 0xb1, 0x8d, 0xd8, + 0x76, 0xff, 0x21, 0xd5, 0xdf, 0x9d, 0x9c, 0x17, 0x12, 0x3f, 0xcf, 0x0b, 0x89, 0x2f, 0x83, 0x02, + 0x77, 0x32, 0x28, 0x70, 0x3f, 0x06, 0x05, 0xee, 0x6c, 0x50, 0xe0, 0xde, 0xec, 0xfc, 0xc3, 0x9f, + 0xbe, 0xf5, 0x30, 0x7a, 0x9d, 0x68, 0x08, 0xac, 0xe6, 0x83, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, + 0x95, 0x94, 0x84, 0xf2, 0x47, 0x0a, 0x00, 0x00, } diff --git a/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.proto b/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.proto index 0a2311c4c4..b7b32d949a 100644 --- a/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.proto +++ b/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package containerd.services.containers.v1; -import "gogoproto/gogo.proto"; +import weak "gogoproto/gogo.proto"; import "google/protobuf/any.proto"; import "google/protobuf/empty.proto"; import "google/protobuf/field_mask.proto"; diff --git a/vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go b/vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go index c9b76b5a77..4e4f723328 100644 --- a/vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/containerd/containerd/api/services/content/v1/content.proto -// DO NOT EDIT! /* Package content is a generated protocol buffer package. 
@@ -33,10 +32,11 @@ package content import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" -import _ "github.com/gogo/protobuf/gogoproto" + +// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" import google_protobuf1 "github.com/gogo/protobuf/types" import _ "github.com/gogo/protobuf/types" -import google_protobuf3 "github.com/golang/protobuf/ptypes/empty" +import google_protobuf3 "github.com/gogo/protobuf/types" import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest" import time "time" @@ -1550,24 +1550,6 @@ func (m *AbortRequest) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Content(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Content(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintContent(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -2247,51 +2229,14 @@ func (m *Info) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthContent - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Labels == nil { m.Labels = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowContent @@ -2301,41 +2246,80 @@ func (m *Info) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthContent + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] 
- iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthContent + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipContent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthContent - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Labels[mapkey] = mapvalue - } else { - var mapvalue string - m.Labels[mapkey] = mapvalue } + m.Labels[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -3895,51 +3879,14 @@ func (m *WriteContentRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthContent - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Labels == nil { m.Labels = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowContent @@ -3949,41 +3896,80 @@ func (m *WriteContentRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if 
iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthContent + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthContent + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipContent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthContent - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Labels[mapkey] = mapvalue - } else { - var mapvalue string - m.Labels[mapkey] = mapvalue } + m.Labels[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -4391,73 +4377,73 @@ func init() { } var fileDescriptorContent = []byte{ - // 1079 bytes of a gzipped FileDescriptorProto + // 1081 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0xcd, 0x6f, 0x1b, 0x45, - 0x14, 0xcf, 0x78, 0xed, 0x4d, 0xf2, 0x9c, 0x16, 0x33, 0x31, 0x95, 0xb5, 0x08, 0x7b, 0xbb, 0x42, + 0x14, 0xf7, 0x78, 0xed, 0x4d, 0xf2, 0x9c, 0x16, 0x33, 0x31, 0x95, 0xb5, 0x08, 0x67, 0xbb, 0x42, 0xc8, 0x6a, 0xc9, 0x3a, 0x75, 0x7a, 0x00, 0x2a, 0x01, 0x8e, 0x9b, 0xaa, 0x41, 0x4d, 0x41, 0x5b, - 0x97, 0x88, 0x5e, 0xca, 0xda, 0x1e, 0x9b, 0x55, 0x6c, 0xaf, 0xbb, 0x33, 0xb6, 0x08, 0x27, 0x2e, - 0x48, 0x28, 0xea, 0x01, 0x71, 0xcf, 0x05, 0xf8, 0x2b, 0x38, 0x70, 0xce, 0x91, 0x23, 0xe2, 0xd0, - 0xd2, 0xfc, 0x0f, 0xdc, 0xd1, 0xcc, 0xce, 0xda, 0xeb, 0x8f, 0xb0, 0xb6, 0xe3, 0x9e, 0xfc, 0x66, - 0xf6, 0xfd, 0xde, 0xf7, 0xc7, 0x18, 0xee, 0x35, 0x1d, 0xf6, 0x4d, 0xaf, 0x6a, 0xd6, 0xdc, 0x76, - 0xa1, 0xe6, 0x76, 0x98, 0xed, 0x74, 0x88, 0x57, 0x0f, 0x93, 0x76, 0xd7, 0x29, 0x50, 0xe2, 0xf5, - 0x9d, 0x1a, 0xa1, 0xe2, 0x9e, 0x74, 0x58, 0xa1, 0x7f, 0x2b, 0x20, 0xcd, 0xae, 0xe7, 0x32, 0x17, - 0x67, 0x87, 0x08, 0x33, 0xe0, 0x36, 0x03, 0x96, 0xfe, 0x2d, 0x2d, 0xdd, 0x74, 0x9b, 0xae, 0x60, - 0x2d, 0x70, 0xca, 0x47, 0x69, 0x7a, 0xd3, 0x75, 0x9b, 0x2d, 0x52, 0x10, 0xa7, 0x6a, 0xaf, 0x51, - 0x68, 0x38, 0xa4, 0x55, 0x7f, 0xda, 0xb6, 0xe9, 0x91, 0xe4, 0xc8, 0x8d, 0x73, 0x30, 0xa7, 0x4d, - 0x28, 0xb3, 0xdb, 0x5d, 0xc9, 0xf0, 0xf6, 0x38, 0x03, 0x69, 0x77, 0xd9, 0xb1, 0xff, 0xd1, 0xf8, - 0x37, 0x06, 0xf1, 0xfd, 0x4e, 0xc3, 0xc5, 0x9f, 0x81, 0x5a, 0x77, 0x9a, 0x84, 0xb2, 0x0c, 0xd2, - 0x51, 0x7e, 0x7d, 0xb7, 0x78, 0xf6, 0x22, 0xb7, 0xf2, 0xf7, 0x8b, 0xdc, 
0x8d, 0x90, 0xfb, 0x6e, + 0x97, 0x40, 0x2f, 0x65, 0x6d, 0x8f, 0xcd, 0x2a, 0xb6, 0xd7, 0xdd, 0x19, 0x5b, 0x84, 0x13, 0x17, + 0x24, 0x14, 0xf5, 0x80, 0xb8, 0xe7, 0x02, 0xfc, 0x15, 0x1c, 0x38, 0xe7, 0xc8, 0x11, 0x71, 0x68, + 0x69, 0xfe, 0x07, 0xee, 0x68, 0x66, 0x67, 0xed, 0xf5, 0x47, 0x58, 0xdb, 0x31, 0x27, 0xbf, 0x99, + 0x7d, 0xbf, 0xf7, 0xfd, 0x31, 0x86, 0x7b, 0x4d, 0x87, 0x7d, 0xdd, 0xab, 0x9a, 0x35, 0xb7, 0x5d, + 0xa8, 0xb9, 0x1d, 0x66, 0x3b, 0x1d, 0xe2, 0xd5, 0xc3, 0xa4, 0xdd, 0x75, 0x0a, 0x94, 0x78, 0x7d, + 0xa7, 0x46, 0xa8, 0xb8, 0x27, 0x1d, 0x56, 0xe8, 0xdf, 0x0a, 0x48, 0xb3, 0xeb, 0xb9, 0xcc, 0xc5, + 0xb9, 0x21, 0xc2, 0x0c, 0xb8, 0xcd, 0x80, 0xa5, 0x7f, 0x4b, 0xcb, 0x34, 0xdd, 0xa6, 0x2b, 0x58, + 0x0b, 0x9c, 0xf2, 0x51, 0x9a, 0xde, 0x74, 0xdd, 0x66, 0x8b, 0x14, 0xc4, 0xa9, 0xda, 0x6b, 0x14, + 0x1a, 0x0e, 0x69, 0xd5, 0x9f, 0xb6, 0x6d, 0x7a, 0x24, 0x39, 0x36, 0xc7, 0x39, 0x98, 0xd3, 0x26, + 0x94, 0xd9, 0xed, 0xae, 0x64, 0x78, 0x73, 0x9c, 0x81, 0xb4, 0xbb, 0xec, 0xd8, 0xff, 0x68, 0xfc, + 0x13, 0x87, 0xc4, 0x7e, 0xa7, 0xe1, 0xe2, 0x4f, 0x40, 0xad, 0x3b, 0x4d, 0x42, 0x59, 0x16, 0xe9, + 0x28, 0xbf, 0xb6, 0x5b, 0x3c, 0x7b, 0xb1, 0x19, 0xfb, 0xeb, 0xc5, 0xe6, 0x8d, 0x90, 0xfb, 0x6e, 0x97, 0x74, 0x06, 0x5e, 0xd0, 0x42, 0xd3, 0xdd, 0xf2, 0x21, 0xe6, 0x5d, 0xf1, 0x63, 0x49, 0x09, - 0x18, 0x43, 0x9c, 0x3a, 0xdf, 0x91, 0x4c, 0x4c, 0x47, 0x79, 0xc5, 0x12, 0x34, 0x2e, 0x03, 0xd4, - 0x3c, 0x62, 0x33, 0x52, 0x7f, 0x6a, 0xb3, 0x8c, 0xa2, 0xa3, 0x7c, 0xb2, 0xa8, 0x99, 0xbe, 0x69, - 0x66, 0x60, 0x9a, 0x59, 0x09, 0x6c, 0xdf, 0x5d, 0xe3, 0xfa, 0x7f, 0x7a, 0x99, 0x43, 0xd6, 0xba, - 0xc4, 0x95, 0x18, 0x17, 0xd2, 0xeb, 0xd6, 0x03, 0x21, 0xf1, 0x79, 0x84, 0x48, 0x5c, 0x89, 0xe1, - 0xfb, 0xa0, 0xb6, 0xec, 0x2a, 0x69, 0xd1, 0x4c, 0x42, 0x57, 0xf2, 0xc9, 0xe2, 0xb6, 0xf9, 0xff, - 0x99, 0x31, 0x79, 0x7c, 0xcc, 0x07, 0x02, 0xb2, 0xd7, 0x61, 0xde, 0xb1, 0x25, 0xf1, 0xda, 0x87, - 0x90, 0x0c, 0x5d, 0xe3, 0x14, 0x28, 0x47, 0xe4, 0xd8, 0x8f, 0x9f, 0xc5, 0x49, 0x9c, 0x86, 0x44, - 0xdf, 0x6e, 0xf5, 0xfc, 0x48, 0xac, 0x5b, 0xfe, 0xe1, 0xa3, 0xd8, 0x07, 0xc8, 0xf8, 0x0a, 0x92, - 0x5c, 0xac, 0x45, 0x9e, 0xf5, 0x78, 0xc4, 0x96, 0x18, 0x7d, 0xe3, 0x21, 0x6c, 0xf8, 0xa2, 0x69, - 0xd7, 0xed, 0x50, 0x82, 0x3f, 0x86, 0xb8, 0xd3, 0x69, 0xb8, 0x42, 0x72, 0xb2, 0xf8, 0xee, 0x2c, - 0xde, 0xee, 0xc6, 0xb9, 0x7e, 0x4b, 0xe0, 0x8c, 0xe7, 0x08, 0xae, 0x3c, 0x16, 0xd1, 0x0b, 0xac, - 0xbd, 0xa4, 0x44, 0x7c, 0x07, 0x92, 0x7e, 0x3a, 0x44, 0x1d, 0x8b, 0xe0, 0x4c, 0xcb, 0xe3, 0x3d, - 0x5e, 0xea, 0x07, 0x36, 0x3d, 0xb2, 0x64, 0xd6, 0x39, 0x6d, 0x7c, 0x01, 0x57, 0x03, 0x6b, 0x96, - 0xe4, 0xa0, 0x09, 0xf8, 0x81, 0x43, 0x59, 0xd9, 0x67, 0x09, 0x9c, 0xcc, 0xc0, 0x6a, 0xc3, 0x69, - 0x31, 0xe2, 0xd1, 0x0c, 0xd2, 0x95, 0xfc, 0xba, 0x15, 0x1c, 0x8d, 0xc7, 0xb0, 0x39, 0xc2, 0x3f, - 0x61, 0x86, 0xb2, 0x90, 0x19, 0x55, 0x48, 0xdf, 0x25, 0x2d, 0xc2, 0xc8, 0x98, 0x21, 0xcb, 0xac, - 0x8d, 0xe7, 0x08, 0xb0, 0x45, 0xec, 0xfa, 0xeb, 0x53, 0x81, 0xaf, 0x81, 0xea, 0x36, 0x1a, 0x94, - 0x30, 0xd9, 0xfe, 0xf2, 0x34, 0x18, 0x0a, 0xca, 0x70, 0x28, 0x18, 0x25, 0xd8, 0x1c, 0xb1, 0x46, - 0x46, 0x72, 0x28, 0x02, 0x8d, 0x8b, 0xa8, 0xdb, 0xcc, 0x16, 0x82, 0x37, 0x2c, 0x41, 0x1b, 0xbf, - 0xc4, 0x40, 0x7d, 0xc4, 0x6c, 0xd6, 0xa3, 0x7c, 0x3a, 0x50, 0x66, 0x7b, 0x72, 0x3a, 0xa0, 0x79, - 0xa6, 0x83, 0xc4, 0x4d, 0x8c, 0x98, 0xd8, 0x62, 0x23, 0x26, 0x05, 0x8a, 0x47, 0x1a, 0xc2, 0xd5, - 0x75, 0x8b, 0x93, 0x21, 0x97, 0xe2, 0x23, 0x2e, 0xa5, 0x21, 0xc1, 0x5c, 0x66, 0xb7, 0x32, 0x09, - 0x71, 0xed, 0x1f, 0xf0, 0x43, 0x58, 0x23, 0xdf, 0x76, 0x49, 0x8d, 0x91, 0x7a, 0x46, 0x5d, 0x38, - 
0x23, 0x03, 0x19, 0xc6, 0x75, 0xb8, 0xe2, 0xc7, 0x28, 0x48, 0xb8, 0x34, 0x10, 0x0d, 0x0c, 0xe4, - 0x6d, 0x15, 0xb0, 0x0c, 0xea, 0x59, 0xa5, 0xe2, 0x46, 0x86, 0xf2, 0xbd, 0xa8, 0x8a, 0x96, 0x78, - 0x89, 0x32, 0x0a, 0x7e, 0x9b, 0xf8, 0xb7, 0x84, 0x46, 0xf7, 0xd5, 0xd7, 0x90, 0x1e, 0x05, 0x48, - 0x43, 0xee, 0xc3, 0x1a, 0x95, 0x77, 0xb2, 0xb9, 0x66, 0x34, 0x45, 0xb6, 0xd7, 0x00, 0x6d, 0xfc, - 0xac, 0xc0, 0xe6, 0xa1, 0xe7, 0x4c, 0xb4, 0x58, 0x19, 0x54, 0xbb, 0xc6, 0x1c, 0xb7, 0x23, 0x5c, - 0xbd, 0x5a, 0xbc, 0x19, 0x25, 0x5f, 0x08, 0x29, 0x09, 0x88, 0x25, 0xa1, 0x41, 0x4c, 0x63, 0xc3, - 0xa4, 0x0f, 0x92, 0xab, 0x5c, 0x94, 0xdc, 0xf8, 0xe5, 0x93, 0x1b, 0x2a, 0xad, 0xc4, 0xd4, 0x6e, - 0x51, 0x87, 0xdd, 0x82, 0x0f, 0x07, 0xbb, 0x6f, 0x55, 0x04, 0xf2, 0x93, 0x99, 0x1c, 0x1d, 0x8d, - 0xd6, 0xb2, 0x57, 0xe1, 0xcb, 0x18, 0xa4, 0x47, 0xd5, 0xc8, 0xbc, 0x2f, 0x25, 0x2b, 0xa3, 0x43, - 0x21, 0xb6, 0x8c, 0xa1, 0xa0, 0x2c, 0x36, 0x14, 0xe6, 0x1b, 0x01, 0xc3, 0x91, 0xac, 0x5e, 0x7a, - 0xea, 0xeb, 0xb0, 0x51, 0xaa, 0xba, 0x1e, 0xbb, 0xb0, 0xfb, 0x6f, 0xfc, 0x80, 0x20, 0x19, 0x8a, - 0x1e, 0x7e, 0x07, 0xe2, 0x8f, 0x2a, 0xa5, 0x4a, 0x6a, 0x45, 0xdb, 0x3c, 0x39, 0xd5, 0xdf, 0x08, - 0x7d, 0xe2, 0x9d, 0x85, 0x73, 0x90, 0x38, 0xb4, 0xf6, 0x2b, 0x7b, 0x29, 0xa4, 0xa5, 0x4f, 0x4e, - 0xf5, 0x54, 0xe8, 0xbb, 0x20, 0xf1, 0x75, 0x50, 0xcb, 0x9f, 0x1f, 0x1c, 0xec, 0x57, 0x52, 0x31, - 0xed, 0xad, 0x93, 0x53, 0xfd, 0xcd, 0x10, 0x47, 0xd9, 0x6d, 0xb7, 0x1d, 0xa6, 0x6d, 0xfe, 0xf8, - 0x6b, 0x76, 0xe5, 0xf7, 0xdf, 0xb2, 0x61, 0xbd, 0xc5, 0x3f, 0x56, 0x61, 0x55, 0x96, 0x01, 0xb6, - 0xe5, 0xcb, 0xf4, 0xe6, 0x2c, 0x9b, 0x54, 0xba, 0xa6, 0xbd, 0x3f, 0x1b, 0xb3, 0xac, 0xb0, 0x26, - 0xa8, 0xfe, 0x5b, 0x02, 0x6f, 0x45, 0xe1, 0x46, 0x5e, 0x40, 0x9a, 0x39, 0x2b, 0xbb, 0x54, 0xf4, - 0x0c, 0xe2, 0x7c, 0xb4, 0xe1, 0x62, 0x14, 0x6e, 0xf2, 0x21, 0xa2, 0xed, 0xcc, 0x85, 0xf1, 0x15, - 0x6e, 0x23, 0xfc, 0x25, 0xa8, 0xfe, 0x73, 0x02, 0xdf, 0x8e, 0x12, 0x30, 0xed, 0xd9, 0xa1, 0x5d, - 0x9b, 0xa8, 0xef, 0x3d, 0xfe, 0xbf, 0x81, 0xbb, 0xc2, 0x77, 0x76, 0xb4, 0x2b, 0x93, 0xef, 0x8c, - 0x68, 0x57, 0xa6, 0xbc, 0x06, 0xb6, 0x11, 0x4f, 0x93, 0x5c, 0xf1, 0x5b, 0x33, 0xee, 0xa0, 0x59, - 0xd3, 0x34, 0xb6, 0xf2, 0x8e, 0x61, 0x23, 0xbc, 0x81, 0xf0, 0x4c, 0xa1, 0x1f, 0x5b, 0x70, 0xda, - 0xed, 0xf9, 0x40, 0x52, 0x75, 0x1f, 0x12, 0x7e, 0xeb, 0xec, 0x2c, 0x30, 0x92, 0xa3, 0x75, 0x4e, - 0x1b, 0xb0, 0x79, 0xb4, 0x8d, 0xf0, 0x01, 0x24, 0xc4, 0x6c, 0xc0, 0x91, 0x9d, 0x13, 0x1e, 0x21, - 0x17, 0x55, 0xc7, 0xee, 0x93, 0xb3, 0x57, 0xd9, 0x95, 0xbf, 0x5e, 0x65, 0x57, 0xbe, 0x3f, 0xcf, - 0xa2, 0xb3, 0xf3, 0x2c, 0xfa, 0xf3, 0x3c, 0x8b, 0xfe, 0x39, 0xcf, 0xa2, 0x27, 0x9f, 0x2e, 0xfa, - 0x3f, 0xfa, 0x8e, 0x24, 0xab, 0xaa, 0xd0, 0xb5, 0xf3, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xbf, - 0xc1, 0xae, 0xf1, 0x92, 0x0f, 0x00, 0x00, + 0x18, 0x43, 0x82, 0x3a, 0xdf, 0x92, 0x6c, 0x5c, 0x47, 0x79, 0xc5, 0x12, 0x34, 0x2e, 0x03, 0xd4, + 0x3c, 0x62, 0x33, 0x52, 0x7f, 0x6a, 0xb3, 0xac, 0xa2, 0xa3, 0x7c, 0xaa, 0xa8, 0x99, 0xbe, 0x69, + 0x66, 0x60, 0x9a, 0x59, 0x09, 0x6c, 0xdf, 0x5d, 0xe5, 0xfa, 0x7f, 0x7c, 0xb9, 0x89, 0xac, 0x35, + 0x89, 0x2b, 0x31, 0x2e, 0xa4, 0xd7, 0xad, 0x07, 0x42, 0x12, 0xf3, 0x08, 0x91, 0xb8, 0x12, 0xc3, + 0xf7, 0x41, 0x6d, 0xd9, 0x55, 0xd2, 0xa2, 0xd9, 0xa4, 0xae, 0xe4, 0x53, 0xc5, 0x6d, 0xf3, 0xbf, + 0x33, 0x63, 0xf2, 0xf8, 0x98, 0x0f, 0x04, 0x64, 0xaf, 0xc3, 0xbc, 0x63, 0x4b, 0xe2, 0xb5, 0xf7, + 0x21, 0x15, 0xba, 0xc6, 0x69, 0x50, 0x8e, 0xc8, 0xb1, 0x1f, 0x3f, 0x8b, 0x93, 0x38, 0x03, 0xc9, + 0xbe, 0xdd, 0xea, 0xf9, 0x91, 0x58, 0xb3, 0xfc, 0xc3, 0x07, 0xf1, 0xf7, 0x90, 
0xf1, 0x25, 0xa4, + 0xb8, 0x58, 0x8b, 0x3c, 0xeb, 0xf1, 0x88, 0x2d, 0x31, 0xfa, 0xc6, 0x43, 0x58, 0xf7, 0x45, 0xd3, + 0xae, 0xdb, 0xa1, 0x04, 0x7f, 0x08, 0x09, 0xa7, 0xd3, 0x70, 0x85, 0xe4, 0x54, 0xf1, 0xed, 0x59, + 0xbc, 0xdd, 0x4d, 0x70, 0xfd, 0x96, 0xc0, 0x19, 0xcf, 0x11, 0x5c, 0x79, 0x2c, 0xa2, 0x17, 0x58, + 0x7b, 0x49, 0x89, 0xf8, 0x0e, 0xa4, 0xfc, 0x74, 0x88, 0x3a, 0x16, 0xc1, 0x99, 0x96, 0xc7, 0x7b, + 0xbc, 0xd4, 0x0f, 0x6c, 0x7a, 0x64, 0xc9, 0xac, 0x73, 0xda, 0xf8, 0x0c, 0xae, 0x06, 0xd6, 0x2c, + 0xc9, 0x41, 0x13, 0xf0, 0x03, 0x87, 0xb2, 0xb2, 0xcf, 0x12, 0x38, 0x99, 0x85, 0x95, 0x86, 0xd3, + 0x62, 0xc4, 0xa3, 0x59, 0xa4, 0x2b, 0xf9, 0x35, 0x2b, 0x38, 0x1a, 0x8f, 0x61, 0x63, 0x84, 0x7f, + 0xc2, 0x0c, 0x65, 0x21, 0x33, 0xaa, 0x90, 0xb9, 0x4b, 0x5a, 0x84, 0x91, 0x31, 0x43, 0x96, 0x59, + 0x1b, 0xcf, 0x11, 0x60, 0x8b, 0xd8, 0xf5, 0xff, 0x4f, 0x05, 0xbe, 0x06, 0xaa, 0xdb, 0x68, 0x50, + 0xc2, 0x64, 0xfb, 0xcb, 0xd3, 0x60, 0x28, 0x28, 0xc3, 0xa1, 0x60, 0x94, 0x60, 0x63, 0xc4, 0x1a, + 0x19, 0xc9, 0xa1, 0x08, 0x34, 0x2e, 0xa2, 0x6e, 0x33, 0x5b, 0x08, 0x5e, 0xb7, 0x04, 0x6d, 0xfc, + 0x1c, 0x07, 0xf5, 0x11, 0xb3, 0x59, 0x8f, 0xf2, 0xe9, 0x40, 0x99, 0xed, 0xc9, 0xe9, 0x80, 0xe6, + 0x99, 0x0e, 0x12, 0x37, 0x31, 0x62, 0xe2, 0x8b, 0x8d, 0x98, 0x34, 0x28, 0x1e, 0x69, 0x08, 0x57, + 0xd7, 0x2c, 0x4e, 0x86, 0x5c, 0x4a, 0x8c, 0xb8, 0x94, 0x81, 0x24, 0x73, 0x99, 0xdd, 0xca, 0x26, + 0xc5, 0xb5, 0x7f, 0xc0, 0x0f, 0x61, 0x95, 0x7c, 0xd3, 0x25, 0x35, 0x46, 0xea, 0x59, 0x75, 0xe1, + 0x8c, 0x0c, 0x64, 0x18, 0xd7, 0xe1, 0x8a, 0x1f, 0xa3, 0x20, 0xe1, 0xd2, 0x40, 0x34, 0x30, 0x90, + 0xb7, 0x55, 0xc0, 0x32, 0xa8, 0x67, 0x95, 0x8a, 0x1b, 0x19, 0xca, 0x77, 0xa2, 0x2a, 0x5a, 0xe2, + 0x25, 0xca, 0x28, 0xf8, 0x6d, 0xe2, 0xdf, 0x12, 0x1a, 0xdd, 0x57, 0x5f, 0x41, 0x66, 0x14, 0x20, + 0x0d, 0xb9, 0x0f, 0xab, 0x54, 0xde, 0xc9, 0xe6, 0x9a, 0xd1, 0x14, 0xd9, 0x5e, 0x03, 0xb4, 0xf1, + 0x93, 0x02, 0x1b, 0x87, 0x9e, 0x33, 0xd1, 0x62, 0x65, 0x50, 0xed, 0x1a, 0x73, 0xdc, 0x8e, 0x70, + 0xf5, 0x6a, 0xf1, 0x66, 0x94, 0x7c, 0x21, 0xa4, 0x24, 0x20, 0x96, 0x84, 0x06, 0x31, 0x8d, 0x0f, + 0x93, 0x3e, 0x48, 0xae, 0x72, 0x51, 0x72, 0x13, 0x97, 0x4f, 0x6e, 0xa8, 0xb4, 0x92, 0x53, 0xbb, + 0x45, 0x1d, 0x76, 0x0b, 0x3e, 0x1c, 0xec, 0xbe, 0x15, 0x11, 0xc8, 0x8f, 0x66, 0x72, 0x74, 0x34, + 0x5a, 0xcb, 0x5e, 0x85, 0x2f, 0xe3, 0x90, 0x19, 0x55, 0x23, 0xf3, 0xbe, 0x94, 0xac, 0x8c, 0x0e, + 0x85, 0xf8, 0x32, 0x86, 0x82, 0xb2, 0xd8, 0x50, 0x98, 0x6f, 0x04, 0x0c, 0x47, 0xb2, 0x7a, 0xe9, + 0xa9, 0xaf, 0xc3, 0x7a, 0xa9, 0xea, 0x7a, 0xec, 0xc2, 0xee, 0xbf, 0xf1, 0x3d, 0x82, 0x54, 0x28, + 0x7a, 0xf8, 0x2d, 0x48, 0x3c, 0xaa, 0x94, 0x2a, 0xe9, 0x98, 0xb6, 0x71, 0x72, 0xaa, 0xbf, 0x16, + 0xfa, 0xc4, 0x3b, 0x0b, 0x6f, 0x42, 0xf2, 0xd0, 0xda, 0xaf, 0xec, 0xa5, 0x91, 0x96, 0x39, 0x39, + 0xd5, 0xd3, 0xa1, 0xef, 0x82, 0xc4, 0xd7, 0x41, 0x2d, 0x7f, 0x7a, 0x70, 0xb0, 0x5f, 0x49, 0xc7, + 0xb5, 0x37, 0x4e, 0x4e, 0xf5, 0xd7, 0x43, 0x1c, 0x65, 0xb7, 0xdd, 0x76, 0x98, 0xb6, 0xf1, 0xc3, + 0x2f, 0xb9, 0xd8, 0x6f, 0xbf, 0xe6, 0xc2, 0x7a, 0x8b, 0xbf, 0xaf, 0xc0, 0x8a, 0x2c, 0x03, 0x6c, + 0xcb, 0x97, 0xe9, 0xcd, 0x59, 0x36, 0xa9, 0x74, 0x4d, 0x7b, 0x77, 0x36, 0x66, 0x59, 0x61, 0x4d, + 0x50, 0xfd, 0xb7, 0x04, 0xde, 0x8a, 0xc2, 0x8d, 0xbc, 0x80, 0x34, 0x73, 0x56, 0x76, 0xa9, 0xe8, + 0x19, 0x24, 0xf8, 0x68, 0xc3, 0xc5, 0x28, 0xdc, 0xe4, 0x43, 0x44, 0xdb, 0x99, 0x0b, 0xe3, 0x2b, + 0xdc, 0x46, 0xf8, 0x73, 0x50, 0xfd, 0xe7, 0x04, 0xbe, 0x1d, 0x25, 0x60, 0xda, 0xb3, 0x43, 0xbb, + 0x36, 0x51, 0xdf, 0x7b, 0xfc, 0x7f, 0x03, 0x77, 0x85, 0xef, 0xec, 0x68, 0x57, 0x26, 0xdf, 0x19, + 0xd1, 
0xae, 0x4c, 0x79, 0x0d, 0x6c, 0x23, 0x9e, 0x26, 0xb9, 0xe2, 0xb7, 0x66, 0xdc, 0x41, 0xb3, + 0xa6, 0x69, 0x6c, 0xe5, 0x1d, 0xc3, 0x7a, 0x78, 0x03, 0xe1, 0x99, 0x42, 0x3f, 0xb6, 0xe0, 0xb4, + 0xdb, 0xf3, 0x81, 0xa4, 0xea, 0x3e, 0x24, 0xfd, 0xd6, 0xd9, 0x59, 0x60, 0x24, 0x47, 0xeb, 0x9c, + 0x36, 0x60, 0xf3, 0x68, 0x1b, 0xe1, 0x03, 0x48, 0x8a, 0xd9, 0x80, 0x23, 0x3b, 0x27, 0x3c, 0x42, + 0x2e, 0xaa, 0x8e, 0xdd, 0x27, 0x67, 0xaf, 0x72, 0xb1, 0x3f, 0x5f, 0xe5, 0x62, 0xdf, 0x9d, 0xe7, + 0xd0, 0xd9, 0x79, 0x0e, 0xfd, 0x71, 0x9e, 0x43, 0x7f, 0x9f, 0xe7, 0xd0, 0x93, 0x8f, 0x17, 0xfd, + 0x1f, 0x7d, 0x47, 0x92, 0x5f, 0xc4, 0xaa, 0xaa, 0xd0, 0xb6, 0xf3, 0x6f, 0x00, 0x00, 0x00, 0xff, + 0xff, 0xc0, 0xc2, 0x35, 0xb1, 0x94, 0x0f, 0x00, 0x00, } diff --git a/vendor/github.com/containerd/containerd/api/services/content/v1/content.proto b/vendor/github.com/containerd/containerd/api/services/content/v1/content.proto index a0a41c447b..4f1187145b 100644 --- a/vendor/github.com/containerd/containerd/api/services/content/v1/content.proto +++ b/vendor/github.com/containerd/containerd/api/services/content/v1/content.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package containerd.services.content.v1; -import "gogoproto/gogo.proto"; +import weak "gogoproto/gogo.proto"; import "google/protobuf/field_mask.proto"; import "google/protobuf/timestamp.proto"; import "google/protobuf/empty.proto"; diff --git a/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go index b19a377317..f7b3529e57 100644 --- a/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/containerd/containerd/api/services/diff/v1/diff.proto -// DO NOT EDIT! /* Package diff is a generated protocol buffer package. 
@@ -19,7 +18,8 @@ package diff import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" -import _ "github.com/gogo/protobuf/gogoproto" + +// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" import containerd_types "github.com/containerd/containerd/api/types" import containerd_types1 "github.com/containerd/containerd/api/types" @@ -386,24 +386,6 @@ func (m *DiffResponse) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Diff(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Diff(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintDiff(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -931,51 +913,14 @@ func (m *DiffRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDiff - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDiff - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthDiff - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Labels == nil { m.Labels = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDiff @@ -985,41 +930,80 @@ func (m *DiffRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDiff + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDiff + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthDiff + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var 
stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDiff + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthDiff + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipDiff(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDiff + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthDiff - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Labels[mapkey] = mapvalue - } else { - var mapvalue string - m.Labels[mapkey] = mapvalue } + m.Labels[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -1235,34 +1219,34 @@ func init() { } var fileDescriptorDiff = []byte{ - // 454 bytes of a gzipped FileDescriptorProto + // 457 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x53, 0x4f, 0x6f, 0xd3, 0x30, - 0x14, 0x9f, 0xfb, 0x0f, 0xf5, 0x75, 0x48, 0xc8, 0x9a, 0x44, 0x14, 0x20, 0xaa, 0x7a, 0xea, 0x40, - 0x38, 0xac, 0xa0, 0x09, 0xb6, 0xcb, 0x40, 0x43, 0x5c, 0xc6, 0x25, 0xda, 0x09, 0x24, 0x50, 0xda, - 0xbc, 0x74, 0x16, 0x69, 0xec, 0xd9, 0x6e, 0xa5, 0xdc, 0xf8, 0x2e, 0x7c, 0x14, 0x2e, 0x3b, 0x72, - 0xe4, 0x48, 0xfb, 0x49, 0x90, 0x9d, 0x14, 0x22, 0x21, 0x95, 0xc0, 0x29, 0x2f, 0xcf, 0xbf, 0x7f, - 0xf6, 0xb3, 0xe1, 0x6c, 0xce, 0xcd, 0xd5, 0x72, 0xca, 0x66, 0x62, 0x11, 0xce, 0x44, 0x6e, 0x62, - 0x9e, 0xa3, 0x4a, 0xea, 0x65, 0x2c, 0x79, 0xa8, 0x51, 0xad, 0xf8, 0x0c, 0x75, 0x98, 0xf0, 0x34, - 0x0d, 0x57, 0x47, 0xee, 0xcb, 0xa4, 0x12, 0x46, 0xd0, 0x7b, 0xbf, 0xb1, 0x6c, 0x8b, 0x63, 0x6e, - 0x7d, 0x75, 0xe4, 0x1f, 0xcc, 0xc5, 0x5c, 0x38, 0x5c, 0x68, 0xab, 0x92, 0xe2, 0x1f, 0x37, 0x32, - 0x35, 0x85, 0x44, 0x1d, 0x2e, 0xc4, 0x32, 0x37, 0x15, 0xef, 0xf4, 0x1f, 0x78, 0x09, 0xea, 0x99, - 0xe2, 0xd2, 0x08, 0x55, 0x92, 0x47, 0xd7, 0xb0, 0xff, 0x52, 0xca, 0xac, 0x88, 0xf0, 0x7a, 0x89, - 0xda, 0xd0, 0x27, 0xd0, 0xb1, 0x29, 0x3d, 0x32, 0x24, 0xe3, 0xc1, 0xe4, 0x3e, 0xab, 0x6d, 0xc3, - 0x29, 0xb0, 0xf3, 0x5f, 0x0a, 0x91, 0x43, 0xd2, 0x10, 0x7a, 0x2e, 0x8d, 0xf6, 0x5a, 0xc3, 0xf6, - 0x78, 0x30, 0xb9, 0xfb, 0x27, 0xe7, 0xad, 0x5d, 0x8f, 0x2a, 0xd8, 0xe8, 0x0d, 0xdc, 0xae, 0x2c, - 0xb5, 0x14, 0xb9, 0x46, 0x7a, 0x0c, 0xb7, 0x62, 0x29, 0x33, 0x8e, 0x49, 0x23, 0xdb, 0x2d, 0x78, - 0xf4, 0xa5, 0x05, 0x83, 0x73, 0x9e, 0xa6, 0xdb, 0xec, 0x8f, 0xa0, 0x93, 0x61, 0x6a, 0x3c, 0xb2, - 0x3b, 0x87, 0x03, 0xd1, 0xc7, 0xd0, 0x55, 0x7c, 0x7e, 0x65, 0xfe, 0x96, 0xba, 0x44, 0xd1, 0x07, - 0x00, 0x0b, 0x4c, 0x78, 0xfc, 0xd1, 0xae, 0x79, 0xed, 0x21, 0x19, 0xf7, 0xa3, 0xbe, 0xeb, 0x5c, - 0x16, 0x12, 0xe9, 0x1d, 0x68, 0x2b, 0x4c, 0xbd, 0x8e, 0xeb, 0xdb, 0x92, 0x5e, 0x40, 0x2f, 0x8b, - 0xa7, 0x98, 0x69, 0xaf, 0xeb, 0x0c, 0x9e, 0xb1, 0x1d, 0x37, 0x82, 0xd5, 0xb6, 0xc1, 0x2e, 0x1c, - 0xed, 0x75, 0x6e, 0x54, 0x11, 0x55, 0x1a, 0xfe, 0x0b, 0x18, 
0xd4, 0xda, 0xd6, 0xee, 0x13, 0x16, - 0xee, 0xb4, 0xfa, 0x91, 0x2d, 0xe9, 0x01, 0x74, 0x57, 0x71, 0xb6, 0x44, 0xaf, 0xe5, 0x7a, 0xe5, - 0xcf, 0x49, 0xeb, 0x39, 0x19, 0x9d, 0xc1, 0x7e, 0xa9, 0x5e, 0x9d, 0xf6, 0x76, 0xc2, 0xed, 0xa6, - 0x13, 0x9e, 0x7c, 0x25, 0xd0, 0xb1, 0x12, 0xf4, 0x03, 0x74, 0xdd, 0xe4, 0xe8, 0xe1, 0xce, 0xcd, - 0xd4, 0x2f, 0x94, 0xff, 0xb0, 0x09, 0xb4, 0x8a, 0xf6, 0xbe, 0xf2, 0x19, 0x37, 0x3d, 0x2b, 0xff, - 0xb0, 0x01, 0xb2, 0x14, 0x7f, 0x75, 0x79, 0xb3, 0x0e, 0xf6, 0xbe, 0xaf, 0x83, 0xbd, 0xcf, 0x9b, - 0x80, 0xdc, 0x6c, 0x02, 0xf2, 0x6d, 0x13, 0x90, 0x1f, 0x9b, 0x80, 0xbc, 0x3b, 0xf9, 0xaf, 0xd7, - 0x7e, 0x6a, 0xbf, 0xd3, 0x9e, 0x7b, 0x46, 0x4f, 0x7f, 0x06, 0x00, 0x00, 0xff, 0xff, 0xd6, 0x01, - 0x51, 0xf0, 0x32, 0x04, 0x00, 0x00, + 0x14, 0xaf, 0xfb, 0x0f, 0xf5, 0x75, 0x48, 0xc8, 0x9a, 0x44, 0x14, 0x20, 0xaa, 0x7a, 0xea, 0x40, + 0x38, 0xac, 0xa0, 0x09, 0xb6, 0xcb, 0x40, 0x43, 0x5c, 0xc6, 0x25, 0xda, 0x01, 0x81, 0x04, 0x4a, + 0x9b, 0x97, 0xce, 0x22, 0x8d, 0xbd, 0xd8, 0xad, 0x94, 0x1b, 0xdf, 0x85, 0x8f, 0xc2, 0x65, 0x47, + 0x8e, 0x1c, 0x69, 0x3f, 0x09, 0xb2, 0x93, 0x40, 0x24, 0xa4, 0x12, 0x76, 0xca, 0xcb, 0xf3, 0xef, + 0x9f, 0xfd, 0x6c, 0x38, 0x5d, 0x70, 0x7d, 0xb9, 0x9a, 0xb1, 0xb9, 0x58, 0xfa, 0x73, 0x91, 0xea, + 0x90, 0xa7, 0x98, 0x45, 0xf5, 0x32, 0x94, 0xdc, 0x57, 0x98, 0xad, 0xf9, 0x1c, 0x95, 0x1f, 0xf1, + 0x38, 0xf6, 0xd7, 0x87, 0xf6, 0xcb, 0x64, 0x26, 0xb4, 0xa0, 0xf7, 0xfe, 0x60, 0x59, 0x85, 0x63, + 0x76, 0x7d, 0x7d, 0xe8, 0xee, 0x2f, 0xc4, 0x42, 0x58, 0x9c, 0x6f, 0xaa, 0x82, 0xe2, 0x1e, 0x35, + 0x32, 0xd5, 0xb9, 0x44, 0xe5, 0x2f, 0xc5, 0x2a, 0xd5, 0x25, 0xef, 0xe4, 0x3f, 0x78, 0x11, 0xaa, + 0x79, 0xc6, 0xa5, 0x16, 0x59, 0x41, 0x1e, 0x5f, 0xc1, 0xde, 0x4b, 0x29, 0x93, 0x3c, 0xc0, 0xab, + 0x15, 0x2a, 0x4d, 0x9f, 0x40, 0xd7, 0xa4, 0x74, 0xc8, 0x88, 0x4c, 0x86, 0xd3, 0xfb, 0xac, 0xb6, + 0x0d, 0xab, 0xc0, 0xce, 0x7e, 0x2b, 0x04, 0x16, 0x49, 0x7d, 0xe8, 0xdb, 0x34, 0xca, 0x69, 0x8f, + 0x3a, 0x93, 0xe1, 0xf4, 0xee, 0xdf, 0x9c, 0xb7, 0x66, 0x3d, 0x28, 0x61, 0xe3, 0x37, 0x70, 0xbb, + 0xb4, 0x54, 0x52, 0xa4, 0x0a, 0xe9, 0x11, 0xdc, 0x0a, 0xa5, 0x4c, 0x38, 0x46, 0x8d, 0x6c, 0x2b, + 0xf0, 0xf8, 0x6b, 0x1b, 0x86, 0x67, 0x3c, 0x8e, 0xab, 0xec, 0x8f, 0xa0, 0x9b, 0x60, 0xac, 0x1d, + 0xb2, 0x3b, 0x87, 0x05, 0xd1, 0xc7, 0xd0, 0xcb, 0xf8, 0xe2, 0x52, 0xff, 0x2b, 0x75, 0x81, 0xa2, + 0x0f, 0x00, 0x96, 0x18, 0xf1, 0xf0, 0x93, 0x59, 0x73, 0x3a, 0x23, 0x32, 0x19, 0x04, 0x03, 0xdb, + 0xb9, 0xc8, 0x25, 0xd2, 0x3b, 0xd0, 0xc9, 0x30, 0x76, 0xba, 0xb6, 0x6f, 0x4a, 0x7a, 0x0e, 0xfd, + 0x24, 0x9c, 0x61, 0xa2, 0x9c, 0x9e, 0x35, 0x78, 0xc6, 0x76, 0xdc, 0x08, 0x56, 0xdb, 0x06, 0x3b, + 0xb7, 0xb4, 0xd7, 0xa9, 0xce, 0xf2, 0xa0, 0xd4, 0x70, 0x5f, 0xc0, 0xb0, 0xd6, 0x36, 0x76, 0x9f, + 0x31, 0xb7, 0xa7, 0x35, 0x08, 0x4c, 0x49, 0xf7, 0xa1, 0xb7, 0x0e, 0x93, 0x15, 0x3a, 0x6d, 0xdb, + 0x2b, 0x7e, 0x8e, 0xdb, 0xcf, 0xc9, 0xf8, 0x14, 0xf6, 0x0a, 0xf5, 0xf2, 0xb4, 0xab, 0x09, 0x77, + 0x9a, 0x4e, 0x78, 0xfa, 0x8d, 0x40, 0xd7, 0x48, 0xd0, 0x8f, 0xd0, 0xb3, 0x93, 0xa3, 0x07, 0x3b, + 0x37, 0x53, 0xbf, 0x50, 0xee, 0xc3, 0x26, 0xd0, 0x32, 0xda, 0x87, 0xd2, 0x67, 0xd2, 0xf4, 0xac, + 0xdc, 0x83, 0x06, 0xc8, 0x42, 0xfc, 0xd5, 0xc5, 0xf5, 0xc6, 0x6b, 0xfd, 0xd8, 0x78, 0xad, 0x2f, + 0x5b, 0x8f, 0x5c, 0x6f, 0x3d, 0xf2, 0x7d, 0xeb, 0x91, 0x9f, 0x5b, 0x8f, 0xbc, 0x3f, 0xbe, 0xd1, + 0x6b, 0x3f, 0x31, 0xdf, 0x77, 0xad, 0x59, 0xdf, 0x3e, 0xa4, 0xa7, 0xbf, 0x02, 0x00, 0x00, 0xff, + 0xff, 0x61, 0xd1, 0x6e, 0x9e, 0x34, 0x04, 0x00, 0x00, } diff --git a/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.proto 
b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.proto index d1d2a82449..66d7ecb19f 100644 --- a/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.proto +++ b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package containerd.services.diff.v1; -import "gogoproto/gogo.proto"; +import weak "gogoproto/gogo.proto"; import "github.com/containerd/containerd/api/types/mount.proto"; import "github.com/containerd/containerd/api/types/descriptor.proto"; diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/content.proto b/vendor/github.com/containerd/containerd/api/services/events/v1/content.proto deleted file mode 100644 index 95358f5230..0000000000 --- a/vendor/github.com/containerd/containerd/api/services/events/v1/content.proto +++ /dev/null @@ -1,13 +0,0 @@ -syntax = "proto3"; - -package containerd.services.events.v1; - -import "gogoproto/gogo.proto"; -import "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; - -option go_package = "github.com/containerd/containerd/api/services/events/v1;events"; -option (containerd.plugin.fieldpath_all) = true; - -message ContentDelete { - string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; -} diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/doc.go b/vendor/github.com/containerd/containerd/api/services/events/v1/doc.go new file mode 100644 index 0000000000..070604bb8f --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/events/v1/doc.go @@ -0,0 +1,2 @@ +// Package events defines the event pushing and subscription service. +package events diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/events.pb.go b/vendor/github.com/containerd/containerd/api/services/events/v1/events.pb.go index e89406440b..e2ad455a43 100644 --- a/vendor/github.com/containerd/containerd/api/services/events/v1/events.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/events/v1/events.pb.go @@ -1,16 +1,28 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/containerd/containerd/api/services/events/v1/events.proto -// DO NOT EDIT! +/* + Package events is a generated protocol buffer package. + + It is generated from these files: + github.com/containerd/containerd/api/services/events/v1/events.proto + + It has these top-level messages: + PublishRequest + ForwardRequest + SubscribeRequest + Envelope +*/ package events import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" -import _ "github.com/containerd/containerd/protobuf/plugin" -import _ "github.com/gogo/protobuf/gogoproto" + +// skipping weak import containerd_plugin "github.com/containerd/containerd/protobuf/plugin" +// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" import google_protobuf1 "github.com/gogo/protobuf/types" -import google_protobuf2 "github.com/golang/protobuf/ptypes/empty" +import google_protobuf2 "github.com/gogo/protobuf/types" import _ "github.com/gogo/protobuf/types" import time "time" @@ -35,6 +47,12 @@ var _ = fmt.Errorf var _ = math.Inf var _ = time.Kitchen +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + type PublishRequest struct { Topic string `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` Event *google_protobuf1.Any `protobuf:"bytes,2,opt,name=event" json:"event,omitempty"` @@ -449,24 +467,6 @@ func (m *Envelope) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Events(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Events(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintEvents(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -1152,34 +1152,35 @@ func init() { } var fileDescriptorEvents = []byte{ - // 462 bytes of a gzipped FileDescriptorProto + // 466 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0xcd, 0x8e, 0xd3, 0x30, 0x14, 0x85, 0xeb, 0xf9, 0x6d, 0x3c, 0xd2, 0x08, 0x45, 0x15, 0x2a, 0x01, 0xd2, 0xaa, 0x1b, 0x2a, - 0x04, 0x0e, 0x53, 0x76, 0x20, 0x21, 0x28, 0x94, 0xf5, 0x28, 0x80, 0x84, 0xd8, 0x25, 0xe9, 0x6d, - 0x6a, 0x29, 0xb1, 0x4d, 0xec, 0x04, 0xcd, 0x6e, 0x1e, 0x81, 0x0d, 0x6f, 0xc2, 0x86, 0x37, 0xe8, - 0x92, 0x25, 0x2b, 0x60, 0xfa, 0x24, 0xa8, 0x89, 0xdd, 0x30, 0x2d, 0x10, 0x34, 0xbb, 0x6b, 0xdf, - 0xe3, 0xcf, 0xb9, 0xe7, 0x38, 0xf8, 0x45, 0x4c, 0xd5, 0x3c, 0x0f, 0x49, 0xc4, 0x53, 0x2f, 0xe2, - 0x4c, 0x05, 0x94, 0x41, 0x36, 0xfd, 0xbd, 0x0c, 0x04, 0xf5, 0x24, 0x64, 0x05, 0x8d, 0x40, 0x7a, + 0x04, 0x0e, 0x53, 0x76, 0x20, 0x21, 0x28, 0x94, 0xf5, 0x28, 0x80, 0x54, 0xb1, 0x4b, 0xd2, 0xdb, + 0xd4, 0x52, 0x62, 0x9b, 0xd8, 0x09, 0x9a, 0xdd, 0x3c, 0x02, 0x1b, 0xde, 0x84, 0x0d, 0x6f, 0xd0, + 0x25, 0x4b, 0x56, 0xc0, 0xf4, 0x49, 0x50, 0x13, 0xbb, 0x61, 0x3a, 0x40, 0x10, 0xbb, 0x6b, 0xdf, + 0xe3, 0xcf, 0xb9, 0xe7, 0x38, 0xf8, 0x45, 0x4c, 0xd5, 0x22, 0x0f, 0x49, 0xc4, 0x53, 0x2f, 0xe2, + 0x4c, 0x05, 0x94, 0x41, 0x36, 0xfb, 0xb5, 0x0c, 0x04, 0xf5, 0x24, 0x64, 0x05, 0x8d, 0x40, 0x7a, 0x50, 0x00, 0x53, 0xd2, 0x2b, 0x4e, 0x74, 0x45, 0x44, 0xc6, 0x15, 0xb7, 0x6f, 0xd7, 0x7a, 0x62, - 0xb4, 0x44, 0x2b, 0x8a, 0x13, 0xe7, 0x69, 0xe3, 0x25, 0x25, 0x26, 0xcc, 0x67, 0x9e, 0x48, 0xf2, - 0x98, 0x32, 0x6f, 0x46, 0x21, 0x99, 0x8a, 0x40, 0xcd, 0xab, 0x0b, 0x9c, 0x4e, 0xcc, 0x63, 0x5e, - 0x96, 0xde, 0xaa, 0xd2, 0xbb, 0x37, 0x62, 0xce, 0xe3, 0x04, 0xea, 0xd3, 0x01, 0x3b, 0xd3, 0xad, - 0x9b, 0x9b, 0x2d, 0x48, 0x85, 0x32, 0xcd, 0xde, 0x66, 0x53, 0xd1, 0x14, 0xa4, 0x0a, 0x52, 0x51, - 0x09, 0x06, 0x3e, 0x3e, 0x3e, 0xcd, 0xc3, 0x84, 0xca, 0xb9, 0x0f, 0xef, 0x73, 0x90, 0xca, 0xee, + 0xb4, 0x44, 0x2b, 0x8a, 0x13, 0xe7, 0x69, 0xe3, 0x25, 0x25, 0x26, 0xcc, 0xe7, 0x9e, 0x48, 0xf2, + 0x98, 0x32, 0x6f, 0x4e, 0x21, 0x99, 0x89, 0x40, 0x2d, 0xaa, 0x0b, 0x9c, 0x4e, 0xcc, 0x63, 0x5e, + 0x96, 0xde, 0xba, 0xd2, 0xbb, 0x37, 0x62, 0xce, 0xe3, 0x04, 0xea, 0xd3, 0x01, 0x3b, 0xd3, 0xad, + 0x9b, 0xdb, 0x2d, 0x48, 0x85, 0x32, 0xcd, 0xde, 0x76, 0x53, 0xd1, 0x14, 0xa4, 0x0a, 0x52, 0x51, + 0x09, 0x06, 0x3e, 0x3e, 0x3e, 0xcd, 0xc3, 0x84, 0xca, 0x85, 0x0f, 0xef, 0x72, 0x90, 0xca, 0xee, 0xe0, 0x7d, 0xc5, 0x05, 0x8d, 0xba, 0xa8, 0x8f, 0x86, 0x96, 0x5f, 0x2d, 0xec, 
0xbb, 0x78, 0xbf, 0x9c, 0xb2, 0xbb, 0xd3, 0x47, 0xc3, 0xa3, 0x51, 0x87, 0x54, 0x60, 0x62, 0xc0, 0xe4, 0x19, 0x3b, - 0xf3, 0x2b, 0xc9, 0xe0, 0x0d, 0x3e, 0x7e, 0xc9, 0xb3, 0x0f, 0x41, 0x36, 0x35, 0xcc, 0xe7, 0xb8, - 0x0d, 0xac, 0x80, 0x84, 0x0b, 0x28, 0xb1, 0x47, 0xa3, 0x3b, 0xe4, 0x9f, 0x46, 0x92, 0x89, 0x96, - 0xfb, 0xeb, 0x83, 0x83, 0x7b, 0xf8, 0xda, 0xab, 0x3c, 0x94, 0x51, 0x46, 0x43, 0x30, 0xe0, 0x2e, - 0x3e, 0x9c, 0xd1, 0x44, 0x41, 0x26, 0xbb, 0xa8, 0xbf, 0x3b, 0xb4, 0x7c, 0xb3, 0x1c, 0x7c, 0x46, - 0xb8, 0x6d, 0x20, 0xf6, 0x18, 0x5b, 0xeb, 0xc1, 0xf5, 0x07, 0x38, 0x5b, 0x13, 0xbc, 0x36, 0x8a, - 0x71, 0x7b, 0xf1, 0xbd, 0xd7, 0xfa, 0xf8, 0xa3, 0x87, 0xfc, 0xfa, 0x98, 0x7d, 0x0b, 0x5b, 0x2c, - 0x48, 0x41, 0x8a, 0x20, 0x82, 0xd2, 0x05, 0xcb, 0xaf, 0x37, 0x6a, 0xd7, 0x76, 0xff, 0xe8, 0xda, - 0x5e, 0xa3, 0x6b, 0x8f, 0xf6, 0xce, 0xbf, 0xf4, 0xd0, 0xe8, 0xd3, 0x0e, 0x3e, 0x98, 0x94, 0x2e, - 0xd8, 0xa7, 0xf8, 0x50, 0x47, 0x63, 0xdf, 0x6f, 0x70, 0xeb, 0x72, 0x84, 0xce, 0xf5, 0xad, 0x7b, - 0x26, 0xab, 0x37, 0xb1, 0x22, 0xea, 0x60, 0x1a, 0x89, 0x97, 0x03, 0xfc, 0x2b, 0x31, 0xc6, 0xd6, - 0x3a, 0x13, 0xdb, 0x6b, 0x60, 0x6e, 0xa6, 0xe7, 0xfc, 0xef, 0x23, 0x78, 0x80, 0xc6, 0x6f, 0x17, - 0x17, 0x6e, 0xeb, 0xdb, 0x85, 0xdb, 0x3a, 0x5f, 0xba, 0x68, 0xb1, 0x74, 0xd1, 0xd7, 0xa5, 0x8b, - 0x7e, 0x2e, 0x5d, 0xf4, 0xee, 0xc9, 0x15, 0xff, 0xeb, 0xc7, 0x55, 0x15, 0x1e, 0x94, 0x23, 0x3d, - 0xfc, 0x15, 0x00, 0x00, 0xff, 0xff, 0x1c, 0x38, 0x37, 0x72, 0x20, 0x04, 0x00, 0x00, + 0xf3, 0x2b, 0xc9, 0xe0, 0x0d, 0x3e, 0x7e, 0xc9, 0xb3, 0xf7, 0x41, 0x36, 0x33, 0xcc, 0xe7, 0xb8, + 0x0d, 0xac, 0x80, 0x84, 0x0b, 0x28, 0xb1, 0x47, 0xa3, 0x3b, 0xe4, 0xaf, 0x46, 0x92, 0x89, 0x96, + 0xfb, 0x9b, 0x83, 0x83, 0x7b, 0xf8, 0xda, 0xab, 0x3c, 0x94, 0x51, 0x46, 0x43, 0x30, 0xe0, 0x2e, + 0x3e, 0x9c, 0xd3, 0x44, 0x41, 0x26, 0xbb, 0xa8, 0xbf, 0x3b, 0xb4, 0x7c, 0xb3, 0x1c, 0x7c, 0x42, + 0xb8, 0x6d, 0x20, 0xf6, 0x18, 0x5b, 0x9b, 0xc1, 0xf5, 0x07, 0x38, 0x57, 0x26, 0x78, 0x6d, 0x14, + 0xe3, 0xf6, 0xf2, 0x5b, 0xaf, 0xf5, 0xe1, 0x7b, 0x0f, 0xf9, 0xf5, 0x31, 0xfb, 0x16, 0xb6, 0x58, + 0x90, 0x82, 0x14, 0x41, 0x04, 0xa5, 0x0b, 0x96, 0x5f, 0x6f, 0xd4, 0xae, 0xed, 0xfe, 0xd6, 0xb5, + 0xbd, 0x46, 0xd7, 0x1e, 0xed, 0x9d, 0x7f, 0xee, 0xa1, 0xd1, 0xc7, 0x1d, 0x7c, 0x30, 0x29, 0x5d, + 0xb0, 0x4f, 0xf1, 0xa1, 0x8e, 0xc6, 0xbe, 0xdf, 0xe0, 0xd6, 0xe5, 0x08, 0x9d, 0xeb, 0x57, 0xee, + 0x99, 0xac, 0xdf, 0xc4, 0x9a, 0xa8, 0x83, 0x69, 0x24, 0x5e, 0x0e, 0xf0, 0x8f, 0xc4, 0x18, 0x5b, + 0x9b, 0x4c, 0x6c, 0xaf, 0x81, 0xb9, 0x9d, 0x9e, 0xf3, 0xaf, 0x8f, 0xe0, 0x01, 0x1a, 0x4f, 0x97, + 0x17, 0x6e, 0xeb, 0xeb, 0x85, 0xdb, 0x3a, 0x5f, 0xb9, 0x68, 0xb9, 0x72, 0xd1, 0x97, 0x95, 0x8b, + 0x7e, 0xac, 0x5c, 0xf4, 0xf6, 0xc9, 0x7f, 0xfe, 0xd7, 0x8f, 0xab, 0x6a, 0xda, 0x9a, 0xa2, 0xf0, + 0xa0, 0x1c, 0xeb, 0xe1, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe6, 0xbf, 0x19, 0xa6, 0x24, 0x04, + 0x00, 0x00, } diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/events.proto b/vendor/github.com/containerd/containerd/api/services/events/v1/events.proto index a20a1e854f..58f2dadeb5 100644 --- a/vendor/github.com/containerd/containerd/api/services/events/v1/events.proto +++ b/vendor/github.com/containerd/containerd/api/services/events/v1/events.proto @@ -2,8 +2,8 @@ syntax = "proto3"; package containerd.services.events.v1; -import "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; -import "gogoproto/gogo.proto"; +import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; +import weak "gogoproto/gogo.proto"; import 
"google/protobuf/any.proto"; import "google/protobuf/empty.proto"; import "google/protobuf/timestamp.proto"; diff --git a/vendor/github.com/containerd/containerd/api/services/images/v1/images.pb.go b/vendor/github.com/containerd/containerd/api/services/images/v1/images.pb.go index 41b97dfdd0..4577eb089e 100644 --- a/vendor/github.com/containerd/containerd/api/services/images/v1/images.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/images/v1/images.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/containerd/containerd/api/services/images/v1/images.proto -// DO NOT EDIT! /* Package images is a generated protocol buffer package. @@ -25,8 +24,9 @@ package images import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" -import _ "github.com/gogo/protobuf/gogoproto" -import google_protobuf1 "github.com/golang/protobuf/ptypes/empty" + +// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" +import google_protobuf1 "github.com/gogo/protobuf/types" import google_protobuf2 "github.com/gogo/protobuf/types" import _ "github.com/gogo/protobuf/types" import containerd_types "github.com/containerd/containerd/api/types" @@ -163,6 +163,11 @@ func (*ListImagesResponse) Descriptor() ([]byte, []int) { return fileDescriptorI type DeleteImageRequest struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Sync indicates that the delete and cleanup should be done + // synchronously before returning to the caller + // + // Default is false + Sync bool `protobuf:"varint,2,opt,name=sync,proto3" json:"sync,omitempty"` } func (m *DeleteImageRequest) Reset() { *m = DeleteImageRequest{} } @@ -717,27 +722,19 @@ func (m *DeleteImageRequest) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintImages(dAtA, i, uint64(len(m.Name))) i += copy(dAtA[i:], m.Name) } + if m.Sync { + dAtA[i] = 0x10 + i++ + if m.Sync { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } return i, nil } -func encodeFixed64Images(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Images(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintImages(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -858,6 +855,9 @@ func (m *DeleteImageRequest) Size() (n int) { if l > 0 { n += 1 + l + sovImages(uint64(l)) } + if m.Sync { + n += 2 + } return n } @@ -985,6 +985,7 @@ func (this *DeleteImageRequest) String() string { } s := strings.Join([]string{`&DeleteImageRequest{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Sync:` + fmt.Sprintf("%v", this.Sync) + `,`, `}`, }, "") return s @@ -1081,51 +1082,14 @@ func (m *Image) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for 
shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthImages - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Labels == nil { m.Labels = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowImages @@ -1135,41 +1099,80 @@ func (m *Image) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowImages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthImages + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowImages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthImages + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipImages(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthImages + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthImages - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Labels[mapkey] = mapvalue - } else { - var mapvalue string - m.Labels[mapkey] = mapvalue } + m.Labels[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -2015,6 +2018,26 @@ func (m *DeleteImageRequest) Unmarshal(dAtA []byte) error { } m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 2: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Sync", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowImages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Sync = bool(v != 0) default: iNdEx = preIndex skippy, err := skipImages(dAtA[iNdEx:]) @@ -2146,46 +2169,47 @@ func init() { } var fileDescriptorImages = []byte{ - // 648 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0x4f, 0x6f, 0xd3, 0x4e, - 0x10, 0xad, 0x93, 0xd4, 0x6d, 0x27, 0x87, 0x5f, 0x7f, 0x4b, 0x85, 0x2c, 0x03, 0x69, 0x14, 0x81, - 0x94, 0x0b, 0x6b, 0x1a, 0x2e, 0xd0, 0x4a, 0x88, 0xa6, 0x2d, 0x05, 0xa9, 0x70, 0x30, 0xff, 0x2a, - 0x2e, 0xd5, 0x26, 0x99, 0x18, 0x2b, 0x76, 0x6c, 0xbc, 0x9b, 0x48, 0xb9, 0xf1, 0x11, 0x90, 0xe0, - 0x43, 0xf5, 0xc8, 0x91, 0x13, 0xd0, 0x1c, 0xf8, 0x1c, 0xc8, 0xbb, 0x1b, 0x9a, 0x26, 0x11, 0x4e, - 0x4a, 0x6f, 0xe3, 0xf8, 0xbd, 0x79, 0x33, 0x6f, 0x66, 0x62, 0xd8, 0xf7, 0x7c, 0xf1, 0xbe, 0xd7, - 0xa0, 0xcd, 0x28, 0x74, 0x9a, 0x51, 0x57, 0x30, 0xbf, 0x8b, 0x49, 0x6b, 0x3c, 0x64, 0xb1, 0xef, - 0x70, 0x4c, 0xfa, 0x7e, 0x13, 0xb9, 0xe3, 0x87, 0xcc, 0x43, 0xee, 0xf4, 0xb7, 0x74, 0x44, 0xe3, - 0x24, 0x12, 0x11, 0xb9, 0x75, 0x8e, 0xa7, 0x23, 0x2c, 0xd5, 0x88, 0xfe, 0x96, 0xbd, 0xe1, 0x45, - 0x5e, 0x24, 0x91, 0x4e, 0x1a, 0x29, 0x92, 0x7d, 0xc3, 0x8b, 0x22, 0x2f, 0x40, 0x47, 0x3e, 0x35, - 0x7a, 0x6d, 0x07, 0xc3, 0x58, 0x0c, 0xf4, 0xcb, 0xf2, 0xe4, 0xcb, 0xb6, 0x8f, 0x41, 0xeb, 0x24, - 0x64, 0xbc, 0xa3, 0x11, 0x9b, 0x93, 0x08, 0xe1, 0x87, 0xc8, 0x05, 0x0b, 0x63, 0x0d, 0xd8, 0x99, - 0xab, 0x35, 0x31, 0x88, 0x91, 0x3b, 0x2d, 0xe4, 0xcd, 0xc4, 0x8f, 0x45, 0x94, 0x28, 0x72, 0xe5, - 0x57, 0x0e, 0x96, 0x9f, 0xa5, 0x0d, 0x10, 0x02, 0x85, 0x2e, 0x0b, 0xd1, 0x32, 0xca, 0x46, 0x75, - 0xcd, 0x95, 0x31, 0x79, 0x0a, 0x66, 0xc0, 0x1a, 0x18, 0x70, 0x2b, 0x57, 0xce, 0x57, 0x8b, 0xb5, - 0x7b, 0xf4, 0xaf, 0x06, 0x50, 0x99, 0x89, 0x1e, 0x49, 0xca, 0x41, 0x57, 0x24, 0x03, 0x57, 0xf3, - 0xc9, 0x36, 0x98, 0x82, 0x25, 0x1e, 0x0a, 0x2b, 0x5f, 0x36, 0xaa, 0xc5, 0xda, 0xcd, 0xf1, 0x4c, - 0xb2, 0x36, 0xba, 0xff, 0xa7, 0xb6, 0x7a, 0xe1, 0xf4, 0xfb, 0xe6, 0x92, 0xab, 0x19, 0x64, 0x0f, - 0xa0, 0x99, 0x20, 0x13, 0xd8, 0x3a, 0x61, 0xc2, 0x5a, 0x91, 0x7c, 0x9b, 0x2a, 0x5b, 0xe8, 0xc8, - 0x16, 0xfa, 0x6a, 0x64, 0x4b, 0x7d, 0x35, 0x65, 0x7f, 0xfa, 0xb1, 0x69, 0xb8, 0x6b, 0x9a, 0xb7, - 0x2b, 0x93, 0xf4, 0xe2, 0xd6, 0x28, 0xc9, 0xea, 0x22, 0x49, 0x34, 0x6f, 0x57, 0xd8, 0x0f, 0xa1, - 0x38, 0xd6, 0x1c, 0x59, 0x87, 0x7c, 0x07, 0x07, 0xda, 0xb1, 0x34, 0x24, 0x1b, 0xb0, 0xdc, 0x67, - 0x41, 0x0f, 0xad, 0x9c, 0xfc, 0x4d, 0x3d, 0x6c, 0xe7, 0x1e, 0x18, 0x95, 0x3b, 0xf0, 0xdf, 0x21, - 0x0a, 0x69, 0x90, 0x8b, 0x1f, 0x7a, 0xc8, 0xc5, 0x2c, 0xc7, 0x2b, 0x2f, 0x60, 0xfd, 0x1c, 0xc6, - 0xe3, 0xa8, 0xcb, 0x91, 0x6c, 0xc3, 0xb2, 0xb4, 0x58, 0x02, 0x8b, 0xb5, 0xdb, 0xf3, 0x0c, 0xc1, - 0x55, 0x94, 0xca, 0x1b, 0x20, 0x7b, 0xd2, 0x83, 0x0b, 0xca, 0x8f, 0x2f, 0x91, 0x51, 0x0f, 0x45, - 0xe7, 0x7d, 0x0b, 0xd7, 0x2e, 0xe4, 0xd5, 0xa5, 0xfe, 0x7b, 0xe2, 0xcf, 0x06, 0x90, 0xd7, 0xd2, - 0xf0, 0xab, 0xad, 0x98, 0xec, 0x40, 0x51, 0x0d, 0x52, 0x1e, 0x97, 0x1c, 0xd0, 0xac, 0x0d, 0x78, - 0x92, 0xde, 0xdf, 0x73, 0xc6, 0x3b, 0xae, 0xde, 0x97, 0x34, 0x4e, 0xdb, 0xbd, 0x50, 0xd4, 0x95, - 0xb5, 0x7b, 0x17, 0xfe, 0x3f, 0xf2, 0xb9, 0x1a, 0x38, 0x1f, 0x35, 0x6b, 0xc1, 0x4a, 0xdb, 0x0f, - 0x04, 0x26, 0xdc, 0x32, 0xca, 0xf9, 0xea, 0x9a, 0x3b, 0x7a, 0xac, 0x1c, 0x03, 
0x19, 0x87, 0xeb, - 0x32, 0xea, 0x60, 0x2a, 0x11, 0x09, 0x5f, 0xac, 0x0e, 0xcd, 0xac, 0x54, 0x81, 0xec, 0x63, 0x80, - 0x13, 0xb6, 0xcf, 0x58, 0xd1, 0xda, 0x97, 0x02, 0x98, 0xaa, 0x00, 0xd2, 0x86, 0xfc, 0x21, 0x0a, - 0x42, 0x33, 0xf4, 0x26, 0x16, 0xdf, 0x76, 0xe6, 0xc6, 0xeb, 0x06, 0x3b, 0x50, 0x48, 0xdb, 0x26, - 0x59, 0xff, 0x3f, 0x53, 0x56, 0xda, 0x5b, 0x0b, 0x30, 0xb4, 0x58, 0x04, 0xa6, 0x5a, 0x6d, 0x92, - 0x45, 0x9e, 0xbe, 0x2c, 0xbb, 0xb6, 0x08, 0xe5, 0x5c, 0x50, 0x2d, 0x57, 0xa6, 0xe0, 0xf4, 0x61, - 0x64, 0x0a, 0xce, 0x5a, 0xdb, 0x97, 0x60, 0xaa, 0x59, 0x67, 0x0a, 0x4e, 0xaf, 0x84, 0x7d, 0x7d, - 0xea, 0x64, 0x0e, 0xd2, 0xef, 0x59, 0xfd, 0xf8, 0xf4, 0xac, 0xb4, 0xf4, 0xed, 0xac, 0xb4, 0xf4, - 0x71, 0x58, 0x32, 0x4e, 0x87, 0x25, 0xe3, 0xeb, 0xb0, 0x64, 0xfc, 0x1c, 0x96, 0x8c, 0x77, 0x8f, - 0x2e, 0xf9, 0xed, 0xdd, 0x51, 0x51, 0xc3, 0x94, 0x4a, 0xf7, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, - 0x74, 0x4c, 0xf0, 0x24, 0xc4, 0x07, 0x00, 0x00, + // 659 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0xcd, 0x6e, 0xd3, 0x40, + 0x10, 0x8e, 0x93, 0xd4, 0x6d, 0x27, 0x07, 0xca, 0x52, 0x21, 0xcb, 0x40, 0x1a, 0x45, 0x20, 0xe5, + 0xc2, 0x9a, 0x86, 0x0b, 0xb4, 0x08, 0xd1, 0xb4, 0xa5, 0x20, 0x15, 0x0e, 0xe6, 0xaf, 0xe2, 0x52, + 0x6d, 0x92, 0x89, 0xb1, 0x62, 0xc7, 0xc6, 0xbb, 0x89, 0x94, 0x1b, 0x8f, 0x80, 0x04, 0x0f, 0xd5, + 0x23, 0x47, 0x4e, 0x40, 0x73, 0xe0, 0x39, 0x90, 0x77, 0x37, 0x34, 0x4d, 0x22, 0x92, 0x94, 0xde, + 0x66, 0xed, 0xef, 0x9b, 0x9f, 0x6f, 0x66, 0x76, 0x61, 0xcf, 0xf3, 0xc5, 0x87, 0x6e, 0x9d, 0x36, + 0xa2, 0xd0, 0x69, 0x44, 0x1d, 0xc1, 0xfc, 0x0e, 0x26, 0xcd, 0x51, 0x93, 0xc5, 0xbe, 0xc3, 0x31, + 0xe9, 0xf9, 0x0d, 0xe4, 0x8e, 0x1f, 0x32, 0x0f, 0xb9, 0xd3, 0xdb, 0xd4, 0x16, 0x8d, 0x93, 0x48, + 0x44, 0xe4, 0xd6, 0x19, 0x9e, 0x0e, 0xb1, 0x54, 0x23, 0x7a, 0x9b, 0xf6, 0xba, 0x17, 0x79, 0x91, + 0x44, 0x3a, 0xa9, 0xa5, 0x48, 0xf6, 0x0d, 0x2f, 0x8a, 0xbc, 0x00, 0x1d, 0x79, 0xaa, 0x77, 0x5b, + 0x0e, 0x86, 0xb1, 0xe8, 0xeb, 0x9f, 0xa5, 0xf1, 0x9f, 0x2d, 0x1f, 0x83, 0xe6, 0x71, 0xc8, 0x78, + 0x5b, 0x23, 0x36, 0xc6, 0x11, 0xc2, 0x0f, 0x91, 0x0b, 0x16, 0xc6, 0x1a, 0xb0, 0x3d, 0x57, 0x69, + 0xa2, 0x1f, 0x23, 0x77, 0x9a, 0xc8, 0x1b, 0x89, 0x1f, 0x8b, 0x28, 0x51, 0xe4, 0xf2, 0xef, 0x2c, + 0x2c, 0x3d, 0x4f, 0x0b, 0x20, 0x04, 0xf2, 0x1d, 0x16, 0xa2, 0x65, 0x94, 0x8c, 0xca, 0xaa, 0x2b, + 0x6d, 0xf2, 0x0c, 0xcc, 0x80, 0xd5, 0x31, 0xe0, 0x56, 0xb6, 0x94, 0xab, 0x14, 0xaa, 0xf7, 0xe8, + 0x3f, 0x05, 0xa0, 0xd2, 0x13, 0x3d, 0x94, 0x94, 0xfd, 0x8e, 0x48, 0xfa, 0xae, 0xe6, 0x93, 0x2d, + 0x30, 0x05, 0x4b, 0x3c, 0x14, 0x56, 0xae, 0x64, 0x54, 0x0a, 0xd5, 0x9b, 0xa3, 0x9e, 0x64, 0x6e, + 0x74, 0xef, 0x6f, 0x6e, 0xb5, 0xfc, 0xc9, 0x8f, 0x8d, 0x8c, 0xab, 0x19, 0x64, 0x17, 0xa0, 0x91, + 0x20, 0x13, 0xd8, 0x3c, 0x66, 0xc2, 0x5a, 0x96, 0x7c, 0x9b, 0x2a, 0x59, 0xe8, 0x50, 0x16, 0xfa, + 0x7a, 0x28, 0x4b, 0x6d, 0x25, 0x65, 0x7f, 0xfe, 0xb9, 0x61, 0xb8, 0xab, 0x9a, 0xb7, 0x23, 0x9d, + 0x74, 0xe3, 0xe6, 0xd0, 0xc9, 0xca, 0x22, 0x4e, 0x34, 0x6f, 0x47, 0xd8, 0x0f, 0xa1, 0x30, 0x52, + 0x1c, 0x59, 0x83, 0x5c, 0x1b, 0xfb, 0x5a, 0xb1, 0xd4, 0x24, 0xeb, 0xb0, 0xd4, 0x63, 0x41, 0x17, + 0xad, 0xac, 0xfc, 0xa6, 0x0e, 0x5b, 0xd9, 0x07, 0x46, 0xf9, 0x0e, 0x5c, 0x39, 0x40, 0x21, 0x05, + 0x72, 0xf1, 0x63, 0x17, 0xb9, 0x98, 0xa6, 0x78, 0xf9, 0x25, 0xac, 0x9d, 0xc1, 0x78, 0x1c, 0x75, + 0x38, 0x92, 0x2d, 0x58, 0x92, 0x12, 0x4b, 0x60, 0xa1, 0x7a, 0x7b, 0x9e, 0x26, 0xb8, 0x8a, 0x52, + 0x7e, 0x0b, 0x64, 0x57, 0x6a, 0x70, 0x2e, 0xf2, 0x93, 0x0b, 0x78, 0xd4, 0x4d, 0xd1, 0x7e, 0xdf, + 0xc1, 
0xb5, 0x73, 0x7e, 0x75, 0xaa, 0xff, 0xef, 0xf8, 0x8b, 0x01, 0xe4, 0x8d, 0x14, 0xfc, 0x72, + 0x33, 0x26, 0xdb, 0x50, 0x50, 0x8d, 0x94, 0xcb, 0x25, 0x1b, 0x34, 0x6d, 0x02, 0x9e, 0xa6, 0xfb, + 0xf7, 0x82, 0xf1, 0xb6, 0xab, 0xe7, 0x25, 0xb5, 0xd3, 0x72, 0xcf, 0x25, 0x75, 0x69, 0xe5, 0xde, + 0x85, 0xab, 0x87, 0x3e, 0x57, 0x0d, 0xe7, 0xc3, 0x62, 0x2d, 0x58, 0x6e, 0xf9, 0x81, 0xc0, 0x84, + 0x5b, 0x46, 0x29, 0x57, 0x59, 0x75, 0x87, 0xc7, 0xf2, 0x11, 0x90, 0x51, 0xb8, 0x4e, 0xa3, 0x06, + 0xa6, 0x0a, 0x22, 0xe1, 0x8b, 0xe5, 0xa1, 0x99, 0xe5, 0x47, 0x40, 0xf6, 0x30, 0xc0, 0x31, 0xd9, + 0xa7, 0x5d, 0x0a, 0x04, 0xf2, 0xbc, 0xdf, 0x69, 0x48, 0x05, 0x57, 0x5c, 0x69, 0x57, 0xbf, 0xe6, + 0xc1, 0x54, 0x49, 0x91, 0x16, 0xe4, 0x0e, 0x50, 0x10, 0x3a, 0x23, 0x87, 0xb1, 0x65, 0xb0, 0x9d, + 0xb9, 0xf1, 0xba, 0xe8, 0x36, 0xe4, 0x53, 0x29, 0xc8, 0xac, 0x3b, 0x69, 0x42, 0x5e, 0x7b, 0x73, + 0x01, 0x86, 0x0e, 0x16, 0x81, 0xa9, 0xc6, 0x9d, 0xcc, 0x22, 0x4f, 0x6e, 0x9b, 0x5d, 0x5d, 0x84, + 0x72, 0x16, 0x50, 0x0d, 0xdc, 0xcc, 0x80, 0x93, 0xcb, 0x32, 0x33, 0xe0, 0xb4, 0x51, 0x7e, 0x05, + 0xa6, 0xea, 0xff, 0xcc, 0x80, 0x93, 0x63, 0x62, 0x5f, 0x9f, 0x58, 0xa3, 0xfd, 0xf4, 0x8d, 0xab, + 0x1d, 0x9d, 0x9c, 0x16, 0x33, 0xdf, 0x4f, 0x8b, 0x99, 0x4f, 0x83, 0xa2, 0x71, 0x32, 0x28, 0x1a, + 0xdf, 0x06, 0x45, 0xe3, 0xd7, 0xa0, 0x68, 0xbc, 0x7f, 0x7c, 0xc1, 0xf7, 0x78, 0x5b, 0x59, 0x47, + 0x99, 0xba, 0x29, 0x63, 0xdd, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0x24, 0x4e, 0xca, 0x64, 0xda, + 0x07, 0x00, 0x00, } diff --git a/vendor/github.com/containerd/containerd/api/services/images/v1/images.proto b/vendor/github.com/containerd/containerd/api/services/images/v1/images.proto index d6b6b8378a..152ade2a08 100644 --- a/vendor/github.com/containerd/containerd/api/services/images/v1/images.proto +++ b/vendor/github.com/containerd/containerd/api/services/images/v1/images.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package containerd.services.images.v1; -import "gogoproto/gogo.proto"; +import weak "gogoproto/gogo.proto"; import "google/protobuf/empty.proto"; import "google/protobuf/field_mask.proto"; import "google/protobuf/timestamp.proto"; @@ -115,4 +115,10 @@ message ListImagesResponse { message DeleteImageRequest { string name = 1; + + // Sync indicates that the delete and cleanup should be done + // synchronously before returning to the caller + // + // Default is false + bool sync = 2; } diff --git a/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go index 9e61bf725d..bce8231a74 100644 --- a/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/containerd/containerd/api/services/introspection/v1/introspection.proto -// DO NOT EDIT! /* Package introspection is a generated protocol buffer package. 
@@ -20,7 +19,8 @@ import fmt "fmt" import math "math" import containerd_types "github.com/containerd/containerd/api/types" import google_rpc "github.com/containerd/containerd/protobuf/google/rpc" -import _ "github.com/gogo/protobuf/gogoproto" + +// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" import ( context "golang.org/x/net/context" @@ -362,24 +362,6 @@ func (m *PluginsResponse) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Introspection(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Introspection(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintIntrospection(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -697,51 +679,14 @@ func (m *Plugin) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthIntrospection - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Exports == nil { m.Exports = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowIntrospection @@ -751,41 +696,80 @@ func (m *Plugin) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIntrospection + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthIntrospection + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) 
+ iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIntrospection + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthIntrospection + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipIntrospection(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthIntrospection + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthIntrospection - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Exports[mapkey] = mapvalue - } else { - var mapvalue string - m.Exports[mapkey] = mapvalue } + m.Exports[mapkey] = mapvalue iNdEx = postIndex case 6: if wireType != 2 { @@ -1140,36 +1124,36 @@ func init() { } var fileDescriptorIntrospection = []byte{ - // 485 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x53, 0x3d, 0x6f, 0xdb, 0x30, - 0x10, 0x35, 0x65, 0xc7, 0x8a, 0xcf, 0xe9, 0x07, 0x88, 0xa0, 0x15, 0x34, 0x28, 0x86, 0xd1, 0xc1, - 0x28, 0x5a, 0x0a, 0x71, 0x5b, 0xa0, 0x49, 0x81, 0x0e, 0x46, 0x3d, 0x04, 0xc8, 0x10, 0x28, 0x5b, - 0x97, 0x40, 0x96, 0x69, 0x95, 0xa8, 0x22, 0x32, 0x24, 0x25, 0xd4, 0x5b, 0xb7, 0xfe, 0x35, 0x8f, - 0x1d, 0x3b, 0x05, 0x8d, 0x7e, 0x43, 0x7f, 0x40, 0x21, 0x51, 0x4a, 0xec, 0xcd, 0x46, 0xb6, 0xbb, - 0xa7, 0xf7, 0xee, 0xde, 0x3d, 0x88, 0x10, 0xc4, 0x4c, 0x7f, 0xcb, 0x66, 0x24, 0xe2, 0xd7, 0x7e, - 0xc4, 0x53, 0x1d, 0xb2, 0x94, 0xca, 0xf9, 0x7a, 0x19, 0x0a, 0xe6, 0x2b, 0x2a, 0x73, 0x16, 0x51, - 0xe5, 0xb3, 0x54, 0x4b, 0xae, 0x04, 0x8d, 0x34, 0xe3, 0xa9, 0x9f, 0x1f, 0x6f, 0x02, 0x44, 0x48, - 0xae, 0x39, 0x7e, 0xf5, 0xa0, 0x26, 0x8d, 0x92, 0x6c, 0x12, 0xf3, 0x63, 0xf7, 0x64, 0xab, 0xcd, - 0x7a, 0x29, 0xa8, 0xf2, 0x45, 0x12, 0xea, 0x05, 0x97, 0xd7, 0x66, 0x81, 0xfb, 0x32, 0xe6, 0x3c, - 0x4e, 0xa8, 0x2f, 0x45, 0xe4, 0x2b, 0x1d, 0xea, 0x4c, 0xd5, 0x1f, 0x0e, 0x63, 0x1e, 0xf3, 0xaa, - 0xf4, 0xcb, 0xca, 0xa0, 0xc3, 0x7f, 0x16, 0x74, 0x2f, 0x92, 0x2c, 0x66, 0x29, 0xc6, 0xd0, 0x29, - 0x27, 0x3a, 0x68, 0x80, 0x46, 0xbd, 0xa0, 0xaa, 0xf1, 0x0b, 0xb0, 0xd8, 0xdc, 0xb1, 0x4a, 0x64, - 0xd2, 0x2d, 0x6e, 0x8f, 0xac, 0xb3, 0x2f, 0x81, 0xc5, 0xe6, 0xd8, 0x85, 0x7d, 0x49, 0x6f, 0x32, - 0x26, 0xa9, 0x72, 0xda, 0x83, 0xf6, 0xa8, 0x17, 0xdc, 0xf7, 0xf8, 0x33, 0xf4, 0x1a, 0x4f, 0xca, - 0xe9, 0x0c, 0xda, 0xa3, 0xfe, 0xd8, 0x25, 0x6b, 0x67, 0x57, 0xb6, 0xc9, 0x45, 0x4d, 0x99, 0x74, - 0x56, 0xb7, 0x47, 0xad, 0xe0, 0x41, 0x82, 0x2f, 0xc1, 0xa6, 0x3f, 0x04, 0x97, 0x5a, 0x39, 0x7b, - 0x95, 0xfa, 0x84, 0x6c, 0x13, 0x1a, 0x31, 0x67, 0x90, 0xa9, 0xd1, 0x4e, 0x53, 0x2d, 0x97, 0x41, - 0x33, 0x09, 0x0f, 0xe1, 0x20, 0x0a, 0x45, 0x38, 0x63, 0x09, 0xd3, 0x8c, 0x2a, 0xa7, 0x5b, 0x99, - 0xde, 0xc0, 0xf0, 0x5b, 0xd8, 0x67, 0x29, 0xd3, 0x57, 0x54, 0x4a, 0xc7, 0x1e, 0xa0, 
0x51, 0x7f, - 0x8c, 0x89, 0x49, 0x93, 0x48, 0x11, 0x91, 0xcb, 0x2a, 0xcd, 0xc0, 0x2e, 0x39, 0x53, 0x29, 0xdd, - 0x53, 0x38, 0x58, 0xdf, 0x85, 0x9f, 0x43, 0xfb, 0x3b, 0x5d, 0xd6, 0xf1, 0x95, 0x25, 0x3e, 0x84, - 0xbd, 0x3c, 0x4c, 0x32, 0x6a, 0x02, 0x0c, 0x4c, 0x73, 0x6a, 0x7d, 0x44, 0xc3, 0xd7, 0xf0, 0xd4, - 0xd8, 0x55, 0x01, 0xbd, 0xc9, 0xa8, 0xd2, 0xd8, 0x01, 0x7b, 0xc1, 0x12, 0x4d, 0xa5, 0x72, 0x50, - 0xe5, 0xad, 0x69, 0x87, 0x57, 0xf0, 0xec, 0x9e, 0xab, 0x04, 0x4f, 0x15, 0xc5, 0xe7, 0x60, 0x0b, - 0x03, 0x55, 0xe4, 0xfe, 0xf8, 0xcd, 0x2e, 0x11, 0xd5, 0x91, 0x37, 0x23, 0xc6, 0xbf, 0x10, 0x3c, - 0x39, 0x5b, 0xa7, 0xe2, 0x1c, 0xec, 0x7a, 0x25, 0x7e, 0xbf, 0xcb, 0xe4, 0xe6, 0x1a, 0xf7, 0xc3, - 0x8e, 0x2a, 0x73, 0xd7, 0x64, 0xb1, 0xba, 0xf3, 0x5a, 0x7f, 0xee, 0xbc, 0xd6, 0xcf, 0xc2, 0x43, - 0xab, 0xc2, 0x43, 0xbf, 0x0b, 0x0f, 0xfd, 0x2d, 0x3c, 0xf4, 0xf5, 0xfc, 0x71, 0x6f, 0xf1, 0xd3, - 0x06, 0x30, 0xeb, 0x56, 0x3f, 0xff, 0xbb, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0x21, 0xec, 0xef, - 0x92, 0xe2, 0x03, 0x00, 0x00, + // 487 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x53, 0x4d, 0x6f, 0xd3, 0x40, + 0x10, 0xcd, 0x3a, 0x69, 0xdc, 0x4c, 0xca, 0x87, 0x56, 0x15, 0x58, 0x3e, 0xb8, 0x51, 0xc4, 0x21, + 0x42, 0xb0, 0x56, 0x03, 0x48, 0xb4, 0x48, 0x1c, 0x22, 0x72, 0xa8, 0xd4, 0x43, 0xe5, 0x5e, 0x10, + 0x97, 0xca, 0x71, 0x36, 0x66, 0x85, 0xeb, 0xdd, 0xee, 0xae, 0x2d, 0x72, 0xe3, 0xc6, 0x5f, 0xcb, + 0x91, 0x23, 0xa7, 0x8a, 0xfa, 0x37, 0xf0, 0x03, 0x90, 0xbd, 0x76, 0x9b, 0xdc, 0x12, 0x71, 0x9b, + 0x79, 0x7e, 0x6f, 0xe6, 0xcd, 0x93, 0x17, 0x82, 0x98, 0xe9, 0xaf, 0xd9, 0x8c, 0x44, 0xfc, 0xda, + 0x8f, 0x78, 0xaa, 0x43, 0x96, 0x52, 0x39, 0x5f, 0x2f, 0x43, 0xc1, 0x7c, 0x45, 0x65, 0xce, 0x22, + 0xaa, 0x7c, 0x96, 0x6a, 0xc9, 0x95, 0xa0, 0x91, 0x66, 0x3c, 0xf5, 0xf3, 0xe3, 0x4d, 0x80, 0x08, + 0xc9, 0x35, 0xc7, 0x2f, 0x1e, 0xd4, 0xa4, 0x51, 0x92, 0x4d, 0x62, 0x7e, 0xec, 0x9e, 0x6c, 0xb5, + 0x59, 0x2f, 0x05, 0x55, 0xbe, 0x48, 0x42, 0xbd, 0xe0, 0xf2, 0xda, 0x2c, 0x70, 0x9f, 0xc7, 0x9c, + 0xc7, 0x09, 0xf5, 0xa5, 0x88, 0x7c, 0xa5, 0x43, 0x9d, 0xa9, 0xfa, 0xc3, 0x61, 0xcc, 0x63, 0x5e, + 0x95, 0x7e, 0x59, 0x19, 0x74, 0xf8, 0xd7, 0x82, 0xee, 0x45, 0x92, 0xc5, 0x2c, 0xc5, 0x18, 0x3a, + 0xe5, 0x44, 0x07, 0x0d, 0xd0, 0xa8, 0x17, 0x54, 0x35, 0x7e, 0x06, 0x16, 0x9b, 0x3b, 0x56, 0x89, + 0x4c, 0xba, 0xc5, 0xed, 0x91, 0x75, 0xf6, 0x29, 0xb0, 0xd8, 0x1c, 0xbb, 0xb0, 0x2f, 0xe9, 0x4d, + 0xc6, 0x24, 0x55, 0x4e, 0x7b, 0xd0, 0x1e, 0xf5, 0x82, 0xfb, 0x1e, 0x7f, 0x84, 0x5e, 0xe3, 0x49, + 0x39, 0x9d, 0x41, 0x7b, 0xd4, 0x1f, 0xbb, 0x64, 0xed, 0xec, 0xca, 0x36, 0xb9, 0xa8, 0x29, 0x93, + 0xce, 0xea, 0xf6, 0xa8, 0x15, 0x3c, 0x48, 0xf0, 0x25, 0xd8, 0xf4, 0xbb, 0xe0, 0x52, 0x2b, 0x67, + 0xaf, 0x52, 0x9f, 0x90, 0x6d, 0x42, 0x23, 0xe6, 0x0c, 0x32, 0x35, 0xda, 0x69, 0xaa, 0xe5, 0x32, + 0x68, 0x26, 0xe1, 0x21, 0x1c, 0x44, 0xa1, 0x08, 0x67, 0x2c, 0x61, 0x9a, 0x51, 0xe5, 0x74, 0x2b, + 0xd3, 0x1b, 0x18, 0x7e, 0x0d, 0xfb, 0x2c, 0x65, 0xfa, 0x8a, 0x4a, 0xe9, 0xd8, 0x03, 0x34, 0xea, + 0x8f, 0x31, 0x31, 0x69, 0x12, 0x29, 0x22, 0x72, 0x59, 0xa5, 0x19, 0xd8, 0x25, 0x67, 0x2a, 0xa5, + 0x7b, 0x0a, 0x07, 0xeb, 0xbb, 0xf0, 0x53, 0x68, 0x7f, 0xa3, 0xcb, 0x3a, 0xbe, 0xb2, 0xc4, 0x87, + 0xb0, 0x97, 0x87, 0x49, 0x46, 0x4d, 0x80, 0x81, 0x69, 0x4e, 0xad, 0xf7, 0x68, 0xf8, 0x12, 0x1e, + 0x1b, 0xbb, 0x2a, 0xa0, 0x37, 0x19, 0x55, 0x1a, 0x3b, 0x60, 0x2f, 0x58, 0xa2, 0xa9, 0x54, 0x0e, + 0xaa, 0xbc, 0x35, 0xed, 0xf0, 0x0a, 0x9e, 0xdc, 0x73, 0x95, 0xe0, 0xa9, 0xa2, 0xf8, 0x1c, 0x6c, + 0x61, 0xa0, 0x8a, 0xdc, 0x1f, 
0xbf, 0xda, 0x25, 0xa2, 0x3a, 0xf2, 0x66, 0xc4, 0xf8, 0x27, 0x82, + 0x47, 0x67, 0xeb, 0x54, 0x9c, 0x83, 0x5d, 0xaf, 0xc4, 0x6f, 0x77, 0x99, 0xdc, 0x5c, 0xe3, 0xbe, + 0xdb, 0x51, 0x65, 0xee, 0x9a, 0x2c, 0x56, 0x77, 0x5e, 0xeb, 0xf7, 0x9d, 0xd7, 0xfa, 0x51, 0x78, + 0x68, 0x55, 0x78, 0xe8, 0x57, 0xe1, 0xa1, 0x3f, 0x85, 0x87, 0xbe, 0x9c, 0xff, 0xdf, 0x5b, 0xfc, + 0xb0, 0x01, 0x7c, 0xb6, 0x66, 0xdd, 0xea, 0xf7, 0x7f, 0xf3, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xe6, + 0x72, 0xde, 0x35, 0xe4, 0x03, 0x00, 0x00, } diff --git a/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.proto b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.proto index 424c731640..95e804b9b7 100644 --- a/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.proto +++ b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.proto @@ -4,7 +4,7 @@ package containerd.services.introspection.v1; import "github.com/containerd/containerd/api/types/platform.proto"; import "google/rpc/status.proto"; -import "gogoproto/gogo.proto"; +import weak "gogoproto/gogo.proto"; option go_package = "github.com/containerd/containerd/api/services/introspection/v1;introspection"; diff --git a/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.pb.go b/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.pb.go index db2a5f8b6f..a922fdeb58 100644 --- a/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/containerd/containerd/api/services/leases/v1/leases.proto -// DO NOT EDIT! /* Package leases is a generated protocol buffer package. @@ -21,8 +20,9 @@ package leases import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" -import _ "github.com/gogo/protobuf/gogoproto" -import google_protobuf1 "github.com/golang/protobuf/ptypes/empty" + +// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" +import google_protobuf1 "github.com/gogo/protobuf/types" import _ "github.com/gogo/protobuf/types" import time "time" @@ -133,7 +133,7 @@ type LeasesClient interface { // during the lease eligible for garbage collection if not referenced // or retained by other resources during the lease. Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) - // ListTransactions lists all active leases, returning the full list of + // List lists all active leases, returning the full list of // leases and optionally including the referenced resources. List(ctx context.Context, in *ListRequest, opts ...grpc.CallOption) (*ListResponse, error) } @@ -183,7 +183,7 @@ type LeasesServer interface { // during the lease eligible for garbage collection if not referenced // or retained by other resources during the lease. Delete(context.Context, *DeleteRequest) (*google_protobuf1.Empty, error) - // ListTransactions lists all active leases, returning the full list of + // List lists all active leases, returning the full list of // leases and optionally including the referenced resources. 
List(context.Context, *ListRequest) (*ListResponse, error) } @@ -472,24 +472,6 @@ func (m *ListResponse) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Leases(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Leases(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintLeases(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -799,51 +781,14 @@ func (m *Lease) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthLeases - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Labels == nil { m.Labels = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowLeases @@ -853,41 +798,80 @@ func (m *Lease) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthLeases + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := 
int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthLeases + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipLeases(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLeases + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthLeases - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Labels[mapkey] = mapvalue - } else { - var mapvalue string - m.Labels[mapkey] = mapvalue } + m.Labels[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -994,51 +978,14 @@ func (m *CreateRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthLeases - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Labels == nil { m.Labels = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowLeases @@ -1048,41 +995,80 @@ func (m *CreateRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthLeases + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowLeases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthLeases + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipLeases(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLeases + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthLeases - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Labels[mapkey] = mapvalue - } else { - var mapvalue string - m.Labels[mapkey] = mapvalue } + m.Labels[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -1537,37 +1523,37 @@ func init() { } var fileDescriptorLeases = []byte{ - // 501 bytes of a gzipped FileDescriptorProto + // 504 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xdf, 0x8a, 0xd3, 0x40, - 0x14, 0xc6, 0x3b, 0xa9, 0x8d, 0xf6, 0xd4, 0x15, 0x19, 0x96, 0x25, 0x44, 0x4c, 0x4b, 0x10, 0xb6, - 0xf8, 0x67, 0xe2, 0xd6, 0x9b, 0x75, 0x15, 0xc1, 0x6e, 0x17, 0x14, 0x82, 0x48, 0xf0, 0x42, 0xbc, - 0x59, 0xd2, 0xf6, 0x6c, 0x0c, 0xa6, 0x9d, 0x98, 0x99, 0x16, 0x7a, 0xe7, 0x23, 0xf8, 0x08, 0x3e, - 0x84, 0x0f, 0xd1, 0x4b, 0x2f, 0xbd, 0x5a, 0xdd, 0xdc, 0xf9, 0x16, 0x92, 0x99, 0x84, 0xfd, 0x23, - 0xda, 0x2a, 0xde, 0x9d, 0xc9, 0x7c, 0xdf, 0x39, 0xbf, 0xf3, 0xc1, 0x04, 0x06, 0x51, 0x2c, 0xdf, - 0xce, 0x86, 0x6c, 0xc4, 0x27, 0xde, 0x88, 0x4f, 0x65, 0x18, 0x4f, 0x31, 0x1b, 0x9f, 0x2d, 0xc3, - 0x34, 0xf6, 0x04, 0x66, 0xf3, 0x78, 0x84, 0xc2, 0x4b, 0x30, 0x14, 0x28, 0xbc, 0xf9, 0x4e, 0x59, - 0xb1, 0x34, 0xe3, 0x92, 0xd3, 0x9b, 0xa7, 0x7a, 0x56, 0x69, 0x59, 0xa9, 0x98, 0xef, 0xd8, 0x9b, - 0x11, 0x8f, 0xb8, 0x52, 0x7a, 0x45, 0xa5, 0x4d, 0xf6, 0x8d, 0x88, 0xf3, 0x28, 0x41, 0x4f, 0x9d, - 0x86, 0xb3, 0x23, 0x0f, 0x27, 0xa9, 0x5c, 0x94, 0x97, 0xed, 0x8b, 0x97, 0x32, 0x9e, 0xa0, 0x90, - 0xe1, 0x24, 0xd5, 0x02, 0xf7, 0x07, 0x81, 0x86, 0x5f, 0x4c, 0xa0, 0x5b, 0x60, 0xc4, 0x63, 0x8b, - 0x74, 0x48, 0xb7, 0xd9, 0x37, 0xf3, 0xe3, 0xb6, 0xf1, 0x7c, 0x10, 0x18, 0xf1, 0x98, 0xee, 0x03, - 0x8c, 0x32, 0x0c, 0x25, 0x8e, 0x0f, 0x43, 0x69, 0x19, 0x1d, 0xd2, 0x6d, 0xf5, 0x6c, 0xa6, 0xfb, - 0xb2, 0xaa, 0x2f, 0x7b, 0x55, 0xf5, 0xed, 0x5f, 0x59, 0x1e, 0xb7, 0x6b, 0x1f, 0xbf, 0xb5, 0x49, - 0xd0, 0x2c, 0x7d, 0x4f, 0x25, 0x7d, 0x06, 0x66, 0x12, 0x0e, 0x31, 0x11, 0x56, 0xbd, 0x53, 0xef, - 0xb6, 0x7a, 0xf7, 0xd9, 0x1f, 0x57, 0x65, 0x0a, 0x89, 0xf9, 0xca, 0x72, 0x30, 0x95, 0xd9, 0x22, - 0x28, 0xfd, 0xf6, 0x43, 0x68, 0x9d, 0xf9, 0x4c, 0xaf, 0x43, 0xfd, 0x1d, 0x2e, 0x34, 0x76, 0x50, - 0x94, 0x74, 0x13, 0x1a, 0xf3, 0x30, 0x99, 0xa1, 0x42, 0x6d, 0x06, 0xfa, 0xb0, 0x67, 0xec, 0x12, - 0xf7, 0x33, 0x81, 0x8d, 0x7d, 0x85, 0x14, 0xe0, 0xfb, 0x19, 0x0a, 0xf9, 0xdb, 0x9d, 0x5f, 0x5e, - 0xc0, 0xdd, 0x5d, 0x81, 0x7b, 0xae, 0xeb, 0xff, 0xc6, 0xf6, 0xe1, 0x5a, 0xd5, 0x5f, 0xa4, 0x7c, - 0x2a, 0x90, 0xee, 0x41, 
0x43, 0xcd, 0x56, 0xfe, 0x56, 0xef, 0xd6, 0x3a, 0x61, 0x06, 0xda, 0xe2, - 0x6e, 0xc3, 0xc6, 0x00, 0x13, 0x5c, 0x99, 0x81, 0xbb, 0x0d, 0x2d, 0x3f, 0x16, 0xb2, 0x92, 0x59, - 0x70, 0xf9, 0x28, 0x4e, 0x24, 0x66, 0xc2, 0x22, 0x9d, 0x7a, 0xb7, 0x19, 0x54, 0x47, 0xd7, 0x87, - 0xab, 0x5a, 0x58, 0xd2, 0x3d, 0x06, 0x53, 0xcf, 0x56, 0xc2, 0x75, 0xf1, 0x4a, 0x4f, 0xef, 0x93, - 0x01, 0xa6, 0xfa, 0x22, 0x28, 0x82, 0xa9, 0x17, 0xa7, 0x77, 0xff, 0x26, 0x7f, 0xfb, 0xde, 0x9a, - 0xea, 0x92, 0xf7, 0x05, 0x98, 0x3a, 0x91, 0x95, 0x63, 0xce, 0x05, 0x67, 0x6f, 0xfd, 0xf2, 0x08, - 0x0e, 0x8a, 0x97, 0x47, 0x0f, 0xe1, 0x52, 0x91, 0x07, 0xbd, 0xbd, 0x6a, 0xef, 0xd3, 0x74, 0xed, - 0x3b, 0x6b, 0x69, 0x35, 0x70, 0xff, 0xf5, 0xf2, 0xc4, 0xa9, 0x7d, 0x3d, 0x71, 0x6a, 0x1f, 0x72, - 0x87, 0x2c, 0x73, 0x87, 0x7c, 0xc9, 0x1d, 0xf2, 0x3d, 0x77, 0xc8, 0x9b, 0x27, 0xff, 0xf8, 0x1b, - 0x7a, 0xa4, 0xab, 0xa1, 0xa9, 0x56, 0x79, 0xf0, 0x33, 0x00, 0x00, 0xff, 0xff, 0x1d, 0xb9, 0xa6, - 0x63, 0xcf, 0x04, 0x00, 0x00, + 0x14, 0xc6, 0x3b, 0xa9, 0x8d, 0xf6, 0xc4, 0x15, 0x19, 0x96, 0x25, 0x44, 0x4c, 0x4b, 0x10, 0xb6, + 0xf8, 0x67, 0xe2, 0xd6, 0x9b, 0x75, 0x15, 0xc1, 0x6e, 0x17, 0x14, 0x82, 0x48, 0xf0, 0x62, 0xf1, + 0x66, 0x49, 0xdb, 0xb3, 0x31, 0x98, 0x36, 0x31, 0x33, 0x2d, 0xf4, 0xce, 0x47, 0xf0, 0x11, 0x7c, + 0x08, 0x1f, 0xa2, 0x97, 0x5e, 0x7a, 0xb5, 0xba, 0xb9, 0xf3, 0x2d, 0x24, 0x33, 0x09, 0xbb, 0x5b, + 0xd1, 0x56, 0xd9, 0xbb, 0x33, 0x99, 0xef, 0x3b, 0xe7, 0x77, 0x3e, 0x98, 0x40, 0x3f, 0x8c, 0xc4, + 0xbb, 0xe9, 0x80, 0x0d, 0x93, 0xb1, 0x3b, 0x4c, 0x26, 0x22, 0x88, 0x26, 0x98, 0x8d, 0xce, 0x97, + 0x41, 0x1a, 0xb9, 0x1c, 0xb3, 0x59, 0x34, 0x44, 0xee, 0xc6, 0x18, 0x70, 0xe4, 0xee, 0x6c, 0xa7, + 0xac, 0x58, 0x9a, 0x25, 0x22, 0xa1, 0xb7, 0xcf, 0xf4, 0xac, 0xd2, 0xb2, 0x52, 0x31, 0xdb, 0xb1, + 0x36, 0xc3, 0x24, 0x4c, 0xa4, 0xd2, 0x2d, 0x2a, 0x65, 0xb2, 0x6e, 0x85, 0x49, 0x12, 0xc6, 0xe8, + 0xca, 0xd3, 0x60, 0x7a, 0xec, 0xe2, 0x38, 0x15, 0xf3, 0xf2, 0xb2, 0xb5, 0x7c, 0x29, 0xa2, 0x31, + 0x72, 0x11, 0x8c, 0x53, 0x25, 0x70, 0x7e, 0x12, 0x68, 0x78, 0xc5, 0x04, 0xba, 0x05, 0x5a, 0x34, + 0x32, 0x49, 0x9b, 0x74, 0x9a, 0x3d, 0x3d, 0x3f, 0x69, 0x69, 0x2f, 0xfb, 0xbe, 0x16, 0x8d, 0xe8, + 0x3e, 0xc0, 0x30, 0xc3, 0x40, 0xe0, 0xe8, 0x28, 0x10, 0xa6, 0xd6, 0x26, 0x1d, 0xa3, 0x6b, 0x31, + 0xd5, 0x97, 0x55, 0x7d, 0xd9, 0x9b, 0xaa, 0x6f, 0xef, 0xda, 0xe2, 0xa4, 0x55, 0xfb, 0xf4, 0xbd, + 0x45, 0xfc, 0x66, 0xe9, 0x7b, 0x2e, 0xe8, 0x0b, 0xd0, 0xe3, 0x60, 0x80, 0x31, 0x37, 0xeb, 0xed, + 0x7a, 0xc7, 0xe8, 0x3e, 0x64, 0x7f, 0x5d, 0x95, 0x49, 0x24, 0xe6, 0x49, 0xcb, 0xc1, 0x44, 0x64, + 0x73, 0xbf, 0xf4, 0x5b, 0x8f, 0xc1, 0x38, 0xf7, 0x99, 0xde, 0x84, 0xfa, 0x7b, 0x9c, 0x2b, 0x6c, + 0xbf, 0x28, 0xe9, 0x26, 0x34, 0x66, 0x41, 0x3c, 0x45, 0x89, 0xda, 0xf4, 0xd5, 0x61, 0x4f, 0xdb, + 0x25, 0xce, 0x17, 0x02, 0x1b, 0xfb, 0x12, 0xc9, 0xc7, 0x0f, 0x53, 0xe4, 0xe2, 0x8f, 0x3b, 0xbf, + 0x5e, 0xc2, 0xdd, 0x5d, 0x81, 0x7b, 0xa1, 0xeb, 0x65, 0x63, 0x7b, 0x70, 0xa3, 0xea, 0xcf, 0xd3, + 0x64, 0xc2, 0x91, 0xee, 0x41, 0x43, 0xce, 0x96, 0x7e, 0xa3, 0x7b, 0x67, 0x9d, 0x30, 0x7d, 0x65, + 0x71, 0xb6, 0x61, 0xa3, 0x8f, 0x31, 0xae, 0xcc, 0xc0, 0xd9, 0x06, 0xc3, 0x8b, 0xb8, 0xa8, 0x64, + 0x26, 0x5c, 0x3d, 0x8e, 0x62, 0x81, 0x19, 0x37, 0x49, 0xbb, 0xde, 0x69, 0xfa, 0xd5, 0xd1, 0xf1, + 0xe0, 0xba, 0x12, 0x96, 0x74, 0x4f, 0x41, 0x57, 0xb3, 0xa5, 0x70, 0x5d, 0xbc, 0xd2, 0xd3, 0xfd, + 0xac, 0x81, 0x2e, 0xbf, 0x70, 0x8a, 0xa0, 0xab, 0xc5, 0xe9, 0xfd, 0x7f, 0xc9, 0xdf, 0x7a, 0xb0, + 0xa6, 0xba, 0xe4, 0x7d, 0x05, 0xba, 0x4a, 0x64, 0xe5, 0x98, 0x0b, 0xc1, 0x59, 0x5b, 0xbf, 0x3d, + 0x82, 0x83, 0xe2, 
0xe5, 0xd1, 0x23, 0xb8, 0x52, 0xe4, 0x41, 0xef, 0xae, 0xda, 0xfb, 0x2c, 0x5d, + 0xeb, 0xde, 0x5a, 0x5a, 0x05, 0xdc, 0x3b, 0x5c, 0x9c, 0xda, 0xb5, 0x6f, 0xa7, 0x76, 0xed, 0x63, + 0x6e, 0x93, 0x45, 0x6e, 0x93, 0xaf, 0xb9, 0x4d, 0x7e, 0xe4, 0x36, 0x79, 0xfb, 0xec, 0x3f, 0x7f, + 0x43, 0x4f, 0x54, 0x75, 0x58, 0x1b, 0xe8, 0x72, 0x99, 0x47, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff, + 0xad, 0x77, 0xda, 0x73, 0xd1, 0x04, 0x00, 0x00, } diff --git a/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.proto b/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.proto index 29d58d1119..1002a2d6ed 100644 --- a/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.proto +++ b/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package containerd.services.leases.v1; -import "gogoproto/gogo.proto"; +import weak "gogoproto/gogo.proto"; import "google/protobuf/empty.proto"; import "google/protobuf/timestamp.proto"; @@ -19,7 +19,7 @@ service Leases { // or retained by other resources during the lease. rpc Delete(DeleteRequest) returns (google.protobuf.Empty); - // ListTransactions lists all active leases, returning the full list of + // List lists all active leases, returning the full list of // leases and optionally including the referenced resources. rpc List(ListRequest) returns (ListResponse); } diff --git a/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.pb.go b/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.pb.go index e98330b6c2..822e348304 100644 --- a/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto -// DO NOT EDIT! /* Package namespaces is a generated protocol buffer package. 
@@ -25,8 +24,9 @@ package namespaces import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" -import _ "github.com/gogo/protobuf/gogoproto" -import google_protobuf1 "github.com/golang/protobuf/ptypes/empty" + +// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" +import google_protobuf1 "github.com/gogo/protobuf/types" import google_protobuf2 "github.com/gogo/protobuf/types" import ( @@ -653,24 +653,6 @@ func (m *DeleteNamespaceRequest) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Namespace(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Namespace(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintNamespace(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -1001,51 +983,14 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthNamespace - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Labels == nil { m.Labels = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowNamespace @@ -1055,41 +1000,80 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNamespace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthNamespace + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey 
= string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNamespace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthNamespace + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipNamespace(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNamespace + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthNamespace - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Labels[mapkey] = mapvalue - } else { - var mapvalue string - m.Labels[mapkey] = mapvalue } + m.Labels[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -1973,40 +1957,40 @@ func init() { } var fileDescriptorNamespace = []byte{ - // 547 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0x4d, 0x6f, 0xd3, 0x40, - 0x10, 0xcd, 0x26, 0xc1, 0x52, 0xc6, 0x17, 0xb4, 0x04, 0x13, 0x19, 0xc9, 0x04, 0x9f, 0x8a, 0x54, - 0xad, 0xd5, 0x20, 0x41, 0x3f, 0x6e, 0x85, 0xb6, 0x07, 0x0a, 0x42, 0x96, 0xb8, 0xc0, 0x01, 0x9c, - 0x64, 0xe3, 0x9a, 0x38, 0xb6, 0xf1, 0xae, 0x2d, 0x45, 0x1c, 0xe0, 0xdf, 0x70, 0xe1, 0x87, 0xe4, - 0xc8, 0x91, 0x13, 0x6a, 0xf3, 0x4b, 0xd0, 0xae, 0x9d, 0x38, 0x34, 0x46, 0xb8, 0x81, 0x72, 0x9b, - 0xb1, 0xf7, 0xcd, 0x7b, 0x3b, 0x7a, 0xcf, 0x86, 0x67, 0xae, 0xc7, 0xcf, 0x92, 0x3e, 0x19, 0x84, - 0x13, 0x6b, 0x10, 0x06, 0xdc, 0xf1, 0x02, 0x1a, 0x0f, 0x57, 0x4b, 0x27, 0xf2, 0x2c, 0x46, 0xe3, - 0xd4, 0x1b, 0x50, 0x66, 0x05, 0xce, 0x84, 0xb2, 0xc8, 0x11, 0x65, 0xba, 0x53, 0x74, 0x24, 0x8a, - 0x43, 0x1e, 0xe2, 0xfb, 0x05, 0x8c, 0x2c, 0x20, 0xa4, 0x80, 0x90, 0x74, 0x47, 0x6f, 0xbb, 0xa1, - 0x1b, 0xca, 0xd3, 0x96, 0xa8, 0x32, 0xa0, 0x7e, 0xd7, 0x0d, 0x43, 0xd7, 0xa7, 0x96, 0xec, 0xfa, - 0xc9, 0xc8, 0xa2, 0x93, 0x88, 0x4f, 0xf3, 0x97, 0xdd, 0xcb, 0x2f, 0x47, 0x1e, 0xf5, 0x87, 0x6f, - 0x27, 0x0e, 0x1b, 0x67, 0x27, 0xcc, 0xaf, 0x08, 0x5a, 0x2f, 0x16, 0x34, 0x18, 0x43, 0x53, 0x70, - 0x76, 0x50, 0x17, 0x6d, 0xb5, 0x6c, 0x59, 0xe3, 0x97, 0xa0, 0xf8, 0x4e, 0x9f, 0xfa, 0xac, 0x53, - 0xef, 0x36, 0xb6, 0xd4, 0xde, 0x2e, 0xf9, 0xa3, 0x54, 0xb2, 0x9c, 0x48, 0x4e, 0x25, 0xf4, 0x28, - 0xe0, 0xf1, 0xd4, 0xce, 0xe7, 0xe8, 0x7b, 0xa0, 0xae, 0x3c, 0xc6, 0x37, 0xa1, 0x31, 0xa6, 0xd3, - 0x9c, 0x53, 0x94, 0xb8, 0x0d, 0x37, 0x52, 0xc7, 0x4f, 0x68, 0xa7, 0x2e, 0x9f, 0x65, 0xcd, 0x7e, - 0x7d, 0x17, 0x99, 0x0f, 0xe0, 0xd6, 0x09, 0xe5, 0xcb, 0xf1, 0x36, 0xfd, 0x90, 0x50, 0xc6, 0xcb, - 0x74, 0x9b, 0x67, 0xd0, 0xfe, 0xf5, 0x28, 0x8b, 0xc2, 0x80, 0x89, 0xfb, 0xb4, 0x96, 0x62, 0x25, - 0x40, 0xed, 0x6d, 0x5f, 0xe5, 0x4a, 0x87, 0xcd, 0xd9, 0x8f, 0x7b, 0x35, 0xbb, 0x18, 0x62, 0x5a, - 0x70, 0xfb, 0xd4, 0x63, 0x05, 0x15, 0x5b, 0xc8, 0xd2, 0x40, 0x19, 
0x79, 0x3e, 0xa7, 0x71, 0x2e, - 0x2c, 0xef, 0x4c, 0x1f, 0xb4, 0xcb, 0x80, 0x5c, 0x9c, 0x0d, 0x50, 0xd0, 0x76, 0x90, 0x5c, 0xf8, - 0x26, 0xea, 0x56, 0xa6, 0x98, 0xef, 0x41, 0x7b, 0x12, 0x53, 0x87, 0xd3, 0xb5, 0xb5, 0xfd, 0xfb, - 0x55, 0x8c, 0xe1, 0xce, 0x1a, 0xd7, 0xb5, 0xed, 0xfd, 0x0b, 0x02, 0xed, 0x55, 0x34, 0xfc, 0x2f, - 0x37, 0xc3, 0x07, 0xa0, 0x26, 0x92, 0x4b, 0xa6, 0x47, 0x3a, 0x53, 0xed, 0xe9, 0x24, 0x0b, 0x18, - 0x59, 0x04, 0x8c, 0x1c, 0x8b, 0x80, 0x3d, 0x77, 0xd8, 0xd8, 0x86, 0xec, 0xb8, 0xa8, 0xc5, 0x5a, - 0xd6, 0x84, 0x5e, 0xdb, 0x5a, 0xb6, 0x41, 0x7b, 0x4a, 0x7d, 0x5a, 0xb2, 0x95, 0x92, 0x98, 0xf4, - 0xce, 0x9b, 0x00, 0x85, 0x11, 0x71, 0x0a, 0x8d, 0x13, 0xca, 0xf1, 0xa3, 0x0a, 0x12, 0x4a, 0x82, - 0xa8, 0x3f, 0xbe, 0x32, 0x2e, 0x5f, 0xc3, 0x47, 0x68, 0x8a, 0x48, 0xe0, 0x2a, 0x5f, 0x97, 0xd2, - 0xb0, 0xe9, 0x7b, 0x1b, 0x20, 0x73, 0xf2, 0x4f, 0xa0, 0x64, 0xae, 0xc5, 0x55, 0x86, 0x94, 0x87, - 0x49, 0xdf, 0xdf, 0x04, 0x5a, 0x08, 0xc8, 0xfc, 0x51, 0x49, 0x40, 0xb9, 0xe7, 0x2b, 0x09, 0xf8, - 0x9d, 0x0b, 0xdf, 0x80, 0x92, 0x79, 0xa6, 0x92, 0x80, 0x72, 0x7b, 0xe9, 0xda, 0x5a, 0x1a, 0x8e, - 0xc4, 0xbf, 0xe8, 0xf0, 0xdd, 0xec, 0xc2, 0xa8, 0x7d, 0xbf, 0x30, 0x6a, 0x9f, 0xe7, 0x06, 0x9a, - 0xcd, 0x0d, 0xf4, 0x6d, 0x6e, 0xa0, 0xf3, 0xb9, 0x81, 0x5e, 0x1f, 0xff, 0xc5, 0x2f, 0xf4, 0xa0, - 0xe8, 0xfa, 0x8a, 0x64, 0x7c, 0xf8, 0x33, 0x00, 0x00, 0xff, 0xff, 0xbf, 0xe8, 0x4d, 0xe1, 0x93, - 0x07, 0x00, 0x00, + // 551 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcd, 0x6e, 0xd3, 0x4c, + 0x14, 0xcd, 0x24, 0xf9, 0x2c, 0xe5, 0x7a, 0xf3, 0x69, 0x08, 0x26, 0x32, 0x92, 0x09, 0x5e, 0x15, + 0xa9, 0x1a, 0xab, 0x41, 0x82, 0xfe, 0xec, 0x0a, 0x6d, 0x17, 0x14, 0x84, 0x2c, 0x21, 0x21, 0x58, + 0x80, 0x93, 0x4c, 0x5c, 0x13, 0xc7, 0x36, 0x9e, 0xb1, 0xa5, 0x88, 0x05, 0xbc, 0x0d, 0x1b, 0x1e, + 0x24, 0x4b, 0x96, 0xac, 0x50, 0x9b, 0x27, 0x41, 0x33, 0x76, 0xe2, 0xd0, 0x18, 0xe1, 0x06, 0xca, + 0xee, 0x5e, 0x7b, 0xce, 0x3d, 0x67, 0xae, 0xce, 0xb1, 0xe1, 0x89, 0xeb, 0xf1, 0xb3, 0xa4, 0x4f, + 0x06, 0xe1, 0xc4, 0x1a, 0x84, 0x01, 0x77, 0xbc, 0x80, 0xc6, 0xc3, 0xd5, 0xd2, 0x89, 0x3c, 0x8b, + 0xd1, 0x38, 0xf5, 0x06, 0x94, 0x59, 0x81, 0x33, 0xa1, 0x2c, 0x72, 0x44, 0x99, 0xee, 0x14, 0x1d, + 0x89, 0xe2, 0x90, 0x87, 0xf8, 0x6e, 0x01, 0x23, 0x0b, 0x08, 0x29, 0x20, 0x24, 0xdd, 0xd1, 0xdb, + 0x6e, 0xe8, 0x86, 0xf2, 0xb4, 0x25, 0xaa, 0x0c, 0xa8, 0xdf, 0x76, 0xc3, 0xd0, 0xf5, 0xa9, 0x25, + 0xbb, 0x7e, 0x32, 0xb2, 0xe8, 0x24, 0xe2, 0xd3, 0xfc, 0x65, 0xf7, 0xf2, 0xcb, 0x91, 0x47, 0xfd, + 0xe1, 0x9b, 0x89, 0xc3, 0xc6, 0xd9, 0x09, 0xf3, 0x0b, 0x82, 0xd6, 0xb3, 0x05, 0x0d, 0xc6, 0xd0, + 0x14, 0x9c, 0x1d, 0xd4, 0x45, 0x5b, 0x2d, 0x5b, 0xd6, 0xf8, 0x39, 0x28, 0xbe, 0xd3, 0xa7, 0x3e, + 0xeb, 0xd4, 0xbb, 0x8d, 0x2d, 0xb5, 0xb7, 0x4b, 0x7e, 0x2b, 0x95, 0x2c, 0x27, 0x92, 0x53, 0x09, + 0x3d, 0x0a, 0x78, 0x3c, 0xb5, 0xf3, 0x39, 0xfa, 0x1e, 0xa8, 0x2b, 0x8f, 0xf1, 0xff, 0xd0, 0x18, + 0xd3, 0x69, 0xce, 0x29, 0x4a, 0xdc, 0x86, 0xff, 0x52, 0xc7, 0x4f, 0x68, 0xa7, 0x2e, 0x9f, 0x65, + 0xcd, 0x7e, 0x7d, 0x17, 0x99, 0xf7, 0xe0, 0xc6, 0x09, 0xe5, 0xcb, 0xf1, 0x36, 0x7d, 0x9f, 0x50, + 0xc6, 0xcb, 0x74, 0x9b, 0x67, 0xd0, 0xfe, 0xf9, 0x28, 0x8b, 0xc2, 0x80, 0x89, 0xfb, 0xb4, 0x96, + 0x62, 0x25, 0x40, 0xed, 0x6d, 0x5f, 0xe5, 0x4a, 0x87, 0xcd, 0xd9, 0xf7, 0x3b, 0x35, 0xbb, 0x18, + 0x62, 0x5a, 0x70, 0xf3, 0xd4, 0x63, 0x05, 0x15, 0x5b, 0xc8, 0xd2, 0x40, 0x19, 0x79, 0x3e, 0xa7, + 0x71, 0x2e, 0x2c, 0xef, 0x4c, 0x1f, 0xb4, 0xcb, 0x80, 0x5c, 0x9c, 0x0d, 0x50, 0xd0, 0x76, 0x90, + 0x5c, 0xf8, 0x26, 0xea, 
0x56, 0xa6, 0x98, 0xef, 0x40, 0x7b, 0x14, 0x53, 0x87, 0xd3, 0xb5, 0xb5, + 0xfd, 0xfd, 0x55, 0x8c, 0xe1, 0xd6, 0x1a, 0xd7, 0xb5, 0xed, 0xfd, 0x33, 0x02, 0xed, 0x45, 0x34, + 0xfc, 0x27, 0x37, 0xc3, 0x07, 0xa0, 0x26, 0x92, 0x4b, 0xa6, 0x47, 0x3a, 0x53, 0xed, 0xe9, 0x24, + 0x0b, 0x18, 0x59, 0x04, 0x8c, 0x1c, 0x8b, 0x80, 0x3d, 0x75, 0xd8, 0xd8, 0x86, 0xec, 0xb8, 0xa8, + 0xc5, 0x5a, 0xd6, 0x84, 0x5e, 0xdb, 0x5a, 0xb6, 0x41, 0x7b, 0x4c, 0x7d, 0x5a, 0xb2, 0x95, 0x92, + 0x98, 0xf4, 0xce, 0x9b, 0x00, 0x85, 0x11, 0x71, 0x0a, 0x8d, 0x13, 0xca, 0xf1, 0x83, 0x0a, 0x12, + 0x4a, 0x82, 0xa8, 0x3f, 0xbc, 0x32, 0x2e, 0x5f, 0xc3, 0x07, 0x68, 0x8a, 0x48, 0xe0, 0x2a, 0x5f, + 0x97, 0xd2, 0xb0, 0xe9, 0x7b, 0x1b, 0x20, 0x73, 0xf2, 0x8f, 0xa0, 0x64, 0xae, 0xc5, 0x55, 0x86, + 0x94, 0x87, 0x49, 0xdf, 0xdf, 0x04, 0x5a, 0x08, 0xc8, 0xfc, 0x51, 0x49, 0x40, 0xb9, 0xe7, 0x2b, + 0x09, 0xf8, 0x95, 0x0b, 0x5f, 0x83, 0x92, 0x79, 0xa6, 0x92, 0x80, 0x72, 0x7b, 0xe9, 0xda, 0x5a, + 0x1a, 0x8e, 0xc4, 0xbf, 0xe8, 0xf0, 0xed, 0xec, 0xc2, 0xa8, 0x7d, 0xbb, 0x30, 0x6a, 0x9f, 0xe6, + 0x06, 0x9a, 0xcd, 0x0d, 0xf4, 0x75, 0x6e, 0xa0, 0xf3, 0xb9, 0x81, 0x5e, 0x1d, 0xff, 0xc1, 0x2f, + 0xf4, 0xa0, 0xe8, 0x5e, 0xd6, 0xfa, 0x8a, 0xe4, 0xbc, 0xff, 0x23, 0x00, 0x00, 0xff, 0xff, 0x4f, + 0x4a, 0x87, 0xf3, 0x95, 0x07, 0x00, 0x00, } diff --git a/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto b/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto index d58a8c2ad2..c22eebaf15 100644 --- a/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto +++ b/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package containerd.services.namespaces.v1; -import "gogoproto/gogo.proto"; +import weak "gogoproto/gogo.proto"; import "google/protobuf/empty.proto"; import "google/protobuf/field_mask.proto"; diff --git a/vendor/github.com/containerd/containerd/api/services/snapshot/v1/snapshots.pb.go b/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.pb.go similarity index 87% rename from vendor/github.com/containerd/containerd/api/services/snapshot/v1/snapshots.pb.go rename to vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.pb.go index ae3c7927e6..4cf68f0d89 100644 --- a/vendor/github.com/containerd/containerd/api/services/snapshot/v1/snapshots.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.pb.go @@ -1,12 +1,11 @@ -// Code generated by protoc-gen-gogo. -// source: github.com/containerd/containerd/api/services/snapshot/v1/snapshots.proto -// DO NOT EDIT! +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto /* - Package snapshot is a generated protocol buffer package. + Package snapshots is a generated protocol buffer package. 
It is generated from these files: - github.com/containerd/containerd/api/services/snapshot/v1/snapshots.proto + github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto It has these top-level messages: PrepareSnapshotRequest @@ -27,13 +26,14 @@ UsageRequest UsageResponse */ -package snapshot +package snapshots import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" -import _ "github.com/gogo/protobuf/gogoproto" -import google_protobuf1 "github.com/golang/protobuf/ptypes/empty" + +// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" +import google_protobuf1 "github.com/gogo/protobuf/types" import google_protobuf2 "github.com/gogo/protobuf/types" import _ "github.com/gogo/protobuf/types" import containerd_types "github.com/containerd/containerd/api/types" @@ -651,7 +651,7 @@ var _Snapshots_serviceDesc = grpc.ServiceDesc{ ServerStreams: true, }, }, - Metadata: "github.com/containerd/containerd/api/services/snapshot/v1/snapshots.proto", + Metadata: "github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto", } func (m *PrepareSnapshotRequest) Marshal() (dAtA []byte, err error) { @@ -1267,24 +1267,6 @@ func (m *UsageResponse) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Snapshots(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Snapshots(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintSnapshots(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -1953,51 +1935,14 @@ func (m *PrepareSnapshotRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthSnapshots - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Labels == nil { m.Labels = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowSnapshots @@ -2007,41 +1952,80 @@ func (m *PrepareSnapshotRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; 
shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshots + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthSnapshots + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshots + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthSnapshots + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipSnapshots(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSnapshots + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthSnapshots - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Labels[mapkey] = mapvalue - } else { - var mapvalue string - m.Labels[mapkey] = mapvalue } + m.Labels[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -2287,51 +2271,14 @@ func (m *ViewSnapshotRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthSnapshots - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Labels == nil { m.Labels = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return 
ErrIntOverflowSnapshots @@ -2341,41 +2288,80 @@ func (m *ViewSnapshotRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshots + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthSnapshots + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshots + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthSnapshots + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipSnapshots(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSnapshots + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthSnapshots - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Labels[mapkey] = mapvalue - } else { - var mapvalue string - m.Labels[mapkey] = mapvalue } + m.Labels[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -2918,51 +2904,14 @@ func (m *CommitSnapshotRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthSnapshots - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = 
postStringIndexmapkey if m.Labels == nil { m.Labels = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowSnapshots @@ -2972,41 +2921,80 @@ func (m *CommitSnapshotRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshots + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthSnapshots + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshots + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthSnapshots + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipSnapshots(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSnapshots + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthSnapshots - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Labels[mapkey] = mapvalue - } else { - var mapvalue string - m.Labels[mapkey] = mapvalue } + m.Labels[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -3329,51 +3317,14 @@ func (m *Info) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := 
int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthSnapshots - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Labels == nil { m.Labels = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowSnapshots @@ -3383,41 +3334,80 @@ func (m *Info) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshots + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthSnapshots + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshots + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthSnapshots + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipSnapshots(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSnapshots + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthSnapshots - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Labels[mapkey] = mapvalue - } else { - var mapvalue string - m.Labels[mapkey] = mapvalue } + m.Labels[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -4204,72 +4194,72 @@ var ( ) func init() { - proto.RegisterFile("github.com/containerd/containerd/api/services/snapshot/v1/snapshots.proto", fileDescriptorSnapshots) + proto.RegisterFile("github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto", fileDescriptorSnapshots) } var fileDescriptorSnapshots = []byte{ - // 1004 bytes of a gzipped FileDescriptorProto + // 1007 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 
0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0x4f, 0x6f, 0x1a, 0x47, - 0x14, 0xf7, 0xc0, 0x1a, 0xc7, 0x0f, 0xdb, 0xa5, 0x13, 0x4c, 0xd0, 0xb6, 0xc2, 0x2b, 0x0e, 0x95, - 0xd5, 0xc3, 0x6e, 0x42, 0xd5, 0xc4, 0x89, 0x2f, 0x05, 0x42, 0x2b, 0xe2, 0xd8, 0xa9, 0x36, 0xb6, - 0x53, 0xa7, 0x91, 0xa2, 0x35, 0x8c, 0xf1, 0x0a, 0x76, 0x97, 0x32, 0x03, 0x11, 0xad, 0x54, 0xf5, + 0x14, 0x67, 0x60, 0x8d, 0xe3, 0x87, 0xed, 0xd2, 0x09, 0x26, 0x68, 0x5b, 0xe1, 0x15, 0x87, 0xca, + 0xea, 0x61, 0x37, 0xa1, 0x6a, 0xe2, 0xc4, 0x97, 0x62, 0x4c, 0x2b, 0xec, 0xd8, 0xa9, 0x36, 0xb6, + 0x13, 0xa7, 0x55, 0xa3, 0x35, 0x8c, 0xf1, 0x0a, 0x76, 0x97, 0x32, 0x03, 0x11, 0xad, 0x54, 0xf5, 0x18, 0xf9, 0xd4, 0x2f, 0xe0, 0x53, 0xfb, 0x21, 0xaa, 0x7e, 0x02, 0x1f, 0x7b, 0xec, 0xa9, 0x6d, - 0xfc, 0x25, 0x7a, 0xea, 0x1f, 0xcd, 0xec, 0x2c, 0x60, 0x4c, 0xc5, 0xb2, 0xa1, 0xb7, 0xb7, 0x33, - 0xf3, 0xde, 0xfb, 0xbd, 0xdf, 0x9b, 0xf7, 0xde, 0x2c, 0x54, 0x1b, 0x36, 0x3b, 0xed, 0x1e, 0xeb, - 0x35, 0xcf, 0x31, 0x6a, 0x9e, 0xcb, 0x2c, 0xdb, 0x25, 0x9d, 0xfa, 0xa8, 0x68, 0xb5, 0x6d, 0x83, - 0x92, 0x4e, 0xcf, 0xae, 0x11, 0x6a, 0x50, 0xd7, 0x6a, 0xd3, 0x53, 0x8f, 0x19, 0xbd, 0x3b, 0x03, - 0x99, 0xea, 0xed, 0x8e, 0xc7, 0x3c, 0xac, 0x0d, 0x95, 0xf4, 0x40, 0x41, 0x1f, 0x1e, 0xea, 0xdd, - 0x51, 0xd3, 0x0d, 0xaf, 0xe1, 0x89, 0xc3, 0x06, 0x97, 0x7c, 0x3d, 0xf5, 0xbd, 0x86, 0xe7, 0x35, - 0x5a, 0xc4, 0x10, 0x5f, 0xc7, 0xdd, 0x13, 0x83, 0x38, 0x6d, 0xd6, 0x97, 0x9b, 0xda, 0xf8, 0xe6, - 0x89, 0x4d, 0x5a, 0xf5, 0x97, 0x8e, 0x45, 0x9b, 0xf2, 0xc4, 0xc6, 0xf8, 0x09, 0x66, 0x3b, 0x84, - 0x32, 0xcb, 0x69, 0xcb, 0x03, 0x77, 0x43, 0x85, 0xc8, 0xfa, 0x6d, 0x42, 0x0d, 0xc7, 0xeb, 0xba, - 0xcc, 0xd7, 0xcb, 0xff, 0x85, 0x20, 0xf3, 0x79, 0x87, 0xb4, 0xad, 0x0e, 0x79, 0x2a, 0xa3, 0x30, - 0xc9, 0x57, 0x5d, 0x42, 0x19, 0xd6, 0x20, 0x19, 0x04, 0xc6, 0x48, 0x27, 0x8b, 0x34, 0xb4, 0xb9, - 0x6c, 0x8e, 0x2e, 0xe1, 0x14, 0xc4, 0x9b, 0xa4, 0x9f, 0x8d, 0x89, 0x1d, 0x2e, 0xe2, 0x0c, 0x24, - 0xb8, 0x29, 0x97, 0x65, 0xe3, 0x62, 0x51, 0x7e, 0xe1, 0x17, 0x90, 0x68, 0x59, 0xc7, 0xa4, 0x45, - 0xb3, 0x8a, 0x16, 0xdf, 0x4c, 0x16, 0x1e, 0xea, 0xd3, 0x78, 0xd4, 0x27, 0xa3, 0xd2, 0x1f, 0x0b, - 0x33, 0x15, 0x97, 0x75, 0xfa, 0xa6, 0xb4, 0xa9, 0xde, 0x87, 0xe4, 0xc8, 0x72, 0x00, 0x0b, 0x0d, - 0x61, 0xa5, 0x61, 0xb1, 0x67, 0xb5, 0xba, 0x44, 0x42, 0xf5, 0x3f, 0x1e, 0xc4, 0xb6, 0x50, 0xfe, - 0x11, 0xdc, 0xba, 0xe6, 0x88, 0xb6, 0x3d, 0x97, 0x12, 0x6c, 0x40, 0x42, 0x30, 0x45, 0xb3, 0x48, - 0x60, 0xbe, 0x35, 0x8a, 0x59, 0x30, 0xa9, 0xef, 0xf2, 0x7d, 0x53, 0x1e, 0xcb, 0xff, 0x89, 0xe0, - 0xe6, 0xa1, 0x4d, 0x5e, 0xfd, 0x9f, 0x44, 0x1e, 0x8d, 0x11, 0x59, 0x9c, 0x4e, 0xe4, 0x04, 0x48, - 0xf3, 0x66, 0xf1, 0x33, 0x48, 0x5f, 0xf5, 0x12, 0x95, 0xc2, 0x32, 0xac, 0x8a, 0x05, 0xfa, 0x16, - 0xdc, 0xe5, 0x8b, 0xb0, 0x16, 0x18, 0x89, 0x8a, 0x63, 0x07, 0xd6, 0x4d, 0xe2, 0x78, 0xbd, 0x79, - 0x14, 0x05, 0xbf, 0x17, 0xeb, 0x65, 0xcf, 0x71, 0x6c, 0x36, 0xbb, 0x35, 0x0c, 0x8a, 0x6b, 0x39, - 0x01, 0xe5, 0x42, 0x0e, 0x3c, 0xc4, 0x87, 0x99, 0xf9, 0x72, 0xec, 0x56, 0x94, 0xa7, 0xdf, 0x8a, - 0x89, 0x80, 0xe6, 0x7d, 0x2f, 0xaa, 0x70, 0xf3, 0x29, 0xb3, 0xd8, 0x3c, 0x48, 0xfc, 0x27, 0x06, - 0x4a, 0xd5, 0x3d, 0xf1, 0x06, 0x8c, 0xa0, 0x11, 0x46, 0x86, 0xd5, 0x12, 0xbb, 0x52, 0x2d, 0x0f, - 0x40, 0x69, 0xda, 0x6e, 0x5d, 0x50, 0xb5, 0x56, 0xf8, 0x60, 0x3a, 0x2b, 0x3b, 0xb6, 0x5b, 0x37, - 0x85, 0x0e, 0x2e, 0x03, 0xd4, 0x3a, 0xc4, 0x62, 0xa4, 0xfe, 0xd2, 0x62, 0x59, 0x45, 0x43, 0x9b, - 0xc9, 0x82, 0xaa, 0xfb, 0x7d, 0x58, 0x0f, 0xfa, 0xb0, 0xbe, 0x1f, 0xf4, 0xe1, 0xd2, 0x8d, 0x8b, - 0xdf, 0x36, 0x16, 0xbe, 0xff, 0x7d, 
0x03, 0x99, 0xcb, 0x52, 0xaf, 0xc8, 0xb8, 0x91, 0x6e, 0xbb, - 0x1e, 0x18, 0x59, 0x9c, 0xc5, 0x88, 0xd4, 0x2b, 0x32, 0xfc, 0x68, 0x90, 0xdd, 0x84, 0xc8, 0x6e, - 0x61, 0x7a, 0x1c, 0x9c, 0xa9, 0x79, 0x27, 0xf3, 0x0b, 0x48, 0x5f, 0x4d, 0xa6, 0x2c, 0xae, 0x4f, - 0x40, 0xb1, 0xdd, 0x13, 0x4f, 0x18, 0x49, 0x86, 0x21, 0x99, 0x83, 0x2b, 0x29, 0x3c, 0x52, 0x53, - 0x68, 0xe6, 0x7f, 0x42, 0xb0, 0x7e, 0x20, 0xc2, 0x9d, 0xfd, 0xa6, 0x04, 0xde, 0x63, 0x51, 0xbd, - 0xe3, 0x6d, 0x48, 0xfa, 0x5c, 0x8b, 0x81, 0x2b, 0xee, 0xca, 0xa4, 0x24, 0x7d, 0xca, 0x67, 0xf2, - 0xae, 0x45, 0x9b, 0xa6, 0x4c, 0x29, 0x97, 0xf3, 0xcf, 0x21, 0x33, 0x8e, 0x7c, 0x6e, 0xb4, 0x6c, - 0x41, 0xfa, 0xb1, 0x4d, 0x07, 0x84, 0x87, 0xef, 0x89, 0xf9, 0x23, 0x58, 0x1f, 0xd3, 0xbc, 0x06, - 0x2a, 0x1e, 0x11, 0x54, 0x09, 0x56, 0x0e, 0xa8, 0xd5, 0x20, 0x6f, 0x53, 0xcb, 0xdb, 0xb0, 0x2a, - 0x6d, 0x48, 0x58, 0x18, 0x14, 0x6a, 0x7f, 0xed, 0xd7, 0x74, 0xdc, 0x14, 0x32, 0xaf, 0x69, 0xdb, - 0xf5, 0xea, 0x84, 0x0a, 0xcd, 0xb8, 0x29, 0xbf, 0x3e, 0x7c, 0x8d, 0x40, 0xe1, 0x65, 0x8a, 0xdf, - 0x87, 0xa5, 0x83, 0xbd, 0x9d, 0xbd, 0x27, 0xcf, 0xf6, 0x52, 0x0b, 0xea, 0x3b, 0x67, 0xe7, 0x5a, - 0x92, 0x2f, 0x1f, 0xb8, 0x4d, 0xd7, 0x7b, 0xe5, 0xe2, 0x0c, 0x28, 0x87, 0xd5, 0xca, 0xb3, 0x14, - 0x52, 0x57, 0xce, 0xce, 0xb5, 0x1b, 0x7c, 0x8b, 0x8f, 0x28, 0xac, 0x42, 0xa2, 0x58, 0xde, 0xaf, - 0x1e, 0x56, 0x52, 0x31, 0x75, 0xed, 0xec, 0x5c, 0x03, 0xbe, 0x53, 0xac, 0x31, 0xbb, 0x47, 0xb0, - 0x06, 0xcb, 0xe5, 0x27, 0xbb, 0xbb, 0xd5, 0xfd, 0xfd, 0xca, 0xc3, 0x54, 0x5c, 0x7d, 0xf7, 0xec, - 0x5c, 0x5b, 0xe5, 0xdb, 0x7e, 0xaf, 0x64, 0xa4, 0xae, 0xae, 0xbc, 0xfe, 0x21, 0xb7, 0xf0, 0xf3, - 0x8f, 0x39, 0x81, 0xa0, 0xf0, 0xf7, 0x12, 0x2c, 0x0f, 0x38, 0xc6, 0xdf, 0xc2, 0x92, 0x7c, 0x4a, - 0xe0, 0xad, 0xa8, 0xcf, 0x1b, 0xf5, 0x7e, 0x04, 0x4d, 0x49, 0x62, 0x17, 0x14, 0x11, 0xe1, 0xc7, - 0x91, 0x9e, 0x04, 0xea, 0xdd, 0x59, 0xd5, 0xa4, 0xdb, 0x26, 0x24, 0xfc, 0x69, 0x8b, 0x8d, 0xe9, - 0x16, 0xae, 0x0c, 0x77, 0xf5, 0x76, 0x78, 0x05, 0xe9, 0xec, 0x08, 0x12, 0x7e, 0x32, 0xf0, 0xbd, - 0x88, 0x23, 0x4e, 0xcd, 0x5c, 0xab, 0xec, 0x0a, 0x7f, 0x8a, 0x73, 0xd3, 0xfe, 0xc8, 0x0f, 0x63, - 0x7a, 0xe2, 0xe3, 0xe0, 0x3f, 0x4d, 0x77, 0x41, 0xe1, 0x9d, 0x33, 0x4c, 0x66, 0x26, 0x8c, 0xcb, - 0x30, 0x99, 0x99, 0xd8, 0x98, 0xbf, 0x81, 0x84, 0xdf, 0x9b, 0xc2, 0x44, 0x34, 0xb1, 0xff, 0xaa, - 0x5b, 0xb3, 0x2b, 0x4a, 0xe7, 0x7d, 0x50, 0x78, 0x0b, 0xc2, 0x21, 0xc0, 0x4f, 0x6a, 0x72, 0xea, - 0xbd, 0x99, 0xf5, 0x7c, 0xc7, 0xb7, 0x11, 0x3e, 0x85, 0x45, 0xd1, 0x5e, 0xb0, 0x1e, 0x02, 0xfd, - 0x48, 0x2f, 0x53, 0x8d, 0xd0, 0xe7, 0x7d, 0x5f, 0xa5, 0x17, 0x17, 0x6f, 0x72, 0x0b, 0xbf, 0xbe, - 0xc9, 0x2d, 0x7c, 0x77, 0x99, 0x43, 0x17, 0x97, 0x39, 0xf4, 0xcb, 0x65, 0x0e, 0xfd, 0x71, 0x99, - 0x43, 0xcf, 0x4b, 0x91, 0x7f, 0x39, 0xb7, 0x03, 0xf9, 0x38, 0x21, 0xae, 0xd1, 0x47, 0xff, 0x06, - 0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0x35, 0xc8, 0xbf, 0x0e, 0x00, 0x00, + 0xfc, 0x25, 0x7a, 0xea, 0x1f, 0xcd, 0xec, 0x2c, 0x60, 0x4c, 0xc5, 0x82, 0xc9, 0x6d, 0x66, 0x67, + 0x7e, 0xef, 0xfd, 0xe6, 0xf7, 0xe6, 0xbd, 0x37, 0x0b, 0xdb, 0x35, 0x9b, 0x9d, 0xb6, 0x8f, 0xf5, + 0x8a, 0xe7, 0x18, 0x15, 0xcf, 0x65, 0x96, 0xed, 0x92, 0x56, 0x75, 0x70, 0x68, 0x35, 0x6d, 0x83, + 0x92, 0x56, 0xc7, 0xae, 0x10, 0x6a, 0x50, 0xd7, 0x6a, 0xd2, 0x53, 0x8f, 0x51, 0xa3, 0x73, 0xaf, + 0x3f, 0xd1, 0x9b, 0x2d, 0x8f, 0x79, 0x58, 0xeb, 0xa3, 0xf4, 0x00, 0xa1, 0xf7, 0x37, 0x75, 0xee, + 0xa9, 0xa9, 0x9a, 0x57, 0xf3, 0xc4, 0x66, 0x83, 0x8f, 0x7c, 0x9c, 0xfa, 0x5e, 0xcd, 0xf3, 0x6a, + 0x0d, 0x62, 0x88, 0xd9, 0x71, 0xfb, 0xc4, 0x20, 0x4e, 0x93, 0x75, 0xe5, 0xa2, 0x36, 
0xbc, 0x78, + 0x62, 0x93, 0x46, 0xf5, 0xa5, 0x63, 0xd1, 0xba, 0xdc, 0xb1, 0x3a, 0xbc, 0x83, 0xd9, 0x0e, 0xa1, + 0xcc, 0x72, 0x9a, 0x72, 0xc3, 0xfd, 0x50, 0x67, 0x64, 0xdd, 0x26, 0xa1, 0x86, 0xe3, 0xb5, 0x5d, + 0xe6, 0xe3, 0x72, 0x7f, 0x23, 0x48, 0x7f, 0xde, 0x22, 0x4d, 0xab, 0x45, 0x9e, 0xca, 0x53, 0x98, + 0xe4, 0xeb, 0x36, 0xa1, 0x0c, 0x6b, 0x90, 0x08, 0x0e, 0xc6, 0x48, 0x2b, 0x83, 0x34, 0xb4, 0xb6, + 0x60, 0x0e, 0x7e, 0xc2, 0x49, 0x88, 0xd5, 0x49, 0x37, 0x13, 0x15, 0x2b, 0x7c, 0x88, 0xd3, 0x10, + 0xe7, 0xa6, 0x5c, 0x96, 0x89, 0x89, 0x8f, 0x72, 0x86, 0xbf, 0x84, 0x78, 0xc3, 0x3a, 0x26, 0x0d, + 0x9a, 0x51, 0xb4, 0xd8, 0x5a, 0x22, 0xbf, 0xa5, 0x8f, 0xd3, 0x51, 0x1f, 0xcd, 0x4a, 0x7f, 0x2c, + 0xcc, 0x94, 0x5c, 0xd6, 0xea, 0x9a, 0xd2, 0xa6, 0xfa, 0x10, 0x12, 0x03, 0x9f, 0x03, 0x5a, 0xa8, + 0x4f, 0x2b, 0x05, 0x73, 0x1d, 0xab, 0xd1, 0x26, 0x92, 0xaa, 0x3f, 0x79, 0x14, 0x5d, 0x47, 0xb9, + 0x6d, 0xb8, 0x73, 0xcd, 0x11, 0x6d, 0x7a, 0x2e, 0x25, 0xd8, 0x80, 0xb8, 0x50, 0x8a, 0x66, 0x90, + 0xe0, 0x7c, 0x67, 0x90, 0xb3, 0x50, 0x52, 0xdf, 0xe5, 0xeb, 0xa6, 0xdc, 0x96, 0xfb, 0x0b, 0xc1, + 0xed, 0x43, 0x9b, 0xbc, 0x7a, 0x9b, 0x42, 0x1e, 0x0d, 0x09, 0x59, 0x18, 0x2f, 0xe4, 0x08, 0x4a, + 0xb3, 0x56, 0xf1, 0x33, 0x48, 0x5d, 0xf5, 0x32, 0xad, 0x84, 0x45, 0x58, 0x12, 0x1f, 0xe8, 0x0d, + 0xb4, 0xcb, 0x15, 0x60, 0x39, 0x30, 0x32, 0x2d, 0x8f, 0x1d, 0x58, 0x31, 0x89, 0xe3, 0x75, 0x66, + 0x91, 0x14, 0xfc, 0x5e, 0xac, 0x14, 0x3d, 0xc7, 0xb1, 0xd9, 0xe4, 0xd6, 0x30, 0x28, 0xae, 0xe5, + 0x04, 0x92, 0x8b, 0x71, 0xe0, 0x21, 0xd6, 0x8f, 0xcc, 0x17, 0x43, 0xb7, 0xa2, 0x38, 0xfe, 0x56, + 0x8c, 0x24, 0x34, 0xeb, 0x7b, 0x51, 0x86, 0xdb, 0x4f, 0x99, 0xc5, 0x66, 0x21, 0xe2, 0xbf, 0x51, + 0x50, 0xca, 0xee, 0x89, 0xd7, 0x53, 0x04, 0x0d, 0x28, 0xd2, 0xcf, 0x96, 0xe8, 0x95, 0x6c, 0x79, + 0x04, 0x4a, 0xdd, 0x76, 0xab, 0x42, 0xaa, 0xe5, 0xfc, 0x07, 0xe3, 0x55, 0xd9, 0xb1, 0xdd, 0xaa, + 0x29, 0x30, 0xb8, 0x08, 0x50, 0x69, 0x11, 0x8b, 0x91, 0xea, 0x4b, 0x8b, 0x65, 0x14, 0x0d, 0xad, + 0x25, 0xf2, 0xaa, 0xee, 0xd7, 0x61, 0x3d, 0xa8, 0xc3, 0xfa, 0x7e, 0x50, 0x87, 0x37, 0x6f, 0x5d, + 0xfc, 0xbe, 0x1a, 0xf9, 0xe1, 0x8f, 0x55, 0x64, 0x2e, 0x48, 0x5c, 0x81, 0x71, 0x23, 0xed, 0x66, + 0x35, 0x30, 0x32, 0x37, 0x89, 0x11, 0x89, 0x2b, 0x30, 0xbc, 0xdd, 0x8b, 0x6e, 0x5c, 0x44, 0x37, + 0x3f, 0xfe, 0x1c, 0x5c, 0xa9, 0x59, 0x07, 0xf3, 0x39, 0xa4, 0xae, 0x06, 0x53, 0x26, 0xd7, 0x27, + 0xa0, 0xd8, 0xee, 0x89, 0x27, 0x8c, 0x24, 0xc2, 0x88, 0xcc, 0xc9, 0x6d, 0x2a, 0xfc, 0xa4, 0xa6, + 0x40, 0xe6, 0x7e, 0x46, 0xb0, 0x72, 0x20, 0x8e, 0x3b, 0xf9, 0x4d, 0x09, 0xbc, 0x47, 0xa7, 0xf5, + 0x8e, 0x37, 0x20, 0xe1, 0x6b, 0x2d, 0x1a, 0xae, 0xb8, 0x2b, 0xa3, 0x82, 0xf4, 0x29, 0xef, 0xc9, + 0xbb, 0x16, 0xad, 0x9b, 0x32, 0xa4, 0x7c, 0x9c, 0x7b, 0x01, 0xe9, 0x61, 0xe6, 0x33, 0x93, 0x65, + 0x1d, 0x52, 0x8f, 0x6d, 0xda, 0x13, 0x3c, 0x7c, 0x4d, 0xcc, 0x1d, 0xc1, 0xca, 0x10, 0xf2, 0x1a, + 0xa9, 0xd8, 0x94, 0xa4, 0x36, 0x61, 0xf1, 0x80, 0x5a, 0x35, 0x72, 0x93, 0x5c, 0xde, 0x80, 0x25, + 0x69, 0x43, 0xd2, 0xc2, 0xa0, 0x50, 0xfb, 0x1b, 0x3f, 0xa7, 0x63, 0xa6, 0x18, 0xf3, 0x9c, 0xb6, + 0x5d, 0xaf, 0x4a, 0xa8, 0x40, 0xc6, 0x4c, 0x39, 0xfb, 0xf0, 0x35, 0x02, 0x85, 0xa7, 0x29, 0x7e, + 0x1f, 0xe6, 0x0f, 0xf6, 0x76, 0xf6, 0x9e, 0x3c, 0xdb, 0x4b, 0x46, 0xd4, 0x77, 0xce, 0xce, 0xb5, + 0x04, 0xff, 0x7c, 0xe0, 0xd6, 0x5d, 0xef, 0x95, 0x8b, 0xd3, 0xa0, 0x1c, 0x96, 0x4b, 0xcf, 0x92, + 0x48, 0x5d, 0x3c, 0x3b, 0xd7, 0x6e, 0xf1, 0x25, 0xde, 0xa2, 0xb0, 0x0a, 0xf1, 0x42, 0x71, 0xbf, + 0x7c, 0x58, 0x4a, 0x46, 0xd5, 0xe5, 0xb3, 0x73, 0x0d, 0xf8, 0x4a, 0xa1, 0xc2, 0xec, 0x0e, 0xc1, + 0x1a, 0x2c, 
0x14, 0x9f, 0xec, 0xee, 0x96, 0xf7, 0xf7, 0x4b, 0x5b, 0xc9, 0x98, 0xfa, 0xee, 0xd9, + 0xb9, 0xb6, 0xc4, 0x97, 0xfd, 0x5a, 0xc9, 0x48, 0x55, 0x5d, 0x7c, 0xfd, 0x63, 0x36, 0xf2, 0xcb, + 0x4f, 0x59, 0xc1, 0x20, 0xff, 0xcf, 0x3c, 0x2c, 0xf4, 0x34, 0xc6, 0xdf, 0xc1, 0xbc, 0x7c, 0x4a, + 0xe0, 0xf5, 0x69, 0x9f, 0x37, 0xea, 0xc3, 0x29, 0x90, 0x52, 0xc4, 0x36, 0x28, 0xe2, 0x84, 0x1f, + 0x4f, 0xf5, 0x24, 0x50, 0xef, 0x4f, 0x0a, 0x93, 0x6e, 0xeb, 0x10, 0xf7, 0xbb, 0x2d, 0x36, 0xc6, + 0x5b, 0xb8, 0xd2, 0xdc, 0xd5, 0xbb, 0xe1, 0x01, 0xd2, 0xd9, 0x11, 0xc4, 0xfd, 0x60, 0xe0, 0x07, + 0x53, 0xb6, 0x38, 0x35, 0x7d, 0x2d, 0xb3, 0x4b, 0xfc, 0x29, 0xce, 0x4d, 0xfb, 0x2d, 0x3f, 0x8c, + 0xe9, 0x91, 0x8f, 0x83, 0xff, 0x35, 0xdd, 0x06, 0x85, 0x57, 0xce, 0x30, 0x91, 0x19, 0xd1, 0x2e, + 0xc3, 0x44, 0x66, 0x64, 0x61, 0xfe, 0x16, 0xe2, 0x7e, 0x6d, 0x0a, 0x73, 0xa2, 0x91, 0xf5, 0x57, + 0x5d, 0x9f, 0x1c, 0x28, 0x9d, 0x77, 0x41, 0xe1, 0x25, 0x08, 0x87, 0x20, 0x3f, 0xaa, 0xc8, 0xa9, + 0x0f, 0x26, 0xc6, 0xf9, 0x8e, 0xef, 0x22, 0x7c, 0x0a, 0x73, 0xa2, 0xbc, 0x60, 0x3d, 0x04, 0xfb, + 0x81, 0x5a, 0xa6, 0x1a, 0xa1, 0xf7, 0xfb, 0xbe, 0x36, 0xbf, 0xba, 0x78, 0x93, 0x8d, 0xfc, 0xf6, + 0x26, 0x1b, 0xf9, 0xfe, 0x32, 0x8b, 0x2e, 0x2e, 0xb3, 0xe8, 0xd7, 0xcb, 0x2c, 0xfa, 0xf3, 0x32, + 0x8b, 0x5e, 0x6c, 0x4d, 0xff, 0xcf, 0xb9, 0xd1, 0x9b, 0x3c, 0x8f, 0x1c, 0xc7, 0xc5, 0x55, 0xfa, + 0xe8, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x8e, 0xa0, 0xb2, 0xda, 0xc4, 0x0e, 0x00, 0x00, } diff --git a/vendor/github.com/containerd/containerd/api/services/snapshot/v1/snapshots.proto b/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto similarity index 98% rename from vendor/github.com/containerd/containerd/api/services/snapshot/v1/snapshots.proto rename to vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto index f8c0895202..0e62add3bf 100644 --- a/vendor/github.com/containerd/containerd/api/services/snapshot/v1/snapshots.proto +++ b/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto @@ -2,13 +2,13 @@ syntax = "proto3"; package containerd.services.snapshots.v1; -import "gogoproto/gogo.proto"; +import weak "gogoproto/gogo.proto"; import "google/protobuf/empty.proto"; import "google/protobuf/field_mask.proto"; import "google/protobuf/timestamp.proto"; import "github.com/containerd/containerd/api/types/mount.proto"; -option go_package = "github.com/containerd/containerd/api/services/snapshot/v1;snapshot"; +option go_package = "github.com/containerd/containerd/api/services/snapshots/v1;snapshots"; // Snapshot service manages snapshots service Snapshots { diff --git a/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.pb.go b/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.pb.go index 0f587685b9..83c18f68aa 100644 --- a/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/containerd/containerd/api/services/tasks/v1/tasks.proto -// DO NOT EDIT! /* Package tasks is a generated protocol buffer package. 
@@ -42,9 +41,10 @@ package tasks import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" -import google_protobuf "github.com/golang/protobuf/ptypes/empty" +import google_protobuf "github.com/gogo/protobuf/types" import google_protobuf1 "github.com/gogo/protobuf/types" -import _ "github.com/gogo/protobuf/gogoproto" + +// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" import containerd_types "github.com/containerd/containerd/api/types" import containerd_types1 "github.com/containerd/containerd/api/types" import containerd_types2 "github.com/containerd/containerd/api/types" @@ -1890,24 +1890,6 @@ func (m *WaitResponse) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Tasks(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Tasks(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintTasks(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -5725,88 +5707,88 @@ func init() { } var fileDescriptorTasks = []byte{ - // 1317 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x5b, 0x6f, 0x1b, 0x45, - 0x1b, 0xee, 0xfa, 0xec, 0xd7, 0x49, 0x9b, 0xec, 0x97, 0xe6, 0x33, 0x4b, 0x15, 0x87, 0xe5, 0xc6, - 0x04, 0xba, 0x4b, 0x5d, 0x54, 0x21, 0x5a, 0x21, 0x35, 0x07, 0x22, 0x0b, 0xaa, 0xa6, 0xdb, 0x72, - 0x50, 0x25, 0x14, 0xb6, 0xbb, 0x13, 0x67, 0x14, 0x7b, 0x67, 0xbb, 0x33, 0x4e, 0x1b, 0xb8, 0x80, - 0x9f, 0xd0, 0x5b, 0x6e, 0xf8, 0x3d, 0xb9, 0xe4, 0x12, 0xa1, 0x2a, 0x50, 0xff, 0x0b, 0xee, 0xd0, - 0x1c, 0x76, 0xb3, 0xb1, 0x63, 0xaf, 0x93, 0x34, 0xdc, 0xb4, 0x33, 0xb3, 0xef, 0x69, 0x9e, 0x79, - 0x0f, 0x8f, 0x03, 0xab, 0x1d, 0xcc, 0x76, 0xfb, 0xcf, 0x2c, 0x8f, 0xf4, 0x6c, 0x8f, 0x04, 0xcc, - 0xc5, 0x01, 0x8a, 0xfc, 0xf4, 0xd2, 0x0d, 0xb1, 0x4d, 0x51, 0xb4, 0x8f, 0x3d, 0x44, 0x6d, 0xe6, - 0xd2, 0x3d, 0x6a, 0xef, 0xdf, 0x92, 0x0b, 0x2b, 0x8c, 0x08, 0x23, 0xfa, 0x8d, 0x63, 0x69, 0x2b, - 0x96, 0xb4, 0xa4, 0xc0, 0xfe, 0x2d, 0xe3, 0xdd, 0x0e, 0x21, 0x9d, 0x2e, 0xb2, 0x85, 0xec, 0xb3, - 0xfe, 0x8e, 0x8d, 0x7a, 0x21, 0x3b, 0x90, 0xaa, 0xc6, 0x3b, 0xc3, 0x1f, 0xdd, 0x20, 0xfe, 0xb4, - 0xd0, 0x21, 0x1d, 0x22, 0x96, 0x36, 0x5f, 0xa9, 0xd3, 0x3b, 0x53, 0xc5, 0xcb, 0x0e, 0x42, 0x44, - 0xed, 0x1e, 0xe9, 0x07, 0x4c, 0xe9, 0x7d, 0x7a, 0x16, 0x3d, 0xc4, 0x22, 0xec, 0xa9, 0xdb, 0x19, - 0x77, 0xcf, 0xa0, 0xe9, 0x23, 0xea, 0x45, 0x38, 0x64, 0x24, 0x52, 0xca, 0x9f, 0x9d, 0x41, 0x99, - 0x23, 0x26, 0xfe, 0x51, 0xba, 0x8d, 0x61, 0x6c, 0x18, 0xee, 0x21, 0xca, 0xdc, 0x5e, 0x28, 0x05, - 0xcc, 0xc3, 0x1c, 0xcc, 0xaf, 0x45, 0xc8, 0x65, 0xe8, 0x89, 0x4b, 0xf7, 0x1c, 0xf4, 0xbc, 0x8f, - 0x28, 0xd3, 0x5b, 0x30, 0x93, 0x98, 0xdf, 0xc6, 0x7e, 0x5d, 0x5b, 0xd6, 0x9a, 0xd5, 0xd5, 0x6b, - 0x83, 0xa3, 0x46, 0x6d, 0x2d, 0x3e, 0x6f, 0xaf, 0x3b, 0xb5, 0x44, 0xa8, 0xed, 0xeb, 0x36, 0x94, - 0x22, 0x42, 0xd8, 0x0e, 0xad, 0xe7, 0x97, 0xf3, 0xcd, 0x5a, 0xeb, 0xff, 0x56, 0xea, 0x49, 0x45, - 0x74, 0xd6, 0x03, 0x0e, 0xa6, 0xa3, 0xc4, 0xf4, 0x05, 0x28, 0x52, 0xe6, 0xe3, 0xa0, 0x5e, 0xe0, - 0xd6, 0x1d, 0xb9, 0xd1, 0x17, 0xa1, 0x44, 0x99, 0x4f, 0xfa, 
0xac, 0x5e, 0x14, 0xc7, 0x6a, 0xa7, - 0xce, 0x51, 0x14, 0xd5, 0x4b, 0xc9, 0x39, 0x8a, 0x22, 0xdd, 0x80, 0x0a, 0x43, 0x51, 0x0f, 0x07, - 0x6e, 0xb7, 0x5e, 0x5e, 0xd6, 0x9a, 0x15, 0x27, 0xd9, 0xeb, 0xf7, 0x00, 0xbc, 0x5d, 0xe4, 0xed, - 0x85, 0x04, 0x07, 0xac, 0x5e, 0x59, 0xd6, 0x9a, 0xb5, 0xd6, 0x8d, 0xd1, 0xb0, 0xd6, 0x13, 0xc4, - 0x9d, 0x94, 0xbc, 0x6e, 0x41, 0x99, 0x84, 0x0c, 0x93, 0x80, 0xd6, 0xab, 0x42, 0x75, 0xc1, 0x92, - 0x68, 0x5a, 0x31, 0x9a, 0xd6, 0xfd, 0xe0, 0xc0, 0x89, 0x85, 0xcc, 0xa7, 0xa0, 0xa7, 0x91, 0xa4, - 0x21, 0x09, 0x28, 0x3a, 0x17, 0x94, 0x73, 0x90, 0x0f, 0xb1, 0x5f, 0xcf, 0x2d, 0x6b, 0xcd, 0x59, - 0x87, 0x2f, 0xcd, 0x0e, 0xcc, 0x3c, 0x66, 0x6e, 0xc4, 0x2e, 0xf2, 0x40, 0xef, 0x43, 0x19, 0xbd, - 0x44, 0xde, 0xb6, 0xb2, 0x5c, 0x5d, 0x85, 0xc1, 0x51, 0xa3, 0xb4, 0xf1, 0x12, 0x79, 0xed, 0x75, - 0xa7, 0xc4, 0x3f, 0xb5, 0x7d, 0xf3, 0x3d, 0x98, 0x55, 0x8e, 0x54, 0xfc, 0x2a, 0x16, 0xed, 0x38, - 0x96, 0x4d, 0x98, 0x5f, 0x47, 0x5d, 0x74, 0xe1, 0x8c, 0x31, 0x7f, 0xd3, 0xe0, 0xaa, 0xb4, 0x94, - 0x78, 0x5b, 0x84, 0x5c, 0xa2, 0x5c, 0x1a, 0x1c, 0x35, 0x72, 0xed, 0x75, 0x27, 0x87, 0x4f, 0x41, - 0x44, 0x6f, 0x40, 0x0d, 0xbd, 0xc4, 0x6c, 0x9b, 0x32, 0x97, 0xf5, 0x79, 0xce, 0xf1, 0x2f, 0xc0, - 0x8f, 0x1e, 0x8b, 0x13, 0xfd, 0x3e, 0x54, 0xf9, 0x0e, 0xf9, 0xdb, 0x2e, 0x13, 0x29, 0x56, 0x6b, - 0x19, 0x23, 0x0f, 0xf8, 0x24, 0x2e, 0x87, 0xd5, 0xca, 0xe1, 0x51, 0xe3, 0xca, 0xab, 0xbf, 0x1a, - 0x9a, 0x53, 0x91, 0x6a, 0xf7, 0x99, 0x49, 0x60, 0x41, 0xc6, 0xb7, 0x15, 0x11, 0x0f, 0x51, 0x7a, - 0xe9, 0xe8, 0x23, 0x80, 0x4d, 0x74, 0xf9, 0x8f, 0xbc, 0x01, 0x35, 0xe1, 0x46, 0x81, 0x7e, 0x07, - 0xca, 0xa1, 0xbc, 0xa0, 0x70, 0x31, 0x54, 0x23, 0xfb, 0xb7, 0x54, 0x99, 0xc4, 0x20, 0xc4, 0xc2, - 0xe6, 0x0a, 0xcc, 0x7d, 0x85, 0x29, 0xe3, 0x69, 0x90, 0x40, 0xb3, 0x08, 0xa5, 0x1d, 0xdc, 0x65, - 0x28, 0x92, 0xd1, 0x3a, 0x6a, 0xc7, 0x93, 0x26, 0x25, 0x9b, 0xd4, 0x46, 0x51, 0xb4, 0xf8, 0xba, - 0x26, 0x3a, 0xc6, 0x64, 0xb7, 0x52, 0xd4, 0x7c, 0xa5, 0x41, 0xed, 0x4b, 0xdc, 0xed, 0x5e, 0x36, - 0x48, 0xa2, 0xe1, 0xe0, 0x0e, 0x6f, 0x2b, 0x32, 0xb7, 0xd4, 0x8e, 0xa7, 0xa2, 0xdb, 0xed, 0x8a, - 0x8c, 0xaa, 0x38, 0x7c, 0x69, 0xfe, 0xa3, 0x81, 0xce, 0x95, 0xdf, 0x42, 0x96, 0x24, 0x3d, 0x31, - 0x77, 0x7a, 0x4f, 0xcc, 0x8f, 0xe9, 0x89, 0x85, 0xb1, 0x3d, 0xb1, 0x38, 0xd4, 0x13, 0x9b, 0x50, - 0xa0, 0x21, 0xf2, 0x44, 0x17, 0x1d, 0xd7, 0xd2, 0x84, 0x44, 0x1a, 0xa5, 0xf2, 0xd8, 0x54, 0xba, - 0x0e, 0xff, 0x3b, 0x71, 0x75, 0xf9, 0xb2, 0xe6, 0xaf, 0x1a, 0xcc, 0x39, 0x88, 0xe2, 0x1f, 0xd1, - 0x16, 0x3b, 0xb8, 0xf4, 0xa7, 0x5a, 0x80, 0xe2, 0x0b, 0xec, 0xb3, 0x5d, 0xf5, 0x52, 0x72, 0xc3, - 0xd1, 0xd9, 0x45, 0xb8, 0xb3, 0x2b, 0xab, 0x7f, 0xd6, 0x51, 0x3b, 0xf3, 0x67, 0xb8, 0xba, 0xd6, - 0x25, 0x14, 0xb5, 0x1f, 0xfe, 0x17, 0x81, 0xc9, 0xe7, 0xcc, 0x8b, 0x57, 0x90, 0x1b, 0xf3, 0x0b, - 0x98, 0xdb, 0x72, 0xfb, 0xf4, 0xc2, 0xfd, 0x73, 0x13, 0xe6, 0x1d, 0x44, 0xfb, 0xbd, 0x0b, 0x1b, - 0xda, 0x80, 0x6b, 0xbc, 0x38, 0xb7, 0xb0, 0x7f, 0x91, 0xe4, 0x35, 0x1d, 0xd9, 0x0f, 0xa4, 0x19, - 0x55, 0xe2, 0x9f, 0x43, 0x55, 0xb5, 0x0b, 0x14, 0x97, 0xf9, 0xf2, 0xa4, 0x32, 0x6f, 0x07, 0x3b, - 0xc4, 0x39, 0x56, 0x31, 0x5f, 0x6b, 0x70, 0x7d, 0x2d, 0x99, 0xc9, 0x17, 0xe5, 0x28, 0xdb, 0x30, - 0x1f, 0xba, 0x11, 0x0a, 0xd8, 0x76, 0x8a, 0x17, 0xc8, 0xe7, 0x6b, 0xf1, 0xfe, 0xff, 0xe7, 0x51, - 0x63, 0x25, 0xc5, 0xb6, 0x48, 0x88, 0x82, 0x44, 0x9d, 0xda, 0x1d, 0x72, 0xd3, 0xc7, 0x1d, 0x44, - 0x99, 0xb5, 0x2e, 0xfe, 0x73, 0xe6, 0xa4, 0xb1, 0xb5, 0x53, 0x39, 0x43, 0x7e, 0x1a, 0xce, 0xf0, - 0x1d, 0x2c, 0x0e, 0xdf, 0x2e, 0x01, 0xae, 0x76, 0xcc, 0x04, 0x4f, 0xed, 0x90, 0x23, 
0xe4, 0x25, - 0xad, 0x60, 0xfe, 0x04, 0xf3, 0x5f, 0x87, 0xfe, 0x5b, 0xe0, 0x75, 0x2d, 0xa8, 0x46, 0x88, 0x92, - 0x7e, 0xe4, 0x21, 0x2a, 0xb0, 0x1a, 0x77, 0xa9, 0x63, 0x31, 0x73, 0x05, 0xae, 0x3e, 0x90, 0x04, - 0x38, 0xf6, 0x5c, 0x87, 0xb2, 0x9c, 0x04, 0xf2, 0x2a, 0x55, 0x27, 0xde, 0xf2, 0xe4, 0x4b, 0x64, - 0x93, 0xb9, 0x50, 0x56, 0xfc, 0x59, 0xdd, 0xbb, 0x7e, 0x0a, 0x97, 0x14, 0x02, 0x4e, 0x2c, 0x68, - 0xee, 0x40, 0xed, 0x5b, 0x17, 0x5f, 0xfe, 0xec, 0x8c, 0x60, 0x46, 0xfa, 0x51, 0xb1, 0x0e, 0xf1, - 0x10, 0x6d, 0x32, 0x0f, 0xc9, 0x9d, 0x87, 0x87, 0xb4, 0x5e, 0xcf, 0x40, 0x51, 0x4c, 0x4e, 0x7d, - 0x0f, 0x4a, 0x92, 0x63, 0xea, 0xb6, 0x35, 0xe9, 0x17, 0x93, 0x35, 0xc2, 0xe9, 0x8d, 0x8f, 0xa7, - 0x57, 0x50, 0x57, 0xfb, 0x01, 0x8a, 0x82, 0x0b, 0xea, 0x2b, 0x93, 0x55, 0xd3, 0xcc, 0xd4, 0xf8, - 0x70, 0x2a, 0x59, 0xe5, 0xa1, 0x03, 0x25, 0x49, 0xb0, 0xb2, 0xae, 0x33, 0x42, 0x38, 0x8d, 0x8f, - 0xa6, 0x51, 0x48, 0x1c, 0x3d, 0x87, 0xd9, 0x13, 0x4c, 0x4e, 0x6f, 0x4d, 0xa3, 0x7e, 0x72, 0xa0, - 0x9f, 0xd1, 0xe5, 0x53, 0xc8, 0x6f, 0x22, 0xa6, 0x37, 0x27, 0x2b, 0x1d, 0xd3, 0x3d, 0xe3, 0x83, - 0x29, 0x24, 0x13, 0xdc, 0x0a, 0xbc, 0xd3, 0xea, 0xd6, 0x64, 0x95, 0x61, 0x76, 0x66, 0xd8, 0x53, - 0xcb, 0x2b, 0x47, 0x6d, 0x28, 0x70, 0xb2, 0xa5, 0x67, 0xc4, 0x96, 0x22, 0x64, 0xc6, 0xe2, 0x48, - 0x72, 0x6f, 0xf0, 0x1f, 0xeb, 0xfa, 0x16, 0x14, 0x78, 0x29, 0xe9, 0x19, 0x79, 0x38, 0x4a, 0xa4, - 0xc6, 0x5a, 0x7c, 0x0c, 0xd5, 0x84, 0x63, 0x64, 0x41, 0x31, 0x4c, 0x46, 0xc6, 0x1a, 0x7d, 0x08, - 0x65, 0xc5, 0x0e, 0xf4, 0x8c, 0xf7, 0x3e, 0x49, 0x22, 0x26, 0x18, 0x2c, 0x8a, 0x69, 0x9f, 0x15, - 0xe1, 0x30, 0x25, 0x18, 0x6b, 0xf0, 0x11, 0x94, 0xe4, 0xd8, 0xcf, 0x2a, 0x9a, 0x11, 0x72, 0x30, - 0xd6, 0x24, 0x86, 0x4a, 0x3c, 0xb9, 0xf5, 0x9b, 0xd9, 0x39, 0x92, 0x22, 0x0a, 0x86, 0x35, 0xad, - 0xb8, 0xca, 0xa8, 0x17, 0x00, 0xa9, 0x79, 0x79, 0x3b, 0x03, 0xe2, 0xd3, 0x26, 0xbf, 0xf1, 0xc9, - 0xd9, 0x94, 0x94, 0xe3, 0x47, 0x50, 0x92, 0x03, 0x31, 0x0b, 0xb6, 0x91, 0xb1, 0x39, 0x16, 0xb6, - 0x1d, 0x28, 0xab, 0xd1, 0x95, 0x95, 0x2b, 0x27, 0xa7, 0xa1, 0x71, 0x73, 0x4a, 0x69, 0x15, 0xfa, - 0xf7, 0x50, 0xe0, 0x33, 0x27, 0xab, 0x0a, 0x53, 0xf3, 0xcf, 0x58, 0x99, 0x46, 0x54, 0x9a, 0x5f, - 0xfd, 0xe6, 0xf0, 0xcd, 0xd2, 0x95, 0x3f, 0xde, 0x2c, 0x5d, 0xf9, 0x65, 0xb0, 0xa4, 0x1d, 0x0e, - 0x96, 0xb4, 0xdf, 0x07, 0x4b, 0xda, 0xdf, 0x83, 0x25, 0xed, 0xe9, 0xbd, 0xf3, 0xfd, 0x65, 0xef, - 0xae, 0x58, 0x3c, 0x2b, 0x09, 0xb8, 0x6e, 0xff, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x67, 0xc5, 0x63, - 0x32, 0x20, 0x14, 0x00, 0x00, + // 1318 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4b, 0x6f, 0x1b, 0x45, + 0x1c, 0xef, 0xfa, 0xed, 0xbf, 0x93, 0x36, 0x59, 0xd2, 0x60, 0x96, 0x2a, 0x0e, 0xcb, 0xc5, 0x04, + 0xba, 0x4b, 0x5d, 0x54, 0x21, 0x5a, 0x21, 0x35, 0x0f, 0x22, 0x0b, 0xaa, 0xa6, 0xdb, 0x02, 0x55, + 0x25, 0x14, 0xb6, 0xbb, 0x13, 0x67, 0x14, 0x7b, 0x67, 0xbb, 0x33, 0x4e, 0x1b, 0x38, 0xc0, 0x47, + 0xe8, 0x95, 0x0b, 0x9f, 0x27, 0x47, 0x8e, 0x08, 0x55, 0x81, 0xfa, 0x5b, 0x70, 0x43, 0xf3, 0xd8, + 0xcd, 0xc6, 0x8e, 0xbd, 0x4e, 0xd3, 0x70, 0x69, 0x67, 0x66, 0xff, 0xaf, 0xf9, 0xcd, 0xff, 0xf1, + 0x73, 0x60, 0xb5, 0x83, 0xd9, 0x6e, 0xff, 0xa9, 0xe5, 0x91, 0x9e, 0xed, 0x91, 0x80, 0xb9, 0x38, + 0x40, 0x91, 0x9f, 0x5e, 0xba, 0x21, 0xb6, 0x29, 0x8a, 0xf6, 0xb1, 0x87, 0xa8, 0xcd, 0x5c, 0xba, + 0x47, 0xed, 0xfd, 0x1b, 0x72, 0x61, 0x85, 0x11, 0x61, 0x44, 0xbf, 0x76, 0x2c, 0x6d, 0xc5, 0x92, + 0x96, 0x14, 0xd8, 0xbf, 0x61, 0xbc, 0xdf, 0x21, 0xa4, 0xd3, 0x45, 0xb6, 0x90, 0x7d, 0xda, 0xdf, + 0xb1, 0x51, 0x2f, 0x64, 0x07, 
0x52, 0xd5, 0x78, 0x6f, 0xf8, 0xa3, 0x1b, 0xc4, 0x9f, 0x16, 0x3a, + 0xa4, 0x43, 0xc4, 0xd2, 0xe6, 0x2b, 0x75, 0x7a, 0x6b, 0xaa, 0x78, 0xd9, 0x41, 0x88, 0xa8, 0xdd, + 0x23, 0xfd, 0x80, 0x29, 0xbd, 0xcf, 0xcf, 0xa2, 0x87, 0x58, 0x84, 0x3d, 0x75, 0x3b, 0xe3, 0xf6, + 0x19, 0x34, 0x7d, 0x44, 0xbd, 0x08, 0x87, 0x8c, 0x44, 0x4a, 0xf9, 0x8b, 0x33, 0x28, 0x73, 0xc4, + 0xc4, 0x3f, 0x4a, 0xb7, 0x31, 0x8c, 0x0d, 0xc3, 0x3d, 0x44, 0x99, 0xdb, 0x0b, 0xa5, 0x80, 0x79, + 0x98, 0x83, 0xf9, 0xb5, 0x08, 0xb9, 0x0c, 0x3d, 0x72, 0xe9, 0x9e, 0x83, 0x9e, 0xf5, 0x11, 0x65, + 0x7a, 0x0b, 0x66, 0x12, 0xf3, 0xdb, 0xd8, 0xaf, 0x6b, 0xcb, 0x5a, 0xb3, 0xba, 0x7a, 0x65, 0x70, + 0xd4, 0xa8, 0xad, 0xc5, 0xe7, 0xed, 0x75, 0xa7, 0x96, 0x08, 0xb5, 0x7d, 0xdd, 0x86, 0x52, 0x44, + 0x08, 0xdb, 0xa1, 0xf5, 0xfc, 0x72, 0xbe, 0x59, 0x6b, 0xbd, 0x6b, 0xa5, 0x9e, 0x54, 0x44, 0x67, + 0xdd, 0xe3, 0x60, 0x3a, 0x4a, 0x4c, 0x5f, 0x80, 0x22, 0x65, 0x3e, 0x0e, 0xea, 0x05, 0x6e, 0xdd, + 0x91, 0x1b, 0x7d, 0x11, 0x4a, 0x94, 0xf9, 0xa4, 0xcf, 0xea, 0x45, 0x71, 0xac, 0x76, 0xea, 0x1c, + 0x45, 0x51, 0xbd, 0x94, 0x9c, 0xa3, 0x28, 0xd2, 0x0d, 0xa8, 0x30, 0x14, 0xf5, 0x70, 0xe0, 0x76, + 0xeb, 0xe5, 0x65, 0xad, 0x59, 0x71, 0x92, 0xbd, 0x7e, 0x07, 0xc0, 0xdb, 0x45, 0xde, 0x5e, 0x48, + 0x70, 0xc0, 0xea, 0x95, 0x65, 0xad, 0x59, 0x6b, 0x5d, 0x1b, 0x0d, 0x6b, 0x3d, 0x41, 0xdc, 0x49, + 0xc9, 0xeb, 0x16, 0x94, 0x49, 0xc8, 0x30, 0x09, 0x68, 0xbd, 0x2a, 0x54, 0x17, 0x2c, 0x89, 0xa6, + 0x15, 0xa3, 0x69, 0xdd, 0x0d, 0x0e, 0x9c, 0x58, 0xc8, 0x7c, 0x02, 0x7a, 0x1a, 0x49, 0x1a, 0x92, + 0x80, 0xa2, 0x37, 0x82, 0x72, 0x0e, 0xf2, 0x21, 0xf6, 0xeb, 0xb9, 0x65, 0xad, 0x39, 0xeb, 0xf0, + 0xa5, 0xd9, 0x81, 0x99, 0x87, 0xcc, 0x8d, 0xd8, 0x79, 0x1e, 0xe8, 0x43, 0x28, 0xa3, 0x17, 0xc8, + 0xdb, 0x56, 0x96, 0xab, 0xab, 0x30, 0x38, 0x6a, 0x94, 0x36, 0x5e, 0x20, 0xaf, 0xbd, 0xee, 0x94, + 0xf8, 0xa7, 0xb6, 0x6f, 0x7e, 0x00, 0xb3, 0xca, 0x91, 0x8a, 0x5f, 0xc5, 0xa2, 0x1d, 0xc7, 0xb2, + 0x09, 0xf3, 0xeb, 0xa8, 0x8b, 0xce, 0x9d, 0x31, 0xe6, 0xef, 0x1a, 0x5c, 0x96, 0x96, 0x12, 0x6f, + 0x8b, 0x90, 0x4b, 0x94, 0x4b, 0x83, 0xa3, 0x46, 0xae, 0xbd, 0xee, 0xe4, 0xf0, 0x29, 0x88, 0xe8, + 0x0d, 0xa8, 0xa1, 0x17, 0x98, 0x6d, 0x53, 0xe6, 0xb2, 0x3e, 0xcf, 0x39, 0xfe, 0x05, 0xf8, 0xd1, + 0x43, 0x71, 0xa2, 0xdf, 0x85, 0x2a, 0xdf, 0x21, 0x7f, 0xdb, 0x65, 0x22, 0xc5, 0x6a, 0x2d, 0x63, + 0xe4, 0x01, 0x1f, 0xc5, 0xe5, 0xb0, 0x5a, 0x39, 0x3c, 0x6a, 0x5c, 0x7a, 0xf9, 0x77, 0x43, 0x73, + 0x2a, 0x52, 0xed, 0x2e, 0x33, 0x09, 0x2c, 0xc8, 0xf8, 0xb6, 0x22, 0xe2, 0x21, 0x4a, 0x2f, 0x1c, + 0x7d, 0x04, 0xb0, 0x89, 0x2e, 0xfe, 0x91, 0x37, 0xa0, 0x26, 0xdc, 0x28, 0xd0, 0x6f, 0x41, 0x39, + 0x94, 0x17, 0x14, 0x2e, 0x86, 0x6a, 0x64, 0xff, 0x86, 0x2a, 0x93, 0x18, 0x84, 0x58, 0xd8, 0x5c, + 0x81, 0xb9, 0x6f, 0x30, 0x65, 0x3c, 0x0d, 0x12, 0x68, 0x16, 0xa1, 0xb4, 0x83, 0xbb, 0x0c, 0x45, + 0x32, 0x5a, 0x47, 0xed, 0x78, 0xd2, 0xa4, 0x64, 0x93, 0xda, 0x28, 0x8a, 0x16, 0x5f, 0xd7, 0x44, + 0xc7, 0x98, 0xec, 0x56, 0x8a, 0x9a, 0x2f, 0x35, 0xa8, 0x7d, 0x8d, 0xbb, 0xdd, 0x8b, 0x06, 0x49, + 0x34, 0x1c, 0xdc, 0xe1, 0x6d, 0x45, 0xe6, 0x96, 0xda, 0xf1, 0x54, 0x74, 0xbb, 0x5d, 0x91, 0x51, + 0x15, 0x87, 0x2f, 0xcd, 0x7f, 0x35, 0xd0, 0xb9, 0xf2, 0x5b, 0xc8, 0x92, 0xa4, 0x27, 0xe6, 0x4e, + 0xef, 0x89, 0xf9, 0x31, 0x3d, 0xb1, 0x30, 0xb6, 0x27, 0x16, 0x87, 0x7a, 0x62, 0x13, 0x0a, 0x34, + 0x44, 0x9e, 0xe8, 0xa2, 0xe3, 0x5a, 0x9a, 0x90, 0x48, 0xa3, 0x54, 0x1e, 0x9b, 0x4a, 0x57, 0xe1, + 0x9d, 0x13, 0x57, 0x97, 0x2f, 0x6b, 0xfe, 0xa6, 0xc1, 0x9c, 0x83, 0x28, 0xfe, 0x09, 0x6d, 0xb1, + 0x83, 0x0b, 0x7f, 0xaa, 0x05, 0x28, 0x3e, 0xc7, 0x3e, 
0xdb, 0x55, 0x2f, 0x25, 0x37, 0x1c, 0x9d, + 0x5d, 0x84, 0x3b, 0xbb, 0xb2, 0xfa, 0x67, 0x1d, 0xb5, 0x33, 0x7f, 0x81, 0xcb, 0x6b, 0x5d, 0x42, + 0x51, 0xfb, 0xfe, 0xff, 0x11, 0x98, 0x7c, 0xce, 0xbc, 0x78, 0x05, 0xb9, 0x31, 0xbf, 0x82, 0xb9, + 0x2d, 0xb7, 0x4f, 0xcf, 0xdd, 0x3f, 0x37, 0x61, 0xde, 0x41, 0xb4, 0xdf, 0x3b, 0xb7, 0xa1, 0x0d, + 0xb8, 0xc2, 0x8b, 0x73, 0x0b, 0xfb, 0xe7, 0x49, 0x5e, 0xd3, 0x91, 0xfd, 0x40, 0x9a, 0x51, 0x25, + 0xfe, 0x25, 0x54, 0x55, 0xbb, 0x40, 0x71, 0x99, 0x2f, 0x4f, 0x2a, 0xf3, 0x76, 0xb0, 0x43, 0x9c, + 0x63, 0x15, 0xf3, 0x95, 0x06, 0x57, 0xd7, 0x92, 0x99, 0x7c, 0x5e, 0x8e, 0xb2, 0x0d, 0xf3, 0xa1, + 0x1b, 0xa1, 0x80, 0x6d, 0xa7, 0x78, 0x81, 0x7c, 0xbe, 0x16, 0xef, 0xff, 0x7f, 0x1d, 0x35, 0x56, + 0x52, 0x6c, 0x8b, 0x84, 0x28, 0x48, 0xd4, 0xa9, 0xdd, 0x21, 0xd7, 0x7d, 0xdc, 0x41, 0x94, 0x59, + 0xeb, 0xe2, 0x3f, 0x67, 0x4e, 0x1a, 0x5b, 0x3b, 0x95, 0x33, 0xe4, 0xa7, 0xe1, 0x0c, 0x8f, 0x61, + 0x71, 0xf8, 0x76, 0x09, 0x70, 0xb5, 0x63, 0x26, 0x78, 0x6a, 0x87, 0x1c, 0x21, 0x2f, 0x69, 0x05, + 0xf3, 0x67, 0x98, 0xff, 0x36, 0xf4, 0xdf, 0x02, 0xaf, 0x6b, 0x41, 0x35, 0x42, 0x94, 0xf4, 0x23, + 0x0f, 0x51, 0x81, 0xd5, 0xb8, 0x4b, 0x1d, 0x8b, 0x99, 0x2b, 0x70, 0xf9, 0x9e, 0x24, 0xc0, 0xb1, + 0xe7, 0x3a, 0x94, 0xe5, 0x24, 0x90, 0x57, 0xa9, 0x3a, 0xf1, 0x96, 0x27, 0x5f, 0x22, 0x9b, 0xcc, + 0x85, 0xb2, 0xe2, 0xcf, 0xea, 0xde, 0xf5, 0x53, 0xb8, 0xa4, 0x10, 0x70, 0x62, 0x41, 0x73, 0x07, + 0x6a, 0xdf, 0xbb, 0xf8, 0xe2, 0x67, 0x67, 0x04, 0x33, 0xd2, 0x8f, 0x8a, 0x75, 0x88, 0x87, 0x68, + 0x93, 0x79, 0x48, 0xee, 0x4d, 0x78, 0x48, 0xeb, 0xd5, 0x0c, 0x14, 0xc5, 0xe4, 0xd4, 0xf7, 0xa0, + 0x24, 0x39, 0xa6, 0x6e, 0x5b, 0x93, 0x7e, 0x31, 0x59, 0x23, 0x9c, 0xde, 0xf8, 0x74, 0x7a, 0x05, + 0x75, 0xb5, 0x1f, 0xa1, 0x28, 0xb8, 0xa0, 0xbe, 0x32, 0x59, 0x35, 0xcd, 0x4c, 0x8d, 0x8f, 0xa7, + 0x92, 0x55, 0x1e, 0x3a, 0x50, 0x92, 0x04, 0x2b, 0xeb, 0x3a, 0x23, 0x84, 0xd3, 0xf8, 0x64, 0x1a, + 0x85, 0xc4, 0xd1, 0x33, 0x98, 0x3d, 0xc1, 0xe4, 0xf4, 0xd6, 0x34, 0xea, 0x27, 0x07, 0xfa, 0x19, + 0x5d, 0x3e, 0x81, 0xfc, 0x26, 0x62, 0x7a, 0x73, 0xb2, 0xd2, 0x31, 0xdd, 0x33, 0x3e, 0x9a, 0x42, + 0x32, 0xc1, 0xad, 0xc0, 0x3b, 0xad, 0x6e, 0x4d, 0x56, 0x19, 0x66, 0x67, 0x86, 0x3d, 0xb5, 0xbc, + 0x72, 0xd4, 0x86, 0x02, 0x27, 0x5b, 0x7a, 0x46, 0x6c, 0x29, 0x42, 0x66, 0x2c, 0x8e, 0x24, 0xf7, + 0x06, 0xff, 0xb1, 0xae, 0x6f, 0x41, 0x81, 0x97, 0x92, 0x9e, 0x91, 0x87, 0xa3, 0x44, 0x6a, 0xac, + 0xc5, 0x87, 0x50, 0x4d, 0x38, 0x46, 0x16, 0x14, 0xc3, 0x64, 0x64, 0xac, 0xd1, 0xfb, 0x50, 0x56, + 0xec, 0x40, 0xcf, 0x78, 0xef, 0x93, 0x24, 0x62, 0x82, 0xc1, 0xa2, 0x98, 0xf6, 0x59, 0x11, 0x0e, + 0x53, 0x82, 0xb1, 0x06, 0x1f, 0x40, 0x49, 0x8e, 0xfd, 0xac, 0xa2, 0x19, 0x21, 0x07, 0x63, 0x4d, + 0x62, 0xa8, 0xc4, 0x93, 0x5b, 0xbf, 0x9e, 0x9d, 0x23, 0x29, 0xa2, 0x60, 0x58, 0xd3, 0x8a, 0xab, + 0x8c, 0x7a, 0x0e, 0x90, 0x9a, 0x97, 0x37, 0x33, 0x20, 0x3e, 0x6d, 0xf2, 0x1b, 0x9f, 0x9d, 0x4d, + 0x49, 0x39, 0x7e, 0x00, 0x25, 0x39, 0x10, 0xb3, 0x60, 0x1b, 0x19, 0x9b, 0x63, 0x61, 0xdb, 0x81, + 0xb2, 0x1a, 0x5d, 0x59, 0xb9, 0x72, 0x72, 0x1a, 0x1a, 0xd7, 0xa7, 0x94, 0x56, 0xa1, 0xff, 0x00, + 0x05, 0x3e, 0x73, 0xb2, 0xaa, 0x30, 0x35, 0xff, 0x8c, 0x95, 0x69, 0x44, 0xa5, 0xf9, 0xd5, 0xef, + 0x0e, 0x5f, 0x2f, 0x5d, 0xfa, 0xf3, 0xf5, 0xd2, 0xa5, 0x5f, 0x07, 0x4b, 0xda, 0xe1, 0x60, 0x49, + 0xfb, 0x63, 0xb0, 0xa4, 0xfd, 0x33, 0x58, 0xd2, 0x9e, 0xdc, 0x79, 0xb3, 0xbf, 0xec, 0xdd, 0x16, + 0x8b, 0xc7, 0xb9, 0xa7, 0x25, 0x01, 0xd8, 0xcd, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x85, 0xa2, + 0x4f, 0xd1, 0x22, 0x14, 0x00, 0x00, } diff --git 
a/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.proto b/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.proto index eb373185eb..90793cbaba 100644 --- a/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.proto +++ b/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.proto @@ -4,7 +4,7 @@ package containerd.services.tasks.v1; import "google/protobuf/empty.proto"; import "google/protobuf/any.proto"; -import "gogoproto/gogo.proto"; +import weak "gogoproto/gogo.proto"; import "github.com/containerd/containerd/api/types/mount.proto"; import "github.com/containerd/containerd/api/types/metrics.proto"; import "github.com/containerd/containerd/api/types/descriptor.proto"; diff --git a/vendor/github.com/containerd/containerd/api/services/version/v1/version.pb.go b/vendor/github.com/containerd/containerd/api/services/version/v1/version.pb.go index c403c8431b..3f6528a0bb 100644 --- a/vendor/github.com/containerd/containerd/api/services/version/v1/version.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/version/v1/version.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/containerd/containerd/api/services/version/v1/version.proto -// DO NOT EDIT! /* Package version is a generated protocol buffer package. @@ -16,8 +15,9 @@ package version import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" -import google_protobuf "github.com/golang/protobuf/ptypes/empty" -import _ "github.com/gogo/protobuf/gogoproto" +import google_protobuf "github.com/gogo/protobuf/types" + +// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" import ( context "golang.org/x/net/context" @@ -155,24 +155,6 @@ func (m *VersionResponse) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Version(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Version(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintVersion(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -446,7 +428,7 @@ func init() { } var fileDescriptorVersion = []byte{ - // 241 bytes of a gzipped FileDescriptorProto + // 243 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4b, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x17, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb, @@ -460,7 +442,7 @@ var fileDescriptorVersion = []byte{ 0x76, 0xa8, 0x41, 0x42, 0x41, 0x08, 0xa6, 0x98, 0x1e, 0xc4, 0x49, 0x7a, 0x30, 0x27, 0xe9, 0xb9, 0x82, 0x9c, 0x24, 0xa5, 0xaf, 0x87, 0xdf, 0x2b, 0x7a, 0x68, 0x8e, 0x72, 0x8a, 0x3a, 0xf1, 0x50, 0x8e, 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x86, 0x47, 0x72, 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, - 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0x63, 0x94, 0x03, 0xb9, 0x81, 0x6b, 0x0d, 0x65, 0x26, 0xb1, - 0x81, 0x1d, 0x67, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0xb6, 0x37, 0xd8, 
0xc6, 0xa7, 0x01, 0x00, - 0x00, + 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0x63, 0x94, 0x03, 0xb9, 0x81, 0x6b, 0x0d, 0x65, 0x46, 0x30, + 0x26, 0xb1, 0x81, 0x9d, 0x67, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x95, 0x0d, 0x52, 0x23, 0xa9, + 0x01, 0x00, 0x00, } diff --git a/vendor/github.com/containerd/containerd/api/services/version/v1/version.proto b/vendor/github.com/containerd/containerd/api/services/version/v1/version.proto index 2398fdcbed..0e4c3d1e08 100644 --- a/vendor/github.com/containerd/containerd/api/services/version/v1/version.proto +++ b/vendor/github.com/containerd/containerd/api/services/version/v1/version.proto @@ -3,7 +3,7 @@ syntax = "proto3"; package containerd.services.version.v1; import "google/protobuf/empty.proto"; -import "gogoproto/gogo.proto"; +import weak "gogoproto/gogo.proto"; // TODO(stevvooe): Should version service actually be versioned? option go_package = "github.com/containerd/containerd/api/services/version/v1;version"; diff --git a/vendor/github.com/containerd/containerd/api/types/descriptor.pb.go b/vendor/github.com/containerd/containerd/api/types/descriptor.pb.go index 785d050707..93e88c0dc2 100644 --- a/vendor/github.com/containerd/containerd/api/types/descriptor.pb.go +++ b/vendor/github.com/containerd/containerd/api/types/descriptor.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/containerd/containerd/api/types/descriptor.proto -// DO NOT EDIT! /* Package types is a generated protocol buffer package. @@ -22,7 +21,8 @@ package types import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" -import _ "github.com/gogo/protobuf/gogoproto" + +// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest" @@ -95,24 +95,6 @@ func (m *Descriptor) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Descriptor(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Descriptor(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintDescriptor(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -409,7 +391,7 @@ func init() { } var fileDescriptorDescriptor = []byte{ - // 232 bytes of a gzipped FileDescriptorProto + // 234 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xa7, 0xa4, 0x16, @@ -423,6 +405,6 @@ var fileDescriptorDescriptor = []byte{ 0x4d, 0x10, 0x12, 0xe2, 0x62, 0x29, 0xce, 0xac, 0x4a, 0x95, 0x60, 0x56, 0x60, 0xd4, 0x60, 0x0e, 0x02, 0xb3, 0x9d, 0xbc, 0x4e, 0x3c, 0x94, 0x63, 0xb8, 0xf1, 0x50, 0x8e, 0xa1, 0xe1, 0x91, 0x1c, 0xe3, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x18, 0x65, 0x40, - 0x7c, 0x60, 0x58, 0x83, 0xc9, 0x24, 0x36, 0xb0, 0x07, 0x8d, 0x01, 0x01, 0x00, 0x00, 0xff, 
0xff, - 0x23, 0x14, 0xc9, 0x7c, 0x47, 0x01, 0x00, 0x00, + 0x7c, 0x60, 0x58, 0x83, 0xc9, 0x08, 0x86, 0x24, 0x36, 0xb0, 0x17, 0x8d, 0x01, 0x01, 0x00, 0x00, + 0xff, 0xff, 0xea, 0xac, 0x78, 0x9a, 0x49, 0x01, 0x00, 0x00, } diff --git a/vendor/github.com/containerd/containerd/api/types/descriptor.proto b/vendor/github.com/containerd/containerd/api/types/descriptor.proto index 7975ab06d9..5c00dca4f1 100644 --- a/vendor/github.com/containerd/containerd/api/types/descriptor.proto +++ b/vendor/github.com/containerd/containerd/api/types/descriptor.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package containerd.types; -import "gogoproto/gogo.proto"; +import weak "gogoproto/gogo.proto"; option go_package = "github.com/containerd/containerd/api/types;types"; diff --git a/vendor/github.com/containerd/containerd/api/types/metrics.pb.go b/vendor/github.com/containerd/containerd/api/types/metrics.pb.go index f9aacf941a..da0467884c 100644 --- a/vendor/github.com/containerd/containerd/api/types/metrics.pb.go +++ b/vendor/github.com/containerd/containerd/api/types/metrics.pb.go @@ -1,13 +1,13 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/containerd/containerd/api/types/metrics.proto -// DO NOT EDIT! package types import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" -import _ "github.com/gogo/protobuf/gogoproto" + +// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" import google_protobuf1 "github.com/gogo/protobuf/types" import _ "github.com/gogo/protobuf/types" @@ -81,24 +81,6 @@ func (m *Metric) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Metrics(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Metrics(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -409,7 +391,7 @@ func init() { } var fileDescriptorMetrics = []byte{ - // 256 bytes of a gzipped FileDescriptorProto + // 258 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x48, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xe7, 0xa6, 0x96, @@ -424,6 +406,7 @@ var fileDescriptorMetrics = []byte{ 0x5d, 0x82, 0x98, 0x32, 0x53, 0x84, 0x34, 0xb8, 0x58, 0x52, 0x12, 0x4b, 0x12, 0x25, 0x98, 0xc1, 0xc6, 0x8a, 0x60, 0x18, 0xeb, 0x98, 0x57, 0x19, 0x04, 0x56, 0xe1, 0xe4, 0x75, 0xe2, 0xa1, 0x1c, 0xc3, 0x8d, 0x87, 0x72, 0x0c, 0x0d, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, - 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x28, 0x03, 0xe2, 0x03, 0xd2, 0x1a, 0x4c, 0x26, 0xb1, 0x81, - 0xcd, 0x37, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xf8, 0x51, 0x36, 0x74, 0x83, 0x01, 0x00, 0x00, + 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x28, 0x03, 0xe2, 0x03, 0xd2, 0x1a, 0x4c, 0x46, 0x30, 0x24, + 0xb1, 0x81, 0x6d, 0x30, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xde, 0x0d, 0x02, 0xfe, 0x85, 0x01, + 
0x00, 0x00, } diff --git a/vendor/github.com/containerd/containerd/api/types/metrics.proto b/vendor/github.com/containerd/containerd/api/types/metrics.proto index d1629c7eec..0e631d2ac3 100644 --- a/vendor/github.com/containerd/containerd/api/types/metrics.proto +++ b/vendor/github.com/containerd/containerd/api/types/metrics.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package containerd.types; -import "gogoproto/gogo.proto"; +import weak "gogoproto/gogo.proto"; import "google/protobuf/any.proto"; import "google/protobuf/timestamp.proto"; diff --git a/vendor/github.com/containerd/containerd/api/types/mount.pb.go b/vendor/github.com/containerd/containerd/api/types/mount.pb.go index cc835140e3..f7a9c3c1fe 100644 --- a/vendor/github.com/containerd/containerd/api/types/mount.pb.go +++ b/vendor/github.com/containerd/containerd/api/types/mount.pb.go @@ -1,13 +1,13 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/containerd/containerd/api/types/mount.proto -// DO NOT EDIT! package types import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" -import _ "github.com/gogo/protobuf/gogoproto" + +// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" import strings "strings" import reflect "reflect" @@ -96,24 +96,6 @@ func (m *Mount) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Mount(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Mount(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintMount(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -457,7 +439,7 @@ func init() { } var fileDescriptorMount = []byte{ - // 200 bytes of a gzipped FileDescriptorProto + // 202 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x4b, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xe7, 0xe6, 0x97, @@ -469,6 +451,6 @@ var fileDescriptorMount = []byte{ 0x82, 0x8b, 0x3d, 0xbf, 0xa0, 0x24, 0x33, 0x3f, 0xaf, 0x58, 0x82, 0x45, 0x81, 0x59, 0x83, 0x33, 0x08, 0xc6, 0x75, 0xf2, 0x3a, 0xf1, 0x50, 0x8e, 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x86, 0x47, 0x72, 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0x63, 0x94, 0x01, - 0xf1, 0x1e, 0xb4, 0x06, 0x93, 0x49, 0x6c, 0x60, 0x97, 0x1b, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, - 0xe5, 0xc7, 0x07, 0x3f, 0x1b, 0x01, 0x00, 0x00, + 0xf1, 0x1e, 0xb4, 0x06, 0x93, 0x11, 0x0c, 0x49, 0x6c, 0x60, 0xb7, 0x1b, 0x03, 0x02, 0x00, 0x00, + 0xff, 0xff, 0x82, 0x1c, 0x02, 0x18, 0x1d, 0x01, 0x00, 0x00, } diff --git a/vendor/github.com/containerd/containerd/api/types/mount.proto b/vendor/github.com/containerd/containerd/api/types/mount.proto index 031e654424..cd80e44a2c 100644 --- a/vendor/github.com/containerd/containerd/api/types/mount.proto +++ b/vendor/github.com/containerd/containerd/api/types/mount.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package 
containerd.types; -import "gogoproto/gogo.proto"; +import weak "gogoproto/gogo.proto"; option go_package = "github.com/containerd/containerd/api/types;types"; diff --git a/vendor/github.com/containerd/containerd/api/types/platform.pb.go b/vendor/github.com/containerd/containerd/api/types/platform.pb.go index 0ca2afc259..ba9a3bf881 100644 --- a/vendor/github.com/containerd/containerd/api/types/platform.pb.go +++ b/vendor/github.com/containerd/containerd/api/types/platform.pb.go @@ -1,13 +1,13 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/containerd/containerd/api/types/platform.proto -// DO NOT EDIT! package types import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" -import _ "github.com/gogo/protobuf/gogoproto" + +// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" import strings "strings" import reflect "reflect" @@ -70,24 +70,6 @@ func (m *Platform) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Platform(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Platform(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintPlatform(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -395,7 +377,7 @@ func init() { } var fileDescriptorPlatform = []byte{ - // 203 bytes of a gzipped FileDescriptorProto + // 205 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0x17, 0xe4, 0x24, @@ -407,6 +389,6 @@ var fileDescriptorPlatform = []byte{ 0x40, 0x2a, 0x82, 0x50, 0xc4, 0x84, 0x24, 0xb8, 0xd8, 0xcb, 0x12, 0x8b, 0x32, 0x13, 0xf3, 0x4a, 0x24, 0x98, 0xc1, 0xd2, 0x30, 0xae, 0x93, 0xd7, 0x89, 0x87, 0x72, 0x0c, 0x37, 0x1e, 0xca, 0x31, 0x34, 0x3c, 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, - 0x18, 0xa3, 0x0c, 0x88, 0xf7, 0x9e, 0x35, 0x98, 0x4c, 0x62, 0x03, 0x3b, 0xda, 0x18, 0x10, 0x00, - 0x00, 0xff, 0xff, 0x97, 0xa1, 0x99, 0x56, 0x19, 0x01, 0x00, 0x00, + 0x18, 0xa3, 0x0c, 0x88, 0xf7, 0x9e, 0x35, 0x98, 0x8c, 0x60, 0x48, 0x62, 0x03, 0x3b, 0xdb, 0x18, + 0x10, 0x00, 0x00, 0xff, 0xff, 0x05, 0xaa, 0xda, 0xa1, 0x1b, 0x01, 0x00, 0x00, } diff --git a/vendor/github.com/containerd/containerd/api/types/platform.proto b/vendor/github.com/containerd/containerd/api/types/platform.proto index b1dce06233..4cf9834bdc 100644 --- a/vendor/github.com/containerd/containerd/api/types/platform.proto +++ b/vendor/github.com/containerd/containerd/api/types/platform.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package containerd.types; -import "gogoproto/gogo.proto"; +import weak "gogoproto/gogo.proto"; option go_package = "github.com/containerd/containerd/api/types;types"; diff --git a/vendor/github.com/containerd/containerd/api/types/task/task.pb.go b/vendor/github.com/containerd/containerd/api/types/task/task.pb.go index 
ccc230ae73..ba34270a37 100644 --- a/vendor/github.com/containerd/containerd/api/types/task/task.pb.go +++ b/vendor/github.com/containerd/containerd/api/types/task/task.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/containerd/containerd/api/types/task/task.proto -// DO NOT EDIT! /* Package task is a generated protocol buffer package. @@ -17,7 +16,8 @@ package task import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" -import _ "github.com/gogo/protobuf/gogoproto" + +// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" import _ "github.com/gogo/protobuf/types" import google_protobuf2 "github.com/gogo/protobuf/types" @@ -224,24 +224,6 @@ func (m *ProcessInfo) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Task(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Task(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintTask(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -869,39 +851,40 @@ func init() { } var fileDescriptorTask = []byte{ - // 543 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xcf, 0x6e, 0xd3, 0x4c, - 0x14, 0xc5, 0x33, 0x6e, 0xe3, 0x24, 0xe3, 0xb6, 0x9f, 0x3f, 0x13, 0x55, 0xc6, 0x20, 0xdb, 0xea, - 0xca, 0x62, 0x61, 0x8b, 0x74, 0xc7, 0x2e, 0xff, 0x84, 0x2c, 0x24, 0x37, 0x72, 0x12, 0xb1, 0x8c, - 0x9c, 0x78, 0x62, 0x46, 0x6d, 0x66, 0x2c, 0x7b, 0x0c, 0x64, 0xc7, 0x12, 0x75, 0xc5, 0x0b, 0x74, - 0x05, 0x4f, 0xc1, 0x13, 0x64, 0xc9, 0x0a, 0xb1, 0x0a, 0xd4, 0x4f, 0x82, 0xc6, 0x76, 0xd2, 0x08, - 0xd8, 0x8c, 0xee, 0x3d, 0xbf, 0x33, 0x77, 0xee, 0x1c, 0xf8, 0x22, 0xc2, 0xec, 0x4d, 0x36, 0xb7, - 0x17, 0x74, 0xe5, 0x2c, 0x28, 0x61, 0x01, 0x26, 0x28, 0x09, 0x0f, 0xcb, 0x20, 0xc6, 0x0e, 0x5b, - 0xc7, 0x28, 0x75, 0x58, 0x90, 0x5e, 0x17, 0x87, 0x1d, 0x27, 0x94, 0x51, 0xe5, 0xd1, 0x83, 0xcb, - 0x7e, 0xfb, 0xdc, 0x2e, 0x4c, 0x5a, 0x3b, 0xa2, 0x11, 0x2d, 0xb8, 0xc3, 0xab, 0xd2, 0xaa, 0x19, - 0x11, 0xa5, 0xd1, 0x0d, 0x72, 0x8a, 0x6e, 0x9e, 0x2d, 0x1d, 0x86, 0x57, 0x28, 0x65, 0xc1, 0x2a, - 0xae, 0x0c, 0x8f, 0xff, 0x34, 0x04, 0x64, 0x5d, 0xa2, 0x8b, 0x5c, 0x80, 0x8d, 0x51, 0x42, 0x17, - 0x28, 0x4d, 0x95, 0x0e, 0x3c, 0xd9, 0x3f, 0x3a, 0xc3, 0xa1, 0x0a, 0x4c, 0x60, 0xb5, 0x7a, 0xff, - 0xe5, 0x5b, 0x43, 0xea, 0xef, 0x74, 0x77, 0xe0, 0x4b, 0x7b, 0x93, 0x1b, 0x2a, 0xe7, 0x50, 0xc0, - 0xa1, 0x2a, 0x14, 0x4e, 0x31, 0xdf, 0x1a, 0x82, 0x3b, 0xf0, 0x05, 0x1c, 0x2a, 0x32, 0x3c, 0x8a, - 0x71, 0xa8, 0x1e, 0x99, 0xc0, 0x3a, 0xf5, 0x79, 0xa9, 0x5c, 0x42, 0x31, 0x65, 0x01, 0xcb, 0x52, - 0xf5, 0xd8, 0x04, 0xd6, 0x59, 0xe7, 0x89, 0xfd, 0x8f, 0x1f, 0xda, 0xe3, 0xc2, 0xe2, 0x57, 0x56, - 0xa5, 0x0d, 0xeb, 0x29, 0x0b, 0x31, 0x51, 0xeb, 0xfc, 0x05, 0xbf, 0x6c, 0x94, 0x73, 0x3e, 0x2a, - 0xa4, 0x19, 0x53, 0xc5, 0x42, 0xae, 0xba, 0x4a, 0x47, 0x49, 0xa2, 0x36, 0xf6, 0x3a, 0x4a, 0x12, - 0x45, 0x83, 0x4d, 0x86, 0x92, 0x15, 0x26, 0xc1, 0x8d, 0xda, 0x34, 0x81, 0xd5, 0xf4, 0xf7, 0xbd, - 0x62, 0x40, 0x09, 0xbd, 0xc7, 0x6c, 
0x56, 0xed, 0xd6, 0x2a, 0x16, 0x86, 0x5c, 0x2a, 0x57, 0x51, - 0xba, 0xb0, 0xc5, 0x3b, 0x14, 0xce, 0x02, 0xa6, 0x42, 0x13, 0x58, 0x52, 0x47, 0xb3, 0xcb, 0x40, - 0xed, 0x5d, 0xa0, 0xf6, 0x64, 0x97, 0x78, 0xaf, 0xb9, 0xd9, 0x1a, 0xb5, 0x4f, 0x3f, 0x0d, 0xe0, - 0x37, 0xcb, 0x6b, 0x5d, 0x76, 0xe1, 0x42, 0xa9, 0xca, 0xd8, 0x25, 0x4b, 0xba, 0xcb, 0x06, 0x3c, - 0x64, 0x63, 0xc1, 0x63, 0x4c, 0x96, 0xb4, 0xc8, 0x51, 0xea, 0xb4, 0xff, 0x1a, 0xdf, 0x25, 0x6b, - 0xbf, 0x70, 0x3c, 0xfb, 0x0e, 0xa0, 0x58, 0x2d, 0xa6, 0xc3, 0xc6, 0xd4, 0x7b, 0xe5, 0x5d, 0xbd, - 0xf6, 0xe4, 0x9a, 0xf6, 0xff, 0xed, 0x9d, 0x79, 0x5a, 0x82, 0x29, 0xb9, 0x26, 0xf4, 0x1d, 0xe1, - 0xbc, 0xef, 0x0f, 0xbb, 0x93, 0xe1, 0x40, 0x06, 0x87, 0xbc, 0x9f, 0xa0, 0x80, 0xa1, 0x90, 0x73, - 0x7f, 0xea, 0x79, 0xae, 0xf7, 0x52, 0x16, 0x0e, 0xb9, 0x9f, 0x11, 0x82, 0x49, 0xc4, 0xf9, 0x78, - 0x72, 0x35, 0x1a, 0x0d, 0x07, 0xf2, 0xd1, 0x21, 0x1f, 0x33, 0x1a, 0xc7, 0x28, 0x54, 0x9e, 0x42, - 0x71, 0xd4, 0x9d, 0x8e, 0x87, 0x03, 0xf9, 0x58, 0x93, 0x6f, 0xef, 0xcc, 0x93, 0x12, 0x8f, 0x82, - 0x2c, 0x2d, 0xa7, 0x73, 0xca, 0xa7, 0xd7, 0x0f, 0x6f, 0x73, 0x8c, 0x49, 0xa4, 0x9d, 0x7d, 0xfc, - 0xac, 0xd7, 0xbe, 0x7e, 0xd1, 0xab, 0xdf, 0xf4, 0xd4, 0xcd, 0xbd, 0x5e, 0xfb, 0x71, 0xaf, 0xd7, - 0x3e, 0xe4, 0x3a, 0xd8, 0xe4, 0x3a, 0xf8, 0x96, 0xeb, 0xe0, 0x57, 0xae, 0x83, 0xb9, 0x58, 0xc4, - 0x70, 0xf9, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x19, 0xf7, 0x5b, 0x8f, 0x4e, 0x03, 0x00, 0x00, + // 545 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x3f, 0x6f, 0xd3, 0x40, + 0x18, 0xc6, 0x7d, 0x6e, 0xeb, 0xa6, 0xe7, 0xb6, 0x18, 0x13, 0x55, 0xc6, 0x20, 0xdb, 0xea, 0x64, + 0x31, 0xd8, 0x22, 0xdd, 0xd8, 0xf2, 0x4f, 0xc8, 0x42, 0x72, 0x23, 0x27, 0x11, 0x6c, 0x91, 0x13, + 0x5f, 0xcc, 0xa9, 0xcd, 0x9d, 0x65, 0x9f, 0x81, 0x6c, 0x8c, 0xa8, 0x13, 0x5f, 0xa0, 0x13, 0x7c, + 0x0a, 0x3e, 0x41, 0x46, 0x26, 0xc4, 0x14, 0xa8, 0x3f, 0x09, 0x3a, 0xdb, 0x49, 0x23, 0x60, 0x39, + 0xbd, 0xef, 0xf3, 0x7b, 0xee, 0xbd, 0xf7, 0x1e, 0xf8, 0x22, 0xc6, 0xec, 0x6d, 0x3e, 0x75, 0x66, + 0x74, 0xe1, 0xce, 0x28, 0x61, 0x21, 0x26, 0x28, 0x8d, 0x76, 0xcb, 0x30, 0xc1, 0x2e, 0x5b, 0x26, + 0x28, 0x73, 0x59, 0x98, 0x5d, 0x95, 0x87, 0x93, 0xa4, 0x94, 0x51, 0xf5, 0xd1, 0xbd, 0xcb, 0x79, + 0xf7, 0xdc, 0x29, 0x4d, 0x7a, 0x33, 0xa6, 0x31, 0x2d, 0xb9, 0xcb, 0xab, 0xca, 0xaa, 0x9b, 0x31, + 0xa5, 0xf1, 0x35, 0x72, 0xcb, 0x6e, 0x9a, 0xcf, 0x5d, 0x86, 0x17, 0x28, 0x63, 0xe1, 0x22, 0xa9, + 0x0d, 0x8f, 0xff, 0x36, 0x84, 0x64, 0x59, 0xa1, 0xf3, 0x42, 0x84, 0x87, 0x83, 0x94, 0xce, 0x50, + 0x96, 0xa9, 0x2d, 0x78, 0xbc, 0x7d, 0x74, 0x82, 0x23, 0x0d, 0x58, 0xc0, 0x3e, 0xea, 0x3c, 0x28, + 0xd6, 0xa6, 0xdc, 0xdd, 0xe8, 0x5e, 0x2f, 0x90, 0xb7, 0x26, 0x2f, 0x52, 0xcf, 0xa0, 0x88, 0x23, + 0x4d, 0x2c, 0x9d, 0x52, 0xb1, 0x36, 0x45, 0xaf, 0x17, 0x88, 0x38, 0x52, 0x15, 0xb8, 0x97, 0xe0, + 0x48, 0xdb, 0xb3, 0x80, 0x7d, 0x12, 0xf0, 0x52, 0xbd, 0x80, 0x52, 0xc6, 0x42, 0x96, 0x67, 0xda, + 0xbe, 0x05, 0xec, 0xd3, 0xd6, 0x13, 0xe7, 0x3f, 0x3f, 0x74, 0x86, 0xa5, 0x25, 0xa8, 0xad, 0x6a, + 0x13, 0x1e, 0x64, 0x2c, 0xc2, 0x44, 0x3b, 0xe0, 0x2f, 0x04, 0x55, 0xa3, 0x9e, 0xf1, 0x51, 0x11, + 0xcd, 0x99, 0x26, 0x95, 0x72, 0xdd, 0xd5, 0x3a, 0x4a, 0x53, 0xed, 0x70, 0xab, 0xa3, 0x34, 0x55, + 0x75, 0xd8, 0x60, 0x28, 0x5d, 0x60, 0x12, 0x5e, 0x6b, 0x0d, 0x0b, 0xd8, 0x8d, 0x60, 0xdb, 0xab, + 0x26, 0x94, 0xd1, 0x07, 0xcc, 0x26, 0xf5, 0x6e, 0x47, 0xe5, 0xc2, 0x90, 0x4b, 0xd5, 0x2a, 0x6a, + 0x1b, 0x1e, 0xf1, 0x0e, 0x45, 0x93, 0x90, 0x69, 0xd0, 0x02, 0xb6, 0xdc, 0xd2, 0x9d, 0x2a, 0x50, + 0x67, 0x13, 0xa8, 
0x33, 0xda, 0x24, 0xde, 0x69, 0xac, 0xd6, 0xa6, 0xf0, 0xf9, 0x97, 0x09, 0x82, + 0x46, 0x75, 0xad, 0xcd, 0xce, 0x3d, 0x28, 0xd7, 0x19, 0x7b, 0x64, 0x4e, 0x37, 0xd9, 0x80, 0xfb, + 0x6c, 0x6c, 0xb8, 0x8f, 0xc9, 0x9c, 0x96, 0x39, 0xca, 0xad, 0xe6, 0x3f, 0xe3, 0xdb, 0x64, 0x19, + 0x94, 0x8e, 0x67, 0x3f, 0x00, 0x94, 0xea, 0xc5, 0x0c, 0x78, 0x38, 0xf6, 0x5f, 0xf9, 0x97, 0xaf, + 0x7d, 0x45, 0xd0, 0x1f, 0xde, 0xdc, 0x5a, 0x27, 0x15, 0x18, 0x93, 0x2b, 0x42, 0xdf, 0x13, 0xce, + 0xbb, 0x41, 0xbf, 0x3d, 0xea, 0xf7, 0x14, 0xb0, 0xcb, 0xbb, 0x29, 0x0a, 0x19, 0x8a, 0x38, 0x0f, + 0xc6, 0xbe, 0xef, 0xf9, 0x2f, 0x15, 0x71, 0x97, 0x07, 0x39, 0x21, 0x98, 0xc4, 0x9c, 0x0f, 0x47, + 0x97, 0x83, 0x41, 0xbf, 0xa7, 0xec, 0xed, 0xf2, 0x21, 0xa3, 0x49, 0x82, 0x22, 0xf5, 0x29, 0x94, + 0x06, 0xed, 0xf1, 0xb0, 0xdf, 0x53, 0xf6, 0x75, 0xe5, 0xe6, 0xd6, 0x3a, 0xae, 0xf0, 0x20, 0xcc, + 0xb3, 0x6a, 0x3a, 0xa7, 0x7c, 0xfa, 0xc1, 0xee, 0x6d, 0x8e, 0x31, 0x89, 0xf5, 0xd3, 0x4f, 0x5f, + 0x0c, 0xe1, 0xdb, 0x57, 0xa3, 0xfe, 0x4d, 0x47, 0x5b, 0xdd, 0x19, 0xc2, 0xcf, 0x3b, 0x43, 0xf8, + 0x58, 0x18, 0x60, 0x55, 0x18, 0xe0, 0x7b, 0x61, 0x80, 0xdf, 0x85, 0x01, 0xde, 0x08, 0x53, 0xa9, + 0x0c, 0xe2, 0xe2, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc3, 0x32, 0xd2, 0x86, 0x50, 0x03, 0x00, + 0x00, } diff --git a/vendor/github.com/containerd/containerd/api/types/task/task.proto b/vendor/github.com/containerd/containerd/api/types/task/task.proto index 5845edf292..da91cb033e 100644 --- a/vendor/github.com/containerd/containerd/api/types/task/task.proto +++ b/vendor/github.com/containerd/containerd/api/types/task/task.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package containerd.v1.types; -import "gogoproto/gogo.proto"; +import weak "gogoproto/gogo.proto"; import "google/protobuf/timestamp.proto"; import "google/protobuf/any.proto"; diff --git a/vendor/github.com/containerd/containerd/archive/path.go b/vendor/github.com/containerd/containerd/archive/path.go deleted file mode 100644 index 0f6cfa32e7..0000000000 --- a/vendor/github.com/containerd/containerd/archive/path.go +++ /dev/null @@ -1 +0,0 @@ -package archive diff --git a/vendor/github.com/containerd/containerd/archive/tar.go b/vendor/github.com/containerd/containerd/archive/tar.go index 0550f218cd..c12d4f280c 100644 --- a/vendor/github.com/containerd/containerd/archive/tar.go +++ b/vendor/github.com/containerd/containerd/archive/tar.go @@ -19,13 +19,12 @@ import ( "github.com/pkg/errors" ) -var ( - bufferPool = &sync.Pool{ - New: func() interface{} { - return make([]byte, 32*1024) - }, - } -) +var bufferPool = &sync.Pool{ + New: func() interface{} { + buffer := make([]byte, 32*1024) + return &buffer + }, +} // Diff returns a tar stream of the computed filesystem // difference between the provided directories. @@ -82,6 +81,8 @@ const ( // whiteoutOpaqueDir file means directory has been made opaque - meaning // readdir calls to this directory do not follow to lower layers. whiteoutOpaqueDir = whiteoutMetaPrefix + ".opq" + + paxSchilyXattr = "SCHILY.xattrs." ) // Apply applies a tar stream of an OCI style diff tar. 
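Several files in this diff (archive/tar.go above, and later fs/copy_*.go, content/helpers.go and content/local/store.go) switch their sync.Pool element from []byte to *[]byte. Storing a pointer lets the buffer round-trip through the pool's interface{} without re-boxing the slice header on every Put, which is the same rationale behind staticcheck's SA6002 check. A minimal, self-contained sketch of the pattern follows; it is illustrative only and not part of the vendored code.

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
	"sync"
)

// pool hands out pointers to 32 KiB buffers; a pointer fits in an
// interface value without an extra heap allocation, unlike a slice header.
var pool = &sync.Pool{
	New: func() interface{} {
		buf := make([]byte, 32*1024)
		return &buf
	},
}

// copyWithPool copies src to dst using a pooled buffer, mirroring how the
// vendored code now calls io.CopyBuffer with a dereferenced *[]byte.
func copyWithPool(dst io.Writer, src io.Reader) (int64, error) {
	buf := pool.Get().(*[]byte) // type-assert back to the pointer type
	defer pool.Put(buf)         // put the pointer back, not the slice
	return io.CopyBuffer(dst, src, *buf)
}

func main() {
	var out bytes.Buffer
	n, err := copyWithPool(&out, strings.NewReader("hello"))
	fmt.Println(n, err, out.String())
}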
@@ -388,9 +389,10 @@ func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, e if capability, err := getxattr(source, "security.capability"); err != nil { return errors.Wrap(err, "failed to get capabilities xattr") } else if capability != nil { - hdr.Xattrs = map[string]string{ - "security.capability": string(capability), + if hdr.PAXRecords == nil { + hdr.PAXRecords = map[string]string{} } + hdr.PAXRecords[paxSchilyXattr+"security.capability"] = string(capability) } if err := cw.tw.WriteHeader(hdr); err != nil { @@ -404,8 +406,8 @@ func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, e } defer file.Close() - buf := bufferPool.Get().([]byte) - n, err := io.CopyBuffer(cw.tw, file, buf) + buf := bufferPool.Get().(*[]byte) + n, err := io.CopyBuffer(cw.tw, file, *buf) bufferPool.Put(buf) if err != nil { return errors.Wrap(err, "failed to copy") @@ -509,13 +511,16 @@ func createTarFile(ctx context.Context, path, extractDir string, hdr *tar.Header } } - for key, value := range hdr.Xattrs { - if err := setxattr(path, key, value); err != nil { - if errors.Cause(err) == syscall.ENOTSUP { - log.G(ctx).WithError(err).Warnf("ignored xattr %s in archive", key) - continue + for key, value := range hdr.PAXRecords { + if strings.HasPrefix(key, paxSchilyXattr) { + key = key[len(paxSchilyXattr):] + if err := setxattr(path, key, value); err != nil { + if errors.Cause(err) == syscall.ENOTSUP { + log.G(ctx).WithError(err).Warnf("ignored xattr %s in archive", key) + continue + } + return err } - return err } } @@ -529,7 +534,7 @@ func createTarFile(ctx context.Context, path, extractDir string, hdr *tar.Header } func copyBuffered(ctx context.Context, dst io.Writer, src io.Reader) (written int64, err error) { - buf := bufferPool.Get().([]byte) + buf := bufferPool.Get().(*[]byte) defer bufferPool.Put(buf) for { @@ -540,9 +545,9 @@ func copyBuffered(ctx context.Context, dst io.Writer, src io.Reader) (written in default: } - nr, er := src.Read(buf) + nr, er := src.Read(*buf) if nr > 0 { - nw, ew := dst.Write(buf[0:nr]) + nw, ew := dst.Write((*buf)[0:nr]) if nw > 0 { written += int64(nw) } diff --git a/vendor/github.com/containerd/containerd/io.go b/vendor/github.com/containerd/containerd/cio/io.go similarity index 84% rename from vendor/github.com/containerd/containerd/io.go rename to vendor/github.com/containerd/containerd/cio/io.go index 48c06f12e2..25e3981471 100644 --- a/vendor/github.com/containerd/containerd/io.go +++ b/vendor/github.com/containerd/containerd/cio/io.go @@ -1,4 +1,4 @@ -package containerd +package cio import ( "context" @@ -8,8 +8,8 @@ import ( "sync" ) -// IOConfig holds the io configurations. -type IOConfig struct { +// Config holds the io configurations. +type Config struct { // Terminal is true if one has been allocated Terminal bool // Stdin path @@ -23,7 +23,7 @@ type IOConfig struct { // IO holds the io information for a task or process type IO interface { // Config returns the IO configuration. - Config() IOConfig + Config() Config // Cancel aborts all current io operations Cancel() // Wait blocks until all io copy operations have completed @@ -34,12 +34,12 @@ type IO interface { // cio is a basic container IO implementation. 
type cio struct { - config IOConfig + config Config closer *wgCloser } -func (c *cio) Config() IOConfig { +func (c *cio) Config() Config { return c.config } @@ -64,23 +64,23 @@ func (c *cio) Close() error { return c.closer.Close() } -// IOCreation creates new IO sets for a task -type IOCreation func(id string) (IO, error) +// Creation creates new IO sets for a task +type Creation func(id string) (IO, error) -// IOAttach allows callers to reattach to running tasks +// Attach allows callers to reattach to running tasks // // There should only be one reader for a task's IO set // because fifo's can only be read from one reader or the output // will be sent only to the first reads -type IOAttach func(*FIFOSet) (IO, error) +type Attach func(*FIFOSet) (IO, error) -// NewIO returns an IOCreation that will provide IO sets without a terminal -func NewIO(stdin io.Reader, stdout, stderr io.Writer) IOCreation { +// NewIO returns an Creation that will provide IO sets without a terminal +func NewIO(stdin io.Reader, stdout, stderr io.Writer) Creation { return NewIOWithTerminal(stdin, stdout, stderr, false) } // NewIOWithTerminal creates a new io set with the provied io.Reader/Writers for use with a terminal -func NewIOWithTerminal(stdin io.Reader, stdout, stderr io.Writer, terminal bool) IOCreation { +func NewIOWithTerminal(stdin io.Reader, stdout, stderr io.Writer, terminal bool) Creation { return func(id string) (_ IO, err error) { paths, err := NewFifos(id) if err != nil { @@ -91,7 +91,7 @@ func NewIOWithTerminal(stdin io.Reader, stdout, stderr io.Writer, terminal bool) os.RemoveAll(paths.Dir) } }() - cfg := IOConfig{ + cfg := Config{ Terminal: terminal, Stdout: paths.Out, Stderr: paths.Err, @@ -113,12 +113,12 @@ func NewIOWithTerminal(stdin io.Reader, stdout, stderr io.Writer, terminal bool) } // WithAttach attaches the existing io for a task to the provided io.Reader/Writers -func WithAttach(stdin io.Reader, stdout, stderr io.Writer) IOAttach { +func WithAttach(stdin io.Reader, stdout, stderr io.Writer) Attach { return func(paths *FIFOSet) (IO, error) { if paths == nil { return nil, fmt.Errorf("cannot attach to existing fifos") } - cfg := IOConfig{ + cfg := Config{ Terminal: paths.Terminal, Stdout: paths.Out, Stderr: paths.Err, diff --git a/vendor/github.com/containerd/containerd/io_unix.go b/vendor/github.com/containerd/containerd/cio/io_unix.go similarity index 97% rename from vendor/github.com/containerd/containerd/io_unix.go rename to vendor/github.com/containerd/containerd/cio/io_unix.go index 08aba14bac..c18f7ecf95 100644 --- a/vendor/github.com/containerd/containerd/io_unix.go +++ b/vendor/github.com/containerd/containerd/cio/io_unix.go @@ -1,6 +1,6 @@ // +build !windows -package containerd +package cio import ( "context" @@ -139,9 +139,9 @@ func (f *DirectIO) IOAttach(set *FIFOSet) (IO, error) { return f, nil } -// Config returns the IOConfig -func (f *DirectIO) Config() IOConfig { - return IOConfig{ +// Config returns the Config +func (f *DirectIO) Config() Config { + return Config{ Terminal: f.terminal, Stdin: f.set.In, Stdout: f.set.Out, diff --git a/vendor/github.com/containerd/containerd/io_windows.go b/vendor/github.com/containerd/containerd/cio/io_windows.go similarity index 99% rename from vendor/github.com/containerd/containerd/io_windows.go rename to vendor/github.com/containerd/containerd/cio/io_windows.go index e37568c26e..1458c31739 100644 --- a/vendor/github.com/containerd/containerd/io_windows.go +++ b/vendor/github.com/containerd/containerd/cio/io_windows.go @@ -1,4 +1,4 @@ 
-package containerd +package cio import ( "fmt" diff --git a/vendor/github.com/containerd/containerd/client.go b/vendor/github.com/containerd/containerd/client.go index 663f2fab86..5c20335ea1 100644 --- a/vendor/github.com/containerd/containerd/client.go +++ b/vendor/github.com/containerd/containerd/client.go @@ -17,7 +17,7 @@ import ( imagesapi "github.com/containerd/containerd/api/services/images/v1" introspectionapi "github.com/containerd/containerd/api/services/introspection/v1" namespacesapi "github.com/containerd/containerd/api/services/namespaces/v1" - snapshotapi "github.com/containerd/containerd/api/services/snapshot/v1" + snapshotsapi "github.com/containerd/containerd/api/services/snapshots/v1" "github.com/containerd/containerd/api/services/tasks/v1" versionservice "github.com/containerd/containerd/api/services/version/v1" "github.com/containerd/containerd/containers" @@ -33,14 +33,9 @@ import ( "github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes/docker" "github.com/containerd/containerd/remotes/docker/schema1" - contentservice "github.com/containerd/containerd/services/content" - diffservice "github.com/containerd/containerd/services/diff" - imagesservice "github.com/containerd/containerd/services/images" - namespacesservice "github.com/containerd/containerd/services/namespaces" - snapshotservice "github.com/containerd/containerd/services/snapshot" - "github.com/containerd/containerd/snapshot" + "github.com/containerd/containerd/snapshots" "github.com/containerd/typeurl" - pempty "github.com/golang/protobuf/ptypes/empty" + ptypes "github.com/gogo/protobuf/types" ocispec "github.com/opencontainers/image-spec/specs-go/v1" specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" @@ -138,7 +133,7 @@ func (c *Client) Containers(ctx context.Context, filters ...string) ([]Container // NewContainer will create a new container in container with the provided id // the id must be unique within the namespace func (c *Client) NewContainer(ctx context.Context, id string, opts ...NewContainerOpts) (Container, error) { - ctx, done, err := c.withLease(ctx) + ctx, done, err := c.WithLease(ctx) if err != nil { return nil, err } @@ -219,7 +214,7 @@ func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (Image } store := c.ContentStore() - ctx, done, err := c.withLease(ctx) + ctx, done, err := c.WithLease(ctx) if err != nil { return nil, err } @@ -426,7 +421,7 @@ func (c *Client) Close() error { // NamespaceService returns the underlying Namespaces Store func (c *Client) NamespaceService() namespaces.Store { - return namespacesservice.NewStoreFromClient(namespacesapi.NewNamespacesClient(c.conn)) + return NewNamespaceStoreFromClient(namespacesapi.NewNamespacesClient(c.conn)) } // ContainerService returns the underlying container Store @@ -436,12 +431,12 @@ func (c *Client) ContainerService() containers.Store { // ContentStore returns the underlying content Store func (c *Client) ContentStore() content.Store { - return contentservice.NewStoreFromClient(contentapi.NewContentClient(c.conn)) + return NewContentStoreFromClient(contentapi.NewContentClient(c.conn)) } // SnapshotService returns the underlying snapshotter for the provided snapshotter name -func (c *Client) SnapshotService(snapshotterName string) snapshot.Snapshotter { - return snapshotservice.NewSnapshotterFromClient(snapshotapi.NewSnapshotsClient(c.conn), snapshotterName) +func (c *Client) SnapshotService(snapshotterName string) snapshots.Snapshotter { + return 
NewSnapshotterFromClient(snapshotsapi.NewSnapshotsClient(c.conn), snapshotterName) } // TaskService returns the underlying TasksClient @@ -451,12 +446,12 @@ func (c *Client) TaskService() tasks.TasksClient { // ImageService returns the underlying image Store func (c *Client) ImageService() images.Store { - return imagesservice.NewStoreFromClient(imagesapi.NewImagesClient(c.conn)) + return NewImageStoreFromClient(imagesapi.NewImagesClient(c.conn)) } // DiffService returns the underlying Differ func (c *Client) DiffService() diff.Differ { - return diffservice.NewDiffServiceFromClient(diffapi.NewDiffClient(c.conn)) + return NewDiffServiceFromClient(diffapi.NewDiffClient(c.conn)) } // IntrospectionService returns the underlying Introspection Client @@ -489,7 +484,7 @@ type Version struct { // Version returns the version of containerd that the client is connected to func (c *Client) Version(ctx context.Context) (Version, error) { - response, err := c.VersionService().Version(ctx, &pempty.Empty{}) + response, err := c.VersionService().Version(ctx, &ptypes.Empty{}) if err != nil { return Version{}, err } @@ -592,7 +587,7 @@ func (c *Client) Import(ctx context.Context, ref string, reader io.Reader, opts return nil, err } - ctx, done, err := c.withLease(ctx) + ctx, done, err := c.WithLease(ctx) if err != nil { return nil, err } diff --git a/vendor/github.com/containerd/containerd/container.go b/vendor/github.com/containerd/containerd/container.go index a9750eca01..2d5c9aedb6 100644 --- a/vendor/github.com/containerd/containerd/container.go +++ b/vendor/github.com/containerd/containerd/container.go @@ -8,6 +8,7 @@ import ( "github.com/containerd/containerd/api/services/tasks/v1" "github.com/containerd/containerd/api/types" + "github.com/containerd/containerd/cio" "github.com/containerd/containerd/containers" "github.com/containerd/containerd/errdefs" "github.com/containerd/typeurl" @@ -25,17 +26,17 @@ type Container interface { // Delete removes the container Delete(context.Context, ...DeleteOpts) error // NewTask creates a new task based on the container metadata - NewTask(context.Context, IOCreation, ...NewTaskOpts) (Task, error) + NewTask(context.Context, cio.Creation, ...NewTaskOpts) (Task, error) // Spec returns the OCI runtime specification Spec(context.Context) (*specs.Spec, error) // Task returns the current task for the container // - // If IOAttach options are passed the client will reattach to the IO for the running + // If cio.Attach options are passed the client will reattach to the IO for the running // task. 
If no task exists for the container a NotFound error is returned // // Clients must make sure that only one reader is attached to the task and consuming // the output from the task's fifos - Task(context.Context, IOAttach) (Task, error) + Task(context.Context, cio.Attach) (Task, error) // Image returns the image that the container is based on Image(context.Context) (Image, error) // Labels returns the labels set on the container @@ -138,7 +139,7 @@ func (c *container) Delete(ctx context.Context, opts ...DeleteOpts) error { return c.client.ContainerService().Delete(ctx, c.id) } -func (c *container) Task(ctx context.Context, attach IOAttach) (Task, error) { +func (c *container) Task(ctx context.Context, attach cio.Attach) (Task, error) { return c.loadTask(ctx, attach) } @@ -161,7 +162,7 @@ func (c *container) Image(ctx context.Context) (Image, error) { }, nil } -func (c *container) NewTask(ctx context.Context, ioCreate IOCreation, opts ...NewTaskOpts) (Task, error) { +func (c *container) NewTask(ctx context.Context, ioCreate cio.Creation, opts ...NewTaskOpts) (Task, error) { i, err := ioCreate(c.id) if err != nil { return nil, err @@ -251,7 +252,7 @@ func (c *container) Update(ctx context.Context, opts ...UpdateContainerOpts) err return nil } -func (c *container) loadTask(ctx context.Context, ioAttach IOAttach) (Task, error) { +func (c *container) loadTask(ctx context.Context, ioAttach cio.Attach) (Task, error) { response, err := c.client.TaskService().Get(ctx, &tasks.GetRequest{ ContainerID: c.id, }) @@ -262,7 +263,7 @@ func (c *container) loadTask(ctx context.Context, ioAttach IOAttach) (Task, erro } return nil, err } - var i IO + var i cio.IO if ioAttach != nil { if i, err = attachExistingIO(response, ioAttach); err != nil { return nil, err @@ -281,9 +282,9 @@ func (c *container) get(ctx context.Context) (containers.Container, error) { return c.client.ContainerService().Get(ctx, c.id) } -func attachExistingIO(response *tasks.GetResponse, ioAttach IOAttach) (IO, error) { +func attachExistingIO(response *tasks.GetResponse, ioAttach cio.Attach) (cio.IO, error) { // get the existing fifo paths from the task information stored by the daemon - paths := &FIFOSet{ + paths := &cio.FIFOSet{ Dir: getFifoDir([]string{ response.Process.Stdin, response.Process.Stdout, diff --git a/vendor/github.com/containerd/containerd/container_opts.go b/vendor/github.com/containerd/containerd/container_opts.go index 4c534fe12d..fb22a90963 100644 --- a/vendor/github.com/containerd/containerd/container_opts.go +++ b/vendor/github.com/containerd/containerd/container_opts.go @@ -5,10 +5,12 @@ import ( "github.com/containerd/containerd/containers" "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/oci" "github.com/containerd/containerd/platforms" "github.com/containerd/typeurl" "github.com/gogo/protobuf/types" "github.com/opencontainers/image-spec/identity" + specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" ) @@ -164,3 +166,29 @@ func WithContainerExtension(name string, extension interface{}) NewContainerOpts return nil } } + +// WithNewSpec generates a new spec for a new container +func WithNewSpec(opts ...oci.SpecOpts) NewContainerOpts { + return func(ctx context.Context, client *Client, c *containers.Container) error { + s, err := oci.GenerateSpec(ctx, client, c, opts...) 
+ if err != nil { + return err + } + c.Spec, err = typeurl.MarshalAny(s) + return err + } +} + +// WithSpec sets the provided spec on the container +func WithSpec(s *specs.Spec, opts ...oci.SpecOpts) NewContainerOpts { + return func(ctx context.Context, client *Client, c *containers.Container) error { + for _, o := range opts { + if err := o(ctx, client, c, s); err != nil { + return err + } + } + var err error + c.Spec, err = typeurl.MarshalAny(s) + return err + } +} diff --git a/vendor/github.com/containerd/containerd/container_opts_unix.go b/vendor/github.com/containerd/containerd/container_opts_unix.go index e2cd7c939d..bb431e51f7 100644 --- a/vendor/github.com/containerd/containerd/container_opts_unix.go +++ b/vendor/github.com/containerd/containerd/container_opts_unix.go @@ -6,12 +6,17 @@ import ( "context" "encoding/json" "fmt" + "io/ioutil" + "os" + "path/filepath" + "syscall" "github.com/containerd/containerd/api/types" "github.com/containerd/containerd/containers" "github.com/containerd/containerd/content" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" + "github.com/containerd/containerd/mount" "github.com/containerd/containerd/platforms" "github.com/gogo/protobuf/proto" protobuf "github.com/gogo/protobuf/types" @@ -19,6 +24,7 @@ import ( "github.com/opencontainers/image-spec/identity" "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" + "golang.org/x/sys/unix" ) // WithCheckpoint allows a container to be created from the checkpointed information @@ -122,3 +128,91 @@ func decodeIndex(ctx context.Context, store content.Store, id digest.Digest) (*v return &index, nil } + +// WithRemappedSnapshot creates a new snapshot and remaps the uid/gid for the +// filesystem to be used by a container with user namespaces +func WithRemappedSnapshot(id string, i Image, uid, gid uint32) NewContainerOpts { + return withRemappedSnapshotBase(id, i, uid, gid, false) +} + +// WithRemappedSnapshotView is similar to WithRemappedSnapshot but rootfs is mounted as read-only. 
+func WithRemappedSnapshotView(id string, i Image, uid, gid uint32) NewContainerOpts { + return withRemappedSnapshotBase(id, i, uid, gid, true) +} + +func withRemappedSnapshotBase(id string, i Image, uid, gid uint32, readonly bool) NewContainerOpts { + return func(ctx context.Context, client *Client, c *containers.Container) error { + diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), platforms.Default()) + if err != nil { + return err + } + + setSnapshotterIfEmpty(c) + + var ( + snapshotter = client.SnapshotService(c.Snapshotter) + parent = identity.ChainID(diffIDs).String() + usernsID = fmt.Sprintf("%s-%d-%d", parent, uid, gid) + ) + if _, err := snapshotter.Stat(ctx, usernsID); err == nil { + if _, err := snapshotter.Prepare(ctx, id, usernsID); err == nil { + c.SnapshotKey = id + c.Image = i.Name() + return nil + } else if !errdefs.IsNotFound(err) { + return err + } + } + mounts, err := snapshotter.Prepare(ctx, usernsID+"-remap", parent) + if err != nil { + return err + } + if err := remapRootFS(mounts, uid, gid); err != nil { + snapshotter.Remove(ctx, usernsID) + return err + } + if err := snapshotter.Commit(ctx, usernsID, usernsID+"-remap"); err != nil { + return err + } + if readonly { + _, err = snapshotter.View(ctx, id, usernsID) + } else { + _, err = snapshotter.Prepare(ctx, id, usernsID) + } + if err != nil { + return err + } + c.SnapshotKey = id + c.Image = i.Name() + return nil + } +} + +func remapRootFS(mounts []mount.Mount, uid, gid uint32) error { + root, err := ioutil.TempDir("", "ctd-remap") + if err != nil { + return err + } + defer os.RemoveAll(root) + for _, m := range mounts { + if err := m.Mount(root); err != nil { + return err + } + } + defer unix.Unmount(root, 0) + return filepath.Walk(root, incrementFS(root, uid, gid)) +} + +func incrementFS(root string, uidInc, gidInc uint32) filepath.WalkFunc { + return func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + var ( + stat = info.Sys().(*syscall.Stat_t) + u, g = int(stat.Uid + uidInc), int(stat.Gid + gidInc) + ) + // be sure the lchown the path as to not de-reference the symlink to a host file + return os.Lchown(path, u, g) + } +} diff --git a/vendor/github.com/containerd/containerd/content/helpers.go b/vendor/github.com/containerd/containerd/content/helpers.go index 32efc6ca0e..86b853685b 100644 --- a/vendor/github.com/containerd/containerd/content/helpers.go +++ b/vendor/github.com/containerd/containerd/content/helpers.go @@ -2,7 +2,6 @@ package content import ( "context" - "fmt" "io" "sync" @@ -11,13 +10,12 @@ import ( "github.com/pkg/errors" ) -var ( - bufPool = sync.Pool{ - New: func() interface{} { - return make([]byte, 1<<20) - }, - } -) +var bufPool = sync.Pool{ + New: func() interface{} { + buffer := make([]byte, 1<<20) + return &buffer + }, +} // NewReader returns a io.Reader from a ReaderAt func NewReader(ra ReaderAt) io.Reader { @@ -89,10 +87,10 @@ func Copy(ctx context.Context, cw Writer, r io.Reader, size int64, expected dige } } - buf := bufPool.Get().([]byte) + buf := bufPool.Get().(*[]byte) defer bufPool.Put(buf) - if _, err := io.CopyBuffer(cw, r, buf); err != nil { + if _, err := io.CopyBuffer(cw, r, *buf); err != nil { return err } @@ -119,7 +117,7 @@ func seekReader(r io.Reader, offset, size int64) (io.Reader, error) { if ok { nn, err := seeker.Seek(offset, io.SeekStart) if nn != offset { - return nil, fmt.Errorf("failed to seek to offset %v", offset) + return nil, errors.Wrapf(err, "failed to seek to offset %v", offset) } if err != nil { diff 
--git a/vendor/github.com/containerd/containerd/content/local/locks.go b/vendor/github.com/containerd/containerd/content/local/locks.go index cf5d0c59fb..9a6c62fd85 100644 --- a/vendor/github.com/containerd/containerd/content/local/locks.go +++ b/vendor/github.com/containerd/containerd/content/local/locks.go @@ -8,7 +8,6 @@ import ( ) // Handles locking references -// TODO: use boltdb for lock status var ( // locks lets us lock in process diff --git a/vendor/github.com/containerd/containerd/content/local/store.go b/vendor/github.com/containerd/containerd/content/local/store.go index 14a98881dd..56f99bb097 100644 --- a/vendor/github.com/containerd/containerd/content/local/store.go +++ b/vendor/github.com/containerd/containerd/content/local/store.go @@ -5,6 +5,7 @@ import ( "fmt" "io" "io/ioutil" + "math/rand" "os" "path/filepath" "strconv" @@ -20,13 +21,12 @@ import ( "github.com/pkg/errors" ) -var ( - bufPool = sync.Pool{ - New: func() interface{} { - return make([]byte, 1<<20) - }, - } -) +var bufPool = sync.Pool{ + New: func() interface{} { + buffer := make([]byte, 1<<20) + return &buffer + }, +} // LabelStore is used to store mutable labels for digests type LabelStore interface { @@ -219,7 +219,7 @@ func (s *store) Walk(ctx context.Context, fn content.WalkFunc, filters ...string // TODO(stevvooe): There are few more cases with subdirs that should be // handled in case the layout gets corrupted. This isn't strict enough - // an may spew bad data. + // and may spew bad data. if path == root { return nil @@ -324,12 +324,28 @@ func (s *store) status(ingestPath string) (content.Status, error) { return content.Status{}, err } + startedAt, err := readFileTimestamp(filepath.Join(ingestPath, "startedat")) + if err != nil { + return content.Status{}, errors.Wrapf(err, "could not read startedat") + } + + updatedAt, err := readFileTimestamp(filepath.Join(ingestPath, "updatedat")) + if err != nil { + return content.Status{}, errors.Wrapf(err, "could not read updatedat") + } + + // because we don't write updatedat on every write, the mod time may + // actually be more up to date. + if fi.ModTime().After(updatedAt) { + updatedAt = fi.ModTime() + } + return content.Status{ Ref: ref, Offset: fi.Size(), Total: s.total(ingestPath), - UpdatedAt: fi.ModTime(), - StartedAt: getStartTime(fi), + UpdatedAt: updatedAt, + StartedAt: startedAt, }, nil } @@ -369,6 +385,37 @@ func (s *store) total(ingestPath string) int64 { // // The argument `ref` is used to uniquely identify a long-lived writer transaction. func (s *store) Writer(ctx context.Context, ref string, total int64, expected digest.Digest) (content.Writer, error) { + var lockErr error + for count := uint64(0); count < 10; count++ { + time.Sleep(time.Millisecond * time.Duration(rand.Intn(1< 0 { if err := ioutil.WriteFile(filepath.Join(path, "total"), []byte(fmt.Sprint(total)), 0666); err != nil { return nil, err } } - - startedAt = time.Now() - updatedAt = startedAt } fp, err := os.OpenFile(data, os.O_WRONLY|os.O_CREATE, 0666) @@ -452,6 +503,10 @@ func (s *store) Writer(ctx context.Context, ref string, total int64, expected di return nil, errors.Wrap(err, "failed to open data file") } + if _, err := fp.Seek(offset, io.SeekStart); err != nil { + return nil, errors.Wrap(err, "could not seek to current write offset") + } + return &writer{ s: s, fp: fp, @@ -509,3 +564,30 @@ func readFileString(path string) (string, error) { p, err := ioutil.ReadFile(path) return string(p), err } + +// readFileTimestamp reads a file with just a timestamp present. 
+func readFileTimestamp(p string) (time.Time, error) { + b, err := ioutil.ReadFile(p) + if err != nil { + if os.IsNotExist(err) { + err = errors.Wrap(errdefs.ErrNotFound, err.Error()) + } + return time.Time{}, err + } + + var t time.Time + if err := t.UnmarshalText(b); err != nil { + return time.Time{}, errors.Wrapf(err, "could not parse timestamp file %v", p) + } + + return t, nil +} + +func writeTimestampFile(p string, t time.Time) error { + b, err := t.MarshalText() + if err != nil { + return err + } + + return ioutil.WriteFile(p, b, 0666) +} diff --git a/vendor/github.com/containerd/containerd/content/local/store_unix.go b/vendor/github.com/containerd/containerd/content/local/store_unix.go index 0d500b84d0..c0587e1b23 100644 --- a/vendor/github.com/containerd/containerd/content/local/store_unix.go +++ b/vendor/github.com/containerd/containerd/content/local/store_unix.go @@ -10,15 +10,6 @@ import ( "github.com/containerd/containerd/sys" ) -func getStartTime(fi os.FileInfo) time.Time { - if st, ok := fi.Sys().(*syscall.Stat_t); ok { - return time.Unix(int64(sys.StatCtime(st).Sec), - int64(sys.StatCtime(st).Nsec)) - } - - return fi.ModTime() -} - func getATime(fi os.FileInfo) time.Time { if st, ok := fi.Sys().(*syscall.Stat_t); ok { return time.Unix(int64(sys.StatAtime(st).Sec), diff --git a/vendor/github.com/containerd/containerd/content/local/store_windows.go b/vendor/github.com/containerd/containerd/content/local/store_windows.go index 5f12ea5c42..f745aafdbd 100644 --- a/vendor/github.com/containerd/containerd/content/local/store_windows.go +++ b/vendor/github.com/containerd/containerd/content/local/store_windows.go @@ -5,10 +5,6 @@ import ( "time" ) -func getStartTime(fi os.FileInfo) time.Time { - return fi.ModTime() -} - func getATime(fi os.FileInfo) time.Time { return fi.ModTime() } diff --git a/vendor/github.com/containerd/containerd/content/local/writer.go b/vendor/github.com/containerd/containerd/content/local/writer.go index 8f1e92ded6..fbf39f755a 100644 --- a/vendor/github.com/containerd/containerd/content/local/writer.go +++ b/vendor/github.com/containerd/containerd/content/local/writer.go @@ -152,6 +152,7 @@ func (w *writer) Close() (err error) { if w.fp != nil { w.fp.Sync() err = w.fp.Close() + writeTimestampFile(filepath.Join(w.path, "updatedat"), w.updatedAt) w.fp = nil unlock(w.ref) return diff --git a/vendor/github.com/containerd/containerd/services/content/reader.go b/vendor/github.com/containerd/containerd/content_reader.go similarity index 97% rename from vendor/github.com/containerd/containerd/services/content/reader.go rename to vendor/github.com/containerd/containerd/content_reader.go index 024251c6d6..7acd2d30e8 100644 --- a/vendor/github.com/containerd/containerd/services/content/reader.go +++ b/vendor/github.com/containerd/containerd/content_reader.go @@ -1,4 +1,4 @@ -package content +package containerd import ( "context" diff --git a/vendor/github.com/containerd/containerd/services/content/store.go b/vendor/github.com/containerd/containerd/content_store.go similarity index 74% rename from vendor/github.com/containerd/containerd/services/content/store.go rename to vendor/github.com/containerd/containerd/content_store.go index b5aaa8577c..1b539694d1 100644 --- a/vendor/github.com/containerd/containerd/services/content/store.go +++ b/vendor/github.com/containerd/containerd/content_store.go @@ -1,4 +1,4 @@ -package content +package containerd import ( "context" @@ -11,18 +11,18 @@ import ( digest "github.com/opencontainers/go-digest" ) -type remoteStore struct { 
+type remoteContent struct { client contentapi.ContentClient } -// NewStoreFromClient returns a new content store -func NewStoreFromClient(client contentapi.ContentClient) content.Store { - return &remoteStore{ +// NewContentStoreFromClient returns a new content store +func NewContentStoreFromClient(client contentapi.ContentClient) content.Store { + return &remoteContent{ client: client, } } -func (rs *remoteStore) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) { +func (rs *remoteContent) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) { resp, err := rs.client.Info(ctx, &contentapi.InfoRequest{ Digest: dgst, }) @@ -33,7 +33,7 @@ func (rs *remoteStore) Info(ctx context.Context, dgst digest.Digest) (content.In return infoFromGRPC(resp.Info), nil } -func (rs *remoteStore) Walk(ctx context.Context, fn content.WalkFunc, filters ...string) error { +func (rs *remoteContent) Walk(ctx context.Context, fn content.WalkFunc, filters ...string) error { session, err := rs.client.List(ctx, &contentapi.ListContentRequest{ Filters: filters, }) @@ -61,7 +61,7 @@ func (rs *remoteStore) Walk(ctx context.Context, fn content.WalkFunc, filters .. return nil } -func (rs *remoteStore) Delete(ctx context.Context, dgst digest.Digest) error { +func (rs *remoteContent) Delete(ctx context.Context, dgst digest.Digest) error { if _, err := rs.client.Delete(ctx, &contentapi.DeleteContentRequest{ Digest: dgst, }); err != nil { @@ -71,7 +71,7 @@ func (rs *remoteStore) Delete(ctx context.Context, dgst digest.Digest) error { return nil } -func (rs *remoteStore) ReaderAt(ctx context.Context, dgst digest.Digest) (content.ReaderAt, error) { +func (rs *remoteContent) ReaderAt(ctx context.Context, dgst digest.Digest) (content.ReaderAt, error) { i, err := rs.Info(ctx, dgst) if err != nil { return nil, err @@ -85,7 +85,7 @@ func (rs *remoteStore) ReaderAt(ctx context.Context, dgst digest.Digest) (conten }, nil } -func (rs *remoteStore) Status(ctx context.Context, ref string) (content.Status, error) { +func (rs *remoteContent) Status(ctx context.Context, ref string) (content.Status, error) { resp, err := rs.client.Status(ctx, &contentapi.StatusRequest{ Ref: ref, }) @@ -104,7 +104,7 @@ func (rs *remoteStore) Status(ctx context.Context, ref string) (content.Status, }, nil } -func (rs *remoteStore) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) { +func (rs *remoteContent) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) { resp, err := rs.client.Update(ctx, &contentapi.UpdateRequest{ Info: infoToGRPC(info), UpdateMask: &protobuftypes.FieldMask{ @@ -117,7 +117,7 @@ func (rs *remoteStore) Update(ctx context.Context, info content.Info, fieldpaths return infoFromGRPC(resp.Info), nil } -func (rs *remoteStore) ListStatuses(ctx context.Context, filters ...string) ([]content.Status, error) { +func (rs *remoteContent) ListStatuses(ctx context.Context, filters ...string) ([]content.Status, error) { resp, err := rs.client.ListStatuses(ctx, &contentapi.ListStatusesRequest{ Filters: filters, }) @@ -140,7 +140,7 @@ func (rs *remoteStore) ListStatuses(ctx context.Context, filters ...string) ([]c return statuses, nil } -func (rs *remoteStore) Writer(ctx context.Context, ref string, size int64, expected digest.Digest) (content.Writer, error) { +func (rs *remoteContent) Writer(ctx context.Context, ref string, size int64, expected digest.Digest) (content.Writer, error) { wrclient, offset, err := rs.negotiate(ctx, ref, size, 
expected) if err != nil { return nil, errdefs.FromGRPC(err) @@ -154,7 +154,7 @@ func (rs *remoteStore) Writer(ctx context.Context, ref string, size int64, expec } // Abort implements asynchronous abort. It starts a new write session on the ref l -func (rs *remoteStore) Abort(ctx context.Context, ref string) error { +func (rs *remoteContent) Abort(ctx context.Context, ref string) error { if _, err := rs.client.Abort(ctx, &contentapi.AbortRequest{ Ref: ref, }); err != nil { @@ -164,7 +164,7 @@ func (rs *remoteStore) Abort(ctx context.Context, ref string) error { return nil } -func (rs *remoteStore) negotiate(ctx context.Context, ref string, size int64, expected digest.Digest) (contentapi.Content_WriteClient, int64, error) { +func (rs *remoteContent) negotiate(ctx context.Context, ref string, size int64, expected digest.Digest) (contentapi.Content_WriteClient, int64, error) { wrclient, err := rs.client.Write(ctx) if err != nil { return nil, 0, err diff --git a/vendor/github.com/containerd/containerd/services/content/writer.go b/vendor/github.com/containerd/containerd/content_writer.go similarity index 96% rename from vendor/github.com/containerd/containerd/services/content/writer.go rename to vendor/github.com/containerd/containerd/content_writer.go index cb45957f0a..b18f3512a2 100644 --- a/vendor/github.com/containerd/containerd/services/content/writer.go +++ b/vendor/github.com/containerd/containerd/content_writer.go @@ -1,4 +1,4 @@ -package content +package containerd import ( "context" @@ -41,7 +41,7 @@ func (rw *remoteWriter) Status() (content.Status, error) { Action: contentapi.WriteActionStat, }) if err != nil { - return content.Status{}, err + return content.Status{}, errors.Wrap(err, "error getting writer status") } return content.Status{ diff --git a/vendor/github.com/containerd/containerd/services/diff/client.go b/vendor/github.com/containerd/containerd/diff.go similarity index 85% rename from vendor/github.com/containerd/containerd/services/diff/client.go rename to vendor/github.com/containerd/containerd/diff.go index d34848be20..4e47efafc4 100644 --- a/vendor/github.com/containerd/containerd/services/diff/client.go +++ b/vendor/github.com/containerd/containerd/diff.go @@ -1,4 +1,4 @@ -package diff +package containerd import ( diffapi "github.com/containerd/containerd/api/services/diff/v1" @@ -12,16 +12,16 @@ import ( // NewDiffServiceFromClient returns a new diff service which communicates // over a GRPC connection. 
func NewDiffServiceFromClient(client diffapi.DiffClient) diff.Differ { - return &remote{ + return &diffRemote{ client: client, } } -type remote struct { +type diffRemote struct { client diffapi.DiffClient } -func (r *remote) Apply(ctx context.Context, diff ocispec.Descriptor, mounts []mount.Mount) (ocispec.Descriptor, error) { +func (r *diffRemote) Apply(ctx context.Context, diff ocispec.Descriptor, mounts []mount.Mount) (ocispec.Descriptor, error) { req := &diffapi.ApplyRequest{ Diff: fromDescriptor(diff), Mounts: fromMounts(mounts), @@ -33,7 +33,7 @@ func (r *remote) Apply(ctx context.Context, diff ocispec.Descriptor, mounts []mo return toDescriptor(resp.Applied), nil } -func (r *remote) DiffMounts(ctx context.Context, a, b []mount.Mount, opts ...diff.Opt) (ocispec.Descriptor, error) { +func (r *diffRemote) DiffMounts(ctx context.Context, a, b []mount.Mount, opts ...diff.Opt) (ocispec.Descriptor, error) { var config diff.Config for _, opt := range opts { if err := opt(&config); err != nil { diff --git a/vendor/github.com/containerd/containerd/errdefs/grpc.go b/vendor/github.com/containerd/containerd/errdefs/grpc.go index 2aa2e11b4b..6a3bbcaa14 100644 --- a/vendor/github.com/containerd/containerd/errdefs/grpc.go +++ b/vendor/github.com/containerd/containerd/errdefs/grpc.go @@ -4,7 +4,6 @@ import ( "strings" "github.com/pkg/errors" - "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) @@ -61,7 +60,7 @@ func FromGRPC(err error) error { var cls error // divide these into error classes, becomes the cause - switch grpc.Code(err) { + switch code(err) { case codes.InvalidArgument: cls = ErrInvalidArgument case codes.AlreadyExists: @@ -94,7 +93,7 @@ func FromGRPC(err error) error { // Effectively, we just remove the string of cls from the end of err if it // appears there. func rebaseMessage(cls error, err error) string { - desc := grpc.ErrorDesc(err) + desc := errDesc(err) clss := cls.Error() if desc == clss { return "" @@ -107,3 +106,17 @@ func isGRPCError(err error) bool { _, ok := status.FromError(err) return ok } + +func code(err error) codes.Code { + if s, ok := status.FromError(err); ok { + return s.Code() + } + return codes.Unknown +} + +func errDesc(err error) string { + if s, ok := status.FromError(err); ok { + return s.Message() + } + return err.Error() +} diff --git a/vendor/github.com/containerd/containerd/events/events.go b/vendor/github.com/containerd/containerd/events/events.go index efe2f598b9..2bfedb1e83 100644 --- a/vendor/github.com/containerd/containerd/events/events.go +++ b/vendor/github.com/containerd/containerd/events/events.go @@ -2,10 +2,50 @@ package events import ( "context" + "time" - events "github.com/containerd/containerd/api/services/events/v1" + "github.com/containerd/typeurl" + "github.com/gogo/protobuf/types" ) +// Envelope provides the packaging for an event. +type Envelope struct { + Timestamp time.Time + Namespace string + Topic string + Event *types.Any +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. 
+func (e *Envelope) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + // unhandled: timestamp + case "namespace": + return string(e.Namespace), len(e.Namespace) > 0 + case "topic": + return string(e.Topic), len(e.Topic) > 0 + case "event": + decoded, err := typeurl.UnmarshalAny(e.Event) + if err != nil { + return "", false + } + + adaptor, ok := decoded.(interface { + Field([]string) (string, bool) + }) + if !ok { + return "", false + } + return adaptor.Field(fieldpath[1:]) + } + return "", false +} + // Event is a generic interface for any type of event type Event interface{} @@ -16,16 +56,10 @@ type Publisher interface { // Forwarder forwards an event to the underlying event bus type Forwarder interface { - Forward(ctx context.Context, envelope *events.Envelope) error -} - -type publisherFunc func(ctx context.Context, topic string, event Event) error - -func (fn publisherFunc) Publish(ctx context.Context, topic string, event Event) error { - return fn(ctx, topic, event) + Forward(ctx context.Context, envelope *Envelope) error } // Subscriber allows callers to subscribe to events type Subscriber interface { - Subscribe(ctx context.Context, filters ...string) (ch <-chan *events.Envelope, errs <-chan error) + Subscribe(ctx context.Context, filters ...string) (ch <-chan *Envelope, errs <-chan error) } diff --git a/vendor/github.com/containerd/containerd/events/exchange/exchange.go b/vendor/github.com/containerd/containerd/events/exchange/exchange.go index 3fefb9c255..3178fc407a 100644 --- a/vendor/github.com/containerd/containerd/events/exchange/exchange.go +++ b/vendor/github.com/containerd/containerd/events/exchange/exchange.go @@ -5,7 +5,6 @@ import ( "strings" "time" - v1 "github.com/containerd/containerd/api/services/events/v1" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/events" "github.com/containerd/containerd/filters" @@ -31,11 +30,15 @@ func NewExchange() *Exchange { } } +var _ events.Publisher = &Exchange{} +var _ events.Forwarder = &Exchange{} +var _ events.Subscriber = &Exchange{} + // Forward accepts an envelope to be direcly distributed on the exchange. // // This is useful when an event is forwaded on behalf of another namespace or // when the event is propagated on behalf of another publisher. -func (e *Exchange) Forward(ctx context.Context, envelope *v1.Envelope) (err error) { +func (e *Exchange) Forward(ctx context.Context, envelope *events.Envelope) (err error) { if err := validateEnvelope(envelope); err != nil { return err } @@ -64,7 +67,7 @@ func (e *Exchange) Publish(ctx context.Context, topic string, event events.Event var ( namespace string encoded *types.Any - envelope v1.Envelope + envelope events.Envelope ) namespace, err = namespaces.NamespaceRequired(ctx) @@ -109,9 +112,9 @@ func (e *Exchange) Publish(ctx context.Context, topic string, event events.Event // Zero or more filters may be provided as strings. Only events that match // *any* of the provided filters will be sent on the channel. The filters use // the standard containerd filters package syntax. 
-func (e *Exchange) Subscribe(ctx context.Context, fs ...string) (ch <-chan *v1.Envelope, errs <-chan error) { +func (e *Exchange) Subscribe(ctx context.Context, fs ...string) (ch <-chan *events.Envelope, errs <-chan error) { var ( - evch = make(chan *v1.Envelope) + evch = make(chan *events.Envelope) errq = make(chan error, 1) channel = goevents.NewChannel(0) queue = goevents.NewQueue(channel) @@ -151,7 +154,7 @@ func (e *Exchange) Subscribe(ctx context.Context, fs ...string) (ch <-chan *v1.E for { select { case ev := <-channel.C: - env, ok := ev.(*v1.Envelope) + env, ok := ev.(*events.Envelope) if !ok { // TODO(stevvooe): For the most part, we are well protected // from this condition. Both Forward and Publish protect @@ -205,7 +208,7 @@ func validateTopic(topic string) error { return nil } -func validateEnvelope(envelope *v1.Envelope) error { +func validateEnvelope(envelope *events.Envelope) error { if err := namespaces.Validate(envelope.Namespace); err != nil { return errors.Wrapf(err, "event envelope has invalid namespace") } diff --git a/vendor/github.com/containerd/containerd/fs/copy.go b/vendor/github.com/containerd/containerd/fs/copy.go index 0d11fa527d..e8f452819b 100644 --- a/vendor/github.com/containerd/containerd/fs/copy.go +++ b/vendor/github.com/containerd/containerd/fs/copy.go @@ -9,13 +9,12 @@ import ( "github.com/pkg/errors" ) -var ( - bufferPool = &sync.Pool{ - New: func() interface{} { - return make([]byte, 32*1024) - }, - } -) +var bufferPool = &sync.Pool{ + New: func() interface{} { + buffer := make([]byte, 32*1024) + return &buffer + }, +} // CopyDir copies the directory from src to dst. // Most efficient copy of files is attempted. diff --git a/vendor/github.com/containerd/containerd/fs/copy_linux.go b/vendor/github.com/containerd/containerd/fs/copy_linux.go index efe4753e07..c1fb2d1c40 100644 --- a/vendor/github.com/containerd/containerd/fs/copy_linux.go +++ b/vendor/github.com/containerd/containerd/fs/copy_linux.go @@ -43,8 +43,8 @@ func copyFileContent(dst, src *os.File) error { return errors.Wrap(err, "copy file range failed") } - buf := bufferPool.Get().([]byte) - _, err = io.CopyBuffer(dst, src, buf) + buf := bufferPool.Get().(*[]byte) + _, err = io.CopyBuffer(dst, src, *buf) bufferPool.Put(buf) return err } diff --git a/vendor/github.com/containerd/containerd/fs/copy_unix.go b/vendor/github.com/containerd/containerd/fs/copy_unix.go index 6234f3da38..b31a14fcdb 100644 --- a/vendor/github.com/containerd/containerd/fs/copy_unix.go +++ b/vendor/github.com/containerd/containerd/fs/copy_unix.go @@ -34,8 +34,8 @@ func copyFileInfo(fi os.FileInfo, name string) error { } func copyFileContent(dst, src *os.File) error { - buf := bufferPool.Get().([]byte) - _, err := io.CopyBuffer(dst, src, buf) + buf := bufferPool.Get().(*[]byte) + _, err := io.CopyBuffer(dst, src, *buf) bufferPool.Put(buf) return err diff --git a/vendor/github.com/containerd/containerd/fs/copy_windows.go b/vendor/github.com/containerd/containerd/fs/copy_windows.go index fb4933c252..6fb3de5710 100644 --- a/vendor/github.com/containerd/containerd/fs/copy_windows.go +++ b/vendor/github.com/containerd/containerd/fs/copy_windows.go @@ -18,8 +18,8 @@ func copyFileInfo(fi os.FileInfo, name string) error { } func copyFileContent(dst, src *os.File) error { - buf := bufferPool.Get().([]byte) - _, err := io.CopyBuffer(dst, src, buf) + buf := bufferPool.Get().(*[]byte) + _, err := io.CopyBuffer(dst, src, *buf) bufferPool.Put(buf) return err } diff --git a/vendor/github.com/containerd/containerd/fs/diff_unix.go 
b/vendor/github.com/containerd/containerd/fs/diff_unix.go index 36a0f3fde7..3751814443 100644 --- a/vendor/github.com/containerd/containerd/fs/diff_unix.go +++ b/vendor/github.com/containerd/containerd/fs/diff_unix.go @@ -5,36 +5,12 @@ package fs import ( "bytes" "os" - "path/filepath" - "strings" "syscall" "github.com/containerd/continuity/sysx" "github.com/pkg/errors" ) -// whiteouts are files with a special meaning for the layered filesystem. -// Docker uses AUFS whiteout files inside exported archives. In other -// filesystems these files are generated/handled on tar creation/extraction. - -// whiteoutPrefix prefix means file is a whiteout. If this is followed by a -// filename this means that file has been removed from the base layer. -const whiteoutPrefix = ".wh." - -// whiteoutMetaPrefix prefix means whiteout has a special meaning and is not -// for removing an actual file. Normally these files are excluded from exported -// archives. -const whiteoutMetaPrefix = whiteoutPrefix + whiteoutPrefix - -// whiteoutLinkDir is a directory AUFS uses for storing hardlink links to other -// layers. Normally these should not go into exported archives and all changed -// hardlinks should be copied to the top layer. -const whiteoutLinkDir = whiteoutMetaPrefix + "plnk" - -// whiteoutOpaqueDir file means directory has been made opaque - meaning -// readdir calls to this directory do not follow to lower layers. -const whiteoutOpaqueDir = whiteoutMetaPrefix + ".opq" - // detectDirDiff returns diff dir options if a directory could // be found in the mount info for upper which is the direct // diff with the provided lower directory @@ -45,26 +21,6 @@ func detectDirDiff(upper, lower string) *diffDirOptions { return nil } -func aufsMetadataSkip(path string) (skip bool, err error) { - skip, err = filepath.Match(string(os.PathSeparator)+whiteoutMetaPrefix+"*", path) - if err != nil { - skip = true - } - return -} - -func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) { - f := filepath.Base(path) - - // If there is a whiteout, then the file was removed - if strings.HasPrefix(f, whiteoutPrefix) { - originalFile := f[len(whiteoutPrefix):] - return filepath.Join(filepath.Dir(path), originalFile), nil - } - - return "", nil -} - // compareSysStat returns whether the stats are equivalent, // whether the files are considered the same file, and // an error diff --git a/vendor/github.com/containerd/containerd/fs/diff_windows.go b/vendor/github.com/containerd/containerd/fs/diff_windows.go index 7bbd662849..8eed36507e 100644 --- a/vendor/github.com/containerd/containerd/fs/diff_windows.go +++ b/vendor/github.com/containerd/containerd/fs/diff_windows.go @@ -1,14 +1,25 @@ package fs -import "os" +import ( + "os" + + "golang.org/x/sys/windows" +) func detectDirDiff(upper, lower string) *diffDirOptions { return nil } func compareSysStat(s1, s2 interface{}) (bool, error) { - // TODO: Use windows specific sys type - return false, nil + f1, ok := s1.(windows.Win32FileAttributeData) + if !ok { + return false, nil + } + f2, ok := s2.(windows.Win32FileAttributeData) + if !ok { + return false, nil + } + return f1.FileAttributes == f2.FileAttributes, nil } func compareCapabilities(p1, p2 string) (bool, error) { diff --git a/vendor/github.com/containerd/containerd/fs/path.go b/vendor/github.com/containerd/containerd/fs/path.go index 644b1ee2ec..412da67115 100644 --- a/vendor/github.com/containerd/containerd/fs/path.go +++ b/vendor/github.com/containerd/containerd/fs/path.go @@ -74,11 +74,14 @@ func sameFile(f1, 
f2 *currentPath) (bool, error) { // If the timestamp may have been truncated in one of the // files, check content of file to determine difference if t1.Nanosecond() == 0 || t2.Nanosecond() == 0 { - if f1.f.Size() > 0 { - eq, err := compareFileContent(f1.fullPath, f2.fullPath) - if err != nil || !eq { - return eq, err - } + var eq bool + if (f1.f.Mode() & os.ModeSymlink) == os.ModeSymlink { + eq, err = compareSymlinkTarget(f1.fullPath, f2.fullPath) + } else if f1.f.Size() > 0 { + eq, err = compareFileContent(f1.fullPath, f2.fullPath) + } + if err != nil || !eq { + return eq, err } } else if t1.Nanosecond() != t2.Nanosecond() { return false, nil @@ -88,6 +91,18 @@ func sameFile(f1, f2 *currentPath) (bool, error) { return true, nil } +func compareSymlinkTarget(p1, p2 string) (bool, error) { + t1, err := os.Readlink(p1) + if err != nil { + return false, err + } + t2, err := os.Readlink(p2) + if err != nil { + return false, err + } + return t1 == t2, nil +} + const compareChuckSize = 32 * 1024 // compareFileContent compares the content of 2 same sized files diff --git a/vendor/github.com/containerd/containerd/gc/gc.go b/vendor/github.com/containerd/containerd/gc/gc.go index 70838a7627..66898c5deb 100644 --- a/vendor/github.com/containerd/containerd/gc/gc.go +++ b/vendor/github.com/containerd/containerd/gc/gc.go @@ -36,7 +36,7 @@ func Tricolor(roots []Node, refs func(ref Node) ([]Node, error)) (map[Node]struc var ( grays []Node // maintain a gray "stack" seen = map[Node]struct{}{} // or not "white", basically "seen" - reachable = map[Node]struct{}{} // or "block", in tri-color parlance + reachable = map[Node]struct{}{} // or "black", in tri-color parlance ) grays = append(grays, roots...) diff --git a/vendor/github.com/containerd/containerd/image.go b/vendor/github.com/containerd/containerd/image.go index 4f978e2ea7..202fc42bd4 100644 --- a/vendor/github.com/containerd/containerd/image.go +++ b/vendor/github.com/containerd/containerd/image.go @@ -9,7 +9,7 @@ import ( "github.com/containerd/containerd/images" "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/rootfs" - "github.com/containerd/containerd/snapshot" + "github.com/containerd/containerd/snapshots" digest "github.com/opencontainers/go-digest" "github.com/opencontainers/image-spec/identity" ocispec "github.com/opencontainers/image-spec/specs-go/v1" @@ -32,6 +32,8 @@ type Image interface { Config(ctx context.Context) (ocispec.Descriptor, error) // IsUnpacked returns whether or not an image is unpacked. 
IsUnpacked(context.Context, string) (bool, error) + // ContentStore provides a content store which contains image blob data + ContentStore() content.Store } var _ = (Image)(&image{}) @@ -86,6 +88,12 @@ func (i *image) IsUnpacked(ctx context.Context, snapshotterName string) (bool, e } func (i *image) Unpack(ctx context.Context, snapshotterName string) error { + ctx, done, err := i.client.WithLease(ctx) + if err != nil { + return err + } + defer done() + layers, err := i.getLayers(ctx, platforms.Default()) if err != nil { return err @@ -104,7 +112,7 @@ func (i *image) Unpack(ctx context.Context, snapshotterName string) error { "containerd.io/uncompressed": layer.Diff.Digest.String(), } - unpacked, err = rootfs.ApplyLayer(ctx, layer, chain, sn, a, snapshot.WithLabels(labels)) + unpacked, err = rootfs.ApplyLayer(ctx, layer, chain, sn, a, snapshots.WithLabels(labels)) if err != nil { return err } @@ -160,3 +168,7 @@ func (i *image) getLayers(ctx context.Context, platform string) ([]rootfs.Layer, } return layers, nil } + +func (i *image) ContentStore() content.Store { + return i.client.ContentStore() +} diff --git a/vendor/github.com/containerd/containerd/image_store.go b/vendor/github.com/containerd/containerd/image_store.go new file mode 100644 index 0000000000..9a3aafc84f --- /dev/null +++ b/vendor/github.com/containerd/containerd/image_store.go @@ -0,0 +1,136 @@ +package containerd + +import ( + "context" + + imagesapi "github.com/containerd/containerd/api/services/images/v1" + "github.com/containerd/containerd/api/types" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images" + ptypes "github.com/gogo/protobuf/types" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +type remoteImages struct { + client imagesapi.ImagesClient +} + +// NewImageStoreFromClient returns a new image store client +func NewImageStoreFromClient(client imagesapi.ImagesClient) images.Store { + return &remoteImages{ + client: client, + } +} + +func (s *remoteImages) Get(ctx context.Context, name string) (images.Image, error) { + resp, err := s.client.Get(ctx, &imagesapi.GetImageRequest{ + Name: name, + }) + if err != nil { + return images.Image{}, errdefs.FromGRPC(err) + } + + return imageFromProto(resp.Image), nil +} + +func (s *remoteImages) List(ctx context.Context, filters ...string) ([]images.Image, error) { + resp, err := s.client.List(ctx, &imagesapi.ListImagesRequest{ + Filters: filters, + }) + if err != nil { + return nil, errdefs.FromGRPC(err) + } + + return imagesFromProto(resp.Images), nil +} + +func (s *remoteImages) Create(ctx context.Context, image images.Image) (images.Image, error) { + created, err := s.client.Create(ctx, &imagesapi.CreateImageRequest{ + Image: imageToProto(&image), + }) + if err != nil { + return images.Image{}, errdefs.FromGRPC(err) + } + + return imageFromProto(&created.Image), nil +} + +func (s *remoteImages) Update(ctx context.Context, image images.Image, fieldpaths ...string) (images.Image, error) { + var updateMask *ptypes.FieldMask + if len(fieldpaths) > 0 { + updateMask = &ptypes.FieldMask{ + Paths: fieldpaths, + } + } + + updated, err := s.client.Update(ctx, &imagesapi.UpdateImageRequest{ + Image: imageToProto(&image), + UpdateMask: updateMask, + }) + if err != nil { + return images.Image{}, errdefs.FromGRPC(err) + } + + return imageFromProto(&updated.Image), nil +} + +func (s *remoteImages) Delete(ctx context.Context, name string, opts ...images.DeleteOpt) error { + var do images.DeleteOptions + for _, opt := range opts { + if 
err := opt(ctx, &do); err != nil { + return err + } + } + _, err := s.client.Delete(ctx, &imagesapi.DeleteImageRequest{ + Name: name, + Sync: do.Synchronous, + }) + + return errdefs.FromGRPC(err) +} + +func imageToProto(image *images.Image) imagesapi.Image { + return imagesapi.Image{ + Name: image.Name, + Labels: image.Labels, + Target: descToProto(&image.Target), + CreatedAt: image.CreatedAt, + UpdatedAt: image.UpdatedAt, + } +} + +func imageFromProto(imagepb *imagesapi.Image) images.Image { + return images.Image{ + Name: imagepb.Name, + Labels: imagepb.Labels, + Target: descFromProto(&imagepb.Target), + CreatedAt: imagepb.CreatedAt, + UpdatedAt: imagepb.UpdatedAt, + } +} + +func imagesFromProto(imagespb []imagesapi.Image) []images.Image { + var images []images.Image + + for _, image := range imagespb { + images = append(images, imageFromProto(&image)) + } + + return images +} + +func descFromProto(desc *types.Descriptor) ocispec.Descriptor { + return ocispec.Descriptor{ + MediaType: desc.MediaType, + Size: desc.Size_, + Digest: desc.Digest, + } +} + +func descToProto(desc *ocispec.Descriptor) types.Descriptor { + return types.Descriptor{ + MediaType: desc.MediaType, + Size_: desc.Size, + Digest: desc.Digest, + } +} diff --git a/vendor/github.com/containerd/containerd/images/image.go b/vendor/github.com/containerd/containerd/images/image.go index 4c78c6cc2d..e0d6990c41 100644 --- a/vendor/github.com/containerd/containerd/images/image.go +++ b/vendor/github.com/containerd/containerd/images/image.go @@ -38,6 +38,23 @@ type Image struct { CreatedAt, UpdatedAt time.Time } +// DeleteOptions provide options on image delete +type DeleteOptions struct { + Synchronous bool +} + +// DeleteOpt allows configuring a delete operation +type DeleteOpt func(context.Context, *DeleteOptions) error + +// SynchronousDelete is used to indicate that an image deletion and removal of +// the image resources should occur synchronously before returning a result. +func SynchronousDelete() DeleteOpt { + return func(ctx context.Context, o *DeleteOptions) error { + o.Synchronous = true + return nil + } +} + // Store and interact with images type Store interface { Get(ctx context.Context, name string) (Image, error) @@ -48,7 +65,7 @@ type Store interface { // one or more fieldpaths are provided, only those fields will be updated. 
Update(ctx context.Context, image Image, fieldpaths ...string) (Image, error) - Delete(ctx context.Context, name string) error + Delete(ctx context.Context, name string, opts ...DeleteOpt) error } // TODO(stevvooe): Many of these functions make strong platform assumptions, diff --git a/vendor/github.com/containerd/containerd/lease.go b/vendor/github.com/containerd/containerd/lease.go index 6ecc58dec3..8eb3bc0fe1 100644 --- a/vendor/github.com/containerd/containerd/lease.go +++ b/vendor/github.com/containerd/containerd/lease.go @@ -51,7 +51,8 @@ func (c *Client) ListLeases(ctx context.Context) ([]Lease, error) { return leases, nil } -func (c *Client) withLease(ctx context.Context) (context.Context, func() error, error) { +// WithLease attaches a lease on the context +func (c *Client) WithLease(ctx context.Context) (context.Context, func() error, error) { _, ok := leases.Lease(ctx) if ok { return ctx, func() error { diff --git a/vendor/github.com/containerd/containerd/linux/bundle.go b/vendor/github.com/containerd/containerd/linux/bundle.go index 72fcab9fe1..136f2ccb7f 100644 --- a/vendor/github.com/containerd/containerd/linux/bundle.go +++ b/vendor/github.com/containerd/containerd/linux/bundle.go @@ -10,7 +10,7 @@ import ( "path/filepath" "github.com/containerd/containerd/events/exchange" - "github.com/containerd/containerd/linux/runcopts" + "github.com/containerd/containerd/linux/runctypes" "github.com/containerd/containerd/linux/shim" "github.com/containerd/containerd/linux/shim/client" "github.com/pkg/errors" @@ -72,11 +72,11 @@ type bundle struct { } // ShimOpt specifies shim options for initialization and connection -type ShimOpt func(*bundle, string, *runcopts.RuncOptions) (shim.Config, client.Opt) +type ShimOpt func(*bundle, string, *runctypes.RuncOptions) (shim.Config, client.Opt) // ShimRemote is a ShimOpt for connecting and starting a remote shim func ShimRemote(shimBinary, daemonAddress, cgroup string, nonewns, debug bool, exitHandler func()) ShimOpt { - return func(b *bundle, ns string, ropts *runcopts.RuncOptions) (shim.Config, client.Opt) { + return func(b *bundle, ns string, ropts *runctypes.RuncOptions) (shim.Config, client.Opt) { return b.shimConfig(ns, ropts), client.WithStart(shimBinary, b.shimAddress(ns), daemonAddress, cgroup, nonewns, debug, exitHandler) } @@ -84,20 +84,20 @@ func ShimRemote(shimBinary, daemonAddress, cgroup string, nonewns, debug bool, e // ShimLocal is a ShimOpt for using an in process shim implementation func ShimLocal(exchange *exchange.Exchange) ShimOpt { - return func(b *bundle, ns string, ropts *runcopts.RuncOptions) (shim.Config, client.Opt) { + return func(b *bundle, ns string, ropts *runctypes.RuncOptions) (shim.Config, client.Opt) { return b.shimConfig(ns, ropts), client.WithLocal(exchange) } } // ShimConnect is a ShimOpt for connecting to an existing remote shim func ShimConnect() ShimOpt { - return func(b *bundle, ns string, ropts *runcopts.RuncOptions) (shim.Config, client.Opt) { + return func(b *bundle, ns string, ropts *runctypes.RuncOptions) (shim.Config, client.Opt) { return b.shimConfig(ns, ropts), client.WithConnect(b.shimAddress(ns)) } } // NewShimClient connects to the shim managing the bundle and tasks creating it if needed -func (b *bundle) NewShimClient(ctx context.Context, namespace string, getClientOpts ShimOpt, runcOpts *runcopts.RuncOptions) (*client.Client, error) { +func (b *bundle) NewShimClient(ctx context.Context, namespace string, getClientOpts ShimOpt, runcOpts *runctypes.RuncOptions) (*client.Client, error) { cfg, 
opt := getClientOpts(b, namespace, runcOpts) return client.New(ctx, cfg, opt) } @@ -120,7 +120,7 @@ func (b *bundle) shimAddress(namespace string) string { return filepath.Join(string(filepath.Separator), "containerd-shim", namespace, b.id, "shim.sock") } -func (b *bundle) shimConfig(namespace string, runcOptions *runcopts.RuncOptions) shim.Config { +func (b *bundle) shimConfig(namespace string, runcOptions *runctypes.RuncOptions) shim.Config { var ( criuPath string runtimeRoot string diff --git a/vendor/github.com/containerd/containerd/linux/shim/deleted_state.go b/vendor/github.com/containerd/containerd/linux/proc/deleted_state.go similarity index 79% rename from vendor/github.com/containerd/containerd/linux/shim/deleted_state.go rename to vendor/github.com/containerd/containerd/linux/proc/deleted_state.go index 6d22735012..fb25878047 100644 --- a/vendor/github.com/containerd/containerd/linux/shim/deleted_state.go +++ b/vendor/github.com/containerd/containerd/linux/proc/deleted_state.go @@ -1,12 +1,12 @@ // +build !windows -package shim +package proc import ( "context" "github.com/containerd/console" - shimapi "github.com/containerd/containerd/linux/shim/v1" + google_protobuf "github.com/gogo/protobuf/types" "github.com/pkg/errors" ) @@ -21,11 +21,11 @@ func (s *deletedState) Resume(ctx context.Context) error { return errors.Errorf("cannot resume a deleted process") } -func (s *deletedState) Update(context context.Context, r *shimapi.UpdateTaskRequest) error { +func (s *deletedState) Update(context context.Context, r *google_protobuf.Any) error { return errors.Errorf("cannot update a deleted process") } -func (s *deletedState) Checkpoint(ctx context.Context, r *shimapi.CheckpointTaskRequest) error { +func (s *deletedState) Checkpoint(ctx context.Context, r *CheckpointConfig) error { return errors.Errorf("cannot checkpoint a deleted process") } diff --git a/vendor/github.com/containerd/containerd/linux/shim/exec.go b/vendor/github.com/containerd/containerd/linux/proc/exec.go similarity index 73% rename from vendor/github.com/containerd/containerd/linux/shim/exec.go rename to vendor/github.com/containerd/containerd/linux/proc/exec.go index 3d27c911b5..00a8547b87 100644 --- a/vendor/github.com/containerd/containerd/linux/shim/exec.go +++ b/vendor/github.com/containerd/containerd/linux/proc/exec.go @@ -1,12 +1,12 @@ // +build !windows -package shim +package proc import ( "context" - "encoding/json" "fmt" "io" + "os" "path/filepath" "sync" "syscall" @@ -15,8 +15,6 @@ import ( "golang.org/x/sys/unix" "github.com/containerd/console" - "github.com/containerd/containerd/identifiers" - shimapi "github.com/containerd/containerd/linux/shim/v1" "github.com/containerd/fifo" runc "github.com/containerd/go-runc" specs "github.com/opencontainers/runtime-spec/specs-go" @@ -26,7 +24,7 @@ import ( type execProcess struct { wg sync.WaitGroup - processState + State mu sync.Mutex id string @@ -37,42 +35,14 @@ type execProcess struct { pid int closers []io.Closer stdin io.Closer - stdio stdio + stdio Stdio path string spec specs.Process - parent *initProcess + parent *Init waitBlock chan struct{} } -func newExecProcess(context context.Context, path string, r *shimapi.ExecProcessRequest, parent *initProcess, id string) (process, error) { - if err := identifiers.Validate(id); err != nil { - return nil, errors.Wrapf(err, "invalid exec id") - } - // process exec request - var spec specs.Process - if err := json.Unmarshal(r.Spec.Value, &spec); err != nil { - return nil, err - } - spec.Terminal = r.Terminal - - e := 
&execProcess{ - id: id, - path: path, - parent: parent, - spec: spec, - stdio: stdio{ - stdin: r.Stdin, - stdout: r.Stdout, - stderr: r.Stderr, - terminal: r.Terminal, - }, - waitBlock: make(chan struct{}), - } - e.processState = &execCreatedState{p: e} - return e, nil -} - func (e *execProcess) Wait() { <-e.waitBlock } @@ -102,7 +72,7 @@ func (e *execProcess) ExitedAt() time.Time { func (e *execProcess) setExited(status int) { e.status = status e.exited = time.Now() - e.parent.platform.shutdownConsole(context.Background(), e.console) + e.parent.platform.ShutdownConsole(context.Background(), e.console) close(e.waitBlock) } @@ -114,6 +84,9 @@ func (e *execProcess) delete(ctx context.Context) error { } e.io.Close() } + pidfile := filepath.Join(e.path, fmt.Sprintf("%s.pid", e.id)) + // silently ignore error + os.Remove(pidfile) return nil } @@ -138,7 +111,7 @@ func (e *execProcess) Stdin() io.Closer { return e.stdin } -func (e *execProcess) Stdio() stdio { +func (e *execProcess) Stdio() Stdio { return e.stdio } @@ -147,12 +120,12 @@ func (e *execProcess) start(ctx context.Context) (err error) { socket *runc.Socket pidfile = filepath.Join(e.path, fmt.Sprintf("%s.pid", e.id)) ) - if e.stdio.terminal { + if e.stdio.Terminal { if socket, err = runc.NewTempConsoleSocket(); err != nil { return errors.Wrap(err, "failed to create runc console socket") } defer socket.Close() - } else if e.stdio.isNull() { + } else if e.stdio.IsNull() { if e.io, err = runc.NewNullIO(); err != nil { return errors.Wrap(err, "creating new NULL IO") } @@ -170,12 +143,13 @@ func (e *execProcess) start(ctx context.Context) (err error) { opts.ConsoleSocket = socket } if err := e.parent.runtime.Exec(ctx, e.parent.id, e.spec, opts); err != nil { + close(e.waitBlock) return e.parent.runtimeError(err, "OCI runtime exec failed") } - if e.stdio.stdin != "" { - sc, err := fifo.OpenFifo(ctx, e.stdio.stdin, syscall.O_WRONLY|syscall.O_NONBLOCK, 0) + if e.stdio.Stdin != "" { + sc, err := fifo.OpenFifo(ctx, e.stdio.Stdin, syscall.O_WRONLY|syscall.O_NONBLOCK, 0) if err != nil { - return errors.Wrapf(err, "failed to open stdin fifo %s", e.stdio.stdin) + return errors.Wrapf(err, "failed to open stdin fifo %s", e.stdio.Stdin) } e.closers = append(e.closers, sc) e.stdin = sc @@ -186,11 +160,11 @@ func (e *execProcess) start(ctx context.Context) (err error) { if err != nil { return errors.Wrap(err, "failed to retrieve console master") } - if e.console, err = e.parent.platform.copyConsole(ctx, console, e.stdio.stdin, e.stdio.stdout, e.stdio.stderr, &e.wg, ©WaitGroup); err != nil { + if e.console, err = e.parent.platform.CopyConsole(ctx, console, e.stdio.Stdin, e.stdio.Stdout, e.stdio.Stderr, &e.wg, ©WaitGroup); err != nil { return errors.Wrap(err, "failed to start console copy") } - } else if !e.stdio.isNull() { - if err := copyPipes(ctx, e.io, e.stdio.stdin, e.stdio.stdout, e.stdio.stderr, &e.wg, ©WaitGroup); err != nil { + } else if !e.stdio.IsNull() { + if err := copyPipes(ctx, e.io, e.stdio.Stdin, e.stdio.Stdout, e.stdio.Stderr, &e.wg, ©WaitGroup); err != nil { return errors.Wrap(err, "failed to start io pipe copy") } } diff --git a/vendor/github.com/containerd/containerd/linux/shim/exec_state.go b/vendor/github.com/containerd/containerd/linux/proc/exec_state.go similarity index 93% rename from vendor/github.com/containerd/containerd/linux/shim/exec_state.go rename to vendor/github.com/containerd/containerd/linux/proc/exec_state.go index 4a4aaa2b4c..3c3c265829 100644 --- a/vendor/github.com/containerd/containerd/linux/shim/exec_state.go +++ 
b/vendor/github.com/containerd/containerd/linux/proc/exec_state.go @@ -1,6 +1,6 @@ // +build !windows -package shim +package proc import ( "context" @@ -16,11 +16,11 @@ type execCreatedState struct { func (s *execCreatedState) transition(name string) error { switch name { case "running": - s.p.processState = &execRunningState{p: s.p} + s.p.State = &execRunningState{p: s.p} case "stopped": - s.p.processState = &execStoppedState{p: s.p} + s.p.State = &execStoppedState{p: s.p} case "deleted": - s.p.processState = &deletedState{} + s.p.State = &deletedState{} default: return errors.Errorf("invalid state transition %q to %q", stateName(s), name) } @@ -77,7 +77,7 @@ type execRunningState struct { func (s *execRunningState) transition(name string) error { switch name { case "stopped": - s.p.processState = &execStoppedState{p: s.p} + s.p.State = &execStoppedState{p: s.p} default: return errors.Errorf("invalid state transition %q to %q", stateName(s), name) } @@ -130,7 +130,7 @@ type execStoppedState struct { func (s *execStoppedState) transition(name string) error { switch name { case "deleted": - s.p.processState = &deletedState{} + s.p.State = &deletedState{} default: return errors.Errorf("invalid state transition %q to %q", stateName(s), name) } diff --git a/vendor/github.com/containerd/containerd/linux/shim/init.go b/vendor/github.com/containerd/containerd/linux/proc/init.go similarity index 69% rename from vendor/github.com/containerd/containerd/linux/shim/init.go rename to vendor/github.com/containerd/containerd/linux/proc/init.go index 01c305bb67..f24f92f7da 100644 --- a/vendor/github.com/containerd/containerd/linux/shim/init.go +++ b/vendor/github.com/containerd/containerd/linux/proc/init.go @@ -1,6 +1,6 @@ // +build !windows -package shim +package proc import ( "context" @@ -15,14 +15,13 @@ import ( "time" "github.com/containerd/console" - "github.com/containerd/containerd/identifiers" - "github.com/containerd/containerd/linux/runcopts" - shimapi "github.com/containerd/containerd/linux/shim/v1" + "github.com/containerd/containerd/linux/runctypes" "github.com/containerd/containerd/log" "github.com/containerd/containerd/mount" "github.com/containerd/fifo" runc "github.com/containerd/go-runc" "github.com/containerd/typeurl" + google_protobuf "github.com/gogo/protobuf/types" specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" ) @@ -30,7 +29,8 @@ import ( // InitPidFile name of the file that contains the init pid const InitPidFile = "init.pid" -type initProcess struct { +// Init represents an initial process for a container +type Init struct { wg sync.WaitGroup initState @@ -47,7 +47,7 @@ type initProcess struct { id string bundle string console console.Console - platform platform + platform Platform io runc.IO runtime *runc.Runc status int @@ -55,28 +55,42 @@ type initProcess struct { pid int closers []io.Closer stdin io.Closer - stdio stdio + stdio Stdio rootfs string IoUID int IoGID int } -func (s *Service) newInitProcess(context context.Context, r *shimapi.CreateTaskRequest) (*initProcess, error) { +// NewRunc returns a new runc instance for a process +func NewRunc(root, path, namespace, runtime, criu string, systemd bool) *runc.Runc { + if root == "" { + root = RuncRoot + } + return &runc.Runc{ + Command: runtime, + Log: filepath.Join(path, "log.json"), + LogFormat: runc.JSON, + PdeathSignal: syscall.SIGKILL, + Root: filepath.Join(root, namespace), + Criu: criu, + SystemdCgroup: systemd, + } +} + +// New returns a new init process +func New(context 
context.Context, path, workDir, runtimeRoot, namespace, criu string, systemdCgroup bool, platform Platform, r *CreateConfig) (*Init, error) { var success bool - if err := identifiers.Validate(r.ID); err != nil { - return nil, errors.Wrapf(err, "invalid task id") - } - var options runcopts.CreateOptions + var options runctypes.CreateOptions if r.Options != nil { v, err := typeurl.UnmarshalAny(r.Options) if err != nil { return nil, err } - options = *v.(*runcopts.CreateOptions) + options = *v.(*runctypes.CreateOptions) } - rootfs := filepath.Join(s.config.Path, "rootfs") + rootfs := filepath.Join(path, "rootfs") // count the number of successful mounts so we can undo // what was actually done rather than what should have been // done. @@ -98,32 +112,20 @@ func (s *Service) newInitProcess(context context.Context, r *shimapi.CreateTaskR return nil, errors.Wrapf(err, "failed to mount rootfs component %v", m) } } - root := s.config.RuntimeRoot - if root == "" { - root = RuncRoot - } - runtime := &runc.Runc{ - Command: r.Runtime, - Log: filepath.Join(s.config.Path, "log.json"), - LogFormat: runc.JSON, - PdeathSignal: syscall.SIGKILL, - Root: filepath.Join(root, s.config.Namespace), - Criu: s.config.Criu, - SystemdCgroup: s.config.SystemdCgroup, - } - p := &initProcess{ + runtime := NewRunc(runtimeRoot, path, namespace, r.Runtime, criu, systemdCgroup) + p := &Init{ id: r.ID, bundle: r.Bundle, runtime: runtime, - platform: s.platform, - stdio: stdio{ - stdin: r.Stdin, - stdout: r.Stdout, - stderr: r.Stderr, - terminal: r.Terminal, + platform: platform, + stdio: Stdio{ + Stdin: r.Stdin, + Stdout: r.Stdout, + Stderr: r.Stderr, + Terminal: r.Terminal, }, rootfs: rootfs, - workDir: s.config.WorkDir, + workDir: workDir, status: 0, waitBlock: make(chan struct{}), IoUID: int(options.IoUid), @@ -148,7 +150,7 @@ func (s *Service) newInitProcess(context context.Context, r *shimapi.CreateTaskR return nil, errors.Wrap(err, "failed to create OCI runtime io pipes") } } - pidFile := filepath.Join(s.config.Path, InitPidFile) + pidFile := filepath.Join(path, InitPidFile) if r.Checkpoint != "" { opts := &runc.RestoreOpts{ CheckpointOpts: runc.CheckpointOpts{ @@ -195,7 +197,7 @@ func (s *Service) newInitProcess(context context.Context, r *shimapi.CreateTaskR if err != nil { return nil, errors.Wrap(err, "failed to retrieve console master") } - console, err = s.platform.copyConsole(context, console, r.Stdin, r.Stdout, r.Stderr, &p.wg, ©WaitGroup) + console, err = platform.CopyConsole(context, console, r.Stdin, r.Stdout, r.Stderr, &p.wg, ©WaitGroup) if err != nil { return nil, errors.Wrap(err, "failed to start console copy") } @@ -216,31 +218,37 @@ func (s *Service) newInitProcess(context context.Context, r *shimapi.CreateTaskR return p, nil } -func (p *initProcess) Wait() { +// Wait for the process to exit +func (p *Init) Wait() { <-p.waitBlock } -func (p *initProcess) ID() string { +// ID of the process +func (p *Init) ID() string { return p.id } -func (p *initProcess) Pid() int { +// Pid of the process +func (p *Init) Pid() int { return p.pid } -func (p *initProcess) ExitStatus() int { +// ExitStatus of the process +func (p *Init) ExitStatus() int { p.mu.Lock() defer p.mu.Unlock() return p.status } -func (p *initProcess) ExitedAt() time.Time { +// ExitedAt at time when the process exited +func (p *Init) ExitedAt() time.Time { p.mu.Lock() defer p.mu.Unlock() return p.exited } -func (p *initProcess) Status(ctx context.Context) (string, error) { +// Status of the process +func (p *Init) Status(ctx context.Context) 
(string, error) { p.mu.Lock() defer p.mu.Unlock() c, err := p.runtime.State(ctx, p.id) @@ -253,20 +261,20 @@ func (p *initProcess) Status(ctx context.Context) (string, error) { return c.Status, nil } -func (p *initProcess) start(context context.Context) error { +func (p *Init) start(context context.Context) error { err := p.runtime.Start(context, p.id) return p.runtimeError(err, "OCI runtime start failed") } -func (p *initProcess) setExited(status int) { +func (p *Init) setExited(status int) { p.exited = time.Now() p.status = status - p.platform.shutdownConsole(context.Background(), p.console) + p.platform.ShutdownConsole(context.Background(), p.console) close(p.waitBlock) } -func (p *initProcess) delete(context context.Context) error { - p.killAll(context) +func (p *Init) delete(context context.Context) error { + p.KillAll(context) p.wg.Wait() err := p.runtime.Delete(context, p.id, nil) // ignore errors if a runtime has already deleted the process @@ -296,49 +304,82 @@ func (p *initProcess) delete(context context.Context) error { return err } -func (p *initProcess) resize(ws console.WinSize) error { +func (p *Init) resize(ws console.WinSize) error { if p.console == nil { return nil } return p.console.Resize(ws) } -func (p *initProcess) pause(context context.Context) error { +func (p *Init) pause(context context.Context) error { err := p.runtime.Pause(context, p.id) return p.runtimeError(err, "OCI runtime pause failed") } -func (p *initProcess) resume(context context.Context) error { +func (p *Init) resume(context context.Context) error { err := p.runtime.Resume(context, p.id) return p.runtimeError(err, "OCI runtime resume failed") } -func (p *initProcess) kill(context context.Context, signal uint32, all bool) error { +func (p *Init) kill(context context.Context, signal uint32, all bool) error { err := p.runtime.Kill(context, p.id, int(signal), &runc.KillOpts{ All: all, }) return checkKillError(err) } -func (p *initProcess) killAll(context context.Context) error { +// KillAll processes belonging to the init process +func (p *Init) KillAll(context context.Context) error { err := p.runtime.Kill(context, p.id, int(syscall.SIGKILL), &runc.KillOpts{ All: true, }) return p.runtimeError(err, "OCI runtime killall failed") } -func (p *initProcess) Stdin() io.Closer { +// Stdin of the process +func (p *Init) Stdin() io.Closer { return p.stdin } -func (p *initProcess) checkpoint(context context.Context, r *shimapi.CheckpointTaskRequest) error { - var options runcopts.CheckpointOptions +// Runtime returns the OCI runtime configured for the init process +func (p *Init) Runtime() *runc.Runc { + return p.runtime +} + +// Exec returns a new exec'd process +func (p *Init) Exec(context context.Context, path string, r *ExecConfig) (Process, error) { + // process exec request + var spec specs.Process + if err := json.Unmarshal(r.Spec.Value, &spec); err != nil { + return nil, err + } + spec.Terminal = r.Terminal + + e := &execProcess{ + id: r.ID, + path: path, + parent: p, + spec: spec, + stdio: Stdio{ + Stdin: r.Stdin, + Stdout: r.Stdout, + Stderr: r.Stderr, + Terminal: r.Terminal, + }, + waitBlock: make(chan struct{}), + } + e.State = &execCreatedState{p: e} + return e, nil +} + +func (p *Init) checkpoint(context context.Context, r *CheckpointConfig) error { + var options runctypes.CheckpointOptions if r.Options != nil { v, err := typeurl.UnmarshalAny(r.Options) if err != nil { return err } - options = *v.(*runcopts.CheckpointOptions) + options = *v.(*runctypes.CheckpointOptions) } var actions 
[]runc.CheckpointAction if !options.Exit { @@ -364,19 +405,20 @@ func (p *initProcess) checkpoint(context context.Context, r *shimapi.CheckpointT return nil } -func (p *initProcess) update(context context.Context, r *shimapi.UpdateTaskRequest) error { +func (p *Init) update(context context.Context, r *google_protobuf.Any) error { var resources specs.LinuxResources - if err := json.Unmarshal(r.Resources.Value, &resources); err != nil { + if err := json.Unmarshal(r.Value, &resources); err != nil { return err } return p.runtime.Update(context, p.id, &resources) } -func (p *initProcess) Stdio() stdio { +// Stdio of the process +func (p *Init) Stdio() Stdio { return p.stdio } -func (p *initProcess) runtimeError(rErr error, msg string) error { +func (p *Init) runtimeError(rErr error, msg string) error { if rErr == nil { return nil } diff --git a/vendor/github.com/containerd/containerd/linux/shim/init_state.go b/vendor/github.com/containerd/containerd/linux/proc/init_state.go similarity index 87% rename from vendor/github.com/containerd/containerd/linux/shim/init_state.go rename to vendor/github.com/containerd/containerd/linux/proc/init_state.go index da7e15b001..b5b398ec39 100644 --- a/vendor/github.com/containerd/containerd/linux/shim/init_state.go +++ b/vendor/github.com/containerd/containerd/linux/proc/init_state.go @@ -1,6 +1,6 @@ // +build !windows -package shim +package proc import ( "context" @@ -9,23 +9,23 @@ import ( "github.com/containerd/console" "github.com/containerd/containerd/errdefs" - shimapi "github.com/containerd/containerd/linux/shim/v1" "github.com/containerd/fifo" runc "github.com/containerd/go-runc" + google_protobuf "github.com/gogo/protobuf/types" "github.com/pkg/errors" ) type initState interface { - processState + State Pause(context.Context) error Resume(context.Context) error - Update(context.Context, *shimapi.UpdateTaskRequest) error - Checkpoint(context.Context, *shimapi.CheckpointTaskRequest) error + Update(context.Context, *google_protobuf.Any) error + Checkpoint(context.Context, *CheckpointConfig) error } type createdState struct { - p *initProcess + p *Init } func (s *createdState) transition(name string) error { @@ -56,14 +56,14 @@ func (s *createdState) Resume(ctx context.Context) error { return errors.Errorf("cannot resume task in created state") } -func (s *createdState) Update(context context.Context, r *shimapi.UpdateTaskRequest) error { +func (s *createdState) Update(context context.Context, r *google_protobuf.Any) error { s.p.mu.Lock() defer s.p.mu.Unlock() return s.p.update(context, r) } -func (s *createdState) Checkpoint(context context.Context, r *shimapi.CheckpointTaskRequest) error { +func (s *createdState) Checkpoint(context context.Context, r *CheckpointConfig) error { s.p.mu.Lock() defer s.p.mu.Unlock() @@ -114,7 +114,7 @@ func (s *createdState) SetExited(status int) { } type createdCheckpointState struct { - p *initProcess + p *Init opts *runc.RestoreOpts } @@ -146,14 +146,14 @@ func (s *createdCheckpointState) Resume(ctx context.Context) error { return errors.Errorf("cannot resume task in created state") } -func (s *createdCheckpointState) Update(context context.Context, r *shimapi.UpdateTaskRequest) error { +func (s *createdCheckpointState) Update(context context.Context, r *google_protobuf.Any) error { s.p.mu.Lock() defer s.p.mu.Unlock() return s.p.update(context, r) } -func (s *createdCheckpointState) Checkpoint(context context.Context, r *shimapi.CheckpointTaskRequest) error { +func (s *createdCheckpointState) Checkpoint(context 
context.Context, r *CheckpointConfig) error { s.p.mu.Lock() defer s.p.mu.Unlock() @@ -175,17 +175,17 @@ func (s *createdCheckpointState) Start(ctx context.Context) error { return p.runtimeError(err, "OCI runtime restore failed") } sio := p.stdio - if sio.stdin != "" { - sc, err := fifo.OpenFifo(ctx, sio.stdin, syscall.O_WRONLY|syscall.O_NONBLOCK, 0) + if sio.Stdin != "" { + sc, err := fifo.OpenFifo(ctx, sio.Stdin, syscall.O_WRONLY|syscall.O_NONBLOCK, 0) if err != nil { - return errors.Wrapf(err, "failed to open stdin fifo %s", sio.stdin) + return errors.Wrapf(err, "failed to open stdin fifo %s", sio.Stdin) } p.stdin = sc p.closers = append(p.closers, sc) } var copyWaitGroup sync.WaitGroup - if !sio.isNull() { - if err := copyPipes(ctx, p.io, sio.stdin, sio.stdout, sio.stderr, &p.wg, ©WaitGroup); err != nil { + if !sio.IsNull() { + if err := copyPipes(ctx, p.io, sio.Stdin, sio.Stdout, sio.Stderr, &p.wg, ©WaitGroup); err != nil { return errors.Wrap(err, "failed to start io pipe copy") } } @@ -228,7 +228,7 @@ func (s *createdCheckpointState) SetExited(status int) { } type runningState struct { - p *initProcess + p *Init } func (s *runningState) transition(name string) error { @@ -259,14 +259,14 @@ func (s *runningState) Resume(ctx context.Context) error { return errors.Errorf("cannot resume a running process") } -func (s *runningState) Update(context context.Context, r *shimapi.UpdateTaskRequest) error { +func (s *runningState) Update(context context.Context, r *google_protobuf.Any) error { s.p.mu.Lock() defer s.p.mu.Unlock() return s.p.update(context, r) } -func (s *runningState) Checkpoint(ctx context.Context, r *shimapi.CheckpointTaskRequest) error { +func (s *runningState) Checkpoint(ctx context.Context, r *CheckpointConfig) error { s.p.mu.Lock() defer s.p.mu.Unlock() @@ -313,7 +313,7 @@ func (s *runningState) SetExited(status int) { } type pausedState struct { - p *initProcess + p *Init } func (s *pausedState) transition(name string) error { @@ -345,14 +345,14 @@ func (s *pausedState) Resume(ctx context.Context) error { return s.transition("running") } -func (s *pausedState) Update(context context.Context, r *shimapi.UpdateTaskRequest) error { +func (s *pausedState) Update(context context.Context, r *google_protobuf.Any) error { s.p.mu.Lock() defer s.p.mu.Unlock() return s.p.update(context, r) } -func (s *pausedState) Checkpoint(ctx context.Context, r *shimapi.CheckpointTaskRequest) error { +func (s *pausedState) Checkpoint(ctx context.Context, r *CheckpointConfig) error { s.p.mu.Lock() defer s.p.mu.Unlock() @@ -400,7 +400,7 @@ func (s *pausedState) SetExited(status int) { } type stoppedState struct { - p *initProcess + p *Init } func (s *stoppedState) transition(name string) error { @@ -427,14 +427,14 @@ func (s *stoppedState) Resume(ctx context.Context) error { return errors.Errorf("cannot resume a stopped container") } -func (s *stoppedState) Update(context context.Context, r *shimapi.UpdateTaskRequest) error { +func (s *stoppedState) Update(context context.Context, r *google_protobuf.Any) error { s.p.mu.Lock() defer s.p.mu.Unlock() return errors.Errorf("cannot update a stopped container") } -func (s *stoppedState) Checkpoint(ctx context.Context, r *shimapi.CheckpointTaskRequest) error { +func (s *stoppedState) Checkpoint(ctx context.Context, r *CheckpointConfig) error { s.p.mu.Lock() defer s.p.mu.Unlock() diff --git a/vendor/github.com/containerd/containerd/linux/shim/io.go b/vendor/github.com/containerd/containerd/linux/proc/io.go similarity index 66% rename from 
vendor/github.com/containerd/containerd/linux/shim/io.go rename to vendor/github.com/containerd/containerd/linux/proc/io.go index 49ba8e0692..e78b383008 100644 --- a/vendor/github.com/containerd/containerd/linux/shim/io.go +++ b/vendor/github.com/containerd/containerd/linux/proc/io.go @@ -1,6 +1,6 @@ // +build !windows -package shim +package proc import ( "context" @@ -9,44 +9,10 @@ import ( "sync" "syscall" - "github.com/containerd/console" "github.com/containerd/fifo" runc "github.com/containerd/go-runc" ) -func copyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string, wg, cwg *sync.WaitGroup) error { - if stdin != "" { - in, err := fifo.OpenFifo(ctx, stdin, syscall.O_RDONLY, 0) - if err != nil { - return err - } - cwg.Add(1) - go func() { - cwg.Done() - io.Copy(console, in) - }() - } - outw, err := fifo.OpenFifo(ctx, stdout, syscall.O_WRONLY, 0) - if err != nil { - return err - } - outr, err := fifo.OpenFifo(ctx, stdout, syscall.O_RDONLY, 0) - if err != nil { - return err - } - wg.Add(1) - cwg.Add(1) - go func() { - cwg.Done() - io.Copy(outw, console) - console.Close() - outr.Close() - outw.Close() - wg.Done() - }() - return nil -} - func copyPipes(ctx context.Context, rio runc.IO, stdin, stdout, stderr string, wg, cwg *sync.WaitGroup) error { for name, dest := range map[string]func(wc io.WriteCloser, rc io.Closer){ stdout: func(wc io.WriteCloser, rc io.Closer) { diff --git a/vendor/github.com/containerd/containerd/linux/shim/process.go b/vendor/github.com/containerd/containerd/linux/proc/process.go similarity index 61% rename from vendor/github.com/containerd/containerd/linux/shim/process.go rename to vendor/github.com/containerd/containerd/linux/proc/process.go index f0b4692388..c0d33adbce 100644 --- a/vendor/github.com/containerd/containerd/linux/shim/process.go +++ b/vendor/github.com/containerd/containerd/linux/proc/process.go @@ -1,30 +1,36 @@ // +build !windows -package shim +package proc import ( "context" "io" + "sync" "time" "github.com/containerd/console" "github.com/pkg/errors" ) -type stdio struct { - stdin string - stdout string - stderr string - terminal bool +// RuncRoot is the path to the root runc state directory +const RuncRoot = "/run/containerd/runc" + +// Stdio of a process +type Stdio struct { + Stdin string + Stdout string + Stderr string + Terminal bool } -func (s stdio) isNull() bool { - return s.stdin == "" && s.stdout == "" && s.stderr == "" +// IsNull returns true if the stdio is not defined +func (s Stdio) IsNull() bool { + return s.Stdin == "" && s.Stdout == "" && s.Stderr == "" } -type process interface { - processState - +// Process on a linux system +type Process interface { + State // ID returns the id for the process ID() string // Pid returns the pid for the process @@ -36,14 +42,15 @@ type process interface { // Stdin returns the process STDIN Stdin() io.Closer // Stdio returns io information for the container - Stdio() stdio + Stdio() Stdio // Status returns the process status Status(context.Context) (string, error) // Wait blocks until the process has exited Wait() } -type processState interface { +// State of a process +type State interface { // Resize resizes the process console Resize(ws console.WinSize) error // Start execution of the process @@ -71,3 +78,12 @@ func stateName(v interface{}) string { } panic(errors.Errorf("invalid state %v", v)) } + +// Platform handles platform-specific behavior that may differs across +// platform implementations +type Platform interface { + CopyConsole(ctx context.Context, 
console console.Console, stdin, stdout, stderr string, + wg, cwg *sync.WaitGroup) (console.Console, error) + ShutdownConsole(ctx context.Context, console console.Console) error + Close() error +} diff --git a/vendor/github.com/containerd/containerd/linux/proc/types.go b/vendor/github.com/containerd/containerd/linux/proc/types.go new file mode 100644 index 0000000000..9055c25d10 --- /dev/null +++ b/vendor/github.com/containerd/containerd/linux/proc/types.go @@ -0,0 +1,37 @@ +package proc + +import ( + containerd_types "github.com/containerd/containerd/api/types" + google_protobuf "github.com/gogo/protobuf/types" +) + +// CreateConfig hold task creation configuration +type CreateConfig struct { + ID string + Bundle string + Runtime string + Rootfs []*containerd_types.Mount + Terminal bool + Stdin string + Stdout string + Stderr string + Checkpoint string + ParentCheckpoint string + Options *google_protobuf.Any +} + +// ExecConfig holds exec creation configuration +type ExecConfig struct { + ID string + Terminal bool + Stdin string + Stdout string + Stderr string + Spec *google_protobuf.Any +} + +// CheckpointConfig holds task checkpoint configuration +type CheckpointConfig struct { + Path string + Options *google_protobuf.Any +} diff --git a/vendor/github.com/containerd/containerd/linux/shim/utils.go b/vendor/github.com/containerd/containerd/linux/proc/utils.go similarity index 92% rename from vendor/github.com/containerd/containerd/linux/shim/utils.go rename to vendor/github.com/containerd/containerd/linux/proc/utils.go index 317f8daee5..1197957b57 100644 --- a/vendor/github.com/containerd/containerd/linux/shim/utils.go +++ b/vendor/github.com/containerd/containerd/linux/proc/utils.go @@ -1,6 +1,6 @@ // +build !windows -package shim +package proc import ( "encoding/json" @@ -10,7 +10,6 @@ import ( "time" "github.com/containerd/containerd/errdefs" - shimapi "github.com/containerd/containerd/linux/shim/v1" runc "github.com/containerd/go-runc" "github.com/pkg/errors" "golang.org/x/sys/unix" @@ -81,6 +80,6 @@ func checkKillError(err error) error { return errors.Wrapf(err, "unknown error after kill") } -func hasNoIO(r *shimapi.CreateTaskRequest) bool { +func hasNoIO(r *CreateConfig) bool { return r.Stdin == "" && r.Stdout == "" && r.Stderr == "" } diff --git a/vendor/github.com/containerd/containerd/linux/process.go b/vendor/github.com/containerd/containerd/linux/process.go index 0febff9e7c..10acc69d5b 100644 --- a/vendor/github.com/containerd/containerd/linux/process.go +++ b/vendor/github.com/containerd/containerd/linux/process.go @@ -5,6 +5,7 @@ package linux import ( "context" + eventstypes "github.com/containerd/containerd/api/events" "github.com/containerd/containerd/api/types/task" "github.com/containerd/containerd/errdefs" shim "github.com/containerd/containerd/linux/shim/v1" @@ -96,12 +97,17 @@ func (p *Process) CloseIO(ctx context.Context) error { // Start the process func (p *Process) Start(ctx context.Context) error { - _, err := p.t.shim.Start(ctx, &shim.StartRequest{ + r, err := p.t.shim.Start(ctx, &shim.StartRequest{ ID: p.id, }) if err != nil { return errdefs.FromGRPC(err) } + p.t.events.Publish(ctx, runtime.TaskExecStartedEventTopic, &eventstypes.TaskExecStarted{ + ContainerID: p.t.id, + Pid: r.Pid, + ExecID: p.id, + }) return nil } diff --git a/vendor/github.com/containerd/containerd/linux/runcopts/runc.pb.go b/vendor/github.com/containerd/containerd/linux/runctypes/runc.pb.go similarity index 81% rename from vendor/github.com/containerd/containerd/linux/runcopts/runc.pb.go 
rename to vendor/github.com/containerd/containerd/linux/runctypes/runc.pb.go index 0415e23d57..00a27bf06f 100644 --- a/vendor/github.com/containerd/containerd/linux/runcopts/runc.pb.go +++ b/vendor/github.com/containerd/containerd/linux/runctypes/runc.pb.go @@ -1,24 +1,25 @@ -// Code generated by protoc-gen-gogo. -// source: github.com/containerd/containerd/linux/runcopts/runc.proto -// DO NOT EDIT! +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/containerd/containerd/linux/runctypes/runc.proto /* - Package runcopts is a generated protocol buffer package. + Package runctypes is a generated protocol buffer package. It is generated from these files: - github.com/containerd/containerd/linux/runcopts/runc.proto + github.com/containerd/containerd/linux/runctypes/runc.proto It has these top-level messages: RuncOptions CreateOptions CheckpointOptions + ProcessDetails */ -package runcopts +package runctypes import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" -import _ "github.com/gogo/protobuf/gogoproto" + +// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" import strings "strings" import reflect "reflect" @@ -79,10 +80,19 @@ func (m *CheckpointOptions) Reset() { *m = CheckpointOptions{ func (*CheckpointOptions) ProtoMessage() {} func (*CheckpointOptions) Descriptor() ([]byte, []int) { return fileDescriptorRunc, []int{2} } +type ProcessDetails struct { + ExecID string `protobuf:"bytes,1,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` +} + +func (m *ProcessDetails) Reset() { *m = ProcessDetails{} } +func (*ProcessDetails) ProtoMessage() {} +func (*ProcessDetails) Descriptor() ([]byte, []int) { return fileDescriptorRunc, []int{3} } + func init() { proto.RegisterType((*RuncOptions)(nil), "containerd.linux.runc.RuncOptions") proto.RegisterType((*CreateOptions)(nil), "containerd.linux.runc.CreateOptions") proto.RegisterType((*CheckpointOptions)(nil), "containerd.linux.runc.CheckpointOptions") + proto.RegisterType((*ProcessDetails)(nil), "containerd.linux.runc.ProcessDetails") } func (m *RuncOptions) Marshal() (dAtA []byte, err error) { size := m.Size() @@ -334,24 +344,30 @@ func (m *CheckpointOptions) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Runc(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 +func (m *ProcessDetails) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func encodeFixed32Runc(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 + +func (m *ProcessDetails) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ExecID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRunc(dAtA, i, uint64(len(m.ExecID))) + i += copy(dAtA[i:], m.ExecID) + } + return i, nil } + func encodeVarintRunc(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -457,6 +473,16 @@ func (m *CheckpointOptions) Size() (n int) { return n } +func (m *ProcessDetails) Size() (n int) { + var l int + _ = 
l + l = len(m.ExecID) + if l > 0 { + n += 1 + l + sovRunc(uint64(l)) + } + return n +} + func sovRunc(x uint64) (n int) { for { n++ @@ -519,6 +545,16 @@ func (this *CheckpointOptions) String() string { }, "") return s } +func (this *ProcessDetails) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ProcessDetails{`, + `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, + `}`, + }, "") + return s +} func valueToStringRunc(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -1187,6 +1223,85 @@ func (m *CheckpointOptions) Unmarshal(dAtA []byte) error { } return nil } +func (m *ProcessDetails) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRunc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProcessDetails: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProcessDetails: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRunc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRunc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExecID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRunc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRunc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipRunc(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 @@ -1293,40 +1408,43 @@ var ( ) func init() { - proto.RegisterFile("github.com/containerd/containerd/linux/runcopts/runc.proto", fileDescriptorRunc) + proto.RegisterFile("github.com/containerd/containerd/linux/runctypes/runc.proto", fileDescriptorRunc) } var fileDescriptorRunc = []byte{ - // 495 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x93, 0x31, 0x6f, 0xd3, 0x40, - 0x14, 0xc7, 0x7b, 0xb4, 0x4d, 0x9c, 0x4b, 0x53, 0xe0, 0x20, 0xd2, 0x51, 0x84, 0x09, 0x11, 0x48, - 0x61, 0x49, 0x24, 0x58, 0x10, 0x6c, 0x64, 0x60, 0x00, 0x4a, 0x65, 0xe8, 0xc2, 0x72, 0x72, 0xcf, - 0x87, 0xf3, 0x94, 0xf8, 0xde, 0xe9, 0xee, 0x4c, 0x93, 0xad, 0x9f, 0x80, 0xcf, 0xd5, 0x91, 0x91, - 0x91, 0xe6, 0x8b, 0x80, 0x7c, 0xb6, 0x0b, 0x2b, 0x2b, 0xdb, 0xff, 0xfd, 0xfe, 0xcf, 0x7e, 0x4f, - 0xff, 0xd3, 0xa3, 0x2f, 0x73, 0xf0, 0x8b, 0xf2, 0x6c, 0x2a, 0xb1, 0x98, 0x49, 0xd4, 0x3e, 0x05, - 0xad, 0x6c, 0xf6, 0xb7, 0x5c, 0x81, 0x2e, 0xd7, 0x33, 0x5b, 0x6a, 0x89, 0xc6, 0xbb, 0x20, 0xa6, - 0xc6, 0xa2, 0x47, 0x36, 0xfc, 0xd3, 0x35, 0x0d, 0x5d, 0xd3, 0xca, 0x3c, 0xba, 0x9b, 0x63, 0x8e, - 0xa1, 0x63, 0x56, 0xa9, 0xba, 0x79, 0xfc, 0x8d, 0xd0, 0x7e, 0x52, 0x6a, 0xf9, 0xc1, 0x78, 0x40, - 0xed, 0x18, 0xa7, 
0x5d, 0x5b, 0x6a, 0x0f, 0x85, 0xe2, 0x64, 0x44, 0x26, 0xbd, 0xa4, 0x2d, 0xd9, - 0x23, 0x7a, 0xd0, 0x48, 0x61, 0x11, 0x3d, 0xbf, 0x11, 0xec, 0x7e, 0xc3, 0x12, 0x44, 0xcf, 0xee, - 0xd3, 0x9e, 0xb4, 0x50, 0x0a, 0x93, 0xfa, 0x05, 0xdf, 0x0d, 0x7e, 0x54, 0x81, 0x93, 0xd4, 0x2f, - 0xd8, 0x13, 0x7a, 0xe8, 0x36, 0xce, 0xab, 0x22, 0x13, 0x32, 0xb7, 0x58, 0x1a, 0xbe, 0x37, 0x22, - 0x93, 0x28, 0x19, 0x34, 0x74, 0x1e, 0xe0, 0xf8, 0x62, 0x97, 0x0e, 0xe6, 0x56, 0xa5, 0x5e, 0xb5, - 0x2b, 0x8d, 0xe9, 0x40, 0xa3, 0x30, 0xf0, 0x15, 0x7d, 0x3d, 0x99, 0x84, 0xef, 0xfa, 0x1a, 0x4f, - 0x2a, 0x16, 0x26, 0xdf, 0xa3, 0x11, 0x1a, 0xa5, 0x85, 0x97, 0x26, 0x2c, 0x16, 0x25, 0xdd, 0xaa, - 0xfe, 0x24, 0x0d, 0x7b, 0x46, 0x87, 0x6a, 0xed, 0x95, 0xd5, 0xe9, 0x4a, 0x94, 0x1a, 0xd6, 0xc2, - 0xa1, 0x5c, 0x2a, 0xef, 0xc2, 0x82, 0x51, 0x72, 0xa7, 0x35, 0x4f, 0x35, 0xac, 0x3f, 0xd6, 0x16, - 0x3b, 0xa2, 0x91, 0x57, 0xb6, 0x00, 0x9d, 0xae, 0x9a, 0x2d, 0xaf, 0x6b, 0xf6, 0x80, 0xd2, 0x2f, - 0xb0, 0x52, 0x62, 0x85, 0x72, 0xe9, 0xf8, 0x7e, 0x70, 0x7b, 0x15, 0x79, 0x57, 0x01, 0xf6, 0x94, - 0xde, 0x52, 0x85, 0xf1, 0x1b, 0xa1, 0xd3, 0x42, 0x39, 0x93, 0x4a, 0xe5, 0x78, 0x67, 0xb4, 0x3b, - 0xe9, 0x25, 0x37, 0x03, 0x3f, 0xbe, 0xc6, 0x55, 0xa2, 0x75, 0x12, 0x4e, 0x14, 0x98, 0x29, 0xde, - 0xad, 0x13, 0x6d, 0xd8, 0x7b, 0xcc, 0x14, 0x7b, 0x4c, 0x0f, 0x35, 0x0a, 0xad, 0xce, 0xc5, 0x52, - 0x6d, 0x2c, 0xe8, 0x9c, 0x47, 0x61, 0xe0, 0x81, 0xc6, 0x63, 0x75, 0xfe, 0xb6, 0x66, 0xec, 0x21, - 0xed, 0xbb, 0x05, 0x14, 0x6d, 0xae, 0xbd, 0xf0, 0x1f, 0x5a, 0xa1, 0x3a, 0x54, 0x36, 0xa4, 0x1d, - 0x40, 0x51, 0x42, 0xc6, 0xe9, 0x88, 0x4c, 0x06, 0xc9, 0x3e, 0xe0, 0x29, 0x64, 0x0d, 0xce, 0x21, - 0xe3, 0xfd, 0x16, 0xbf, 0x81, 0x6c, 0xfc, 0x8b, 0xd0, 0xdb, 0xf3, 0x85, 0x92, 0x4b, 0x83, 0xa0, - 0x7d, 0xfb, 0x0c, 0x8c, 0xee, 0xa9, 0x35, 0xb4, 0xe9, 0x07, 0xfd, 0xbf, 0xc6, 0xfe, 0x3a, 0xb9, - 0xbc, 0x8a, 0x77, 0x7e, 0x5c, 0xc5, 0x3b, 0x17, 0xdb, 0x98, 0x5c, 0x6e, 0x63, 0xf2, 0x7d, 0x1b, - 0x93, 0x9f, 0xdb, 0x98, 0x7c, 0x7e, 0xf1, 0x8f, 0x87, 0xf9, 0xaa, 0x15, 0x67, 0x9d, 0x70, 0x70, - 0xcf, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, 0xbe, 0xbb, 0xf0, 0x6c, 0xdb, 0x03, 0x00, 0x00, + // 540 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x93, 0xc1, 0x6e, 0xd3, 0x40, + 0x10, 0x86, 0x6b, 0xda, 0x26, 0xce, 0xa4, 0x29, 0xb0, 0x50, 0xc9, 0x14, 0x91, 0x86, 0x00, 0x52, + 0xb8, 0xa4, 0x12, 0x88, 0x0b, 0xbd, 0xb5, 0x45, 0xa8, 0x02, 0x4a, 0x65, 0x5a, 0x09, 0x71, 0x59, + 0xb9, 0xeb, 0x21, 0x59, 0x25, 0xde, 0x59, 0xed, 0xae, 0xa9, 0x73, 0xeb, 0x13, 0xf0, 0x5c, 0x3d, + 0x72, 0xe4, 0x84, 0x68, 0x5e, 0x04, 0xe4, 0x75, 0x1c, 0xb8, 0x72, 0xe5, 0xf6, 0xcf, 0xf7, 0x8f, + 0x3d, 0xa3, 0x7f, 0x35, 0xb0, 0x37, 0x92, 0x6e, 0x9c, 0x9f, 0x0f, 0x05, 0x65, 0xbb, 0x82, 0x94, + 0x4b, 0xa4, 0x42, 0x93, 0xfe, 0x2d, 0xa7, 0x52, 0xe5, 0xc5, 0xae, 0xc9, 0x95, 0x70, 0x33, 0x8d, + 0xd6, 0xab, 0xa1, 0x36, 0xe4, 0x88, 0x6d, 0xfd, 0x69, 0x1b, 0xfa, 0xb6, 0x61, 0x69, 0x6e, 0xdf, + 0x1d, 0xd1, 0x88, 0x7c, 0xc7, 0x6e, 0xa9, 0xaa, 0xe6, 0xfe, 0xd7, 0x00, 0xda, 0x71, 0xae, 0xc4, + 0x7b, 0xed, 0x24, 0x29, 0xcb, 0x22, 0x68, 0x9a, 0x5c, 0x39, 0x99, 0x61, 0x14, 0xf4, 0x82, 0x41, + 0x2b, 0xae, 0x4b, 0xf6, 0x10, 0x36, 0x16, 0x92, 0x1b, 0x22, 0x17, 0xdd, 0xf0, 0x76, 0x7b, 0xc1, + 0x62, 0x22, 0xc7, 0xee, 0x43, 0x4b, 0x18, 0x99, 0x73, 0x9d, 0xb8, 0x71, 0xb4, 0xea, 0xfd, 0xb0, + 0x04, 0x27, 0x89, 0x1b, 0xb3, 0x27, 0xb0, 0x69, 0x67, 0xd6, 0x61, 0x96, 0x72, 0x31, 0x32, 0x94, + 0xeb, 0x68, 0xad, 0x17, 0x0c, 0xc2, 0xb8, 0xb3, 0xa0, 0x07, 0x1e, 0xf6, 0x2f, 0x57, 0xa1, 0x73, + 
0x60, 0x30, 0x71, 0x58, 0xaf, 0xd4, 0x87, 0x8e, 0x22, 0xae, 0xe5, 0x17, 0x72, 0xd5, 0xe4, 0xc0, + 0x7f, 0xd7, 0x56, 0x74, 0x52, 0x32, 0x3f, 0xf9, 0x1e, 0x84, 0xa4, 0x51, 0x71, 0x27, 0xb4, 0x5f, + 0x2c, 0x8c, 0x9b, 0x65, 0x7d, 0x2a, 0x34, 0x7b, 0x06, 0x5b, 0x58, 0x38, 0x34, 0x2a, 0x99, 0xf2, + 0x5c, 0xc9, 0x82, 0x5b, 0x12, 0x13, 0x74, 0xd6, 0x2f, 0x18, 0xc6, 0x77, 0x6a, 0xf3, 0x4c, 0xc9, + 0xe2, 0x43, 0x65, 0xb1, 0x6d, 0x08, 0x1d, 0x9a, 0x4c, 0xaa, 0x64, 0xba, 0xd8, 0x72, 0x59, 0xb3, + 0x07, 0x00, 0x9f, 0xe5, 0x14, 0xf9, 0x94, 0xc4, 0xc4, 0x46, 0xeb, 0xde, 0x6d, 0x95, 0xe4, 0x6d, + 0x09, 0xd8, 0x53, 0xb8, 0x85, 0x99, 0x76, 0x33, 0xae, 0x92, 0x0c, 0xad, 0x4e, 0x04, 0xda, 0xa8, + 0xd1, 0x5b, 0x1d, 0xb4, 0xe2, 0x9b, 0x9e, 0x1f, 0x2f, 0x71, 0x99, 0x68, 0x95, 0x84, 0xe5, 0x19, + 0xa5, 0x18, 0x35, 0xab, 0x44, 0x17, 0xec, 0x1d, 0xa5, 0xc8, 0x1e, 0xc3, 0xa6, 0x22, 0xae, 0xf0, + 0x82, 0x4f, 0x70, 0x66, 0xa4, 0x1a, 0x45, 0xa1, 0x1f, 0xb8, 0xa1, 0xe8, 0x18, 0x2f, 0xde, 0x54, + 0x8c, 0xed, 0x40, 0xdb, 0x8e, 0x65, 0x56, 0xe7, 0xda, 0xf2, 0xff, 0x81, 0x12, 0x55, 0xa1, 0xb2, + 0x2d, 0x68, 0x48, 0xe2, 0xb9, 0x4c, 0x23, 0xe8, 0x05, 0x83, 0x4e, 0xbc, 0x2e, 0xe9, 0x4c, 0xa6, + 0x0b, 0x3c, 0x92, 0x69, 0xd4, 0xae, 0xf1, 0x6b, 0x99, 0xf6, 0x7f, 0x05, 0x70, 0xfb, 0x60, 0x8c, + 0x62, 0xa2, 0x49, 0x2a, 0x57, 0x3f, 0x03, 0x83, 0x35, 0x2c, 0x64, 0x9d, 0xbe, 0xd7, 0xff, 0x6b, + 0xec, 0xfd, 0x17, 0xb0, 0x79, 0x62, 0x48, 0xa0, 0xb5, 0x87, 0xe8, 0x12, 0x39, 0xb5, 0xec, 0x11, + 0x34, 0xb1, 0x40, 0xc1, 0x65, 0x5a, 0xdd, 0xc5, 0x3e, 0xcc, 0x7f, 0xec, 0x34, 0x5e, 0x15, 0x28, + 0x8e, 0x0e, 0xe3, 0x46, 0x69, 0x1d, 0xa5, 0xfb, 0xa7, 0x57, 0xd7, 0xdd, 0x95, 0xef, 0xd7, 0xdd, + 0x95, 0xcb, 0x79, 0x37, 0xb8, 0x9a, 0x77, 0x83, 0x6f, 0xf3, 0x6e, 0xf0, 0x73, 0xde, 0x0d, 0x3e, + 0xbd, 0xfc, 0xd7, 0x83, 0xde, 0x5b, 0xaa, 0x8f, 0x2b, 0xe7, 0x0d, 0x7f, 0xab, 0xcf, 0x7f, 0x07, + 0x00, 0x00, 0xff, 0xff, 0xb1, 0xca, 0x85, 0x39, 0x17, 0x04, 0x00, 0x00, } diff --git a/vendor/github.com/containerd/containerd/linux/runcopts/runc.proto b/vendor/github.com/containerd/containerd/linux/runctypes/runc.proto similarity index 87% rename from vendor/github.com/containerd/containerd/linux/runcopts/runc.proto rename to vendor/github.com/containerd/containerd/linux/runctypes/runc.proto index 3d10dc9dc3..a73b1cafa7 100644 --- a/vendor/github.com/containerd/containerd/linux/runcopts/runc.proto +++ b/vendor/github.com/containerd/containerd/linux/runctypes/runc.proto @@ -2,9 +2,9 @@ syntax = "proto3"; package containerd.linux.runc; -import "gogoproto/gogo.proto"; +import weak "gogoproto/gogo.proto"; -option go_package = "github.com/containerd/containerd/linux/runcopts;runcopts"; +option go_package = "github.com/containerd/containerd/linux/runctypes;runctypes"; message RuncOptions { string runtime = 1; @@ -36,3 +36,7 @@ message CheckpointOptions { repeated string empty_namespaces = 6; string cgroups_mode = 7; } + +message ProcessDetails { + string exec_id = 1; +} diff --git a/vendor/github.com/containerd/containerd/linux/runtime.go b/vendor/github.com/containerd/containerd/linux/runtime.go index 44219e40d0..1ffaca11d4 100644 --- a/vendor/github.com/containerd/containerd/linux/runtime.go +++ b/vendor/github.com/containerd/containerd/linux/runtime.go @@ -11,14 +11,14 @@ import ( "time" "github.com/boltdb/bolt" - eventsapi "github.com/containerd/containerd/api/services/events/v1" + eventstypes "github.com/containerd/containerd/api/events" "github.com/containerd/containerd/api/types" "github.com/containerd/containerd/containers" 
"github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/events/exchange" "github.com/containerd/containerd/identifiers" - "github.com/containerd/containerd/linux/runcopts" - client "github.com/containerd/containerd/linux/shim" + "github.com/containerd/containerd/linux/proc" + "github.com/containerd/containerd/linux/runctypes" shim "github.com/containerd/containerd/linux/shim/v1" "github.com/containerd/containerd/log" "github.com/containerd/containerd/metadata" @@ -30,7 +30,7 @@ import ( "github.com/containerd/containerd/sys" runc "github.com/containerd/go-runc" "github.com/containerd/typeurl" - google_protobuf "github.com/golang/protobuf/ptypes/empty" + ptypes "github.com/gogo/protobuf/types" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -39,7 +39,7 @@ import ( var ( pluginID = fmt.Sprintf("%s.%s", plugin.RuntimePlugin, "linux") - empty = &google_protobuf.Empty{} + empty = &ptypes.Empty{} ) const ( @@ -193,7 +193,7 @@ func (r *Runtime) Create(ctx context.Context, id string, opts runtime.CreateOpts if err != nil { return nil, err } - cgroup = v.(*runcopts.CreateOptions).ShimCgroup + cgroup = v.(*runctypes.CreateOptions).ShimCgroup } exitHandler := func() { log.G(ctx).WithField("id", id).Info("shim reaped") @@ -242,14 +242,14 @@ func (r *Runtime) Create(ctx context.Context, id string, opts runtime.CreateOpts } }() - runtime := r.config.Runtime + rt := r.config.Runtime if ropts != nil && ropts.Runtime != "" { - runtime = ropts.Runtime + rt = ropts.Runtime } sopts := &shim.CreateTaskRequest{ ID: id, Bundle: bundle.path, - Runtime: runtime, + Runtime: rt, Stdin: opts.IO.Stdin, Stdout: opts.IO.Stdout, Stderr: opts.IO.Stderr, @@ -268,7 +268,8 @@ func (r *Runtime) Create(ctx context.Context, id string, opts runtime.CreateOpts if err != nil { return nil, errdefs.FromGRPC(err) } - t, err := newTask(id, namespace, int(cr.Pid), s, r.monitor) + t, err := newTask(id, namespace, int(cr.Pid), s, r.monitor, r.events, + proc.NewRunc(ropts.RuntimeRoot, sopts.Bundle, namespace, rt, ropts.CriuPath, ropts.SystemdCgroup)) if err != nil { return nil, err } @@ -285,6 +286,20 @@ func (r *Runtime) Create(ctx context.Context, id string, opts runtime.CreateOpts return nil, err } } + r.events.Publish(ctx, runtime.TaskCreateEventTopic, &eventstypes.TaskCreate{ + ContainerID: sopts.ID, + Bundle: sopts.Bundle, + Rootfs: sopts.Rootfs, + IO: &eventstypes.TaskIO{ + Stdin: sopts.Stdin, + Stdout: sopts.Stdout, + Stderr: sopts.Stderr, + Terminal: sopts.Terminal, + }, + Checkpoint: sopts.Checkpoint, + Pid: uint32(t.pid), + }) + return t, nil } @@ -322,6 +337,12 @@ func (r *Runtime) Delete(ctx context.Context, c runtime.Task) (*runtime.Exit, er if err := bundle.Delete(); err != nil { log.G(ctx).WithError(err).Error("failed to delete bundle") } + r.events.Publish(ctx, runtime.TaskDeleteEventTopic, &eventstypes.TaskDelete{ + ContainerID: lc.id, + ExitStatus: rsp.ExitStatus, + ExitedAt: rsp.ExitedAt, + Pid: rsp.Pid, + }) return &runtime.Exit{ Status: rsp.ExitStatus, Timestamp: rsp.ExitedAt, @@ -376,7 +397,8 @@ func (r *Runtime) loadTasks(ctx context.Context, ns string) ([]*Task, error) { filepath.Join(r.state, ns, id), filepath.Join(r.root, ns, id), ) - pid, _ := runc.ReadPidFile(filepath.Join(bundle.path, client.InitPidFile)) + ctx = namespaces.WithNamespace(ctx, ns) + pid, _ := runc.ReadPidFile(filepath.Join(bundle.path, proc.InitPidFile)) s, err := bundle.NewShimClient(ctx, ns, ShimConnect(), nil) if err != nil { 
log.G(ctx).WithError(err).WithFields(logrus.Fields{ @@ -390,8 +412,15 @@ func (r *Runtime) loadTasks(ctx context.Context, ns string) ([]*Task, error) { } continue } + ropts, err := r.getRuncOptions(ctx, id) + if err != nil { + log.G(ctx).WithError(err).WithField("id", id). + Error("get runtime options") + continue + } - t, err := newTask(id, ns, pid, s, r.monitor) + t, err := newTask(id, ns, pid, s, r.monitor, r.events, + proc.NewRunc(ropts.RuntimeRoot, bundle.path, ns, ropts.Runtime, ropts.CriuPath, ropts.SystemdCgroup)) if err != nil { log.G(ctx).WithError(err).Error("loading task type") continue @@ -423,7 +452,7 @@ func (r *Runtime) cleanupAfterDeadShim(ctx context.Context, bundle *bundle, ns, // Notify Client exitedAt := time.Now().UTC() - r.events.Publish(ctx, runtime.TaskExitEventTopic, &eventsapi.TaskExit{ + r.events.Publish(ctx, runtime.TaskExitEventTopic, &eventstypes.TaskExit{ ContainerID: id, ID: id, Pid: uint32(pid), @@ -435,7 +464,7 @@ func (r *Runtime) cleanupAfterDeadShim(ctx context.Context, bundle *bundle, ns, log.G(ctx).WithError(err).Error("delete bundle") } - r.events.Publish(ctx, runtime.TaskDeleteEventTopic, &eventsapi.TaskDelete{ + r.events.Publish(ctx, runtime.TaskDeleteEventTopic, &eventstypes.TaskDelete{ ContainerID: id, Pid: uint32(pid), ExitStatus: 128 + uint32(unix.SIGKILL), @@ -474,7 +503,7 @@ func (r *Runtime) getRuntime(ctx context.Context, ns, id string) (*runc.Runc, er var ( cmd = r.config.Runtime - root = client.RuncRoot + root = proc.RuncRoot ) if ropts != nil { if ropts.Runtime != "" { @@ -493,7 +522,7 @@ func (r *Runtime) getRuntime(ctx context.Context, ns, id string) (*runc.Runc, er }, nil } -func (r *Runtime) getRuncOptions(ctx context.Context, id string) (*runcopts.RuncOptions, error) { +func (r *Runtime) getRuncOptions(ctx context.Context, id string) (*runctypes.RuncOptions, error) { var container containers.Container if err := r.db.View(func(tx *bolt.Tx) error { @@ -510,12 +539,12 @@ func (r *Runtime) getRuncOptions(ctx context.Context, id string) (*runcopts.Runc if err != nil { return nil, err } - ropts, ok := v.(*runcopts.RuncOptions) + ropts, ok := v.(*runctypes.RuncOptions) if !ok { return nil, errors.New("invalid runtime options format") } return ropts, nil } - return nil, nil + return &runctypes.RuncOptions{}, nil } diff --git a/vendor/github.com/containerd/containerd/linux/shim/client/client.go b/vendor/github.com/containerd/containerd/linux/shim/client/client.go index 1cfe766c21..db59e2ceec 100644 --- a/vendor/github.com/containerd/containerd/linux/shim/client/client.go +++ b/vendor/github.com/containerd/containerd/linux/shim/client/client.go @@ -4,7 +4,6 @@ package client import ( "context" - "fmt" "io" "net" "os" @@ -18,6 +17,7 @@ import ( "github.com/pkg/errors" "github.com/sirupsen/logrus" + "github.com/stevvooe/ttrpc" "github.com/containerd/containerd/events" "github.com/containerd/containerd/linux/shim" @@ -25,18 +25,17 @@ import ( "github.com/containerd/containerd/log" "github.com/containerd/containerd/reaper" "github.com/containerd/containerd/sys" - google_protobuf "github.com/golang/protobuf/ptypes/empty" - "google.golang.org/grpc" + ptypes "github.com/gogo/protobuf/types" ) -var empty = &google_protobuf.Empty{} +var empty = &ptypes.Empty{} // Opt is an option for a shim client configuration -type Opt func(context.Context, shim.Config) (shimapi.ShimClient, io.Closer, error) +type Opt func(context.Context, shim.Config) (shimapi.ShimService, io.Closer, error) // WithStart executes a new shim process func WithStart(binary, address, 
daemonAddress, cgroup string, nonewns, debug bool, exitHandler func()) Opt { - return func(ctx context.Context, config shim.Config) (_ shimapi.ShimClient, _ io.Closer, err error) { + return func(ctx context.Context, config shim.Config) (_ shimapi.ShimService, _ io.Closer, err error) { socket, err := newSocket(address) if err != nil { return nil, nil, err @@ -89,10 +88,15 @@ func WithStart(binary, address, daemonAddress, cgroup string, nonewns, debug boo } func newCommand(binary, daemonAddress string, nonewns, debug bool, config shim.Config, socket *os.File) *exec.Cmd { + selfExe, err := os.Executable() + if err != nil { + panic(err) + } args := []string{ "-namespace", config.Namespace, "-workdir", config.WorkDir, "-address", daemonAddress, + "-containerd-binary", selfExe, } if config.Criu != "" { @@ -134,24 +138,8 @@ func newSocket(address string) (*net.UnixListener, error) { return l.(*net.UnixListener), nil } -func connect(address string, d func(string, time.Duration) (net.Conn, error)) (*grpc.ClientConn, error) { - gopts := []grpc.DialOption{ - grpc.WithBlock(), - grpc.WithInsecure(), - grpc.WithTimeout(100 * time.Second), - grpc.WithDialer(d), - grpc.FailOnNonTempDialError(true), - } - conn, err := grpc.Dial(dialAddress(address), gopts...) - if err != nil { - return nil, errors.Wrapf(err, "failed to dial %q", address) - } - return conn, nil -} - -func dialer(address string, timeout time.Duration) (net.Conn, error) { - address = strings.TrimPrefix(address, "unix://") - return net.DialTimeout("unix", address, timeout) +func connect(address string, d func(string, time.Duration) (net.Conn, error)) (net.Conn, error) { + return d(address, 100*time.Second) } func annonDialer(address string, timeout time.Duration) (net.Conn, error) { @@ -159,24 +147,20 @@ func annonDialer(address string, timeout time.Duration) (net.Conn, error) { return net.DialTimeout("unix", "\x00"+address, timeout) } -func dialAddress(address string) string { - return fmt.Sprintf("unix://%s", address) -} - // WithConnect connects to an existing shim func WithConnect(address string) Opt { - return func(ctx context.Context, config shim.Config) (shimapi.ShimClient, io.Closer, error) { + return func(ctx context.Context, config shim.Config) (shimapi.ShimService, io.Closer, error) { conn, err := connect(address, annonDialer) if err != nil { return nil, nil, err } - return shimapi.NewShimClient(conn), conn, nil + return shimapi.NewShimClient(ttrpc.NewClient(conn)), conn, nil } } // WithLocal uses an in process shim -func WithLocal(publisher events.Publisher) func(context.Context, shim.Config) (shimapi.ShimClient, io.Closer, error) { - return func(ctx context.Context, config shim.Config) (shimapi.ShimClient, io.Closer, error) { +func WithLocal(publisher events.Publisher) func(context.Context, shim.Config) (shimapi.ShimService, io.Closer, error) { + return func(ctx context.Context, config shim.Config) (shimapi.ShimService, io.Closer, error) { service, err := shim.NewService(config, publisher) if err != nil { return nil, nil, err @@ -192,15 +176,15 @@ func New(ctx context.Context, config shim.Config, opt Opt) (*Client, error) { return nil, err } return &Client{ - ShimClient: s, - c: c, - exitCh: make(chan struct{}), + ShimService: s, + c: c, + exitCh: make(chan struct{}), }, nil } // Client is a shim client containing the connection to a shim type Client struct { - shimapi.ShimClient + shimapi.ShimService c io.Closer exitCh chan struct{} @@ -212,10 +196,9 @@ type Client struct { func (c *Client) IsAlive(ctx context.Context) (bool, 
error) { _, err := c.ShimInfo(ctx, empty) if err != nil { - if err != grpc.ErrServerStopped { - return false, err - } - return false, nil + // TODO(stevvooe): There are some error conditions that need to be + // handle with unix sockets existence to give the right answer here. + return false, err } return true, nil } diff --git a/vendor/github.com/containerd/containerd/linux/shim/local.go b/vendor/github.com/containerd/containerd/linux/shim/local.go index 5e5634d70a..42649771dc 100644 --- a/vendor/github.com/containerd/containerd/linux/shim/local.go +++ b/vendor/github.com/containerd/containerd/linux/shim/local.go @@ -3,17 +3,16 @@ package shim import ( + "context" "path/filepath" shimapi "github.com/containerd/containerd/linux/shim/v1" - google_protobuf "github.com/golang/protobuf/ptypes/empty" - "golang.org/x/net/context" + ptypes "github.com/gogo/protobuf/types" "golang.org/x/sys/unix" - "google.golang.org/grpc" ) // NewLocal returns a shim client implementation for issue commands to a shim -func NewLocal(s *Service) shimapi.ShimClient { +func NewLocal(s *Service) shimapi.ShimService { return &local{ s: s, } @@ -23,15 +22,15 @@ type local struct { s *Service } -func (c *local) Create(ctx context.Context, in *shimapi.CreateTaskRequest, opts ...grpc.CallOption) (*shimapi.CreateTaskResponse, error) { +func (c *local) Create(ctx context.Context, in *shimapi.CreateTaskRequest) (*shimapi.CreateTaskResponse, error) { return c.s.Create(ctx, in) } -func (c *local) Start(ctx context.Context, in *shimapi.StartRequest, opts ...grpc.CallOption) (*shimapi.StartResponse, error) { +func (c *local) Start(ctx context.Context, in *shimapi.StartRequest) (*shimapi.StartResponse, error) { return c.s.Start(ctx, in) } -func (c *local) Delete(ctx context.Context, in *google_protobuf.Empty, opts ...grpc.CallOption) (*shimapi.DeleteResponse, error) { +func (c *local) Delete(ctx context.Context, in *ptypes.Empty) (*shimapi.DeleteResponse, error) { // make sure we unmount the containers rootfs for this local if err := unix.Unmount(filepath.Join(c.s.config.Path, "rootfs"), 0); err != nil { return nil, err @@ -39,54 +38,54 @@ func (c *local) Delete(ctx context.Context, in *google_protobuf.Empty, opts ...g return c.s.Delete(ctx, in) } -func (c *local) DeleteProcess(ctx context.Context, in *shimapi.DeleteProcessRequest, opts ...grpc.CallOption) (*shimapi.DeleteResponse, error) { +func (c *local) DeleteProcess(ctx context.Context, in *shimapi.DeleteProcessRequest) (*shimapi.DeleteResponse, error) { return c.s.DeleteProcess(ctx, in) } -func (c *local) Exec(ctx context.Context, in *shimapi.ExecProcessRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) { +func (c *local) Exec(ctx context.Context, in *shimapi.ExecProcessRequest) (*ptypes.Empty, error) { return c.s.Exec(ctx, in) } -func (c *local) ResizePty(ctx context.Context, in *shimapi.ResizePtyRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) { +func (c *local) ResizePty(ctx context.Context, in *shimapi.ResizePtyRequest) (*ptypes.Empty, error) { return c.s.ResizePty(ctx, in) } -func (c *local) State(ctx context.Context, in *shimapi.StateRequest, opts ...grpc.CallOption) (*shimapi.StateResponse, error) { +func (c *local) State(ctx context.Context, in *shimapi.StateRequest) (*shimapi.StateResponse, error) { return c.s.State(ctx, in) } -func (c *local) Pause(ctx context.Context, in *google_protobuf.Empty, opts ...grpc.CallOption) (*google_protobuf.Empty, error) { +func (c *local) Pause(ctx context.Context, in *ptypes.Empty) (*ptypes.Empty, 
error) { return c.s.Pause(ctx, in) } -func (c *local) Resume(ctx context.Context, in *google_protobuf.Empty, opts ...grpc.CallOption) (*google_protobuf.Empty, error) { +func (c *local) Resume(ctx context.Context, in *ptypes.Empty) (*ptypes.Empty, error) { return c.s.Resume(ctx, in) } -func (c *local) Kill(ctx context.Context, in *shimapi.KillRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) { +func (c *local) Kill(ctx context.Context, in *shimapi.KillRequest) (*ptypes.Empty, error) { return c.s.Kill(ctx, in) } -func (c *local) ListPids(ctx context.Context, in *shimapi.ListPidsRequest, opts ...grpc.CallOption) (*shimapi.ListPidsResponse, error) { +func (c *local) ListPids(ctx context.Context, in *shimapi.ListPidsRequest) (*shimapi.ListPidsResponse, error) { return c.s.ListPids(ctx, in) } -func (c *local) CloseIO(ctx context.Context, in *shimapi.CloseIORequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) { +func (c *local) CloseIO(ctx context.Context, in *shimapi.CloseIORequest) (*ptypes.Empty, error) { return c.s.CloseIO(ctx, in) } -func (c *local) Checkpoint(ctx context.Context, in *shimapi.CheckpointTaskRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) { +func (c *local) Checkpoint(ctx context.Context, in *shimapi.CheckpointTaskRequest) (*ptypes.Empty, error) { return c.s.Checkpoint(ctx, in) } -func (c *local) ShimInfo(ctx context.Context, in *google_protobuf.Empty, opts ...grpc.CallOption) (*shimapi.ShimInfoResponse, error) { +func (c *local) ShimInfo(ctx context.Context, in *ptypes.Empty) (*shimapi.ShimInfoResponse, error) { return c.s.ShimInfo(ctx, in) } -func (c *local) Update(ctx context.Context, in *shimapi.UpdateTaskRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) { +func (c *local) Update(ctx context.Context, in *shimapi.UpdateTaskRequest) (*ptypes.Empty, error) { return c.s.Update(ctx, in) } -func (c *local) Wait(ctx context.Context, in *shimapi.WaitRequest, opts ...grpc.CallOption) (*shimapi.WaitResponse, error) { +func (c *local) Wait(ctx context.Context, in *shimapi.WaitRequest) (*shimapi.WaitResponse, error) { return c.s.Wait(ctx, in) } diff --git a/vendor/github.com/containerd/containerd/linux/shim/service.go b/vendor/github.com/containerd/containerd/linux/shim/service.go index 7b5c5e1164..1150d1cc80 100644 --- a/vendor/github.com/containerd/containerd/linux/shim/service.go +++ b/vendor/github.com/containerd/containerd/linux/shim/service.go @@ -3,34 +3,33 @@ package shim import ( + "context" "fmt" "os" "sync" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "github.com/containerd/console" - eventsapi "github.com/containerd/containerd/api/services/events/v1" + eventstypes "github.com/containerd/containerd/api/events" "github.com/containerd/containerd/api/types/task" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/events" + "github.com/containerd/containerd/linux/proc" + "github.com/containerd/containerd/linux/runctypes" shimapi "github.com/containerd/containerd/linux/shim/v1" "github.com/containerd/containerd/log" "github.com/containerd/containerd/namespaces" "github.com/containerd/containerd/reaper" "github.com/containerd/containerd/runtime" runc "github.com/containerd/go-runc" - google_protobuf "github.com/golang/protobuf/ptypes/empty" + "github.com/containerd/typeurl" + ptypes "github.com/gogo/protobuf/types" "github.com/pkg/errors" "github.com/sirupsen/logrus" - "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) -var empty = 
&google_protobuf.Empty{} - -// RuncRoot is the path to the root runc state directory -const RuncRoot = "/run/containerd/runc" +var empty = &ptypes.Empty{} // Config contains shim specific configuration type Config struct { @@ -47,16 +46,16 @@ func NewService(config Config, publisher events.Publisher) (*Service, error) { if config.Namespace == "" { return nil, fmt.Errorf("shim namespace cannot be empty") } - context := namespaces.WithNamespace(context.Background(), config.Namespace) - context = log.WithLogger(context, logrus.WithFields(logrus.Fields{ + ctx := namespaces.WithNamespace(context.Background(), config.Namespace) + ctx = log.WithLogger(ctx, logrus.WithFields(logrus.Fields{ "namespace": config.Namespace, "path": config.Path, "pid": os.Getpid(), })) s := &Service{ config: config, - context: context, - processes: make(map[string]process), + context: ctx, + processes: make(map[string]proc.Process), events: make(chan interface{}, 128), ec: reaper.Default.Subscribe(), } @@ -68,23 +67,15 @@ func NewService(config Config, publisher events.Publisher) (*Service, error) { return s, nil } -// platform handles platform-specific behavior that may differs across -// platform implementations -type platform interface { - copyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string, wg, cwg *sync.WaitGroup) (console.Console, error) - shutdownConsole(ctx context.Context, console console.Console) error - close() error -} - // Service is the shim implementation of a remote shim over GRPC type Service struct { mu sync.Mutex config Config context context.Context - processes map[string]process + processes map[string]proc.Process events chan interface{} - platform platform + platform proc.Platform ec chan runc.Exit // Filled by Create() @@ -96,7 +87,29 @@ type Service struct { func (s *Service) Create(ctx context.Context, r *shimapi.CreateTaskRequest) (*shimapi.CreateTaskResponse, error) { s.mu.Lock() defer s.mu.Unlock() - process, err := s.newInitProcess(ctx, r) + process, err := proc.New( + ctx, + s.config.Path, + s.config.WorkDir, + s.config.RuntimeRoot, + s.config.Namespace, + s.config.Criu, + s.config.SystemdCgroup, + s.platform, + &proc.CreateConfig{ + ID: r.ID, + Bundle: r.Bundle, + Runtime: r.Runtime, + Rootfs: r.Rootfs, + Terminal: r.Terminal, + Stdin: r.Stdin, + Stdout: r.Stdout, + Stderr: r.Stderr, + Checkpoint: r.Checkpoint, + ParentCheckpoint: r.ParentCheckpoint, + Options: r.Options, + }, + ) if err != nil { return nil, errdefs.ToGRPC(err) } @@ -105,19 +118,6 @@ func (s *Service) Create(ctx context.Context, r *shimapi.CreateTaskRequest) (*sh s.bundle = r.Bundle pid := process.Pid() s.processes[r.ID] = process - s.events <- &eventsapi.TaskCreate{ - ContainerID: r.ID, - Bundle: r.Bundle, - Rootfs: r.Rootfs, - IO: &eventsapi.TaskIO{ - Stdin: r.Stdin, - Stdout: r.Stdout, - Stderr: r.Stderr, - Terminal: r.Terminal, - }, - Checkpoint: r.Checkpoint, - Pid: uint32(pid), - } return &shimapi.CreateTaskResponse{ Pid: uint32(pid), }, nil @@ -134,19 +134,6 @@ func (s *Service) Start(ctx context.Context, r *shimapi.StartRequest) (*shimapi. 
if err := p.Start(ctx); err != nil { return nil, err } - if r.ID == s.id { - s.events <- &eventsapi.TaskStart{ - ContainerID: s.id, - Pid: uint32(p.Pid()), - } - } else { - pid := p.Pid() - s.events <- &eventsapi.TaskExecStarted{ - ContainerID: s.id, - ExecID: r.ID, - Pid: uint32(pid), - } - } return &shimapi.StartResponse{ ID: p.ID(), Pid: uint32(p.Pid()), @@ -154,25 +141,18 @@ func (s *Service) Start(ctx context.Context, r *shimapi.StartRequest) (*shimapi. } // Delete the initial process and container -func (s *Service) Delete(ctx context.Context, r *google_protobuf.Empty) (*shimapi.DeleteResponse, error) { +func (s *Service) Delete(ctx context.Context, r *ptypes.Empty) (*shimapi.DeleteResponse, error) { s.mu.Lock() defer s.mu.Unlock() p := s.processes[s.id] if p == nil { return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created") } - if err := p.Delete(ctx); err != nil { return nil, err } delete(s.processes, s.id) - s.platform.close() - s.events <- &eventsapi.TaskDelete{ - ContainerID: s.id, - ExitStatus: uint32(p.ExitStatus()), - ExitedAt: p.ExitedAt(), - Pid: uint32(p.Pid()), - } + s.platform.Close() return &shimapi.DeleteResponse{ ExitStatus: uint32(p.ExitStatus()), ExitedAt: p.ExitedAt(), @@ -185,7 +165,7 @@ func (s *Service) DeleteProcess(ctx context.Context, r *shimapi.DeleteProcessReq s.mu.Lock() defer s.mu.Unlock() if r.ID == s.id { - return nil, grpc.Errorf(codes.InvalidArgument, "cannot delete init process with DeleteProcess") + return nil, status.Errorf(codes.InvalidArgument, "cannot delete init process with DeleteProcess") } p := s.processes[r.ID] if p == nil { @@ -203,7 +183,7 @@ func (s *Service) DeleteProcess(ctx context.Context, r *shimapi.DeleteProcessReq } // Exec an additional process inside the container -func (s *Service) Exec(ctx context.Context, r *shimapi.ExecProcessRequest) (*google_protobuf.Empty, error) { +func (s *Service) Exec(ctx context.Context, r *shimapi.ExecProcessRequest) (*ptypes.Empty, error) { s.mu.Lock() defer s.mu.Unlock() @@ -216,21 +196,23 @@ func (s *Service) Exec(ctx context.Context, r *shimapi.ExecProcessRequest) (*goo return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created") } - process, err := newExecProcess(ctx, s.config.Path, r, p.(*initProcess), r.ID) + process, err := p.(*proc.Init).Exec(ctx, s.config.Path, &proc.ExecConfig{ + ID: r.ID, + Terminal: r.Terminal, + Stdin: r.Stdin, + Stdout: r.Stdout, + Stderr: r.Stderr, + Spec: r.Spec, + }) if err != nil { return nil, errdefs.ToGRPC(err) } s.processes[r.ID] = process - - s.events <- &eventsapi.TaskExecAdded{ - ContainerID: s.id, - ExecID: r.ID, - } return empty, nil } // ResizePty of a process -func (s *Service) ResizePty(ctx context.Context, r *shimapi.ResizePtyRequest) (*google_protobuf.Empty, error) { +func (s *Service) ResizePty(ctx context.Context, r *shimapi.ResizePtyRequest) (*ptypes.Empty, error) { s.mu.Lock() defer s.mu.Unlock() if r.ID == "" { @@ -281,51 +263,45 @@ func (s *Service) State(ctx context.Context, r *shimapi.StateRequest) (*shimapi. 
Bundle: s.bundle, Pid: uint32(p.Pid()), Status: status, - Stdin: sio.stdin, - Stdout: sio.stdout, - Stderr: sio.stderr, - Terminal: sio.terminal, + Stdin: sio.Stdin, + Stdout: sio.Stdout, + Stderr: sio.Stderr, + Terminal: sio.Terminal, ExitStatus: uint32(p.ExitStatus()), ExitedAt: p.ExitedAt(), }, nil } // Pause the container -func (s *Service) Pause(ctx context.Context, r *google_protobuf.Empty) (*google_protobuf.Empty, error) { +func (s *Service) Pause(ctx context.Context, r *ptypes.Empty) (*ptypes.Empty, error) { s.mu.Lock() defer s.mu.Unlock() p := s.processes[s.id] if p == nil { return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created") } - if err := p.(*initProcess).Pause(ctx); err != nil { + if err := p.(*proc.Init).Pause(ctx); err != nil { return nil, err } - s.events <- &eventsapi.TaskPaused{ - ContainerID: s.id, - } return empty, nil } // Resume the container -func (s *Service) Resume(ctx context.Context, r *google_protobuf.Empty) (*google_protobuf.Empty, error) { +func (s *Service) Resume(ctx context.Context, r *ptypes.Empty) (*ptypes.Empty, error) { s.mu.Lock() defer s.mu.Unlock() p := s.processes[s.id] if p == nil { return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created") } - if err := p.(*initProcess).Resume(ctx); err != nil { + if err := p.(*proc.Init).Resume(ctx); err != nil { return nil, err } - s.events <- &eventsapi.TaskResumed{ - ContainerID: s.id, - } return empty, nil } // Kill a process with the provided signal -func (s *Service) Kill(ctx context.Context, r *shimapi.KillRequest) (*google_protobuf.Empty, error) { +func (s *Service) Kill(ctx context.Context, r *shimapi.KillRequest) (*ptypes.Empty, error) { s.mu.Lock() defer s.mu.Unlock() if r.ID == "" { @@ -357,9 +333,23 @@ func (s *Service) ListPids(ctx context.Context, r *shimapi.ListPidsRequest) (*sh } var processes []*task.ProcessInfo for _, pid := range pids { - processes = append(processes, &task.ProcessInfo{ + pInfo := task.ProcessInfo{ Pid: pid, - }) + } + for _, p := range s.processes { + if p.Pid() == int(pid) { + d := &runctypes.ProcessDetails{ + ExecID: p.ID(), + } + a, err := typeurl.MarshalAny(d) + if err != nil { + return nil, errors.Wrapf(err, "failed to marshal process %d info", pid) + } + pInfo.Info = a + break + } + } + processes = append(processes, &pInfo) } return &shimapi.ListPidsResponse{ Processes: processes, @@ -367,7 +357,7 @@ func (s *Service) ListPids(ctx context.Context, r *shimapi.ListPidsRequest) (*sh } // CloseIO of a process -func (s *Service) CloseIO(ctx context.Context, r *shimapi.CloseIORequest) (*google_protobuf.Empty, error) { +func (s *Service) CloseIO(ctx context.Context, r *shimapi.CloseIORequest) (*ptypes.Empty, error) { s.mu.Lock() defer s.mu.Unlock() p := s.processes[r.ID] @@ -383,38 +373,38 @@ func (s *Service) CloseIO(ctx context.Context, r *shimapi.CloseIORequest) (*goog } // Checkpoint the container -func (s *Service) Checkpoint(ctx context.Context, r *shimapi.CheckpointTaskRequest) (*google_protobuf.Empty, error) { +func (s *Service) Checkpoint(ctx context.Context, r *shimapi.CheckpointTaskRequest) (*ptypes.Empty, error) { s.mu.Lock() defer s.mu.Unlock() p := s.processes[s.id] if p == nil { return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created") } - if err := p.(*initProcess).Checkpoint(ctx, r); err != nil { + if err := p.(*proc.Init).Checkpoint(ctx, &proc.CheckpointConfig{ + Path: r.Path, + Options: r.Options, + }); err != nil { return nil, errdefs.ToGRPC(err) } - s.events <- 
&eventsapi.TaskCheckpointed{ - ContainerID: s.id, - } return empty, nil } // ShimInfo returns shim information such as the shim's pid -func (s *Service) ShimInfo(ctx context.Context, r *google_protobuf.Empty) (*shimapi.ShimInfoResponse, error) { +func (s *Service) ShimInfo(ctx context.Context, r *ptypes.Empty) (*shimapi.ShimInfoResponse, error) { return &shimapi.ShimInfoResponse{ ShimPid: uint32(os.Getpid()), }, nil } // Update a running container -func (s *Service) Update(ctx context.Context, r *shimapi.UpdateTaskRequest) (*google_protobuf.Empty, error) { +func (s *Service) Update(ctx context.Context, r *shimapi.UpdateTaskRequest) (*ptypes.Empty, error) { s.mu.Lock() defer s.mu.Unlock() p := s.processes[s.id] if p == nil { return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created") } - if err := p.(*initProcess).Update(ctx, r); err != nil { + if err := p.(*proc.Init).Update(ctx, r.Resources); err != nil { return nil, errdefs.ToGRPC(err) } return empty, nil @@ -447,15 +437,15 @@ func (s *Service) checkProcesses(e runc.Exit) { defer s.mu.Unlock() for _, p := range s.processes { if p.Pid() == e.Pid { - if ip, ok := p.(*initProcess); ok { + if ip, ok := p.(*proc.Init); ok { // Ensure all children are killed - if err := ip.killAll(s.context); err != nil { + if err := ip.KillAll(s.context); err != nil { log.G(s.context).WithError(err).WithField("id", ip.ID()). Error("failed to kill init's children") } } p.SetExited(e.Status) - s.events <- &eventsapi.TaskExit{ + s.events <- &eventstypes.TaskExit{ ContainerID: s.id, ID: p.ID(), Pid: uint32(e.Pid), @@ -475,7 +465,7 @@ func (s *Service) getContainerPids(ctx context.Context, id string) ([]uint32, er return nil, errors.Wrapf(errdefs.ErrFailedPrecondition, "container must be created") } - ps, err := p.(*initProcess).runtime.Ps(ctx, id) + ps, err := p.(*proc.Init).Runtime().Ps(ctx, id) if err != nil { return nil, err } @@ -489,32 +479,32 @@ func (s *Service) getContainerPids(ctx context.Context, id string) ([]uint32, er func (s *Service) forward(publisher events.Publisher) { for e := range s.events { if err := publisher.Publish(s.context, getTopic(s.context, e), e); err != nil { - logrus.WithError(err).Error("post event") + log.G(s.context).WithError(err).Error("post event") } } } func getTopic(ctx context.Context, e interface{}) string { switch e.(type) { - case *eventsapi.TaskCreate: + case *eventstypes.TaskCreate: return runtime.TaskCreateEventTopic - case *eventsapi.TaskStart: + case *eventstypes.TaskStart: return runtime.TaskStartEventTopic - case *eventsapi.TaskOOM: + case *eventstypes.TaskOOM: return runtime.TaskOOMEventTopic - case *eventsapi.TaskExit: + case *eventstypes.TaskExit: return runtime.TaskExitEventTopic - case *eventsapi.TaskDelete: + case *eventstypes.TaskDelete: return runtime.TaskDeleteEventTopic - case *eventsapi.TaskExecAdded: + case *eventstypes.TaskExecAdded: return runtime.TaskExecAddedEventTopic - case *eventsapi.TaskExecStarted: + case *eventstypes.TaskExecStarted: return runtime.TaskExecStartedEventTopic - case *eventsapi.TaskPaused: + case *eventstypes.TaskPaused: return runtime.TaskPausedEventTopic - case *eventsapi.TaskResumed: + case *eventstypes.TaskResumed: return runtime.TaskResumedEventTopic - case *eventsapi.TaskCheckpointed: + case *eventstypes.TaskCheckpointed: return runtime.TaskCheckpointedEventTopic default: logrus.Warnf("no topic for type %#v", e) diff --git a/vendor/github.com/containerd/containerd/linux/shim/service_linux.go 
b/vendor/github.com/containerd/containerd/linux/shim/service_linux.go index 1d078ba73f..bbe9d188a0 100644 --- a/vendor/github.com/containerd/containerd/linux/shim/service_linux.go +++ b/vendor/github.com/containerd/containerd/linux/shim/service_linux.go @@ -1,6 +1,7 @@ package shim import ( + "context" "io" "sync" "syscall" @@ -8,14 +9,13 @@ import ( "github.com/containerd/console" "github.com/containerd/fifo" "github.com/pkg/errors" - "golang.org/x/net/context" ) type linuxPlatform struct { epoller *console.Epoller } -func (p *linuxPlatform) copyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string, wg, cwg *sync.WaitGroup) (console.Console, error) { +func (p *linuxPlatform) CopyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string, wg, cwg *sync.WaitGroup) (console.Console, error) { if p.epoller == nil { return nil, errors.New("uninitialized epoller") } @@ -58,7 +58,7 @@ func (p *linuxPlatform) copyConsole(ctx context.Context, console console.Console return epollConsole, nil } -func (p *linuxPlatform) shutdownConsole(ctx context.Context, cons console.Console) error { +func (p *linuxPlatform) ShutdownConsole(ctx context.Context, cons console.Console) error { if p.epoller == nil { return errors.New("uninitialized epoller") } @@ -69,7 +69,7 @@ func (p *linuxPlatform) shutdownConsole(ctx context.Context, cons console.Consol return epollConsole.Shutdown(p.epoller.CloseConsole) } -func (p *linuxPlatform) close() error { +func (p *linuxPlatform) Close() error { return p.epoller.Close() } diff --git a/vendor/github.com/containerd/containerd/linux/shim/service_unix.go b/vendor/github.com/containerd/containerd/linux/shim/service_unix.go index c00b853067..d4419e56ae 100644 --- a/vendor/github.com/containerd/containerd/linux/shim/service_unix.go +++ b/vendor/github.com/containerd/containerd/linux/shim/service_unix.go @@ -3,19 +3,19 @@ package shim import ( + "context" "io" "sync" "syscall" "github.com/containerd/console" "github.com/containerd/fifo" - "golang.org/x/net/context" ) type unixPlatform struct { } -func (p *unixPlatform) copyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string, wg, cwg *sync.WaitGroup) (console.Console, error) { +func (p *unixPlatform) CopyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string, wg, cwg *sync.WaitGroup) (console.Console, error) { if stdin != "" { in, err := fifo.OpenFifo(ctx, stdin, syscall.O_RDONLY, 0) if err != nil { @@ -48,11 +48,11 @@ func (p *unixPlatform) copyConsole(ctx context.Context, console console.Console, return console, nil } -func (p *unixPlatform) shutdownConsole(ctx context.Context, cons console.Console) error { +func (p *unixPlatform) ShutdownConsole(ctx context.Context, cons console.Console) error { return nil } -func (p *unixPlatform) close() error { +func (p *unixPlatform) Close() error { return nil } diff --git a/vendor/github.com/containerd/containerd/linux/shim/v1/shim.pb.go b/vendor/github.com/containerd/containerd/linux/shim/v1/shim.pb.go index 831d091bbd..fd4e32e88e 100644 --- a/vendor/github.com/containerd/containerd/linux/shim/v1/shim.pb.go +++ b/vendor/github.com/containerd/containerd/linux/shim/v1/shim.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/containerd/containerd/linux/shim/v1/shim.proto -// DO NOT EDIT! /* Package shim is a generated protocol buffer package. 
@@ -36,24 +35,23 @@ import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" import google_protobuf "github.com/gogo/protobuf/types" -import google_protobuf1 "github.com/golang/protobuf/ptypes/empty" -import _ "github.com/gogo/protobuf/gogoproto" +import google_protobuf1 "github.com/gogo/protobuf/types" + +// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" import _ "github.com/gogo/protobuf/types" import containerd_types "github.com/containerd/containerd/api/types" import containerd_v1_types "github.com/containerd/containerd/api/types/task" import time "time" -import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" -) - import github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" import strings "strings" import reflect "reflect" +import context "context" +import github_com_stevvooe_ttrpc "github.com/stevvooe/ttrpc" + import io "io" // Reference imports to suppress errors if they are not otherwise used. @@ -283,578 +281,6 @@ func init() { proto.RegisterType((*WaitRequest)(nil), "containerd.runtime.linux.shim.v1.WaitRequest") proto.RegisterType((*WaitResponse)(nil), "containerd.runtime.linux.shim.v1.WaitResponse") } - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for Shim service - -type ShimClient interface { - // State returns shim and task state information. - State(ctx context.Context, in *StateRequest, opts ...grpc.CallOption) (*StateResponse, error) - Create(ctx context.Context, in *CreateTaskRequest, opts ...grpc.CallOption) (*CreateTaskResponse, error) - Start(ctx context.Context, in *StartRequest, opts ...grpc.CallOption) (*StartResponse, error) - Delete(ctx context.Context, in *google_protobuf1.Empty, opts ...grpc.CallOption) (*DeleteResponse, error) - DeleteProcess(ctx context.Context, in *DeleteProcessRequest, opts ...grpc.CallOption) (*DeleteResponse, error) - ListPids(ctx context.Context, in *ListPidsRequest, opts ...grpc.CallOption) (*ListPidsResponse, error) - Pause(ctx context.Context, in *google_protobuf1.Empty, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) - Resume(ctx context.Context, in *google_protobuf1.Empty, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) - Checkpoint(ctx context.Context, in *CheckpointTaskRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) - Kill(ctx context.Context, in *KillRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) - Exec(ctx context.Context, in *ExecProcessRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) - ResizePty(ctx context.Context, in *ResizePtyRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) - CloseIO(ctx context.Context, in *CloseIORequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) - // ShimInfo returns information about the shim. 
- ShimInfo(ctx context.Context, in *google_protobuf1.Empty, opts ...grpc.CallOption) (*ShimInfoResponse, error) - Update(ctx context.Context, in *UpdateTaskRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) - Wait(ctx context.Context, in *WaitRequest, opts ...grpc.CallOption) (*WaitResponse, error) -} - -type shimClient struct { - cc *grpc.ClientConn -} - -func NewShimClient(cc *grpc.ClientConn) ShimClient { - return &shimClient{cc} -} - -func (c *shimClient) State(ctx context.Context, in *StateRequest, opts ...grpc.CallOption) (*StateResponse, error) { - out := new(StateResponse) - err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/State", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *shimClient) Create(ctx context.Context, in *CreateTaskRequest, opts ...grpc.CallOption) (*CreateTaskResponse, error) { - out := new(CreateTaskResponse) - err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/Create", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *shimClient) Start(ctx context.Context, in *StartRequest, opts ...grpc.CallOption) (*StartResponse, error) { - out := new(StartResponse) - err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/Start", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *shimClient) Delete(ctx context.Context, in *google_protobuf1.Empty, opts ...grpc.CallOption) (*DeleteResponse, error) { - out := new(DeleteResponse) - err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/Delete", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *shimClient) DeleteProcess(ctx context.Context, in *DeleteProcessRequest, opts ...grpc.CallOption) (*DeleteResponse, error) { - out := new(DeleteResponse) - err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/DeleteProcess", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *shimClient) ListPids(ctx context.Context, in *ListPidsRequest, opts ...grpc.CallOption) (*ListPidsResponse, error) { - out := new(ListPidsResponse) - err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/ListPids", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *shimClient) Pause(ctx context.Context, in *google_protobuf1.Empty, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) { - out := new(google_protobuf1.Empty) - err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/Pause", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *shimClient) Resume(ctx context.Context, in *google_protobuf1.Empty, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) { - out := new(google_protobuf1.Empty) - err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/Resume", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *shimClient) Checkpoint(ctx context.Context, in *CheckpointTaskRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) { - out := new(google_protobuf1.Empty) - err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/Checkpoint", in, out, c.cc, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *shimClient) Kill(ctx context.Context, in *KillRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) { - out := new(google_protobuf1.Empty) - err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/Kill", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *shimClient) Exec(ctx context.Context, in *ExecProcessRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) { - out := new(google_protobuf1.Empty) - err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/Exec", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *shimClient) ResizePty(ctx context.Context, in *ResizePtyRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) { - out := new(google_protobuf1.Empty) - err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/ResizePty", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *shimClient) CloseIO(ctx context.Context, in *CloseIORequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) { - out := new(google_protobuf1.Empty) - err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/CloseIO", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *shimClient) ShimInfo(ctx context.Context, in *google_protobuf1.Empty, opts ...grpc.CallOption) (*ShimInfoResponse, error) { - out := new(ShimInfoResponse) - err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/ShimInfo", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *shimClient) Update(ctx context.Context, in *UpdateTaskRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) { - out := new(google_protobuf1.Empty) - err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/Update", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *shimClient) Wait(ctx context.Context, in *WaitRequest, opts ...grpc.CallOption) (*WaitResponse, error) { - out := new(WaitResponse) - err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/Wait", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// Server API for Shim service - -type ShimServer interface { - // State returns shim and task state information. - State(context.Context, *StateRequest) (*StateResponse, error) - Create(context.Context, *CreateTaskRequest) (*CreateTaskResponse, error) - Start(context.Context, *StartRequest) (*StartResponse, error) - Delete(context.Context, *google_protobuf1.Empty) (*DeleteResponse, error) - DeleteProcess(context.Context, *DeleteProcessRequest) (*DeleteResponse, error) - ListPids(context.Context, *ListPidsRequest) (*ListPidsResponse, error) - Pause(context.Context, *google_protobuf1.Empty) (*google_protobuf1.Empty, error) - Resume(context.Context, *google_protobuf1.Empty) (*google_protobuf1.Empty, error) - Checkpoint(context.Context, *CheckpointTaskRequest) (*google_protobuf1.Empty, error) - Kill(context.Context, *KillRequest) (*google_protobuf1.Empty, error) - Exec(context.Context, *ExecProcessRequest) (*google_protobuf1.Empty, error) - ResizePty(context.Context, *ResizePtyRequest) (*google_protobuf1.Empty, error) - CloseIO(context.Context, *CloseIORequest) (*google_protobuf1.Empty, error) - // ShimInfo returns information about the shim. 
- ShimInfo(context.Context, *google_protobuf1.Empty) (*ShimInfoResponse, error) - Update(context.Context, *UpdateTaskRequest) (*google_protobuf1.Empty, error) - Wait(context.Context, *WaitRequest) (*WaitResponse, error) -} - -func RegisterShimServer(s *grpc.Server, srv ShimServer) { - s.RegisterService(&_Shim_serviceDesc, srv) -} - -func _Shim_State_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(StateRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ShimServer).State(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.runtime.linux.shim.v1.Shim/State", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ShimServer).State(ctx, req.(*StateRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Shim_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateTaskRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ShimServer).Create(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.runtime.linux.shim.v1.Shim/Create", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ShimServer).Create(ctx, req.(*CreateTaskRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Shim_Start_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(StartRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ShimServer).Start(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.runtime.linux.shim.v1.Shim/Start", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ShimServer).Start(ctx, req.(*StartRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Shim_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(google_protobuf1.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ShimServer).Delete(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.runtime.linux.shim.v1.Shim/Delete", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ShimServer).Delete(ctx, req.(*google_protobuf1.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _Shim_DeleteProcess_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteProcessRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ShimServer).DeleteProcess(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.runtime.linux.shim.v1.Shim/DeleteProcess", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ShimServer).DeleteProcess(ctx, req.(*DeleteProcessRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Shim_ListPids_Handler(srv interface{}, ctx 
context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListPidsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ShimServer).ListPids(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.runtime.linux.shim.v1.Shim/ListPids", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ShimServer).ListPids(ctx, req.(*ListPidsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Shim_Pause_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(google_protobuf1.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ShimServer).Pause(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.runtime.linux.shim.v1.Shim/Pause", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ShimServer).Pause(ctx, req.(*google_protobuf1.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _Shim_Resume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(google_protobuf1.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ShimServer).Resume(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.runtime.linux.shim.v1.Shim/Resume", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ShimServer).Resume(ctx, req.(*google_protobuf1.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _Shim_Checkpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CheckpointTaskRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ShimServer).Checkpoint(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.runtime.linux.shim.v1.Shim/Checkpoint", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ShimServer).Checkpoint(ctx, req.(*CheckpointTaskRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Shim_Kill_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(KillRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ShimServer).Kill(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.runtime.linux.shim.v1.Shim/Kill", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ShimServer).Kill(ctx, req.(*KillRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Shim_Exec_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ExecProcessRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ShimServer).Exec(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.runtime.linux.shim.v1.Shim/Exec", - } - 
handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ShimServer).Exec(ctx, req.(*ExecProcessRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Shim_ResizePty_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ResizePtyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ShimServer).ResizePty(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.runtime.linux.shim.v1.Shim/ResizePty", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ShimServer).ResizePty(ctx, req.(*ResizePtyRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Shim_CloseIO_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CloseIORequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ShimServer).CloseIO(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.runtime.linux.shim.v1.Shim/CloseIO", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ShimServer).CloseIO(ctx, req.(*CloseIORequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Shim_ShimInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(google_protobuf1.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ShimServer).ShimInfo(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.runtime.linux.shim.v1.Shim/ShimInfo", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ShimServer).ShimInfo(ctx, req.(*google_protobuf1.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _Shim_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateTaskRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ShimServer).Update(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.runtime.linux.shim.v1.Shim/Update", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ShimServer).Update(ctx, req.(*UpdateTaskRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Shim_Wait_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(WaitRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ShimServer).Wait(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.runtime.linux.shim.v1.Shim/Wait", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ShimServer).Wait(ctx, req.(*WaitRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Shim_serviceDesc = grpc.ServiceDesc{ - ServiceName: "containerd.runtime.linux.shim.v1.Shim", - HandlerType: (*ShimServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "State", - Handler: 
_Shim_State_Handler, - }, - { - MethodName: "Create", - Handler: _Shim_Create_Handler, - }, - { - MethodName: "Start", - Handler: _Shim_Start_Handler, - }, - { - MethodName: "Delete", - Handler: _Shim_Delete_Handler, - }, - { - MethodName: "DeleteProcess", - Handler: _Shim_DeleteProcess_Handler, - }, - { - MethodName: "ListPids", - Handler: _Shim_ListPids_Handler, - }, - { - MethodName: "Pause", - Handler: _Shim_Pause_Handler, - }, - { - MethodName: "Resume", - Handler: _Shim_Resume_Handler, - }, - { - MethodName: "Checkpoint", - Handler: _Shim_Checkpoint_Handler, - }, - { - MethodName: "Kill", - Handler: _Shim_Kill_Handler, - }, - { - MethodName: "Exec", - Handler: _Shim_Exec_Handler, - }, - { - MethodName: "ResizePty", - Handler: _Shim_ResizePty_Handler, - }, - { - MethodName: "CloseIO", - Handler: _Shim_CloseIO_Handler, - }, - { - MethodName: "ShimInfo", - Handler: _Shim_ShimInfo_Handler, - }, - { - MethodName: "Update", - Handler: _Shim_Update_Handler, - }, - { - MethodName: "Wait", - Handler: _Shim_Wait_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "github.com/containerd/containerd/linux/shim/v1/shim.proto", -} - func (m *CreateTaskRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1575,24 +1001,6 @@ func (m *WaitResponse) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Shim(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Shim(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintShim(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -2169,6 +1577,280 @@ func valueToStringShim(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } + +type ShimService interface { + State(ctx context.Context, req *StateRequest) (*StateResponse, error) + Create(ctx context.Context, req *CreateTaskRequest) (*CreateTaskResponse, error) + Start(ctx context.Context, req *StartRequest) (*StartResponse, error) + Delete(ctx context.Context, req *google_protobuf1.Empty) (*DeleteResponse, error) + DeleteProcess(ctx context.Context, req *DeleteProcessRequest) (*DeleteResponse, error) + ListPids(ctx context.Context, req *ListPidsRequest) (*ListPidsResponse, error) + Pause(ctx context.Context, req *google_protobuf1.Empty) (*google_protobuf1.Empty, error) + Resume(ctx context.Context, req *google_protobuf1.Empty) (*google_protobuf1.Empty, error) + Checkpoint(ctx context.Context, req *CheckpointTaskRequest) (*google_protobuf1.Empty, error) + Kill(ctx context.Context, req *KillRequest) (*google_protobuf1.Empty, error) + Exec(ctx context.Context, req *ExecProcessRequest) (*google_protobuf1.Empty, error) + ResizePty(ctx context.Context, req *ResizePtyRequest) (*google_protobuf1.Empty, error) + CloseIO(ctx context.Context, req *CloseIORequest) (*google_protobuf1.Empty, error) + ShimInfo(ctx context.Context, req *google_protobuf1.Empty) (*ShimInfoResponse, error) + Update(ctx context.Context, req *UpdateTaskRequest) (*google_protobuf1.Empty, error) + Wait(ctx context.Context, req *WaitRequest) (*WaitResponse, 
error) +} + +func RegisterShimService(srv *github_com_stevvooe_ttrpc.Server, svc ShimService) { + srv.Register("containerd.runtime.linux.shim.v1.Shim", map[string]github_com_stevvooe_ttrpc.Method{ + "State": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req StateRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.State(ctx, &req) + }, + "Create": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req CreateTaskRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Create(ctx, &req) + }, + "Start": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req StartRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Start(ctx, &req) + }, + "Delete": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req google_protobuf1.Empty + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Delete(ctx, &req) + }, + "DeleteProcess": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req DeleteProcessRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.DeleteProcess(ctx, &req) + }, + "ListPids": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req ListPidsRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.ListPids(ctx, &req) + }, + "Pause": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req google_protobuf1.Empty + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Pause(ctx, &req) + }, + "Resume": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req google_protobuf1.Empty + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Resume(ctx, &req) + }, + "Checkpoint": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req CheckpointTaskRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Checkpoint(ctx, &req) + }, + "Kill": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req KillRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Kill(ctx, &req) + }, + "Exec": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req ExecProcessRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Exec(ctx, &req) + }, + "ResizePty": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req ResizePtyRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.ResizePty(ctx, &req) + }, + "CloseIO": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req CloseIORequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.CloseIO(ctx, &req) + }, + "ShimInfo": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req google_protobuf1.Empty + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.ShimInfo(ctx, &req) + }, + "Update": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req UpdateTaskRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + 
return svc.Update(ctx, &req) + }, + "Wait": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req WaitRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Wait(ctx, &req) + }, + }) +} + +type shimClient struct { + client *github_com_stevvooe_ttrpc.Client +} + +func NewShimClient(client *github_com_stevvooe_ttrpc.Client) ShimService { + return &shimClient{ + client: client, + } +} + +func (c *shimClient) State(ctx context.Context, req *StateRequest) (*StateResponse, error) { + var resp StateResponse + if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "State", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *shimClient) Create(ctx context.Context, req *CreateTaskRequest) (*CreateTaskResponse, error) { + var resp CreateTaskResponse + if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "Create", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *shimClient) Start(ctx context.Context, req *StartRequest) (*StartResponse, error) { + var resp StartResponse + if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "Start", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *shimClient) Delete(ctx context.Context, req *google_protobuf1.Empty) (*DeleteResponse, error) { + var resp DeleteResponse + if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "Delete", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *shimClient) DeleteProcess(ctx context.Context, req *DeleteProcessRequest) (*DeleteResponse, error) { + var resp DeleteResponse + if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "DeleteProcess", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *shimClient) ListPids(ctx context.Context, req *ListPidsRequest) (*ListPidsResponse, error) { + var resp ListPidsResponse + if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "ListPids", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *shimClient) Pause(ctx context.Context, req *google_protobuf1.Empty) (*google_protobuf1.Empty, error) { + var resp google_protobuf1.Empty + if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "Pause", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *shimClient) Resume(ctx context.Context, req *google_protobuf1.Empty) (*google_protobuf1.Empty, error) { + var resp google_protobuf1.Empty + if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "Resume", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *shimClient) Checkpoint(ctx context.Context, req *CheckpointTaskRequest) (*google_protobuf1.Empty, error) { + var resp google_protobuf1.Empty + if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "Checkpoint", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *shimClient) Kill(ctx context.Context, req *KillRequest) (*google_protobuf1.Empty, error) { + var resp google_protobuf1.Empty + if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "Kill", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *shimClient) Exec(ctx context.Context, req *ExecProcessRequest) (*google_protobuf1.Empty, error) { + var resp google_protobuf1.Empty + if err := c.client.Call(ctx, 
"containerd.runtime.linux.shim.v1.Shim", "Exec", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *shimClient) ResizePty(ctx context.Context, req *ResizePtyRequest) (*google_protobuf1.Empty, error) { + var resp google_protobuf1.Empty + if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "ResizePty", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *shimClient) CloseIO(ctx context.Context, req *CloseIORequest) (*google_protobuf1.Empty, error) { + var resp google_protobuf1.Empty + if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "CloseIO", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *shimClient) ShimInfo(ctx context.Context, req *google_protobuf1.Empty) (*ShimInfoResponse, error) { + var resp ShimInfoResponse + if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "ShimInfo", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *shimClient) Update(ctx context.Context, req *UpdateTaskRequest) (*google_protobuf1.Empty, error) { + var resp google_protobuf1.Empty + if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "Update", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *shimClient) Wait(ctx context.Context, req *WaitRequest) (*WaitResponse, error) { + var resp WaitResponse + if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "Wait", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} func (m *CreateTaskRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -4674,76 +4356,76 @@ func init() { } var fileDescriptorShim = []byte{ - // 1131 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x57, 0x4f, 0x4f, 0x1b, 0x47, - 0x14, 0x67, 0x8d, 0xff, 0x3e, 0xc7, 0x14, 0xa6, 0x84, 0x6e, 0x1c, 0xc9, 0x58, 0x2b, 0x35, 0xa2, - 0xaa, 0xb2, 0x2e, 0xa6, 0x4a, 0x9a, 0x56, 0x8a, 0x04, 0x24, 0xaa, 0x50, 0x8b, 0x82, 0x16, 0xd2, - 0x54, 0xad, 0x2a, 0xb4, 0x78, 0x07, 0x7b, 0x84, 0xbd, 0xb3, 0xd9, 0x99, 0xa5, 0xd0, 0x53, 0x4f, - 0x3d, 0xf7, 0xe3, 0xf4, 0x23, 0x70, 0xc8, 0xa1, 0xc7, 0x9e, 0xd2, 0xc6, 0xf7, 0x7e, 0x87, 0x68, - 0xfe, 0x18, 0xaf, 0x6d, 0x36, 0xbb, 0xe6, 0x82, 0xf7, 0xcd, 0xfc, 0xde, 0xcc, 0x9b, 0xf7, 0xfb, - 0xcd, 0x7b, 0x03, 0x3c, 0xe9, 0x12, 0xde, 0x8b, 0x4e, 0xec, 0x0e, 0x1d, 0xb4, 0x3a, 0xd4, 0xe7, - 0x2e, 0xf1, 0x71, 0xe8, 0xc5, 0x3f, 0xfb, 0xc4, 0x8f, 0x2e, 0x5a, 0xac, 0x47, 0x06, 0xad, 0xf3, - 0x4d, 0xf9, 0x6b, 0x07, 0x21, 0xe5, 0x14, 0x35, 0xc7, 0x20, 0x3b, 0x8c, 0x7c, 0x4e, 0x06, 0xd8, - 0x96, 0x60, 0x5b, 0x82, 0xce, 0x37, 0xeb, 0xf7, 0xba, 0x94, 0x76, 0xfb, 0xb8, 0x25, 0xf1, 0x27, - 0xd1, 0x69, 0xcb, 0xf5, 0x2f, 0x95, 0x73, 0xfd, 0xfe, 0xf4, 0x14, 0x1e, 0x04, 0x7c, 0x34, 0xb9, - 0xda, 0xa5, 0x5d, 0x2a, 0x3f, 0x5b, 0xe2, 0x4b, 0x8f, 0xae, 0x4f, 0xbb, 0x88, 0x1d, 0x19, 0x77, - 0x07, 0x81, 0x06, 0x3c, 0x4a, 0x3d, 0x8b, 0x1b, 0x90, 0x16, 0xbf, 0x0c, 0x30, 0x6b, 0x0d, 0x68, - 0xe4, 0x73, 0xed, 0xf7, 0xf5, 0x1c, 0x7e, 0xdc, 0x65, 0x67, 0xf2, 0x8f, 0xf2, 0xb5, 0xfe, 0xcf, - 0xc1, 0xca, 0x6e, 0x88, 0x5d, 0x8e, 0x8f, 0x5c, 0x76, 0xe6, 0xe0, 0xd7, 0x11, 0x66, 0x1c, 0xad, - 0x41, 0x8e, 0x78, 0xa6, 0xd1, 0x34, 0x36, 0x2a, 0x3b, 0xc5, 0xe1, 0xdb, 0xf5, 0xdc, 0xde, 0x33, - 0x27, 0x47, 0x3c, 0xb4, 0x06, 0xc5, 0x93, 0xc8, 0xf7, 0xfa, 0xd8, 0xcc, 0x89, 0x39, 0x47, 0x5b, - 0xc8, 0x84, 0x92, 0xce, 0xa0, 0xb9, 0x28, 0x27, 0x46, 0x26, 0x6a, 0x41, 0x31, 0xa4, 0x94, 0x9f, - 
0x32, 0x33, 0xdf, 0x5c, 0xdc, 0xa8, 0xb6, 0x3f, 0xb1, 0x63, 0x59, 0x97, 0x21, 0xd9, 0xfb, 0xe2, - 0x28, 0x8e, 0x86, 0xa1, 0x3a, 0x94, 0x39, 0x0e, 0x07, 0xc4, 0x77, 0xfb, 0x66, 0xa1, 0x69, 0x6c, - 0x94, 0x9d, 0x6b, 0x1b, 0xad, 0x42, 0x81, 0x71, 0x8f, 0xf8, 0x66, 0x51, 0x6e, 0xa2, 0x0c, 0x11, - 0x14, 0xe3, 0x1e, 0x8d, 0xb8, 0x59, 0x52, 0x41, 0x29, 0x4b, 0x8f, 0xe3, 0x30, 0x34, 0xcb, 0xd7, - 0xe3, 0x38, 0x0c, 0x51, 0x03, 0xa0, 0xd3, 0xc3, 0x9d, 0xb3, 0x80, 0x12, 0x9f, 0x9b, 0x15, 0x39, - 0x17, 0x1b, 0x41, 0x9f, 0xc3, 0x4a, 0xe0, 0x86, 0xd8, 0xe7, 0xc7, 0x31, 0x18, 0x48, 0xd8, 0xb2, - 0x9a, 0xd8, 0x1d, 0x83, 0x6d, 0x28, 0xd1, 0x80, 0x13, 0xea, 0x33, 0xb3, 0xda, 0x34, 0x36, 0xaa, - 0xed, 0x55, 0x5b, 0xd1, 0x6c, 0x8f, 0x68, 0xb6, 0xb7, 0xfd, 0x4b, 0x67, 0x04, 0xb2, 0x1e, 0x00, - 0x8a, 0xa7, 0x9b, 0x05, 0xd4, 0x67, 0x18, 0x2d, 0xc3, 0x62, 0xa0, 0x13, 0x5e, 0x73, 0xc4, 0xa7, - 0xf5, 0x87, 0x01, 0x4b, 0xcf, 0x70, 0x1f, 0x73, 0x9c, 0x0c, 0x42, 0xeb, 0x50, 0xc5, 0x17, 0x84, - 0x1f, 0x33, 0xee, 0xf2, 0x88, 0x49, 0x4e, 0x6a, 0x0e, 0x88, 0xa1, 0x43, 0x39, 0x82, 0xb6, 0xa1, - 0x22, 0x2c, 0xec, 0x1d, 0xbb, 0x5c, 0x32, 0x53, 0x6d, 0xd7, 0x67, 0xe2, 0x3b, 0x1a, 0xc9, 0x70, - 0xa7, 0x7c, 0xf5, 0x76, 0x7d, 0xe1, 0xcf, 0x7f, 0xd7, 0x0d, 0xa7, 0xac, 0xdc, 0xb6, 0xb9, 0x65, - 0xc3, 0xaa, 0x8a, 0xe3, 0x20, 0xa4, 0x1d, 0xcc, 0x58, 0x8a, 0x44, 0xac, 0xbf, 0x0c, 0x40, 0xcf, - 0x2f, 0x70, 0x27, 0x1b, 0x7c, 0x82, 0xee, 0x5c, 0x12, 0xdd, 0x8b, 0x37, 0xd3, 0x9d, 0x4f, 0xa0, - 0xbb, 0x30, 0x41, 0xf7, 0x06, 0xe4, 0x59, 0x80, 0x3b, 0x52, 0x33, 0x49, 0xf4, 0x48, 0x84, 0x75, - 0x17, 0x3e, 0x9e, 0x88, 0x5c, 0xe5, 0xdd, 0xfa, 0x11, 0x96, 0x1d, 0xcc, 0xc8, 0x6f, 0xf8, 0x80, - 0x5f, 0xa6, 0x1d, 0x67, 0x15, 0x0a, 0xbf, 0x12, 0x8f, 0xf7, 0x34, 0x17, 0xca, 0x10, 0xa1, 0xf5, - 0x30, 0xe9, 0xf6, 0x14, 0x07, 0x35, 0x47, 0x5b, 0xd6, 0x03, 0xb8, 0x23, 0x88, 0xc2, 0x69, 0x39, - 0x7d, 0x93, 0x83, 0x9a, 0x06, 0x6a, 0x2d, 0xcc, 0x7b, 0x41, 0xb5, 0x76, 0x16, 0xc7, 0xda, 0xd9, - 0x12, 0xe9, 0x92, 0xb2, 0x11, 0x69, 0x5c, 0x6a, 0xdf, 0x8f, 0x5f, 0xcc, 0xf3, 0x4d, 0x7d, 0x37, - 0x95, 0x8e, 0x1c, 0x0d, 0x1d, 0x33, 0x52, 0xb8, 0x99, 0x91, 0x62, 0x02, 0x23, 0xa5, 0x09, 0x46, - 0xe2, 0x9c, 0x97, 0xa7, 0x38, 0x9f, 0x92, 0x74, 0xe5, 0xc3, 0x92, 0x86, 0x5b, 0x49, 0xfa, 0x05, - 0x54, 0xbf, 0x23, 0xfd, 0x7e, 0x86, 0x62, 0xc7, 0x48, 0x77, 0x24, 0xcc, 0x9a, 0xa3, 0x2d, 0x91, - 0x4b, 0xb7, 0xdf, 0x97, 0xb9, 0x2c, 0x3b, 0xe2, 0xd3, 0x7a, 0x0a, 0x4b, 0xbb, 0x7d, 0xca, 0xf0, - 0xde, 0x8b, 0x0c, 0xfa, 0x50, 0x09, 0x54, 0x5a, 0x57, 0x86, 0xf5, 0x19, 0x7c, 0xf4, 0x3d, 0x61, - 0xfc, 0x80, 0x78, 0xa9, 0xd7, 0xcb, 0x81, 0xe5, 0x31, 0x54, 0x8b, 0xe1, 0x29, 0x54, 0x02, 0xa5, - 0x59, 0xcc, 0x4c, 0x43, 0x96, 0xd9, 0xe6, 0x8d, 0x6c, 0x6a, 0x65, 0xef, 0xf9, 0xa7, 0xd4, 0x19, - 0xbb, 0x58, 0x3f, 0xc3, 0xdd, 0x71, 0x45, 0x8b, 0xb7, 0x01, 0x04, 0xf9, 0xc0, 0xe5, 0x3d, 0x15, - 0x86, 0x23, 0xbf, 0xe3, 0x05, 0x2f, 0x97, 0xa5, 0xe0, 0x3d, 0x84, 0xe5, 0xc3, 0x1e, 0x19, 0xc8, - 0x3d, 0x47, 0x01, 0xdf, 0x83, 0xb2, 0x68, 0xb1, 0xc7, 0xe3, 0x72, 0x56, 0x12, 0xf6, 0x01, 0xf1, - 0xac, 0x6f, 0x61, 0xe5, 0x65, 0xe0, 0x4d, 0xb5, 0xa3, 0x36, 0x54, 0x42, 0xcc, 0x68, 0x14, 0x76, - 0xe4, 0x01, 0x93, 0x77, 0x1d, 0xc3, 0xf4, 0xdd, 0x0a, 0x79, 0x5a, 0x42, 0x9f, 0xc8, 0xab, 0x25, - 0x70, 0x29, 0x57, 0x4b, 0x5f, 0xa1, 0xdc, 0xb8, 0x46, 0x7f, 0x0a, 0xd5, 0x57, 0x2e, 0x49, 0xdd, - 0x21, 0x84, 0x3b, 0x0a, 0xa6, 0x37, 0x98, 0x92, 0xb8, 0xf1, 0x61, 0x89, 0xe7, 0x6e, 0x23, 0xf1, - 0xf6, 0x9b, 0x2a, 0xe4, 0x45, 0xda, 0x51, 0x0f, 0x0a, 0xb2, 0x72, 0x20, 0xdb, 0x4e, 0x7b, 0xee, - 0xd8, 0xf1, 0x5a, 0x54, 
0x6f, 0x65, 0xc6, 0xeb, 0x63, 0x31, 0x28, 0xaa, 0xce, 0x86, 0xb6, 0xd2, - 0x5d, 0x67, 0x9e, 0x1c, 0xf5, 0x2f, 0xe7, 0x73, 0xd2, 0x9b, 0xaa, 0xe3, 0x85, 0x3c, 0xe3, 0xf1, - 0xae, 0xe5, 0x90, 0xf1, 0x78, 0x31, 0x59, 0x38, 0x50, 0x54, 0x7d, 0x10, 0xad, 0xcd, 0x70, 0xf1, - 0x5c, 0xbc, 0xfd, 0xea, 0x5f, 0xa4, 0x2f, 0x39, 0xd5, 0xd1, 0x2f, 0xa1, 0x36, 0xd1, 0x5b, 0xd1, - 0xa3, 0xac, 0x4b, 0x4c, 0x76, 0xd7, 0x5b, 0x6c, 0xfd, 0x1a, 0xca, 0xa3, 0x3a, 0x82, 0x36, 0xd3, - 0xbd, 0xa7, 0xca, 0x53, 0xbd, 0x3d, 0x8f, 0x8b, 0xde, 0xf2, 0x31, 0x14, 0x0e, 0xdc, 0x88, 0x25, - 0x27, 0x30, 0x61, 0x1c, 0x7d, 0x05, 0x45, 0x07, 0xb3, 0x68, 0x30, 0xbf, 0xe7, 0x2f, 0x00, 0xb1, - 0xb7, 0xda, 0xe3, 0x0c, 0x12, 0xbb, 0xa9, 0x0e, 0x26, 0x2e, 0xbf, 0x0f, 0x79, 0xd1, 0x48, 0xd0, - 0xc3, 0xf4, 0x85, 0x63, 0x0d, 0x27, 0x71, 0xb9, 0x23, 0xc8, 0x8b, 0xf7, 0x07, 0xca, 0x70, 0x15, - 0x66, 0x5f, 0x58, 0x89, 0xab, 0xbe, 0x82, 0xca, 0xf5, 0xf3, 0x05, 0x65, 0xe0, 0x6d, 0xfa, 0xad, - 0x93, 0xb8, 0xf0, 0x21, 0x94, 0x74, 0xd7, 0x43, 0x19, 0xf4, 0x37, 0xd9, 0x20, 0x13, 0x17, 0xfd, - 0x01, 0xca, 0xa3, 0x76, 0x91, 0xc8, 0x76, 0x86, 0x43, 0xcc, 0xb4, 0x9c, 0x97, 0x50, 0x54, 0x7d, - 0x25, 0x4b, 0x75, 0x9a, 0xe9, 0x40, 0x89, 0xe1, 0x62, 0xc8, 0x8b, 0xda, 0x9e, 0x45, 0x01, 0xb1, - 0x56, 0x51, 0xb7, 0xb3, 0xc2, 0x55, 0xf4, 0x3b, 0xfb, 0x57, 0xef, 0x1a, 0x0b, 0xff, 0xbc, 0x6b, - 0x2c, 0xfc, 0x3e, 0x6c, 0x18, 0x57, 0xc3, 0x86, 0xf1, 0xf7, 0xb0, 0x61, 0xfc, 0x37, 0x6c, 0x18, - 0x3f, 0x6d, 0xcd, 0xf7, 0xff, 0xef, 0x37, 0xe2, 0xf7, 0xa4, 0x28, 0x4f, 0xb1, 0xf5, 0x3e, 0x00, - 0x00, 0xff, 0xff, 0xad, 0x60, 0x08, 0x28, 0x3d, 0x0f, 0x00, 0x00, + // 1133 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x57, 0xcd, 0x6e, 0xdb, 0x46, + 0x10, 0x36, 0x69, 0xfd, 0x8e, 0x22, 0xd7, 0xde, 0x3a, 0x2e, 0xa3, 0x00, 0xb2, 0x40, 0xa0, 0x81, + 0x8b, 0x22, 0x54, 0x2d, 0x17, 0x49, 0xd3, 0x02, 0x01, 0x6c, 0x27, 0x28, 0x8c, 0xd6, 0x88, 0x41, + 0x3b, 0x4d, 0xd0, 0xa2, 0x30, 0x68, 0x71, 0x2d, 0x2d, 0x2c, 0x91, 0x0c, 0x77, 0xe9, 0xda, 0x3d, + 0xf5, 0xd4, 0x73, 0x1f, 0xa7, 0x8f, 0xe0, 0x43, 0x0e, 0x3d, 0xf6, 0x94, 0x36, 0xba, 0xf7, 0x1d, + 0x8a, 0xfd, 0x91, 0x49, 0x49, 0x66, 0x48, 0xf9, 0x62, 0x71, 0x76, 0xbf, 0xd9, 0x9d, 0x9d, 0xef, + 0xdb, 0x99, 0x35, 0x3c, 0xe9, 0x11, 0xd6, 0x8f, 0x4e, 0xac, 0xae, 0x3f, 0x6c, 0x77, 0x7d, 0x8f, + 0x39, 0xc4, 0xc3, 0xa1, 0x9b, 0xfc, 0x1c, 0x10, 0x2f, 0xba, 0x68, 0xd3, 0x3e, 0x19, 0xb6, 0xcf, + 0x37, 0xc5, 0xaf, 0x15, 0x84, 0x3e, 0xf3, 0x51, 0x2b, 0x06, 0x59, 0x61, 0xe4, 0x31, 0x32, 0xc4, + 0x96, 0x00, 0x5b, 0x02, 0x74, 0xbe, 0xd9, 0xb8, 0xd7, 0xf3, 0xfd, 0xde, 0x00, 0xb7, 0x05, 0xfe, + 0x24, 0x3a, 0x6d, 0x3b, 0xde, 0xa5, 0x74, 0x6e, 0xdc, 0x9f, 0x9e, 0xc2, 0xc3, 0x80, 0x8d, 0x27, + 0x57, 0x7b, 0x7e, 0xcf, 0x17, 0x9f, 0x6d, 0xfe, 0xa5, 0x46, 0xd7, 0xa7, 0x5d, 0xf8, 0x8e, 0x94, + 0x39, 0xc3, 0x40, 0x01, 0x1e, 0x65, 0x9e, 0xc5, 0x09, 0x48, 0x9b, 0x5d, 0x06, 0x98, 0xb6, 0x87, + 0x7e, 0xe4, 0x31, 0xe5, 0xf7, 0xf5, 0x1c, 0x7e, 0xcc, 0xa1, 0x67, 0xe2, 0x8f, 0xf4, 0x35, 0xff, + 0xd3, 0x61, 0x65, 0x37, 0xc4, 0x0e, 0xc3, 0x47, 0x0e, 0x3d, 0xb3, 0xf1, 0x9b, 0x08, 0x53, 0x86, + 0xd6, 0x40, 0x27, 0xae, 0xa1, 0xb5, 0xb4, 0x8d, 0xea, 0x4e, 0x69, 0xf4, 0x6e, 0x5d, 0xdf, 0x7b, + 0x66, 0xeb, 0xc4, 0x45, 0x6b, 0x50, 0x3a, 0x89, 0x3c, 0x77, 0x80, 0x0d, 0x9d, 0xcf, 0xd9, 0xca, + 0x42, 0x06, 0x94, 0x55, 0x06, 0x8d, 0x45, 0x31, 0x31, 0x36, 0x51, 0x1b, 0x4a, 0xa1, 0xef, 0xb3, + 0x53, 0x6a, 0x14, 0x5a, 0x8b, 0x1b, 0xb5, 0xce, 0x27, 0x56, 0x22, 0xeb, 0x22, 0x24, 0x6b, 0x9f, + 0x1f, 0xc5, 0x56, 0x30, 0xd4, 
0x80, 0x0a, 0xc3, 0xe1, 0x90, 0x78, 0xce, 0xc0, 0x28, 0xb6, 0xb4, + 0x8d, 0x8a, 0x7d, 0x6d, 0xa3, 0x55, 0x28, 0x52, 0xe6, 0x12, 0xcf, 0x28, 0x89, 0x4d, 0xa4, 0xc1, + 0x83, 0xa2, 0xcc, 0xf5, 0x23, 0x66, 0x94, 0x65, 0x50, 0xd2, 0x52, 0xe3, 0x38, 0x0c, 0x8d, 0xca, + 0xf5, 0x38, 0x0e, 0x43, 0xd4, 0x04, 0xe8, 0xf6, 0x71, 0xf7, 0x2c, 0xf0, 0x89, 0xc7, 0x8c, 0xaa, + 0x98, 0x4b, 0x8c, 0xa0, 0xcf, 0x61, 0x25, 0x70, 0x42, 0xec, 0xb1, 0xe3, 0x04, 0x0c, 0x04, 0x6c, + 0x59, 0x4e, 0xec, 0xc6, 0x60, 0x0b, 0xca, 0x7e, 0xc0, 0x88, 0xef, 0x51, 0xa3, 0xd6, 0xd2, 0x36, + 0x6a, 0x9d, 0x55, 0x4b, 0xd2, 0x6c, 0x8d, 0x69, 0xb6, 0xb6, 0xbd, 0x4b, 0x7b, 0x0c, 0x32, 0x1f, + 0x00, 0x4a, 0xa6, 0x9b, 0x06, 0xbe, 0x47, 0x31, 0x5a, 0x86, 0xc5, 0x40, 0x25, 0xbc, 0x6e, 0xf3, + 0x4f, 0xf3, 0x77, 0x0d, 0x96, 0x9e, 0xe1, 0x01, 0x66, 0x38, 0x1d, 0x84, 0xd6, 0xa1, 0x86, 0x2f, + 0x08, 0x3b, 0xa6, 0xcc, 0x61, 0x11, 0x15, 0x9c, 0xd4, 0x6d, 0xe0, 0x43, 0x87, 0x62, 0x04, 0x6d, + 0x43, 0x95, 0x5b, 0xd8, 0x3d, 0x76, 0x98, 0x60, 0xa6, 0xd6, 0x69, 0xcc, 0xc4, 0x77, 0x34, 0x96, + 0xe1, 0x4e, 0xe5, 0xea, 0xdd, 0xfa, 0xc2, 0x1f, 0xff, 0xac, 0x6b, 0x76, 0x45, 0xba, 0x6d, 0x33, + 0xd3, 0x82, 0x55, 0x19, 0xc7, 0x41, 0xe8, 0x77, 0x31, 0xa5, 0x19, 0x12, 0x31, 0xff, 0xd4, 0x00, + 0x3d, 0xbf, 0xc0, 0xdd, 0x7c, 0xf0, 0x09, 0xba, 0xf5, 0x34, 0xba, 0x17, 0x6f, 0xa6, 0xbb, 0x90, + 0x42, 0x77, 0x71, 0x82, 0xee, 0x0d, 0x28, 0xd0, 0x00, 0x77, 0x85, 0x66, 0xd2, 0xe8, 0x11, 0x08, + 0xf3, 0x2e, 0x7c, 0x3c, 0x11, 0xb9, 0xcc, 0xbb, 0xf9, 0x1a, 0x96, 0x6d, 0x4c, 0xc9, 0xaf, 0xf8, + 0x80, 0x5d, 0x66, 0x1d, 0x67, 0x15, 0x8a, 0xbf, 0x10, 0x97, 0xf5, 0x15, 0x17, 0xd2, 0xe0, 0xa1, + 0xf5, 0x31, 0xe9, 0xf5, 0x25, 0x07, 0x75, 0x5b, 0x59, 0xe6, 0x03, 0xb8, 0xc3, 0x89, 0xc2, 0x59, + 0x39, 0x7d, 0xab, 0x43, 0x5d, 0x01, 0x95, 0x16, 0xe6, 0xbd, 0xa0, 0x4a, 0x3b, 0x8b, 0xb1, 0x76, + 0xb6, 0x78, 0xba, 0x84, 0x6c, 0x78, 0x1a, 0x97, 0x3a, 0xf7, 0x93, 0x17, 0xf3, 0x7c, 0x53, 0xdd, + 0x4d, 0xa9, 0x23, 0x5b, 0x41, 0x63, 0x46, 0x8a, 0x37, 0x33, 0x52, 0x4a, 0x61, 0xa4, 0x3c, 0xc1, + 0x48, 0x92, 0xf3, 0xca, 0x14, 0xe7, 0x53, 0x92, 0xae, 0x7e, 0x58, 0xd2, 0x70, 0x2b, 0x49, 0xbf, + 0x80, 0xda, 0x77, 0x64, 0x30, 0xc8, 0x51, 0xec, 0x28, 0xe9, 0x8d, 0x85, 0x59, 0xb7, 0x95, 0xc5, + 0x73, 0xe9, 0x0c, 0x06, 0x22, 0x97, 0x15, 0x9b, 0x7f, 0x9a, 0x4f, 0x61, 0x69, 0x77, 0xe0, 0x53, + 0xbc, 0xf7, 0x22, 0x87, 0x3e, 0x64, 0x02, 0xa5, 0xd6, 0xa5, 0x61, 0x7e, 0x06, 0x1f, 0x7d, 0x4f, + 0x28, 0x3b, 0x20, 0x6e, 0xe6, 0xf5, 0xb2, 0x61, 0x39, 0x86, 0x2a, 0x31, 0x3c, 0x85, 0x6a, 0x20, + 0x35, 0x8b, 0xa9, 0xa1, 0x89, 0x32, 0xdb, 0xba, 0x91, 0x4d, 0xa5, 0xec, 0x3d, 0xef, 0xd4, 0xb7, + 0x63, 0x17, 0xf3, 0x27, 0xb8, 0x1b, 0x57, 0xb4, 0x64, 0x1b, 0x40, 0x50, 0x08, 0x1c, 0xd6, 0x97, + 0x61, 0xd8, 0xe2, 0x3b, 0x59, 0xf0, 0xf4, 0x3c, 0x05, 0xef, 0x21, 0x2c, 0x1f, 0xf6, 0xc9, 0x50, + 0xec, 0x39, 0x0e, 0xf8, 0x1e, 0x54, 0x78, 0x8b, 0x3d, 0x8e, 0xcb, 0x59, 0x99, 0xdb, 0x07, 0xc4, + 0x35, 0xbf, 0x85, 0x95, 0x97, 0x81, 0x3b, 0xd5, 0x8e, 0x3a, 0x50, 0x0d, 0x31, 0xf5, 0xa3, 0xb0, + 0x2b, 0x0e, 0x98, 0xbe, 0x6b, 0x0c, 0x53, 0x77, 0x2b, 0x64, 0x59, 0x09, 0x7d, 0x22, 0xae, 0x16, + 0xc7, 0x65, 0x5c, 0x2d, 0x75, 0x85, 0xf4, 0xb8, 0x46, 0x7f, 0x0a, 0xb5, 0x57, 0x0e, 0xc9, 0xdc, + 0x21, 0x84, 0x3b, 0x12, 0xa6, 0x36, 0x98, 0x92, 0xb8, 0xf6, 0x61, 0x89, 0xeb, 0xb7, 0x91, 0x78, + 0xe7, 0x6d, 0x0d, 0x0a, 0x3c, 0xed, 0xa8, 0x0f, 0x45, 0x51, 0x39, 0x90, 0x65, 0x65, 0x3d, 0x77, + 0xac, 0x64, 0x2d, 0x6a, 0xb4, 0x73, 0xe3, 0xd5, 0xb1, 0x28, 0x94, 0x64, 0x67, 0x43, 0x5b, 0xd9, + 0xae, 0x33, 0x4f, 0x8e, 0xc6, 0x97, 0xf3, 0x39, 0xa9, 
0x4d, 0xe5, 0xf1, 0x42, 0x96, 0xf3, 0x78, + 0xd7, 0x72, 0xc8, 0x79, 0xbc, 0x84, 0x2c, 0x6c, 0x28, 0xc9, 0x3e, 0x88, 0xd6, 0x66, 0xb8, 0x78, + 0xce, 0xdf, 0x7e, 0x8d, 0x2f, 0xb2, 0x97, 0x9c, 0xea, 0xe8, 0x97, 0x50, 0x9f, 0xe8, 0xad, 0xe8, + 0x51, 0xde, 0x25, 0x26, 0xbb, 0xeb, 0x2d, 0xb6, 0x7e, 0x03, 0x95, 0x71, 0x1d, 0x41, 0x9b, 0xd9, + 0xde, 0x53, 0xe5, 0xa9, 0xd1, 0x99, 0xc7, 0x45, 0x6d, 0xf9, 0x18, 0x8a, 0x07, 0x4e, 0x44, 0xd3, + 0x13, 0x98, 0x32, 0x8e, 0xbe, 0x82, 0x92, 0x8d, 0x69, 0x34, 0x9c, 0xdf, 0xf3, 0x67, 0x80, 0xc4, + 0x5b, 0xed, 0x71, 0x0e, 0x89, 0xdd, 0x54, 0x07, 0x53, 0x97, 0xdf, 0x87, 0x02, 0x6f, 0x24, 0xe8, + 0x61, 0xf6, 0xc2, 0x89, 0x86, 0x93, 0xba, 0xdc, 0x11, 0x14, 0xf8, 0xfb, 0x03, 0xe5, 0xb8, 0x0a, + 0xb3, 0x2f, 0xac, 0xd4, 0x55, 0x5f, 0x41, 0xf5, 0xfa, 0xf9, 0x82, 0x72, 0xf0, 0x36, 0xfd, 0xd6, + 0x49, 0x5d, 0xf8, 0x10, 0xca, 0xaa, 0xeb, 0xa1, 0x1c, 0xfa, 0x9b, 0x6c, 0x90, 0xa9, 0x8b, 0xfe, + 0x00, 0x95, 0x71, 0xbb, 0x48, 0x65, 0x3b, 0xc7, 0x21, 0x66, 0x5a, 0xce, 0x4b, 0x28, 0xc9, 0xbe, + 0x92, 0xa7, 0x3a, 0xcd, 0x74, 0xa0, 0xd4, 0x70, 0x31, 0x14, 0x78, 0x6d, 0xcf, 0xa3, 0x80, 0x44, + 0xab, 0x68, 0x58, 0x79, 0xe1, 0x32, 0xfa, 0x9d, 0xfd, 0xab, 0xf7, 0xcd, 0x85, 0xbf, 0xdf, 0x37, + 0x17, 0x7e, 0x1b, 0x35, 0xb5, 0xab, 0x51, 0x53, 0xfb, 0x6b, 0xd4, 0xd4, 0xfe, 0x1d, 0x35, 0xb5, + 0x1f, 0xb7, 0xe6, 0xfb, 0xff, 0xf7, 0x1b, 0xfe, 0xfb, 0x5a, 0x3f, 0x29, 0x89, 0x73, 0x6c, 0xfd, + 0x1f, 0x00, 0x00, 0xff, 0xff, 0x4d, 0xd0, 0xe6, 0x46, 0x3f, 0x0f, 0x00, 0x00, } diff --git a/vendor/github.com/containerd/containerd/linux/shim/v1/shim.proto b/vendor/github.com/containerd/containerd/linux/shim/v1/shim.proto index 8d8af95785..6de8f1382e 100644 --- a/vendor/github.com/containerd/containerd/linux/shim/v1/shim.proto +++ b/vendor/github.com/containerd/containerd/linux/shim/v1/shim.proto @@ -4,7 +4,7 @@ package containerd.runtime.linux.shim.v1; import "google/protobuf/any.proto"; import "google/protobuf/empty.proto"; -import "gogoproto/gogo.proto"; +import weak "gogoproto/gogo.proto"; import "google/protobuf/timestamp.proto"; import "github.com/containerd/containerd/api/types/mount.proto"; import "github.com/containerd/containerd/api/types/task/task.proto"; diff --git a/vendor/github.com/containerd/containerd/linux/task.go b/vendor/github.com/containerd/containerd/linux/task.go index 268e91a94a..85327cac30 100644 --- a/vendor/github.com/containerd/containerd/linux/task.go +++ b/vendor/github.com/containerd/containerd/linux/task.go @@ -4,30 +4,38 @@ package linux import ( "context" + "sync" "github.com/pkg/errors" "google.golang.org/grpc" "github.com/containerd/cgroups" + eventstypes "github.com/containerd/containerd/api/events" "github.com/containerd/containerd/api/types/task" "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/events/exchange" + "github.com/containerd/containerd/identifiers" "github.com/containerd/containerd/linux/shim/client" shim "github.com/containerd/containerd/linux/shim/v1" "github.com/containerd/containerd/runtime" + runc "github.com/containerd/go-runc" "github.com/gogo/protobuf/types" ) // Task on a linux based system type Task struct { + mu sync.Mutex id string pid int shim *client.Client namespace string cg cgroups.Cgroup monitor runtime.TaskMonitor + events *exchange.Exchange + runtime *runc.Runc } -func newTask(id, namespace string, pid int, shim *client.Client, monitor runtime.TaskMonitor) (*Task, error) { +func newTask(id, namespace string, pid int, shim *client.Client, monitor runtime.TaskMonitor, events *exchange.Exchange, 
runtime *runc.Runc) (*Task, error) { var ( err error cg cgroups.Cgroup @@ -45,6 +53,8 @@ func newTask(id, namespace string, pid int, shim *client.Client, monitor runtime namespace: namespace, cg: cg, monitor: monitor, + events: events, + runtime: runtime, }, nil } @@ -64,7 +74,9 @@ func (t *Task) Info() runtime.TaskInfo { // Start the task func (t *Task) Start(ctx context.Context) error { + t.mu.Lock() hasCgroup := t.cg != nil + t.mu.Unlock() r, err := t.shim.Start(ctx, &shim.StartRequest{ ID: t.id, }) @@ -77,11 +89,17 @@ func (t *Task) Start(ctx context.Context) error { if err != nil { return err } + t.mu.Lock() t.cg = cg + t.mu.Unlock() if err := t.monitor.Monitor(t); err != nil { return err } } + t.events.Publish(ctx, runtime.TaskStartEventTopic, &eventstypes.TaskStart{ + ContainerID: t.id, + Pid: uint32(t.pid), + }) return nil } @@ -123,11 +141,13 @@ func (t *Task) State(ctx context.Context) (runtime.State, error) { // Pause the task and all processes func (t *Task) Pause(ctx context.Context) error { - _, err := t.shim.Pause(ctx, empty) - if err != nil { - err = errdefs.FromGRPC(err) + if _, err := t.shim.Pause(ctx, empty); err != nil { + return errdefs.FromGRPC(err) } - return err + t.events.Publish(ctx, runtime.TaskPausedEventTopic, &eventstypes.TaskPaused{ + ContainerID: t.id, + }) + return nil } // Resume the task and all processes @@ -135,6 +155,9 @@ func (t *Task) Resume(ctx context.Context) error { if _, err := t.shim.Resume(ctx, empty); err != nil { return errdefs.FromGRPC(err) } + t.events.Publish(ctx, runtime.TaskResumedEventTopic, &eventstypes.TaskResumed{ + ContainerID: t.id, + }) return nil } @@ -154,6 +177,9 @@ func (t *Task) Kill(ctx context.Context, signal uint32, all bool) error { // Exec creates a new process inside the task func (t *Task) Exec(ctx context.Context, id string, opts runtime.ExecOpts) (runtime.Process, error) { + if err := identifiers.Validate(id); err != nil { + return nil, errors.Wrapf(err, "invalid exec id") + } request := &shim.ExecProcessRequest{ ID: id, Stdin: opts.IO.Stdin, @@ -182,7 +208,8 @@ func (t *Task) Pids(ctx context.Context) ([]runtime.ProcessInfo, error) { var processList []runtime.ProcessInfo for _, p := range resp.Processes { processList = append(processList, runtime.ProcessInfo{ - Pid: p.Pid, + Pid: p.Pid, + Info: p.Info, }) } return processList, nil @@ -222,6 +249,9 @@ func (t *Task) Checkpoint(ctx context.Context, path string, options *types.Any) if _, err := t.shim.Checkpoint(ctx, r); err != nil { return errdefs.FromGRPC(err) } + t.events.Publish(ctx, runtime.TaskCheckpointedEventTopic, &eventstypes.TaskCheckpointed{ + ContainerID: t.id, + }) return nil } @@ -261,6 +291,8 @@ func (t *Task) Process(ctx context.Context, id string) (runtime.Process, error) // Metrics returns runtime specific system level metric information for the task func (t *Task) Metrics(ctx context.Context) (interface{}, error) { + t.mu.Lock() + defer t.mu.Unlock() if t.cg == nil { return nil, errors.Wrap(errdefs.ErrNotFound, "cgroup does not exist") } @@ -273,6 +305,8 @@ func (t *Task) Metrics(ctx context.Context) (interface{}, error) { // Cgroup returns the underlying cgroup for a linux task func (t *Task) Cgroup() (cgroups.Cgroup, error) { + t.mu.Lock() + defer t.mu.Unlock() if t.cg == nil { return nil, errors.Wrap(errdefs.ErrNotFound, "cgroup does not exist") } diff --git a/vendor/github.com/containerd/containerd/metadata/buckets.go b/vendor/github.com/containerd/containerd/metadata/buckets.go index b6a66ba48c..9325f1698a 100644 --- 
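With the events exchange threaded into linux.Task, the lifecycle methods (Start, Pause, Resume, Checkpoint) now publish typed events after the shim call succeeds. A consumer-side sketch of draining those events; the Subscribe signature (an envelope channel plus an error channel) and the filter expression are assumptions about the events package, not part of this patch:

```go
package taskevents

import (
	"context"
	"log"

	"github.com/containerd/containerd/events/exchange"
)

// drainTaskEvents logs task lifecycle envelopes until the context is
// cancelled or the subscription reports an error.
func drainTaskEvents(ctx context.Context, ex *exchange.Exchange) {
	ch, errs := ex.Subscribe(ctx, `topic~="/tasks/"`)
	for {
		select {
		case env := <-ch:
			// The envelope carries the namespace, topic and the typed payload
			// (for example the TaskStart event published by Task.Start above).
			log.Printf("ns=%s topic=%s", env.Namespace, env.Topic)
		case err := <-errs:
			if err != nil {
				log.Printf("event stream error: %v", err)
			}
			return
		case <-ctx.Done():
			return
		}
	}
}
```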
a/vendor/github.com/containerd/containerd/metadata/buckets.go +++ b/vendor/github.com/containerd/containerd/metadata/buckets.go @@ -31,7 +31,6 @@ var ( bucketKeyVersion = []byte(schemaVersion) bucketKeyDBVersion = []byte("version") // stores the version of the schema bucketKeyObjectLabels = []byte("labels") // stores the labels for a namespace. - bucketKeyObjectIndexes = []byte("indexes") // reserved bucketKeyObjectImages = []byte("images") // stores image objects bucketKeyObjectContainers = []byte("containers") // stores container objects bucketKeyObjectSnapshots = []byte("snapshots") // stores snapshot references diff --git a/vendor/github.com/containerd/containerd/metadata/containers.go b/vendor/github.com/containerd/containerd/metadata/containers.go index c9d8ab6469..4cca5f69fe 100644 --- a/vendor/github.com/containerd/containerd/metadata/containers.go +++ b/vendor/github.com/containerd/containerd/metadata/containers.go @@ -105,7 +105,7 @@ func (s *containerStore) Create(ctx context.Context, container containers.Contai cbkt, err := bkt.CreateBucket([]byte(container.ID)) if err != nil { if err == bolt.ErrBucketExists { - err = errors.Wrapf(errdefs.ErrAlreadyExists, "content %q", container.ID) + err = errors.Wrapf(errdefs.ErrAlreadyExists, "container %q", container.ID) } return containers.Container{}, err } diff --git a/vendor/github.com/containerd/containerd/metadata/content.go b/vendor/github.com/containerd/containerd/metadata/content.go index 0797345e21..c13f7867ee 100644 --- a/vendor/github.com/containerd/containerd/metadata/content.go +++ b/vendor/github.com/containerd/containerd/metadata/content.go @@ -184,6 +184,9 @@ func (cs *contentStore) Delete(ctx context.Context, dgst digest.Digest) error { if err := getBlobsBucket(tx, ns).DeleteBucket([]byte(dgst.String())); err != nil { return err } + if err := removeContentLease(ctx, tx, dgst); err != nil { + return err + } // Mark content store as dirty for triggering garbage collection cs.db.dirtyL.Lock() @@ -527,12 +530,14 @@ func writeInfo(info *content.Info, bkt *bolt.Bucket) error { return bkt.Put(bucketKeySize, sizeEncoded) } -func (cs *contentStore) garbageCollect(ctx context.Context) error { - lt1 := time.Now() +func (cs *contentStore) garbageCollect(ctx context.Context) (d time.Duration, err error) { cs.l.Lock() + t1 := time.Now() defer func() { + if err == nil { + d = time.Now().Sub(t1) + } cs.l.Unlock() - log.G(ctx).WithField("t", time.Now().Sub(lt1)).Debugf("content garbage collected") }() seen := map[string]struct{}{} @@ -567,10 +572,10 @@ func (cs *contentStore) garbageCollect(ctx context.Context) error { return nil }); err != nil { - return err + return 0, err } - return cs.Store.Walk(ctx, func(info content.Info) error { + err = cs.Store.Walk(ctx, func(info content.Info) error { if _, ok := seen[info.Digest.String()]; !ok { if err := cs.Store.Delete(ctx, info.Digest); err != nil { return err @@ -579,4 +584,5 @@ func (cs *contentStore) garbageCollect(ctx context.Context) error { } return nil }) + return } diff --git a/vendor/github.com/containerd/containerd/metadata/db.go b/vendor/github.com/containerd/containerd/metadata/db.go index 7c366ebcc4..1744321800 100644 --- a/vendor/github.com/containerd/containerd/metadata/db.go +++ b/vendor/github.com/containerd/containerd/metadata/db.go @@ -11,7 +11,7 @@ import ( "github.com/containerd/containerd/content" "github.com/containerd/containerd/gc" "github.com/containerd/containerd/log" - "github.com/containerd/containerd/snapshot" + "github.com/containerd/containerd/snapshots" 
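Both the content and the snapshot garbage collectors now report how long a pass took by returning a named time.Duration that a deferred closure fills in only on success, instead of logging the timing inline. The shape of that pattern in isolation, as an illustrative helper rather than code from the patch:

```go
package gcutil

import "time"

// timedCleanup illustrates the named-return timing pattern adopted by the
// content and snapshot garbage collectors: the duration is only reported
// when the pass succeeds.
func timedCleanup(cleanup func() error) (d time.Duration, err error) {
	t1 := time.Now()
	defer func() {
		if err == nil {
			d = time.Since(t1) // same as time.Now().Sub(t1) used in the patch
		}
	}()
	err = cleanup()
	return
}
```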
"github.com/pkg/errors" ) @@ -53,13 +53,14 @@ type DB struct { dirtySS map[string]struct{} dirtyCS bool - // TODO: Keep track of stats such as pause time, number of collected objects, errors - lastCollection time.Time + // mutationCallbacks are called after each mutation with the flag + // set indicating whether any dirty flags are set + mutationCallbacks []func(bool) } // NewDB creates a new metadata database using the provided // bolt database, content store, and snapshotters. -func NewDB(db *bolt.DB, cs content.Store, ss map[string]snapshot.Snapshotter) *DB { +func NewDB(db *bolt.DB, cs content.Store, ss map[string]snapshots.Snapshotter) *DB { m := &DB{ db: db, ss: make(map[string]*snapshotter, len(ss)), @@ -170,7 +171,7 @@ func (m *DB) ContentStore() content.Store { // Snapshotter returns a namespaced content store for // the requested snapshotter name proxied to a snapshotter. -func (m *DB) Snapshotter(name string) snapshot.Snapshotter { +func (m *DB) Snapshotter(name string) snapshots.Snapshotter { sn, ok := m.ss[name] if !ok { return nil @@ -183,29 +184,53 @@ func (m *DB) View(fn func(*bolt.Tx) error) error { return m.db.View(fn) } -// Update runs a writable transation on the metadata store. +// Update runs a writable transaction on the metadata store. func (m *DB) Update(fn func(*bolt.Tx) error) error { m.wlock.RLock() defer m.wlock.RUnlock() - return m.db.Update(fn) + err := m.db.Update(fn) + if err == nil { + m.dirtyL.Lock() + dirty := m.dirtyCS || len(m.dirtySS) > 0 + for _, fn := range m.mutationCallbacks { + fn(dirty) + } + m.dirtyL.Unlock() + } + + return err +} + +// RegisterMutationCallback registers a function to be called after a metadata +// mutations has been performed. +// +// The callback function in an argument for whether a deletion has occurred +// since the last garbage collection. 
+func (m *DB) RegisterMutationCallback(fn func(bool)) { + m.dirtyL.Lock() + m.mutationCallbacks = append(m.mutationCallbacks, fn) + m.dirtyL.Unlock() +} + +// GCStats holds the duration for the different phases of the garbage collector +type GCStats struct { + MetaD time.Duration + ContentD time.Duration + SnapshotD map[string]time.Duration } // GarbageCollect starts garbage collection -func (m *DB) GarbageCollect(ctx context.Context) error { - lt1 := time.Now() +func (m *DB) GarbageCollect(ctx context.Context) (stats GCStats, err error) { m.wlock.Lock() - defer func() { - m.wlock.Unlock() - log.G(ctx).WithField("d", time.Now().Sub(lt1)).Debug("metadata garbage collected") - }() + t1 := time.Now() marked, err := m.getMarked(ctx) if err != nil { - return err + m.wlock.Unlock() + return GCStats{}, err } m.dirtyL.Lock() - defer m.dirtyL.Unlock() if err := m.db.Update(func(tx *bolt.Tx) error { ctx, cancel := context.WithCancel(ctx) @@ -232,26 +257,53 @@ func (m *DB) GarbageCollect(ctx context.Context) error { return nil }); err != nil { - return err + m.dirtyL.Unlock() + m.wlock.Unlock() + return GCStats{}, err } - m.lastCollection = time.Now() + var wg sync.WaitGroup if len(m.dirtySS) > 0 { + var sl sync.Mutex + stats.SnapshotD = map[string]time.Duration{} + wg.Add(len(m.dirtySS)) for snapshotterName := range m.dirtySS { log.G(ctx).WithField("snapshotter", snapshotterName).Debug("scheduling snapshotter cleanup") - go m.cleanupSnapshotter(snapshotterName) + go func(snapshotterName string) { + st1 := time.Now() + m.cleanupSnapshotter(snapshotterName) + + sl.Lock() + stats.SnapshotD[snapshotterName] = time.Now().Sub(st1) + sl.Unlock() + + wg.Done() + }(snapshotterName) } m.dirtySS = map[string]struct{}{} } if m.dirtyCS { + wg.Add(1) log.G(ctx).Debug("scheduling content cleanup") - go m.cleanupContent() + go func() { + ct1 := time.Now() + m.cleanupContent() + stats.ContentD = time.Now().Sub(ct1) + wg.Done() + }() m.dirtyCS = false } - return nil + m.dirtyL.Unlock() + + stats.MetaD = time.Now().Sub(t1) + m.wlock.Unlock() + + wg.Wait() + + return } func (m *DB) getMarked(ctx context.Context) (map[gc.Node]struct{}, error) { @@ -302,27 +354,35 @@ func (m *DB) getMarked(ctx context.Context) (map[gc.Node]struct{}, error) { return marked, nil } -func (m *DB) cleanupSnapshotter(name string) { +func (m *DB) cleanupSnapshotter(name string) (time.Duration, error) { ctx := context.Background() sn, ok := m.ss[name] if !ok { - return + return 0, nil } - err := sn.garbageCollect(ctx) + d, err := sn.garbageCollect(ctx) + logger := log.G(ctx).WithField("snapshotter", name) if err != nil { - log.G(ctx).WithError(err).WithField("snapshotter", name).Warn("garbage collection failed") + logger.WithError(err).Warn("snapshot garbage collection failed") + } else { + logger.WithField("d", d).Debugf("snapshot garbage collected") } + return d, err } -func (m *DB) cleanupContent() { +func (m *DB) cleanupContent() (time.Duration, error) { ctx := context.Background() if m.cs == nil { - return + return 0, nil } - err := m.cs.garbageCollect(ctx) + d, err := m.cs.garbageCollect(ctx) if err != nil { log.G(ctx).WithError(err).Warn("content garbage collection failed") + } else { + log.G(ctx).WithField("d", d).Debugf("content garbage collected") } + + return d, err } diff --git a/vendor/github.com/containerd/containerd/metadata/images.go b/vendor/github.com/containerd/containerd/metadata/images.go index 7e5e3c76e3..070439a8ca 100644 --- a/vendor/github.com/containerd/containerd/metadata/images.go +++ 
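GarbageCollect now returns per-phase timings in GCStats instead of logging them inline, and callers can react to writes through RegisterMutationCallback. A sketch of how a collection scheduler might wire these together; the scheduleGC helper and its logging are illustrative only, with mdb standing in for a *metadata.DB:

```go
package gcutil

import (
	"context"
	"log"

	"github.com/containerd/containerd/metadata"
)

// scheduleGC triggers a collection whenever a mutation reports dirty state
// and logs the per-phase durations from GCStats.
func scheduleGC(ctx context.Context, mdb *metadata.DB) {
	trigger := make(chan struct{}, 1)
	mdb.RegisterMutationCallback(func(dirty bool) {
		if !dirty {
			return // nothing was deleted, no need to collect
		}
		select {
		case trigger <- struct{}{}:
		default: // a collection is already pending
		}
	})

	go func() {
		for {
			select {
			case <-ctx.Done():
				return
			case <-trigger:
				stats, err := mdb.GarbageCollect(ctx)
				if err != nil {
					log.Printf("metadata gc failed: %v", err)
					continue
				}
				log.Printf("gc: meta=%s content=%s snapshotters=%v",
					stats.MetaD, stats.ContentD, stats.SnapshotD)
			}
		}
	}()
}
```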
b/vendor/github.com/containerd/containerd/metadata/images.go @@ -100,10 +100,6 @@ func (s *imageStore) Create(ctx context.Context, image images.Image) (images.Ima return images.Image{}, err } - if image.Name == "" { - return images.Image{}, errors.Wrapf(errdefs.ErrInvalidArgument, "image name is required for create") - } - if err := validateImage(&image); err != nil { return images.Image{}, err } @@ -177,7 +173,7 @@ func (s *imageStore) Update(ctx context.Context, image images.Image, fieldpaths updated = image } - if err := validateImage(&image); err != nil { + if err := validateImage(&updated); err != nil { return err } @@ -187,7 +183,7 @@ func (s *imageStore) Update(ctx context.Context, image images.Image, fieldpaths }) } -func (s *imageStore) Delete(ctx context.Context, name string) error { +func (s *imageStore) Delete(ctx context.Context, name string, opts ...images.DeleteOpt) error { namespace, err := namespaces.NamespaceRequired(ctx) if err != nil { return err diff --git a/vendor/github.com/containerd/containerd/metadata/leases.go b/vendor/github.com/containerd/containerd/metadata/leases.go index 006123d45f..eff0b20987 100644 --- a/vendor/github.com/containerd/containerd/metadata/leases.go +++ b/vendor/github.com/containerd/containerd/metadata/leases.go @@ -55,7 +55,7 @@ func (lm *LeaseManager) Create(ctx context.Context, lid string, labels map[strin if err == bolt.ErrBucketExists { err = errdefs.ErrAlreadyExists } - return Lease{}, err + return Lease{}, errors.Wrapf(err, "lease %q", lid) } t := time.Now().UTC() @@ -155,7 +155,7 @@ func addSnapshotLease(ctx context.Context, tx *bolt.Tx, snapshotter, key string) namespace, ok := namespaces.Namespace(ctx) if !ok { - panic("namespace must already be required") + panic("namespace must already be checked") } bkt := getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases, []byte(lid)) @@ -176,6 +176,26 @@ func addSnapshotLease(ctx context.Context, tx *bolt.Tx, snapshotter, key string) return bkt.Put([]byte(key), nil) } +func removeSnapshotLease(ctx context.Context, tx *bolt.Tx, snapshotter, key string) error { + lid, ok := leases.Lease(ctx) + if !ok { + return nil + } + + namespace, ok := namespaces.Namespace(ctx) + if !ok { + panic("namespace must already be checked") + } + + bkt := getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases, []byte(lid), bucketKeyObjectSnapshots, []byte(snapshotter)) + if bkt == nil { + // Key does not exist so we return nil + return nil + } + + return bkt.Delete([]byte(key)) +} + func addContentLease(ctx context.Context, tx *bolt.Tx, dgst digest.Digest) error { lid, ok := leases.Lease(ctx) if !ok { @@ -199,3 +219,23 @@ func addContentLease(ctx context.Context, tx *bolt.Tx, dgst digest.Digest) error return bkt.Put([]byte(dgst.String()), nil) } + +func removeContentLease(ctx context.Context, tx *bolt.Tx, dgst digest.Digest) error { + lid, ok := leases.Lease(ctx) + if !ok { + return nil + } + + namespace, ok := namespaces.Namespace(ctx) + if !ok { + panic("namespace must already be checked") + } + + bkt := getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases, []byte(lid), bucketKeyObjectContent) + if bkt == nil { + // Key does not exist so we return nil + return nil + } + + return bkt.Delete([]byte(dgst.String())) +} diff --git a/vendor/github.com/containerd/containerd/metadata/snapshot.go b/vendor/github.com/containerd/containerd/metadata/snapshot.go index 22ce3c8c03..3e501c5171 100644 --- a/vendor/github.com/containerd/containerd/metadata/snapshot.go +++ 
b/vendor/github.com/containerd/containerd/metadata/snapshot.go @@ -14,12 +14,12 @@ import ( "github.com/containerd/containerd/metadata/boltutil" "github.com/containerd/containerd/mount" "github.com/containerd/containerd/namespaces" - "github.com/containerd/containerd/snapshot" + "github.com/containerd/containerd/snapshots" "github.com/pkg/errors" ) type snapshotter struct { - snapshot.Snapshotter + snapshots.Snapshotter name string db *DB l sync.RWMutex @@ -27,7 +27,7 @@ type snapshotter struct { // newSnapshotter returns a new Snapshotter which namespaces the given snapshot // using the provided name and database. -func newSnapshotter(db *DB, name string, sn snapshot.Snapshotter) *snapshotter { +func newSnapshotter(db *DB, name string, sn snapshots.Snapshotter) *snapshotter { return &snapshotter{ Snapshotter: sn, name: name, @@ -39,14 +39,6 @@ func createKey(id uint64, namespace, key string) string { return fmt.Sprintf("%s/%d/%s", namespace, id, key) } -func trimKey(key string) string { - parts := strings.SplitN(key, "/", 3) - if len(parts) < 3 { - return "" - } - return parts[2] -} - func getKey(tx *bolt.Tx, ns, name, key string) string { bkt := getSnapshotterBucket(tx, ns, name) if bkt == nil { @@ -83,15 +75,15 @@ func (s *snapshotter) resolveKey(ctx context.Context, key string) (string, error return id, nil } -func (s *snapshotter) Stat(ctx context.Context, key string) (snapshot.Info, error) { +func (s *snapshotter) Stat(ctx context.Context, key string) (snapshots.Info, error) { ns, err := namespaces.NamespaceRequired(ctx) if err != nil { - return snapshot.Info{}, err + return snapshots.Info{}, err } var ( bkey string - local = snapshot.Info{ + local = snapshots.Info{ Name: key, } ) @@ -116,33 +108,33 @@ func (s *snapshotter) Stat(ctx context.Context, key string) (snapshot.Info, erro return nil }); err != nil { - return snapshot.Info{}, err + return snapshots.Info{}, err } info, err := s.Snapshotter.Stat(ctx, bkey) if err != nil { - return snapshot.Info{}, err + return snapshots.Info{}, err } return overlayInfo(info, local), nil } -func (s *snapshotter) Update(ctx context.Context, info snapshot.Info, fieldpaths ...string) (snapshot.Info, error) { +func (s *snapshotter) Update(ctx context.Context, info snapshots.Info, fieldpaths ...string) (snapshots.Info, error) { s.l.RLock() defer s.l.RUnlock() ns, err := namespaces.NamespaceRequired(ctx) if err != nil { - return snapshot.Info{}, err + return snapshots.Info{}, err } if info.Name == "" { - return snapshot.Info{}, errors.Wrap(errdefs.ErrInvalidArgument, "") + return snapshots.Info{}, errors.Wrap(errdefs.ErrInvalidArgument, "") } var ( bkey string - local = snapshot.Info{ + local = snapshots.Info{ Name: info.Name, } ) @@ -203,18 +195,18 @@ func (s *snapshotter) Update(ctx context.Context, info snapshot.Info, fieldpaths return nil }); err != nil { - return snapshot.Info{}, err + return snapshots.Info{}, err } info, err = s.Snapshotter.Stat(ctx, bkey) if err != nil { - return snapshot.Info{}, err + return snapshots.Info{}, err } return overlayInfo(info, local), nil } -func overlayInfo(info, overlay snapshot.Info) snapshot.Info { +func overlayInfo(info, overlay snapshots.Info) snapshots.Info { // Merge info info.Name = overlay.Name info.Created = overlay.Created @@ -230,10 +222,10 @@ func overlayInfo(info, overlay snapshot.Info) snapshot.Info { return info } -func (s *snapshotter) Usage(ctx context.Context, key string) (snapshot.Usage, error) { +func (s *snapshotter) Usage(ctx context.Context, key string) (snapshots.Usage, error) { bkey, 
err := s.resolveKey(ctx, key) if err != nil { - return snapshot.Usage{}, err + return snapshots.Usage{}, err } return s.Snapshotter.Usage(ctx, bkey) } @@ -246,15 +238,15 @@ func (s *snapshotter) Mounts(ctx context.Context, key string) ([]mount.Mount, er return s.Snapshotter.Mounts(ctx, bkey) } -func (s *snapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshot.Opt) ([]mount.Mount, error) { +func (s *snapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) { return s.createSnapshot(ctx, key, parent, false, opts) } -func (s *snapshotter) View(ctx context.Context, key, parent string, opts ...snapshot.Opt) ([]mount.Mount, error) { +func (s *snapshotter) View(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) { return s.createSnapshot(ctx, key, parent, true, opts) } -func (s *snapshotter) createSnapshot(ctx context.Context, key, parent string, readonly bool, opts []snapshot.Opt) ([]mount.Mount, error) { +func (s *snapshotter) createSnapshot(ctx context.Context, key, parent string, readonly bool, opts []snapshots.Opt) ([]mount.Mount, error) { s.l.RLock() defer s.l.RUnlock() @@ -263,7 +255,7 @@ func (s *snapshotter) createSnapshot(ctx context.Context, key, parent string, re return nil, err } - var base snapshot.Info + var base snapshots.Info for _, opt := range opts { if err := opt(&base); err != nil { return nil, err @@ -284,10 +276,14 @@ func (s *snapshotter) createSnapshot(ctx context.Context, key, parent string, re bbkt, err := bkt.CreateBucket([]byte(key)) if err != nil { if err == bolt.ErrBucketExists { - err = errors.Wrapf(errdefs.ErrAlreadyExists, "snapshot %v already exists", key) + err = errors.Wrapf(errdefs.ErrAlreadyExists, "snapshot %q", key) } return err } + if err := addSnapshotLease(ctx, tx, s.name, key); err != nil { + return err + } + var bparent string if parent != "" { pbkt := bkt.Bucket([]byte(parent)) @@ -326,10 +322,6 @@ func (s *snapshotter) createSnapshot(ctx context.Context, key, parent string, re return err } - if err := addSnapshotLease(ctx, tx, s.name, key); err != nil { - return err - } - // TODO: Consider doing this outside of transaction to lessen // metadata lock time if readonly { @@ -344,7 +336,7 @@ func (s *snapshotter) createSnapshot(ctx context.Context, key, parent string, re return m, nil } -func (s *snapshotter) Commit(ctx context.Context, name, key string, opts ...snapshot.Opt) error { +func (s *snapshotter) Commit(ctx context.Context, name, key string, opts ...snapshots.Opt) error { s.l.RLock() defer s.l.RUnlock() @@ -353,7 +345,7 @@ func (s *snapshotter) Commit(ctx context.Context, name, key string, opts ...snap return err } - var base snapshot.Info + var base snapshots.Info for _, opt := range opts { if err := opt(&base); err != nil { return err @@ -373,10 +365,13 @@ func (s *snapshotter) Commit(ctx context.Context, name, key string, opts ...snap bbkt, err := bkt.CreateBucket([]byte(name)) if err != nil { if err == bolt.ErrBucketExists { - err = errors.Wrapf(errdefs.ErrAlreadyExists, "snapshot %v already exists", name) + err = errors.Wrapf(errdefs.ErrAlreadyExists, "snapshot %q", name) } return err } + if err := addSnapshotLease(ctx, tx, s.name, name); err != nil { + return err + } obkt := bkt.Bucket([]byte(key)) if obkt == nil { @@ -425,9 +420,13 @@ func (s *snapshotter) Commit(ctx context.Context, name, key string, opts ...snap if err := boltutil.WriteLabels(bbkt, base.Labels); err != nil { return err } + if err := bkt.DeleteBucket([]byte(key)); 
err != nil { return err } + if err := removeSnapshotLease(ctx, tx, s.name, key); err != nil { + return err + } // TODO: Consider doing this outside of transaction to lessen // metadata lock time @@ -479,6 +478,9 @@ func (s *snapshotter) Remove(ctx context.Context, key string) error { if err := bkt.DeleteBucket([]byte(key)); err != nil { return err } + if err := removeSnapshotLease(ctx, tx, s.name, key); err != nil { + return err + } // Mark snapshotter as dirty for triggering garbage collection s.db.dirtyL.Lock() @@ -491,10 +493,10 @@ func (s *snapshotter) Remove(ctx context.Context, key string) error { type infoPair struct { bkey string - info snapshot.Info + info snapshots.Info } -func (s *snapshotter) Walk(ctx context.Context, fn func(context.Context, snapshot.Info) error) error { +func (s *snapshotter) Walk(ctx context.Context, fn func(context.Context, snapshots.Info) error) error { ns, err := namespaces.NamespaceRequired(ctx) if err != nil { return err @@ -531,7 +533,7 @@ func (s *snapshotter) Walk(ctx context.Context, fn func(context.Context, snapsho pair := infoPair{ bkey: string(sbkt.Get(bucketKeyName)), - info: snapshot.Info{ + info: snapshots.Info{ Name: string(k), Parent: string(sbkt.Get(bucketKeyParent)), }, @@ -584,7 +586,7 @@ func (s *snapshotter) Walk(ctx context.Context, fn func(context.Context, snapsho return nil } -func validateSnapshot(info *snapshot.Info) error { +func validateSnapshot(info *snapshots.Info) error { for k, v := range info.Labels { if err := labels.Validate(k, v); err != nil { return errors.Wrapf(err, "info.Labels") @@ -594,13 +596,14 @@ func validateSnapshot(info *snapshot.Info) error { return nil } -func (s *snapshotter) garbageCollect(ctx context.Context) error { - logger := log.G(ctx).WithField("snapshotter", s.name) - lt1 := time.Now() +func (s *snapshotter) garbageCollect(ctx context.Context) (d time.Duration, err error) { s.l.Lock() + t1 := time.Now() defer func() { + if err == nil { + d = time.Now().Sub(t1) + } s.l.Unlock() - logger.WithField("t", time.Now().Sub(lt1)).Debugf("garbage collected") }() seen := map[string]struct{}{} @@ -644,27 +647,30 @@ func (s *snapshotter) garbageCollect(ctx context.Context) error { return nil }); err != nil { - return err + return 0, err } roots, err := s.walkTree(ctx, seen) if err != nil { - return err + return 0, err } - // TODO: Unlock before prune (once nodes are fully unavailable) + // TODO: Unlock before removal (once nodes are fully unavailable). + // This could be achieved through doing prune inside the lock + // and having a cleanup method which actually performs the + // deletions on the snapshotters which support it. 
for _, node := range roots { if err := s.pruneBranch(ctx, node); err != nil { - return err + return 0, err } } - return nil + return } type treeNode struct { - info snapshot.Info + info snapshots.Info remove bool children []*treeNode } @@ -673,7 +679,7 @@ func (s *snapshotter) walkTree(ctx context.Context, seen map[string]struct{}) ([ roots := []*treeNode{} nodes := map[string]*treeNode{} - if err := s.Snapshotter.Walk(ctx, func(ctx context.Context, info snapshot.Info) error { + if err := s.Snapshotter.Walk(ctx, func(ctx context.Context, info snapshots.Info) error { _, isSeen := seen[info.Name] node, ok := nodes[info.Name] if !ok { @@ -724,3 +730,8 @@ func (s *snapshotter) pruneBranch(ctx context.Context, node *treeNode) error { return nil } + +// Close closes s.Snapshotter but not db +func (s *snapshotter) Close() error { + return s.Snapshotter.Close() +} diff --git a/vendor/github.com/containerd/containerd/services/namespaces/client.go b/vendor/github.com/containerd/containerd/namespaces.go similarity index 67% rename from vendor/github.com/containerd/containerd/services/namespaces/client.go rename to vendor/github.com/containerd/containerd/namespaces.go index fd59ec619d..36fc50cabf 100644 --- a/vendor/github.com/containerd/containerd/services/namespaces/client.go +++ b/vendor/github.com/containerd/containerd/namespaces.go @@ -1,4 +1,4 @@ -package namespaces +package containerd import ( "context" @@ -10,16 +10,16 @@ import ( "github.com/gogo/protobuf/types" ) -// NewStoreFromClient returns a new namespace store -func NewStoreFromClient(client api.NamespacesClient) namespaces.Store { - return &remote{client: client} +// NewNamespaceStoreFromClient returns a new namespace store +func NewNamespaceStoreFromClient(client api.NamespacesClient) namespaces.Store { + return &remoteNamespaces{client: client} } -type remote struct { +type remoteNamespaces struct { client api.NamespacesClient } -func (r *remote) Create(ctx context.Context, namespace string, labels map[string]string) error { +func (r *remoteNamespaces) Create(ctx context.Context, namespace string, labels map[string]string) error { var req api.CreateNamespaceRequest req.Namespace = api.Namespace{ @@ -35,7 +35,7 @@ func (r *remote) Create(ctx context.Context, namespace string, labels map[string return nil } -func (r *remote) Labels(ctx context.Context, namespace string) (map[string]string, error) { +func (r *remoteNamespaces) Labels(ctx context.Context, namespace string) (map[string]string, error) { var req api.GetNamespaceRequest req.Name = namespace @@ -47,7 +47,7 @@ func (r *remote) Labels(ctx context.Context, namespace string) (map[string]strin return resp.Namespace.Labels, nil } -func (r *remote) SetLabel(ctx context.Context, namespace, key, value string) error { +func (r *remoteNamespaces) SetLabel(ctx context.Context, namespace, key, value string) error { var req api.UpdateNamespaceRequest req.Namespace = api.Namespace{ @@ -67,7 +67,7 @@ func (r *remote) SetLabel(ctx context.Context, namespace, key, value string) err return nil } -func (r *remote) List(ctx context.Context) ([]string, error) { +func (r *remoteNamespaces) List(ctx context.Context) ([]string, error) { var req api.ListNamespacesRequest resp, err := r.client.List(ctx, &req) @@ -84,7 +84,7 @@ func (r *remote) List(ctx context.Context) ([]string, error) { return namespaces, nil } -func (r *remote) Delete(ctx context.Context, namespace string) error { +func (r *remoteNamespaces) Delete(ctx context.Context, namespace string) error { var req api.DeleteNamespaceRequest 
req.Name = namespace diff --git a/vendor/github.com/containerd/containerd/oci/client.go b/vendor/github.com/containerd/containerd/oci/client.go new file mode 100644 index 0000000000..d2cd355c56 --- /dev/null +++ b/vendor/github.com/containerd/containerd/oci/client.go @@ -0,0 +1,22 @@ +package oci + +import ( + "context" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/snapshots" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// Client interface used by SpecOpt +type Client interface { + SnapshotService(snapshotterName string) snapshots.Snapshotter +} + +// Image interface used by some SpecOpt to query image configuration +type Image interface { + // Config descriptor for the image. + Config(ctx context.Context) (ocispec.Descriptor, error) + // ContentStore provides a content store which contains image blob data + ContentStore() content.Store +} diff --git a/vendor/github.com/containerd/containerd/spec.go b/vendor/github.com/containerd/containerd/oci/spec.go similarity index 75% rename from vendor/github.com/containerd/containerd/spec.go rename to vendor/github.com/containerd/containerd/oci/spec.go index 850f470a09..558a3570f1 100644 --- a/vendor/github.com/containerd/containerd/spec.go +++ b/vendor/github.com/containerd/containerd/oci/spec.go @@ -1,4 +1,4 @@ -package containerd +package oci import ( "context" @@ -9,7 +9,7 @@ import ( // GenerateSpec will generate a default spec from the provided image // for use as a containerd container -func GenerateSpec(ctx context.Context, client *Client, c *containers.Container, opts ...SpecOpts) (*specs.Spec, error) { +func GenerateSpec(ctx context.Context, client Client, c *containers.Container, opts ...SpecOpts) (*specs.Spec, error) { s, err := createDefaultSpec(ctx, c.ID) if err != nil { return nil, err diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts.go b/vendor/github.com/containerd/containerd/oci/spec_opts.go new file mode 100644 index 0000000000..c940c7aabd --- /dev/null +++ b/vendor/github.com/containerd/containerd/oci/spec_opts.go @@ -0,0 +1,35 @@ +package oci + +import ( + "context" + + "github.com/containerd/containerd/containers" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +// SpecOpts sets spec specific information to a newly generated OCI spec +type SpecOpts func(context.Context, Client, *containers.Container, *specs.Spec) error + +// WithProcessArgs replaces the args on the generated spec +func WithProcessArgs(args ...string) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error { + s.Process.Args = args + return nil + } +} + +// WithProcessCwd replaces the current working directory on the generated spec +func WithProcessCwd(cwd string) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error { + s.Process.Cwd = cwd + return nil + } +} + +// WithHostname sets the container's hostname +func WithHostname(name string) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error { + s.Hostname = name + return nil + } +} diff --git a/vendor/github.com/containerd/containerd/spec_opts_unix.go b/vendor/github.com/containerd/containerd/oci/spec_opts_unix.go similarity index 69% rename from vendor/github.com/containerd/containerd/spec_opts_unix.go rename to vendor/github.com/containerd/containerd/oci/spec_opts_unix.go index 01d5121d41..b17ca32162 100644 --- a/vendor/github.com/containerd/containerd/spec_opts_unix.go +++ 
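Because SpecOpts now takes the small oci.Client interface rather than the concrete *containerd.Client, callers can define spec options without depending on the full client. A sketch of a caller-defined option alongside the relocated GenerateSpec; WithAdditionalEnv and buildSpec are hypothetical helpers, and the client argument is assumed to satisfy oci.Client:

```go
package specutil

import (
	"context"

	"github.com/containerd/containerd/containers"
	"github.com/containerd/containerd/oci"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

// WithAdditionalEnv appends environment variables to the generated spec.
// It is an illustrative option, not part of the patch.
func WithAdditionalEnv(env ...string) oci.SpecOpts {
	return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {
		if s.Process == nil {
			s.Process = &specs.Process{}
		}
		s.Process.Env = append(s.Process.Env, env...)
		return nil
	}
}

// buildSpec combines stock options from the oci package with the custom one.
func buildSpec(ctx context.Context, client oci.Client, c *containers.Container) (*specs.Spec, error) {
	return oci.GenerateSpec(ctx, client, c,
		oci.WithProcessArgs("/bin/sh", "-c", "echo hello"),
		oci.WithHostname("example"),
		WithAdditionalEnv("EXAMPLE=1"),
	)
}
```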
b/vendor/github.com/containerd/containerd/oci/spec_opts_unix.go @@ -1,6 +1,6 @@ // +build !windows -package containerd +package oci import ( "context" @@ -16,12 +16,9 @@ import ( "github.com/containerd/containerd/containers" "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/fs" "github.com/containerd/containerd/images" "github.com/containerd/containerd/namespaces" - "github.com/containerd/containerd/platforms" - "github.com/opencontainers/image-spec/identity" "github.com/opencontainers/image-spec/specs-go/v1" "github.com/opencontainers/runc/libcontainer/user" specs "github.com/opencontainers/runtime-spec/specs-go" @@ -30,7 +27,7 @@ import ( // WithTTY sets the information on the spec as well as the environment variables for // using a TTY -func WithTTY(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error { +func WithTTY(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error { s.Process.Terminal = true s.Process.Env = append(s.Process.Env, "TERM=xterm") return nil @@ -38,7 +35,7 @@ func WithTTY(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spe // WithHostNamespace allows a task to run inside the host's linux namespace func WithHostNamespace(ns specs.LinuxNamespaceType) SpecOpts { - return func(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error { + return func(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error { for i, n := range s.Linux.Namespaces { if n.Type == ns { s.Linux.Namespaces = append(s.Linux.Namespaces[:i], s.Linux.Namespaces[i+1:]...) @@ -52,7 +49,7 @@ func WithHostNamespace(ns specs.LinuxNamespaceType) SpecOpts { // WithLinuxNamespace uses the passed in namespace for the spec. If a namespace of the same type already exists in the // spec, the existing namespace is replaced by the one provided. func WithLinuxNamespace(ns specs.LinuxNamespace) SpecOpts { - return func(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error { + return func(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error { for i, n := range s.Linux.Namespaces { if n.Type == ns.Type { before := s.Linux.Namespaces[:i] @@ -68,13 +65,9 @@ func WithLinuxNamespace(ns specs.LinuxNamespace) SpecOpts { } // WithImageConfig configures the spec to from the configuration of an Image -func WithImageConfig(i Image) SpecOpts { - return func(ctx context.Context, client *Client, c *containers.Container, s *specs.Spec) error { - var ( - image = i.(*image) - store = client.ContentStore() - ) - ic, err := image.i.Config(ctx, store, platforms.Default()) +func WithImageConfig(image Image) SpecOpts { + return func(ctx context.Context, client Client, c *containers.Container, s *specs.Spec) error { + ic, err := image.Config(ctx) if err != nil { return err } @@ -84,7 +77,7 @@ func WithImageConfig(i Image) SpecOpts { ) switch ic.MediaType { case v1.MediaTypeImageConfig, images.MediaTypeDockerSchema2Config: - p, err := content.ReadBlob(ctx, store, ic.Digest) + p, err := content.ReadBlob(ctx, image.ContentStore(), ic.Digest) if err != nil { return err } @@ -96,6 +89,11 @@ func WithImageConfig(i Image) SpecOpts { default: return fmt.Errorf("unknown image config media type %s", ic.MediaType) } + + if s.Process == nil { + s.Process = &specs.Process{} + } + s.Process.Env = append(s.Process.Env, config.Env...) cmd := config.Cmd s.Process.Args = append(config.Entrypoint, cmd...) 
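All of the oci.SpecOpts above share the func(context.Context, Client, *containers.Container, *specs.Spec) error shape, so callers stack them when generating a spec. The sketch below is illustrative only: the custom option, the container value, and passing a nil Client (tolerable here only because none of these options consult it) are assumptions, not part of this patch.

package example

import (
	"context"

	"github.com/containerd/containerd/containers"
	"github.com/containerd/containerd/oci"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

// withMaskedPath is a hypothetical custom option written against the new
// SpecOpts signature; it appends one masked path to the generated spec.
func withMaskedPath(path string) oci.SpecOpts {
	return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {
		if s.Linux == nil {
			s.Linux = &specs.Linux{}
		}
		s.Linux.MaskedPaths = append(s.Linux.MaskedPaths, path)
		return nil
	}
}

func buildSpec(ctx context.Context, c *containers.Container) (*specs.Spec, error) {
	// Stock options from the oci package compose with the custom one above.
	return oci.GenerateSpec(ctx, nil, c,
		oci.WithProcessArgs("/bin/sh", "-c", "echo hello"),
		oci.WithHostname("example"),
		withMaskedPath("/proc/kcore"),
	)
}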
@@ -140,7 +138,7 @@ func WithImageConfig(i Image) SpecOpts { // WithRootFSPath specifies unmanaged rootfs path. func WithRootFSPath(path string) SpecOpts { - return func(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error { + return func(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error { if s.Root == nil { s.Root = &specs.Root{} } @@ -152,7 +150,7 @@ func WithRootFSPath(path string) SpecOpts { // WithRootFSReadonly sets specs.Root.Readonly to true func WithRootFSReadonly() SpecOpts { - return func(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error { + return func(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error { if s.Root == nil { s.Root = &specs.Root{} } @@ -161,22 +159,14 @@ func WithRootFSReadonly() SpecOpts { } } -// WithResources sets the provided resources on the spec for task updates -func WithResources(resources *specs.LinuxResources) UpdateTaskOpts { - return func(ctx context.Context, client *Client, r *UpdateTaskInfo) error { - r.Resources = resources - return nil - } -} - // WithNoNewPrivileges sets no_new_privileges on the process for the container -func WithNoNewPrivileges(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error { +func WithNoNewPrivileges(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error { s.Process.NoNewPrivileges = true return nil } // WithHostHostsFile bind-mounts the host's /etc/hosts into the container as readonly -func WithHostHostsFile(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error { +func WithHostHostsFile(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error { s.Mounts = append(s.Mounts, specs.Mount{ Destination: "/etc/hosts", Type: "bind", @@ -187,7 +177,7 @@ func WithHostHostsFile(_ context.Context, _ *Client, _ *containers.Container, s } // WithHostResolvconf bind-mounts the host's /etc/resolv.conf into the container as readonly -func WithHostResolvconf(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error { +func WithHostResolvconf(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error { s.Mounts = append(s.Mounts, specs.Mount{ Destination: "/etc/resolv.conf", Type: "bind", @@ -198,7 +188,7 @@ func WithHostResolvconf(_ context.Context, _ *Client, _ *containers.Container, s } // WithHostLocaltime bind-mounts the host's /etc/localtime into the container as readonly -func WithHostLocaltime(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error { +func WithHostLocaltime(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error { s.Mounts = append(s.Mounts, specs.Mount{ Destination: "/etc/localtime", Type: "bind", @@ -211,7 +201,7 @@ func WithHostLocaltime(_ context.Context, _ *Client, _ *containers.Container, s // WithUserNamespace sets the uid and gid mappings for the task // this can be called multiple times to add more mappings to the generated spec func WithUserNamespace(container, host, size uint32) SpecOpts { - return func(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error { + return func(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error { var hasUserns bool for _, ns := range s.Linux.Namespaces { if ns.Type == specs.UserNamespace { @@ -235,68 +225,9 @@ func WithUserNamespace(container, host, size uint32) SpecOpts { } } -// WithRemappedSnapshot creates a new snapshot and remaps the uid/gid for the -// filesystem to be used 
by a container with user namespaces -func WithRemappedSnapshot(id string, i Image, uid, gid uint32) NewContainerOpts { - return withRemappedSnapshotBase(id, i, uid, gid, false) -} - -// WithRemappedSnapshotView is similar to WithRemappedSnapshot but rootfs is mounted as read-only. -func WithRemappedSnapshotView(id string, i Image, uid, gid uint32) NewContainerOpts { - return withRemappedSnapshotBase(id, i, uid, gid, true) -} - -func withRemappedSnapshotBase(id string, i Image, uid, gid uint32, readonly bool) NewContainerOpts { - return func(ctx context.Context, client *Client, c *containers.Container) error { - diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), platforms.Default()) - if err != nil { - return err - } - - setSnapshotterIfEmpty(c) - - var ( - snapshotter = client.SnapshotService(c.Snapshotter) - parent = identity.ChainID(diffIDs).String() - usernsID = fmt.Sprintf("%s-%d-%d", parent, uid, gid) - ) - if _, err := snapshotter.Stat(ctx, usernsID); err == nil { - if _, err := snapshotter.Prepare(ctx, id, usernsID); err == nil { - c.SnapshotKey = id - c.Image = i.Name() - return nil - } else if !errdefs.IsNotFound(err) { - return err - } - } - mounts, err := snapshotter.Prepare(ctx, usernsID+"-remap", parent) - if err != nil { - return err - } - if err := remapRootFS(mounts, uid, gid); err != nil { - snapshotter.Remove(ctx, usernsID) - return err - } - if err := snapshotter.Commit(ctx, usernsID, usernsID+"-remap"); err != nil { - return err - } - if readonly { - _, err = snapshotter.View(ctx, id, usernsID) - } else { - _, err = snapshotter.Prepare(ctx, id, usernsID) - } - if err != nil { - return err - } - c.SnapshotKey = id - c.Image = i.Name() - return nil - } -} - // WithCgroup sets the container's cgroup path func WithCgroup(path string) SpecOpts { - return func(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error { + return func(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error { s.Linux.CgroupsPath = path return nil } @@ -305,7 +236,7 @@ func WithCgroup(path string) SpecOpts { // WithNamespacedCgroup uses the namespace set on the context to create a // root directory for containers in the cgroup with the id as the subcgroup func WithNamespacedCgroup() SpecOpts { - return func(ctx context.Context, _ *Client, c *containers.Container, s *specs.Spec) error { + return func(ctx context.Context, _ Client, c *containers.Container, s *specs.Spec) error { namespace, err := namespaces.NamespaceRequired(ctx) if err != nil { return err @@ -317,7 +248,7 @@ func WithNamespacedCgroup() SpecOpts { // WithUIDGID allows the UID and GID for the Process to be set func WithUIDGID(uid, gid uint32) SpecOpts { - return func(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error { + return func(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error { s.Process.User.UID = uid s.Process.User.GID = gid return nil @@ -329,7 +260,7 @@ func WithUIDGID(uid, gid uint32) SpecOpts { // or uid is not found in /etc/passwd, it sets gid to be the same with // uid, and not returns error. 
func WithUserID(uid uint32) SpecOpts { - return func(ctx context.Context, client *Client, c *containers.Container, s *specs.Spec) error { + return func(ctx context.Context, client Client, c *containers.Container, s *specs.Spec) error { if c.Snapshotter == "" { return errors.Errorf("no snapshotter set for container") } @@ -386,7 +317,7 @@ func WithUserID(uid uint32) SpecOpts { // does not exist, or the username is not found in /etc/passwd, // it returns error. func WithUsername(username string) SpecOpts { - return func(ctx context.Context, client *Client, c *containers.Container, s *specs.Spec) error { + return func(ctx context.Context, client Client, c *containers.Container, s *specs.Spec) error { if c.Snapshotter == "" { return errors.Errorf("no snapshotter set for container") } diff --git a/vendor/github.com/containerd/containerd/spec_opts_windows.go b/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go similarity index 64% rename from vendor/github.com/containerd/containerd/spec_opts_windows.go rename to vendor/github.com/containerd/containerd/oci/spec_opts_windows.go index 1fc5d5e37d..3605f8e487 100644 --- a/vendor/github.com/containerd/containerd/spec_opts_windows.go +++ b/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go @@ -1,6 +1,6 @@ // +build windows -package containerd +package oci import ( "context" @@ -10,19 +10,14 @@ import ( "github.com/containerd/containerd/containers" "github.com/containerd/containerd/content" "github.com/containerd/containerd/images" - "github.com/containerd/containerd/platforms" "github.com/opencontainers/image-spec/specs-go/v1" specs "github.com/opencontainers/runtime-spec/specs-go" ) // WithImageConfig configures the spec to from the configuration of an Image -func WithImageConfig(i Image) SpecOpts { - return func(ctx context.Context, client *Client, _ *containers.Container, s *specs.Spec) error { - var ( - image = i.(*image) - store = client.ContentStore() - ) - ic, err := image.i.Config(ctx, store, platforms.Default()) +func WithImageConfig(image Image) SpecOpts { + return func(ctx context.Context, client Client, _ *containers.Container, s *specs.Spec) error { + ic, err := image.Config(ctx) if err != nil { return err } @@ -32,7 +27,7 @@ func WithImageConfig(i Image) SpecOpts { ) switch ic.MediaType { case v1.MediaTypeImageConfig, images.MediaTypeDockerSchema2Config: - p, err := content.ReadBlob(ctx, store, ic.Digest) + p, err := content.ReadBlob(ctx, image.ContentStore(), ic.Digest) if err != nil { return err } @@ -55,7 +50,7 @@ func WithImageConfig(i Image) SpecOpts { // WithTTY sets the information on the spec as well as the environment variables for // using a TTY func WithTTY(width, height int) SpecOpts { - return func(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error { + return func(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error { s.Process.Terminal = true if s.Process.ConsoleSize == nil { s.Process.ConsoleSize = &specs.Box{} @@ -65,11 +60,3 @@ func WithTTY(width, height int) SpecOpts { return nil } } - -// WithResources sets the provided resources on the spec for task updates -func WithResources(resources *specs.WindowsResources) UpdateTaskOpts { - return func(ctx context.Context, client *Client, r *UpdateTaskInfo) error { - r.Resources = resources - return nil - } -} diff --git a/vendor/github.com/containerd/containerd/spec_unix.go b/vendor/github.com/containerd/containerd/oci/spec_unix.go similarity index 76% rename from 
vendor/github.com/containerd/containerd/spec_unix.go rename to vendor/github.com/containerd/containerd/oci/spec_unix.go index 957f90ef91..c8f3b37afb 100644 --- a/vendor/github.com/containerd/containerd/spec_unix.go +++ b/vendor/github.com/containerd/containerd/oci/spec_unix.go @@ -1,17 +1,11 @@ // +build !windows -package containerd +package oci import ( "context" - "io/ioutil" - "os" "path/filepath" - "syscall" - "golang.org/x/sys/unix" - - "github.com/containerd/containerd/mount" "github.com/containerd/containerd/namespaces" specs "github.com/opencontainers/runtime-spec/specs-go" ) @@ -142,8 +136,6 @@ func createDefaultSpec(ctx context.Context, id string) (*specs.Spec, error) { }, }, Linux: &specs.Linux{ - // TODO (AkihiroSuda): unmask /sys/firmware on Windows daemon for LCOW support? - // https://github.com/moby/moby/pull/33241/files#diff-a1f5051ce84e711a2ee688ab9ded5e74R215 MaskedPaths: []string{ "/proc/kcore", "/proc/latency_stats", @@ -175,32 +167,3 @@ func createDefaultSpec(ctx context.Context, id string) (*specs.Spec, error) { } return s, nil } - -func remapRootFS(mounts []mount.Mount, uid, gid uint32) error { - root, err := ioutil.TempDir("", "ctd-remap") - if err != nil { - return err - } - defer os.RemoveAll(root) - for _, m := range mounts { - if err := m.Mount(root); err != nil { - return err - } - } - defer unix.Unmount(root, 0) - return filepath.Walk(root, incrementFS(root, uid, gid)) -} - -func incrementFS(root string, uidInc, gidInc uint32) filepath.WalkFunc { - return func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - var ( - stat = info.Sys().(*syscall.Stat_t) - u, g = int(stat.Uid + uidInc), int(stat.Gid + gidInc) - ) - // be sure the lchown the path as to not de-reference the symlink to a host file - return os.Lchown(path, u, g) - } -} diff --git a/vendor/github.com/containerd/containerd/spec_windows.go b/vendor/github.com/containerd/containerd/oci/spec_windows.go similarity index 96% rename from vendor/github.com/containerd/containerd/spec_windows.go rename to vendor/github.com/containerd/containerd/oci/spec_windows.go index 16a58b4468..64c228883d 100644 --- a/vendor/github.com/containerd/containerd/spec_windows.go +++ b/vendor/github.com/containerd/containerd/oci/spec_windows.go @@ -1,4 +1,4 @@ -package containerd +package oci import ( "context" diff --git a/vendor/github.com/containerd/containerd/plugin/plugin.go b/vendor/github.com/containerd/containerd/plugin/plugin.go index 9bda46cbfa..5746bf72d7 100644 --- a/vendor/github.com/containerd/containerd/plugin/plugin.go +++ b/vendor/github.com/containerd/containerd/plugin/plugin.go @@ -54,6 +54,8 @@ const ( MetadataPlugin Type = "io.containerd.metadata.v1" // ContentPlugin implements a content store ContentPlugin Type = "io.containerd.content.v1" + // GCPlugin implements garbage collection policy + GCPlugin Type = "io.containerd.gc.v1" ) // Registration contains information for registering a plugin diff --git a/vendor/github.com/containerd/containerd/plugin/plugin_go18.go b/vendor/github.com/containerd/containerd/plugin/plugin_go18.go index d9101246e2..eee0e3fdb7 100644 --- a/vendor/github.com/containerd/containerd/plugin/plugin_go18.go +++ b/vendor/github.com/containerd/containerd/plugin/plugin_go18.go @@ -1,4 +1,4 @@ -// +build go1.8,!windows,amd64 +// +build go1.8,!windows,amd64,!static_build package plugin diff --git a/vendor/github.com/containerd/containerd/plugin/plugin_other.go b/vendor/github.com/containerd/containerd/plugin/plugin_other.go index 21a4570404..180917af80 
100644 --- a/vendor/github.com/containerd/containerd/plugin/plugin_other.go +++ b/vendor/github.com/containerd/containerd/plugin/plugin_other.go @@ -1,4 +1,4 @@ -// +build !go1.8 windows !amd64 +// +build !go1.8 windows !amd64 static_build package plugin diff --git a/vendor/github.com/containerd/containerd/process.go b/vendor/github.com/containerd/containerd/process.go index e51367aaa1..ad1a2a1f41 100644 --- a/vendor/github.com/containerd/containerd/process.go +++ b/vendor/github.com/containerd/containerd/process.go @@ -7,6 +7,7 @@ import ( "time" "github.com/containerd/containerd/api/services/tasks/v1" + "github.com/containerd/containerd/cio" "github.com/containerd/containerd/errdefs" "github.com/pkg/errors" ) @@ -28,7 +29,7 @@ type Process interface { // Resize changes the width and heigh of the process's terminal Resize(ctx context.Context, w, h uint32) error // IO returns the io set for the process - IO() IO + IO() cio.IO // Status returns the executing status of the process Status(context.Context) (Status, error) } @@ -72,7 +73,7 @@ type process struct { id string task *task pid uint32 - io IO + io cio.IO } func (p *process) ID() string { @@ -104,7 +105,7 @@ func (p *process) Start(ctx context.Context) error { func (p *process) Kill(ctx context.Context, s syscall.Signal, opts ...KillOpts) error { var i KillInfo for _, o := range opts { - if err := o(ctx, p, &i); err != nil { + if err := o(ctx, &i); err != nil { return err } } @@ -154,7 +155,7 @@ func (p *process) CloseIO(ctx context.Context, opts ...IOCloserOpts) error { return errdefs.FromGRPC(err) } -func (p *process) IO() IO { +func (p *process) IO() cio.IO { return p.io } diff --git a/vendor/github.com/containerd/containerd/protobuf/google/rpc/code.pb.go b/vendor/github.com/containerd/containerd/protobuf/google/rpc/code.pb.go index 74537b7e61..c94ceb4389 100644 --- a/vendor/github.com/containerd/containerd/protobuf/google/rpc/code.pb.go +++ b/vendor/github.com/containerd/containerd/protobuf/google/rpc/code.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/containerd/containerd/protobuf/google/rpc/code.proto -// DO NOT EDIT! /* Package rpc is a generated protocol buffer package. diff --git a/vendor/github.com/containerd/containerd/protobuf/google/rpc/error_details.pb.go b/vendor/github.com/containerd/containerd/protobuf/google/rpc/error_details.pb.go index a61229d733..46953e7ff1 100644 --- a/vendor/github.com/containerd/containerd/protobuf/google/rpc/error_details.pb.go +++ b/vendor/github.com/containerd/containerd/protobuf/google/rpc/error_details.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/containerd/containerd/protobuf/google/rpc/error_details.proto -// DO NOT EDIT! 
package rpc @@ -670,24 +669,6 @@ func (m *LocalizedMessage) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64ErrorDetails(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32ErrorDetails(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintErrorDetails(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/vendor/github.com/containerd/containerd/protobuf/google/rpc/status.pb.go b/vendor/github.com/containerd/containerd/protobuf/google/rpc/status.pb.go index 80927bf655..fde1ca7b07 100644 --- a/vendor/github.com/containerd/containerd/protobuf/google/rpc/status.pb.go +++ b/vendor/github.com/containerd/containerd/protobuf/google/rpc/status.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/containerd/containerd/protobuf/google/rpc/status.proto -// DO NOT EDIT! package rpc @@ -131,24 +130,6 @@ func (m *Status) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Status(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Status(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintStatus(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/vendor/github.com/containerd/containerd/protobuf/plugin/doc.go b/vendor/github.com/containerd/containerd/protobuf/plugin/doc.go deleted file mode 100644 index b0736c3a05..0000000000 --- a/vendor/github.com/containerd/containerd/protobuf/plugin/doc.go +++ /dev/null @@ -1 +0,0 @@ -package plugin diff --git a/vendor/github.com/containerd/containerd/protobuf/plugin/fieldpath.pb.go b/vendor/github.com/containerd/containerd/protobuf/plugin/fieldpath.pb.go deleted file mode 100644 index 797b691666..0000000000 --- a/vendor/github.com/containerd/containerd/protobuf/plugin/fieldpath.pb.go +++ /dev/null @@ -1,73 +0,0 @@ -// Code generated by protoc-gen-gogo. -// source: github.com/containerd/containerd/protobuf/plugin/fieldpath.proto -// DO NOT EDIT! - -/* -Package plugin is a generated protocol buffer package. - -It is generated from these files: - github.com/containerd/containerd/protobuf/plugin/fieldpath.proto - -It has these top-level messages: -*/ -package plugin - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" -import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -var E_FieldpathAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63300, - Name: "containerd.plugin.fieldpath_all", - Tag: "varint,63300,opt,name=fieldpath_all,json=fieldpathAll", - Filename: "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto", -} - -var E_Fieldpath = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64400, - Name: "containerd.plugin.fieldpath", - Tag: "varint,64400,opt,name=fieldpath", - Filename: "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto", -} - -func init() { - proto.RegisterExtension(E_FieldpathAll) - proto.RegisterExtension(E_Fieldpath) -} - -func init() { - proto.RegisterFile("github.com/containerd/containerd/protobuf/plugin/fieldpath.proto", fileDescriptorFieldpath) -} - -var fileDescriptorFieldpath = []byte{ - // 203 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x48, 0xcf, 0x2c, 0xc9, - 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, - 0x4a, 0x41, 0x66, 0x16, 0x14, 0xe5, 0x97, 0xe4, 0x27, 0x95, 0xa6, 0xe9, 0x17, 0xe4, 0x94, 0xa6, - 0x67, 0xe6, 0xe9, 0xa7, 0x65, 0xa6, 0xe6, 0xa4, 0x14, 0x24, 0x96, 0x64, 0xe8, 0x81, 0x65, 0x84, - 0x04, 0x11, 0x6a, 0xf5, 0x20, 0x4a, 0xa4, 0x14, 0xd2, 0xf3, 0xf3, 0xd3, 0x73, 0x52, 0x11, 0x5a, - 0x53, 0x52, 0x8b, 0x93, 0x8b, 0x32, 0x0b, 0x4a, 0xf2, 0x8b, 0x20, 0x9a, 0xac, 0x9c, 0xb9, 0x78, - 0xe1, 0xe6, 0xc4, 0x27, 0xe6, 0xe4, 0x08, 0xc9, 0xe8, 0x41, 0xf4, 0xe8, 0xc1, 0xf4, 0xe8, 0xb9, - 0x65, 0xe6, 0xa4, 0xfa, 0x17, 0x94, 0x64, 0xe6, 0xe7, 0x15, 0x4b, 0x1c, 0x79, 0xc7, 0xac, 0xc0, - 0xa8, 0xc1, 0x11, 0xc4, 0x03, 0xd7, 0xe4, 0x98, 0x93, 0x63, 0x65, 0xcf, 0xc5, 0x09, 0xe7, 0x0b, - 0xc9, 0x63, 0x18, 0xe0, 0x9b, 0x5a, 0x5c, 0x9c, 0x98, 0x0e, 0x37, 0x63, 0xc2, 0x77, 0x88, 0x19, - 0x08, 0x3d, 0x4e, 0x12, 0x27, 0x1e, 0xca, 0x31, 0xdc, 0x78, 0x28, 0xc7, 0xd0, 0xf0, 0x48, 0x8e, - 0xf1, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x04, 0x04, 0x00, - 0x00, 0xff, 0xff, 0xd6, 0x21, 0x2a, 0xb6, 0x17, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/containerd/containerd/protobuf/plugin/fieldpath.proto b/vendor/github.com/containerd/containerd/protobuf/plugin/fieldpath.proto deleted file mode 100644 index 0674dc6514..0000000000 --- a/vendor/github.com/containerd/containerd/protobuf/plugin/fieldpath.proto +++ /dev/null @@ -1,40 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto2"; -package containerd.plugin; - -import "google/protobuf/descriptor.proto"; - -extend google.protobuf.FileOptions { - optional bool fieldpath_all = 63300; -} - -extend google.protobuf.MessageOptions { - optional bool fieldpath = 64400; -} diff --git a/vendor/github.com/containerd/containerd/protobuf/plugin/helpers.go b/vendor/github.com/containerd/containerd/protobuf/plugin/helpers.go deleted file mode 100644 index 7a2af56fbf..0000000000 --- a/vendor/github.com/containerd/containerd/protobuf/plugin/helpers.go +++ /dev/null @@ -1,11 +0,0 @@ -package plugin - -import ( - "github.com/gogo/protobuf/proto" - "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" -) - -// FieldpathEnabled returns true if E_Fieldpath is enabled -func FieldpathEnabled(file *descriptor.FileDescriptorProto, message *descriptor.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Fieldpath, proto.GetBoolExtension(file.Options, E_FieldpathAll, false)) -} diff --git a/vendor/github.com/containerd/containerd/reaper/reaper.go b/vendor/github.com/containerd/containerd/reaper/reaper.go index d7dfbb2aa9..9127fc5a18 100644 --- a/vendor/github.com/containerd/containerd/reaper/reaper.go +++ b/vendor/github.com/containerd/containerd/reaper/reaper.go @@ -15,7 +15,7 @@ import ( // ErrNoSuchProcess is returned when the process no longer exists var ErrNoSuchProcess = errors.New("no such process") -const bufferSize = 2048 +const bufferSize = 1024 // Reap should be called when the process receives an SIGCHLD. Reap will reap // all exited processes and close their wait channels diff --git a/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go b/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go index 46677e4914..222cf83c0c 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go @@ -2,6 +2,7 @@ package docker import ( "context" + "fmt" "io" "net/http" "path" @@ -37,32 +38,60 @@ func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.R return nil, err } - for _, u := range urls { - req, err := http.NewRequest(http.MethodGet, u, nil) - if err != nil { - return nil, err - } + return newHTTPReadSeeker(desc.Size, func(offset int64) (io.ReadCloser, error) { + for _, u := range urls { + rc, err := r.open(ctx, u, desc.MediaType, offset) + if err != nil { + if errdefs.IsNotFound(err) { + continue // try one of the other urls. 
+ } - req.Header.Set("Accept", strings.Join([]string{desc.MediaType, `*`}, ", ")) - resp, err := r.doRequestWithRetries(ctx, req, nil) - if err != nil { - return nil, err - } - - if resp.StatusCode > 299 { - resp.Body.Close() - if resp.StatusCode == http.StatusNotFound { - continue // try one of the other urls. + return nil, err } - return nil, errors.Errorf("unexpected status code %v: %v", u, resp.Status) + + return rc, nil } - return resp.Body, nil + return nil, errors.Wrapf(errdefs.ErrNotFound, + "could not fetch content descriptor %v (%v) from remote", + desc.Digest, desc.MediaType) + + }) +} + +func (r dockerFetcher) open(ctx context.Context, u, mediatype string, offset int64) (io.ReadCloser, error) { + req, err := http.NewRequest(http.MethodGet, u, nil) + if err != nil { + return nil, err } - return nil, errors.Wrapf(errdefs.ErrNotFound, - "could not fetch content descriptor %v (%v) from remote", - desc.Digest, desc.MediaType) + req.Header.Set("Accept", strings.Join([]string{mediatype, `*`}, ", ")) + + if offset > 0 { + // TODO(stevvooe): Only set this header in response to the + // "Accept-Ranges: bytes" header. + req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset)) + } + + resp, err := r.doRequestWithRetries(ctx, req, nil) + if err != nil { + return nil, err + } + + if resp.StatusCode > 299 { + // TODO(stevvooe): When doing a offset specific request, we should + // really distinguish between a 206 and a 200. In the case of 200, we + // can discard the bytes, hiding the seek behavior from the + // implementation. + + resp.Body.Close() + if resp.StatusCode == http.StatusNotFound { + return nil, errors.Wrapf(errdefs.ErrNotFound, "content at %v not found", u) + } + return nil, errors.Errorf("unexpected status code %v: %v", u, resp.Status) + } + + return resp.Body, nil } // getV2URLPaths generates the candidate urls paths for the object based on the diff --git a/vendor/github.com/containerd/containerd/remotes/docker/httpreadseeker.go b/vendor/github.com/containerd/containerd/remotes/docker/httpreadseeker.go new file mode 100644 index 0000000000..f6de60a274 --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/httpreadseeker.go @@ -0,0 +1,128 @@ +package docker + +import ( + "bytes" + "io" + "io/ioutil" + + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/log" + "github.com/pkg/errors" +) + +type httpReadSeeker struct { + size int64 + offset int64 + rc io.ReadCloser + open func(offset int64) (io.ReadCloser, error) + closed bool +} + +func newHTTPReadSeeker(size int64, open func(offset int64) (io.ReadCloser, error)) (io.ReadCloser, error) { + return &httpReadSeeker{ + size: size, + open: open, + }, nil +} + +func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) { + if hrs.closed { + return 0, io.EOF + } + + rd, err := hrs.reader() + if err != nil { + return 0, err + } + + n, err = rd.Read(p) + hrs.offset += int64(n) + return +} + +func (hrs *httpReadSeeker) Close() error { + if hrs.closed { + return nil + } + hrs.closed = true + if hrs.rc != nil { + return hrs.rc.Close() + } + + return nil +} + +func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) { + if hrs.closed { + return 0, errors.Wrap(errdefs.ErrUnavailable, "Fetcher.Seek: closed") + } + + abs := hrs.offset + switch whence { + case io.SeekStart: + abs = offset + case io.SeekCurrent: + abs += offset + case io.SeekEnd: + if hrs.size == -1 { + return 0, errors.Wrap(errdefs.ErrUnavailable, "Fetcher.Seek: unknown size, cannot seek from end") + } + abs = 
hrs.size + offset + default: + return 0, errors.Wrap(errdefs.ErrInvalidArgument, "Fetcher.Seek: invalid whence") + } + + if abs < 0 { + return 0, errors.Wrapf(errdefs.ErrInvalidArgument, "Fetcher.Seek: negative offset") + } + + if abs != hrs.offset { + if hrs.rc != nil { + if err := hrs.rc.Close(); err != nil { + log.L.WithError(err).Errorf("Fetcher.Seek: failed to close ReadCloser") + } + + hrs.rc = nil + } + + hrs.offset = abs + } + + return hrs.offset, nil +} + +func (hrs *httpReadSeeker) reader() (io.Reader, error) { + if hrs.rc != nil { + return hrs.rc, nil + } + + if hrs.size == -1 || hrs.offset < hrs.size { + // only try to reopen the body request if we are seeking to a value + // less than the actual size. + if hrs.open == nil { + return nil, errors.Wrapf(errdefs.ErrNotImplemented, "cannot open") + } + + rc, err := hrs.open(hrs.offset) + if err != nil { + return nil, errors.Wrapf(err, "httpReaderSeeker: failed open") + } + + if hrs.rc != nil { + if err := hrs.rc.Close(); err != nil { + log.L.WithError(err).Errorf("httpReadSeeker: failed to close ReadCloser") + } + } + hrs.rc = rc + } else { + // There is an edge case here where offset == size of the content. If + // we seek, we will probably get an error for content that cannot be + // sought (?). In that case, we should err on committing the content, + // as the length is already satisified but we just return the empty + // reader instead. + + hrs.rc = ioutil.NopCloser(bytes.NewReader([]byte{})) + } + + return hrs.rc, nil +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/pusher.go b/vendor/github.com/containerd/containerd/remotes/docker/pusher.go index 24bd278a14..405480b541 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/pusher.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/pusher.go @@ -36,7 +36,7 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten status, err := p.tracker.GetStatus(ref) if err == nil { if status.Offset == status.Total { - return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "ref %v already exists", ref) + return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "ref %v", ref) } // TODO: Handle incomplete status } else if !errdefs.IsNotFound(err) { @@ -52,7 +52,11 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex: isManifest = true - existCheck = path.Join("manifests", desc.Digest.String()) + if p.tag == "" { + existCheck = path.Join("manifests", desc.Digest.String()) + } else { + existCheck = path.Join("manifests", p.tag) + } default: existCheck = path.Join("blobs", desc.Digest.String()) } @@ -71,15 +75,26 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten log.G(ctx).WithError(err).Debugf("Unable to check existence, continuing with push") } else { if resp.StatusCode == http.StatusOK { - p.tracker.SetStatus(ref, Status{ - Status: content.Status{ - Ref: ref, - // TODO: Set updated time? 
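To make the new fetcher path concrete: Fetch now hands newHTTPReadSeeker an open callback, and a forward seek simply re-issues the request with a Range header starting at the new offset. A minimal sketch of such a callback against a plain HTTP server follows; the bare URL handling and use of http.DefaultClient are assumptions for illustration, not the authenticated transport the real resolver uses.

package example

import (
	"fmt"
	"io"
	"net/http"
)

// openAt re-requests the same URL, asking the server to resume at offset.
// A closure over this is the kind of open func newHTTPReadSeeker expects.
func openAt(url string, offset int64) (io.ReadCloser, error) {
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	if offset > 0 {
		// Same header the fetcher sets; as its TODO notes, a stricter
		// implementation would first check for "Accept-Ranges: bytes".
		req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
		resp.Body.Close()
		return nil, fmt.Errorf("unexpected status %s", resp.Status)
	}
	return resp.Body, nil
}

A caller would then pass a closure over openAt, together with the descriptor size, to newHTTPReadSeeker, much as dockerFetcher.Fetch does with its authenticated r.open.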
- }, - }) - return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v on remote", desc.Digest) - } - if resp.StatusCode != http.StatusNotFound { + var exists bool + if isManifest && p.tag != "" { + dgstHeader := digest.Digest(resp.Header.Get("Docker-Content-Digest")) + if dgstHeader == desc.Digest { + exists = true + } + } else { + exists = true + } + + if exists { + p.tracker.SetStatus(ref, Status{ + Status: content.Status{ + Ref: ref, + // TODO: Set updated time? + }, + }) + return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v on remote", desc.Digest) + } + } else if resp.StatusCode != http.StatusNotFound { // TODO: log error return nil, errors.Errorf("unexpected response: %s", resp.Status) } diff --git a/vendor/github.com/containerd/containerd/remotes/docker/resolver.go b/vendor/github.com/containerd/containerd/remotes/docker/resolver.go index 7a11504956..57a18b664a 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/resolver.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/resolver.go @@ -7,7 +7,6 @@ import ( "io" "io/ioutil" "net/http" - "net/textproto" "net/url" "path" "strconv" @@ -298,7 +297,7 @@ func (r *dockerBase) authorize(req *http.Request) { func (r *dockerBase) doRequest(ctx context.Context, req *http.Request) (*http.Response, error) { ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", req.URL.String())) - log.G(ctx).WithField("request.headers", req.Header).WithField("request.method", req.Method).Debug("Do request") + log.G(ctx).WithField("request.headers", req.Header).WithField("request.method", req.Method).Debug("do request") r.authorize(req) resp, err := ctxhttp.Do(ctx, r.client, req) if err != nil { @@ -405,22 +404,6 @@ func copyRequest(req *http.Request) (*http.Request, error) { return &ireq, nil } -func isManifestAccept(h http.Header) bool { - for _, ah := range h[textproto.CanonicalMIMEHeaderKey("Accept")] { - switch ah { - case images.MediaTypeDockerSchema2Manifest: - fallthrough - case images.MediaTypeDockerSchema2ManifestList: - fallthrough - case ocispec.MediaTypeImageManifest: - fallthrough - case ocispec.MediaTypeImageIndex: - return true - } - } - return false -} - func (r *dockerBase) setTokenAuth(ctx context.Context, params map[string]string) error { realm, ok := params["realm"] if !ok { diff --git a/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go b/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go index 52f83d43fa..6b74cd67ee 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go @@ -29,10 +29,6 @@ import ( const manifestSizeLimit = 8e6 // 8MB -var ( - mediaTypeManifest = "application/vnd.docker.distribution.manifest.v1+json" -) - type blobState struct { diffID digest.Digest empty bool @@ -87,6 +83,7 @@ func (c *Converter) Handle(ctx context.Context, desc ocispec.Descriptor) ([]ocis { MediaType: images.MediaTypeDockerSchema2LayerGzip, Digest: c.pulledManifest.FSLayers[i].BlobSum, + Size: -1, }, }, descs...) 
} @@ -213,10 +210,16 @@ func (c *Converter) fetchBlob(ctx context.Context, desc ocispec.Descriptor) erro ref = remotes.MakeRefKey(ctx, desc) calc = newBlobStateCalculator() retry = 16 + size = desc.Size ) + // size may be unknown, set to zero for content ingest + if size == -1 { + size = 0 + } + tryit: - cw, err := c.contentStore.Writer(ctx, ref, desc.Size, desc.Digest) + cw, err := c.contentStore.Writer(ctx, ref, size, desc.Digest) if err != nil { if errdefs.IsUnavailable(err) { select { @@ -277,7 +280,8 @@ tryit: eg.Go(func() error { defer pw.Close() - return content.Copy(ctx, cw, io.TeeReader(rc, pw), desc.Size, desc.Digest) + + return content.Copy(ctx, cw, io.TeeReader(rc, pw), size, desc.Digest) }) if err := eg.Wait(); err != nil { @@ -285,7 +289,7 @@ tryit: } } - if desc.Size == 0 { + if desc.Size == -1 { info, err := c.contentStore.Info(ctx, desc.Digest) if err != nil { return errors.Wrap(err, "failed to get blob info") diff --git a/vendor/github.com/containerd/containerd/rootfs/apply.go b/vendor/github.com/containerd/containerd/rootfs/apply.go index a198c99f94..e6d2be6a71 100644 --- a/vendor/github.com/containerd/containerd/rootfs/apply.go +++ b/vendor/github.com/containerd/containerd/rootfs/apply.go @@ -9,7 +9,7 @@ import ( "github.com/containerd/containerd/diff" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/log" - "github.com/containerd/containerd/snapshot" + "github.com/containerd/containerd/snapshots" "github.com/opencontainers/go-digest" "github.com/opencontainers/image-spec/identity" ocispec "github.com/opencontainers/image-spec/specs-go/v1" @@ -30,7 +30,7 @@ type Layer struct { // The returned result is a chain id digest representing all the applied layers. // Layers are applied in order they are given, making the first layer the // bottom-most layer in the layer chain. -func ApplyLayers(ctx context.Context, layers []Layer, sn snapshot.Snapshotter, a diff.Differ) (digest.Digest, error) { +func ApplyLayers(ctx context.Context, layers []Layer, sn snapshots.Snapshotter, a diff.Differ) (digest.Digest, error) { var chain []digest.Digest for _, layer := range layers { if _, err := ApplyLayer(ctx, layer, chain, sn, a); err != nil { @@ -46,7 +46,7 @@ func ApplyLayers(ctx context.Context, layers []Layer, sn snapshot.Snapshotter, a // ApplyLayer applies a single layer on top of the given provided layer chain, // using the provided snapshotter and applier. If the layer was unpacked true // is returned, if the layer already exists false is returned. -func ApplyLayer(ctx context.Context, layer Layer, chain []digest.Digest, sn snapshot.Snapshotter, a diff.Differ, opts ...snapshot.Opt) (bool, error) { +func ApplyLayer(ctx context.Context, layer Layer, chain []digest.Digest, sn snapshots.Snapshotter, a diff.Differ, opts ...snapshots.Opt) (bool, error) { var ( parent = identity.ChainID(chain) chainID = identity.ChainID(append(chain, layer.Diff.Digest)) diff --git a/vendor/github.com/containerd/containerd/rootfs/diff.go b/vendor/github.com/containerd/containerd/rootfs/diff.go index 035eb30261..bab7a3cca1 100644 --- a/vendor/github.com/containerd/containerd/rootfs/diff.go +++ b/vendor/github.com/containerd/containerd/rootfs/diff.go @@ -5,7 +5,7 @@ import ( "github.com/containerd/containerd/diff" "github.com/containerd/containerd/mount" - "github.com/containerd/containerd/snapshot" + "github.com/containerd/containerd/snapshots" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "golang.org/x/net/context" ) @@ -14,7 +14,7 @@ import ( // of the snapshot. 
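The rootfs helpers above key every unpacked layer stack by its chain ID, which is why ApplyLayer can cheaply detect that a snapshot for a given chain already exists. Below is a small sketch of how those keys derive from the layers' uncompressed diff IDs; the digests are fabricated for illustration.

package example

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
	"github.com/opencontainers/image-spec/identity"
)

func printChainIDs() {
	// Hypothetical uncompressed-layer diff IDs, bottom-most layer first.
	diffIDs := []digest.Digest{
		digest.FromString("layer-0"),
		digest.FromString("layer-1"),
		digest.FromString("layer-2"),
	}

	// ApplyLayers grows the chain one layer at a time; the snapshot key at
	// each step is the chain ID of the prefix applied so far, and the final
	// value identifies the complete rootfs.
	for i := range diffIDs {
		fmt.Printf("after layer %d: %s\n", i, identity.ChainID(diffIDs[:i+1]))
	}
}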
A content ref is provided to track the progress of the // content creation and the provided snapshotter and mount differ are used // for calculating the diff. The descriptor for the layer diff is returned. -func Diff(ctx context.Context, snapshotID string, sn snapshot.Snapshotter, d diff.Differ, opts ...diff.Opt) (ocispec.Descriptor, error) { +func Diff(ctx context.Context, snapshotID string, sn snapshots.Snapshotter, d diff.Differ, opts ...diff.Opt) (ocispec.Descriptor, error) { info, err := sn.Stat(ctx, snapshotID) if err != nil { return ocispec.Descriptor{}, err @@ -28,7 +28,7 @@ func Diff(ctx context.Context, snapshotID string, sn snapshot.Snapshotter, d dif defer sn.Remove(ctx, lowerKey) var upper []mount.Mount - if info.Kind == snapshot.KindActive { + if info.Kind == snapshots.KindActive { upper, err = sn.Mounts(ctx, snapshotID) if err != nil { return ocispec.Descriptor{}, err diff --git a/vendor/github.com/containerd/containerd/rootfs/init.go b/vendor/github.com/containerd/containerd/rootfs/init.go index 271e6cee51..4f32f11ae7 100644 --- a/vendor/github.com/containerd/containerd/rootfs/init.go +++ b/vendor/github.com/containerd/containerd/rootfs/init.go @@ -8,7 +8,7 @@ import ( "github.com/containerd/containerd/log" "github.com/containerd/containerd/mount" - "github.com/containerd/containerd/snapshot" + "github.com/containerd/containerd/snapshots" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" ) @@ -26,7 +26,7 @@ type Mounter interface { } // InitRootFS initializes the snapshot for use as a rootfs -func InitRootFS(ctx context.Context, name string, parent digest.Digest, readonly bool, snapshotter snapshot.Snapshotter, mounter Mounter) ([]mount.Mount, error) { +func InitRootFS(ctx context.Context, name string, parent digest.Digest, readonly bool, snapshotter snapshots.Snapshotter, mounter Mounter) ([]mount.Mount, error) { _, err := snapshotter.Stat(ctx, name) if err == nil { return nil, errors.Errorf("rootfs already exists") @@ -51,7 +51,7 @@ func InitRootFS(ctx context.Context, name string, parent digest.Digest, readonly return snapshotter.Prepare(ctx, name, parentS) } -func createInitLayer(ctx context.Context, parent, initName string, initFn func(string) error, snapshotter snapshot.Snapshotter, mounter Mounter) (string, error) { +func createInitLayer(ctx context.Context, parent, initName string, initFn func(string) error, snapshotter snapshots.Snapshotter, mounter Mounter) (string, error) { initS := fmt.Sprintf("%s %s", parent, initName) if _, err := snapshotter.Stat(ctx, initS); err == nil { return initS, nil @@ -69,12 +69,12 @@ func createInitLayer(ctx context.Context, parent, initName string, initFn func(s if err != nil { return "", err } + defer func() { if err != nil { - // TODO: once implemented uncomment - //if rerr := snapshotter.Remove(ctx, td); rerr != nil { - // log.G(ctx).Errorf("Failed to remove snapshot %s: %v", td, merr) - //} + if rerr := snapshotter.Remove(ctx, td); rerr != nil { + log.G(ctx).Errorf("Failed to remove snapshot %s: %v", td, rerr) + } } }() diff --git a/vendor/github.com/containerd/containerd/server/config.go b/vendor/github.com/containerd/containerd/server/config.go index 26af539acc..f056c7b837 100644 --- a/vendor/github.com/containerd/containerd/server/config.go +++ b/vendor/github.com/containerd/containerd/server/config.go @@ -23,8 +23,8 @@ type Config struct { Metrics MetricsConfig `toml:"metrics"` // Plugins provides plugin specific configuration for the initialization of a plugin Plugins map[string]toml.Primitive 
`toml:"plugins"` - // Enable containerd as a subreaper - Subreaper bool `toml:"subreaper"` + // NoSubreaper disables containerd as a subreaper + NoSubreaper bool `toml:"no_subreaper"` // OOMScore adjust the containerd's oom score OOMScore int `toml:"oom_score"` // Cgroup specifies cgroup information for the containerd daemon process diff --git a/vendor/github.com/containerd/containerd/server/server.go b/vendor/github.com/containerd/containerd/server/server.go index f9ca044835..6af6df073a 100644 --- a/vendor/github.com/containerd/containerd/server/server.go +++ b/vendor/github.com/containerd/containerd/server/server.go @@ -18,7 +18,7 @@ import ( introspection "github.com/containerd/containerd/api/services/introspection/v1" leasesapi "github.com/containerd/containerd/api/services/leases/v1" namespaces "github.com/containerd/containerd/api/services/namespaces/v1" - snapshotapi "github.com/containerd/containerd/api/services/snapshot/v1" + snapshotsapi "github.com/containerd/containerd/api/services/snapshots/v1" tasks "github.com/containerd/containerd/api/services/tasks/v1" version "github.com/containerd/containerd/api/services/version/v1" "github.com/containerd/containerd/content" @@ -27,7 +27,7 @@ import ( "github.com/containerd/containerd/log" "github.com/containerd/containerd/metadata" "github.com/containerd/containerd/plugin" - "github.com/containerd/containerd/snapshot" + "github.com/containerd/containerd/snapshots" metrics "github.com/docker/go-metrics" grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" "github.com/pkg/errors" @@ -39,12 +39,15 @@ import ( // New creates and initializes a new containerd server func New(ctx context.Context, config *Config) (*Server, error) { - if config.Root == "" { + switch { + case config.Root == "": return nil, errors.New("root must be specified") - } - if config.State == "" { + case config.State == "": return nil, errors.New("state must be specified") + case config.Root == config.State: + return nil, errors.New("root and state must be different paths") } + if err := os.MkdirAll(config.Root, 0711); err != nil { return nil, err } @@ -196,7 +199,7 @@ func loadPlugins(config *Config) ([]*plugin.Registration, error) { return nil, err } - snapshotters := make(map[string]snapshot.Snapshotter) + snapshotters := make(map[string]snapshots.Snapshotter) for name, sn := range snapshottersRaw { sn, err := sn.Instance() if err != nil { @@ -204,7 +207,7 @@ func loadPlugins(config *Config) ([]*plugin.Registration, error) { Warnf("could not use snapshotter %v in metadata plugin", name) continue } - snapshotters[name] = sn.(snapshot.Snapshotter) + snapshotters[name] = sn.(snapshots.Snapshotter) } path := filepath.Join(ic.Root, "meta.db") @@ -246,7 +249,7 @@ func interceptor( // No need to change the context case version.VersionServer: ctx = log.WithModule(ctx, "version") - case snapshotapi.SnapshotsServer: + case snapshotsapi.SnapshotsServer: ctx = log.WithModule(ctx, "snapshot") case diff.DiffServer: ctx = log.WithModule(ctx, "diff") diff --git a/vendor/github.com/containerd/containerd/server/server_linux.go b/vendor/github.com/containerd/containerd/server/server_linux.go index 03244e90dd..f6f679cc90 100644 --- a/vendor/github.com/containerd/containerd/server/server_linux.go +++ b/vendor/github.com/containerd/containerd/server/server_linux.go @@ -12,7 +12,7 @@ import ( // apply sets config settings on the server process func apply(ctx context.Context, config *Config) error { - if config.Subreaper { + if !config.NoSubreaper { log.G(ctx).Info("setting 
subreaper...") if err := sys.SetSubreaper(1); err != nil { return err diff --git a/vendor/github.com/containerd/containerd/server/server_solaris.go b/vendor/github.com/containerd/containerd/server/server_solaris.go index 71e1c0927b..3c39816be2 100644 --- a/vendor/github.com/containerd/containerd/server/server_solaris.go +++ b/vendor/github.com/containerd/containerd/server/server_solaris.go @@ -2,13 +2,6 @@ package server import "context" -const ( - // DefaultAddress is the default unix socket address - DefaultAddress = "/var/run/containerd/containerd.sock" - // DefaultDebugAddress is the default unix socket address for pprof data - DefaultDebugAddress = "/var/run/containerd/debug.sock" -) - func apply(_ context.Context, _ *Config) error { return nil } diff --git a/vendor/github.com/containerd/containerd/server/server_unsupported.go b/vendor/github.com/containerd/containerd/server/server_unsupported.go index f820e3f0c2..4df599e114 100644 --- a/vendor/github.com/containerd/containerd/server/server_unsupported.go +++ b/vendor/github.com/containerd/containerd/server/server_unsupported.go @@ -4,19 +4,6 @@ package server import "context" -const ( - // DefaultRootDir is the default location used by containerd to store - // persistent data - DefaultRootDir = "/var/lib/containerd" - // DefaultStateDir is the default location used by containerd to store - // transient data - DefaultStateDir = "/run/containerd" - // DefaultAddress is the default unix socket address - DefaultAddress = "/run/containerd/containerd.sock" - // DefaultDebugAddress is the default unix socket address for pprof data - DefaultDebugAddress = "/run/containerd/debug.sock" -) - func apply(_ context.Context, _ *Config) error { return nil } diff --git a/vendor/github.com/containerd/containerd/server/server_windows.go b/vendor/github.com/containerd/containerd/server/server_windows.go index b35e776f75..37b71dfa1a 100644 --- a/vendor/github.com/containerd/containerd/server/server_windows.go +++ b/vendor/github.com/containerd/containerd/server/server_windows.go @@ -4,24 +4,6 @@ package server import ( "context" - "os" - "path/filepath" -) - -var ( - // DefaultRootDir is the default location used by containerd to store - // persistent data - DefaultRootDir = filepath.Join(os.Getenv("programfiles"), "containerd", "root") - // DefaultStateDir is the default location used by containerd to store - // transient data - DefaultStateDir = filepath.Join(os.Getenv("programfiles"), "containerd", "state") -) - -const ( - // DefaultAddress is the default winpipe address - DefaultAddress = `\\.\pipe\containerd-containerd` - // DefaultDebugAddress is the default winpipe address for pprof data - DefaultDebugAddress = `\\.\pipe\containerd-debug` ) func apply(_ context.Context, _ *Config) error { diff --git a/vendor/github.com/containerd/containerd/services/content/service.go b/vendor/github.com/containerd/containerd/services/content/service.go deleted file mode 100644 index 3784579d56..0000000000 --- a/vendor/github.com/containerd/containerd/services/content/service.go +++ /dev/null @@ -1,454 +0,0 @@ -package content - -import ( - "io" - "sync" - - api "github.com/containerd/containerd/api/services/content/v1" - eventsapi "github.com/containerd/containerd/api/services/events/v1" - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/events" - "github.com/containerd/containerd/log" - "github.com/containerd/containerd/metadata" - "github.com/containerd/containerd/plugin" - 
"github.com/golang/protobuf/ptypes/empty" - digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" -) - -type service struct { - store content.Store - publisher events.Publisher -} - -var bufPool = sync.Pool{ - New: func() interface{} { - return make([]byte, 1<<20) - }, -} - -var _ api.ContentServer = &service{} - -func init() { - plugin.Register(&plugin.Registration{ - Type: plugin.GRPCPlugin, - ID: "content", - Requires: []plugin.Type{ - plugin.MetadataPlugin, - }, - InitFn: func(ic *plugin.InitContext) (interface{}, error) { - m, err := ic.Get(plugin.MetadataPlugin) - if err != nil { - return nil, err - } - - s, err := NewService(m.(*metadata.DB).ContentStore(), ic.Events) - return s, err - }, - }) -} - -// NewService returns the content GRPC server -func NewService(cs content.Store, publisher events.Publisher) (api.ContentServer, error) { - return &service{ - store: cs, - publisher: publisher, - }, nil -} - -func (s *service) Register(server *grpc.Server) error { - api.RegisterContentServer(server, s) - return nil -} - -func (s *service) Info(ctx context.Context, req *api.InfoRequest) (*api.InfoResponse, error) { - if err := req.Digest.Validate(); err != nil { - return nil, grpc.Errorf(codes.InvalidArgument, "%q failed validation", req.Digest) - } - - bi, err := s.store.Info(ctx, req.Digest) - if err != nil { - return nil, errdefs.ToGRPC(err) - } - - return &api.InfoResponse{ - Info: infoToGRPC(bi), - }, nil -} - -func (s *service) Update(ctx context.Context, req *api.UpdateRequest) (*api.UpdateResponse, error) { - if err := req.Info.Digest.Validate(); err != nil { - return nil, grpc.Errorf(codes.InvalidArgument, "%q failed validation", req.Info.Digest) - } - - info, err := s.store.Update(ctx, infoFromGRPC(req.Info), req.UpdateMask.GetPaths()...) 
- if err != nil { - return nil, errdefs.ToGRPC(err) - } - - return &api.UpdateResponse{ - Info: infoToGRPC(info), - }, nil -} - -func (s *service) List(req *api.ListContentRequest, session api.Content_ListServer) error { - var ( - buffer []api.Info - sendBlock = func(block []api.Info) error { - // send last block - return session.Send(&api.ListContentResponse{ - Info: block, - }) - } - ) - - if err := s.store.Walk(session.Context(), func(info content.Info) error { - buffer = append(buffer, api.Info{ - Digest: info.Digest, - Size_: info.Size, - CreatedAt: info.CreatedAt, - Labels: info.Labels, - }) - - if len(buffer) >= 100 { - if err := sendBlock(buffer); err != nil { - return err - } - - buffer = buffer[:0] - } - - return nil - }, req.Filters...); err != nil { - return err - } - - if len(buffer) > 0 { - // send last block - if err := sendBlock(buffer); err != nil { - return err - } - } - - return nil -} - -func (s *service) Delete(ctx context.Context, req *api.DeleteContentRequest) (*empty.Empty, error) { - if err := req.Digest.Validate(); err != nil { - return nil, grpc.Errorf(codes.InvalidArgument, err.Error()) - } - - if err := s.store.Delete(ctx, req.Digest); err != nil { - return nil, errdefs.ToGRPC(err) - } - - if err := s.publisher.Publish(ctx, "/content/delete", &eventsapi.ContentDelete{ - Digest: req.Digest, - }); err != nil { - return nil, err - } - - return &empty.Empty{}, nil -} - -func (s *service) Read(req *api.ReadContentRequest, session api.Content_ReadServer) error { - if err := req.Digest.Validate(); err != nil { - return grpc.Errorf(codes.InvalidArgument, "%v: %v", req.Digest, err) - } - - oi, err := s.store.Info(session.Context(), req.Digest) - if err != nil { - return errdefs.ToGRPC(err) - } - - ra, err := s.store.ReaderAt(session.Context(), req.Digest) - if err != nil { - return errdefs.ToGRPC(err) - } - defer ra.Close() - - var ( - offset = req.Offset - size = req.Size_ - - // TODO(stevvooe): Using the global buffer pool. At 32KB, it is probably - // little inefficient for work over a fast network. We can tune this later. - p = bufPool.Get().([]byte) - ) - defer bufPool.Put(p) - - if offset < 0 { - offset = 0 - } - - if size <= 0 { - size = oi.Size - offset - } - - if offset+size > oi.Size { - return grpc.Errorf(codes.OutOfRange, "read past object length %v bytes", oi.Size) - } - - if _, err := io.CopyBuffer( - &readResponseWriter{session: session}, - io.NewSectionReader(ra, offset, size), p); err != nil { - return err - } - - return nil -} - -// readResponseWriter is a writer that places the output into ReadContentRequest messages. -// -// This allows io.CopyBuffer to do the heavy lifting of chunking the responses -// into the buffer size. 
-type readResponseWriter struct { - offset int64 - session api.Content_ReadServer -} - -func (rw *readResponseWriter) Write(p []byte) (n int, err error) { - if err := rw.session.Send(&api.ReadContentResponse{ - Offset: rw.offset, - Data: p, - }); err != nil { - return 0, err - } - - rw.offset += int64(len(p)) - return len(p), nil -} - -func (s *service) Status(ctx context.Context, req *api.StatusRequest) (*api.StatusResponse, error) { - status, err := s.store.Status(ctx, req.Ref) - if err != nil { - return nil, errdefs.ToGRPCf(err, "could not get status for ref %q", req.Ref) - } - - var resp api.StatusResponse - resp.Status = &api.Status{ - StartedAt: status.StartedAt, - UpdatedAt: status.UpdatedAt, - Ref: status.Ref, - Offset: status.Offset, - Total: status.Total, - Expected: status.Expected, - } - - return &resp, nil -} - -func (s *service) ListStatuses(ctx context.Context, req *api.ListStatusesRequest) (*api.ListStatusesResponse, error) { - statuses, err := s.store.ListStatuses(ctx, req.Filters...) - if err != nil { - return nil, errdefs.ToGRPC(err) - } - - var resp api.ListStatusesResponse - for _, status := range statuses { - resp.Statuses = append(resp.Statuses, api.Status{ - StartedAt: status.StartedAt, - UpdatedAt: status.UpdatedAt, - Ref: status.Ref, - Offset: status.Offset, - Total: status.Total, - Expected: status.Expected, - }) - } - - return &resp, nil -} - -func (s *service) Write(session api.Content_WriteServer) (err error) { - var ( - ctx = session.Context() - msg api.WriteContentResponse - req *api.WriteContentRequest - ref string - total int64 - expected digest.Digest - ) - - defer func(msg *api.WriteContentResponse) { - // pump through the last message if no error was encountered - if err != nil { - if grpc.Code(err) != codes.AlreadyExists { - // TODO(stevvooe): Really need a log line here to track which - // errors are actually causing failure on the server side. May want - // to configure the service with an interceptor to make this work - // identically across all GRPC methods. - // - // This is pretty noisy, so we can remove it but leave it for now. - log.G(ctx).WithError(err).Error("(*service).Write failed") - } - - return - } - - err = session.Send(msg) - }(&msg) - - // handle the very first request! - req, err = session.Recv() - if err != nil { - return err - } - - ref = req.Ref - - if ref == "" { - return grpc.Errorf(codes.InvalidArgument, "first message must have a reference") - } - - fields := logrus.Fields{ - "ref": ref, - } - total = req.Total - expected = req.Expected - if total > 0 { - fields["total"] = total - } - - if expected != "" { - fields["expected"] = expected - } - - ctx = log.WithLogger(ctx, log.G(ctx).WithFields(fields)) - - log.G(ctx).Debug("(*service).Write started") - // this action locks the writer for the session. - wr, err := s.store.Writer(ctx, ref, total, expected) - if err != nil { - return errdefs.ToGRPC(err) - } - defer wr.Close() - - for { - msg.Action = req.Action - ws, err := wr.Status() - if err != nil { - return errdefs.ToGRPC(err) - } - - msg.Offset = ws.Offset // always set the offset. - - // NOTE(stevvooe): In general, there are two cases underwhich a remote - // writer is used. - // - // For pull, we almost always have this before fetching large content, - // through descriptors. We allow predeclaration of the expected size - // and digest. - // - // For push, it is more complex. If we want to cut through content into - // storage, we may have no expectation until we are done processing the - // content. 
The case here is the following: - // - // 1. Start writing content. - // 2. Compress inline. - // 3. Validate digest and size (maybe). - // - // Supporting these two paths is quite awkward but it lets both API - // users use the same writer style for each with a minimum of overhead. - if req.Expected != "" { - if expected != "" && expected != req.Expected { - return grpc.Errorf(codes.InvalidArgument, "inconsistent digest provided: %v != %v", req.Expected, expected) - } - expected = req.Expected - - if _, err := s.store.Info(session.Context(), req.Expected); err == nil { - if err := s.store.Abort(session.Context(), ref); err != nil { - log.G(ctx).WithError(err).Error("failed to abort write") - } - - return grpc.Errorf(codes.AlreadyExists, "blob with expected digest %v exists", req.Expected) - } - } - - if req.Total > 0 { - // Update the expected total. Typically, this could be seen at - // negotiation time or on a commit message. - if total > 0 && req.Total != total { - return grpc.Errorf(codes.InvalidArgument, "inconsistent total provided: %v != %v", req.Total, total) - } - total = req.Total - } - - switch req.Action { - case api.WriteActionStat: - msg.Digest = wr.Digest() - msg.StartedAt = ws.StartedAt - msg.UpdatedAt = ws.UpdatedAt - msg.Total = total - case api.WriteActionWrite, api.WriteActionCommit: - if req.Offset > 0 { - // validate the offset if provided - if req.Offset != ws.Offset { - return grpc.Errorf(codes.OutOfRange, "write @%v must occur at current offset %v", req.Offset, ws.Offset) - } - } - - if req.Offset == 0 && ws.Offset > 0 { - if err := wr.Truncate(req.Offset); err != nil { - return errors.Wrapf(err, "truncate failed") - } - msg.Offset = req.Offset - } - - // issue the write if we actually have data. - if len(req.Data) > 0 { - // While this looks like we could use io.WriterAt here, because we - // maintain the offset as append only, we just issue the write. - n, err := wr.Write(req.Data) - if err != nil { - return err - } - - if n != len(req.Data) { - // TODO(stevvooe): Perhaps, we can recover this by including it - // in the offset on the write return. 
- return grpc.Errorf(codes.DataLoss, "wrote %v of %v bytes", n, len(req.Data)) - } - - msg.Offset += int64(n) - } - - if req.Action == api.WriteActionCommit { - var opts []content.Opt - if req.Labels != nil { - opts = append(opts, content.WithLabels(req.Labels)) - } - if err := wr.Commit(ctx, total, expected, opts...); err != nil { - return err - } - } - - msg.Digest = wr.Digest() - } - - if err := session.Send(&msg); err != nil { - return err - } - - req, err = session.Recv() - if err != nil { - if err == io.EOF { - return nil - } - - return err - } - } -} - -func (s *service) Abort(ctx context.Context, req *api.AbortRequest) (*empty.Empty, error) { - if err := s.store.Abort(ctx, req.Ref); err != nil { - return nil, errdefs.ToGRPC(err) - } - - return &empty.Empty{}, nil -} diff --git a/vendor/github.com/containerd/containerd/services/diff/service.go b/vendor/github.com/containerd/containerd/services/diff/service.go deleted file mode 100644 index 81e44dcf6f..0000000000 --- a/vendor/github.com/containerd/containerd/services/diff/service.go +++ /dev/null @@ -1,142 +0,0 @@ -package diff - -import ( - diffapi "github.com/containerd/containerd/api/services/diff/v1" - "github.com/containerd/containerd/api/types" - "github.com/containerd/containerd/diff" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/mount" - "github.com/containerd/containerd/plugin" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "golang.org/x/net/context" - "google.golang.org/grpc" -) - -type config struct { - // Order is the order of preference in which to try diff algorithms, the - // first differ which is supported is used. - // Note when multiple differs may be supported, this order will be - // respected for which is choosen. Each differ should return the same - // correct output, allowing any ordering to be used to prefer - // more optimimal implementations. 
- Order []string `toml:"default"` -} - -func init() { - plugin.Register(&plugin.Registration{ - Type: plugin.GRPCPlugin, - ID: "diff", - Requires: []plugin.Type{ - plugin.DiffPlugin, - }, - Config: &config{ - Order: []string{"walking"}, - }, - InitFn: func(ic *plugin.InitContext) (interface{}, error) { - differs, err := ic.GetByType(plugin.DiffPlugin) - if err != nil { - return nil, err - } - - orderedNames := ic.Config.(*config).Order - ordered := make([]diff.Differ, len(orderedNames)) - for i, n := range orderedNames { - differp, ok := differs[n] - if !ok { - return nil, errors.Errorf("needed differ not loaded: %s", n) - } - differ, err := differp.Instance() - if err != nil { - return nil, errors.Wrapf(err, "could not load required differ due plugin init error: %s", n) - } - - ordered[i] = differ.(diff.Differ) - } - - return &service{ - differs: ordered, - }, nil - }, - }) -} - -type service struct { - differs []diff.Differ -} - -func (s *service) Register(gs *grpc.Server) error { - diffapi.RegisterDiffServer(gs, s) - return nil -} - -func (s *service) Apply(ctx context.Context, er *diffapi.ApplyRequest) (*diffapi.ApplyResponse, error) { - var ( - ocidesc ocispec.Descriptor - err error - desc = toDescriptor(er.Diff) - mounts = toMounts(er.Mounts) - ) - - for _, differ := range s.differs { - ocidesc, err = differ.Apply(ctx, desc, mounts) - if !errdefs.IsNotImplemented(err) { - break - } - } - - if err != nil { - return nil, errdefs.ToGRPC(err) - } - - return &diffapi.ApplyResponse{ - Applied: fromDescriptor(ocidesc), - }, nil - -} - -func (s *service) Diff(ctx context.Context, dr *diffapi.DiffRequest) (*diffapi.DiffResponse, error) { - var ( - ocidesc ocispec.Descriptor - err error - aMounts = toMounts(dr.Left) - bMounts = toMounts(dr.Right) - ) - - var opts []diff.Opt - if dr.MediaType != "" { - opts = append(opts, diff.WithMediaType(dr.MediaType)) - } - if dr.Ref != "" { - opts = append(opts, diff.WithReference(dr.Ref)) - } - if dr.Labels != nil { - opts = append(opts, diff.WithLabels(dr.Labels)) - } - - for _, differ := range s.differs { - ocidesc, err = differ.DiffMounts(ctx, aMounts, bMounts, opts...) 
- if !errdefs.IsNotImplemented(err) { - break - } - } - if err != nil { - return nil, errdefs.ToGRPC(err) - } - - return &diffapi.DiffResponse{ - Diff: fromDescriptor(ocidesc), - }, nil -} - -func toMounts(apim []*types.Mount) []mount.Mount { - mounts := make([]mount.Mount, len(apim)) - for i, m := range apim { - mounts[i] = mount.Mount{ - Type: m.Type, - Source: m.Source, - Options: m.Options, - } - } - return mounts -} diff --git a/vendor/github.com/containerd/containerd/services/images/client.go b/vendor/github.com/containerd/containerd/services/images/client.go deleted file mode 100644 index f746ddce8b..0000000000 --- a/vendor/github.com/containerd/containerd/services/images/client.go +++ /dev/null @@ -1,81 +0,0 @@ -package images - -import ( - "context" - - imagesapi "github.com/containerd/containerd/api/services/images/v1" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/images" - ptypes "github.com/gogo/protobuf/types" -) - -type remoteStore struct { - client imagesapi.ImagesClient -} - -// NewStoreFromClient returns a new image store client -func NewStoreFromClient(client imagesapi.ImagesClient) images.Store { - return &remoteStore{ - client: client, - } -} - -func (s *remoteStore) Get(ctx context.Context, name string) (images.Image, error) { - resp, err := s.client.Get(ctx, &imagesapi.GetImageRequest{ - Name: name, - }) - if err != nil { - return images.Image{}, errdefs.FromGRPC(err) - } - - return imageFromProto(resp.Image), nil -} - -func (s *remoteStore) List(ctx context.Context, filters ...string) ([]images.Image, error) { - resp, err := s.client.List(ctx, &imagesapi.ListImagesRequest{ - Filters: filters, - }) - if err != nil { - return nil, errdefs.FromGRPC(err) - } - - return imagesFromProto(resp.Images), nil -} - -func (s *remoteStore) Create(ctx context.Context, image images.Image) (images.Image, error) { - created, err := s.client.Create(ctx, &imagesapi.CreateImageRequest{ - Image: imageToProto(&image), - }) - if err != nil { - return images.Image{}, errdefs.FromGRPC(err) - } - - return imageFromProto(&created.Image), nil -} - -func (s *remoteStore) Update(ctx context.Context, image images.Image, fieldpaths ...string) (images.Image, error) { - var updateMask *ptypes.FieldMask - if len(fieldpaths) > 0 { - updateMask = &ptypes.FieldMask{ - Paths: fieldpaths, - } - } - - updated, err := s.client.Update(ctx, &imagesapi.UpdateImageRequest{ - Image: imageToProto(&image), - UpdateMask: updateMask, - }) - if err != nil { - return images.Image{}, errdefs.FromGRPC(err) - } - - return imageFromProto(&updated.Image), nil -} - -func (s *remoteStore) Delete(ctx context.Context, name string) error { - _, err := s.client.Delete(ctx, &imagesapi.DeleteImageRequest{ - Name: name, - }) - - return errdefs.FromGRPC(err) -} diff --git a/vendor/github.com/containerd/containerd/services/images/helpers.go b/vendor/github.com/containerd/containerd/services/images/helpers.go deleted file mode 100644 index 374aefd6b6..0000000000 --- a/vendor/github.com/containerd/containerd/services/images/helpers.go +++ /dev/null @@ -1,64 +0,0 @@ -package images - -import ( - imagesapi "github.com/containerd/containerd/api/services/images/v1" - "github.com/containerd/containerd/api/types" - "github.com/containerd/containerd/images" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -func imagesToProto(images []images.Image) []imagesapi.Image { - var imagespb []imagesapi.Image - - for _, image := range images { - imagespb = append(imagespb, imageToProto(&image)) - } - - 
return imagespb -} - -func imagesFromProto(imagespb []imagesapi.Image) []images.Image { - var images []images.Image - - for _, image := range imagespb { - images = append(images, imageFromProto(&image)) - } - - return images -} - -func imageToProto(image *images.Image) imagesapi.Image { - return imagesapi.Image{ - Name: image.Name, - Labels: image.Labels, - Target: descToProto(&image.Target), - CreatedAt: image.CreatedAt, - UpdatedAt: image.UpdatedAt, - } -} - -func imageFromProto(imagepb *imagesapi.Image) images.Image { - return images.Image{ - Name: imagepb.Name, - Labels: imagepb.Labels, - Target: descFromProto(&imagepb.Target), - CreatedAt: imagepb.CreatedAt, - UpdatedAt: imagepb.UpdatedAt, - } -} - -func descFromProto(desc *types.Descriptor) ocispec.Descriptor { - return ocispec.Descriptor{ - MediaType: desc.MediaType, - Size: desc.Size_, - Digest: desc.Digest, - } -} - -func descToProto(desc *ocispec.Descriptor) types.Descriptor { - return types.Descriptor{ - MediaType: desc.MediaType, - Size_: desc.Size, - Digest: desc.Digest, - } -} diff --git a/vendor/github.com/containerd/containerd/services/images/service.go b/vendor/github.com/containerd/containerd/services/images/service.go deleted file mode 100644 index 3843df554c..0000000000 --- a/vendor/github.com/containerd/containerd/services/images/service.go +++ /dev/null @@ -1,183 +0,0 @@ -package images - -import ( - "github.com/boltdb/bolt" - eventsapi "github.com/containerd/containerd/api/services/events/v1" - imagesapi "github.com/containerd/containerd/api/services/images/v1" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/events" - "github.com/containerd/containerd/images" - "github.com/containerd/containerd/metadata" - "github.com/containerd/containerd/plugin" - "github.com/golang/protobuf/ptypes/empty" - "github.com/pkg/errors" - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -func init() { - plugin.Register(&plugin.Registration{ - Type: plugin.GRPCPlugin, - ID: "images", - Requires: []plugin.Type{ - plugin.MetadataPlugin, - }, - InitFn: func(ic *plugin.InitContext) (interface{}, error) { - m, err := ic.Get(plugin.MetadataPlugin) - if err != nil { - return nil, err - } - return NewService(m.(*metadata.DB), ic.Events), nil - }, - }) -} - -type service struct { - db *metadata.DB - publisher events.Publisher -} - -// NewService returns the GRPC image server -func NewService(db *metadata.DB, publisher events.Publisher) imagesapi.ImagesServer { - return &service{ - db: db, - publisher: publisher, - } -} - -func (s *service) Register(server *grpc.Server) error { - imagesapi.RegisterImagesServer(server, s) - return nil -} - -func (s *service) Get(ctx context.Context, req *imagesapi.GetImageRequest) (*imagesapi.GetImageResponse, error) { - var resp imagesapi.GetImageResponse - - return &resp, errdefs.ToGRPC(s.withStoreView(ctx, func(ctx context.Context, store images.Store) error { - image, err := store.Get(ctx, req.Name) - if err != nil { - return err - } - imagepb := imageToProto(&image) - resp.Image = &imagepb - return nil - })) -} - -func (s *service) List(ctx context.Context, req *imagesapi.ListImagesRequest) (*imagesapi.ListImagesResponse, error) { - var resp imagesapi.ListImagesResponse - - return &resp, errdefs.ToGRPC(s.withStoreView(ctx, func(ctx context.Context, store images.Store) error { - images, err := store.List(ctx, req.Filters...) 
- if err != nil { - return err - } - - resp.Images = imagesToProto(images) - return nil - })) -} - -func (s *service) Create(ctx context.Context, req *imagesapi.CreateImageRequest) (*imagesapi.CreateImageResponse, error) { - if req.Image.Name == "" { - return nil, status.Errorf(codes.InvalidArgument, "Image.Name required") - } - - var ( - image = imageFromProto(&req.Image) - resp imagesapi.CreateImageResponse - ) - if err := s.withStoreUpdate(ctx, func(ctx context.Context, store images.Store) error { - created, err := store.Create(ctx, image) - if err != nil { - return err - } - - resp.Image = imageToProto(&created) - return nil - }); err != nil { - return nil, errdefs.ToGRPC(err) - } - - if err := s.publisher.Publish(ctx, "/images/create", &eventsapi.ImageCreate{ - Name: resp.Image.Name, - Labels: resp.Image.Labels, - }); err != nil { - return nil, err - } - - return &resp, nil - -} - -func (s *service) Update(ctx context.Context, req *imagesapi.UpdateImageRequest) (*imagesapi.UpdateImageResponse, error) { - if req.Image.Name == "" { - return nil, status.Errorf(codes.InvalidArgument, "Image.Name required") - } - - var ( - image = imageFromProto(&req.Image) - resp imagesapi.UpdateImageResponse - ) - if err := s.withStoreUpdate(ctx, func(ctx context.Context, store images.Store) error { - var fieldpaths []string - if req.UpdateMask != nil && len(req.UpdateMask.Paths) > 0 { - for _, path := range req.UpdateMask.Paths { - fieldpaths = append(fieldpaths, path) - } - } - - updated, err := store.Update(ctx, image, fieldpaths...) - if err != nil { - return err - } - - resp.Image = imageToProto(&updated) - return nil - }); err != nil { - return nil, errdefs.ToGRPC(err) - } - - if err := s.publisher.Publish(ctx, "/images/update", &eventsapi.ImageUpdate{ - Name: resp.Image.Name, - Labels: resp.Image.Labels, - }); err != nil { - return nil, err - } - - return &resp, nil -} - -func (s *service) Delete(ctx context.Context, req *imagesapi.DeleteImageRequest) (*empty.Empty, error) { - if err := s.withStoreUpdate(ctx, func(ctx context.Context, store images.Store) error { - return errdefs.ToGRPC(store.Delete(ctx, req.Name)) - }); err != nil { - return nil, err - } - - if err := s.publisher.Publish(ctx, "/images/delete", &eventsapi.ImageDelete{ - Name: req.Name, - }); err != nil { - return nil, err - } - - if err := s.db.GarbageCollect(ctx); err != nil { - return nil, errdefs.ToGRPC(errors.Wrap(err, "garbage collection failed")) - } - - return &empty.Empty{}, nil -} - -func (s *service) withStore(ctx context.Context, fn func(ctx context.Context, store images.Store) error) func(tx *bolt.Tx) error { - return func(tx *bolt.Tx) error { return fn(ctx, metadata.NewImageStore(tx)) } -} - -func (s *service) withStoreView(ctx context.Context, fn func(ctx context.Context, store images.Store) error) error { - return s.db.View(s.withStore(ctx, fn)) -} - -func (s *service) withStoreUpdate(ctx context.Context, fn func(ctx context.Context, store images.Store) error) error { - return s.db.Update(s.withStore(ctx, fn)) -} diff --git a/vendor/github.com/containerd/containerd/services/namespaces/service.go b/vendor/github.com/containerd/containerd/services/namespaces/service.go deleted file mode 100644 index b795ab52f7..0000000000 --- a/vendor/github.com/containerd/containerd/services/namespaces/service.go +++ /dev/null @@ -1,212 +0,0 @@ -package namespaces - -import ( - "strings" - - "github.com/boltdb/bolt" - eventsapi "github.com/containerd/containerd/api/services/events/v1" - api 
"github.com/containerd/containerd/api/services/namespaces/v1" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/events" - "github.com/containerd/containerd/metadata" - "github.com/containerd/containerd/namespaces" - "github.com/containerd/containerd/plugin" - "github.com/golang/protobuf/ptypes/empty" - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" -) - -func init() { - plugin.Register(&plugin.Registration{ - Type: plugin.GRPCPlugin, - ID: "namespaces", - Requires: []plugin.Type{ - plugin.MetadataPlugin, - }, - InitFn: func(ic *plugin.InitContext) (interface{}, error) { - m, err := ic.Get(plugin.MetadataPlugin) - if err != nil { - return nil, err - } - return NewService(m.(*metadata.DB), ic.Events), nil - }, - }) -} - -type service struct { - db *metadata.DB - publisher events.Publisher -} - -var _ api.NamespacesServer = &service{} - -// NewService returns the GRPC namespaces server -func NewService(db *metadata.DB, publisher events.Publisher) api.NamespacesServer { - return &service{ - db: db, - publisher: publisher, - } -} - -func (s *service) Register(server *grpc.Server) error { - api.RegisterNamespacesServer(server, s) - return nil -} - -func (s *service) Get(ctx context.Context, req *api.GetNamespaceRequest) (*api.GetNamespaceResponse, error) { - var resp api.GetNamespaceResponse - - return &resp, s.withStoreView(ctx, func(ctx context.Context, store namespaces.Store) error { - labels, err := store.Labels(ctx, req.Name) - if err != nil { - return errdefs.ToGRPC(err) - } - - resp.Namespace = api.Namespace{ - Name: req.Name, - Labels: labels, - } - - return nil - }) -} - -func (s *service) List(ctx context.Context, req *api.ListNamespacesRequest) (*api.ListNamespacesResponse, error) { - var resp api.ListNamespacesResponse - - return &resp, s.withStoreView(ctx, func(ctx context.Context, store namespaces.Store) error { - namespaces, err := store.List(ctx) - if err != nil { - return err - } - - for _, namespace := range namespaces { - labels, err := store.Labels(ctx, namespace) - if err != nil { - // In general, this should be unlikely, since we are holding a - // transaction to service this request. 
- return errdefs.ToGRPC(err) - } - - resp.Namespaces = append(resp.Namespaces, api.Namespace{ - Name: namespace, - Labels: labels, - }) - } - - return nil - }) -} - -func (s *service) Create(ctx context.Context, req *api.CreateNamespaceRequest) (*api.CreateNamespaceResponse, error) { - var resp api.CreateNamespaceResponse - - if err := s.withStoreUpdate(ctx, func(ctx context.Context, store namespaces.Store) error { - if err := store.Create(ctx, req.Namespace.Name, req.Namespace.Labels); err != nil { - return errdefs.ToGRPC(err) - } - - for k, v := range req.Namespace.Labels { - if err := store.SetLabel(ctx, req.Namespace.Name, k, v); err != nil { - return err - } - } - - resp.Namespace = req.Namespace - return nil - }); err != nil { - return &resp, err - } - - if err := s.publisher.Publish(ctx, "/namespaces/create", &eventsapi.NamespaceCreate{ - Name: req.Namespace.Name, - Labels: req.Namespace.Labels, - }); err != nil { - return &resp, err - } - - return &resp, nil - -} - -func (s *service) Update(ctx context.Context, req *api.UpdateNamespaceRequest) (*api.UpdateNamespaceResponse, error) { - var resp api.UpdateNamespaceResponse - if err := s.withStoreUpdate(ctx, func(ctx context.Context, store namespaces.Store) error { - if req.UpdateMask != nil && len(req.UpdateMask.Paths) > 0 { - for _, path := range req.UpdateMask.Paths { - switch { - case strings.HasPrefix(path, "labels."): - key := strings.TrimPrefix(path, "labels.") - if err := store.SetLabel(ctx, req.Namespace.Name, key, req.Namespace.Labels[key]); err != nil { - return err - } - default: - return grpc.Errorf(codes.InvalidArgument, "cannot update %q field", path) - } - } - } else { - // clear out the existing labels and then set them to the incoming request. - // get current set of labels - labels, err := store.Labels(ctx, req.Namespace.Name) - if err != nil { - return errdefs.ToGRPC(err) - } - - for k := range labels { - if err := store.SetLabel(ctx, req.Namespace.Name, k, ""); err != nil { - return err - } - } - - for k, v := range req.Namespace.Labels { - if err := store.SetLabel(ctx, req.Namespace.Name, k, v); err != nil { - return err - } - - } - } - - return nil - }); err != nil { - return &resp, err - } - - if err := s.publisher.Publish(ctx, "/namespaces/update", &eventsapi.NamespaceUpdate{ - Name: req.Namespace.Name, - Labels: req.Namespace.Labels, - }); err != nil { - return &resp, err - } - - return &resp, nil -} - -func (s *service) Delete(ctx context.Context, req *api.DeleteNamespaceRequest) (*empty.Empty, error) { - if err := s.withStoreUpdate(ctx, func(ctx context.Context, store namespaces.Store) error { - return errdefs.ToGRPC(store.Delete(ctx, req.Name)) - }); err != nil { - return &empty.Empty{}, err - } - // set the namespace in the context before publishing the event - ctx = namespaces.WithNamespace(ctx, req.Name) - if err := s.publisher.Publish(ctx, "/namespaces/delete", &eventsapi.NamespaceDelete{ - Name: req.Name, - }); err != nil { - return &empty.Empty{}, err - } - - return &empty.Empty{}, nil -} - -func (s *service) withStore(ctx context.Context, fn func(ctx context.Context, store namespaces.Store) error) func(tx *bolt.Tx) error { - return func(tx *bolt.Tx) error { return fn(ctx, metadata.NewNamespaceStore(tx)) } -} - -func (s *service) withStoreView(ctx context.Context, fn func(ctx context.Context, store namespaces.Store) error) error { - return s.db.View(s.withStore(ctx, fn)) -} - -func (s *service) withStoreUpdate(ctx context.Context, fn func(ctx context.Context, store namespaces.Store) error) error { - 
return s.db.Update(s.withStore(ctx, fn)) -} diff --git a/vendor/github.com/containerd/containerd/services/snapshot/service.go b/vendor/github.com/containerd/containerd/services/snapshot/service.go deleted file mode 100644 index 716b4c42c5..0000000000 --- a/vendor/github.com/containerd/containerd/services/snapshot/service.go +++ /dev/null @@ -1,295 +0,0 @@ -package snapshot - -import ( - gocontext "context" - - eventsapi "github.com/containerd/containerd/api/services/events/v1" - snapshotapi "github.com/containerd/containerd/api/services/snapshot/v1" - "github.com/containerd/containerd/api/types" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/events" - "github.com/containerd/containerd/log" - "github.com/containerd/containerd/metadata" - "github.com/containerd/containerd/mount" - "github.com/containerd/containerd/plugin" - "github.com/containerd/containerd/snapshot" - protoempty "github.com/golang/protobuf/ptypes/empty" - "golang.org/x/net/context" - "google.golang.org/grpc" -) - -func init() { - plugin.Register(&plugin.Registration{ - Type: plugin.GRPCPlugin, - ID: "snapshots", - Requires: []plugin.Type{ - plugin.MetadataPlugin, - }, - InitFn: newService, - }) -} - -var empty = &protoempty.Empty{} - -type service struct { - db *metadata.DB - publisher events.Publisher -} - -func newService(ic *plugin.InitContext) (interface{}, error) { - md, err := ic.Get(plugin.MetadataPlugin) - if err != nil { - return nil, err - } - - return &service{ - db: md.(*metadata.DB), - publisher: ic.Events, - }, nil -} - -func (s *service) getSnapshotter(name string) (snapshot.Snapshotter, error) { - if name == "" { - return nil, errdefs.ToGRPCf(errdefs.ErrInvalidArgument, "snapshotter argument missing") - } - - sn := s.db.Snapshotter(name) - if sn == nil { - return nil, errdefs.ToGRPCf(errdefs.ErrInvalidArgument, "snapshotter not loaded: %s", name) - } - return sn, nil -} - -func (s *service) Register(gs *grpc.Server) error { - snapshotapi.RegisterSnapshotsServer(gs, s) - return nil -} - -func (s *service) Prepare(ctx context.Context, pr *snapshotapi.PrepareSnapshotRequest) (*snapshotapi.PrepareSnapshotResponse, error) { - log.G(ctx).WithField("parent", pr.Parent).WithField("key", pr.Key).Debugf("Preparing snapshot") - sn, err := s.getSnapshotter(pr.Snapshotter) - if err != nil { - return nil, err - } - - var opts []snapshot.Opt - if pr.Labels != nil { - opts = append(opts, snapshot.WithLabels(pr.Labels)) - } - mounts, err := sn.Prepare(ctx, pr.Key, pr.Parent, opts...) - if err != nil { - return nil, errdefs.ToGRPC(err) - } - - if err := s.publisher.Publish(ctx, "/snapshot/prepare", &eventsapi.SnapshotPrepare{ - Key: pr.Key, - Parent: pr.Parent, - }); err != nil { - return nil, err - } - return &snapshotapi.PrepareSnapshotResponse{ - Mounts: fromMounts(mounts), - }, nil -} - -func (s *service) View(ctx context.Context, pr *snapshotapi.ViewSnapshotRequest) (*snapshotapi.ViewSnapshotResponse, error) { - log.G(ctx).WithField("parent", pr.Parent).WithField("key", pr.Key).Debugf("Preparing view snapshot") - sn, err := s.getSnapshotter(pr.Snapshotter) - if err != nil { - return nil, err - } - var opts []snapshot.Opt - if pr.Labels != nil { - opts = append(opts, snapshot.WithLabels(pr.Labels)) - } - mounts, err := sn.View(ctx, pr.Key, pr.Parent, opts...) 
- if err != nil { - return nil, errdefs.ToGRPC(err) - } - return &snapshotapi.ViewSnapshotResponse{ - Mounts: fromMounts(mounts), - }, nil -} - -func (s *service) Mounts(ctx context.Context, mr *snapshotapi.MountsRequest) (*snapshotapi.MountsResponse, error) { - log.G(ctx).WithField("key", mr.Key).Debugf("Getting snapshot mounts") - sn, err := s.getSnapshotter(mr.Snapshotter) - if err != nil { - return nil, err - } - - mounts, err := sn.Mounts(ctx, mr.Key) - if err != nil { - return nil, errdefs.ToGRPC(err) - } - return &snapshotapi.MountsResponse{ - Mounts: fromMounts(mounts), - }, nil -} - -func (s *service) Commit(ctx context.Context, cr *snapshotapi.CommitSnapshotRequest) (*protoempty.Empty, error) { - log.G(ctx).WithField("key", cr.Key).WithField("name", cr.Name).Debugf("Committing snapshot") - sn, err := s.getSnapshotter(cr.Snapshotter) - if err != nil { - return nil, err - } - - var opts []snapshot.Opt - if cr.Labels != nil { - opts = append(opts, snapshot.WithLabels(cr.Labels)) - } - if err := sn.Commit(ctx, cr.Name, cr.Key, opts...); err != nil { - return nil, errdefs.ToGRPC(err) - } - - if err := s.publisher.Publish(ctx, "/snapshot/commit", &eventsapi.SnapshotCommit{ - Key: cr.Key, - Name: cr.Name, - }); err != nil { - return nil, err - } - return empty, nil -} - -func (s *service) Remove(ctx context.Context, rr *snapshotapi.RemoveSnapshotRequest) (*protoempty.Empty, error) { - log.G(ctx).WithField("key", rr.Key).Debugf("Removing snapshot") - sn, err := s.getSnapshotter(rr.Snapshotter) - if err != nil { - return nil, err - } - - if err := sn.Remove(ctx, rr.Key); err != nil { - return nil, errdefs.ToGRPC(err) - } - - if err := s.publisher.Publish(ctx, "/snapshot/remove", &eventsapi.SnapshotRemove{ - Key: rr.Key, - }); err != nil { - return nil, err - } - return empty, nil -} - -func (s *service) Stat(ctx context.Context, sr *snapshotapi.StatSnapshotRequest) (*snapshotapi.StatSnapshotResponse, error) { - log.G(ctx).WithField("key", sr.Key).Debugf("Statting snapshot") - sn, err := s.getSnapshotter(sr.Snapshotter) - if err != nil { - return nil, err - } - - info, err := sn.Stat(ctx, sr.Key) - if err != nil { - return nil, errdefs.ToGRPC(err) - } - - return &snapshotapi.StatSnapshotResponse{Info: fromInfo(info)}, nil -} - -func (s *service) Update(ctx context.Context, sr *snapshotapi.UpdateSnapshotRequest) (*snapshotapi.UpdateSnapshotResponse, error) { - log.G(ctx).WithField("key", sr.Info.Name).Debugf("Updating snapshot") - sn, err := s.getSnapshotter(sr.Snapshotter) - if err != nil { - return nil, err - } - - info, err := sn.Update(ctx, toInfo(sr.Info), sr.UpdateMask.GetPaths()...) 
- if err != nil { - return nil, errdefs.ToGRPC(err) - } - - return &snapshotapi.UpdateSnapshotResponse{Info: fromInfo(info)}, nil -} - -func (s *service) List(sr *snapshotapi.ListSnapshotsRequest, ss snapshotapi.Snapshots_ListServer) error { - sn, err := s.getSnapshotter(sr.Snapshotter) - if err != nil { - return err - } - - var ( - buffer []snapshotapi.Info - sendBlock = func(block []snapshotapi.Info) error { - return ss.Send(&snapshotapi.ListSnapshotsResponse{ - Info: block, - }) - } - ) - err = sn.Walk(ss.Context(), func(ctx gocontext.Context, info snapshot.Info) error { - buffer = append(buffer, fromInfo(info)) - - if len(buffer) >= 100 { - if err := sendBlock(buffer); err != nil { - return err - } - - buffer = buffer[:0] - } - - return nil - }) - if err != nil { - return err - } - if len(buffer) > 0 { - // Send remaining infos - if err := sendBlock(buffer); err != nil { - return err - } - } - - return nil -} - -func (s *service) Usage(ctx context.Context, ur *snapshotapi.UsageRequest) (*snapshotapi.UsageResponse, error) { - sn, err := s.getSnapshotter(ur.Snapshotter) - if err != nil { - return nil, err - } - - usage, err := sn.Usage(ctx, ur.Key) - if err != nil { - return nil, errdefs.ToGRPC(err) - } - - return fromUsage(usage), nil -} - -func fromKind(kind snapshot.Kind) snapshotapi.Kind { - if kind == snapshot.KindActive { - return snapshotapi.KindActive - } - if kind == snapshot.KindView { - return snapshotapi.KindView - } - return snapshotapi.KindCommitted -} - -func fromInfo(info snapshot.Info) snapshotapi.Info { - return snapshotapi.Info{ - Name: info.Name, - Parent: info.Parent, - Kind: fromKind(info.Kind), - CreatedAt: info.Created, - UpdatedAt: info.Updated, - Labels: info.Labels, - } -} - -func fromUsage(usage snapshot.Usage) *snapshotapi.UsageResponse { - return &snapshotapi.UsageResponse{ - Inodes: usage.Inodes, - Size_: usage.Size, - } -} - -func fromMounts(mounts []mount.Mount) []*types.Mount { - out := make([]*types.Mount, len(mounts)) - for i, m := range mounts { - out[i] = &types.Mount{ - Type: m.Type, - Source: m.Source, - Options: m.Options, - } - } - return out -} diff --git a/vendor/github.com/containerd/containerd/services/snapshot/client.go b/vendor/github.com/containerd/containerd/snapshot.go similarity index 59% rename from vendor/github.com/containerd/containerd/services/snapshot/client.go rename to vendor/github.com/containerd/containerd/snapshot.go index a9b9ffe674..85bdba1b68 100644 --- a/vendor/github.com/containerd/containerd/services/snapshot/client.go +++ b/vendor/github.com/containerd/containerd/snapshot.go @@ -1,20 +1,20 @@ -package snapshot +package containerd import ( "context" "io" - snapshotapi "github.com/containerd/containerd/api/services/snapshot/v1" + snapshotsapi "github.com/containerd/containerd/api/services/snapshots/v1" "github.com/containerd/containerd/api/types" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/mount" - "github.com/containerd/containerd/snapshot" + "github.com/containerd/containerd/snapshots" protobuftypes "github.com/gogo/protobuf/types" ) // NewSnapshotterFromClient returns a new Snapshotter which communicates // over a GRPC connection. 
-func NewSnapshotterFromClient(client snapshotapi.SnapshotsClient, snapshotterName string) snapshot.Snapshotter { +func NewSnapshotterFromClient(client snapshotsapi.SnapshotsClient, snapshotterName string) snapshots.Snapshotter { return &remoteSnapshotter{ client: client, snapshotterName: snapshotterName, @@ -22,25 +22,25 @@ func NewSnapshotterFromClient(client snapshotapi.SnapshotsClient, snapshotterNam } type remoteSnapshotter struct { - client snapshotapi.SnapshotsClient + client snapshotsapi.SnapshotsClient snapshotterName string } -func (r *remoteSnapshotter) Stat(ctx context.Context, key string) (snapshot.Info, error) { +func (r *remoteSnapshotter) Stat(ctx context.Context, key string) (snapshots.Info, error) { resp, err := r.client.Stat(ctx, - &snapshotapi.StatSnapshotRequest{ + &snapshotsapi.StatSnapshotRequest{ Snapshotter: r.snapshotterName, Key: key, }) if err != nil { - return snapshot.Info{}, errdefs.FromGRPC(err) + return snapshots.Info{}, errdefs.FromGRPC(err) } return toInfo(resp.Info), nil } -func (r *remoteSnapshotter) Update(ctx context.Context, info snapshot.Info, fieldpaths ...string) (snapshot.Info, error) { +func (r *remoteSnapshotter) Update(ctx context.Context, info snapshots.Info, fieldpaths ...string) (snapshots.Info, error) { resp, err := r.client.Update(ctx, - &snapshotapi.UpdateSnapshotRequest{ + &snapshotsapi.UpdateSnapshotRequest{ Snapshotter: r.snapshotterName, Info: fromInfo(info), UpdateMask: &protobuftypes.FieldMask{ @@ -48,24 +48,24 @@ func (r *remoteSnapshotter) Update(ctx context.Context, info snapshot.Info, fiel }, }) if err != nil { - return snapshot.Info{}, errdefs.FromGRPC(err) + return snapshots.Info{}, errdefs.FromGRPC(err) } return toInfo(resp.Info), nil } -func (r *remoteSnapshotter) Usage(ctx context.Context, key string) (snapshot.Usage, error) { - resp, err := r.client.Usage(ctx, &snapshotapi.UsageRequest{ +func (r *remoteSnapshotter) Usage(ctx context.Context, key string) (snapshots.Usage, error) { + resp, err := r.client.Usage(ctx, &snapshotsapi.UsageRequest{ Snapshotter: r.snapshotterName, Key: key, }) if err != nil { - return snapshot.Usage{}, errdefs.FromGRPC(err) + return snapshots.Usage{}, errdefs.FromGRPC(err) } return toUsage(resp), nil } func (r *remoteSnapshotter) Mounts(ctx context.Context, key string) ([]mount.Mount, error) { - resp, err := r.client.Mounts(ctx, &snapshotapi.MountsRequest{ + resp, err := r.client.Mounts(ctx, &snapshotsapi.MountsRequest{ Snapshotter: r.snapshotterName, Key: key, }) @@ -75,14 +75,14 @@ func (r *remoteSnapshotter) Mounts(ctx context.Context, key string) ([]mount.Mou return toMounts(resp.Mounts), nil } -func (r *remoteSnapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshot.Opt) ([]mount.Mount, error) { - var local snapshot.Info +func (r *remoteSnapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) { + var local snapshots.Info for _, opt := range opts { if err := opt(&local); err != nil { return nil, err } } - resp, err := r.client.Prepare(ctx, &snapshotapi.PrepareSnapshotRequest{ + resp, err := r.client.Prepare(ctx, &snapshotsapi.PrepareSnapshotRequest{ Snapshotter: r.snapshotterName, Key: key, Parent: parent, @@ -94,14 +94,14 @@ func (r *remoteSnapshotter) Prepare(ctx context.Context, key, parent string, opt return toMounts(resp.Mounts), nil } -func (r *remoteSnapshotter) View(ctx context.Context, key, parent string, opts ...snapshot.Opt) ([]mount.Mount, error) { - var local snapshot.Info +func (r *remoteSnapshotter) 
View(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) { + var local snapshots.Info for _, opt := range opts { if err := opt(&local); err != nil { return nil, err } } - resp, err := r.client.View(ctx, &snapshotapi.ViewSnapshotRequest{ + resp, err := r.client.View(ctx, &snapshotsapi.ViewSnapshotRequest{ Snapshotter: r.snapshotterName, Key: key, Parent: parent, @@ -113,14 +113,14 @@ func (r *remoteSnapshotter) View(ctx context.Context, key, parent string, opts . return toMounts(resp.Mounts), nil } -func (r *remoteSnapshotter) Commit(ctx context.Context, name, key string, opts ...snapshot.Opt) error { - var local snapshot.Info +func (r *remoteSnapshotter) Commit(ctx context.Context, name, key string, opts ...snapshots.Opt) error { + var local snapshots.Info for _, opt := range opts { if err := opt(&local); err != nil { return err } } - _, err := r.client.Commit(ctx, &snapshotapi.CommitSnapshotRequest{ + _, err := r.client.Commit(ctx, &snapshotsapi.CommitSnapshotRequest{ Snapshotter: r.snapshotterName, Name: name, Key: key, @@ -130,15 +130,15 @@ func (r *remoteSnapshotter) Commit(ctx context.Context, name, key string, opts . } func (r *remoteSnapshotter) Remove(ctx context.Context, key string) error { - _, err := r.client.Remove(ctx, &snapshotapi.RemoveSnapshotRequest{ + _, err := r.client.Remove(ctx, &snapshotsapi.RemoveSnapshotRequest{ Snapshotter: r.snapshotterName, Key: key, }) return errdefs.FromGRPC(err) } -func (r *remoteSnapshotter) Walk(ctx context.Context, fn func(context.Context, snapshot.Info) error) error { - sc, err := r.client.List(ctx, &snapshotapi.ListSnapshotsRequest{ +func (r *remoteSnapshotter) Walk(ctx context.Context, fn func(context.Context, snapshots.Info) error) error { + sc, err := r.client.List(ctx, &snapshotsapi.ListSnapshotsRequest{ Snapshotter: r.snapshotterName, }) if err != nil { @@ -163,18 +163,22 @@ func (r *remoteSnapshotter) Walk(ctx context.Context, fn func(context.Context, s } } -func toKind(kind snapshotapi.Kind) snapshot.Kind { - if kind == snapshotapi.KindActive { - return snapshot.KindActive - } - if kind == snapshotapi.KindView { - return snapshot.KindView - } - return snapshot.KindCommitted +func (r *remoteSnapshotter) Close() error { + return nil } -func toInfo(info snapshotapi.Info) snapshot.Info { - return snapshot.Info{ +func toKind(kind snapshotsapi.Kind) snapshots.Kind { + if kind == snapshotsapi.KindActive { + return snapshots.KindActive + } + if kind == snapshotsapi.KindView { + return snapshots.KindView + } + return snapshots.KindCommitted +} + +func toInfo(info snapshotsapi.Info) snapshots.Info { + return snapshots.Info{ Name: info.Name, Parent: info.Parent, Kind: toKind(info.Kind), @@ -184,8 +188,8 @@ func toInfo(info snapshotapi.Info) snapshot.Info { } } -func toUsage(resp *snapshotapi.UsageResponse) snapshot.Usage { - return snapshot.Usage{ +func toUsage(resp *snapshotsapi.UsageResponse) snapshots.Usage { + return snapshots.Usage{ Inodes: resp.Inodes, Size: resp.Size_, } @@ -202,3 +206,24 @@ func toMounts(mm []*types.Mount) []mount.Mount { } return mounts } + +func fromKind(kind snapshots.Kind) snapshotsapi.Kind { + if kind == snapshots.KindActive { + return snapshotsapi.KindActive + } + if kind == snapshots.KindView { + return snapshotsapi.KindView + } + return snapshotsapi.KindCommitted +} + +func fromInfo(info snapshots.Info) snapshotsapi.Info { + return snapshotsapi.Info{ + Name: info.Name, + Parent: info.Parent, + Kind: fromKind(info.Kind), + CreatedAt: info.Created, + UpdatedAt: info.Updated, + 
Labels: info.Labels, + } +} diff --git a/vendor/github.com/containerd/containerd/snapshot/snapshotter.go b/vendor/github.com/containerd/containerd/snapshots/snapshotter.go similarity index 97% rename from vendor/github.com/containerd/containerd/snapshot/snapshotter.go rename to vendor/github.com/containerd/containerd/snapshots/snapshotter.go index 2b3fe62755..cde4c72618 100644 --- a/vendor/github.com/containerd/containerd/snapshot/snapshotter.go +++ b/vendor/github.com/containerd/containerd/snapshots/snapshotter.go @@ -1,4 +1,4 @@ -package snapshot +package snapshots import ( "context" @@ -280,9 +280,7 @@ type Snapshotter interface { // A committed snapshot will be created under name with the parent of the // active snapshot. // - // Commit may be called multiple times on the same key. Snapshots created - // in this manner will all reference the parent used to start the - // transaction. + // After commit, the snapshot identified by key is removed. Commit(ctx context.Context, name, key string, opts ...Opt) error // Remove the committed or active snapshot by the provided key. @@ -296,6 +294,14 @@ type Snapshotter interface { // Walk all snapshots in the snapshotter. For each snapshot in the // snapshotter, the function will be called. Walk(ctx context.Context, fn func(context.Context, Info) error) error + + // Close releases the internal resources. + // + // Close is expected to be called on the end of the lifecycle of the snapshotter, + // but not mandatory. + // + // Close returns nil when it is already closed. + Close() error } // Opt allows setting mutable snapshot properties on creation diff --git a/vendor/github.com/containerd/containerd/spec_opts.go b/vendor/github.com/containerd/containerd/spec_opts.go deleted file mode 100644 index 2dbf8214ab..0000000000 --- a/vendor/github.com/containerd/containerd/spec_opts.go +++ /dev/null @@ -1,74 +0,0 @@ -package containerd - -import ( - "context" - - "github.com/containerd/containerd/containers" - "github.com/containerd/typeurl" - specs "github.com/opencontainers/runtime-spec/specs-go" -) - -// SpecOpts sets spec specific information to a newly generated OCI spec -type SpecOpts func(context.Context, *Client, *containers.Container, *specs.Spec) error - -// WithProcessArgs replaces the args on the generated spec -func WithProcessArgs(args ...string) SpecOpts { - return func(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error { - s.Process.Args = args - return nil - } -} - -// WithProcessCwd replaces the current working directory on the generated spec -func WithProcessCwd(cwd string) SpecOpts { - return func(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error { - s.Process.Cwd = cwd - return nil - } -} - -// WithHostname sets the container's hostname -func WithHostname(name string) SpecOpts { - return func(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error { - s.Hostname = name - return nil - } -} - -// WithNewSpec generates a new spec for a new container -func WithNewSpec(opts ...SpecOpts) NewContainerOpts { - return func(ctx context.Context, client *Client, c *containers.Container) error { - s, err := createDefaultSpec(ctx, c.ID) - if err != nil { - return err - } - for _, o := range opts { - if err := o(ctx, client, c, s); err != nil { - return err - } - } - any, err := typeurl.MarshalAny(s) - if err != nil { - return err - } - c.Spec = any - return nil - } -} - -// WithSpec sets the provided spec on the container -func WithSpec(s *specs.Spec, opts ...SpecOpts) 
NewContainerOpts { - return func(ctx context.Context, client *Client, c *containers.Container) error { - for _, o := range opts { - if err := o(ctx, client, c, s); err != nil { - return err - } - } - any, err := typeurl.MarshalAny(s) - if err != nil { - return err - } - c.Spec = any - return nil - } -} diff --git a/vendor/github.com/containerd/containerd/task.go b/vendor/github.com/containerd/containerd/task.go index 7ae1bf6228..2cbcbaff10 100644 --- a/vendor/github.com/containerd/containerd/task.go +++ b/vendor/github.com/containerd/containerd/task.go @@ -8,12 +8,12 @@ import ( "io" goruntime "runtime" "strings" - "sync" "syscall" "time" "github.com/containerd/containerd/api/services/tasks/v1" "github.com/containerd/containerd/api/types" + "github.com/containerd/containerd/cio" "github.com/containerd/containerd/content" "github.com/containerd/containerd/diff" "github.com/containerd/containerd/errdefs" @@ -123,7 +123,7 @@ type Task interface { // Resume the execution of the task Resume(context.Context) error // Exec creates a new process inside the task - Exec(context.Context, string, *specs.Process, IOCreation) (Process, error) + Exec(context.Context, string, *specs.Process, cio.Creation) (Process, error) // Pids returns a list of system specific process ids inside the task Pids(context.Context) ([]ProcessInfo, error) // Checkpoint serializes the runtime and memory information of a task into an @@ -134,7 +134,7 @@ type Task interface { // Update modifies executing tasks with updated settings Update(context.Context, ...UpdateTaskOpts) error // LoadProcess loads a previously created exec'd process - LoadProcess(context.Context, string, IOAttach) (Process, error) + LoadProcess(context.Context, string, cio.Attach) (Process, error) // Metrics returns task metrics for runtime specific metrics // // The metric types are generic to containerd and change depending on the runtime @@ -148,11 +148,9 @@ var _ = (Task)(&task{}) type task struct { client *Client - io IO + io cio.IO id string pid uint32 - - mu sync.Mutex } // Pid returns the pid or process id for the task @@ -175,13 +173,14 @@ func (t *task) Start(ctx context.Context) error { func (t *task) Kill(ctx context.Context, s syscall.Signal, opts ...KillOpts) error { var i KillInfo for _, o := range opts { - if err := o(ctx, t, &i); err != nil { + if err := o(ctx, &i); err != nil { return err } } _, err := t.client.TaskService().Kill(ctx, &tasks.KillRequest{ Signal: uint32(s), ContainerID: t.id, + ExecID: i.ExecID, All: i.All, }) if err != nil { @@ -278,7 +277,7 @@ func (t *task) Delete(ctx context.Context, opts ...ProcessDeleteOpts) (*ExitStat return &ExitStatus{code: r.ExitStatus, exitedAt: r.ExitedAt}, nil } -func (t *task) Exec(ctx context.Context, id string, spec *specs.Process, ioCreate IOCreation) (Process, error) { +func (t *task) Exec(ctx context.Context, id string, spec *specs.Process, ioCreate cio.Creation) (Process, error) { if id == "" { return nil, errors.Wrapf(errdefs.ErrInvalidArgument, "exec id must not be empty") } @@ -343,7 +342,7 @@ func (t *task) CloseIO(ctx context.Context, opts ...IOCloserOpts) error { return errdefs.FromGRPC(err) } -func (t *task) IO() IO { +func (t *task) IO() cio.IO { return t.io } @@ -357,7 +356,7 @@ func (t *task) Resize(ctx context.Context, w, h uint32) error { } func (t *task) Checkpoint(ctx context.Context, opts ...CheckpointTaskOpts) (Image, error) { - ctx, done, err := t.client.withLease(ctx) + ctx, done, err := t.client.WithLease(ctx) if err != nil { return nil, err } @@ -460,7 +459,7 @@ func (t 
*task) Update(ctx context.Context, opts ...UpdateTaskOpts) error { return errdefs.FromGRPC(err) } -func (t *task) LoadProcess(ctx context.Context, id string, ioAttach IOAttach) (Process, error) { +func (t *task) LoadProcess(ctx context.Context, id string, ioAttach cio.Attach) (Process, error) { response, err := t.client.TaskService().Get(ctx, &tasks.GetRequest{ ContainerID: t.id, ExecID: id, @@ -472,7 +471,7 @@ func (t *task) LoadProcess(ctx context.Context, id string, ioAttach IOAttach) (P } return nil, err } - var i IO + var i cio.IO if ioAttach != nil { if i, err = attachExistingIO(response, ioAttach); err != nil { return nil, err diff --git a/vendor/github.com/containerd/containerd/task_opts.go b/vendor/github.com/containerd/containerd/task_opts.go index 261ccba64b..a387adb6ed 100644 --- a/vendor/github.com/containerd/containerd/task_opts.go +++ b/vendor/github.com/containerd/containerd/task_opts.go @@ -5,7 +5,7 @@ import ( "syscall" "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/linux/runcopts" + "github.com/containerd/containerd/linux/runctypes" "github.com/containerd/containerd/mount" ) @@ -22,7 +22,7 @@ func WithRootFS(mounts []mount.Mount) NewTaskOpts { // WithExit causes the task to exit after a successful checkpoint func WithExit(r *CheckpointTaskInfo) error { - r.Options = &runcopts.CheckpointOptions{ + r.Options = &runctypes.CheckpointOptions{ Exit: true, } return nil @@ -65,13 +65,23 @@ type KillInfo struct { // All kills all processes inside the task // only valid on tasks, ignored on processes All bool + // ExecID is the ID of a process to kill + ExecID string } // KillOpts allows options to be set for the killing of a process -type KillOpts func(context.Context, Process, *KillInfo) error +type KillOpts func(context.Context, *KillInfo) error // WithKillAll kills all processes for a task -func WithKillAll(ctx context.Context, p Process, i *KillInfo) error { +func WithKillAll(ctx context.Context, i *KillInfo) error { i.All = true return nil } + +// WithKillExecID specifies the process ID +func WithKillExecID(execID string) KillOpts { + return func(ctx context.Context, i *KillInfo) error { + i.ExecID = execID + return nil + } +} diff --git a/vendor/github.com/containerd/containerd/task_opts_linux.go b/vendor/github.com/containerd/containerd/task_opts_linux.go new file mode 100644 index 0000000000..5b91cb5485 --- /dev/null +++ b/vendor/github.com/containerd/containerd/task_opts_linux.go @@ -0,0 +1,15 @@ +package containerd + +import ( + "context" + + "github.com/opencontainers/runtime-spec/specs-go" +) + +// WithResources sets the provided resources for task updates +func WithResources(resources *specs.LinuxResources) UpdateTaskOpts { + return func(ctx context.Context, client *Client, r *UpdateTaskInfo) error { + r.Resources = resources + return nil + } +} diff --git a/vendor/github.com/containerd/containerd/task_opts_windows.go b/vendor/github.com/containerd/containerd/task_opts_windows.go new file mode 100644 index 0000000000..d77402c675 --- /dev/null +++ b/vendor/github.com/containerd/containerd/task_opts_windows.go @@ -0,0 +1,15 @@ +package containerd + +import ( + "context" + + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +// WithResources sets the provided resources on the spec for task updates +func WithResources(resources *specs.WindowsResources) UpdateTaskOpts { + return func(ctx context.Context, client *Client, r *UpdateTaskInfo) error { + r.Resources = resources + return nil + } +} diff --git 
a/vendor/github.com/containerd/containerd/vendor.conf b/vendor/github.com/containerd/containerd/vendor.conf index 671346821c..382aaa66b2 100644 --- a/vendor/github.com/containerd/containerd/vendor.conf +++ b/vendor/github.com/containerd/containerd/vendor.conf @@ -1,7 +1,7 @@ github.com/coreos/go-systemd 48702e0da86bd25e76cfef347e2adeb434a0d0a6 github.com/containerd/go-runc ed1cbe1fc31f5fb2359d3a54b6330d1a097858b7 github.com/containerd/console 84eeaae905fa414d03e07bcd6c8d3f19e7cf180e -github.com/containerd/cgroups f7dd103d3e4e696aa67152f6b4ddd1779a3455a9 +github.com/containerd/cgroups 29da22c6171a4316169f9205ab6c49f59b5b852f github.com/containerd/typeurl f6943554a7e7e88b3c14aad190bf05932da84788 github.com/docker/go-metrics 8fd5772bf1584597834c6f7961a530f06cbfbb87 github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9 @@ -13,8 +13,8 @@ github.com/prometheus/procfs fcdb11ccb4389efb1b210b7ffb623ab71c5fdd60 github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 github.com/matttproud/golang_protobuf_extensions v1.0.0 github.com/docker/go-units v0.3.1 -github.com/gogo/protobuf d2e1ade2d719b78fe5b061b4c18a9f7111b5bdc8 -github.com/golang/protobuf 5a0f697c9ed9d68fef0116532c6e05cfeae00e55 +github.com/gogo/protobuf v0.5 +github.com/golang/protobuf 1643683e1b54a9e88ad26d98f81400c8c9d9f4f9 github.com/opencontainers/runtime-spec v1.0.0 github.com/opencontainers/runc 74a17296470088de3805e138d3d87c62e613dfc4 github.com/sirupsen/logrus v1.0.0 @@ -25,7 +25,7 @@ github.com/pmezard/go-difflib v1.0.0 github.com/containerd/fifo fbfb6a11ec671efbe94ad1c12c2e98773f19e1e6 github.com/urfave/cli 7bc6a0acffa589f415f88aca16cc1de5ffd66f9c golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6 -google.golang.org/grpc v1.3.0 +google.golang.org/grpc v1.7.2 github.com/pkg/errors v0.8.0 github.com/opencontainers/go-digest 21dfd564fd89c944783d00d069f33e3e7123c448 golang.org/x/sys 314a259e304ff91bd6985da2a7149bbf91237993 https://github.com/golang/sys @@ -35,9 +35,10 @@ golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c github.com/BurntSushi/toml v0.2.0-21-g9906417 github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0 github.com/Microsoft/go-winio v0.4.4 -github.com/Microsoft/hcsshim v0.6.3 +github.com/Microsoft/hcsshim v0.6.7 github.com/Microsoft/opengcs v0.3.2 github.com/boltdb/bolt e9cf4fae01b5a8ff89d0ec6b32f0d9c9f79aefdd google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944 golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4 -github.com/dmcgowan/go-tar 2e2c51242e8993c50445dab7c03c8e7febddd0cf +github.com/dmcgowan/go-tar go1.10 +github.com/stevvooe/ttrpc 8c92e22ce0c492875ccaac3ab06143a77d8ed0c1 diff --git a/vendor/github.com/containerd/containerd/windows/hcsshimtypes/hcsshim.pb.go b/vendor/github.com/containerd/containerd/windows/hcsshimtypes/hcsshim.pb.go index 77c344d473..d2f9fcc465 100644 --- a/vendor/github.com/containerd/containerd/windows/hcsshimtypes/hcsshim.pb.go +++ b/vendor/github.com/containerd/containerd/windows/hcsshimtypes/hcsshim.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/containerd/containerd/windows/hcsshimtypes/hcsshim.proto -// DO NOT EDIT! /* Package hcsshimtypes is a generated protocol buffer package. 
@@ -17,7 +16,8 @@ package hcsshimtypes import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" -import _ "github.com/gogo/protobuf/gogoproto" + +// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" import _ "github.com/gogo/protobuf/types" import _ "github.com/gogo/protobuf/types" @@ -61,6 +61,7 @@ type ProcessDetails struct { MemoryWorkingSetSharedBytes uint64 `protobuf:"varint,6,opt,name=memory_working_set_shared_bytes,json=memoryWorkingSetSharedBytes,proto3" json:"memory_working_set_shared_bytes,omitempty"` ProcessID uint32 `protobuf:"varint,7,opt,name=process_id,json=processId,proto3" json:"process_id,omitempty"` UserTime_100Ns uint64 `protobuf:"varint,8,opt,name=user_time_100_ns,json=userTime100Ns,proto3" json:"user_time_100_ns,omitempty"` + ExecID string `protobuf:"bytes,9,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` } func (m *ProcessDetails) Reset() { *m = ProcessDetails{} } @@ -156,27 +157,15 @@ func (m *ProcessDetails) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintHcsshim(dAtA, i, uint64(m.UserTime_100Ns)) } + if len(m.ExecID) > 0 { + dAtA[i] = 0x4a + i++ + i = encodeVarintHcsshim(dAtA, i, uint64(len(m.ExecID))) + i += copy(dAtA[i:], m.ExecID) + } return i, nil } -func encodeFixed64Hcsshim(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Hcsshim(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintHcsshim(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -221,6 +210,10 @@ func (m *ProcessDetails) Size() (n int) { if m.UserTime_100Ns != 0 { n += 1 + sovHcsshim(uint64(m.UserTime_100Ns)) } + l = len(m.ExecID) + if l > 0 { + n += 1 + l + sovHcsshim(uint64(l)) + } return n } @@ -260,6 +253,7 @@ func (this *ProcessDetails) String() string { `MemoryWorkingSetSharedBytes:` + fmt.Sprintf("%v", this.MemoryWorkingSetSharedBytes) + `,`, `ProcessID:` + fmt.Sprintf("%v", this.ProcessID) + `,`, `UserTime_100Ns:` + fmt.Sprintf("%v", this.UserTime_100Ns) + `,`, + `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, `}`, }, "") return s @@ -554,6 +548,35 @@ func (m *ProcessDetails) Unmarshal(dAtA []byte) error { break } } + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHcsshim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthHcsshim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExecID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipHcsshim(dAtA[iNdEx:]) @@ -685,35 +708,37 @@ func init() { } var fileDescriptorHcsshim = []byte{ - // 479 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x92, 0x41, 0x6f, 0xd3, 0x30, - 0x14, 0xc7, 
0x1b, 0x36, 0xc6, 0x62, 0x54, 0x60, 0x86, 0x43, 0x28, 0x90, 0x54, 0xbb, 0x50, 0x09, - 0x94, 0x74, 0x70, 0xe4, 0x44, 0x5a, 0x21, 0xed, 0x32, 0xa6, 0x0c, 0x09, 0x09, 0x21, 0x59, 0x6e, - 0xf2, 0x48, 0xad, 0xd5, 0x71, 0x64, 0xbb, 0x54, 0xbd, 0xf1, 0x11, 0x38, 0xf2, 0x49, 0xf8, 0x0c, - 0x3d, 0x72, 0xe4, 0x34, 0x58, 0x3e, 0x09, 0x8a, 0xed, 0x96, 0x51, 0x38, 0x71, 0xf3, 0xf3, 0xff, - 0xf7, 0x7e, 0xaf, 0x7e, 0x0d, 0x1a, 0x95, 0x4c, 0x4f, 0xe7, 0x93, 0x38, 0x17, 0x3c, 0xc9, 0x45, - 0xa5, 0x29, 0xab, 0x40, 0x16, 0x57, 0x8f, 0x0b, 0x56, 0x15, 0x62, 0xa1, 0x92, 0x69, 0xae, 0xd4, - 0x94, 0x71, 0xbd, 0xac, 0x61, 0x53, 0xc4, 0xb5, 0x14, 0x5a, 0xe0, 0xde, 0x6f, 0x3c, 0x76, 0x78, - 0xec, 0x88, 0xde, 0xbd, 0x52, 0x94, 0xc2, 0x60, 0x49, 0x7b, 0xb2, 0x1d, 0xbd, 0xb0, 0x14, 0xa2, - 0x9c, 0x41, 0x62, 0xaa, 0xc9, 0xfc, 0x43, 0x52, 0xcc, 0x25, 0xd5, 0x4c, 0x54, 0x2e, 0x8f, 0xb6, - 0x73, 0xcd, 0x38, 0x28, 0x4d, 0x79, 0x6d, 0x81, 0xc3, 0x1c, 0x75, 0x47, 0x12, 0xa8, 0x86, 0xd7, - 0x75, 0xdb, 0xa6, 0x70, 0x86, 0xb0, 0x06, 0xc9, 0x59, 0x45, 0x35, 0x90, 0xb5, 0x2d, 0xf0, 0xfa, - 0xde, 0xe0, 0xe6, 0xb3, 0xfb, 0xb1, 0xd5, 0xc5, 0x6b, 0x5d, 0x3c, 0x76, 0x40, 0xba, 0xbf, 0xba, - 0x88, 0x3a, 0x5f, 0x7e, 0x44, 0x5e, 0x76, 0xb0, 0x69, 0x5f, 0x87, 0x87, 0x5f, 0x77, 0xd0, 0xad, - 0x53, 0x29, 0x72, 0x50, 0x6a, 0x0c, 0x9a, 0xb2, 0x99, 0xc2, 0x8f, 0x10, 0x62, 0x9c, 0x96, 0x40, - 0x2a, 0xca, 0xc1, 0xe8, 0xfd, 0xcc, 0x37, 0x37, 0x27, 0x94, 0x03, 0x1e, 0x21, 0x94, 0x9b, 0x9f, - 0x55, 0x10, 0xaa, 0x83, 0x6b, 0x66, 0x7a, 0xef, 0xaf, 0xe9, 0x6f, 0xd6, 0x8f, 0xb1, 0xe3, 0x3f, - 0xb7, 0xe3, 0x7d, 0xd7, 0xf7, 0x52, 0xe3, 0x27, 0x08, 0x9f, 0x83, 0xac, 0x60, 0x46, 0xda, 0x57, - 0x93, 0xa3, 0xe1, 0x90, 0x54, 0x2a, 0xd8, 0xe9, 0x7b, 0x83, 0xdd, 0xec, 0xb6, 0x4d, 0x5a, 0xc3, - 0xd1, 0x70, 0x78, 0xa2, 0x70, 0x8c, 0xee, 0x72, 0xe0, 0x42, 0x2e, 0x49, 0x2e, 0x38, 0x67, 0x9a, - 0x4c, 0x96, 0x1a, 0x54, 0xb0, 0x6b, 0xe8, 0x03, 0x1b, 0x8d, 0x4c, 0x92, 0xb6, 0x01, 0x7e, 0x85, - 0xfa, 0x8e, 0x5f, 0x08, 0x79, 0xce, 0xaa, 0x92, 0x28, 0xd0, 0xa4, 0x96, 0xec, 0x63, 0xbb, 0x38, - 0xdb, 0x7c, 0xdd, 0x34, 0x3f, 0xb4, 0xdc, 0x5b, 0x8b, 0x9d, 0x81, 0x3e, 0xb5, 0x90, 0xf5, 0x8c, - 0x51, 0xf4, 0x0f, 0x8f, 0x9a, 0x52, 0x09, 0x85, 0xd3, 0xec, 0x19, 0xcd, 0x83, 0x6d, 0xcd, 0x99, - 0x61, 0xac, 0xe5, 0x29, 0x42, 0xb5, 0x5d, 0x30, 0x61, 0x45, 0x70, 0xa3, 0xef, 0x0d, 0xba, 0x69, - 0xb7, 0xb9, 0x88, 0x7c, 0xb7, 0xf6, 0xe3, 0x71, 0xe6, 0x3b, 0xe0, 0xb8, 0xc0, 0x8f, 0xd1, 0x9d, - 0xb9, 0x02, 0xf9, 0xc7, 0x5a, 0xf6, 0xcd, 0x90, 0x6e, 0x7b, 0xbf, 0x59, 0x4a, 0xfa, 0x7e, 0x75, - 0x19, 0x76, 0xbe, 0x5f, 0x86, 0x9d, 0x4f, 0x4d, 0xe8, 0xad, 0x9a, 0xd0, 0xfb, 0xd6, 0x84, 0xde, - 0xcf, 0x26, 0xf4, 0xde, 0xa5, 0xff, 0xf5, 0xbd, 0xbf, 0xb8, 0x5a, 0x4c, 0xf6, 0xcc, 0x1f, 0xf9, - 0xfc, 0x57, 0x00, 0x00, 0x00, 0xff, 0xff, 0x22, 0xad, 0xdf, 0xf8, 0x3c, 0x03, 0x00, 0x00, + // 507 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0x41, 0x6f, 0xd3, 0x3c, + 0x18, 0xc7, 0x9b, 0x77, 0x7b, 0xbb, 0xc5, 0xa8, 0xc0, 0x0c, 0x87, 0x52, 0x20, 0xa9, 0xc6, 0x81, + 0x4a, 0xa0, 0xb4, 0x83, 0x23, 0x27, 0xd2, 0x82, 0xd4, 0xcb, 0x98, 0x32, 0x24, 0x10, 0x42, 0xb2, + 0xdc, 0xe4, 0x21, 0xb5, 0x56, 0xc7, 0x91, 0xed, 0xd2, 0xf5, 0xc6, 0x47, 0xe0, 0xc8, 0x47, 0xea, + 0x91, 0x23, 0x12, 0x52, 0x61, 0xf9, 0x24, 0xc8, 0x76, 0xba, 0x8d, 0xc1, 0x89, 0x9b, 0xed, 0xff, + 0xef, 0xf9, 0x3d, 0xf1, 0x63, 0x05, 0x0d, 0x73, 0xa6, 0xa7, 0xf3, 0x49, 0x94, 0x0a, 0xde, 0x4f, + 0x45, 0xa1, 0x29, 0x2b, 0x40, 0x66, 0x97, 0x97, 0x0b, 0x56, 0x64, 0x62, 0xa1, 0xfa, 0xd3, 
0x54, + 0xa9, 0x29, 0xe3, 0x7a, 0x59, 0xc2, 0xf9, 0x26, 0x2a, 0xa5, 0xd0, 0x02, 0x77, 0x2e, 0xf0, 0xa8, + 0xc6, 0xa3, 0x9a, 0xe8, 0xdc, 0xce, 0x45, 0x2e, 0x2c, 0xd6, 0x37, 0x2b, 0x57, 0xd1, 0x09, 0x72, + 0x21, 0xf2, 0x19, 0xf4, 0xed, 0x6e, 0x32, 0xff, 0xd0, 0xcf, 0xe6, 0x92, 0x6a, 0x26, 0x8a, 0x3a, + 0x0f, 0xaf, 0xe6, 0x9a, 0x71, 0x50, 0x9a, 0xf2, 0xd2, 0x01, 0xfb, 0x29, 0x6a, 0x0d, 0x25, 0x50, + 0x0d, 0xaf, 0x4a, 0x53, 0xa6, 0x70, 0x82, 0xb0, 0x06, 0xc9, 0x59, 0x41, 0x35, 0x90, 0x8d, 0xad, + 0xed, 0x75, 0xbd, 0xde, 0xb5, 0x27, 0x77, 0x22, 0xa7, 0x8b, 0x36, 0xba, 0x68, 0x54, 0x03, 0xf1, + 0xee, 0x6a, 0x1d, 0x36, 0xbe, 0xfc, 0x08, 0xbd, 0x64, 0xef, 0xbc, 0x7c, 0x13, 0xee, 0x7f, 0xdf, + 0x42, 0xd7, 0x8f, 0xa4, 0x48, 0x41, 0xa9, 0x11, 0x68, 0xca, 0x66, 0x0a, 0xdf, 0x47, 0x88, 0x71, + 0x9a, 0x03, 0x29, 0x28, 0x07, 0xab, 0xf7, 0x13, 0xdf, 0x9e, 0x1c, 0x52, 0x0e, 0x78, 0x88, 0x50, + 0x6a, 0x3f, 0x2b, 0x23, 0x54, 0xb7, 0xff, 0xb3, 0xdd, 0x3b, 0x7f, 0x74, 0x7f, 0xbd, 0xb9, 0x8c, + 0x6b, 0xff, 0xd9, 0xb4, 0xf7, 0xeb, 0xba, 0xe7, 0x1a, 0x3f, 0x42, 0xf8, 0x04, 0x64, 0x01, 0x33, + 0x62, 0x6e, 0x4d, 0x0e, 0x06, 0x03, 0x52, 0xa8, 0xf6, 0x56, 0xd7, 0xeb, 0x6d, 0x27, 0x37, 0x5c, + 0x62, 0x0c, 0x07, 0x83, 0xc1, 0xa1, 0xc2, 0x11, 0xba, 0xc5, 0x81, 0x0b, 0xb9, 0x24, 0xa9, 0xe0, + 0x9c, 0x69, 0x32, 0x59, 0x6a, 0x50, 0xed, 0x6d, 0x4b, 0xef, 0xb9, 0x68, 0x68, 0x93, 0xd8, 0x04, + 0xf8, 0x25, 0xea, 0xd6, 0xfc, 0x42, 0xc8, 0x13, 0x56, 0xe4, 0x44, 0x81, 0x26, 0xa5, 0x64, 0x1f, + 0xcd, 0xe0, 0x5c, 0xf1, 0xff, 0xb6, 0xf8, 0x9e, 0xe3, 0xde, 0x38, 0xec, 0x18, 0xf4, 0x91, 0x83, + 0x9c, 0x67, 0x84, 0xc2, 0xbf, 0x78, 0xd4, 0x94, 0x4a, 0xc8, 0x6a, 0x4d, 0xd3, 0x6a, 0xee, 0x5e, + 0xd5, 0x1c, 0x5b, 0xc6, 0x59, 0x1e, 0x23, 0x54, 0xba, 0x01, 0x13, 0x96, 0xb5, 0x77, 0xba, 0x5e, + 0xaf, 0x15, 0xb7, 0xaa, 0x75, 0xe8, 0xd7, 0x63, 0x1f, 0x8f, 0x12, 0xbf, 0x06, 0xc6, 0x19, 0x7e, + 0x88, 0x6e, 0xce, 0x15, 0xc8, 0xdf, 0xc6, 0xb2, 0x6b, 0x9b, 0xb4, 0xcc, 0xf9, 0xc5, 0x50, 0x1e, + 0xa0, 0x1d, 0x38, 0x85, 0xd4, 0x38, 0x7d, 0xf3, 0x44, 0x31, 0xaa, 0xd6, 0x61, 0xf3, 0xc5, 0x29, + 0xa4, 0xe3, 0x51, 0xd2, 0x34, 0xd1, 0x38, 0x8b, 0xdf, 0xaf, 0xce, 0x82, 0xc6, 0xb7, 0xb3, 0xa0, + 0xf1, 0xa9, 0x0a, 0xbc, 0x55, 0x15, 0x78, 0x5f, 0xab, 0xc0, 0xfb, 0x59, 0x05, 0xde, 0xbb, 0xf8, + 0x9f, 0x7e, 0x8a, 0x67, 0x97, 0x37, 0x6f, 0x1b, 0x93, 0xa6, 0x7d, 0xef, 0xa7, 0xbf, 0x02, 0x00, + 0x00, 0xff, 0xff, 0x1e, 0xd7, 0x2f, 0xa8, 0x63, 0x03, 0x00, 0x00, } diff --git a/vendor/github.com/containerd/containerd/windows/hcsshimtypes/hcsshim.proto b/vendor/github.com/containerd/containerd/windows/hcsshimtypes/hcsshim.proto index 7fcc0547bc..5934fca7c2 100644 --- a/vendor/github.com/containerd/containerd/windows/hcsshimtypes/hcsshim.proto +++ b/vendor/github.com/containerd/containerd/windows/hcsshimtypes/hcsshim.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package containerd.windows.hcsshim; -import "gogoproto/gogo.proto"; +import weak "gogoproto/gogo.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/timestamp.proto"; @@ -23,4 +23,5 @@ message ProcessDetails { uint64 memory_working_set_shared_bytes = 6; uint32 process_id = 7; uint64 user_time_100_ns = 8; + string exec_id = 9; } diff --git a/vendor/github.com/dmcgowan/go-tar/common.go b/vendor/github.com/dmcgowan/go-tar/common.go index d2ae66d554..e3609536c0 100644 --- a/vendor/github.com/dmcgowan/go-tar/common.go +++ b/vendor/github.com/dmcgowan/go-tar/common.go @@ -3,20 +3,23 @@ // license that can be found in the LICENSE file. // Package tar implements access to tar archives. 
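The generated MarshalTo above writes the literal tag byte 0x4a in front of the new ExecID string; that constant is simply the protobuf field key for field number 9 with the length-delimited wire type (2). A minimal standalone sketch of the arithmetic, with an illustrative helper name that is not part of the generated code:

package main

import "fmt"

// tagByte computes a protobuf field key: (fieldNumber << 3) | wireType.
// Wire type 2 ("length-delimited") is how strings such as exec_id are encoded.
func tagByte(fieldNumber, wireType uint64) uint64 {
	return fieldNumber<<3 | wireType
}

func main() {
	// Field 9 (exec_id) with wire type 2 yields 0x4a, the byte the generated
	// MarshalTo emits before the length-prefixed string value.
	fmt.Printf("0x%x\n", tagByte(9, 2)) // 0x4a
}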
-// It aims to cover most of the variations, including those produced -// by GNU and BSD tars. // -// References: -// http://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5 -// http://www.gnu.org/software/tar/manual/html_node/Standard.html -// http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html +// Tape archives (tar) are a file format for storing a sequence of files that +// can be read and written in a streaming manner. +// This package aims to cover most variations of the format, +// including those produced by GNU and BSD tar tools. package tar import ( "errors" "fmt" + "io" + "math" "os" "path" + "reflect" + "strconv" + "strings" "time" ) @@ -24,42 +27,569 @@ import ( // architectures. If a large value is encountered when decoding, the result // stored in Header will be the truncated version. -// Header type flags. -const ( - TypeReg = '0' // regular file - TypeRegA = '\x00' // regular file - TypeLink = '1' // hard link - TypeSymlink = '2' // symbolic link - TypeChar = '3' // character device node - TypeBlock = '4' // block device node - TypeDir = '5' // directory - TypeFifo = '6' // fifo node - TypeCont = '7' // reserved - TypeXHeader = 'x' // extended header - TypeXGlobalHeader = 'g' // global extended header - TypeGNULongName = 'L' // Next file has a long name - TypeGNULongLink = 'K' // Next file symlinks to a file w/ a long name - TypeGNUSparse = 'S' // sparse file +var ( + ErrHeader = errors.New("tar: invalid tar header") + ErrWriteTooLong = errors.New("tar: write too long") + ErrFieldTooLong = errors.New("tar: header field too long") + ErrWriteAfterClose = errors.New("tar: write after close") + errMissData = errors.New("tar: sparse file references non-existent data") + errUnrefData = errors.New("tar: sparse file contains unreferenced data") + errWriteHole = errors.New("tar: write non-NUL byte in sparse hole") ) +type headerError []string + +func (he headerError) Error() string { + const prefix = "tar: cannot encode header" + var ss []string + for _, s := range he { + if s != "" { + ss = append(ss, s) + } + } + if len(ss) == 0 { + return prefix + } + return fmt.Sprintf("%s: %v", prefix, strings.Join(ss, "; and ")) +} + +// Type flags for Header.Typeflag. +const ( + // Type '0' indicates a regular file. + TypeReg = '0' + TypeRegA = '\x00' // For legacy support; use TypeReg instead + + // Type '1' to '6' are header-only flags and may not have a data body. + TypeLink = '1' // Hard link + TypeSymlink = '2' // Symbolic link + TypeChar = '3' // Character device node + TypeBlock = '4' // Block device node + TypeDir = '5' // Directory + TypeFifo = '6' // FIFO node + + // Type '7' is reserved. + TypeCont = '7' + + // Type 'x' is used by the PAX format to store key-value records that + // are only relevant to the next file. + // This package transparently handles these types. + TypeXHeader = 'x' + + // Type 'g' is used by the PAX format to store key-value records that + // are relevant to all subsequent files. + // This package only supports parsing and composing such headers, + // but does not currently support persisting the global state across files. + TypeXGlobalHeader = 'g' + + // Type 'S' indicates a sparse file in the GNU format. + // Header.SparseHoles should be populated when using this type. + TypeGNUSparse = 'S' + + // Types 'L' and 'K' are used by the GNU format for a meta file + // used to store the path or link name for the next file. + // This package transparently handles these types. 
+ TypeGNULongName = 'L' + TypeGNULongLink = 'K' +) + +// Keywords for PAX extended header records. +const ( + paxNone = "" // Indicates that no PAX key is suitable + paxPath = "path" + paxLinkpath = "linkpath" + paxSize = "size" + paxUid = "uid" + paxGid = "gid" + paxUname = "uname" + paxGname = "gname" + paxMtime = "mtime" + paxAtime = "atime" + paxCtime = "ctime" // Removed from later revision of PAX spec, but was valid + paxCharset = "charset" // Currently unused + paxComment = "comment" // Currently unused + + paxSchilyXattr = "SCHILY.xattr." + + // Keywords for GNU sparse files in a PAX extended header. + paxGNUSparse = "GNU.sparse." + paxGNUSparseNumBlocks = "GNU.sparse.numblocks" + paxGNUSparseOffset = "GNU.sparse.offset" + paxGNUSparseNumBytes = "GNU.sparse.numbytes" + paxGNUSparseMap = "GNU.sparse.map" + paxGNUSparseName = "GNU.sparse.name" + paxGNUSparseMajor = "GNU.sparse.major" + paxGNUSparseMinor = "GNU.sparse.minor" + paxGNUSparseSize = "GNU.sparse.size" + paxGNUSparseRealSize = "GNU.sparse.realsize" +) + +// basicKeys is a set of the PAX keys for which we have built-in support. +// This does not contain "charset" or "comment", which are both PAX-specific, +// so adding them as first-class features of Header is unlikely. +// Users can use the PAXRecords field to set it themselves. +var basicKeys = map[string]bool{ + paxPath: true, paxLinkpath: true, paxSize: true, paxUid: true, paxGid: true, + paxUname: true, paxGname: true, paxMtime: true, paxAtime: true, paxCtime: true, +} + // A Header represents a single header in a tar archive. // Some fields may not be populated. +// +// For forward compatibility, users that retrieve a Header from Reader.Next, +// mutate it in some ways, and then pass it back to Writer.WriteHeader +// should do so by creating a new Header and copying the fields +// that they are interested in preserving. type Header struct { - Name string // name of header file entry - Mode int64 // permission and mode bits - Uid int // user id of owner - Gid int // group id of owner - Size int64 // length in bytes - ModTime time.Time // modified time - Typeflag byte // type of header entry - Linkname string // target name of link - Uname string // user name of owner - Gname string // group name of owner - Devmajor int64 // major number of character or block device - Devminor int64 // minor number of character or block device - AccessTime time.Time // access time - ChangeTime time.Time // status change time - Xattrs map[string]string + Typeflag byte // Type of header entry (should be TypeReg for most files) + + Name string // Name of file entry + Linkname string // Target name of link (valid for TypeLink or TypeSymlink) + + Size int64 // Logical file size in bytes + Mode int64 // Permission and mode bits + Uid int // User ID of owner + Gid int // Group ID of owner + Uname string // User name of owner + Gname string // Group name of owner + + // If the Format is unspecified, then Writer.WriteHeader rounds ModTime + // to the nearest second and ignores the AccessTime and ChangeTime fields. + // + // To use AccessTime or ChangeTime, specify the Format as PAX or GNU. + // To use sub-second resolution, specify the Format as PAX. 
+ ModTime time.Time // Modification time + AccessTime time.Time // Access time (requires either PAX or GNU support) + ChangeTime time.Time // Change time (requires either PAX or GNU support) + + Devmajor int64 // Major device number (valid for TypeChar or TypeBlock) + Devminor int64 // Minor device number (valid for TypeChar or TypeBlock) + + // SparseHoles represents a sequence of holes in a sparse file. + // + // A file is sparse if len(SparseHoles) > 0 or Typeflag is TypeGNUSparse. + // If TypeGNUSparse is set, then the format is GNU, otherwise + // the format is PAX (by using GNU-specific PAX records). + // + // A sparse file consists of fragments of data, intermixed with holes + // (described by this field). A hole is semantically a block of NUL-bytes, + // but does not actually exist within the tar file. + // The holes must be sorted in ascending order, + // not overlap with each other, and not extend past the specified Size. + SparseHoles []SparseEntry + + // Xattrs stores extended attributes as PAX records under the + // "SCHILY.xattr." namespace. + // + // The following are semantically equivalent: + // h.Xattrs[key] = value + // h.PAXRecords["SCHILY.xattr."+key] = value + // + // When Writer.WriteHeader is called, the contents of Xattrs will take + // precedence over those in PAXRecords. + // + // Deprecated: Use PAXRecords instead. + Xattrs map[string]string + + // PAXRecords is a map of PAX extended header records. + // + // User-defined records should have keys of the following form: + // VENDOR.keyword + // Where VENDOR is some namespace in all uppercase, and keyword may + // not contain the '=' character (e.g., "GOLANG.pkg.version"). + // The key and value should be non-empty UTF-8 strings. + // + // When Writer.WriteHeader is called, PAX records derived from the + // the other fields in Header take precedence over PAXRecords. + PAXRecords map[string]string + + // Format specifies the format of the tar header. + // + // This is set by Reader.Next as a best-effort guess at the format. + // Since the Reader liberally reads some non-compliant files, + // it is possible for this to be FormatUnknown. + // + // If the format is unspecified when Writer.WriteHeader is called, + // then it uses the first format (in the order of USTAR, PAX, GNU) + // capable of encoding this Header (see Format). + Format Format +} + +// SparseEntry represents a Length-sized fragment at Offset in the file. +type SparseEntry struct{ Offset, Length int64 } + +func (s SparseEntry) endOffset() int64 { return s.Offset + s.Length } + +// A sparse file can be represented as either a sparseDatas or a sparseHoles. +// As long as the total size is known, they are equivalent and one can be +// converted to the other form and back. The various tar formats with sparse +// file support represent sparse files in the sparseDatas form. That is, they +// specify the fragments in the file that has data, and treat everything else as +// having zero bytes. As such, the encoding and decoding logic in this package +// deals with sparseDatas. +// +// However, the external API uses sparseHoles instead of sparseDatas because the +// zero value of sparseHoles logically represents a normal file (i.e., there are +// no holes in it). On the other hand, the zero value of sparseDatas implies +// that the file has no data in it, which is rather odd. 
+// +// As an example, if the underlying raw file contains the 10-byte data: +// var compactFile = "abcdefgh" +// +// And the sparse map has the following entries: +// var spd sparseDatas = []sparseEntry{ +// {Offset: 2, Length: 5}, // Data fragment for 2..6 +// {Offset: 18, Length: 3}, // Data fragment for 18..20 +// } +// var sph sparseHoles = []SparseEntry{ +// {Offset: 0, Length: 2}, // Hole fragment for 0..1 +// {Offset: 7, Length: 11}, // Hole fragment for 7..17 +// {Offset: 21, Length: 4}, // Hole fragment for 21..24 +// } +// +// Then the content of the resulting sparse file with a Header.Size of 25 is: +// var sparseFile = "\x00"*2 + "abcde" + "\x00"*11 + "fgh" + "\x00"*4 +type ( + sparseDatas []SparseEntry + sparseHoles []SparseEntry +) + +// validateSparseEntries reports whether sp is a valid sparse map. +// It does not matter whether sp represents data fragments or hole fragments. +func validateSparseEntries(sp []SparseEntry, size int64) bool { + // Validate all sparse entries. These are the same checks as performed by + // the BSD tar utility. + if size < 0 { + return false + } + var pre SparseEntry + for _, cur := range sp { + switch { + case cur.Offset < 0 || cur.Length < 0: + return false // Negative values are never okay + case cur.Offset > math.MaxInt64-cur.Length: + return false // Integer overflow with large length + case cur.endOffset() > size: + return false // Region extends beyond the actual size + case pre.endOffset() > cur.Offset: + return false // Regions cannot overlap and must be in order + } + pre = cur + } + return true +} + +// alignSparseEntries mutates src and returns dst where each fragment's +// starting offset is aligned up to the nearest block edge, and each +// ending offset is aligned down to the nearest block edge. +// +// Even though the Go tar Reader and the BSD tar utility can handle entries +// with arbitrary offsets and lengths, the GNU tar utility can only handle +// offsets and lengths that are multiples of blockSize. +func alignSparseEntries(src []SparseEntry, size int64) []SparseEntry { + dst := src[:0] + for _, s := range src { + pos, end := s.Offset, s.endOffset() + pos += blockPadding(+pos) // Round-up to nearest blockSize + if end != size { + end -= blockPadding(-end) // Round-down to nearest blockSize + } + if pos < end { + dst = append(dst, SparseEntry{Offset: pos, Length: end - pos}) + } + } + return dst +} + +// invertSparseEntries converts a sparse map from one form to the other. +// If the input is sparseHoles, then it will output sparseDatas and vice-versa. +// The input must have been already validated. +// +// This function mutates src and returns a normalized map where: +// * adjacent fragments are coalesced together +// * only the last fragment may be empty +// * the endOffset of the last fragment is the total size +func invertSparseEntries(src []SparseEntry, size int64) []SparseEntry { + dst := src[:0] + var pre SparseEntry + for _, cur := range src { + if cur.Length == 0 { + continue // Skip empty fragments + } + pre.Length = cur.Offset - pre.Offset + if pre.Length > 0 { + dst = append(dst, pre) // Only add non-empty fragments + } + pre.Offset = cur.endOffset() + } + pre.Length = size - pre.Offset // Possibly the only empty fragment + return append(dst, pre) +} + +// fileState tracks the number of logical (includes sparse holes) and physical +// (actual in tar archive) bytes remaining for the current file. 
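The comment above walks through a concrete 25-byte sparse file. A self-contained sketch, using a simplified local copy of the (unexported) inversion logic rather than the vendored function itself, reproduces the hole map from the two data fragments of that example:

package main

import "fmt"

// sparseEntry mirrors the package's SparseEntry: a Length-sized fragment at Offset.
type sparseEntry struct{ Offset, Length int64 }

func (s sparseEntry) endOffset() int64 { return s.Offset + s.Length }

// invert converts a data map into the complementary hole map for a file of the
// given total size; this is a simplified restatement of the inversion above.
func invert(src []sparseEntry, size int64) []sparseEntry {
	var dst []sparseEntry
	var pre sparseEntry
	for _, cur := range src {
		if cur.Length == 0 {
			continue // Skip empty fragments
		}
		pre.Length = cur.Offset - pre.Offset
		if pre.Length > 0 {
			dst = append(dst, pre)
		}
		pre.Offset = cur.endOffset()
	}
	pre.Length = size - pre.Offset // Trailing hole (possibly empty)
	return append(dst, pre)
}

func main() {
	// Data fragments from the example above; the logical file size is 25.
	spd := []sparseEntry{{Offset: 2, Length: 5}, {Offset: 18, Length: 3}}
	fmt.Println(invert(spd, 25)) // [{0 2} {7 11} {21 4}]
}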
+// +// Invariant: LogicalRemaining >= PhysicalRemaining +type fileState interface { + LogicalRemaining() int64 + PhysicalRemaining() int64 +} + +// allowedFormats determines which formats can be used. +// The value returned is the logical OR of multiple possible formats. +// If the value is FormatUnknown, then the input Header cannot be encoded +// and an error is returned explaining why. +// +// As a by-product of checking the fields, this function returns paxHdrs, which +// contain all fields that could not be directly encoded. +// A value receiver ensures that this method does not mutate the source Header. +func (h Header) allowedFormats() (format Format, paxHdrs map[string]string, err error) { + format = FormatUSTAR | FormatPAX | FormatGNU + paxHdrs = make(map[string]string) + + var whyNoUSTAR, whyNoPAX, whyNoGNU string + var preferPAX bool // Prefer PAX over USTAR + verifyString := func(s string, size int, name, paxKey string) { + // NUL-terminator is optional for path and linkpath. + // Technically, it is required for uname and gname, + // but neither GNU nor BSD tar checks for it. + tooLong := len(s) > size + allowLongGNU := paxKey == paxPath || paxKey == paxLinkpath + if hasNUL(s) || (tooLong && !allowLongGNU) { + whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%q", name, s) + format.mustNotBe(FormatGNU) + } + if !isASCII(s) || tooLong { + canSplitUSTAR := paxKey == paxPath + if _, _, ok := splitUSTARPath(s); !canSplitUSTAR || !ok { + whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%q", name, s) + format.mustNotBe(FormatUSTAR) + } + if paxKey == paxNone { + whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%q", name, s) + format.mustNotBe(FormatPAX) + } else { + paxHdrs[paxKey] = s + } + } + if v, ok := h.PAXRecords[paxKey]; ok && v == s { + paxHdrs[paxKey] = v + } + } + verifyNumeric := func(n int64, size int, name, paxKey string) { + if !fitsInBase256(size, n) { + whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%d", name, n) + format.mustNotBe(FormatGNU) + } + if !fitsInOctal(size, n) { + whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%d", name, n) + format.mustNotBe(FormatUSTAR) + if paxKey == paxNone { + whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%d", name, n) + format.mustNotBe(FormatPAX) + } else { + paxHdrs[paxKey] = strconv.FormatInt(n, 10) + } + } + if v, ok := h.PAXRecords[paxKey]; ok && v == strconv.FormatInt(n, 10) { + paxHdrs[paxKey] = v + } + } + verifyTime := func(ts time.Time, size int, name, paxKey string) { + if ts.IsZero() { + return // Always okay + } + if !fitsInBase256(size, ts.Unix()) { + whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%v", name, ts) + format.mustNotBe(FormatGNU) + } + isMtime := paxKey == paxMtime + fitsOctal := fitsInOctal(size, ts.Unix()) + if (isMtime && !fitsOctal) || !isMtime { + whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%v", name, ts) + format.mustNotBe(FormatUSTAR) + } + needsNano := ts.Nanosecond() != 0 + if !isMtime || !fitsOctal || needsNano { + preferPAX = true // USTAR may truncate sub-second measurements + if paxKey == paxNone { + whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%v", name, ts) + format.mustNotBe(FormatPAX) + } else { + paxHdrs[paxKey] = formatPAXTime(ts) + } + } + if v, ok := h.PAXRecords[paxKey]; ok && v == formatPAXTime(ts) { + paxHdrs[paxKey] = v + } + } + + // Check basic fields. 
+ var blk block + v7 := blk.V7() + ustar := blk.USTAR() + gnu := blk.GNU() + verifyString(h.Name, len(v7.Name()), "Name", paxPath) + verifyString(h.Linkname, len(v7.LinkName()), "Linkname", paxLinkpath) + verifyString(h.Uname, len(ustar.UserName()), "Uname", paxUname) + verifyString(h.Gname, len(ustar.GroupName()), "Gname", paxGname) + verifyNumeric(h.Mode, len(v7.Mode()), "Mode", paxNone) + verifyNumeric(int64(h.Uid), len(v7.UID()), "Uid", paxUid) + verifyNumeric(int64(h.Gid), len(v7.GID()), "Gid", paxGid) + verifyNumeric(h.Size, len(v7.Size()), "Size", paxSize) + verifyNumeric(h.Devmajor, len(ustar.DevMajor()), "Devmajor", paxNone) + verifyNumeric(h.Devminor, len(ustar.DevMinor()), "Devminor", paxNone) + verifyTime(h.ModTime, len(v7.ModTime()), "ModTime", paxMtime) + verifyTime(h.AccessTime, len(gnu.AccessTime()), "AccessTime", paxAtime) + verifyTime(h.ChangeTime, len(gnu.ChangeTime()), "ChangeTime", paxCtime) + + // Check for header-only types. + var whyOnlyPAX, whyOnlyGNU string + switch h.Typeflag { + case TypeReg, TypeChar, TypeBlock, TypeFifo, TypeGNUSparse: + // Exclude TypeLink and TypeSymlink, since they may reference directories. + if strings.HasSuffix(h.Name, "/") { + return FormatUnknown, nil, headerError{"filename may not have trailing slash"} + } + case TypeXHeader, TypeGNULongName, TypeGNULongLink: + return FormatUnknown, nil, headerError{"cannot manually encode TypeXHeader, TypeGNULongName, or TypeGNULongLink headers"} + case TypeXGlobalHeader: + if !reflect.DeepEqual(h, Header{Typeflag: h.Typeflag, Xattrs: h.Xattrs, PAXRecords: h.PAXRecords, Format: h.Format}) { + return FormatUnknown, nil, headerError{"only PAXRecords may be set for TypeXGlobalHeader"} + } + whyOnlyPAX = "only PAX supports TypeXGlobalHeader" + format.mayOnlyBe(FormatPAX) + } + if !isHeaderOnlyType(h.Typeflag) && h.Size < 0 { + return FormatUnknown, nil, headerError{"negative size on header-only type"} + } + + // Check PAX records. + if len(h.Xattrs) > 0 { + for k, v := range h.Xattrs { + paxHdrs[paxSchilyXattr+k] = v + } + whyOnlyPAX = "only PAX supports Xattrs" + format.mayOnlyBe(FormatPAX) + } + if len(h.PAXRecords) > 0 { + for k, v := range h.PAXRecords { + switch _, exists := paxHdrs[k]; { + case exists: + continue // Do not overwrite existing records + case h.Typeflag == TypeXGlobalHeader: + paxHdrs[k] = v // Copy all records + case !basicKeys[k] && !strings.HasPrefix(k, paxGNUSparse): + paxHdrs[k] = v // Ignore local records that may conflict + } + } + whyOnlyPAX = "only PAX supports PAXRecords" + format.mayOnlyBe(FormatPAX) + } + for k, v := range paxHdrs { + if !validPAXRecord(k, v) { + return FormatUnknown, nil, headerError{fmt.Sprintf("invalid PAX record: %q", k+" = "+v)} + } + } + + // Check sparse files. + if len(h.SparseHoles) > 0 || h.Typeflag == TypeGNUSparse { + if isHeaderOnlyType(h.Typeflag) { + return FormatUnknown, nil, headerError{"header-only type cannot be sparse"} + } + if !validateSparseEntries(h.SparseHoles, h.Size) { + return FormatUnknown, nil, headerError{"invalid sparse holes"} + } + if h.Typeflag == TypeGNUSparse { + whyOnlyGNU = "only GNU supports TypeGNUSparse" + format.mayOnlyBe(FormatGNU) + } else { + whyNoGNU = "GNU supports sparse files only with TypeGNUSparse" + format.mustNotBe(FormatGNU) + } + whyNoUSTAR = "USTAR does not support sparse files" + format.mustNotBe(FormatUSTAR) + } + + // Check desired format. 
+ if wantFormat := h.Format; wantFormat != FormatUnknown { + if wantFormat.has(FormatPAX) && !preferPAX { + wantFormat.mayBe(FormatUSTAR) // PAX implies USTAR allowed too + } + format.mayOnlyBe(wantFormat) // Set union of formats allowed and format wanted + } + if format == FormatUnknown { + switch h.Format { + case FormatUSTAR: + err = headerError{"Format specifies USTAR", whyNoUSTAR, whyOnlyPAX, whyOnlyGNU} + case FormatPAX: + err = headerError{"Format specifies PAX", whyNoPAX, whyOnlyGNU} + case FormatGNU: + err = headerError{"Format specifies GNU", whyNoGNU, whyOnlyPAX} + default: + err = headerError{whyNoUSTAR, whyNoPAX, whyNoGNU, whyOnlyPAX, whyOnlyGNU} + } + } + return format, paxHdrs, err +} + +var sysSparseDetect func(f *os.File) (sparseHoles, error) +var sysSparsePunch func(f *os.File, sph sparseHoles) error + +// DetectSparseHoles searches for holes within f to populate SparseHoles +// on supported operating systems and filesystems. +// The file offset is cleared to zero. +// +// When packing a sparse file, DetectSparseHoles should be called prior to +// serializing the header to the archive with Writer.WriteHeader. +func (h *Header) DetectSparseHoles(f *os.File) (err error) { + defer func() { + if _, serr := f.Seek(0, io.SeekStart); err == nil { + err = serr + } + }() + + h.SparseHoles = nil + if sysSparseDetect != nil { + sph, err := sysSparseDetect(f) + h.SparseHoles = sph + return err + } + return nil +} + +// PunchSparseHoles destroys the contents of f, and prepares a sparse file +// (on supported operating systems and filesystems) +// with holes punched according to SparseHoles. +// The file offset is cleared to zero. +// +// When extracting a sparse file, PunchSparseHoles should be called prior to +// populating the content of a file with Reader.WriteTo. +func (h *Header) PunchSparseHoles(f *os.File) (err error) { + defer func() { + if _, serr := f.Seek(0, io.SeekStart); err == nil { + err = serr + } + }() + + if err := f.Truncate(0); err != nil { + return err + } + + var size int64 + if len(h.SparseHoles) > 0 { + size = h.SparseHoles[len(h.SparseHoles)-1].endOffset() + } + if !validateSparseEntries(h.SparseHoles, size) { + return errors.New("tar: invalid sparse holes") + } + + if size == 0 { + return nil // For non-sparse files, do nothing (other than Truncate) + } + if sysSparsePunch != nil { + return sysSparsePunch(f, h.SparseHoles) + } + return f.Truncate(size) } // FileInfo returns an os.FileInfo for the Header. @@ -92,63 +622,43 @@ func (fi headerFileInfo) Mode() (mode os.FileMode) { // Set setuid, setgid and sticky bits. if fi.h.Mode&c_ISUID != 0 { - // setuid mode |= os.ModeSetuid } if fi.h.Mode&c_ISGID != 0 { - // setgid mode |= os.ModeSetgid } if fi.h.Mode&c_ISVTX != 0 { - // sticky mode |= os.ModeSticky } - // Set file mode bits. - // clear perm, setuid, setgid and sticky bits. - m := os.FileMode(fi.h.Mode) &^ 07777 - if m == c_ISDIR { - // directory + // Set file mode bits; clear perm, setuid, setgid, and sticky bits. 
+ switch m := os.FileMode(fi.h.Mode) &^ 07777; m { + case c_ISDIR: mode |= os.ModeDir - } - if m == c_ISFIFO { - // named pipe (FIFO) + case c_ISFIFO: mode |= os.ModeNamedPipe - } - if m == c_ISLNK { - // symbolic link + case c_ISLNK: mode |= os.ModeSymlink - } - if m == c_ISBLK { - // device file + case c_ISBLK: mode |= os.ModeDevice - } - if m == c_ISCHR { - // Unix character device + case c_ISCHR: mode |= os.ModeDevice mode |= os.ModeCharDevice - } - if m == c_ISSOCK { - // Unix domain socket + case c_ISSOCK: mode |= os.ModeSocket } switch fi.h.Typeflag { case TypeSymlink: - // symbolic link mode |= os.ModeSymlink case TypeChar: - // character device node mode |= os.ModeDevice mode |= os.ModeCharDevice case TypeBlock: - // block device node mode |= os.ModeDevice case TypeDir: - // directory mode |= os.ModeDir case TypeFifo: - // fifo node mode |= os.ModeNamedPipe } @@ -158,11 +668,15 @@ func (fi headerFileInfo) Mode() (mode os.FileMode) { // sysStat, if non-nil, populates h from system-dependent fields of fi. var sysStat func(fi os.FileInfo, h *Header) error -// Mode constants from the tar spec. const ( - c_ISUID = 04000 // Set uid - c_ISGID = 02000 // Set gid - c_ISVTX = 01000 // Save text (sticky bit) + // Mode constants from the USTAR spec: + // See http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_06 + c_ISUID = 04000 // Set uid + c_ISGID = 02000 // Set gid + c_ISVTX = 01000 // Save text (sticky bit) + + // Common Unix mode constants; these are not defined in any common tar standard. + // Header.FileInfo understands these, but FileInfoHeader will never produce these. c_ISDIR = 040000 // Directory c_ISFIFO = 010000 // FIFO c_ISREG = 0100000 // Regular file @@ -172,30 +686,16 @@ const ( c_ISSOCK = 0140000 // Socket ) -// Keywords for the PAX Extended Header -const ( - paxAtime = "atime" - paxCharset = "charset" - paxComment = "comment" - paxCtime = "ctime" // please note that ctime is not a valid pax header. - paxGid = "gid" - paxGname = "gname" - paxLinkpath = "linkpath" - paxMtime = "mtime" - paxPath = "path" - paxSize = "size" - paxUid = "uid" - paxUname = "uname" - paxXattr = "SCHILY.xattr." - paxNone = "" -) - // FileInfoHeader creates a partially-populated Header from fi. // If fi describes a symlink, FileInfoHeader records link as the link target. // If fi describes a directory, a slash is appended to the name. -// Because os.FileInfo's Name method returns only the base name of -// the file it describes, it may be necessary to modify the Name field -// of the returned header to provide the full path name of the file. +// +// Since os.FileInfo's Name method only returns the base name of +// the file it describes, it may be necessary to modify Header.Name +// to provide the full path name of the file. +// +// This function does not populate Header.SparseHoles; +// for sparse file support, additionally call Header.DetectSparseHoles. 
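DetectSparseHoles above is the archiving-side half of the sparse workflow, with PunchSparseHoles as its extraction-side counterpart. A rough usage sketch, assuming the vendored import path from this diff and the tar.NewWriter/Writer.WriteHeader API from writer.go (outside this hunk); error handling is kept minimal:

package main

import (
	"io"
	"log"
	"os"

	tar "github.com/dmcgowan/go-tar"
)

// addFile appends one file to tw, detecting sparse holes before the header is
// written, as the DetectSparseHoles documentation above suggests.
func addFile(tw *tar.Writer, path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	fi, err := f.Stat()
	if err != nil {
		return err
	}
	hdr, err := tar.FileInfoHeader(fi, "")
	if err != nil {
		return err
	}
	// FileInfoHeader does not populate SparseHoles; detect them explicitly.
	if err := hdr.DetectSparseHoles(f); err != nil {
		return err
	}
	if err := tw.WriteHeader(hdr); err != nil {
		return err
	}
	// Copy the logical file content; regions declared as holes are expected to
	// read back as NUL bytes, which a normal read of a sparse file yields.
	_, err = io.Copy(tw, f)
	return err
}

func main() {
	out, err := os.Create("out.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	tw := tar.NewWriter(out)
	defer tw.Close()

	if err := addFile(tw, "example.dat"); err != nil {
		log.Fatal(err)
	}
}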
func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) { if fi == nil { return nil, errors.New("tar: FileInfo is nil") @@ -208,32 +708,26 @@ func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) { } switch { case fm.IsRegular(): - h.Mode |= c_ISREG h.Typeflag = TypeReg h.Size = fi.Size() case fi.IsDir(): h.Typeflag = TypeDir - h.Mode |= c_ISDIR h.Name += "/" case fm&os.ModeSymlink != 0: h.Typeflag = TypeSymlink - h.Mode |= c_ISLNK h.Linkname = link case fm&os.ModeDevice != 0: if fm&os.ModeCharDevice != 0 { - h.Mode |= c_ISCHR h.Typeflag = TypeChar } else { - h.Mode |= c_ISBLK h.Typeflag = TypeBlock } case fm&os.ModeNamedPipe != 0: h.Typeflag = TypeFifo - h.Mode |= c_ISFIFO case fm&os.ModeSocket != 0: - h.Mode |= c_ISSOCK + return nil, fmt.Errorf("tar: sockets not supported") default: - return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm) + return nil, fmt.Errorf("tar: unknown file mode %v", fm) } if fm&os.ModeSetuid != 0 { h.Mode |= c_ISUID @@ -267,6 +761,15 @@ func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) { h.Size = 0 h.Linkname = sys.Linkname } + if sys.SparseHoles != nil { + h.SparseHoles = append([]SparseEntry{}, sys.SparseHoles...) + } + if sys.PAXRecords != nil { + h.PAXRecords = make(map[string]string) + for k, v := range sys.PAXRecords { + h.PAXRecords[k] = v + } + } } if sysStat != nil { return h, sysStat(fi, h) @@ -284,3 +787,10 @@ func isHeaderOnlyType(flag byte) bool { return false } } + +func min(a, b int64) int64 { + if a < b { + return a + } + return b +} diff --git a/vendor/github.com/dmcgowan/go-tar/format.go b/vendor/github.com/dmcgowan/go-tar/format.go index c2c9910d00..cf1289534f 100644 --- a/vendor/github.com/dmcgowan/go-tar/format.go +++ b/vendor/github.com/dmcgowan/go-tar/format.go @@ -4,38 +4,131 @@ package tar +import "strings" + +// Format represents the tar archive format. +// +// The original tar format was introduced in Unix V7. +// Since then, there have been multiple competing formats attempting to +// standardize or extend the V7 format to overcome its limitations. +// The most common formats are the USTAR, PAX, and GNU formats, +// each with their own advantages and limitations. +// +// The following table captures the capabilities of each format: +// +// | USTAR | PAX | GNU +// ------------------+--------+-----------+---------- +// Name | 256B | unlimited | unlimited +// Linkname | 100B | unlimited | unlimited +// Size | uint33 | unlimited | uint89 +// Mode | uint21 | uint21 | uint57 +// Uid/Gid | uint21 | unlimited | uint57 +// Uname/Gname | 32B | unlimited | 32B +// ModTime | uint33 | unlimited | int89 +// AccessTime | n/a | unlimited | int89 +// ChangeTime | n/a | unlimited | int89 +// Devmajor/Devminor | uint21 | uint21 | uint57 +// ------------------+--------+-----------+---------- +// string encoding | ASCII | UTF-8 | binary +// sub-second times | no | yes | no +// sparse files | no | yes | yes +// +// The table's upper portion shows the Header fields, where each format reports +// the maximum number of bytes allowed for each string field and +// the integer type used to store each numeric field +// (where timestamps are stored as the number of seconds since the Unix epoch). +// +// The table's lower portion shows specialized features of each format, +// such as supported string encodings, support for sub-second timestamps, +// or support for sparse files. +type Format int + // Constants to identify various tar formats. const ( - // The format is unknown. 
- formatUnknown = (1 << iota) / 2 // Sequence of 0, 1, 2, 4, 8, etc... + // Deliberately hide the meaning of constants from public API. + _ Format = (1 << iota) / 4 // Sequence of 0, 0, 1, 2, 4, 8, etc... + + // FormatUnknown indicates that the format is unknown. + FormatUnknown // The format of the original Unix V7 tar tool prior to standardization. formatV7 - // The old and new GNU formats, which are incompatible with USTAR. - // This does cover the old GNU sparse extension. - // This does not cover the GNU sparse extensions using PAX headers, - // versions 0.0, 0.1, and 1.0; these fall under the PAX format. - formatGNU + // FormatUSTAR represents the USTAR header format defined in POSIX.1-1988. + // + // While this format is compatible with most tar readers, + // the format has several limitations making it unsuitable for some usages. + // Most notably, it cannot support sparse files, files larger than 8GiB, + // filenames larger than 256 characters, and non-ASCII filenames. + // + // Reference: + // http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_06 + FormatUSTAR + + // FormatPAX represents the PAX header format defined in POSIX.1-2001. + // + // PAX extends USTAR by writing a special file with Typeflag TypeXHeader + // preceding the original header. This file contains a set of key-value + // records, which are used to overcome USTAR's shortcomings, in addition to + // providing the ability to have sub-second resolution for timestamps. + // + // Some newer formats add their own extensions to PAX by defining their + // own keys and assigning certain semantic meaning to the associated values. + // For example, sparse file support in PAX is implemented using keys + // defined by the GNU manual (e.g., "GNU.sparse.map"). + // + // Reference: + // http://pubs.opengroup.org/onlinepubs/009695399/utilities/pax.html + FormatPAX + + // FormatGNU represents the GNU header format. + // + // The GNU header format is older than the USTAR and PAX standards and + // is not compatible with them. The GNU format supports + // arbitrary file sizes, filenames of arbitrary encoding and length, + // sparse files, and other features. + // + // It is recommended that PAX be chosen over GNU unless the target + // application can only parse GNU formatted archives. + // + // Reference: + // http://www.gnu.org/software/tar/manual/html_node/Standard.html + FormatGNU // Schily's tar format, which is incompatible with USTAR. // This does not cover STAR extensions to the PAX format; these fall under // the PAX format. formatSTAR - // USTAR is the former standardization of tar defined in POSIX.1-1988. - // This is incompatible with the GNU and STAR formats. - formatUSTAR - - // PAX is the latest standardization of tar defined in POSIX.1-2001. - // This is an extension of USTAR and is "backwards compatible" with it. - // - // Some newer formats add their own extensions to PAX, such as GNU sparse - // files and SCHILY extended attributes. Since they are backwards compatible - // with PAX, they will be labelled as "PAX". 
- formatPAX + formatMax ) +func (f Format) has(f2 Format) bool { return f&f2 != 0 } +func (f *Format) mayBe(f2 Format) { *f |= f2 } +func (f *Format) mayOnlyBe(f2 Format) { *f &= f2 } +func (f *Format) mustNotBe(f2 Format) { *f &^= f2 } + +var formatNames = map[Format]string{ + formatV7: "V7", FormatUSTAR: "USTAR", FormatPAX: "PAX", FormatGNU: "GNU", formatSTAR: "STAR", +} + +func (f Format) String() string { + var ss []string + for f2 := Format(1); f2 < formatMax; f2 <<= 1 { + if f.has(f2) { + ss = append(ss, formatNames[f2]) + } + } + switch len(ss) { + case 0: + return "" + case 1: + return ss[0] + default: + return "(" + strings.Join(ss, " | ") + ")" + } +} + // Magics used to identify various formats. const ( magicGNU, versionGNU = "ustar ", " \x00" @@ -50,6 +143,12 @@ const ( prefixSize = 155 // Max length of the prefix field in USTAR format ) +// blockPadding computes the number of bytes needed to pad offset up to the +// nearest block edge where 0 <= n < blockSize. +func blockPadding(offset int64) (n int64) { + return -offset & (blockSize - 1) +} + var zeroBlock block type block [blockSize]byte @@ -63,14 +162,14 @@ func (b *block) Sparse() sparseArray { return (sparseArray)(b[:]) } // GetFormat checks that the block is a valid tar header based on the checksum. // It then attempts to guess the specific format based on magic values. -// If the checksum fails, then formatUnknown is returned. -func (b *block) GetFormat() (format int) { +// If the checksum fails, then FormatUnknown is returned. +func (b *block) GetFormat() Format { // Verify checksum. var p parser value := p.parseOctal(b.V7().Chksum()) chksum1, chksum2 := b.ComputeChecksum() if p.err != nil || (value != chksum1 && value != chksum2) { - return formatUnknown + return FormatUnknown } // Guess the magic values. @@ -81,9 +180,9 @@ func (b *block) GetFormat() (format int) { case magic == magicUSTAR && trailer == trailerSTAR: return formatSTAR case magic == magicUSTAR: - return formatUSTAR + return FormatUSTAR | FormatPAX case magic == magicGNU && version == versionGNU: - return formatGNU + return FormatGNU default: return formatV7 } @@ -91,19 +190,19 @@ func (b *block) GetFormat() (format int) { // SetFormat writes the magic values necessary for specified format // and then updates the checksum accordingly. -func (b *block) SetFormat(format int) { +func (b *block) SetFormat(format Format) { // Set the magic values. - switch format { - case formatV7: + switch { + case format.has(formatV7): // Do nothing. - case formatGNU: + case format.has(FormatGNU): copy(b.GNU().Magic(), magicGNU) copy(b.GNU().Version(), versionGNU) - case formatSTAR: + case format.has(formatSTAR): copy(b.STAR().Magic(), magicUSTAR) copy(b.STAR().Version(), versionUSTAR) copy(b.STAR().Trailer(), trailerSTAR) - case formatUSTAR, formatPAX: + case format.has(FormatUSTAR | FormatPAX): copy(b.USTAR().Magic(), magicUSTAR) copy(b.USTAR().Version(), versionUSTAR) default: @@ -134,6 +233,11 @@ func (b *block) ComputeChecksum() (unsigned, signed int64) { return unsigned, signed } +// Reset clears the block with all zeros. 
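Format is a bit set, which is why GetFormat above returns FormatUSTAR | FormatPAX for a plain ustar magic: the header block alone cannot tell those two formats apart. A small sketch of the bit arithmetic, assuming the vendored import path and restating the unexported has/mayOnlyBe helpers locally for illustration:

package main

import (
	"fmt"

	tar "github.com/dmcgowan/go-tar"
)

// has and mayOnlyBe restate the unexported Format helpers for illustration.
func has(f, f2 tar.Format) bool             { return f&f2 != 0 }
func mayOnlyBe(f, f2 tar.Format) tar.Format { return f & f2 }

func main() {
	// A block with a plain "ustar\x00" magic could be either USTAR or PAX.
	candidates := tar.FormatUSTAR | tar.FormatPAX

	// Seeing PAX records on the entry narrows the candidate set.
	candidates = mayOnlyBe(candidates, tar.FormatPAX)

	fmt.Println(has(candidates, tar.FormatUSTAR)) // false
	fmt.Println(candidates)                       // PAX
	fmt.Println(tar.FormatUSTAR | tar.FormatGNU)  // (USTAR | GNU)
}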
+func (b *block) Reset() { + *b = block{} +} + type headerV7 [blockSize]byte func (h *headerV7) Name() []byte { return h[000:][:100] } @@ -187,11 +291,11 @@ func (h *headerUSTAR) Prefix() []byte { return h[345:][:155] } type sparseArray []byte -func (s sparseArray) Entry(i int) sparseNode { return (sparseNode)(s[i*24:]) } +func (s sparseArray) Entry(i int) sparseElem { return (sparseElem)(s[i*24:]) } func (s sparseArray) IsExtended() []byte { return s[24*s.MaxEntries():][:1] } func (s sparseArray) MaxEntries() int { return len(s) / 24 } -type sparseNode []byte +type sparseElem []byte -func (s sparseNode) Offset() []byte { return s[00:][:12] } -func (s sparseNode) NumBytes() []byte { return s[12:][:12] } +func (s sparseElem) Offset() []byte { return s[00:][:12] } +func (s sparseElem) Length() []byte { return s[12:][:12] } diff --git a/vendor/github.com/dmcgowan/go-tar/reader.go b/vendor/github.com/dmcgowan/go-tar/reader.go index a6142c6b86..1d0673020f 100644 --- a/vendor/github.com/dmcgowan/go-tar/reader.go +++ b/vendor/github.com/dmcgowan/go-tar/reader.go @@ -4,33 +4,23 @@ package tar -// TODO(dsymonds): -// - pax extensions - import ( "bytes" - "errors" "io" "io/ioutil" - "math" "strconv" "strings" "time" ) -var ( - ErrHeader = errors.New("archive/tar: invalid tar header") -) - -// A Reader provides sequential access to the contents of a tar archive. -// A tar archive consists of a sequence of files. -// The Next method advances to the next file in the archive (including the first), -// and then it can be treated as an io.Reader to access the file's data. +// Reader provides sequential access to the contents of a tar archive. +// Reader.Next advances to the next file in the archive (including the first), +// and then Reader can be treated as an io.Reader to access the file's data. type Reader struct { r io.Reader - pad int64 // amount of padding (ignored) after current file entry - curr numBytesReader // reader for current file entry - blk block // buffer to use as temporary local storage + pad int64 // Amount of padding (ignored) after current file entry + curr fileReader // Reader for current file entry + blk block // Buffer to use as temporary local storage // err is a persistent error. // It is only the responsibility of every exported method of Reader to @@ -38,68 +28,21 @@ type Reader struct { err error } -// A numBytesReader is an io.Reader with a numBytes method, returning the number -// of bytes remaining in the underlying encoded data. -type numBytesReader interface { +type fileReader interface { io.Reader - numBytes() int64 -} + fileState -// A regFileReader is a numBytesReader for reading file data from a tar archive. -type regFileReader struct { - r io.Reader // underlying reader - nb int64 // number of unread bytes for current file entry + WriteTo(io.Writer) (int64, error) } -// A sparseFileReader is a numBytesReader for reading sparse file data from a -// tar archive. -type sparseFileReader struct { - rfr numBytesReader // Reads the sparse-encoded file data - sp []sparseEntry // The sparse map for the file - pos int64 // Keeps track of file position - total int64 // Total size of the file -} - -// A sparseEntry holds a single entry in a sparse file's sparse map. -// -// Sparse files are represented using a series of sparseEntrys. -// Despite the name, a sparseEntry represents an actual data fragment that -// references data found in the underlying archive stream. All regions not -// covered by a sparseEntry are logically filled with zeros. 
-// -// For example, if the underlying raw file contains the 10-byte data: -// var compactData = "abcdefgh" -// -// And the sparse map has the following entries: -// var sp = []sparseEntry{ -// {offset: 2, numBytes: 5} // Data fragment for [2..7] -// {offset: 18, numBytes: 3} // Data fragment for [18..21] -// } -// -// Then the content of the resulting sparse file with a "real" size of 25 is: -// var sparseData = "\x00"*2 + "abcde" + "\x00"*11 + "fgh" + "\x00"*4 -type sparseEntry struct { - offset int64 // Starting position of the fragment - numBytes int64 // Length of the fragment -} - -// Keywords for GNU sparse files in a PAX extended header -const ( - paxGNUSparseNumBlocks = "GNU.sparse.numblocks" - paxGNUSparseOffset = "GNU.sparse.offset" - paxGNUSparseNumBytes = "GNU.sparse.numbytes" - paxGNUSparseMap = "GNU.sparse.map" - paxGNUSparseName = "GNU.sparse.name" - paxGNUSparseMajor = "GNU.sparse.major" - paxGNUSparseMinor = "GNU.sparse.minor" - paxGNUSparseSize = "GNU.sparse.size" - paxGNUSparseRealSize = "GNU.sparse.realsize" -) - // NewReader creates a new Reader reading from r. -func NewReader(r io.Reader) *Reader { return &Reader{r: r} } +func NewReader(r io.Reader) *Reader { + return &Reader{r: r, curr: ®FileReader{r, 0}} +} // Next advances to the next entry in the tar archive. +// The Header.Size determines how many bytes can be read for the next file. +// Any remaining data in the current file is automatically discarded. // // io.EOF is returned at the end of the input. func (tr *Reader) Next() (*Header, error) { @@ -112,18 +55,26 @@ func (tr *Reader) Next() (*Header, error) { } func (tr *Reader) next() (*Header, error) { - var extHdrs map[string]string + var paxHdrs map[string]string + var gnuLongName, gnuLongLink string // Externally, Next iterates through the tar archive as if it is a series of // files. Internally, the tar format often uses fake "files" to add meta // data that describes the next file. These meta data "files" should not // normally be visible to the outside. As such, this loop iterates through // one or more "header files" until it finds a "normal file". + format := FormatUSTAR | FormatPAX | FormatGNU loop: for { - if err := tr.skipUnread(); err != nil { + // Discard the remainder of the file and any padding. + if err := discard(tr.r, tr.curr.PhysicalRemaining()); err != nil { return nil, err } + if _, err := tryReadFull(tr.r, tr.blk[:tr.pad]); err != nil { + return nil, err + } + tr.pad = 0 + hdr, rawHdr, err := tr.readHeader() if err != nil { return nil, err @@ -131,43 +82,57 @@ loop: if err := tr.handleRegularFile(hdr); err != nil { return nil, err } + format.mayOnlyBe(hdr.Format) // Check for PAX/GNU special headers and files. switch hdr.Typeflag { - case TypeXHeader: - extHdrs, err = parsePAX(tr) + case TypeXHeader, TypeXGlobalHeader: + format.mayOnlyBe(FormatPAX) + paxHdrs, err = parsePAX(tr) if err != nil { return nil, err } + if hdr.Typeflag == TypeXGlobalHeader { + mergePAX(hdr, paxHdrs) + return &Header{ + Typeflag: hdr.Typeflag, + Xattrs: hdr.Xattrs, + PAXRecords: hdr.PAXRecords, + Format: format, + }, nil + } continue loop // This is a meta header affecting the next header case TypeGNULongName, TypeGNULongLink: + format.mayOnlyBe(FormatGNU) realname, err := ioutil.ReadAll(tr) if err != nil { return nil, err } - // Convert GNU extensions to use PAX headers. 
- if extHdrs == nil { - extHdrs = make(map[string]string) - } var p parser switch hdr.Typeflag { case TypeGNULongName: - extHdrs[paxPath] = p.parseString(realname) + gnuLongName = p.parseString(realname) case TypeGNULongLink: - extHdrs[paxLinkpath] = p.parseString(realname) - } - if p.err != nil { - return nil, p.err + gnuLongLink = p.parseString(realname) } continue loop // This is a meta header affecting the next header default: // The old GNU sparse format is handled here since it is technically // just a regular file with additional attributes. - if err := mergePAX(hdr, extHdrs); err != nil { + if err := mergePAX(hdr, paxHdrs); err != nil { return nil, err } + if gnuLongName != "" { + hdr.Name = gnuLongName + } + if gnuLongLink != "" { + hdr.Linkname = gnuLongLink + } + if hdr.Typeflag == TypeRegA && strings.HasSuffix(hdr.Name, "/") { + hdr.Typeflag = TypeDir // Legacy archives use trailing slash for directories + } // The extended headers may have updated the size. // Thus, setup the regFileReader again after merging PAX headers. @@ -177,9 +142,15 @@ loop: // Sparse formats rely on being able to read from the logical data // section; there must be a preceding call to handleRegularFile. - if err := tr.handleSparseFile(hdr, rawHdr, extHdrs); err != nil { + if err := tr.handleSparseFile(hdr, rawHdr); err != nil { return nil, err } + + // Set the final guess at the format. + if format.has(FormatUSTAR) && format.has(FormatPAX) { + format.mayOnlyBe(FormatUSTAR) + } + hdr.Format = format return hdr, nil // This is a file, so stop } } @@ -197,105 +168,87 @@ func (tr *Reader) handleRegularFile(hdr *Header) error { return ErrHeader } - tr.pad = -nb & (blockSize - 1) // blockSize is a power of two + tr.pad = blockPadding(nb) tr.curr = ®FileReader{r: tr.r, nb: nb} return nil } // handleSparseFile checks if the current file is a sparse format of any type // and sets the curr reader appropriately. -func (tr *Reader) handleSparseFile(hdr *Header, rawHdr *block, extHdrs map[string]string) error { - var sp []sparseEntry +func (tr *Reader) handleSparseFile(hdr *Header, rawHdr *block) error { + var spd sparseDatas var err error if hdr.Typeflag == TypeGNUSparse { - sp, err = tr.readOldGNUSparseMap(hdr, rawHdr) - if err != nil { - return err - } + spd, err = tr.readOldGNUSparseMap(hdr, rawHdr) } else { - sp, err = tr.checkForGNUSparsePAXHeaders(hdr, extHdrs) - if err != nil { - return err - } + spd, err = tr.readGNUSparsePAXHeaders(hdr) } // If sp is non-nil, then this is a sparse file. - // Note that it is possible for len(sp) to be zero. - if sp != nil { - tr.curr, err = newSparseFileReader(tr.curr, sp, hdr.Size) + // Note that it is possible for len(sp) == 0. + if err == nil && spd != nil { + if isHeaderOnlyType(hdr.Typeflag) || !validateSparseEntries(spd, hdr.Size) { + return ErrHeader + } + sph := invertSparseEntries(spd, hdr.Size) + tr.curr = &sparseFileReader{tr.curr, sph, 0} + hdr.SparseHoles = append([]SparseEntry{}, sph...) } return err } -// checkForGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers. If they are found, then -// this function reads the sparse map and returns it. Unknown sparse formats are ignored, causing the file to -// be treated as a regular file. 
-func (tr *Reader) checkForGNUSparsePAXHeaders(hdr *Header, headers map[string]string) ([]sparseEntry, error) { - var sparseFormat string - - // Check for sparse format indicators - major, majorOk := headers[paxGNUSparseMajor] - minor, minorOk := headers[paxGNUSparseMinor] - sparseName, sparseNameOk := headers[paxGNUSparseName] - _, sparseMapOk := headers[paxGNUSparseMap] - sparseSize, sparseSizeOk := headers[paxGNUSparseSize] - sparseRealSize, sparseRealSizeOk := headers[paxGNUSparseRealSize] - - // Identify which, if any, sparse format applies from which PAX headers are set - if majorOk && minorOk { - sparseFormat = major + "." + minor - } else if sparseNameOk && sparseMapOk { - sparseFormat = "0.1" - } else if sparseSizeOk { - sparseFormat = "0.0" - } else { - // Not a PAX format GNU sparse file. - return nil, nil +// readGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers. +// If they are found, then this function reads the sparse map and returns it. +// This assumes that 0.0 headers have already been converted to 0.1 headers +// by the the PAX header parsing logic. +func (tr *Reader) readGNUSparsePAXHeaders(hdr *Header) (sparseDatas, error) { + // Identify the version of GNU headers. + var is1x0 bool + major, minor := hdr.PAXRecords[paxGNUSparseMajor], hdr.PAXRecords[paxGNUSparseMinor] + switch { + case major == "0" && (minor == "0" || minor == "1"): + is1x0 = false + case major == "1" && minor == "0": + is1x0 = true + case major != "" || minor != "": + return nil, nil // Unknown GNU sparse PAX version + case hdr.PAXRecords[paxGNUSparseMap] != "": + is1x0 = false // 0.0 and 0.1 did not have explicit version records, so guess + default: + return nil, nil // Not a PAX format GNU sparse file. } + hdr.Format.mayOnlyBe(FormatPAX) - // Check for unknown sparse format - if sparseFormat != "0.0" && sparseFormat != "0.1" && sparseFormat != "1.0" { - return nil, nil + // Update hdr from GNU sparse PAX headers. + if name := hdr.PAXRecords[paxGNUSparseName]; name != "" { + hdr.Name = name } - - // Update hdr from GNU sparse PAX headers - if sparseNameOk { - hdr.Name = sparseName + size := hdr.PAXRecords[paxGNUSparseSize] + if size == "" { + size = hdr.PAXRecords[paxGNUSparseRealSize] } - if sparseSizeOk { - realSize, err := strconv.ParseInt(sparseSize, 10, 64) + if size != "" { + n, err := strconv.ParseInt(size, 10, 64) if err != nil { return nil, ErrHeader } - hdr.Size = realSize - } else if sparseRealSizeOk { - realSize, err := strconv.ParseInt(sparseRealSize, 10, 64) - if err != nil { - return nil, ErrHeader - } - hdr.Size = realSize + hdr.Size = n } - // Set up the sparse map, according to the particular sparse format in use - var sp []sparseEntry - var err error - switch sparseFormat { - case "0.0", "0.1": - sp, err = readGNUSparseMap0x1(headers) - case "1.0": - sp, err = readGNUSparseMap1x0(tr.curr) + // Read the sparse map according to the appropriate format. + if is1x0 { + return readGNUSparseMap1x0(tr.curr) } - return sp, err + return readGNUSparseMap0x1(hdr.PAXRecords) } -// mergePAX merges well known headers according to PAX standard. -// In general headers with the same name as those found -// in the header struct overwrite those found in the header -// struct with higher precision or longer values. Esp. useful -// for name and linkname fields. -func mergePAX(hdr *Header, headers map[string]string) (err error) { - var id64 int64 - for k, v := range headers { +// mergePAX merges paxHdrs into hdr for all relevant fields of Header. 
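readGNUSparsePAXHeaders above selects the GNU sparse variant purely from the PAX records attached to the entry. A simplified restatement of that decision, with illustrative (made-up) record values:

package main

import "fmt"

// sparseVersion restates, in simplified form, how the reader above decides
// which GNU sparse PAX variant an entry uses.
func sparseVersion(recs map[string]string) string {
	major, minor := recs["GNU.sparse.major"], recs["GNU.sparse.minor"]
	switch {
	case major == "0" && (minor == "0" || minor == "1"):
		return "0.x: sparse map carried in PAX records"
	case major == "1" && minor == "0":
		return "1.0: sparse map precedes the file data"
	case major != "" || minor != "":
		return "unknown GNU sparse version"
	case recs["GNU.sparse.map"] != "":
		return "0.x (guessed: no explicit version records)"
	default:
		return "not a GNU sparse PAX entry"
	}
}

func main() {
	// A 0.1-style entry: parsePAX has already folded GNU.sparse.offset and
	// GNU.sparse.numbytes pairs into one comma-separated GNU.sparse.map record.
	recs01 := map[string]string{
		"GNU.sparse.name": "big.img",
		"GNU.sparse.size": "25",
		"GNU.sparse.map":  "2,5,18,3", // offset,length pairs (2,5) and (18,3)
	}
	// A 1.0-style entry: version records only; the map itself is stored in the
	// data section ahead of the file content.
	recs10 := map[string]string{
		"GNU.sparse.major":    "1",
		"GNU.sparse.minor":    "0",
		"GNU.sparse.name":     "big.img",
		"GNU.sparse.realsize": "25",
	}
	fmt.Println(sparseVersion(recs01)) // 0.x (guessed: no explicit version records)
	fmt.Println(sparseVersion(recs10)) // 1.0: sparse map precedes the file data
}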
+func mergePAX(hdr *Header, paxHdrs map[string]string) (err error) { + for k, v := range paxHdrs { + if v == "" { + continue // Keep the original USTAR value + } + var id64 int64 switch k { case paxPath: hdr.Name = v @@ -320,17 +273,18 @@ func mergePAX(hdr *Header, headers map[string]string) (err error) { case paxSize: hdr.Size, err = strconv.ParseInt(v, 10, 64) default: - if strings.HasPrefix(k, paxXattr) { + if strings.HasPrefix(k, paxSchilyXattr) { if hdr.Xattrs == nil { hdr.Xattrs = make(map[string]string) } - hdr.Xattrs[k[len(paxXattr):]] = v + hdr.Xattrs[k[len(paxSchilyXattr):]] = v } } if err != nil { return ErrHeader } } + hdr.PAXRecords = paxHdrs return nil } @@ -348,7 +302,7 @@ func parsePAX(r io.Reader) (map[string]string, error) { // headers since 0.0 headers were not PAX compliant. var sparseMap []string - extHdrs := make(map[string]string) + paxHdrs := make(map[string]string) for len(sbuf) > 0 { key, value, residual, err := parsePAXRecord(sbuf) if err != nil { @@ -366,58 +320,13 @@ func parsePAX(r io.Reader) (map[string]string, error) { } sparseMap = append(sparseMap, value) default: - // According to PAX specification, a value is stored only if it is - // non-empty. Otherwise, the key is deleted. - if len(value) > 0 { - extHdrs[key] = value - } else { - delete(extHdrs, key) - } + paxHdrs[key] = value } } if len(sparseMap) > 0 { - extHdrs[paxGNUSparseMap] = strings.Join(sparseMap, ",") + paxHdrs[paxGNUSparseMap] = strings.Join(sparseMap, ",") } - return extHdrs, nil -} - -// skipUnread skips any unread bytes in the existing file entry, as well as any -// alignment padding. It returns io.ErrUnexpectedEOF if any io.EOF is -// encountered in the data portion; it is okay to hit io.EOF in the padding. -// -// Note that this function still works properly even when sparse files are being -// used since numBytes returns the bytes remaining in the underlying io.Reader. -func (tr *Reader) skipUnread() error { - dataSkip := tr.numBytes() // Number of data bytes to skip - totalSkip := dataSkip + tr.pad // Total number of bytes to skip - tr.curr, tr.pad = nil, 0 - - // If possible, Seek to the last byte before the end of the data section. - // Do this because Seek is often lazy about reporting errors; this will mask - // the fact that the tar stream may be truncated. We can rely on the - // io.CopyN done shortly afterwards to trigger any IO errors. - var seekSkipped int64 // Number of bytes skipped via Seek - if sr, ok := tr.r.(io.Seeker); ok && dataSkip > 1 { - // Not all io.Seeker can actually Seek. For example, os.Stdin implements - // io.Seeker, but calling Seek always returns an error and performs - // no action. Thus, we try an innocent seek to the current position - // to see if Seek is really supported. - pos1, err := sr.Seek(0, io.SeekCurrent) - if err == nil { - // Seek seems supported, so perform the real Seek. - pos2, err := sr.Seek(dataSkip-1, io.SeekCurrent) - if err != nil { - return err - } - seekSkipped = pos2 - pos1 - } - } - - copySkipped, err := io.CopyN(ioutil.Discard, tr.r, totalSkip-seekSkipped) - if err == io.EOF && seekSkipped+copySkipped < dataSkip { - err = io.ErrUnexpectedEOF - } - return err + return paxHdrs, nil } // readHeader reads the next block header and assumes that the underlying reader @@ -445,7 +354,7 @@ func (tr *Reader) readHeader() (*Header, *block, error) { // Verify the header matches a known format. 
format := tr.blk.GetFormat() - if format == formatUnknown { + if format == FormatUnknown { return nil, nil, ErrHeader } @@ -454,37 +363,86 @@ func (tr *Reader) readHeader() (*Header, *block, error) { // Unpack the V7 header. v7 := tr.blk.V7() + hdr.Typeflag = v7.TypeFlag()[0] hdr.Name = p.parseString(v7.Name()) + hdr.Linkname = p.parseString(v7.LinkName()) + hdr.Size = p.parseNumeric(v7.Size()) hdr.Mode = p.parseNumeric(v7.Mode()) hdr.Uid = int(p.parseNumeric(v7.UID())) hdr.Gid = int(p.parseNumeric(v7.GID())) - hdr.Size = p.parseNumeric(v7.Size()) hdr.ModTime = time.Unix(p.parseNumeric(v7.ModTime()), 0) - hdr.Typeflag = v7.TypeFlag()[0] - hdr.Linkname = p.parseString(v7.LinkName()) // Unpack format specific fields. if format > formatV7 { ustar := tr.blk.USTAR() hdr.Uname = p.parseString(ustar.UserName()) hdr.Gname = p.parseString(ustar.GroupName()) - if hdr.Typeflag == TypeChar || hdr.Typeflag == TypeBlock { - hdr.Devmajor = p.parseNumeric(ustar.DevMajor()) - hdr.Devminor = p.parseNumeric(ustar.DevMinor()) - } + hdr.Devmajor = p.parseNumeric(ustar.DevMajor()) + hdr.Devminor = p.parseNumeric(ustar.DevMinor()) var prefix string - switch format { - case formatUSTAR, formatGNU: - // TODO(dsnet): Do not use the prefix field for the GNU format! - // See golang.org/issues/12594 + switch { + case format.has(FormatUSTAR | FormatPAX): + hdr.Format = format ustar := tr.blk.USTAR() prefix = p.parseString(ustar.Prefix()) - case formatSTAR: + + // For Format detection, check if block is properly formatted since + // the parser is more liberal than what USTAR actually permits. + notASCII := func(r rune) bool { return r >= 0x80 } + if bytes.IndexFunc(tr.blk[:], notASCII) >= 0 { + hdr.Format = FormatUnknown // Non-ASCII characters in block. + } + nul := func(b []byte) bool { return int(b[len(b)-1]) == 0 } + if !(nul(v7.Size()) && nul(v7.Mode()) && nul(v7.UID()) && nul(v7.GID()) && + nul(v7.ModTime()) && nul(ustar.DevMajor()) && nul(ustar.DevMinor())) { + hdr.Format = FormatUnknown // Numeric fields must end in NUL + } + case format.has(formatSTAR): star := tr.blk.STAR() prefix = p.parseString(star.Prefix()) hdr.AccessTime = time.Unix(p.parseNumeric(star.AccessTime()), 0) hdr.ChangeTime = time.Unix(p.parseNumeric(star.ChangeTime()), 0) + case format.has(FormatGNU): + hdr.Format = format + var p2 parser + gnu := tr.blk.GNU() + if b := gnu.AccessTime(); b[0] != 0 { + hdr.AccessTime = time.Unix(p2.parseNumeric(b), 0) + } + if b := gnu.ChangeTime(); b[0] != 0 { + hdr.ChangeTime = time.Unix(p2.parseNumeric(b), 0) + } + + // Prior to Go1.8, the Writer had a bug where it would output + // an invalid tar file in certain rare situations because the logic + // incorrectly believed that the old GNU format had a prefix field. + // This is wrong and leads to an output file that mangles the + // atime and ctime fields, which are often left unused. + // + // In order to continue reading tar files created by former, buggy + // versions of Go, we skeptically parse the atime and ctime fields. + // If we are unable to parse them and the prefix field looks like + // an ASCII string, then we fallback on the pre-Go1.8 behavior + // of treating these fields as the USTAR prefix field. + // + // Note that this will not use the fallback logic for all possible + // files generated by a pre-Go1.8 toolchain. 
If the generated file + // happened to have a prefix field that parses as valid + // atime and ctime fields (e.g., when they are valid octal strings), + // then it is impossible to distinguish between an valid GNU file + // and an invalid pre-Go1.8 file. + // + // See https://golang.org/issues/12594 + // See https://golang.org/issues/21005 + if p2.err != nil { + hdr.AccessTime, hdr.ChangeTime = time.Time{}, time.Time{} + ustar := tr.blk.USTAR() + if s := p.parseString(ustar.Prefix()); isASCII(s) { + prefix = s + } + hdr.Format = FormatUnknown // Buggy file is not GNU + } } if len(prefix) > 0 { hdr.Name = prefix + "/" + hdr.Name @@ -501,21 +459,22 @@ func (tr *Reader) readHeader() (*Header, *block, error) { // The Header.Size does not reflect the size of any extended headers used. // Thus, this function will read from the raw io.Reader to fetch extra headers. // This method mutates blk in the process. -func (tr *Reader) readOldGNUSparseMap(hdr *Header, blk *block) ([]sparseEntry, error) { +func (tr *Reader) readOldGNUSparseMap(hdr *Header, blk *block) (sparseDatas, error) { // Make sure that the input format is GNU. // Unfortunately, the STAR format also has a sparse header format that uses // the same type flag but has a completely different layout. - if blk.GetFormat() != formatGNU { + if blk.GetFormat() != FormatGNU { return nil, ErrHeader } + hdr.Format.mayOnlyBe(FormatGNU) var p parser hdr.Size = p.parseNumeric(blk.GNU().RealSize()) if p.err != nil { return nil, p.err } - var s sparseArray = blk.GNU().Sparse() - var sp = make([]sparseEntry, 0, s.MaxEntries()) + s := blk.GNU().Sparse() + spd := make(sparseDatas, 0, s.MaxEntries()) for { for i := 0; i < s.MaxEntries(); i++ { // This termination condition is identical to GNU and BSD tar. @@ -523,25 +482,22 @@ func (tr *Reader) readOldGNUSparseMap(hdr *Header, blk *block) ([]sparseEntry, e break // Don't return, need to process extended headers (even if empty) } offset := p.parseNumeric(s.Entry(i).Offset()) - numBytes := p.parseNumeric(s.Entry(i).NumBytes()) + length := p.parseNumeric(s.Entry(i).Length()) if p.err != nil { return nil, p.err } - sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) + spd = append(spd, SparseEntry{Offset: offset, Length: length}) } if s.IsExtended()[0] > 0 { // There are more entries. Read an extension header and parse its entries. - if _, err := io.ReadFull(tr.r, blk[:]); err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } + if _, err := mustReadFull(tr.r, blk[:]); err != nil { return nil, err } s = blk.Sparse() continue } - return sp, nil // Done + return spd, nil // Done } } @@ -549,28 +505,27 @@ func (tr *Reader) readOldGNUSparseMap(hdr *Header, blk *block) ([]sparseEntry, e // version 1.0. The format of the sparse map consists of a series of // newline-terminated numeric fields. The first field is the number of entries // and is always present. Following this are the entries, consisting of two -// fields (offset, numBytes). This function must stop reading at the end +// fields (offset, length). This function must stop reading at the end // boundary of the block containing the last newline. // // Note that the GNU manual says that numeric values should be encoded in octal // format. However, the GNU tar utility itself outputs these values in decimal. // As such, this library treats values as being encoded in decimal. 
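As an aside to the format described in the comment above, the following is a minimal, self-contained sketch (not part of this patch) of how a GNU sparse 1.0 map is laid out on the wire: a decimal entry count, then decimal offset/length pairs, every field newline-terminated, with the whole map padded out to a 512-byte block, mirroring what the writer later in this diff emits. The offsets and lengths here are made up for illustration.

package main

import (
	"fmt"
	"strconv"
)

type entry struct{ offset, length int64 }

// encodeSparseMap1x0 renders a GNU sparse 1.0 map: the entry count, then
// offset/length pairs, each as a newline-terminated decimal value, padded
// out to a full 512-byte block.
func encodeSparseMap1x0(entries []entry) []byte {
	var b []byte
	b = append(strconv.AppendInt(b, int64(len(entries)), 10), '\n')
	for _, e := range entries {
		b = append(strconv.AppendInt(b, e.offset, 10), '\n')
		b = append(strconv.AppendInt(b, e.length, 10), '\n')
	}
	if pad := 512 - len(b)%512; pad < 512 {
		b = append(b, make([]byte, pad)...)
	}
	return b
}

func main() {
	m := encodeSparseMap1x0([]entry{{0, 1024}, {4096, 512}}) // hypothetical data fragments
	fmt.Printf("%q\n", m[:18])                               // "2\n0\n1024\n4096\n512\n"
	fmt.Println(len(m))                                      // 512
}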
-func readGNUSparseMap1x0(r io.Reader) ([]sparseEntry, error) { - var cntNewline int64 - var buf bytes.Buffer - var blk = make([]byte, blockSize) +func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) { + var ( + cntNewline int64 + buf bytes.Buffer + blk block + ) - // feedTokens copies data in numBlock chunks from r into buf until there are + // feedTokens copies data in blocks from r into buf until there are // at least cnt newlines in buf. It will not read more blocks than needed. - var feedTokens = func(cnt int64) error { - for cntNewline < cnt { - if _, err := io.ReadFull(r, blk); err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } + feedTokens := func(n int64) error { + for cntNewline < n { + if _, err := mustReadFull(r, blk[:]); err != nil { return err } - buf.Write(blk) + buf.Write(blk[:]) for _, c := range blk { if c == '\n' { cntNewline++ @@ -582,10 +537,10 @@ func readGNUSparseMap1x0(r io.Reader) ([]sparseEntry, error) { // nextToken gets the next token delimited by a newline. This assumes that // at least one newline exists in the buffer. - var nextToken = func() string { + nextToken := func() string { cntNewline-- tok, _ := buf.ReadString('\n') - return tok[:len(tok)-1] // Cut off newline + return strings.TrimRight(tok, "\n") } // Parse for the number of entries. @@ -604,80 +559,67 @@ func readGNUSparseMap1x0(r io.Reader) ([]sparseEntry, error) { if err := feedTokens(2 * numEntries); err != nil { return nil, err } - sp := make([]sparseEntry, 0, numEntries) + spd := make(sparseDatas, 0, numEntries) for i := int64(0); i < numEntries; i++ { - offset, err := strconv.ParseInt(nextToken(), 10, 64) - if err != nil { + offset, err1 := strconv.ParseInt(nextToken(), 10, 64) + length, err2 := strconv.ParseInt(nextToken(), 10, 64) + if err1 != nil || err2 != nil { return nil, ErrHeader } - numBytes, err := strconv.ParseInt(nextToken(), 10, 64) - if err != nil { - return nil, ErrHeader - } - sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) + spd = append(spd, SparseEntry{Offset: offset, Length: length}) } - return sp, nil + return spd, nil } // readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format // version 0.1. The sparse map is stored in the PAX headers. -func readGNUSparseMap0x1(extHdrs map[string]string) ([]sparseEntry, error) { +func readGNUSparseMap0x1(paxHdrs map[string]string) (sparseDatas, error) { // Get number of entries. // Use integer overflow resistant math to check this. - numEntriesStr := extHdrs[paxGNUSparseNumBlocks] + numEntriesStr := paxHdrs[paxGNUSparseNumBlocks] numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0) // Intentionally parse as native int if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) { return nil, ErrHeader } // There should be two numbers in sparseMap for each entry. - sparseMap := strings.Split(extHdrs[paxGNUSparseMap], ",") + sparseMap := strings.Split(paxHdrs[paxGNUSparseMap], ",") + if len(sparseMap) == 1 && sparseMap[0] == "" { + sparseMap = sparseMap[:0] + } if int64(len(sparseMap)) != 2*numEntries { return nil, ErrHeader } // Loop through the entries in the sparse map. // numEntries is trusted now. 
- sp := make([]sparseEntry, 0, numEntries) - for i := int64(0); i < numEntries; i++ { - offset, err := strconv.ParseInt(sparseMap[2*i], 10, 64) - if err != nil { + spd := make(sparseDatas, 0, numEntries) + for len(sparseMap) >= 2 { + offset, err1 := strconv.ParseInt(sparseMap[0], 10, 64) + length, err2 := strconv.ParseInt(sparseMap[1], 10, 64) + if err1 != nil || err2 != nil { return nil, ErrHeader } - numBytes, err := strconv.ParseInt(sparseMap[2*i+1], 10, 64) - if err != nil { - return nil, ErrHeader - } - sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) + spd = append(spd, SparseEntry{Offset: offset, Length: length}) + sparseMap = sparseMap[2:] } - return sp, nil + return spd, nil } -// numBytes returns the number of bytes left to read in the current file's entry -// in the tar archive, or 0 if there is no current file. -func (tr *Reader) numBytes() int64 { - if tr.curr == nil { - // No current file, so no bytes - return 0 - } - return tr.curr.numBytes() -} - -// Read reads from the current entry in the tar archive. -// It returns 0, io.EOF when it reaches the end of that entry, -// until Next is called to advance to the next entry. +// Read reads from the current file in the tar archive. +// It returns (0, io.EOF) when it reaches the end of that file, +// until Next is called to advance to the next file. // -// Calling Read on special types like TypeLink, TypeSymLink, TypeChar, -// TypeBlock, TypeDir, and TypeFifo returns 0, io.EOF regardless of what +// If the current file is sparse, then the regions marked as a hole +// are read back as NUL-bytes. +// +// Calling Read on special types like TypeLink, TypeSymlink, TypeChar, +// TypeBlock, TypeDir, and TypeFifo returns (0, io.EOF) regardless of what // the Header.Size claims. func (tr *Reader) Read(b []byte) (int, error) { if tr.err != nil { return 0, tr.err } - if tr.curr == nil { - return 0, io.EOF - } - n, err := tr.curr.Read(b) if err != nil && err != io.EOF { tr.err = err @@ -685,116 +627,226 @@ func (tr *Reader) Read(b []byte) (int, error) { return n, err } -func (rfr *regFileReader) Read(b []byte) (n int, err error) { - if rfr.nb == 0 { - // file consumed - return 0, io.EOF +// WriteTo writes the content of the current file to w. +// The bytes written matches the number of remaining bytes in the current file. +// +// If the current file is sparse and w is an io.WriteSeeker, +// then WriteTo uses Seek to skip past holes defined in Header.SparseHoles, +// assuming that skipped regions are filled with NULs. +// This always writes the last byte to ensure w is the right size. +func (tr *Reader) WriteTo(w io.Writer) (int64, error) { + if tr.err != nil { + return 0, tr.err } - if int64(len(b)) > rfr.nb { - b = b[0:rfr.nb] - } - n, err = rfr.r.Read(b) - rfr.nb -= int64(n) - - if err == io.EOF && rfr.nb > 0 { - err = io.ErrUnexpectedEOF - } - return -} - -// numBytes returns the number of bytes left to read in the file's data in the tar archive. -func (rfr *regFileReader) numBytes() int64 { - return rfr.nb -} - -// newSparseFileReader creates a new sparseFileReader, but validates all of the -// sparse entries before doing so. -func newSparseFileReader(rfr numBytesReader, sp []sparseEntry, total int64) (*sparseFileReader, error) { - if total < 0 { - return nil, ErrHeader // Total size cannot be negative - } - - // Validate all sparse entries. These are the same checks as performed by - // the BSD tar utility. 
- for i, s := range sp { - switch { - case s.offset < 0 || s.numBytes < 0: - return nil, ErrHeader // Negative values are never okay - case s.offset > math.MaxInt64-s.numBytes: - return nil, ErrHeader // Integer overflow with large length - case s.offset+s.numBytes > total: - return nil, ErrHeader // Region extends beyond the "real" size - case i > 0 && sp[i-1].offset+sp[i-1].numBytes > s.offset: - return nil, ErrHeader // Regions can't overlap and must be in order - } - } - return &sparseFileReader{rfr: rfr, sp: sp, total: total}, nil -} - -// readHole reads a sparse hole ending at endOffset. -func (sfr *sparseFileReader) readHole(b []byte, endOffset int64) int { - n64 := endOffset - sfr.pos - if n64 > int64(len(b)) { - n64 = int64(len(b)) - } - n := int(n64) - for i := 0; i < n; i++ { - b[i] = 0 - } - sfr.pos += n64 - return n -} - -// Read reads the sparse file data in expanded form. -func (sfr *sparseFileReader) Read(b []byte) (n int, err error) { - // Skip past all empty fragments. - for len(sfr.sp) > 0 && sfr.sp[0].numBytes == 0 { - sfr.sp = sfr.sp[1:] - } - - // If there are no more fragments, then it is possible that there - // is one last sparse hole. - if len(sfr.sp) == 0 { - // This behavior matches the BSD tar utility. - // However, GNU tar stops returning data even if sfr.total is unmet. - if sfr.pos < sfr.total { - return sfr.readHole(b, sfr.total), nil - } - return 0, io.EOF - } - - // In front of a data fragment, so read a hole. - if sfr.pos < sfr.sp[0].offset { - return sfr.readHole(b, sfr.sp[0].offset), nil - } - - // In a data fragment, so read from it. - // This math is overflow free since we verify that offset and numBytes can - // be safely added when creating the sparseFileReader. - endPos := sfr.sp[0].offset + sfr.sp[0].numBytes // End offset of fragment - bytesLeft := endPos - sfr.pos // Bytes left in fragment - if int64(len(b)) > bytesLeft { - b = b[:bytesLeft] - } - - n, err = sfr.rfr.Read(b) - sfr.pos += int64(n) - if err == io.EOF { - if sfr.pos < endPos { - err = io.ErrUnexpectedEOF // There was supposed to be more data - } else if sfr.pos < sfr.total { - err = nil // There is still an implicit sparse hole at the end - } - } - - if sfr.pos == endPos { - sfr.sp = sfr.sp[1:] // We are done with this fragment, so pop it + n, err := tr.curr.WriteTo(w) + if err != nil { + tr.err = err } return n, err } -// numBytes returns the number of bytes left to read in the sparse file's -// sparse-encoded data in the tar archive. -func (sfr *sparseFileReader) numBytes() int64 { - return sfr.rfr.numBytes() +// regFileReader is a fileReader for reading data from a regular file entry. +type regFileReader struct { + r io.Reader // Underlying Reader + nb int64 // Number of remaining bytes to read +} + +func (fr *regFileReader) Read(b []byte) (n int, err error) { + if int64(len(b)) > fr.nb { + b = b[:fr.nb] + } + if len(b) > 0 { + n, err = fr.r.Read(b) + fr.nb -= int64(n) + } + switch { + case err == io.EOF && fr.nb > 0: + return n, io.ErrUnexpectedEOF + case err == nil && fr.nb == 0: + return n, io.EOF + default: + return n, err + } +} + +func (fr *regFileReader) WriteTo(w io.Writer) (int64, error) { + return io.Copy(w, struct{ io.Reader }{fr}) +} + +func (fr regFileReader) LogicalRemaining() int64 { + return fr.nb +} + +func (fr regFileReader) PhysicalRemaining() int64 { + return fr.nb +} + +// sparseFileReader is a fileReader for reading data from a sparse file entry. 
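For context on how sparse entries behave from the caller's side: as the Read documentation above states, hole regions are materialized as NUL bytes, so a consumer simply reads Header.Size logical bytes. A hedged usage sketch follows (not part of this patch); the archive name and error handling are illustrative only, and the import path is the vendored package from this diff.

package main

import (
	"bytes"
	"io"
	"log"
	"os"

	tar "github.com/dmcgowan/go-tar"
)

func main() {
	f, err := os.Open("sparse.tar") // hypothetical archive containing a sparse entry
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	tr := tar.NewReader(f)
	hdr, err := tr.Next()
	if err != nil {
		log.Fatal(err)
	}

	// Holes are not stored in the archive, but Read fills them in with
	// NUL bytes, so the buffer ends up holding hdr.Size logical bytes.
	var buf bytes.Buffer
	if _, err := io.Copy(&buf, tr); err != nil {
		log.Fatal(err)
	}
	log.Printf("%s: %d logical bytes", hdr.Name, buf.Len())
}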
+type sparseFileReader struct { + fr fileReader // Underlying fileReader + sp sparseHoles // Normalized list of sparse holes + pos int64 // Current position in sparse file +} + +func (sr *sparseFileReader) Read(b []byte) (n int, err error) { + finished := int64(len(b)) >= sr.LogicalRemaining() + if finished { + b = b[:sr.LogicalRemaining()] + } + + b0 := b + endPos := sr.pos + int64(len(b)) + for endPos > sr.pos && err == nil { + var nf int // Bytes read in fragment + holeStart, holeEnd := sr.sp[0].Offset, sr.sp[0].endOffset() + if sr.pos < holeStart { // In a data fragment + bf := b[:min(int64(len(b)), holeStart-sr.pos)] + nf, err = tryReadFull(sr.fr, bf) + } else { // In a hole fragment + bf := b[:min(int64(len(b)), holeEnd-sr.pos)] + nf, err = tryReadFull(zeroReader{}, bf) + } + b = b[nf:] + sr.pos += int64(nf) + if sr.pos >= holeEnd && len(sr.sp) > 1 { + sr.sp = sr.sp[1:] // Ensure last fragment always remains + } + } + + n = len(b0) - len(b) + switch { + case err == io.EOF: + return n, errMissData // Less data in dense file than sparse file + case err != nil: + return n, err + case sr.LogicalRemaining() == 0 && sr.PhysicalRemaining() > 0: + return n, errUnrefData // More data in dense file than sparse file + case finished: + return n, io.EOF + default: + return n, nil + } +} + +func (sr *sparseFileReader) WriteTo(w io.Writer) (n int64, err error) { + ws, ok := w.(io.WriteSeeker) + if ok { + if _, err := ws.Seek(0, io.SeekCurrent); err != nil { + ok = false // Not all io.Seeker can really seek + } + } + if !ok { + return io.Copy(w, struct{ io.Reader }{sr}) + } + + var writeLastByte bool + pos0 := sr.pos + for sr.LogicalRemaining() > 0 && !writeLastByte && err == nil { + var nf int64 // Size of fragment + holeStart, holeEnd := sr.sp[0].Offset, sr.sp[0].endOffset() + if sr.pos < holeStart { // In a data fragment + nf = holeStart - sr.pos + nf, err = io.CopyN(ws, sr.fr, nf) + } else { // In a hole fragment + nf = holeEnd - sr.pos + if sr.PhysicalRemaining() == 0 { + writeLastByte = true + nf-- + } + _, err = ws.Seek(nf, io.SeekCurrent) + } + sr.pos += nf + if sr.pos >= holeEnd && len(sr.sp) > 1 { + sr.sp = sr.sp[1:] // Ensure last fragment always remains + } + } + + // If the last fragment is a hole, then seek to 1-byte before EOF, and + // write a single byte to ensure the file is the right size. + if writeLastByte && err == nil { + _, err = ws.Write([]byte{0}) + sr.pos++ + } + + n = sr.pos - pos0 + switch { + case err == io.EOF: + return n, errMissData // Less data in dense file than sparse file + case err != nil: + return n, err + case sr.LogicalRemaining() == 0 && sr.PhysicalRemaining() > 0: + return n, errUnrefData // More data in dense file than sparse file + default: + return n, nil + } +} + +func (sr sparseFileReader) LogicalRemaining() int64 { + return sr.sp[len(sr.sp)-1].endOffset() - sr.pos +} +func (sr sparseFileReader) PhysicalRemaining() int64 { + return sr.fr.PhysicalRemaining() +} + +type zeroReader struct{} + +func (zeroReader) Read(b []byte) (int, error) { + for i := range b { + b[i] = 0 + } + return len(b), nil +} + +// mustReadFull is like io.ReadFull except it returns +// io.ErrUnexpectedEOF when io.EOF is hit before len(b) bytes are read. +func mustReadFull(r io.Reader, b []byte) (int, error) { + n, err := tryReadFull(r, b) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return n, err +} + +// tryReadFull is like io.ReadFull except it returns +// io.EOF when it is hit before len(b) bytes are read. 
+func tryReadFull(r io.Reader, b []byte) (n int, err error) { + for len(b) > n && err == nil { + var nn int + nn, err = r.Read(b[n:]) + n += nn + } + if len(b) == n && err == io.EOF { + err = nil + } + return n, err +} + +// discard skips n bytes in r, reporting an error if unable to do so. +func discard(r io.Reader, n int64) error { + // If possible, Seek to the last byte before the end of the data section. + // Do this because Seek is often lazy about reporting errors; this will mask + // the fact that the stream may be truncated. We can rely on the + // io.CopyN done shortly afterwards to trigger any IO errors. + var seekSkipped int64 // Number of bytes skipped via Seek + if sr, ok := r.(io.Seeker); ok && n > 1 { + // Not all io.Seeker can actually Seek. For example, os.Stdin implements + // io.Seeker, but calling Seek always returns an error and performs + // no action. Thus, we try an innocent seek to the current position + // to see if Seek is really supported. + pos1, err := sr.Seek(0, io.SeekCurrent) + if pos1 >= 0 && err == nil { + // Seek seems supported, so perform the real Seek. + pos2, err := sr.Seek(n-1, io.SeekCurrent) + if pos2 < 0 || err != nil { + return err + } + seekSkipped = pos2 - pos1 + } + } + + copySkipped, err := io.CopyN(ioutil.Discard, r, n-seekSkipped) + if err == io.EOF && seekSkipped+copySkipped < n { + err = io.ErrUnexpectedEOF + } + return err } diff --git a/vendor/github.com/dmcgowan/go-tar/sparse_unix.go b/vendor/github.com/dmcgowan/go-tar/sparse_unix.go new file mode 100644 index 0000000000..c623c1ee4f --- /dev/null +++ b/vendor/github.com/dmcgowan/go-tar/sparse_unix.go @@ -0,0 +1,77 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux darwin dragonfly freebsd openbsd netbsd solaris + +package tar + +import ( + "io" + "os" + "runtime" + "syscall" +) + +func init() { + sysSparseDetect = sparseDetectUnix +} + +func sparseDetectUnix(f *os.File) (sph sparseHoles, err error) { + // SEEK_DATA and SEEK_HOLE originated from Solaris and support for it + // has been added to most of the other major Unix systems. + var seekData, seekHole = 3, 4 // SEEK_DATA/SEEK_HOLE from unistd.h + + if runtime.GOOS == "darwin" { + // Darwin has the constants swapped, compared to all other UNIX. + seekData, seekHole = 4, 3 + } + + // Check for seekData/seekHole support. + // Different OS and FS may differ in the exact errno that is returned when + // there is no support. Rather than special-casing every possible errno + // representing "not supported", just assume that a non-nil error means + // that seekData/seekHole is not supported. + if _, err := f.Seek(0, seekHole); err != nil { + return nil, nil + } + + // Populate the SparseHoles. + var last, pos int64 = -1, 0 + for { + // Get the location of the next hole section. + if pos, err = fseek(f, pos, seekHole); pos == last || err != nil { + return sph, err + } + offset := pos + last = pos + + // Get the location of the next data section. + if pos, err = fseek(f, pos, seekData); pos == last || err != nil { + return sph, err + } + length := pos - offset + last = pos + + if length > 0 { + sph = append(sph, SparseEntry{offset, length}) + } + } +} + +func fseek(f *os.File, pos int64, whence int) (int64, error) { + pos, err := f.Seek(pos, whence) + if errno(err) == syscall.ENXIO { + // SEEK_DATA returns ENXIO when past the last data fragment, + // which makes determining the size of the last hole difficult. 
+ pos, err = f.Seek(0, io.SeekEnd) + } + return pos, err +} + +func errno(err error) error { + if perr, ok := err.(*os.PathError); ok { + return perr.Err + } + return err +} diff --git a/vendor/github.com/dmcgowan/go-tar/sparse_windows.go b/vendor/github.com/dmcgowan/go-tar/sparse_windows.go new file mode 100644 index 0000000000..05bf1a90bb --- /dev/null +++ b/vendor/github.com/dmcgowan/go-tar/sparse_windows.go @@ -0,0 +1,129 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package tar + +import ( + "os" + "syscall" + "unsafe" +) + +var errInvalidFunc = syscall.Errno(1) // ERROR_INVALID_FUNCTION from WinError.h + +func init() { + sysSparseDetect = sparseDetectWindows + sysSparsePunch = sparsePunchWindows +} + +func sparseDetectWindows(f *os.File) (sph sparseHoles, err error) { + const queryAllocRanges = 0x000940CF // FSCTL_QUERY_ALLOCATED_RANGES from WinIoCtl.h + type allocRangeBuffer struct{ offset, length int64 } // FILE_ALLOCATED_RANGE_BUFFER from WinIoCtl.h + + s, err := f.Stat() + if err != nil { + return nil, err + } + + queryRange := allocRangeBuffer{0, s.Size()} + allocRanges := make([]allocRangeBuffer, 64) + + // Repeatedly query for ranges until the input buffer is large enough. + var bytesReturned uint32 + for { + err := syscall.DeviceIoControl( + syscall.Handle(f.Fd()), queryAllocRanges, + (*byte)(unsafe.Pointer(&queryRange)), uint32(unsafe.Sizeof(queryRange)), + (*byte)(unsafe.Pointer(&allocRanges[0])), uint32(len(allocRanges)*int(unsafe.Sizeof(allocRanges[0]))), + &bytesReturned, nil, + ) + if err == syscall.ERROR_MORE_DATA { + allocRanges = make([]allocRangeBuffer, 2*len(allocRanges)) + continue + } + if err == errInvalidFunc { + return nil, nil // Sparse file not supported on this FS + } + if err != nil { + return nil, err + } + break + } + n := bytesReturned / uint32(unsafe.Sizeof(allocRanges[0])) + allocRanges = append(allocRanges[:n], allocRangeBuffer{s.Size(), 0}) + + // Invert the data fragments into hole fragments. + var pos int64 + for _, r := range allocRanges { + if r.offset > pos { + sph = append(sph, SparseEntry{pos, r.offset - pos}) + } + pos = r.offset + r.length + } + return sph, nil +} + +func sparsePunchWindows(f *os.File, sph sparseHoles) error { + const setSparse = 0x000900C4 // FSCTL_SET_SPARSE from WinIoCtl.h + const setZeroData = 0x000980C8 // FSCTL_SET_ZERO_DATA from WinIoCtl.h + type zeroDataInfo struct{ start, end int64 } // FILE_ZERO_DATA_INFORMATION from WinIoCtl.h + + // Set the file as being sparse. + var bytesReturned uint32 + devErr := syscall.DeviceIoControl( + syscall.Handle(f.Fd()), setSparse, + nil, 0, nil, 0, + &bytesReturned, nil, + ) + if devErr != nil && devErr != errInvalidFunc { + return devErr + } + + // Set the file to the right size. + var size int64 + if len(sph) > 0 { + size = sph[len(sph)-1].endOffset() + } + if err := f.Truncate(size); err != nil { + return err + } + if devErr == errInvalidFunc { + // Sparse file not supported on this FS. + // Call sparsePunchManual since SetEndOfFile does not guarantee that + // the extended space is filled with zeros. + return sparsePunchManual(f, sph) + } + + // Punch holes for all relevant fragments. 
+ for _, s := range sph { + zdi := zeroDataInfo{s.Offset, s.endOffset()} + err := syscall.DeviceIoControl( + syscall.Handle(f.Fd()), setZeroData, + (*byte)(unsafe.Pointer(&zdi)), uint32(unsafe.Sizeof(zdi)), + nil, 0, + &bytesReturned, nil, + ) + if err != nil { + return err + } + } + return nil +} + +// sparsePunchManual writes zeros into each hole. +func sparsePunchManual(f *os.File, sph sparseHoles) error { + const chunkSize = 32 << 10 + zbuf := make([]byte, chunkSize) + for _, s := range sph { + for pos := s.Offset; pos < s.endOffset(); pos += chunkSize { + n := min(chunkSize, s.endOffset()-pos) + if _, err := f.WriteAt(zbuf[:n], pos); err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/github.com/dmcgowan/go-tar/stat_atim.go b/vendor/github.com/dmcgowan/go-tar/stat_actime1.go similarity index 100% rename from vendor/github.com/dmcgowan/go-tar/stat_atim.go rename to vendor/github.com/dmcgowan/go-tar/stat_actime1.go diff --git a/vendor/github.com/dmcgowan/go-tar/stat_atimespec.go b/vendor/github.com/dmcgowan/go-tar/stat_actime2.go similarity index 100% rename from vendor/github.com/dmcgowan/go-tar/stat_atimespec.go rename to vendor/github.com/dmcgowan/go-tar/stat_actime2.go diff --git a/vendor/github.com/dmcgowan/go-tar/stat_unix.go b/vendor/github.com/dmcgowan/go-tar/stat_unix.go index cb843db4cf..868105f338 100644 --- a/vendor/github.com/dmcgowan/go-tar/stat_unix.go +++ b/vendor/github.com/dmcgowan/go-tar/stat_unix.go @@ -8,6 +8,10 @@ package tar import ( "os" + "os/user" + "runtime" + "strconv" + "sync" "syscall" ) @@ -15,6 +19,10 @@ func init() { sysStat = statUnix } +// userMap and groupMap caches UID and GID lookups for performance reasons. +// The downside is that renaming uname or gname by the OS never takes effect. +var userMap, groupMap sync.Map // map[int]string + func statUnix(fi os.FileInfo, h *Header) error { sys, ok := fi.Sys().(*syscall.Stat_t) if !ok { @@ -22,11 +30,67 @@ func statUnix(fi os.FileInfo, h *Header) error { } h.Uid = int(sys.Uid) h.Gid = int(sys.Gid) - // TODO(bradfitz): populate username & group. os/user - // doesn't cache LookupId lookups, and lacks group - // lookup functions. + + // Best effort at populating Uname and Gname. + // The os/user functions may fail for any number of reasons + // (not implemented on that platform, cgo not enabled, etc). + if u, ok := userMap.Load(h.Uid); ok { + h.Uname = u.(string) + } else if u, err := user.LookupId(strconv.Itoa(h.Uid)); err == nil { + h.Uname = u.Username + userMap.Store(h.Uid, h.Uname) + } + if g, ok := groupMap.Load(h.Gid); ok { + h.Gname = g.(string) + } else if g, err := user.LookupGroupId(strconv.Itoa(h.Gid)); err == nil { + h.Gname = g.Name + groupMap.Store(h.Gid, h.Gname) + } + h.AccessTime = statAtime(sys) h.ChangeTime = statCtime(sys) - // TODO(bradfitz): major/minor device numbers? + + // Best effort at populating Devmajor and Devminor. + if h.Typeflag == TypeChar || h.Typeflag == TypeBlock { + dev := uint64(sys.Rdev) // May be int32 or uint32 + switch runtime.GOOS { + case "linux": + // Copied from golang.org/x/sys/unix/dev_linux.go. + major := uint32((dev & 0x00000000000fff00) >> 8) + major |= uint32((dev & 0xfffff00000000000) >> 32) + minor := uint32((dev & 0x00000000000000ff) >> 0) + minor |= uint32((dev & 0x00000ffffff00000) >> 12) + h.Devmajor, h.Devminor = int64(major), int64(minor) + case "darwin": + // Copied from golang.org/x/sys/unix/dev_darwin.go. 
+ major := uint32((dev >> 24) & 0xff) + minor := uint32(dev & 0xffffff) + h.Devmajor, h.Devminor = int64(major), int64(minor) + case "dragonfly": + // Copied from golang.org/x/sys/unix/dev_dragonfly.go. + major := uint32((dev >> 8) & 0xff) + minor := uint32(dev & 0xffff00ff) + h.Devmajor, h.Devminor = int64(major), int64(minor) + case "freebsd": + // Copied from golang.org/x/sys/unix/dev_freebsd.go. + major := uint32((dev >> 8) & 0xff) + minor := uint32(dev & 0xffff00ff) + h.Devmajor, h.Devminor = int64(major), int64(minor) + case "netbsd": + // Copied from golang.org/x/sys/unix/dev_netbsd.go. + major := uint32((dev & 0x000fff00) >> 8) + minor := uint32((dev & 0x000000ff) >> 0) + minor |= uint32((dev & 0xfff00000) >> 12) + h.Devmajor, h.Devminor = int64(major), int64(minor) + case "openbsd": + // Copied from golang.org/x/sys/unix/dev_openbsd.go. + major := uint32((dev & 0x0000ff00) >> 8) + minor := uint32((dev & 0x000000ff) >> 0) + minor |= uint32((dev & 0xffff0000) >> 8) + h.Devmajor, h.Devminor = int64(major), int64(minor) + default: + // TODO: Implement solaris (see https://golang.org/issue/8106) + } + } return nil } diff --git a/vendor/github.com/dmcgowan/go-tar/strconv.go b/vendor/github.com/dmcgowan/go-tar/strconv.go index bb5b51c02d..8bbd65cd1a 100644 --- a/vendor/github.com/dmcgowan/go-tar/strconv.go +++ b/vendor/github.com/dmcgowan/go-tar/strconv.go @@ -12,26 +12,34 @@ import ( "time" ) +// hasNUL reports whether the NUL character exists within s. +func hasNUL(s string) bool { + return strings.IndexByte(s, 0) >= 0 +} + +// isASCII reports whether the input is an ASCII C-style string. func isASCII(s string) bool { for _, c := range s { - if c >= 0x80 { + if c >= 0x80 || c == 0x00 { return false } } return true } +// toASCII converts the input to an ASCII C-style string. +// This a best effort conversion, so invalid characters are dropped. func toASCII(s string) string { if isASCII(s) { return s } - var buf bytes.Buffer + b := make([]byte, 0, len(s)) for _, c := range s { - if c < 0x80 { - buf.WriteByte(byte(c)) + if c < 0x80 && c != 0x00 { + b = append(b, byte(c)) } } - return buf.String() + return string(b) } type parser struct { @@ -45,23 +53,28 @@ type formatter struct { // parseString parses bytes as a NUL-terminated C-style string. // If a NUL byte is not found then the whole slice is returned as a string. func (*parser) parseString(b []byte) string { - n := 0 - for n < len(b) && b[n] != 0 { - n++ + if i := bytes.IndexByte(b, 0); i >= 0 { + return string(b[:i]) } - return string(b[0:n]) + return string(b) } -// Write s into b, terminating it with a NUL if there is room. +// formatString copies s into b, NUL-terminating if possible. func (f *formatter) formatString(b []byte, s string) { if len(s) > len(b) { f.err = ErrFieldTooLong - return } - ascii := toASCII(s) - copy(b, ascii) - if len(ascii) < len(b) { - b[len(ascii)] = 0 + copy(b, s) + if len(s) < len(b) { + b[len(s)] = 0 + } + + // Some buggy readers treat regular files with a trailing slash + // in the V7 path field as a directory even though the full path + // recorded elsewhere (e.g., via PAX record) contains no trailing slash. + if len(s) > len(b) && b[len(b)-1] == '/' { + n := len(strings.TrimRight(s[:len(b)], "/")) + b[n] = 0 // Replace trailing slash with NUL terminator } } @@ -73,7 +86,7 @@ func (f *formatter) formatString(b []byte, s string) { // that the first byte can only be either 0x80 or 0xff. Thus, the first byte is // equivalent to the sign bit in two's complement form. 
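The comment above describes the GNU base-256 (binary) numeric encoding. As a worked illustration (a sketch under the stated assumptions, not part of this patch): an n-byte field keeps the value in two's-complement form, with the top bit of the first byte flagging the encoding, which leaves (n-1)*8 bits of signed range; any field of nine or more bytes can hold every int64.

package main

import "fmt"

// fitsInBase256Sketch reports whether x can be stored in an n-byte field
// using the base-256 encoding described above.
func fitsInBase256Sketch(n int, x int64) bool {
	binBits := uint(n-1) * 8
	return n >= 9 || (x >= -1<<binBits && x < 1<<binBits)
}

func main() {
	// A 12-byte size field has 88 usable bits, so it always fits an int64.
	fmt.Println(fitsInBase256Sketch(12, 1<<40)) // true
	// An 8-byte field only has 56 signed bits of range.
	fmt.Println(fitsInBase256Sketch(8, 1<<60)) // false
	fmt.Println(fitsInBase256Sketch(8, 1<<50)) // true
}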
func fitsInBase256(n int, x int64) bool { - var binBits = uint(n-1) * 8 + binBits := uint(n-1) * 8 return n >= 9 || (x >= -1<= 0; i-- { b[i] = byte(x) @@ -155,6 +174,11 @@ func (p *parser) parseOctal(b []byte) int64 { } func (f *formatter) formatOctal(b []byte, x int64) { + if !fitsInOctal(len(b), x) { + x = 0 // Last resort, just write zero + f.err = ErrFieldTooLong + } + s := strconv.FormatInt(x, 8) // Add leading zeros, but leave room for a NUL. if n := len(b) - len(s) - 1; n > 0 { @@ -163,6 +187,13 @@ func (f *formatter) formatOctal(b []byte, x int64) { f.formatString(b, s) } +// fitsInOctal reports whether the integer x fits in a field n-bytes long +// using octal encoding with the appropriate NUL terminator. +func fitsInOctal(n int, x int64) bool { + octBits := uint(n-1) * 3 + return x >= 0 && (n >= 22 || x < 1<= 0 { + return false + } + switch k { + case paxPath, paxLinkpath, paxUname, paxGname: + return !hasNUL(v) + default: + return !hasNUL(k) } - return record } diff --git a/vendor/github.com/dmcgowan/go-tar/writer.go b/vendor/github.com/dmcgowan/go-tar/writer.go index 596fb8b9e1..2eed619348 100644 --- a/vendor/github.com/dmcgowan/go-tar/writer.go +++ b/vendor/github.com/dmcgowan/go-tar/writer.go @@ -4,12 +4,8 @@ package tar -// TODO(dsymonds): -// - catch more errors (no first header, etc.) - import ( "bytes" - "errors" "fmt" "io" "path" @@ -19,234 +15,365 @@ import ( "time" ) -var ( - ErrWriteTooLong = errors.New("archive/tar: write too long") - ErrFieldTooLong = errors.New("archive/tar: header field too long") - ErrWriteAfterClose = errors.New("archive/tar: write after close") - errInvalidHeader = errors.New("archive/tar: header field too long or contains invalid values") -) - -// A Writer provides sequential writing of a tar archive in POSIX.1 format. -// A tar archive consists of a sequence of files. -// Call WriteHeader to begin a new file, and then call Write to supply that file's data, -// writing at most hdr.Size bytes in total. +// Writer provides sequential writing of a tar archive. +// Write.WriteHeader begins a new file with the provided Header, +// and then Writer can be treated as an io.Writer to supply that file's data. type Writer struct { - w io.Writer - err error - nb int64 // number of unwritten bytes for current file entry - pad int64 // amount of padding to write after current file entry - closed bool - usedBinary bool // whether the binary numeric field extension was used - preferPax bool // use PAX header instead of binary numeric header - hdrBuff block // buffer to use in writeHeader when writing a regular header - paxHdrBuff block // buffer to use in writeHeader when writing a PAX header + w io.Writer + pad int64 // Amount of padding to write after current file entry + curr fileWriter // Writer for current file entry + hdr Header // Shallow copy of Header that is safe for mutations + blk block // Buffer to use as temporary local storage + + // err is a persistent error. + // It is only the responsibility of every exported method of Writer to + // ensure that this error is sticky. + err error } // NewWriter creates a new Writer writing to w. -func NewWriter(w io.Writer) *Writer { return &Writer{w: w} } +func NewWriter(w io.Writer) *Writer { + return &Writer{w: w, curr: ®FileWriter{w, 0}} +} -// Flush finishes writing the current file (optional). +type fileWriter interface { + io.Writer + fileState + + ReadFrom(io.Reader) (int64, error) +} + +// Flush finishes writing the current file's block padding. 
+// The current file must be fully written before Flush can be called. +// +// Deprecated: This is unnecessary as the next call to WriteHeader or Close +// will implicitly flush out the file's padding. func (tw *Writer) Flush() error { - if tw.nb > 0 { - tw.err = fmt.Errorf("archive/tar: missed writing %d bytes", tw.nb) - return tw.err - } - - n := tw.nb + tw.pad - for n > 0 && tw.err == nil { - nr := n - if nr > blockSize { - nr = blockSize - } - var nw int - nw, tw.err = tw.w.Write(zeroBlock[0:nr]) - n -= int64(nw) - } - tw.nb = 0 - tw.pad = 0 - return tw.err -} - -var ( - minTime = time.Unix(0, 0) - // There is room for 11 octal digits (33 bits) of mtime. - maxTime = minTime.Add((1<<33 - 1) * time.Second) -) - -// WriteHeader writes hdr and prepares to accept the file's contents. -// WriteHeader calls Flush if it is not the first header. -// Calling after a Close will return ErrWriteAfterClose. -func (tw *Writer) WriteHeader(hdr *Header) error { - return tw.writeHeader(hdr, true) -} - -// WriteHeader writes hdr and prepares to accept the file's contents. -// WriteHeader calls Flush if it is not the first header. -// Calling after a Close will return ErrWriteAfterClose. -// As this method is called internally by writePax header to allow it to -// suppress writing the pax header. -func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error { - if tw.closed { - return ErrWriteAfterClose - } - if tw.err == nil { - tw.Flush() - } if tw.err != nil { return tw.err } - - // a map to hold pax header records, if any are needed - paxHeaders := make(map[string]string) - - // TODO(dsnet): we might want to use PAX headers for - // subsecond time resolution, but for now let's just capture - // too long fields or non ascii characters - - // We need to select which scratch buffer to use carefully, - // since this method is called recursively to write PAX headers. - // If allowPax is true, this is the non-recursive call, and we will use hdrBuff. - // If allowPax is false, we are being called by writePAXHeader, and hdrBuff is - // already being used by the non-recursive call, so we must use paxHdrBuff. - header := &tw.hdrBuff - if !allowPax { - header = &tw.paxHdrBuff + if nb := tw.curr.LogicalRemaining(); nb > 0 { + return fmt.Errorf("tar: missed writing %d bytes", nb) } - copy(header[:], zeroBlock[:]) - - // Wrappers around formatter that automatically sets paxHeaders if the - // argument extends beyond the capacity of the input byte slice. - var f formatter - var formatString = func(b []byte, s string, paxKeyword string) { - needsPaxHeader := paxKeyword != paxNone && len(s) > len(b) || !isASCII(s) - if needsPaxHeader { - paxHeaders[paxKeyword] = s - return - } - f.formatString(b, s) - } - var formatNumeric = func(b []byte, x int64, paxKeyword string) { - // Try octal first. - s := strconv.FormatInt(x, 8) - if len(s) < len(b) { - f.formatOctal(b, x) - return - } - - // If it is too long for octal, and PAX is preferred, use a PAX header. - if paxKeyword != paxNone && tw.preferPax { - f.formatOctal(b, 0) - s := strconv.FormatInt(x, 10) - paxHeaders[paxKeyword] = s - return - } - - tw.usedBinary = true - f.formatNumeric(b, x) - } - - // Handle out of range ModTime carefully. - var modTime int64 - if !hdr.ModTime.Before(minTime) && !hdr.ModTime.After(maxTime) { - modTime = hdr.ModTime.Unix() - } - - v7 := header.V7() - formatString(v7.Name(), hdr.Name, paxPath) - // TODO(dsnet): The GNU format permits the mode field to be encoded in - // base-256 format. 
Thus, we can use formatNumeric instead of formatOctal. - f.formatOctal(v7.Mode(), hdr.Mode) - formatNumeric(v7.UID(), int64(hdr.Uid), paxUid) - formatNumeric(v7.GID(), int64(hdr.Gid), paxGid) - formatNumeric(v7.Size(), hdr.Size, paxSize) - // TODO(dsnet): Consider using PAX for finer time granularity. - formatNumeric(v7.ModTime(), modTime, paxNone) - v7.TypeFlag()[0] = hdr.Typeflag - formatString(v7.LinkName(), hdr.Linkname, paxLinkpath) - - ustar := header.USTAR() - formatString(ustar.UserName(), hdr.Uname, paxUname) - formatString(ustar.GroupName(), hdr.Gname, paxGname) - formatNumeric(ustar.DevMajor(), hdr.Devmajor, paxNone) - formatNumeric(ustar.DevMinor(), hdr.Devminor, paxNone) - - // TODO(dsnet): The logic surrounding the prefix field is broken when trying - // to encode the header as GNU format. The challenge with the current logic - // is that we are unsure what format we are using at any given moment until - // we have processed *all* of the fields. The problem is that by the time - // all fields have been processed, some work has already been done to handle - // each field under the assumption that it is for one given format or - // another. In some situations, this causes the Writer to be confused and - // encode a prefix field when the format being used is GNU. Thus, producing - // an invalid tar file. - // - // As a short-term fix, we disable the logic to use the prefix field, which - // will force the badly generated GNU files to become encoded as being - // the PAX format. - // - // As an alternative fix, we could hard-code preferPax to be true. However, - // this is problematic for the following reasons: - // * The preferPax functionality is not tested at all. - // * This can result in headers that try to use both the GNU and PAX - // features at the same time, which is also wrong. - // - // The proper fix for this is to use a two-pass method: - // * The first pass simply determines what set of formats can possibly - // encode the given header. - // * The second pass actually encodes the header as that given format - // without worrying about violating the format. - // - // See the following: - // https://golang.org/issue/12594 - // https://golang.org/issue/17630 - // https://golang.org/issue/9683 - const usePrefix = false - - // try to use a ustar header when only the name is too long - _, paxPathUsed := paxHeaders[paxPath] - if usePrefix && !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed { - prefix, suffix, ok := splitUSTARPath(hdr.Name) - if ok { - // Since we can encode in USTAR format, disable PAX header. - delete(paxHeaders, paxPath) - - // Update the path fields - formatString(v7.Name(), suffix, paxNone) - formatString(ustar.Prefix(), prefix, paxNone) - } - } - - if tw.usedBinary { - header.SetFormat(formatGNU) - } else { - header.SetFormat(formatUSTAR) - } - - // Check if there were any formatting errors. - if f.err != nil { - tw.err = f.err + if _, tw.err = tw.w.Write(zeroBlock[:tw.pad]); tw.err != nil { return tw.err } + tw.pad = 0 + return nil +} - if allowPax { - for k, v := range hdr.Xattrs { - paxHeaders[paxXattr+k] = v +// WriteHeader writes hdr and prepares to accept the file's contents. +// The Header.Size determines how many bytes can be written for the next file. +// If the current file is not fully written, then this returns an error. +// This implicitly flushes any padding necessary before writing the header. 
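For reference, a hedged usage sketch of the WriteHeader/Write flow described above (not part of this patch), using the vendored package from this diff; the output file name and entry contents are illustrative only.

package main

import (
	"log"
	"os"

	tar "github.com/dmcgowan/go-tar"
)

func main() {
	f, err := os.Create("example.tar") // hypothetical output file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	tw := tar.NewWriter(f)
	body := []byte("hello world\n")

	// WriteHeader starts a new file entry; Header.Size bounds how much
	// may be written before the next WriteHeader or Close.
	if err := tw.WriteHeader(&tar.Header{
		Name: "hello.txt",
		Mode: 0644,
		Size: int64(len(body)),
	}); err != nil {
		log.Fatal(err)
	}
	if _, err := tw.Write(body); err != nil {
		log.Fatal(err)
	}

	// Close flushes the block padding and writes the two-zero-block trailer.
	if err := tw.Close(); err != nil {
		log.Fatal(err)
	}
}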
+func (tw *Writer) WriteHeader(hdr *Header) error { + if err := tw.Flush(); err != nil { + return err + } + tw.hdr = *hdr // Shallow copy of Header + + // Round ModTime and ignore AccessTime and ChangeTime unless + // the format is explicitly chosen. + // This ensures nominal usage of WriteHeader (without specifying the format) + // does not always result in the PAX format being chosen, which + // causes a 1KiB increase to every header. + if tw.hdr.Format == FormatUnknown { + tw.hdr.ModTime = tw.hdr.ModTime.Round(time.Second) + tw.hdr.AccessTime = time.Time{} + tw.hdr.ChangeTime = time.Time{} + } + + allowedFormats, paxHdrs, err := tw.hdr.allowedFormats() + switch { + case allowedFormats.has(FormatUSTAR): + tw.err = tw.writeUSTARHeader(&tw.hdr) + return tw.err + case allowedFormats.has(FormatPAX): + tw.err = tw.writePAXHeader(&tw.hdr, paxHdrs) + return tw.err + case allowedFormats.has(FormatGNU): + tw.err = tw.writeGNUHeader(&tw.hdr) + return tw.err + default: + return err // Non-fatal error + } +} + +func (tw *Writer) writeUSTARHeader(hdr *Header) error { + // Check if we can use USTAR prefix/suffix splitting. + var namePrefix string + if prefix, suffix, ok := splitUSTARPath(hdr.Name); ok { + namePrefix, hdr.Name = prefix, suffix + } + + // Pack the main header. + var f formatter + blk := tw.templateV7Plus(hdr, f.formatString, f.formatOctal) + f.formatString(blk.USTAR().Prefix(), namePrefix) + blk.SetFormat(FormatUSTAR) + if f.err != nil { + return f.err // Should never happen since header is validated + } + return tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag) +} + +func (tw *Writer) writePAXHeader(hdr *Header, paxHdrs map[string]string) error { + realName, realSize := hdr.Name, hdr.Size + + // Handle sparse files. + var spd sparseDatas + var spb []byte + if len(hdr.SparseHoles) > 0 { + sph := append([]SparseEntry{}, hdr.SparseHoles...) // Copy sparse map + sph = alignSparseEntries(sph, hdr.Size) + spd = invertSparseEntries(sph, hdr.Size) + + // Format the sparse map. + hdr.Size = 0 // Replace with encoded size + spb = append(strconv.AppendInt(spb, int64(len(spd)), 10), '\n') + for _, s := range spd { + hdr.Size += s.Length + spb = append(strconv.AppendInt(spb, s.Offset, 10), '\n') + spb = append(strconv.AppendInt(spb, s.Length, 10), '\n') + } + pad := blockPadding(int64(len(spb))) + spb = append(spb, zeroBlock[:pad]...) + hdr.Size += int64(len(spb)) // Accounts for encoded sparse map + + // Add and modify appropriate PAX records. + dir, file := path.Split(realName) + hdr.Name = path.Join(dir, "GNUSparseFile.0", file) + paxHdrs[paxGNUSparseMajor] = "1" + paxHdrs[paxGNUSparseMinor] = "0" + paxHdrs[paxGNUSparseName] = realName + paxHdrs[paxGNUSparseRealSize] = strconv.FormatInt(realSize, 10) + paxHdrs[paxSize] = strconv.FormatInt(hdr.Size, 10) + delete(paxHdrs, paxPath) // Recorded by paxGNUSparseName + } + + // Write PAX records to the output. + isGlobal := hdr.Typeflag == TypeXGlobalHeader + if len(paxHdrs) > 0 || isGlobal { + // Sort keys for deterministic ordering. + var keys []string + for k := range paxHdrs { + keys = append(keys, k) + } + sort.Strings(keys) + + // Write each record to a buffer. + var buf bytes.Buffer + for _, k := range keys { + rec, err := formatPAXRecord(k, paxHdrs[k]) + if err != nil { + return err + } + buf.WriteString(rec) + } + + // Write the extended header file. 
+ var name string + var flag byte + if isGlobal { + name = "GlobalHead.0.0" + flag = TypeXGlobalHeader + } else { + dir, file := path.Split(realName) + name = path.Join(dir, "PaxHeaders.0", file) + flag = TypeXHeader + } + data := buf.String() + if err := tw.writeRawFile(name, data, flag, FormatPAX); err != nil || isGlobal { + return err // Global headers return here } } - if len(paxHeaders) > 0 { - if !allowPax { - return errInvalidHeader + // Pack the main header. + var f formatter // Ignore errors since they are expected + fmtStr := func(b []byte, s string) { f.formatString(b, toASCII(s)) } + blk := tw.templateV7Plus(hdr, fmtStr, f.formatOctal) + blk.SetFormat(FormatPAX) + if err := tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag); err != nil { + return err + } + + // Write the sparse map and setup the sparse writer if necessary. + if len(spd) > 0 { + // Use tw.curr since the sparse map is accounted for in hdr.Size. + if _, err := tw.curr.Write(spb); err != nil { + return err } - if err := tw.writePAXHeader(hdr, paxHeaders); err != nil { + tw.curr = &sparseFileWriter{tw.curr, spd, 0} + } + return nil +} + +func (tw *Writer) writeGNUHeader(hdr *Header) error { + // Use long-link files if Name or Linkname exceeds the field size. + const longName = "././@LongLink" + if len(hdr.Name) > nameSize { + data := hdr.Name + "\x00" + if err := tw.writeRawFile(longName, data, TypeGNULongName, FormatGNU); err != nil { + return err + } + } + if len(hdr.Linkname) > nameSize { + data := hdr.Linkname + "\x00" + if err := tw.writeRawFile(longName, data, TypeGNULongLink, FormatGNU); err != nil { return err } } - tw.nb = hdr.Size - tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize - _, tw.err = tw.w.Write(header[:]) - return tw.err + // Pack the main header. + var f formatter // Ignore errors since they are expected + var spd sparseDatas + var spb []byte + blk := tw.templateV7Plus(hdr, f.formatString, f.formatNumeric) + if !hdr.AccessTime.IsZero() { + f.formatNumeric(blk.GNU().AccessTime(), hdr.AccessTime.Unix()) + } + if !hdr.ChangeTime.IsZero() { + f.formatNumeric(blk.GNU().ChangeTime(), hdr.ChangeTime.Unix()) + } + if hdr.Typeflag == TypeGNUSparse { + sph := append([]SparseEntry{}, hdr.SparseHoles...) // Copy sparse map + sph = alignSparseEntries(sph, hdr.Size) + spd = invertSparseEntries(sph, hdr.Size) + + // Format the sparse map. + formatSPD := func(sp sparseDatas, sa sparseArray) sparseDatas { + for i := 0; len(sp) > 0 && i < sa.MaxEntries(); i++ { + f.formatNumeric(sa.Entry(i).Offset(), sp[0].Offset) + f.formatNumeric(sa.Entry(i).Length(), sp[0].Length) + sp = sp[1:] + } + if len(sp) > 0 { + sa.IsExtended()[0] = 1 + } + return sp + } + sp2 := formatSPD(spd, blk.GNU().Sparse()) + for len(sp2) > 0 { + var spHdr block + sp2 = formatSPD(sp2, spHdr.Sparse()) + spb = append(spb, spHdr[:]...) + } + + // Update size fields in the header block. + realSize := hdr.Size + hdr.Size = 0 // Encoded size; does not account for encoded sparse map + for _, s := range spd { + hdr.Size += s.Length + } + copy(blk.V7().Size(), zeroBlock[:]) // Reset field + f.formatNumeric(blk.V7().Size(), hdr.Size) + f.formatNumeric(blk.GNU().RealSize(), realSize) + } + blk.SetFormat(FormatGNU) + if err := tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag); err != nil { + return err + } + + // Write the extended sparse map and setup the sparse writer if necessary. + if len(spd) > 0 { + // Use tw.w since the sparse map is not accounted for in hdr.Size. 
+ if _, err := tw.w.Write(spb); err != nil { + return err + } + tw.curr = &sparseFileWriter{tw.curr, spd, 0} + } + return nil +} + +type ( + stringFormatter func([]byte, string) + numberFormatter func([]byte, int64) +) + +// templateV7Plus fills out the V7 fields of a block using values from hdr. +// It also fills out fields (uname, gname, devmajor, devminor) that are +// shared in the USTAR, PAX, and GNU formats using the provided formatters. +// +// The block returned is only valid until the next call to +// templateV7Plus or writeRawFile. +func (tw *Writer) templateV7Plus(hdr *Header, fmtStr stringFormatter, fmtNum numberFormatter) *block { + tw.blk.Reset() + + modTime := hdr.ModTime + if modTime.IsZero() { + modTime = time.Unix(0, 0) + } + + v7 := tw.blk.V7() + v7.TypeFlag()[0] = hdr.Typeflag + fmtStr(v7.Name(), hdr.Name) + fmtStr(v7.LinkName(), hdr.Linkname) + fmtNum(v7.Mode(), hdr.Mode) + fmtNum(v7.UID(), int64(hdr.Uid)) + fmtNum(v7.GID(), int64(hdr.Gid)) + fmtNum(v7.Size(), hdr.Size) + fmtNum(v7.ModTime(), modTime.Unix()) + + ustar := tw.blk.USTAR() + fmtStr(ustar.UserName(), hdr.Uname) + fmtStr(ustar.GroupName(), hdr.Gname) + fmtNum(ustar.DevMajor(), hdr.Devmajor) + fmtNum(ustar.DevMinor(), hdr.Devminor) + + return &tw.blk +} + +// writeRawFile writes a minimal file with the given name and flag type. +// It uses format to encode the header format and will write data as the body. +// It uses default values for all of the other fields (as BSD and GNU tar does). +func (tw *Writer) writeRawFile(name, data string, flag byte, format Format) error { + tw.blk.Reset() + + // Best effort for the filename. + name = toASCII(name) + if len(name) > nameSize { + name = name[:nameSize] + } + name = strings.TrimRight(name, "/") + + var f formatter + v7 := tw.blk.V7() + v7.TypeFlag()[0] = flag + f.formatString(v7.Name(), name) + f.formatOctal(v7.Mode(), 0) + f.formatOctal(v7.UID(), 0) + f.formatOctal(v7.GID(), 0) + f.formatOctal(v7.Size(), int64(len(data))) // Must be < 8GiB + f.formatOctal(v7.ModTime(), 0) + tw.blk.SetFormat(format) + if f.err != nil { + return f.err // Only occurs if size condition is violated + } + + // Write the header and data. + if err := tw.writeRawHeader(&tw.blk, int64(len(data)), flag); err != nil { + return err + } + _, err := io.WriteString(tw, data) + return err +} + +// writeRawHeader writes the value of blk, regardless of its value. +// It sets up the Writer such that it can accept a file of the given size. +// If the flag is a special header-only flag, then the size is treated as zero. +func (tw *Writer) writeRawHeader(blk *block, size int64, flag byte) error { + if err := tw.Flush(); err != nil { + return err + } + if _, err := tw.w.Write(blk[:]); err != nil { + return err + } + if isHeaderOnlyType(flag) { + size = 0 + } + tw.curr = ®FileWriter{tw.w, size} + tw.pad = blockPadding(size) + return nil } // splitUSTARPath splits a path according to USTAR prefix and suffix rules. @@ -270,95 +397,233 @@ func splitUSTARPath(name string) (prefix, suffix string, ok bool) { return name[:i], name[i+1:], true } -// writePaxHeader writes an extended pax header to the -// archive. -func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) error { - // Prepare extended header - ext := new(Header) - ext.Typeflag = TypeXHeader - // Setting ModTime is required for reader parsing to - // succeed, and seems harmless enough. - ext.ModTime = hdr.ModTime - // The spec asks that we namespace our pseudo files - // with the current pid. 
However, this results in differing outputs - // for identical inputs. As such, the constant 0 is now used instead. - // golang.org/issue/12358 - dir, file := path.Split(hdr.Name) - fullName := path.Join(dir, "PaxHeaders.0", file) - - ascii := toASCII(fullName) - if len(ascii) > nameSize { - ascii = ascii[:nameSize] - } - ext.Name = ascii - // Construct the body - var buf bytes.Buffer - - // Keys are sorted before writing to body to allow deterministic output. - keys := make([]string, 0, len(paxHeaders)) - for k := range paxHeaders { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - fmt.Fprint(&buf, formatPAXRecord(k, paxHeaders[k])) - } - - ext.Size = int64(len(buf.Bytes())) - if err := tw.writeHeader(ext, false); err != nil { - return err - } - if _, err := tw.Write(buf.Bytes()); err != nil { - return err - } - if err := tw.Flush(); err != nil { - return err - } - return nil -} - -// Write writes to the current entry in the tar archive. +// Write writes to the current file in the tar archive. // Write returns the error ErrWriteTooLong if more than -// hdr.Size bytes are written after WriteHeader. -func (tw *Writer) Write(b []byte) (n int, err error) { - if tw.closed { - err = ErrWriteAfterClose - return +// Header.Size bytes are written after WriteHeader. +// +// If the current file is sparse, then the regions marked as a hole +// must be written as NUL-bytes. +// +// Calling Write on special types like TypeLink, TypeSymlink, TypeChar, +// TypeBlock, TypeDir, and TypeFifo returns (0, ErrWriteTooLong) regardless +// of what the Header.Size claims. +func (tw *Writer) Write(b []byte) (int, error) { + if tw.err != nil { + return 0, tw.err } - overwrite := false - if int64(len(b)) > tw.nb { - b = b[0:tw.nb] - overwrite = true + n, err := tw.curr.Write(b) + if err != nil && err != ErrWriteTooLong { + tw.err = err } - n, err = tw.w.Write(b) - tw.nb -= int64(n) - if err == nil && overwrite { - err = ErrWriteTooLong - return - } - tw.err = err - return + return n, err } -// Close closes the tar archive, flushing any unwritten -// data to the underlying writer. -func (tw *Writer) Close() error { - if tw.err != nil || tw.closed { - return tw.err +// ReadFrom populates the content of the current file by reading from r. +// The bytes read must match the number of remaining bytes in the current file. +// +// If the current file is sparse and r is an io.ReadSeeker, +// then ReadFrom uses Seek to skip past holes defined in Header.SparseHoles, +// assuming that skipped regions are all NULs. +// This always reads the last byte to ensure r is the right size. +func (tw *Writer) ReadFrom(r io.Reader) (int64, error) { + if tw.err != nil { + return 0, tw.err + } + n, err := tw.curr.ReadFrom(r) + if err != nil && err != ErrWriteTooLong { + tw.err = err + } + return n, err +} + +// Close closes the tar archive by flushing the padding, and writing the footer. +// If the current file (from a prior call to WriteHeader) is not fully written, +// then this returns an error. +func (tw *Writer) Close() error { + if tw.err == ErrWriteAfterClose { + return nil } - tw.Flush() - tw.closed = true if tw.err != nil { return tw.err } - // trailer: two zero blocks - for i := 0; i < 2; i++ { - _, tw.err = tw.w.Write(zeroBlock[:]) - if tw.err != nil { - break + // Trailer: two zero blocks. + err := tw.Flush() + for i := 0; i < 2 && err == nil; i++ { + _, err = tw.w.Write(zeroBlock[:]) + } + + // Ensure all future actions are invalid. 
+ tw.err = ErrWriteAfterClose + return err // Report IO errors +} + +// regFileWriter is a fileWriter for writing data to a regular file entry. +type regFileWriter struct { + w io.Writer // Underlying Writer + nb int64 // Number of remaining bytes to write +} + +func (fw *regFileWriter) Write(b []byte) (n int, err error) { + overwrite := int64(len(b)) > fw.nb + if overwrite { + b = b[:fw.nb] + } + if len(b) > 0 { + n, err = fw.w.Write(b) + fw.nb -= int64(n) + } + switch { + case err != nil: + return n, err + case overwrite: + return n, ErrWriteTooLong + default: + return n, nil + } +} + +func (fw *regFileWriter) ReadFrom(r io.Reader) (int64, error) { + return io.Copy(struct{ io.Writer }{fw}, r) +} + +func (fw regFileWriter) LogicalRemaining() int64 { + return fw.nb +} +func (fw regFileWriter) PhysicalRemaining() int64 { + return fw.nb +} + +// sparseFileWriter is a fileWriter for writing data to a sparse file entry. +type sparseFileWriter struct { + fw fileWriter // Underlying fileWriter + sp sparseDatas // Normalized list of data fragments + pos int64 // Current position in sparse file +} + +func (sw *sparseFileWriter) Write(b []byte) (n int, err error) { + overwrite := int64(len(b)) > sw.LogicalRemaining() + if overwrite { + b = b[:sw.LogicalRemaining()] + } + + b0 := b + endPos := sw.pos + int64(len(b)) + for endPos > sw.pos && err == nil { + var nf int // Bytes written in fragment + dataStart, dataEnd := sw.sp[0].Offset, sw.sp[0].endOffset() + if sw.pos < dataStart { // In a hole fragment + bf := b[:min(int64(len(b)), dataStart-sw.pos)] + nf, err = zeroWriter{}.Write(bf) + } else { // In a data fragment + bf := b[:min(int64(len(b)), dataEnd-sw.pos)] + nf, err = sw.fw.Write(bf) + } + b = b[nf:] + sw.pos += int64(nf) + if sw.pos >= dataEnd && len(sw.sp) > 1 { + sw.sp = sw.sp[1:] // Ensure last fragment always remains } } - return tw.err + + n = len(b0) - len(b) + switch { + case err == ErrWriteTooLong: + return n, errMissData // Not possible; implies bug in validation logic + case err != nil: + return n, err + case sw.LogicalRemaining() == 0 && sw.PhysicalRemaining() > 0: + return n, errUnrefData // Not possible; implies bug in validation logic + case overwrite: + return n, ErrWriteTooLong + default: + return n, nil + } +} + +func (sw *sparseFileWriter) ReadFrom(r io.Reader) (n int64, err error) { + rs, ok := r.(io.ReadSeeker) + if ok { + if _, err := rs.Seek(0, io.SeekCurrent); err != nil { + ok = false // Not all io.Seeker can really seek + } + } + if !ok { + return io.Copy(struct{ io.Writer }{sw}, r) + } + + var readLastByte bool + pos0 := sw.pos + for sw.LogicalRemaining() > 0 && !readLastByte && err == nil { + var nf int64 // Size of fragment + dataStart, dataEnd := sw.sp[0].Offset, sw.sp[0].endOffset() + if sw.pos < dataStart { // In a hole fragment + nf = dataStart - sw.pos + if sw.PhysicalRemaining() == 0 { + readLastByte = true + nf-- + } + _, err = rs.Seek(nf, io.SeekCurrent) + } else { // In a data fragment + nf = dataEnd - sw.pos + nf, err = io.CopyN(sw.fw, rs, nf) + } + sw.pos += nf + if sw.pos >= dataEnd && len(sw.sp) > 1 { + sw.sp = sw.sp[1:] // Ensure last fragment always remains + } + } + + // If the last fragment is a hole, then seek to 1-byte before EOF, and + // read a single byte to ensure the file is the right size. 
+ if readLastByte && err == nil { + _, err = mustReadFull(rs, []byte{0}) + sw.pos++ + } + + n = sw.pos - pos0 + switch { + case err == io.EOF: + return n, io.ErrUnexpectedEOF + case err == ErrWriteTooLong: + return n, errMissData // Not possible; implies bug in validation logic + case err != nil: + return n, err + case sw.LogicalRemaining() == 0 && sw.PhysicalRemaining() > 0: + return n, errUnrefData // Not possible; implies bug in validation logic + default: + return n, ensureEOF(rs) + } +} + +func (sw sparseFileWriter) LogicalRemaining() int64 { + return sw.sp[len(sw.sp)-1].endOffset() - sw.pos +} +func (sw sparseFileWriter) PhysicalRemaining() int64 { + return sw.fw.PhysicalRemaining() +} + +// zeroWriter may only be written with NULs, otherwise it returns errWriteHole. +type zeroWriter struct{} + +func (zeroWriter) Write(b []byte) (int, error) { + for i, c := range b { + if c != 0 { + return i, errWriteHole + } + } + return len(b), nil +} + +// ensureEOF checks whether r is at EOF, reporting ErrWriteTooLong if not so. +func ensureEOF(r io.Reader) error { + n, err := tryReadFull(r, []byte{0}) + switch { + case n > 0: + return ErrWriteTooLong + case err == io.EOF: + return nil + default: + return err + } } diff --git a/vendor/github.com/stevvooe/ttrpc/LICENSE b/vendor/github.com/stevvooe/ttrpc/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/github.com/stevvooe/ttrpc/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/stevvooe/ttrpc/README.md b/vendor/github.com/stevvooe/ttrpc/README.md new file mode 100644 index 0000000000..b246e578b2 --- /dev/null +++ b/vendor/github.com/stevvooe/ttrpc/README.md @@ -0,0 +1,52 @@ +# ttrpc + +[![Build Status](https://travis-ci.org/stevvooe/ttrpc.svg?branch=master)](https://travis-ci.org/stevvooe/ttrpc) + +GRPC for low-memory environments. + +The existing grpc-go project requires a lot of memory overhead for importing +packages and at runtime. While this is great for many services with low density +requirements, this can be a problem when running a large number of services on +a single machine or on a machine with a small amount of memory. + +Using the same GRPC definitions, this project reduces the binary size and +protocol overhead required. We do this by eliding the `net/http`, `net/http2` +and `grpc` package used by grpc replacing it with a lightweight framing +protocol. 
The result is smaller binaries that use less resident memory with
+the same ease of use as GRPC.
+
+Please note that while this project supports generating either end of the
+protocol, the generated service definitions will be incompatible with regular
+GRPC services, as they do not speak the same protocol.
+
+# Usage
+
+Create a gogo vanity binary (see
+[`cmd/protoc-gen-gogottrpc/main.go`](cmd/protoc-gen-gogottrpc/main.go) for an
+example with the ttrpc plugin enabled).
+
+It's recommended to use [`protobuild`](https://github.com/stevvooe/protobuild)
+to build the protobufs for this project, but this will work with protoc
+directly, if required.
+
+# Differences from GRPC
+
+- The protocol stack has been replaced with a lighter protocol that doesn't
+  require http, http2 and tls.
+- The client and server interface are identical whereas in GRPC there is a
+  client and server interface that are different.
+- The Go stdlib context package is used instead.
+- No support for streams yet.
+
+# Status
+
+Very new. YMMV.
+
+TODO:
+
+- [X] Plumb error codes and GRPC status
+- [X] Remove use of any type and dependency on typeurl package
+- [X] Ensure that protocol can support streaming in the future
+- [ ] Document protocol layout
+- [ ] Add testing under concurrent load to ensure
+- [ ] Verify connection error handling
diff --git a/vendor/github.com/stevvooe/ttrpc/channel.go b/vendor/github.com/stevvooe/ttrpc/channel.go
new file mode 100644
index 0000000000..4a33827a43
--- /dev/null
+++ b/vendor/github.com/stevvooe/ttrpc/channel.go
@@ -0,0 +1,135 @@
+package ttrpc
+
+import (
+ "bufio"
+ "context"
+ "encoding/binary"
+ "io"
+ "sync"
+
+ "github.com/pkg/errors"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
+const (
+ messageHeaderLength = 10
+ messageLengthMax = 4 << 20
+)
+
+type messageType uint8
+
+const (
+ messageTypeRequest messageType = 0x1
+ messageTypeResponse messageType = 0x2
+)
+
+// messageHeader represents the fixed-length message header of 10 bytes sent
+// with every request.
+type messageHeader struct {
+ Length uint32 // length excluding this header. b[:4]
+ StreamID uint32 // identifies which request stream message is a part of. b[4:8]
+ Type messageType // message type b[8]
+ Flags uint8 // reserved b[9]
+}
+
+func readMessageHeader(p []byte, r io.Reader) (messageHeader, error) {
+ _, err := io.ReadFull(r, p[:messageHeaderLength])
+ if err != nil {
+ return messageHeader{}, err
+ }
+
+ return messageHeader{
+ Length: binary.BigEndian.Uint32(p[:4]),
+ StreamID: binary.BigEndian.Uint32(p[4:8]),
+ Type: messageType(p[8]),
+ Flags: p[9],
+ }, nil
+}
+
+func writeMessageHeader(w io.Writer, p []byte, mh messageHeader) error {
+ binary.BigEndian.PutUint32(p[:4], mh.Length)
+ binary.BigEndian.PutUint32(p[4:8], mh.StreamID)
+ p[8] = byte(mh.Type)
+ p[9] = mh.Flags
+
+ _, err := w.Write(p[:])
+ return err
+}
+
+var buffers sync.Pool
+
+type channel struct {
+ bw *bufio.Writer
+ br *bufio.Reader
+ hrbuf [messageHeaderLength]byte // avoid alloc when reading header
+ hwbuf [messageHeaderLength]byte
+}
+
+func newChannel(w io.Writer, r io.Reader) *channel {
+ return &channel{
+ bw: bufio.NewWriter(w),
+ br: bufio.NewReader(r),
+ }
+}
+
+// recv a message from the channel. The returned buffer contains the message.
+//
+// If a valid grpc status is returned, the message header
+// returned will be valid and caller should send that along to
+// the correct consumer. The bytes on the underlying channel
+// will be discarded.
+func (ch *channel) recv(ctx context.Context) (messageHeader, []byte, error) { + mh, err := readMessageHeader(ch.hrbuf[:], ch.br) + if err != nil { + return messageHeader{}, nil, err + } + + if mh.Length > uint32(messageLengthMax) { + if _, err := ch.br.Discard(int(mh.Length)); err != nil { + return mh, nil, errors.Wrapf(err, "failed to discard after receiving oversized message") + } + + return mh, nil, status.Errorf(codes.ResourceExhausted, "message length %v exceed maximum message size of %v", mh.Length, messageLengthMax) + } + + p := ch.getmbuf(int(mh.Length)) + if _, err := io.ReadFull(ch.br, p); err != nil { + return messageHeader{}, nil, errors.Wrapf(err, "failed reading message") + } + + return mh, p, nil +} + +func (ch *channel) send(ctx context.Context, streamID uint32, t messageType, p []byte) error { + if err := writeMessageHeader(ch.bw, ch.hwbuf[:], messageHeader{Length: uint32(len(p)), StreamID: streamID, Type: t}); err != nil { + return err + } + + _, err := ch.bw.Write(p) + if err != nil { + return err + } + + return ch.bw.Flush() +} + +func (ch *channel) getmbuf(size int) []byte { + // we can't use the standard New method on pool because we want to allocate + // based on size. + b, ok := buffers.Get().(*[]byte) + if !ok || cap(*b) < size { + // TODO(stevvooe): It may be better to allocate these in fixed length + // buckets to reduce fragmentation but its not clear that would help + // with performance. An ilogb approach or similar would work well. + bb := make([]byte, size) + b = &bb + } else { + *b = (*b)[:size] + } + return *b +} + +func (ch *channel) putmbuf(p []byte) { + buffers.Put(&p) +} diff --git a/vendor/github.com/stevvooe/ttrpc/client.go b/vendor/github.com/stevvooe/ttrpc/client.go new file mode 100644 index 0000000000..ca76afe19a --- /dev/null +++ b/vendor/github.com/stevvooe/ttrpc/client.go @@ -0,0 +1,211 @@ +package ttrpc + +import ( + "context" + "net" + "sync" + + "github.com/containerd/containerd/log" + "github.com/gogo/protobuf/proto" + "github.com/pkg/errors" + "google.golang.org/grpc/status" +) + +type Client struct { + codec codec + conn net.Conn + channel *channel + calls chan *callRequest + + closed chan struct{} + closeOnce sync.Once + done chan struct{} + err error +} + +func NewClient(conn net.Conn) *Client { + c := &Client{ + codec: codec{}, + conn: conn, + channel: newChannel(conn, conn), + calls: make(chan *callRequest), + closed: make(chan struct{}), + done: make(chan struct{}), + } + + go c.run() + return c +} + +type callRequest struct { + ctx context.Context + req *Request + resp *Response // response will be written back here + errs chan error // error written here on completion +} + +func (c *Client) Call(ctx context.Context, service, method string, req, resp interface{}) error { + payload, err := c.codec.Marshal(req) + if err != nil { + return err + } + + var ( + creq = &Request{ + Service: service, + Method: method, + Payload: payload, + } + + cresp = &Response{} + ) + + if err := c.dispatch(ctx, creq, cresp); err != nil { + return err + } + + if err := c.codec.Unmarshal(cresp.Payload, resp); err != nil { + return err + } + + if cresp.Status == nil { + return errors.New("no status provided on response") + } + + return status.ErrorProto(cresp.Status) +} + +func (c *Client) dispatch(ctx context.Context, req *Request, resp *Response) error { + errs := make(chan error, 1) + call := &callRequest{ + req: req, + resp: resp, + errs: errs, + } + + select { + case c.calls <- call: + case <-c.done: + return c.err + } + + select { + case err := 
<-errs: + return err + case <-c.done: + return c.err + } +} + +func (c *Client) Close() error { + c.closeOnce.Do(func() { + close(c.closed) + }) + + return nil +} + +type message struct { + messageHeader + p []byte + err error +} + +func (c *Client) run() { + var ( + streamID uint32 = 1 + waiters = make(map[uint32]*callRequest) + calls = c.calls + incoming = make(chan *message) + shutdown = make(chan struct{}) + shutdownErr error + ) + + go func() { + defer close(shutdown) + + // start one more goroutine to recv messages without blocking. + for { + mh, p, err := c.channel.recv(context.TODO()) + if err != nil { + _, ok := status.FromError(err) + if !ok { + // treat all errors that are not an rpc status as terminal. + // all others poison the connection. + shutdownErr = err + return + } + } + select { + case incoming <- &message{ + messageHeader: mh, + p: p[:mh.Length], + err: err, + }: + case <-c.done: + return + } + } + }() + + defer c.conn.Close() + defer close(c.done) + + for { + select { + case call := <-calls: + if err := c.send(call.ctx, streamID, messageTypeRequest, call.req); err != nil { + call.errs <- err + continue + } + + waiters[streamID] = call + streamID += 2 // enforce odd client initiated request ids + case msg := <-incoming: + call, ok := waiters[msg.StreamID] + if !ok { + log.L.Errorf("ttrpc: received message for unknown channel %v", msg.StreamID) + continue + } + + call.errs <- c.recv(call.resp, msg) + delete(waiters, msg.StreamID) + case <-shutdown: + shutdownErr = errors.Wrapf(shutdownErr, "ttrpc: client shutting down") + c.err = shutdownErr + for _, waiter := range waiters { + waiter.errs <- shutdownErr + } + c.Close() + return + case <-c.closed: + // broadcast the shutdown error to the remaining waiters. + for _, waiter := range waiters { + waiter.errs <- shutdownErr + } + return + } + } +} + +func (c *Client) send(ctx context.Context, streamID uint32, mtype messageType, msg interface{}) error { + p, err := c.codec.Marshal(msg) + if err != nil { + return err + } + + return c.channel.send(ctx, streamID, mtype, p) +} + +func (c *Client) recv(resp *Response, msg *message) error { + if msg.err != nil { + return msg.err + } + + if msg.Type != messageTypeResponse { + return errors.New("unkown message type received") + } + + defer c.channel.putmbuf(msg.p) + return proto.Unmarshal(msg.p, resp) +} diff --git a/vendor/github.com/stevvooe/ttrpc/codec.go b/vendor/github.com/stevvooe/ttrpc/codec.go new file mode 100644 index 0000000000..7956a72220 --- /dev/null +++ b/vendor/github.com/stevvooe/ttrpc/codec.go @@ -0,0 +1,26 @@ +package ttrpc + +import ( + "github.com/gogo/protobuf/proto" + "github.com/pkg/errors" +) + +type codec struct{} + +func (c codec) Marshal(msg interface{}) ([]byte, error) { + switch v := msg.(type) { + case proto.Message: + return proto.Marshal(v) + default: + return nil, errors.Errorf("ttrpc: cannot marshal unknown type: %T", msg) + } +} + +func (c codec) Unmarshal(p []byte, msg interface{}) error { + switch v := msg.(type) { + case proto.Message: + return proto.Unmarshal(p, v) + default: + return errors.Errorf("ttrpc: cannot unmarshal into unknown type: %T", msg) + } +} diff --git a/vendor/github.com/stevvooe/ttrpc/server.go b/vendor/github.com/stevvooe/ttrpc/server.go new file mode 100644 index 0000000000..ed2d14cf7b --- /dev/null +++ b/vendor/github.com/stevvooe/ttrpc/server.go @@ -0,0 +1,416 @@ +package ttrpc + +import ( + "context" + "math/rand" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/containerd/containerd/log" + 
"github.com/pkg/errors" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +var ( + ErrServerClosed = errors.New("ttrpc: server close") +) + +type Server struct { + services *serviceSet + codec codec + + mu sync.Mutex + listeners map[net.Listener]struct{} + connections map[*serverConn]struct{} // all connections to current state + done chan struct{} // marks point at which we stop serving requests +} + +func NewServer() *Server { + return &Server{ + services: newServiceSet(), + done: make(chan struct{}), + listeners: make(map[net.Listener]struct{}), + connections: make(map[*serverConn]struct{}), + } +} + +func (s *Server) Register(name string, methods map[string]Method) { + s.services.register(name, methods) +} + +func (s *Server) Serve(l net.Listener) error { + s.addListener(l) + defer s.closeListener(l) + + var ( + ctx = context.Background() + backoff time.Duration + ) + + for { + conn, err := l.Accept() + if err != nil { + select { + case <-s.done: + return ErrServerClosed + default: + } + + if terr, ok := err.(interface { + Temporary() bool + }); ok && terr.Temporary() { + if backoff == 0 { + backoff = time.Millisecond + } else { + backoff *= 2 + } + + if max := time.Second; backoff > max { + backoff = max + } + + sleep := time.Duration(rand.Int63n(int64(backoff))) + log.L.WithError(err).Errorf("ttrpc: failed accept; backoff %v", sleep) + time.Sleep(sleep) + continue + } + + return err + } + + backoff = 0 + sc := s.newConn(conn) + go sc.run(ctx) + } +} + +func (s *Server) Shutdown(ctx context.Context) error { + s.mu.Lock() + lnerr := s.closeListeners() + select { + case <-s.done: + default: + // protected by mutex + close(s.done) + } + s.mu.Unlock() + + ticker := time.NewTicker(200 * time.Millisecond) + defer ticker.Stop() + for { + if s.closeIdleConns() { + return lnerr + } + select { + case <-ctx.Done(): + return ctx.Err() + case <-ticker.C: + } + } +} + +// Close the server without waiting for active connections. 
+func (s *Server) Close() error { + s.mu.Lock() + defer s.mu.Unlock() + + select { + case <-s.done: + default: + // protected by mutex + close(s.done) + } + + err := s.closeListeners() + for c := range s.connections { + c.close() + delete(s.connections, c) + } + + return err +} + +func (s *Server) addListener(l net.Listener) { + s.mu.Lock() + defer s.mu.Unlock() + s.listeners[l] = struct{}{} +} + +func (s *Server) closeListener(l net.Listener) error { + s.mu.Lock() + defer s.mu.Unlock() + + return s.closeListenerLocked(l) +} + +func (s *Server) closeListenerLocked(l net.Listener) error { + defer delete(s.listeners, l) + return l.Close() +} + +func (s *Server) closeListeners() error { + var err error + for l := range s.listeners { + if cerr := s.closeListenerLocked(l); cerr != nil && err == nil { + err = cerr + } + } + return err +} + +func (s *Server) addConnection(c *serverConn) { + s.mu.Lock() + defer s.mu.Unlock() + + s.connections[c] = struct{}{} +} + +func (s *Server) closeIdleConns() bool { + s.mu.Lock() + defer s.mu.Unlock() + quiescent := true + for c := range s.connections { + st, ok := c.getState() + if !ok || st != connStateIdle { + quiescent = false + continue + } + c.close() + delete(s.connections, c) + } + return quiescent +} + +type connState int + +const ( + connStateActive = iota + 1 // outstanding requests + connStateIdle // no requests + connStateClosed // closed connection +) + +func (cs connState) String() string { + switch cs { + case connStateActive: + return "active" + case connStateIdle: + return "idle" + case connStateClosed: + return "closed" + default: + return "unknown" + } +} + +func (s *Server) newConn(conn net.Conn) *serverConn { + c := &serverConn{ + server: s, + conn: conn, + shutdown: make(chan struct{}), + } + c.setState(connStateIdle) + s.addConnection(c) + return c +} + +type serverConn struct { + server *Server + conn net.Conn + state atomic.Value + + shutdownOnce sync.Once + shutdown chan struct{} // forced shutdown, used by close +} + +func (c *serverConn) getState() (connState, bool) { + cs, ok := c.state.Load().(connState) + return cs, ok +} + +func (c *serverConn) setState(newstate connState) { + c.state.Store(newstate) +} + +func (c *serverConn) close() error { + c.shutdownOnce.Do(func() { + close(c.shutdown) + }) + + return nil +} + +func (c *serverConn) run(sctx context.Context) { + type ( + request struct { + id uint32 + req *Request + } + + response struct { + id uint32 + resp *Response + } + ) + + var ( + ch = newChannel(c.conn, c.conn) + ctx, cancel = context.WithCancel(sctx) + active int + state connState = connStateIdle + responses = make(chan response) + requests = make(chan request) + recvErr = make(chan error, 1) + shutdown = c.shutdown + done = make(chan struct{}) + ) + + defer c.conn.Close() + defer cancel() + defer close(done) + + go func(recvErr chan error) { + defer close(recvErr) + sendImmediate := func(id uint32, st *status.Status) bool { + select { + case responses <- response{ + // even though we've had an invalid stream id, we send it + // back on the same stream id so the client knows which + // stream id was bad. 
+ id: id, + resp: &Response{ + Status: st.Proto(), + }, + }: + return true + case <-c.shutdown: + return false + case <-done: + return false + } + } + + for { + select { + case <-c.shutdown: + return + case <-done: + return + default: // proceed + } + + mh, p, err := ch.recv(ctx) + if err != nil { + status, ok := status.FromError(err) + if !ok { + recvErr <- err + return + } + + // in this case, we send an error for that particular message + // when the status is defined. + if !sendImmediate(mh.StreamID, status) { + return + } + + continue + } + + if mh.Type != messageTypeRequest { + // we must ignore this for future compat. + continue + } + + var req Request + if err := c.server.codec.Unmarshal(p, &req); err != nil { + ch.putmbuf(p) + if !sendImmediate(mh.StreamID, status.Newf(codes.InvalidArgument, "unmarshal request error: %v", err)) { + return + } + continue + } + ch.putmbuf(p) + + if mh.StreamID%2 != 1 { + // enforce odd client initiated identifiers. + if !sendImmediate(mh.StreamID, status.Newf(codes.InvalidArgument, "StreamID must be odd for client initiated streams")) { + return + } + continue + } + + // Forward the request to the main loop. We don't wait on s.done + // because we have already accepted the client request. + select { + case requests <- request{ + id: mh.StreamID, + req: &req, + }: + case <-done: + return + } + } + }(recvErr) + + for { + newstate := state + switch { + case active > 0: + newstate = connStateActive + shutdown = nil + case active == 0: + newstate = connStateIdle + shutdown = c.shutdown // only enable this branch in idle mode + } + + if newstate != state { + c.setState(newstate) + state = newstate + } + + select { + case request := <-requests: + active++ + go func(id uint32) { + p, status := c.server.services.call(ctx, request.req.Service, request.req.Method, request.req.Payload) + resp := &Response{ + Status: status.Proto(), + Payload: p, + } + + select { + case responses <- response{ + id: id, + resp: resp, + }: + case <-done: + } + }(request.id) + case response := <-responses: + p, err := c.server.codec.Marshal(response.resp) + if err != nil { + log.L.WithError(err).Error("failed marshaling response") + return + } + + if err := ch.send(ctx, response.id, messageTypeResponse, p); err != nil { + log.L.WithError(err).Error("failed sending message on channel") + return + } + + active-- + case err := <-recvErr: + // TODO(stevvooe): Not wildly clear what we should do in this + // branch. Basically, it means that we are no longer receiving + // requests due to a terminal error. + recvErr = nil // connection is now "closing" + if err != nil { + log.L.WithError(err).Error("error receiving message") + } + case <-shutdown: + return + } + } +} diff --git a/vendor/github.com/stevvooe/ttrpc/services.go b/vendor/github.com/stevvooe/ttrpc/services.go new file mode 100644 index 0000000000..b9a749e3de --- /dev/null +++ b/vendor/github.com/stevvooe/ttrpc/services.go @@ -0,0 +1,134 @@ +package ttrpc + +import ( + "context" + "io" + "os" + "path" + + "github.com/gogo/protobuf/proto" + "github.com/pkg/errors" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type Method func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) + +type ServiceDesc struct { + Methods map[string]Method + + // TODO(stevvooe): Add stream support. 
+} + +type serviceSet struct { + services map[string]ServiceDesc +} + +func newServiceSet() *serviceSet { + return &serviceSet{ + services: make(map[string]ServiceDesc), + } +} + +func (s *serviceSet) register(name string, methods map[string]Method) { + if _, ok := s.services[name]; ok { + panic(errors.Errorf("duplicate service %v registered", name)) + } + + s.services[name] = ServiceDesc{ + Methods: methods, + } +} + +func (s *serviceSet) call(ctx context.Context, serviceName, methodName string, p []byte) ([]byte, *status.Status) { + p, err := s.dispatch(ctx, serviceName, methodName, p) + st, ok := status.FromError(err) + if !ok { + st = status.New(convertCode(err), err.Error()) + } + + return p, st +} + +func (s *serviceSet) dispatch(ctx context.Context, serviceName, methodName string, p []byte) ([]byte, error) { + method, err := s.resolve(serviceName, methodName) + if err != nil { + return nil, err + } + + unmarshal := func(obj interface{}) error { + switch v := obj.(type) { + case proto.Message: + if err := proto.Unmarshal(p, v); err != nil { + return status.Errorf(codes.Internal, "ttrpc: error unmarshaling payload: %v", err.Error()) + } + default: + return status.Errorf(codes.Internal, "ttrpc: error unsupported request type: %T", v) + } + return nil + } + + resp, err := method(ctx, unmarshal) + if err != nil { + return nil, err + } + + switch v := resp.(type) { + case proto.Message: + r, err := proto.Marshal(v) + if err != nil { + return nil, status.Errorf(codes.Internal, "ttrpc: error marshaling payload: %v", err.Error()) + } + + return r, nil + default: + return nil, status.Errorf(codes.Internal, "ttrpc: error unsupported response type: %T", v) + } +} + +func (s *serviceSet) resolve(service, method string) (Method, error) { + srv, ok := s.services[service] + if !ok { + return nil, status.Errorf(codes.NotFound, "service %v", service) + } + + mthd, ok := srv.Methods[method] + if !ok { + return nil, status.Errorf(codes.NotFound, "method %v", method) + } + + return mthd, nil +} + +// convertCode maps stdlib go errors into grpc space. +// +// This is ripped from the grpc-go code base. 
+func convertCode(err error) codes.Code { + switch err { + case nil: + return codes.OK + case io.EOF: + return codes.OutOfRange + case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF: + return codes.FailedPrecondition + case os.ErrInvalid: + return codes.InvalidArgument + case context.Canceled: + return codes.Canceled + case context.DeadlineExceeded: + return codes.DeadlineExceeded + } + switch { + case os.IsExist(err): + return codes.AlreadyExists + case os.IsNotExist(err): + return codes.NotFound + case os.IsPermission(err): + return codes.PermissionDenied + } + return codes.Unknown +} + +func fullPath(service, method string) string { + return "/" + path.Join("/", service, method) +} diff --git a/vendor/github.com/stevvooe/ttrpc/types.go b/vendor/github.com/stevvooe/ttrpc/types.go new file mode 100644 index 0000000000..a522b0cf26 --- /dev/null +++ b/vendor/github.com/stevvooe/ttrpc/types.go @@ -0,0 +1,26 @@ +package ttrpc + +import ( + "fmt" + + spb "google.golang.org/genproto/googleapis/rpc/status" +) + +type Request struct { + Service string `protobuf:"bytes,1,opt,name=service,proto3"` + Method string `protobuf:"bytes,2,opt,name=method,proto3"` + Payload []byte `protobuf:"bytes,3,opt,name=payload,proto3"` +} + +func (r *Request) Reset() { *r = Request{} } +func (r *Request) String() string { return fmt.Sprintf("%+#v", r) } +func (r *Request) ProtoMessage() {} + +type Response struct { + Status *spb.Status `protobuf:"bytes,1,opt,name=status,proto3"` + Payload []byte `protobuf:"bytes,2,opt,name=payload,proto3"` +} + +func (r *Response) Reset() { *r = Response{} } +func (r *Response) String() string { return fmt.Sprintf("%+#v", r) } +func (r *Response) ProtoMessage() {}
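For context on how the vendored ttrpc package is meant to be wired up, below is a minimal sketch that connects the two ends over a unix socket using only the API added in this vendoring (NewServer, Register, and Serve on the server side; NewClient and Call on the client side). The service name example.v1.Echo, the Echo method, the socket path, and the EchoPayload message are illustrative assumptions, not anything defined in this change; the payload type simply follows the same reflection-based gogo/protobuf pattern as ttrpc's own Request and Response types above.

package main

import (
	"context"
	"fmt"
	"net"

	"github.com/stevvooe/ttrpc"
)

// EchoPayload is a hypothetical message for this sketch, declared the same
// way as ttrpc's Request/Response types (struct tags plus the proto.Message
// methods), so the reflection-based gogo codec can marshal it.
type EchoPayload struct {
	Msg string `protobuf:"bytes,1,opt,name=msg,proto3"`
}

func (p *EchoPayload) Reset()         { *p = EchoPayload{} }
func (p *EchoPayload) String() string { return fmt.Sprintf("%+#v", p) }
func (p *EchoPayload) ProtoMessage()  {}

func main() {
	// Assumed socket path for the example.
	l, err := net.Listen("unix", "/tmp/ttrpc-echo.sock")
	if err != nil {
		panic(err)
	}
	defer l.Close()

	s := ttrpc.NewServer()
	s.Register("example.v1.Echo", map[string]ttrpc.Method{
		// A Method receives the raw payload via the unmarshal callback and
		// returns a proto message that the server marshals into the response.
		"Echo": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
			var req EchoPayload
			if err := unmarshal(&req); err != nil {
				return nil, err
			}
			return &EchoPayload{Msg: req.Msg}, nil
		},
	})
	go s.Serve(l)
	defer s.Close()

	conn, err := net.Dial("unix", "/tmp/ttrpc-echo.sock")
	if err != nil {
		panic(err)
	}
	client := ttrpc.NewClient(conn)
	defer client.Close()

	var resp EchoPayload
	if err := client.Call(context.Background(), "example.v1.Echo", "Echo", &EchoPayload{Msg: "hello"}, &resp); err != nil {
		panic(err)
	}
	fmt.Println(resp.Msg) // prints "hello"
}

Underneath, every request and response is framed by the fixed 10-byte header defined in channel.go (big-endian length and stream ID, a type byte, and a reserved flags byte), with bodies capped at 4 MiB by messageLengthMax; that lightweight framing, rather than grpc over http/2, is where the binary-size and memory savings described in the README come from.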