Use containerd Status variable when checking container state
Signed-off-by: Kenfe-Mickael Laventure <mickael.laventure@gmail.com>
parent 9771780a01
commit 0ea0b2becf
38 changed files with 1938 additions and 817 deletions
@@ -18,7 +18,7 @@ import (
 	"time"
 
 	"github.com/Sirupsen/logrus"
-	containerd "github.com/docker/containerd/api/grpc/types"
+	containerd "github.com/containerd/containerd/api/grpc/types"
 	"github.com/docker/docker/api"
 	"github.com/docker/docker/api/types"
 	containertypes "github.com/docker/docker/api/types/container"
@@ -4,7 +4,7 @@ TOMLV_COMMIT=9baf8a8a9f2ed20a8e54160840c492f937eeaf9a
 
 # When updating RUNC_COMMIT, also update runc in vendor.conf accordingly
 RUNC_COMMIT=992a5be178a62e026f4069f443c6164912adbf09
-CONTAINERD_COMMIT=8ef7df579710405c4bb6e0812495671002ce08e0
+CONTAINERD_COMMIT=3addd840653146c90a254301d6c3a663c7fd6429
 TINI_COMMIT=949e6facb77383876aeff8a6944dde66b3089574
 LIBNETWORK_COMMIT=7b2b1feb1de4817d522cc372af149ff48d25028e
 VNDR_COMMIT=c56e082291115e369f77601f9c071dd0b87c7120
@@ -9,7 +9,8 @@ import (
 	"time"
 
 	"github.com/Sirupsen/logrus"
-	containerd "github.com/docker/containerd/api/grpc/types"
+	containerd "github.com/containerd/containerd/api/grpc/types"
+	containerd_runtime_types "github.com/containerd/containerd/runtime"
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/mount"
 	"github.com/golang/protobuf/ptypes"
@@ -467,7 +468,7 @@ func (clnt *client) Restore(containerID string, attachStdio StdioCallback, optio
 	cont, err := clnt.getContainerdContainer(containerID)
 	// Get its last event
 	ev, eerr := clnt.getContainerLastEvent(containerID)
-	if err != nil || cont.Status == "stopped" {
+	if err != nil || containerd_runtime_types.State(cont.Status) == containerd_runtime_types.Stopped {
 		if err != nil {
 			logrus.Warnf("libcontainerd: failed to retrieve container %s state: %v", containerID, err)
 		}
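This hunk is the change the commit title describes: instead of matching the raw status string against the literal "stopped", the daemon now converts the value reported by containerd into the vendored runtime package's State type and compares it with the Stopped constant, so the check stays in sync with the constants the runtime itself uses. The standalone sketch below illustrates only the pattern; it assumes State is a string-backed type with Running/Stopped constants and is not Docker's actual code.

package main

import "fmt"

// State mirrors the shape of the vendored containerd runtime.State: a
// string-backed type compared through named constants instead of bare
// string literals. The concrete values here are illustrative assumptions.
type State string

const (
	Running State = "running"
	Stopped State = "stopped"
)

// restoreAction sketches the decision Restore makes: a container whose last
// known status is Stopped gets cleaned up, anything else is re-attached.
func restoreAction(status string) string {
	if State(status) == Stopped { // typed comparison instead of status == "stopped"
		return "clean up and emit exit event"
	}
	return "re-attach to the live container"
}

func main() {
	fmt.Println(restoreAction("stopped")) // clean up and emit exit event
	fmt.Println(restoreAction("running")) // re-attach to the live container
}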
@@ -11,7 +11,7 @@ import (
 	"sync"
 
 	"github.com/Sirupsen/logrus"
-	containerd "github.com/docker/containerd/api/grpc/types"
+	containerd "github.com/containerd/containerd/api/grpc/types"
 	"github.com/docker/docker/pkg/idtools"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 	"golang.org/x/net/context"
@@ -13,7 +13,7 @@ import (
 	"time"
 
 	"github.com/Sirupsen/logrus"
-	containerd "github.com/docker/containerd/api/grpc/types"
+	containerd "github.com/containerd/containerd/api/grpc/types"
 	"github.com/docker/docker/pkg/ioutils"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/tonistiigi/fifo"
@@ -10,7 +10,7 @@ import (
 	goruntime "runtime"
 	"strings"
 
-	containerd "github.com/docker/containerd/api/grpc/types"
+	containerd "github.com/containerd/containerd/api/grpc/types"
 	"github.com/tonistiigi/fifo"
 	"golang.org/x/net/context"
 	"golang.org/x/sys/unix"
@@ -19,7 +19,7 @@ import (
 	"time"
 
 	"github.com/Sirupsen/logrus"
-	containerd "github.com/docker/containerd/api/grpc/types"
+	containerd "github.com/containerd/containerd/api/grpc/types"
 	"github.com/docker/docker/pkg/locker"
 	"github.com/docker/docker/pkg/system"
 	"github.com/golang/protobuf/ptypes"
@@ -3,7 +3,7 @@ package libcontainerd
 import (
 	"io"
 
-	containerd "github.com/docker/containerd/api/grpc/types"
+	containerd "github.com/containerd/containerd/api/grpc/types"
 	"github.com/opencontainers/runtime-spec/specs-go"
 	"golang.org/x/net/context"
 )
@@ -1,7 +1,7 @@
 package libcontainerd
 
 import (
-	containerd "github.com/docker/containerd/api/grpc/types"
+	containerd "github.com/containerd/containerd/api/grpc/types"
 	"github.com/opencontainers/runtime-spec/specs-go"
 )
 
@@ -1,7 +1,7 @@
 package libcontainerd
 
 import (
-	containerd "github.com/docker/containerd/api/grpc/types"
+	containerd "github.com/containerd/containerd/api/grpc/types"
 	"github.com/opencontainers/runtime-spec/specs-go"
 )
 
@@ -3,7 +3,7 @@ package libcontainerd
 import (
 	"syscall"
 
-	containerd "github.com/docker/containerd/api/grpc/types"
+	containerd "github.com/containerd/containerd/api/grpc/types"
 	"github.com/opencontainers/runtime-spec/specs-go"
 )
 
@@ -3,7 +3,7 @@ package libcontainerd
 import (
 	"syscall"
 
-	containerd "github.com/docker/containerd/api/grpc/types"
+	containerd "github.com/containerd/containerd/api/grpc/types"
 	"github.com/opencontainers/runtime-spec/specs-go"
 )
 
@@ -60,7 +60,7 @@ google.golang.org/grpc v1.0.4
 github.com/miekg/pkcs11 df8ae6ca730422dba20c768ff38ef7d79077a59f
 
 # When updating, also update RUNC_COMMIT in hack/dockerfile/binaries-commits accordingly
-github.com/opencontainers/runc b6b70e53451794e8333e9b602cc096b47a20bd0f
+github.com/opencontainers/runc 992a5be178a62e026f4069f443c6164912adbf09
 github.com/opencontainers/runtime-spec v1.0.0-rc5 # specs
 
 github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0
@@ -101,7 +101,7 @@ google.golang.org/genproto b3e7c2fb04031add52c4817f53f43757ccbf9c18
 github.com/docker/docker-credential-helpers v0.5.0
 
 # containerd
-github.com/docker/containerd 8ef7df579710405c4bb6e0812495671002ce08e0
+github.com/containerd/containerd 3addd840653146c90a254301d6c3a663c7fd6429
 github.com/tonistiigi/fifo 1405643975692217d6720f8b54aeee1bf2cd5cf4
 
 # cluster
18 vendor/github.com/containerd/containerd/osutils/fds.go generated vendored Normal file
@@ -0,0 +1,18 @@
// +build !windows,!darwin

package osutils

import (
	"io/ioutil"
	"path/filepath"
	"strconv"
)

// GetOpenFds returns the number of open fds for the process provided by pid
func GetOpenFds(pid int) (int, error) {
	dirs, err := ioutil.ReadDir(filepath.Join("/proc", strconv.Itoa(pid), "fd"))
	if err != nil {
		return -1, err
	}
	return len(dirs), nil
}
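For reference, a minimal caller of the helper above, assuming only the vendored import path introduced by this commit; it simply prints the open-fd count of the current process (a Linux-style /proc is required):

package main

import (
	"fmt"
	"log"
	"os"

	// Vendored path of the file above; assumed for this sketch.
	"github.com/containerd/containerd/osutils"
)

func main() {
	// Count the open file descriptors of this process by listing /proc/<pid>/fd.
	fds, err := osutils.GetOpenFds(os.Getpid())
	if err != nil {
		log.Fatalf("reading fd directory: %v", err)
	}
	fmt.Printf("open fds: %d\n", fds)
}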
15 vendor/github.com/containerd/containerd/osutils/pdeathsig_linux.go generated vendored Normal file
@@ -0,0 +1,15 @@
// +build !solaris

package osutils

import (
	"syscall"
)

// SetPDeathSig sets the parent death signal to SIGKILL so that if the
// shim dies the container process also dies.
func SetPDeathSig() *syscall.SysProcAttr {
	return &syscall.SysProcAttr{
		Pdeathsig: syscall.SIGKILL,
	}
}
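A short sketch of how a launcher might apply SetPDeathSig when starting a child such as the shim, so the child receives SIGKILL if its parent dies. The command is a placeholder; this is not containerd's actual start-up path.

package main

import (
	"log"
	"os/exec"

	// Vendored path of the file above; assumed for this sketch.
	"github.com/containerd/containerd/osutils"
)

func main() {
	// Placeholder child standing in for the shim binary.
	cmd := exec.Command("sleep", "60")
	// Ask the kernel to deliver SIGKILL to the child if this process exits.
	cmd.SysProcAttr = osutils.SetPDeathSig()
	if err := cmd.Start(); err != nil {
		log.Fatalf("start child: %v", err)
	}
	log.Printf("child started with pid %d", cmd.Process.Pid)
	_ = cmd.Wait()
}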
8 vendor/github.com/containerd/containerd/osutils/pdeathsig_solaris.go generated vendored Normal file
@@ -0,0 +1,8 @@
// +build solaris

package osutils

import "syscall"

// SetPDeathSig is a no-op on Solaris as Pdeathsig is not defined.
func SetPDeathSig() *syscall.SysProcAttr {
	return nil
}
48 vendor/github.com/containerd/containerd/osutils/prctl.go generated vendored Normal file
@@ -0,0 +1,48 @@
// +build linux

// Package osutils provides access to the Get Child and Set Child prctl
// flags.
// See http://man7.org/linux/man-pages/man2/prctl.2.html
package osutils

import (
	"syscall"
	"unsafe"
)

// PR_SET_CHILD_SUBREAPER allows setting the child subreaper.
// If arg2 is nonzero, set the "child subreaper" attribute of the
// calling process; if arg2 is zero, unset the attribute. When a
// process is marked as a child subreaper, all of the children
// that it creates, and their descendants, will be marked as
// having a subreaper. In effect, a subreaper fulfills the role
// of init(1) for its descendant processes. Upon termination of
// a process that is orphaned (i.e., its immediate parent has
// already terminated) and marked as having a subreaper, the
// nearest still living ancestor subreaper will receive a SIGCHLD
// signal and be able to wait(2) on the process to discover its
// termination status.
const prSetChildSubreaper = 36

// PR_GET_CHILD_SUBREAPER allows retrieving the current child
// subreaper.
// Return the "child subreaper" setting of the caller, in the
// location pointed to by (int *) arg2.
const prGetChildSubreaper = 37

// GetSubreaper returns the subreaper setting for the calling process
func GetSubreaper() (int, error) {
	var i uintptr
	if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, prGetChildSubreaper, uintptr(unsafe.Pointer(&i)), 0); err != 0 {
		return -1, err
	}
	return int(i), nil
}

// SetSubreaper sets the value i as the subreaper setting for the calling process
func SetSubreaper(i int) error {
	if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, prSetChildSubreaper, uintptr(i), 0); err != 0 {
		return err
	}
	return nil
}
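The two wrappers above are thin shims over prctl(2). A quick Linux-only sketch of setting the flag and reading it back, again assuming the vendored import path:

package main

import (
	"fmt"
	"log"

	// Vendored path of the file above; assumed for this sketch.
	"github.com/containerd/containerd/osutils"
)

func main() {
	// Mark this process as a child subreaper (PR_SET_CHILD_SUBREAPER, arg2=1).
	if err := osutils.SetSubreaper(1); err != nil {
		log.Fatalf("set subreaper: %v", err)
	}
	// Read the flag back via PR_GET_CHILD_SUBREAPER.
	v, err := osutils.GetSubreaper()
	if err != nil {
		log.Fatalf("get subreaper: %v", err)
	}
	fmt.Printf("child subreaper flag: %d\n", v)
}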
19 vendor/github.com/containerd/containerd/osutils/prctl_solaris.go generated vendored Normal file
@@ -0,0 +1,19 @@
// +build solaris

package osutils

import (
	"errors"
)

//Solaris TODO

// GetSubreaper returns the subreaper setting for the calling process
func GetSubreaper() (int, error) {
	return 0, errors.New("osutils GetSubreaper not implemented on Solaris")
}

// SetSubreaper sets the value i as the subreaper setting for the calling process
func SetSubreaper(i int) error {
	return nil
}
51 vendor/github.com/containerd/containerd/osutils/reaper.go generated vendored Normal file
@@ -0,0 +1,51 @@
// +build !windows

package osutils

import "syscall"

// Exit is the wait4 information from an exited process
type Exit struct {
	Pid    int
	Status int
}

// Reap reaps all child processes for the calling process and returns their
// exit information
func Reap(wait bool) (exits []Exit, err error) {
	var (
		ws  syscall.WaitStatus
		rus syscall.Rusage
	)
	flag := syscall.WNOHANG
	if wait {
		flag = 0
	}
	for {
		pid, err := syscall.Wait4(-1, &ws, flag, &rus)
		if err != nil {
			if err == syscall.ECHILD {
				return exits, nil
			}
			return exits, err
		}
		if pid <= 0 {
			return exits, nil
		}
		exits = append(exits, Exit{
			Pid:    pid,
			Status: exitStatus(ws),
		})
	}
}

const exitSignalOffset = 128

// exitStatus returns the correct exit status for a process based on if it
// was signaled or exited cleanly
func exitStatus(status syscall.WaitStatus) int {
	if status.Signaled() {
		return exitSignalOffset + int(status.Signal())
	}
	return status.ExitStatus()
}
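And the reaping side: a SIGCHLD-driven sweep that collects exited children with Reap. The signal wiring below is an assumption for illustration, not containerd's actual supervisor loop.

package main

import (
	"log"
	"os"
	"os/exec"
	"os/signal"
	"syscall"

	// Vendored path of the file above; assumed for this sketch.
	"github.com/containerd/containerd/osutils"
)

func main() {
	sigs := make(chan os.Signal, 32)
	signal.Notify(sigs, syscall.SIGCHLD)

	// Start a short-lived child so there is something to reap.
	if err := exec.Command("true").Start(); err != nil {
		log.Fatalf("start child: %v", err)
	}

	<-sigs // wait for the first SIGCHLD
	// Non-blocking sweep: collects every child that has exited so far.
	exits, err := osutils.Reap(false)
	if err != nil {
		log.Printf("reap: %v", err)
	}
	for _, e := range exits {
		log.Printf("reaped pid %d with status %d", e.Pid, e.Status)
	}
}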
749 vendor/github.com/containerd/containerd/runtime/container.go generated vendored Normal file
@ -0,0 +1,749 @@
|
|||
package runtime
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/containerd/containerd/specs"
|
||||
ocs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// Container defines the operations allowed on a container
|
||||
type Container interface {
|
||||
// ID returns the container ID
|
||||
ID() string
|
||||
// Path returns the path to the bundle
|
||||
Path() string
|
||||
// Start starts the init process of the container
|
||||
Start(ctx context.Context, checkpointPath string, s Stdio) (Process, error)
|
||||
// Exec starts another process in an existing container
|
||||
Exec(context.Context, string, specs.ProcessSpec, Stdio) (Process, error)
|
||||
// Delete removes the container's state and any resources
|
||||
Delete() error
|
||||
// Processes returns all the containers processes that have been added
|
||||
Processes() ([]Process, error)
|
||||
// State returns the containers runtime state
|
||||
State() State
|
||||
// Resume resumes a paused container
|
||||
Resume() error
|
||||
// Pause pauses a running container
|
||||
Pause() error
|
||||
// RemoveProcess removes the specified process from the container
|
||||
RemoveProcess(string) error
|
||||
// Checkpoints returns all the checkpoints for a container
|
||||
Checkpoints(checkpointDir string) ([]Checkpoint, error)
|
||||
// Checkpoint creates a new checkpoint
|
||||
Checkpoint(checkpoint Checkpoint, checkpointDir string) error
|
||||
// DeleteCheckpoint deletes the checkpoint for the provided name
|
||||
DeleteCheckpoint(name string, checkpointDir string) error
|
||||
// Labels are user provided labels for the container
|
||||
Labels() []string
|
||||
// Pids returns all pids inside the container
|
||||
Pids() ([]int, error)
|
||||
// Stats returns realtime container stats and resource information
|
||||
Stats() (*Stat, error)
|
||||
// Name or path of the OCI compliant runtime used to execute the container
|
||||
Runtime() string
|
||||
// OOM signals the channel if the container received an OOM notification
|
||||
OOM() (OOM, error)
|
||||
// UpdateResource updates the containers resources to new values
|
||||
UpdateResources(*Resource) error
|
||||
|
||||
// Status return the current status of the container.
|
||||
Status() (State, error)
|
||||
}
|
||||
|
||||
// OOM wraps a container OOM.
|
||||
type OOM interface {
|
||||
io.Closer
|
||||
FD() int
|
||||
ContainerID() string
|
||||
Flush()
|
||||
Removed() bool
|
||||
}
|
||||
|
||||
// Stdio holds the path to the 3 pipes used for the standard ios.
|
||||
type Stdio struct {
|
||||
Stdin string
|
||||
Stdout string
|
||||
Stderr string
|
||||
}
|
||||
|
||||
// NewStdio wraps the given standard io path into an Stdio struct.
|
||||
// If a given parameter is the empty string, it is replaced by "/dev/null"
|
||||
func NewStdio(stdin, stdout, stderr string) Stdio {
|
||||
for _, s := range []*string{
|
||||
&stdin, &stdout, &stderr,
|
||||
} {
|
||||
if *s == "" {
|
||||
*s = "/dev/null"
|
||||
}
|
||||
}
|
||||
return Stdio{
|
||||
Stdin: stdin,
|
||||
Stdout: stdout,
|
||||
Stderr: stderr,
|
||||
}
|
||||
}
|
||||
|
||||
// ContainerOpts keeps the options passed at container creation
|
||||
type ContainerOpts struct {
|
||||
Root string
|
||||
ID string
|
||||
Bundle string
|
||||
Runtime string
|
||||
RuntimeArgs []string
|
||||
Shim string
|
||||
Labels []string
|
||||
NoPivotRoot bool
|
||||
Timeout time.Duration
|
||||
}
|
||||
|
||||
// New returns a new container
|
||||
func New(opts ContainerOpts) (Container, error) {
|
||||
c := &container{
|
||||
root: opts.Root,
|
||||
id: opts.ID,
|
||||
bundle: opts.Bundle,
|
||||
labels: opts.Labels,
|
||||
processes: make(map[string]*process),
|
||||
runtime: opts.Runtime,
|
||||
runtimeArgs: opts.RuntimeArgs,
|
||||
shim: opts.Shim,
|
||||
noPivotRoot: opts.NoPivotRoot,
|
||||
timeout: opts.Timeout,
|
||||
}
|
||||
if err := os.Mkdir(filepath.Join(c.root, c.id), 0755); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f, err := os.Create(filepath.Join(c.root, c.id, StateFile))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
if err := json.NewEncoder(f).Encode(state{
|
||||
Bundle: c.bundle,
|
||||
Labels: c.labels,
|
||||
Runtime: c.runtime,
|
||||
RuntimeArgs: c.runtimeArgs,
|
||||
Shim: c.shim,
|
||||
NoPivotRoot: opts.NoPivotRoot,
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Load returns a new container from the matching state file on disk.
|
||||
func Load(root, id, shimName string, timeout time.Duration) (Container, error) {
|
||||
var s state
|
||||
f, err := os.Open(filepath.Join(root, id, StateFile))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
if err := json.NewDecoder(f).Decode(&s); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c := &container{
|
||||
root: root,
|
||||
id: id,
|
||||
bundle: s.Bundle,
|
||||
labels: s.Labels,
|
||||
runtime: s.Runtime,
|
||||
runtimeArgs: s.RuntimeArgs,
|
||||
shim: s.Shim,
|
||||
noPivotRoot: s.NoPivotRoot,
|
||||
processes: make(map[string]*process),
|
||||
timeout: timeout,
|
||||
}
|
||||
|
||||
if c.shim == "" {
|
||||
c.shim = shimName
|
||||
}
|
||||
|
||||
dirs, err := ioutil.ReadDir(filepath.Join(root, id))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, d := range dirs {
|
||||
if !d.IsDir() {
|
||||
continue
|
||||
}
|
||||
pid := d.Name()
|
||||
s, err := readProcessState(filepath.Join(root, id, pid))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p, err := loadProcess(filepath.Join(root, id, pid), pid, c, s)
|
||||
if err != nil {
|
||||
logrus.WithField("id", id).WithField("pid", pid).Debugf("containerd: error loading process %s", err)
|
||||
continue
|
||||
}
|
||||
c.processes[pid] = p
|
||||
}
|
||||
|
||||
_, err = os.Stat(c.bundle)
|
||||
if err != nil && !os.IsExist(err) {
|
||||
for key, p := range c.processes {
|
||||
if key == InitProcessID {
|
||||
p.Delete()
|
||||
break
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("bundle dir %s don't exist", c.bundle)
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func readProcessState(dir string) (*ProcessState, error) {
|
||||
f, err := os.Open(filepath.Join(dir, "process.json"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
var s ProcessState
|
||||
if err := json.NewDecoder(f).Decode(&s); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &s, nil
|
||||
}
|
||||
|
||||
type container struct {
|
||||
// path to store runtime state information
|
||||
root string
|
||||
id string
|
||||
bundle string
|
||||
runtime string
|
||||
runtimeArgs []string
|
||||
shim string
|
||||
processes map[string]*process
|
||||
labels []string
|
||||
oomFds []int
|
||||
noPivotRoot bool
|
||||
timeout time.Duration
|
||||
}
|
||||
|
||||
func (c *container) ID() string {
|
||||
return c.id
|
||||
}
|
||||
|
||||
func (c *container) Path() string {
|
||||
return c.bundle
|
||||
}
|
||||
|
||||
func (c *container) Labels() []string {
|
||||
return c.labels
|
||||
}
|
||||
|
||||
func (c *container) readSpec() (*specs.Spec, error) {
|
||||
var spec specs.Spec
|
||||
f, err := os.Open(filepath.Join(c.bundle, "config.json"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
if err := json.NewDecoder(f).Decode(&spec); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &spec, nil
|
||||
}
|
||||
|
||||
func (c *container) Delete() error {
|
||||
var err error
|
||||
args := append(c.runtimeArgs, "delete", c.id)
|
||||
if b, derr := exec.Command(c.runtime, args...).CombinedOutput(); derr != nil && !strings.Contains(string(b), "does not exist") {
|
||||
err = fmt.Errorf("%s: %q", derr, string(b))
|
||||
}
|
||||
if rerr := os.RemoveAll(filepath.Join(c.root, c.id)); rerr != nil {
|
||||
if err != nil {
|
||||
err = fmt.Errorf("%s; failed to remove %s: %s", err, filepath.Join(c.root, c.id), rerr)
|
||||
} else {
|
||||
err = rerr
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *container) Processes() ([]Process, error) {
|
||||
out := []Process{}
|
||||
for _, p := range c.processes {
|
||||
out = append(out, p)
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *container) RemoveProcess(pid string) error {
|
||||
delete(c.processes, pid)
|
||||
return os.RemoveAll(filepath.Join(c.root, c.id, pid))
|
||||
}
|
||||
|
||||
func (c *container) State() State {
|
||||
proc := c.processes[InitProcessID]
|
||||
if proc == nil {
|
||||
return Stopped
|
||||
}
|
||||
return proc.State()
|
||||
}
|
||||
|
||||
func (c *container) Runtime() string {
|
||||
return c.runtime
|
||||
}
|
||||
|
||||
func (c *container) Pause() error {
|
||||
args := c.runtimeArgs
|
||||
args = append(args, "pause", c.id)
|
||||
b, err := exec.Command(c.runtime, args...).CombinedOutput()
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: %q", err.Error(), string(b))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *container) Resume() error {
|
||||
args := c.runtimeArgs
|
||||
args = append(args, "resume", c.id)
|
||||
b, err := exec.Command(c.runtime, args...).CombinedOutput()
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: %q", err.Error(), string(b))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *container) Checkpoints(checkpointDir string) ([]Checkpoint, error) {
|
||||
if checkpointDir == "" {
|
||||
checkpointDir = filepath.Join(c.bundle, "checkpoints")
|
||||
}
|
||||
|
||||
dirs, err := ioutil.ReadDir(checkpointDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var out []Checkpoint
|
||||
for _, d := range dirs {
|
||||
if !d.IsDir() {
|
||||
continue
|
||||
}
|
||||
path := filepath.Join(checkpointDir, d.Name(), "config.json")
|
||||
data, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var cpt Checkpoint
|
||||
if err := json.Unmarshal(data, &cpt); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out = append(out, cpt)
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *container) Checkpoint(cpt Checkpoint, checkpointDir string) error {
|
||||
if checkpointDir == "" {
|
||||
checkpointDir = filepath.Join(c.bundle, "checkpoints")
|
||||
}
|
||||
|
||||
if err := os.MkdirAll(checkpointDir, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
path := filepath.Join(checkpointDir, cpt.Name)
|
||||
if err := os.Mkdir(path, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
f, err := os.Create(filepath.Join(path, "config.json"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cpt.Created = time.Now()
|
||||
err = json.NewEncoder(f).Encode(cpt)
|
||||
f.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
args := []string{
|
||||
"checkpoint",
|
||||
"--image-path", path,
|
||||
"--work-path", filepath.Join(path, "criu.work"),
|
||||
}
|
||||
add := func(flags ...string) {
|
||||
args = append(args, flags...)
|
||||
}
|
||||
add(c.runtimeArgs...)
|
||||
if !cpt.Exit {
|
||||
add("--leave-running")
|
||||
}
|
||||
if cpt.Shell {
|
||||
add("--shell-job")
|
||||
}
|
||||
if cpt.TCP {
|
||||
add("--tcp-established")
|
||||
}
|
||||
if cpt.UnixSockets {
|
||||
add("--ext-unix-sk")
|
||||
}
|
||||
for _, ns := range cpt.EmptyNS {
|
||||
add("--empty-ns", ns)
|
||||
}
|
||||
add(c.id)
|
||||
out, err := exec.Command(c.runtime, args...).CombinedOutput()
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: %q", err.Error(), string(out))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *container) DeleteCheckpoint(name string, checkpointDir string) error {
|
||||
if checkpointDir == "" {
|
||||
checkpointDir = filepath.Join(c.bundle, "checkpoints")
|
||||
}
|
||||
return os.RemoveAll(filepath.Join(checkpointDir, name))
|
||||
}
|
||||
|
||||
func (c *container) Start(ctx context.Context, checkpointPath string, s Stdio) (Process, error) {
|
||||
processRoot := filepath.Join(c.root, c.id, InitProcessID)
|
||||
if err := os.Mkdir(processRoot, 0755); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cmd := exec.Command(c.shim,
|
||||
c.id, c.bundle, c.runtime,
|
||||
)
|
||||
cmd.Dir = processRoot
|
||||
cmd.SysProcAttr = &syscall.SysProcAttr{
|
||||
Setpgid: true,
|
||||
}
|
||||
spec, err := c.readSpec()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
config := &processConfig{
|
||||
checkpoint: checkpointPath,
|
||||
root: processRoot,
|
||||
id: InitProcessID,
|
||||
c: c,
|
||||
stdio: s,
|
||||
spec: spec,
|
||||
processSpec: specs.ProcessSpec(spec.Process),
|
||||
}
|
||||
p, err := newProcess(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := c.createCmd(ctx, InitProcessID, cmd, p); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func (c *container) Exec(ctx context.Context, pid string, pspec specs.ProcessSpec, s Stdio) (pp Process, err error) {
|
||||
processRoot := filepath.Join(c.root, c.id, pid)
|
||||
if err := os.Mkdir(processRoot, 0755); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
c.RemoveProcess(pid)
|
||||
}
|
||||
}()
|
||||
cmd := exec.Command(c.shim,
|
||||
c.id, c.bundle, c.runtime,
|
||||
)
|
||||
cmd.Dir = processRoot
|
||||
cmd.SysProcAttr = &syscall.SysProcAttr{
|
||||
Setpgid: true,
|
||||
}
|
||||
spec, err := c.readSpec()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
config := &processConfig{
|
||||
exec: true,
|
||||
id: pid,
|
||||
root: processRoot,
|
||||
c: c,
|
||||
processSpec: pspec,
|
||||
spec: spec,
|
||||
stdio: s,
|
||||
}
|
||||
p, err := newProcess(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := c.createCmd(ctx, pid, cmd, p); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func (c *container) createCmd(ctx context.Context, pid string, cmd *exec.Cmd, p *process) error {
|
||||
p.cmd = cmd
|
||||
if err := cmd.Start(); err != nil {
|
||||
close(p.cmdDoneCh)
|
||||
if exErr, ok := err.(*exec.Error); ok {
|
||||
if exErr.Err == exec.ErrNotFound || exErr.Err == os.ErrNotExist {
|
||||
return fmt.Errorf("%s not installed on system", c.shim)
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
// We need the pid file to have been written to run
|
||||
defer func() {
|
||||
go func() {
|
||||
err := p.cmd.Wait()
|
||||
if err == nil {
|
||||
p.cmdSuccess = true
|
||||
}
|
||||
|
||||
if same, err := p.isSameProcess(); same && p.pid > 0 {
|
||||
// The process changed its PR_SET_PDEATHSIG, so force
|
||||
// kill it
|
||||
logrus.Infof("containerd: %s:%s (pid %v) has become an orphan, killing it", p.container.id, p.id, p.pid)
|
||||
err = unix.Kill(p.pid, syscall.SIGKILL)
|
||||
if err != nil && err != syscall.ESRCH {
|
||||
logrus.Errorf("containerd: unable to SIGKILL %s:%s (pid %v): %v", p.container.id, p.id, p.pid, err)
|
||||
} else {
|
||||
for {
|
||||
err = unix.Kill(p.pid, 0)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
time.Sleep(5 * time.Millisecond)
|
||||
}
|
||||
}
|
||||
}
|
||||
close(p.cmdDoneCh)
|
||||
}()
|
||||
}()
|
||||
|
||||
ch := make(chan error)
|
||||
go func() {
|
||||
if err := c.waitForCreate(p, cmd); err != nil {
|
||||
ch <- err
|
||||
return
|
||||
}
|
||||
c.processes[pid] = p
|
||||
ch <- nil
|
||||
}()
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
cmd.Process.Kill()
|
||||
cmd.Wait()
|
||||
<-ch
|
||||
return ctx.Err()
|
||||
case err := <-ch:
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func hostIDFromMap(id uint32, mp []ocs.LinuxIDMapping) int {
|
||||
for _, m := range mp {
|
||||
if (id >= m.ContainerID) && (id <= (m.ContainerID + m.Size - 1)) {
|
||||
return int(m.HostID + (id - m.ContainerID))
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (c *container) Stats() (*Stat, error) {
|
||||
now := time.Now()
|
||||
args := c.runtimeArgs
|
||||
args = append(args, "events", "--stats", c.id)
|
||||
out, err := exec.Command(c.runtime, args...).CombinedOutput()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: %q", err.Error(), out)
|
||||
}
|
||||
s := struct {
|
||||
Data *Stat `json:"data"`
|
||||
}{}
|
||||
if err := json.Unmarshal(out, &s); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s.Data.Timestamp = now
|
||||
return s.Data, nil
|
||||
}
|
||||
|
||||
// Status implements the runtime Container interface.
|
||||
func (c *container) Status() (State, error) {
|
||||
args := c.runtimeArgs
|
||||
args = append(args, "state", c.id)
|
||||
|
||||
out, err := exec.Command(c.runtime, args...).CombinedOutput()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("%s: %q", err.Error(), out)
|
||||
}
|
||||
|
||||
// We only require the runtime json output to have a top level Status field.
|
||||
var s struct {
|
||||
Status State `json:"status"`
|
||||
}
|
||||
if err := json.Unmarshal(out, &s); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return s.Status, nil
|
||||
}
|
||||
|
||||
func (c *container) writeEventFD(root string, cfd, efd int) error {
|
||||
f, err := os.OpenFile(filepath.Join(root, "cgroup.event_control"), os.O_WRONLY, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
_, err = f.WriteString(fmt.Sprintf("%d %d", efd, cfd))
|
||||
return err
|
||||
}
|
||||
|
||||
type waitArgs struct {
|
||||
pid int
|
||||
err error
|
||||
}
|
||||
|
||||
func (c *container) waitForCreate(p *process, cmd *exec.Cmd) error {
|
||||
wc := make(chan error, 1)
|
||||
go func() {
|
||||
for {
|
||||
if _, err := p.getPidFromFile(); err != nil {
|
||||
if os.IsNotExist(err) || err == errInvalidPidInt || err == errContainerNotFound {
|
||||
alive, err := isAlive(cmd)
|
||||
if err != nil {
|
||||
wc <- err
|
||||
return
|
||||
}
|
||||
if !alive {
|
||||
// runc could have failed to run the container so lets get the error
|
||||
// out of the logs or the shim could have encountered an error
|
||||
messages, err := readLogMessages(filepath.Join(p.root, "shim-log.json"))
|
||||
if err != nil {
|
||||
wc <- err
|
||||
return
|
||||
}
|
||||
for _, m := range messages {
|
||||
if m.Level == "error" {
|
||||
wc <- fmt.Errorf("shim error: %v", m.Msg)
|
||||
return
|
||||
}
|
||||
}
|
||||
// no errors reported back from shim, check for runc/runtime errors
|
||||
messages, err = readLogMessages(filepath.Join(p.root, "log.json"))
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
err = ErrContainerNotStarted
|
||||
}
|
||||
wc <- err
|
||||
return
|
||||
}
|
||||
for _, m := range messages {
|
||||
if m.Level == "error" {
|
||||
wc <- fmt.Errorf("oci runtime error: %v", m.Msg)
|
||||
return
|
||||
}
|
||||
}
|
||||
wc <- ErrContainerNotStarted
|
||||
return
|
||||
}
|
||||
time.Sleep(15 * time.Millisecond)
|
||||
continue
|
||||
}
|
||||
wc <- err
|
||||
return
|
||||
}
|
||||
// the pid file was read successfully
|
||||
wc <- nil
|
||||
return
|
||||
}
|
||||
}()
|
||||
select {
|
||||
case err := <-wc:
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = p.saveStartTime()
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
logrus.Warnf("containerd: unable to save %s:%s starttime: %v", p.container.id, p.id, err)
|
||||
}
|
||||
return nil
|
||||
case <-time.After(c.timeout):
|
||||
cmd.Process.Kill()
|
||||
cmd.Wait()
|
||||
return ErrContainerStartTimeout
|
||||
}
|
||||
}
|
||||
|
||||
// isAlive checks if the shim that launched the container is still alive
|
||||
func isAlive(cmd *exec.Cmd) (bool, error) {
|
||||
if _, err := syscall.Wait4(cmd.Process.Pid, nil, syscall.WNOHANG, nil); err == nil {
|
||||
return true, nil
|
||||
}
|
||||
if err := syscall.Kill(cmd.Process.Pid, 0); err != nil {
|
||||
if err == syscall.ESRCH {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
type oom struct {
|
||||
id string
|
||||
root string
|
||||
eventfd int
|
||||
}
|
||||
|
||||
func (o *oom) ContainerID() string {
|
||||
return o.id
|
||||
}
|
||||
|
||||
func (o *oom) FD() int {
|
||||
return o.eventfd
|
||||
}
|
||||
|
||||
func (o *oom) Flush() {
|
||||
buf := make([]byte, 8)
|
||||
syscall.Read(o.eventfd, buf)
|
||||
}
|
||||
|
||||
func (o *oom) Removed() bool {
|
||||
_, err := os.Lstat(filepath.Join(o.root, "cgroup.event_control"))
|
||||
return os.IsNotExist(err)
|
||||
}
|
||||
|
||||
func (o *oom) Close() error {
|
||||
return syscall.Close(o.eventfd)
|
||||
}
|
||||
|
||||
type message struct {
|
||||
Level string `json:"level"`
|
||||
Msg string `json:"msg"`
|
||||
}
|
||||
|
||||
func readLogMessages(path string) ([]message, error) {
|
||||
var out []message
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
dec := json.NewDecoder(f)
|
||||
for {
|
||||
var m message
|
||||
if err := dec.Decode(&m); err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
out = append(out, m)
|
||||
}
|
||||
return out, nil
|
||||
}
|
190 vendor/github.com/containerd/containerd/runtime/container_linux.go generated vendored Normal file
@ -0,0 +1,190 @@
|
|||
package runtime
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/containerd/containerd/specs"
|
||||
ocs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
)
|
||||
|
||||
func findCgroupMountpointAndRoot(pid int, subsystem string) (string, string, error) {
|
||||
f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid))
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
scanner := bufio.NewScanner(f)
|
||||
for scanner.Scan() {
|
||||
txt := scanner.Text()
|
||||
fields := strings.Split(txt, " ")
|
||||
for _, opt := range strings.Split(fields[len(fields)-1], ",") {
|
||||
if opt == subsystem {
|
||||
return fields[4], fields[3], nil
|
||||
}
|
||||
}
|
||||
}
|
||||
if err := scanner.Err(); err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
return "", "", fmt.Errorf("cgroup path for %s not found", subsystem)
|
||||
}
|
||||
|
||||
func parseCgroupFile(path string) (map[string]string, error) {
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
s := bufio.NewScanner(f)
|
||||
cgroups := make(map[string]string)
|
||||
|
||||
for s.Scan() {
|
||||
if err := s.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
text := s.Text()
|
||||
parts := strings.Split(text, ":")
|
||||
|
||||
for _, subs := range strings.Split(parts[1], ",") {
|
||||
cgroups[subs] = parts[2]
|
||||
}
|
||||
}
|
||||
return cgroups, nil
|
||||
}
|
||||
|
||||
func (c *container) OOM() (OOM, error) {
|
||||
p := c.processes[InitProcessID]
|
||||
if p == nil {
|
||||
return nil, fmt.Errorf("no init process found")
|
||||
}
|
||||
|
||||
mountpoint, hostRoot, err := findCgroupMountpointAndRoot(os.Getpid(), "memory")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cgroups, err := parseCgroupFile(fmt.Sprintf("/proc/%d/cgroup", p.pid))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
root, ok := cgroups["memory"]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("no memory cgroup for container %s", c.ID())
|
||||
}
|
||||
|
||||
// Take care of the case where we're running inside a container
|
||||
// ourself
|
||||
root = strings.TrimPrefix(root, hostRoot)
|
||||
|
||||
return c.getMemoryEventFD(filepath.Join(mountpoint, root))
|
||||
}
|
||||
|
||||
func (c *container) Pids() ([]int, error) {
|
||||
var pids []int
|
||||
args := c.runtimeArgs
|
||||
args = append(args, "ps", "--format=json", c.id)
|
||||
out, err := exec.Command(c.runtime, args...).CombinedOutput()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: %q", err.Error(), out)
|
||||
}
|
||||
if err := json.Unmarshal(out, &pids); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return pids, nil
|
||||
}
|
||||
|
||||
func u64Ptr(i uint64) *uint64 { return &i }
|
||||
func i64Ptr(i int64) *int64 { return &i }
|
||||
|
||||
func (c *container) UpdateResources(r *Resource) error {
|
||||
sr := ocs.LinuxResources{
|
||||
Memory: &ocs.LinuxMemory{
|
||||
Limit: u64Ptr(uint64(r.Memory)),
|
||||
Reservation: u64Ptr(uint64(r.MemoryReservation)),
|
||||
Swap: u64Ptr(uint64(r.MemorySwap)),
|
||||
Kernel: u64Ptr(uint64(r.KernelMemory)),
|
||||
KernelTCP: u64Ptr(uint64(r.KernelTCPMemory)),
|
||||
},
|
||||
CPU: &ocs.LinuxCPU{
|
||||
Shares: u64Ptr(uint64(r.CPUShares)),
|
||||
Quota: i64Ptr(int64(r.CPUQuota)),
|
||||
Period: u64Ptr(uint64(r.CPUPeriod)),
|
||||
Cpus: r.CpusetCpus,
|
||||
Mems: r.CpusetMems,
|
||||
},
|
||||
BlockIO: &ocs.LinuxBlockIO{
|
||||
Weight: &r.BlkioWeight,
|
||||
},
|
||||
Pids: &ocs.LinuxPids{
|
||||
Limit: r.PidsLimit,
|
||||
},
|
||||
}
|
||||
|
||||
srStr := bytes.NewBuffer(nil)
|
||||
if err := json.NewEncoder(srStr).Encode(&sr); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
args := c.runtimeArgs
|
||||
args = append(args, "update", "-r", "-", c.id)
|
||||
cmd := exec.Command(c.runtime, args...)
|
||||
cmd.Stdin = srStr
|
||||
b, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return fmt.Errorf(string(b))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getRootIDs(s *specs.Spec) (int, int, error) {
|
||||
if s == nil {
|
||||
return 0, 0, nil
|
||||
}
|
||||
var hasUserns bool
|
||||
for _, ns := range s.Linux.Namespaces {
|
||||
if ns.Type == ocs.UserNamespace {
|
||||
hasUserns = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !hasUserns {
|
||||
return 0, 0, nil
|
||||
}
|
||||
uid := hostIDFromMap(0, s.Linux.UIDMappings)
|
||||
gid := hostIDFromMap(0, s.Linux.GIDMappings)
|
||||
return uid, gid, nil
|
||||
}
|
||||
|
||||
func (c *container) getMemoryEventFD(root string) (*oom, error) {
|
||||
f, err := os.Open(filepath.Join(root, "memory.oom_control"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
fd, _, serr := syscall.RawSyscall(syscall.SYS_EVENTFD2, 0, syscall.FD_CLOEXEC, 0)
|
||||
if serr != 0 {
|
||||
return nil, serr
|
||||
}
|
||||
if err := c.writeEventFD(root, int(f.Fd()), int(fd)); err != nil {
|
||||
syscall.Close(int(fd))
|
||||
return nil, err
|
||||
}
|
||||
return &oom{
|
||||
root: root,
|
||||
id: c.id,
|
||||
eventfd: int(fd),
|
||||
}, nil
|
||||
}
|
48 vendor/github.com/containerd/containerd/runtime/container_solaris.go generated vendored Normal file
@@ -0,0 +1,48 @@
package runtime

import (
	"bytes"
	"encoding/json"
	"fmt"
	"os/exec"
	"strings"

	"github.com/containerd/containerd/specs"
	ocs "github.com/opencontainers/runtime-spec/specs-go"
)

func getRootIDs(s *specs.Spec) (int, int, error) {
	return 0, 0, nil
}

func (c *container) OOM() (OOM, error) {
	return nil, nil
}

func (c *container) Pids() ([]int, error) {
	var pids []int

	// TODO: This could be racy. Needs more investigation.
	//we get this information from runz state
	cmd := exec.Command(c.runtime, "state", c.id)
	outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer)
	cmd.Stdout, cmd.Stderr = outBuf, errBuf

	if err := cmd.Run(); err != nil {
		if strings.Contains(errBuf.String(), "Container not found") {
			return nil, errContainerNotFound
		}
		return nil, fmt.Errorf("Error is: %+v\n", err)
	}
	response := ocs.State{}
	decoder := json.NewDecoder(outBuf)
	if err := decoder.Decode(&response); err != nil {
		return nil, fmt.Errorf("unable to decode json response: %+v", err)
	}
	pids = append(pids, response.Pid)
	return pids, nil
}

func (c *container) UpdateResources(r *Resource) error {
	return nil
}
476 vendor/github.com/containerd/containerd/runtime/process.go generated vendored Normal file
@ -0,0 +1,476 @@
|
|||
package runtime
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/containerd/containerd/osutils"
|
||||
"github.com/containerd/containerd/specs"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// Process holds the operation allowed on a container's process
|
||||
type Process interface {
|
||||
io.Closer
|
||||
|
||||
// ID of the process.
|
||||
// This is either "init" when it is the container's init process or
|
||||
// it is a user provided id for the process similar to the container id
|
||||
ID() string
|
||||
// Start unblocks the associated container init process.
|
||||
// This should only be called on the process with ID "init"
|
||||
Start() error
|
||||
CloseStdin() error
|
||||
Resize(int, int) error
|
||||
// ExitFD returns the fd that provides an event when the process exits
|
||||
ExitFD() int
|
||||
// ExitStatus returns the exit status of the process or an error if it
|
||||
// has not exited
|
||||
ExitStatus() (uint32, error)
|
||||
// Spec returns the process spec that created the process
|
||||
Spec() specs.ProcessSpec
|
||||
// Signal sends the provided signal to the process
|
||||
Signal(os.Signal) error
|
||||
// Container returns the container that the process belongs to
|
||||
Container() Container
|
||||
// Stdio of the container
|
||||
Stdio() Stdio
|
||||
// SystemPid is the pid on the system
|
||||
SystemPid() int
|
||||
// State returns if the process is running or not
|
||||
State() State
|
||||
// Wait reaps the shim process if available
|
||||
Wait()
|
||||
}
|
||||
|
||||
type processConfig struct {
|
||||
id string
|
||||
root string
|
||||
processSpec specs.ProcessSpec
|
||||
spec *specs.Spec
|
||||
c *container
|
||||
stdio Stdio
|
||||
exec bool
|
||||
checkpoint string
|
||||
}
|
||||
|
||||
func newProcess(config *processConfig) (*process, error) {
|
||||
p := &process{
|
||||
root: config.root,
|
||||
id: config.id,
|
||||
container: config.c,
|
||||
spec: config.processSpec,
|
||||
stdio: config.stdio,
|
||||
cmdDoneCh: make(chan struct{}),
|
||||
state: Running,
|
||||
}
|
||||
uid, gid, err := getRootIDs(config.spec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f, err := os.Create(filepath.Join(config.root, "process.json"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
ps := ProcessState{
|
||||
ProcessSpec: config.processSpec,
|
||||
Exec: config.exec,
|
||||
PlatformProcessState: PlatformProcessState{
|
||||
Checkpoint: config.checkpoint,
|
||||
RootUID: uid,
|
||||
RootGID: gid,
|
||||
},
|
||||
Stdin: config.stdio.Stdin,
|
||||
Stdout: config.stdio.Stdout,
|
||||
Stderr: config.stdio.Stderr,
|
||||
RuntimeArgs: config.c.runtimeArgs,
|
||||
NoPivotRoot: config.c.noPivotRoot,
|
||||
}
|
||||
|
||||
if err := json.NewEncoder(f).Encode(ps); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
exit, err := getExitPipe(filepath.Join(config.root, ExitFile))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
control, err := getControlPipe(filepath.Join(config.root, ControlFile))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p.exitPipe = exit
|
||||
p.controlPipe = control
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func loadProcess(root, id string, c *container, s *ProcessState) (*process, error) {
|
||||
p := &process{
|
||||
root: root,
|
||||
id: id,
|
||||
container: c,
|
||||
spec: s.ProcessSpec,
|
||||
stdio: Stdio{
|
||||
Stdin: s.Stdin,
|
||||
Stdout: s.Stdout,
|
||||
Stderr: s.Stderr,
|
||||
},
|
||||
state: Stopped,
|
||||
}
|
||||
|
||||
startTime, err := ioutil.ReadFile(filepath.Join(p.root, StartTimeFile))
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return nil, err
|
||||
}
|
||||
p.startTime = string(startTime)
|
||||
|
||||
if _, err := p.getPidFromFile(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if _, err := p.ExitStatus(); err != nil {
|
||||
if err == ErrProcessNotExited {
|
||||
exit, err := getExitPipe(filepath.Join(root, ExitFile))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p.exitPipe = exit
|
||||
|
||||
control, err := getControlPipe(filepath.Join(root, ControlFile))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p.controlPipe = control
|
||||
|
||||
p.state = Running
|
||||
return p, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func readProcStatField(pid int, field int) (string, error) {
|
||||
data, err := ioutil.ReadFile(filepath.Join(string(filepath.Separator), "proc", strconv.Itoa(pid), "stat"))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if field > 2 {
|
||||
// First, split out the name since it could contain spaces.
|
||||
parts := strings.Split(string(data), ") ")
|
||||
// Now split out the rest, we end up with 2 fields less
|
||||
parts = strings.Split(parts[1], " ")
|
||||
return parts[field-2-1], nil // field count start at 1 in manual
|
||||
}
|
||||
|
||||
parts := strings.Split(string(data), " (")
|
||||
|
||||
if field == 1 {
|
||||
return parts[0], nil
|
||||
}
|
||||
|
||||
parts = strings.Split(parts[1], ") ")
|
||||
return parts[0], nil
|
||||
}
|
||||
|
||||
type process struct {
|
||||
root string
|
||||
id string
|
||||
pid int
|
||||
exitPipe *os.File
|
||||
controlPipe *os.File
|
||||
container *container
|
||||
spec specs.ProcessSpec
|
||||
stdio Stdio
|
||||
cmd *exec.Cmd
|
||||
cmdSuccess bool
|
||||
cmdDoneCh chan struct{}
|
||||
state State
|
||||
stateLock sync.Mutex
|
||||
startTime string
|
||||
}
|
||||
|
||||
func (p *process) ID() string {
|
||||
return p.id
|
||||
}
|
||||
|
||||
func (p *process) Container() Container {
|
||||
return p.container
|
||||
}
|
||||
|
||||
func (p *process) SystemPid() int {
|
||||
return p.pid
|
||||
}
|
||||
|
||||
// ExitFD returns the fd of the exit pipe
|
||||
func (p *process) ExitFD() int {
|
||||
return int(p.exitPipe.Fd())
|
||||
}
|
||||
|
||||
func (p *process) CloseStdin() error {
|
||||
_, err := fmt.Fprintf(p.controlPipe, "%d %d %d\n", 0, 0, 0)
|
||||
return err
|
||||
}
|
||||
|
||||
func (p *process) Resize(w, h int) error {
|
||||
_, err := fmt.Fprintf(p.controlPipe, "%d %d %d\n", 1, w, h)
|
||||
return err
|
||||
}
|
||||
|
||||
func (p *process) updateExitStatusFile(status uint32) (uint32, error) {
|
||||
p.stateLock.Lock()
|
||||
p.state = Stopped
|
||||
p.stateLock.Unlock()
|
||||
err := ioutil.WriteFile(filepath.Join(p.root, ExitStatusFile), []byte(fmt.Sprintf("%d", status)), 0644)
|
||||
return status, err
|
||||
}
|
||||
|
||||
func (p *process) handleSigkilledShim(rst uint32, rerr error) (uint32, error) {
|
||||
if p.cmd == nil || p.cmd.Process == nil {
|
||||
e := unix.Kill(p.pid, 0)
|
||||
if e == syscall.ESRCH {
|
||||
logrus.Warnf("containerd: %s:%s (pid %d) does not exist", p.container.id, p.id, p.pid)
|
||||
// The process died while containerd was down (probably of
|
||||
// SIGKILL, but no way to be sure)
|
||||
return p.updateExitStatusFile(UnknownStatus)
|
||||
}
|
||||
|
||||
// If it's not the same process, just mark it stopped and set
|
||||
// the status to the UnknownStatus value (i.e. 255)
|
||||
if same, err := p.isSameProcess(); !same {
|
||||
logrus.Warnf("containerd: %s:%s (pid %d) is not the same process anymore (%v)", p.container.id, p.id, p.pid, err)
|
||||
// Create the file so we get the exit event generated once monitor kicks in
|
||||
// without having to go through all this process again
|
||||
return p.updateExitStatusFile(UnknownStatus)
|
||||
}
|
||||
|
||||
ppid, err := readProcStatField(p.pid, 4)
|
||||
if err != nil {
|
||||
return rst, fmt.Errorf("could not check process ppid: %v (%v)", err, rerr)
|
||||
}
|
||||
if ppid == "1" {
|
||||
logrus.Warnf("containerd: %s:%s shim died, killing associated process", p.container.id, p.id)
|
||||
unix.Kill(p.pid, syscall.SIGKILL)
|
||||
if err != nil && err != syscall.ESRCH {
|
||||
return UnknownStatus, fmt.Errorf("containerd: unable to SIGKILL %s:%s (pid %v): %v", p.container.id, p.id, p.pid, err)
|
||||
}
|
||||
|
||||
// wait for the process to die
|
||||
for {
|
||||
e := unix.Kill(p.pid, 0)
|
||||
if e == syscall.ESRCH {
|
||||
break
|
||||
}
|
||||
time.Sleep(5 * time.Millisecond)
|
||||
}
|
||||
// Create the file so we get the exit event generated once monitor kicks in
|
||||
// without having to go through all this process again
|
||||
return p.updateExitStatusFile(128 + uint32(syscall.SIGKILL))
|
||||
}
|
||||
|
||||
return rst, rerr
|
||||
}
|
||||
|
||||
// Possible that the shim was SIGKILLED
|
||||
e := unix.Kill(p.cmd.Process.Pid, 0)
|
||||
if e != syscall.ESRCH {
|
||||
return rst, rerr
|
||||
}
|
||||
|
||||
// Ensure we got the shim ProcessState
|
||||
<-p.cmdDoneCh
|
||||
|
||||
shimStatus := p.cmd.ProcessState.Sys().(syscall.WaitStatus)
|
||||
if shimStatus.Signaled() && shimStatus.Signal() == syscall.SIGKILL {
|
||||
logrus.Debugf("containerd: ExitStatus(container: %s, process: %s): shim was SIGKILL'ed reaping its child with pid %d", p.container.id, p.id, p.pid)
|
||||
|
||||
rerr = nil
|
||||
rst = 128 + uint32(shimStatus.Signal())
|
||||
|
||||
p.stateLock.Lock()
|
||||
p.state = Stopped
|
||||
p.stateLock.Unlock()
|
||||
}
|
||||
|
||||
return rst, rerr
|
||||
}
|
||||
|
||||
func (p *process) ExitStatus() (rst uint32, rerr error) {
|
||||
data, err := ioutil.ReadFile(filepath.Join(p.root, ExitStatusFile))
|
||||
defer func() {
|
||||
if rerr != nil {
|
||||
rst, rerr = p.handleSigkilledShim(rst, rerr)
|
||||
}
|
||||
}()
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return UnknownStatus, ErrProcessNotExited
|
||||
}
|
||||
return UnknownStatus, err
|
||||
}
|
||||
if len(data) == 0 {
|
||||
return UnknownStatus, ErrProcessNotExited
|
||||
}
|
||||
p.stateLock.Lock()
|
||||
p.state = Stopped
|
||||
p.stateLock.Unlock()
|
||||
|
||||
i, err := strconv.ParseUint(string(data), 10, 32)
|
||||
return uint32(i), err
|
||||
}
|
||||
|
||||
func (p *process) Spec() specs.ProcessSpec {
|
||||
return p.spec
|
||||
}
|
||||
|
||||
func (p *process) Stdio() Stdio {
|
||||
return p.stdio
|
||||
}
|
||||
|
||||
// Close closes any open files and/or resources on the process
|
||||
func (p *process) Close() error {
|
||||
err := p.exitPipe.Close()
|
||||
if cerr := p.controlPipe.Close(); err == nil {
|
||||
err = cerr
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (p *process) State() State {
|
||||
p.stateLock.Lock()
|
||||
defer p.stateLock.Unlock()
|
||||
return p.state
|
||||
}
|
||||
|
||||
func (p *process) readStartTime() (string, error) {
|
||||
return readProcStatField(p.pid, 22)
|
||||
}
|
||||
|
||||
func (p *process) saveStartTime() error {
|
||||
startTime, err := p.readStartTime()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
p.startTime = startTime
|
||||
return ioutil.WriteFile(filepath.Join(p.root, StartTimeFile), []byte(startTime), 0644)
|
||||
}
|
||||
|
||||
func (p *process) isSameProcess() (bool, error) {
|
||||
if p.pid == 0 {
|
||||
_, err := p.getPidFromFile()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
// for backward compat assume it's the same if startTime wasn't set
|
||||
if p.startTime == "" {
|
||||
// Sometimes the process dies before we can get the starttime,
|
||||
// check that the process actually exists
|
||||
if err := unix.Kill(p.pid, 0); err != syscall.ESRCH {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
startTime, err := p.readStartTime()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return startTime == p.startTime, nil
|
||||
}
|
||||
|
||||
// Wait will reap the shim process
|
||||
func (p *process) Wait() {
|
||||
if p.cmdDoneCh != nil {
|
||||
<-p.cmdDoneCh
|
||||
}
|
||||
}
|
||||
|
||||
func getExitPipe(path string) (*os.File, error) {
|
||||
if err := unix.Mkfifo(path, 0755); err != nil && !os.IsExist(err) {
|
||||
return nil, err
|
||||
}
|
||||
// add NONBLOCK in case the other side has already closed or else
|
||||
// this function would never return
|
||||
return os.OpenFile(path, syscall.O_RDONLY|syscall.O_NONBLOCK, 0)
|
||||
}
|
||||
|
||||
func getControlPipe(path string) (*os.File, error) {
|
||||
if err := unix.Mkfifo(path, 0755); err != nil && !os.IsExist(err) {
|
||||
return nil, err
|
||||
}
|
||||
return os.OpenFile(path, syscall.O_RDWR|syscall.O_NONBLOCK, 0)
|
||||
}
|
||||
|
||||
// Signal sends the provided signal to the process
|
||||
func (p *process) Signal(s os.Signal) error {
|
||||
return syscall.Kill(p.pid, s.(syscall.Signal))
|
||||
}
|
||||
|
||||
// Start unblocks the associated container init process.
|
||||
// This should only be called on the process with ID "init"
|
||||
func (p *process) Start() error {
|
||||
if p.ID() == InitProcessID {
|
||||
var (
|
||||
errC = make(chan error, 1)
|
||||
args = append(p.container.runtimeArgs, "start", p.container.id)
|
||||
cmd = exec.Command(p.container.runtime, args...)
|
||||
)
|
||||
go func() {
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
errC <- fmt.Errorf("%s: %q", err.Error(), out)
|
||||
}
|
||||
errC <- nil
|
||||
}()
|
||||
select {
|
||||
case err := <-errC:
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case <-p.cmdDoneCh:
|
||||
if !p.cmdSuccess {
|
||||
if cmd.Process != nil {
|
||||
cmd.Process.Kill()
|
||||
}
|
||||
cmd.Wait()
|
||||
return ErrShimExited
|
||||
}
|
||||
err := <-errC
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete deletes any resources held by the container
|
||||
func (p *process) Delete() error {
|
||||
var (
|
||||
args = append(p.container.runtimeArgs, "delete", "-f", p.container.id)
|
||||
cmd = exec.Command(p.container.runtime, args...)
|
||||
)
|
||||
|
||||
cmd.SysProcAttr = osutils.SetPDeathSig()
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: %v", out, err)
|
||||
}
|
||||
return nil
|
||||
}
|
22 vendor/github.com/containerd/containerd/runtime/process_linux.go generated vendored Normal file
@@ -0,0 +1,22 @@
// +build linux

package runtime

import (
	"io/ioutil"
	"path/filepath"
	"strconv"
)

func (p *process) getPidFromFile() (int, error) {
	data, err := ioutil.ReadFile(filepath.Join(p.root, "pid"))
	if err != nil {
		return -1, err
	}
	i, err := strconv.Atoi(string(data))
	if err != nil {
		return -1, errInvalidPidInt
	}
	p.pid = i
	return i, nil
}
34 vendor/github.com/containerd/containerd/runtime/process_solaris.go generated vendored Normal file
@@ -0,0 +1,34 @@
// +build solaris

package runtime

import (
	"bytes"
	"encoding/json"
	"fmt"
	"os/exec"

	runtimespec "github.com/opencontainers/runtime-spec/specs-go"
)

// On Solaris we already have a state file maintained by the framework.
// This is read by runz state. We just call that instead of maintaining
// a separate file.
func (p *process) getPidFromFile() (int, error) {
	//we get this information from runz state
	cmd := exec.Command("runc", "state", p.container.ID())
	outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer)
	cmd.Stdout, cmd.Stderr = outBuf, errBuf

	if err := cmd.Run(); err != nil {
		// TODO: Improve logic
		return -1, errContainerNotFound
	}
	response := runtimespec.State{}
	decoder := json.NewDecoder(outBuf)
	if err := decoder.Decode(&response); err != nil {
		return -1, fmt.Errorf("unable to decode json response: %+v", err)
	}
	p.pid = response.Pid
	return p.pid, nil
}
132  vendor/github.com/containerd/containerd/runtime/runtime.go  generated vendored Normal file
|
@@ -0,0 +1,132 @@
|
|||
package runtime
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/containerd/specs"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrContainerExited is returned when access to an exited
|
||||
// container is attempted
|
||||
ErrContainerExited = errors.New("containerd: container has exited")
|
||||
// ErrProcessNotExited is returned when trying to retrieve the exit
|
||||
// status of an alive process
|
||||
ErrProcessNotExited = errors.New("containerd: process has not exited")
|
||||
// ErrContainerNotStarted is returned when a container fails to
|
||||
// start without error from the shim or the OCI runtime
|
||||
ErrContainerNotStarted = errors.New("containerd: container not started")
|
||||
// ErrContainerStartTimeout is returned if a container takes too
|
||||
// long to start
|
||||
ErrContainerStartTimeout = errors.New("containerd: container did not start before the specified timeout")
|
||||
// ErrShimExited is returned if the shim or the container's init process
|
||||
// exits before completing
|
||||
ErrShimExited = errors.New("containerd: shim exited before container process was started")
|
||||
|
||||
errNoPidFile = errors.New("containerd: no process pid file found")
|
||||
errInvalidPidInt = errors.New("containerd: process pid is invalid")
|
||||
errContainerNotFound = errors.New("containerd: container not found")
|
||||
errNotImplemented = errors.New("containerd: not implemented")
|
||||
)
|
||||
|
||||
const (
|
||||
// ExitFile holds the name of the pipe used to monitor process
|
||||
// exit
|
||||
ExitFile = "exit"
|
||||
// ExitStatusFile holds the name of the file where the container
|
||||
// exit code is to be written
|
||||
ExitStatusFile = "exitStatus"
|
||||
// StateFile holds the name of the file where the container state
|
||||
// is written
|
||||
StateFile = "state.json"
|
||||
// ControlFile holds the name of the pipe used to control the shim
|
||||
ControlFile = "control"
|
||||
// InitProcessID holds the special ID used for the very first
|
||||
// container's process
|
||||
InitProcessID = "init"
|
||||
// StartTimeFile holds the name of the file in which the process
|
||||
// start time is saved
|
||||
StartTimeFile = "starttime"
|
||||
|
||||
// UnknownStatus is the value returned when a process exit
|
||||
// status cannot be determined
|
||||
UnknownStatus = 255
|
||||
)
|
||||
|
||||
// Checkpoint holds information regarding a container checkpoint
|
||||
type Checkpoint struct {
|
||||
// Timestamp is the time that the checkpoint happened
|
||||
Created time.Time `json:"created"`
|
||||
// Name is the name of the checkpoint
|
||||
Name string `json:"name"`
|
||||
// TCP checkpoints open tcp connections
|
||||
TCP bool `json:"tcp"`
|
||||
// UnixSockets persists unix sockets in the checkpoint
|
||||
UnixSockets bool `json:"unixSockets"`
|
||||
// Shell persists tty sessions in the checkpoint
|
||||
Shell bool `json:"shell"`
|
||||
// Exit exits the container after the checkpoint is finished
|
||||
Exit bool `json:"exit"`
|
||||
// EmptyNS tells CRIU to omit a specified namespace
|
||||
EmptyNS []string `json:"emptyNS,omitempty"`
|
||||
}
|
||||
|
||||
// PlatformProcessState contains platform-specific fields in the ProcessState structure
|
||||
type PlatformProcessState struct {
|
||||
Checkpoint string `json:"checkpoint"`
|
||||
RootUID int `json:"rootUID"`
|
||||
RootGID int `json:"rootGID"`
|
||||
}
|
||||
|
||||
// State represents a container state
|
||||
type State string
|
||||
|
||||
// Resource regroups the various container limits that can be updated
|
||||
type Resource struct {
|
||||
CPUShares int64
|
||||
BlkioWeight uint16
|
||||
CPUPeriod int64
|
||||
CPUQuota int64
|
||||
CpusetCpus string
|
||||
CpusetMems string
|
||||
KernelMemory int64
|
||||
KernelTCPMemory int64
|
||||
Memory int64
|
||||
MemoryReservation int64
|
||||
MemorySwap int64
|
||||
PidsLimit int64
|
||||
}
|
||||
|
||||
// Possible container states
|
||||
const (
|
||||
Paused = State("paused")
|
||||
Stopped = State("stopped")
|
||||
Running = State("running")
|
||||
)
|
||||
|
||||
type state struct {
|
||||
Bundle string `json:"bundle"`
|
||||
Labels []string `json:"labels"`
|
||||
Stdin string `json:"stdin"`
|
||||
Stdout string `json:"stdout"`
|
||||
Stderr string `json:"stderr"`
|
||||
Runtime string `json:"runtime"`
|
||||
RuntimeArgs []string `json:"runtimeArgs"`
|
||||
Shim string `json:"shim"`
|
||||
NoPivotRoot bool `json:"noPivotRoot"`
|
||||
}
|
||||
|
||||
// ProcessState holds the process OCI specs along with various fields
|
||||
// required by containerd
|
||||
type ProcessState struct {
|
||||
specs.ProcessSpec
|
||||
Exec bool `json:"exec"`
|
||||
Stdin string `json:"containerdStdin"`
|
||||
Stdout string `json:"containerdStdout"`
|
||||
Stderr string `json:"containerdStderr"`
|
||||
RuntimeArgs []string `json:"runtimeArgs"`
|
||||
NoPivotRoot bool `json:"noPivotRoot"`
|
||||
|
||||
PlatformProcessState
|
||||
}
|
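The typed State constants defined above are what let callers compare a container's status without hard-coding string literals. A minimal sketch (hypothetical caller code, assuming the vendored import path) of converting a raw API status string and testing it:

package main

import (
	"fmt"

	"github.com/containerd/containerd/runtime"
)

// isStopped converts a raw status string (as returned by the containerd
// gRPC API) into the typed State and compares it against the constant.
func isStopped(status string) bool {
	return runtime.State(status) == runtime.Stopped
}

func main() {
	fmt.Println(isStopped("stopped")) // true
	fmt.Println(isStopped("running")) // false
}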
87  vendor/github.com/containerd/containerd/runtime/stats.go  generated vendored Normal file
|
@@ -0,0 +1,87 @@
|
|||
package runtime
|
||||
|
||||
import "time"
|
||||
|
||||
// Stat holds a container's statistics
|
||||
type Stat struct {
|
||||
// Timestamp is the time that the statistics were collected
|
||||
Timestamp time.Time
|
||||
CPU CPU `json:"cpu"`
|
||||
Memory Memory `json:"memory"`
|
||||
Pids Pids `json:"pids"`
|
||||
Blkio Blkio `json:"blkio"`
|
||||
Hugetlb map[string]Hugetlb `json:"hugetlb"`
|
||||
}
|
||||
|
||||
// Hugetlb holds information regarding a container's huge TLB usage
|
||||
type Hugetlb struct {
|
||||
Usage uint64 `json:"usage,omitempty"`
|
||||
Max uint64 `json:"max,omitempty"`
|
||||
Failcnt uint64 `json:"failcnt"`
|
||||
}
|
||||
|
||||
// BlkioEntry represents a single record for a Blkio stat
|
||||
type BlkioEntry struct {
|
||||
Major uint64 `json:"major,omitempty"`
|
||||
Minor uint64 `json:"minor,omitempty"`
|
||||
Op string `json:"op,omitempty"`
|
||||
Value uint64 `json:"value,omitempty"`
|
||||
}
|
||||
|
||||
// Blkio regroups all the Blkio related stats
|
||||
type Blkio struct {
|
||||
IoServiceBytesRecursive []BlkioEntry `json:"ioServiceBytesRecursive,omitempty"`
|
||||
IoServicedRecursive []BlkioEntry `json:"ioServicedRecursive,omitempty"`
|
||||
IoQueuedRecursive []BlkioEntry `json:"ioQueueRecursive,omitempty"`
|
||||
IoServiceTimeRecursive []BlkioEntry `json:"ioServiceTimeRecursive,omitempty"`
|
||||
IoWaitTimeRecursive []BlkioEntry `json:"ioWaitTimeRecursive,omitempty"`
|
||||
IoMergedRecursive []BlkioEntry `json:"ioMergedRecursive,omitempty"`
|
||||
IoTimeRecursive []BlkioEntry `json:"ioTimeRecursive,omitempty"`
|
||||
SectorsRecursive []BlkioEntry `json:"sectorsRecursive,omitempty"`
|
||||
}
|
||||
|
||||
// Pids holds the pid usage statistics of the machine
|
||||
type Pids struct {
|
||||
Current uint64 `json:"current,omitempty"`
|
||||
Limit uint64 `json:"limit,omitempty"`
|
||||
}
|
||||
|
||||
// Throttling holds CPU throttling information
|
||||
type Throttling struct {
|
||||
Periods uint64 `json:"periods,omitempty"`
|
||||
ThrottledPeriods uint64 `json:"throttledPeriods,omitempty"`
|
||||
ThrottledTime uint64 `json:"throttledTime,omitempty"`
|
||||
}
|
||||
|
||||
// CPUUsage holds information regarding CPU usage
|
||||
type CPUUsage struct {
|
||||
// Units: nanoseconds.
|
||||
Total uint64 `json:"total,omitempty"`
|
||||
Percpu []uint64 `json:"percpu,omitempty"`
|
||||
Kernel uint64 `json:"kernel"`
|
||||
User uint64 `json:"user"`
|
||||
}
|
||||
|
||||
// CPU regroups both CPU usage and throttling information
|
||||
type CPU struct {
|
||||
Usage CPUUsage `json:"usage,omitempty"`
|
||||
Throttling Throttling `json:"throttling,omitempty"`
|
||||
}
|
||||
|
||||
// MemoryEntry regroups statistics about a given type of memory
|
||||
type MemoryEntry struct {
|
||||
Limit uint64 `json:"limit"`
|
||||
Usage uint64 `json:"usage,omitempty"`
|
||||
Max uint64 `json:"max,omitempty"`
|
||||
Failcnt uint64 `json:"failcnt"`
|
||||
}
|
||||
|
||||
// Memory holds information regarding the different types of memory available
|
||||
type Memory struct {
|
||||
Cache uint64 `json:"cache,omitempty"`
|
||||
Usage MemoryEntry `json:"usage,omitempty"`
|
||||
Swap MemoryEntry `json:"swap,omitempty"`
|
||||
Kernel MemoryEntry `json:"kernel,omitempty"`
|
||||
KernelTCP MemoryEntry `json:"kernelTCP,omitempty"`
|
||||
Raw map[string]uint64 `json:"raw,omitempty"`
|
||||
}
|
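Every field of these statistics structs carries a json tag (except Stat.Timestamp), so the whole tree serializes cleanly. A small sketch (hypothetical program, assuming the vendored import path) that builds a Stat and prints its JSON encoding:

package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/containerd/containerd/runtime"
)

func main() {
	s := runtime.Stat{
		Timestamp: time.Now(),
		CPU: runtime.CPU{
			Usage: runtime.CPUUsage{Total: 1500000, Kernel: 500000, User: 1000000},
		},
		Memory: runtime.Memory{
			Usage: runtime.MemoryEntry{Limit: 1 << 30, Usage: 64 << 20},
		},
		Pids: runtime.Pids{Current: 3, Limit: 100},
	}
	out, err := json.MarshalIndent(s, "", "  ")
	if err != nil {
		panic(err)
	}
	// Timestamp has no json tag above, so it is emitted as "Timestamp".
	fmt.Println(string(out))
}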
12  vendor/github.com/containerd/containerd/specs/spec_linux.go  generated vendored Normal file
|
@@ -0,0 +1,12 @@
|
|||
package specs
|
||||
|
||||
import oci "github.com/opencontainers/runtime-spec/specs-go"
|
||||
|
||||
type (
|
||||
// ProcessSpec aliases the platform process specs
|
||||
ProcessSpec oci.Process
|
||||
// Spec aliases the platform oci spec
|
||||
Spec oci.Spec
|
||||
// Rlimit aliases the platform resource limit
|
||||
Rlimit oci.LinuxRlimit
|
||||
)
|
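ProcessSpec, Spec and Rlimit are named types defined on the OCI runtime-spec structs rather than aliases, so moving between the two requires an explicit conversion. A short sketch (hypothetical, assuming the vendored import paths):

package main

import (
	"fmt"

	"github.com/containerd/containerd/specs"
	oci "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	// Start from an OCI process definition...
	p := oci.Process{Args: []string{"/bin/sh"}, Cwd: "/"}

	// ...and convert it into containerd's platform-neutral ProcessSpec.
	ps := specs.ProcessSpec(p)
	fmt.Println(ps.Args, ps.Cwd)

	// Converting back is an explicit conversion as well.
	back := oci.Process(ps)
	fmt.Println(back.Args)
}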
10  vendor/github.com/containerd/containerd/specs/spec_solaris.go  generated vendored Normal file
|
@@ -0,0 +1,10 @@
|
|||
package specs
|
||||
|
||||
import ocs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
|
||||
type (
|
||||
// ProcessSpec aliases the platform process specs
|
||||
ProcessSpec ocs.Process
|
||||
// Spec aliases the platform oci spec
|
||||
Spec ocs.Spec
|
||||
)
|
6  vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unix.go  generated vendored
|
@@ -64,12 +64,12 @@ func IsNamespaceSupported(ns NamespaceType) bool {
|
|||
|
||||
func NamespaceTypes() []NamespaceType {
|
||||
return []NamespaceType{
|
||||
NEWUSER, // Keep user NS always first, don't move it.
|
||||
NEWIPC,
|
||||
NEWUTS,
|
||||
NEWNET,
|
||||
NEWPID,
|
||||
NEWNS,
|
||||
NEWUTS,
|
||||
NEWIPC,
|
||||
NEWUSER,
|
||||
}
|
||||
}
|
||||
|
||||
|
|
27  vendor/github.com/pmezard/go-difflib/LICENSE  generated vendored
|
@@ -1,27 +0,0 @@
|
|||
Copyright (c) 2013, Patrick Mezard
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
The names of its contributors may not be used to endorse or promote
|
||||
products derived from this software without specific prior written
|
||||
permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
||||
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
|
||||
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
772  vendor/github.com/pmezard/go-difflib/difflib/difflib.go  generated vendored
|
@@ -1,772 +0,0 @@
|
|||
// Package difflib is a partial port of the Python difflib module.
|
||||
//
|
||||
// It provides tools to compare sequences of strings and generate textual diffs.
|
||||
//
|
||||
// The following class and functions have been ported:
|
||||
//
|
||||
// - SequenceMatcher
|
||||
//
|
||||
// - unified_diff
|
||||
//
|
||||
// - context_diff
|
||||
//
|
||||
// Getting unified diffs was the main goal of the port. Keep in mind this code
|
||||
// is mostly suitable to output text differences in a human friendly way, there
|
||||
// are no guarantees generated diffs are consumable by patch(1).
|
||||
package difflib
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func min(a, b int) int {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func max(a, b int) int {
|
||||
if a > b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func calculateRatio(matches, length int) float64 {
|
||||
if length > 0 {
|
||||
return 2.0 * float64(matches) / float64(length)
|
||||
}
|
||||
return 1.0
|
||||
}
|
||||
|
||||
type Match struct {
|
||||
A int
|
||||
B int
|
||||
Size int
|
||||
}
|
||||
|
||||
type OpCode struct {
|
||||
Tag byte
|
||||
I1 int
|
||||
I2 int
|
||||
J1 int
|
||||
J2 int
|
||||
}
|
||||
|
||||
// SequenceMatcher compares sequence of strings. The basic
|
||||
// algorithm predates, and is a little fancier than, an algorithm
|
||||
// published in the late 1980's by Ratcliff and Obershelp under the
|
||||
// hyperbolic name "gestalt pattern matching". The basic idea is to find
|
||||
// the longest contiguous matching subsequence that contains no "junk"
|
||||
// elements (R-O doesn't address junk). The same idea is then applied
|
||||
// recursively to the pieces of the sequences to the left and to the right
|
||||
// of the matching subsequence. This does not yield minimal edit
|
||||
// sequences, but does tend to yield matches that "look right" to people.
|
||||
//
|
||||
// SequenceMatcher tries to compute a "human-friendly diff" between two
|
||||
// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
|
||||
// longest *contiguous* & junk-free matching subsequence. That's what
|
||||
// catches peoples' eyes. The Windows(tm) windiff has another interesting
|
||||
// notion, pairing up elements that appear uniquely in each sequence.
|
||||
// That, and the method here, appear to yield more intuitive difference
|
||||
// reports than does diff. This method appears to be the least vulnerable
|
||||
// to synching up on blocks of "junk lines", though (like blank lines in
|
||||
// ordinary text files, or maybe "<P>" lines in HTML files). That may be
|
||||
// because this is the only method of the 3 that has a *concept* of
|
||||
// "junk" <wink>.
|
||||
//
|
||||
// Timing: Basic R-O is cubic time worst case and quadratic time expected
|
||||
// case. SequenceMatcher is quadratic time for the worst case and has
|
||||
// expected-case behavior dependent in a complicated way on how many
|
||||
// elements the sequences have in common; best case time is linear.
|
||||
type SequenceMatcher struct {
|
||||
a []string
|
||||
b []string
|
||||
b2j map[string][]int
|
||||
IsJunk func(string) bool
|
||||
autoJunk bool
|
||||
bJunk map[string]struct{}
|
||||
matchingBlocks []Match
|
||||
fullBCount map[string]int
|
||||
bPopular map[string]struct{}
|
||||
opCodes []OpCode
|
||||
}
|
||||
|
||||
func NewMatcher(a, b []string) *SequenceMatcher {
|
||||
m := SequenceMatcher{autoJunk: true}
|
||||
m.SetSeqs(a, b)
|
||||
return &m
|
||||
}
|
||||
|
||||
func NewMatcherWithJunk(a, b []string, autoJunk bool,
|
||||
isJunk func(string) bool) *SequenceMatcher {
|
||||
|
||||
m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk}
|
||||
m.SetSeqs(a, b)
|
||||
return &m
|
||||
}
|
||||
|
||||
// Set two sequences to be compared.
|
||||
func (m *SequenceMatcher) SetSeqs(a, b []string) {
|
||||
m.SetSeq1(a)
|
||||
m.SetSeq2(b)
|
||||
}
|
||||
|
||||
// Set the first sequence to be compared. The second sequence to be compared is
|
||||
// not changed.
|
||||
//
|
||||
// SequenceMatcher computes and caches detailed information about the second
|
||||
// sequence, so if you want to compare one sequence S against many sequences,
|
||||
// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other
|
||||
// sequences.
|
||||
//
|
||||
// See also SetSeqs() and SetSeq2().
|
||||
func (m *SequenceMatcher) SetSeq1(a []string) {
|
||||
if &a == &m.a {
|
||||
return
|
||||
}
|
||||
m.a = a
|
||||
m.matchingBlocks = nil
|
||||
m.opCodes = nil
|
||||
}
|
||||
|
||||
// Set the second sequence to be compared. The first sequence to be compared is
|
||||
// not changed.
|
||||
func (m *SequenceMatcher) SetSeq2(b []string) {
|
||||
if &b == &m.b {
|
||||
return
|
||||
}
|
||||
m.b = b
|
||||
m.matchingBlocks = nil
|
||||
m.opCodes = nil
|
||||
m.fullBCount = nil
|
||||
m.chainB()
|
||||
}
|
||||
|
||||
func (m *SequenceMatcher) chainB() {
|
||||
// Populate line -> index mapping
|
||||
b2j := map[string][]int{}
|
||||
for i, s := range m.b {
|
||||
indices := b2j[s]
|
||||
indices = append(indices, i)
|
||||
b2j[s] = indices
|
||||
}
|
||||
|
||||
// Purge junk elements
|
||||
m.bJunk = map[string]struct{}{}
|
||||
if m.IsJunk != nil {
|
||||
junk := m.bJunk
|
||||
for s, _ := range b2j {
|
||||
if m.IsJunk(s) {
|
||||
junk[s] = struct{}{}
|
||||
}
|
||||
}
|
||||
for s, _ := range junk {
|
||||
delete(b2j, s)
|
||||
}
|
||||
}
|
||||
|
||||
// Purge remaining popular elements
|
||||
popular := map[string]struct{}{}
|
||||
n := len(m.b)
|
||||
if m.autoJunk && n >= 200 {
|
||||
ntest := n/100 + 1
|
||||
for s, indices := range b2j {
|
||||
if len(indices) > ntest {
|
||||
popular[s] = struct{}{}
|
||||
}
|
||||
}
|
||||
for s, _ := range popular {
|
||||
delete(b2j, s)
|
||||
}
|
||||
}
|
||||
m.bPopular = popular
|
||||
m.b2j = b2j
|
||||
}
|
||||
|
||||
func (m *SequenceMatcher) isBJunk(s string) bool {
|
||||
_, ok := m.bJunk[s]
|
||||
return ok
|
||||
}
|
||||
|
||||
// Find longest matching block in a[alo:ahi] and b[blo:bhi].
|
||||
//
|
||||
// If IsJunk is not defined:
|
||||
//
|
||||
// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
|
||||
// alo <= i <= i+k <= ahi
|
||||
// blo <= j <= j+k <= bhi
|
||||
// and for all (i',j',k') meeting those conditions,
|
||||
// k >= k'
|
||||
// i <= i'
|
||||
// and if i == i', j <= j'
|
||||
//
|
||||
// In other words, of all maximal matching blocks, return one that
|
||||
// starts earliest in a, and of all those maximal matching blocks that
|
||||
// start earliest in a, return the one that starts earliest in b.
|
||||
//
|
||||
// If IsJunk is defined, first the longest matching block is
|
||||
// determined as above, but with the additional restriction that no
|
||||
// junk element appears in the block. Then that block is extended as
|
||||
// far as possible by matching (only) junk elements on both sides. So
|
||||
// the resulting block never matches on junk except as identical junk
|
||||
// happens to be adjacent to an "interesting" match.
|
||||
//
|
||||
// If no blocks match, return (alo, blo, 0).
|
||||
func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match {
|
||||
// CAUTION: stripping common prefix or suffix would be incorrect.
|
||||
// E.g.,
|
||||
// ab
|
||||
// acab
|
||||
// Longest matching block is "ab", but if common prefix is
|
||||
// stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
|
||||
// strip, so ends up claiming that ab is changed to acab by
|
||||
// inserting "ca" in the middle. That's minimal but unintuitive:
|
||||
// "it's obvious" that someone inserted "ac" at the front.
|
||||
// Windiff ends up at the same place as diff, but by pairing up
|
||||
// the unique 'b's and then matching the first two 'a's.
|
||||
besti, bestj, bestsize := alo, blo, 0
|
||||
|
||||
// find longest junk-free match
|
||||
// during an iteration of the loop, j2len[j] = length of longest
|
||||
// junk-free match ending with a[i-1] and b[j]
|
||||
j2len := map[int]int{}
|
||||
for i := alo; i != ahi; i++ {
|
||||
// look at all instances of a[i] in b; note that because
|
||||
// b2j has no junk keys, the loop is skipped if a[i] is junk
|
||||
newj2len := map[int]int{}
|
||||
for _, j := range m.b2j[m.a[i]] {
|
||||
// a[i] matches b[j]
|
||||
if j < blo {
|
||||
continue
|
||||
}
|
||||
if j >= bhi {
|
||||
break
|
||||
}
|
||||
k := j2len[j-1] + 1
|
||||
newj2len[j] = k
|
||||
if k > bestsize {
|
||||
besti, bestj, bestsize = i-k+1, j-k+1, k
|
||||
}
|
||||
}
|
||||
j2len = newj2len
|
||||
}
|
||||
|
||||
// Extend the best by non-junk elements on each end. In particular,
|
||||
// "popular" non-junk elements aren't in b2j, which greatly speeds
|
||||
// the inner loop above, but also means "the best" match so far
|
||||
// doesn't contain any junk *or* popular non-junk elements.
|
||||
for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) &&
|
||||
m.a[besti-1] == m.b[bestj-1] {
|
||||
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
|
||||
}
|
||||
for besti+bestsize < ahi && bestj+bestsize < bhi &&
|
||||
!m.isBJunk(m.b[bestj+bestsize]) &&
|
||||
m.a[besti+bestsize] == m.b[bestj+bestsize] {
|
||||
bestsize += 1
|
||||
}
|
||||
|
||||
// Now that we have a wholly interesting match (albeit possibly
|
||||
// empty!), we may as well suck up the matching junk on each
|
||||
// side of it too. Can't think of a good reason not to, and it
|
||||
// saves post-processing the (possibly considerable) expense of
|
||||
// figuring out what to do with it. In the case of an empty
|
||||
// interesting match, this is clearly the right thing to do,
|
||||
// because no other kind of match is possible in the regions.
|
||||
for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) &&
|
||||
m.a[besti-1] == m.b[bestj-1] {
|
||||
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
|
||||
}
|
||||
for besti+bestsize < ahi && bestj+bestsize < bhi &&
|
||||
m.isBJunk(m.b[bestj+bestsize]) &&
|
||||
m.a[besti+bestsize] == m.b[bestj+bestsize] {
|
||||
bestsize += 1
|
||||
}
|
||||
|
||||
return Match{A: besti, B: bestj, Size: bestsize}
|
||||
}
|
||||
|
||||
// Return list of triples describing matching subsequences.
|
||||
//
|
||||
// Each triple is of the form (i, j, n), and means that
|
||||
// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
|
||||
// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are
|
||||
// adjacent triples in the list, and the second is not the last triple in the
|
||||
// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe
|
||||
// adjacent equal blocks.
|
||||
//
|
||||
// The last triple is a dummy, (len(a), len(b), 0), and is the only
|
||||
// triple with n==0.
|
||||
func (m *SequenceMatcher) GetMatchingBlocks() []Match {
|
||||
if m.matchingBlocks != nil {
|
||||
return m.matchingBlocks
|
||||
}
|
||||
|
||||
var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match
|
||||
matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match {
|
||||
match := m.findLongestMatch(alo, ahi, blo, bhi)
|
||||
i, j, k := match.A, match.B, match.Size
|
||||
if match.Size > 0 {
|
||||
if alo < i && blo < j {
|
||||
matched = matchBlocks(alo, i, blo, j, matched)
|
||||
}
|
||||
matched = append(matched, match)
|
||||
if i+k < ahi && j+k < bhi {
|
||||
matched = matchBlocks(i+k, ahi, j+k, bhi, matched)
|
||||
}
|
||||
}
|
||||
return matched
|
||||
}
|
||||
matched := matchBlocks(0, len(m.a), 0, len(m.b), nil)
|
||||
|
||||
// It's possible that we have adjacent equal blocks in the
|
||||
// matching_blocks list now.
|
||||
nonAdjacent := []Match{}
|
||||
i1, j1, k1 := 0, 0, 0
|
||||
for _, b := range matched {
|
||||
// Is this block adjacent to i1, j1, k1?
|
||||
i2, j2, k2 := b.A, b.B, b.Size
|
||||
if i1+k1 == i2 && j1+k1 == j2 {
|
||||
// Yes, so collapse them -- this just increases the length of
|
||||
// the first block by the length of the second, and the first
|
||||
// block so lengthened remains the block to compare against.
|
||||
k1 += k2
|
||||
} else {
|
||||
// Not adjacent. Remember the first block (k1==0 means it's
|
||||
// the dummy we started with), and make the second block the
|
||||
// new block to compare against.
|
||||
if k1 > 0 {
|
||||
nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
|
||||
}
|
||||
i1, j1, k1 = i2, j2, k2
|
||||
}
|
||||
}
|
||||
if k1 > 0 {
|
||||
nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
|
||||
}
|
||||
|
||||
nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0})
|
||||
m.matchingBlocks = nonAdjacent
|
||||
return m.matchingBlocks
|
||||
}
|
||||
|
||||
// Return list of 5-tuples describing how to turn a into b.
|
||||
//
|
||||
// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
|
||||
// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
|
||||
// tuple preceding it, and likewise for j1 == the previous j2.
|
||||
//
|
||||
// The tags are characters, with these meanings:
|
||||
//
|
||||
// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2]
|
||||
//
|
||||
// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case.
|
||||
//
|
||||
// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case.
|
||||
//
|
||||
// 'e' (equal): a[i1:i2] == b[j1:j2]
|
||||
func (m *SequenceMatcher) GetOpCodes() []OpCode {
|
||||
if m.opCodes != nil {
|
||||
return m.opCodes
|
||||
}
|
||||
i, j := 0, 0
|
||||
matching := m.GetMatchingBlocks()
|
||||
opCodes := make([]OpCode, 0, len(matching))
|
||||
for _, m := range matching {
|
||||
// invariant: we've pumped out correct diffs to change
|
||||
// a[:i] into b[:j], and the next matching block is
|
||||
// a[ai:ai+size] == b[bj:bj+size]. So we need to pump
|
||||
// out a diff to change a[i:ai] into b[j:bj], pump out
|
||||
// the matching block, and move (i,j) beyond the match
|
||||
ai, bj, size := m.A, m.B, m.Size
|
||||
tag := byte(0)
|
||||
if i < ai && j < bj {
|
||||
tag = 'r'
|
||||
} else if i < ai {
|
||||
tag = 'd'
|
||||
} else if j < bj {
|
||||
tag = 'i'
|
||||
}
|
||||
if tag > 0 {
|
||||
opCodes = append(opCodes, OpCode{tag, i, ai, j, bj})
|
||||
}
|
||||
i, j = ai+size, bj+size
|
||||
// the list of matching blocks is terminated by a
|
||||
// sentinel with size 0
|
||||
if size > 0 {
|
||||
opCodes = append(opCodes, OpCode{'e', ai, i, bj, j})
|
||||
}
|
||||
}
|
||||
m.opCodes = opCodes
|
||||
return m.opCodes
|
||||
}
|
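As the comment above describes, GetOpCodes reduces the two sequences to a list of replace/delete/insert/equal operations. A small sketch of inspecting those opcodes directly (hypothetical program; note this package is being dropped from the vendor tree in this commit):

package main

import (
	"fmt"

	"github.com/pmezard/go-difflib/difflib"
)

func main() {
	a := []string{"one", "two", "three", "four"}
	b := []string{"zero", "one", "three", "four"}

	m := difflib.NewMatcher(a, b)
	for _, op := range m.GetOpCodes() {
		// Tag is one of 'r', 'd', 'i', 'e' as documented above.
		fmt.Printf("%c a[%d:%d] b[%d:%d]\n", op.Tag, op.I1, op.I2, op.J1, op.J2)
	}
}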
||||
|
||||
// Isolate change clusters by eliminating ranges with no changes.
|
||||
//
|
||||
// Return a generator of groups with up to n lines of context.
|
||||
// Each group is in the same format as returned by GetOpCodes().
|
||||
func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
|
||||
if n < 0 {
|
||||
n = 3
|
||||
}
|
||||
codes := m.GetOpCodes()
|
||||
if len(codes) == 0 {
|
||||
codes = []OpCode{OpCode{'e', 0, 1, 0, 1}}
|
||||
}
|
||||
// Fixup leading and trailing groups if they show no changes.
|
||||
if codes[0].Tag == 'e' {
|
||||
c := codes[0]
|
||||
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
|
||||
codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2}
|
||||
}
|
||||
if codes[len(codes)-1].Tag == 'e' {
|
||||
c := codes[len(codes)-1]
|
||||
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
|
||||
codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)}
|
||||
}
|
||||
nn := n + n
|
||||
groups := [][]OpCode{}
|
||||
group := []OpCode{}
|
||||
for _, c := range codes {
|
||||
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
|
||||
// End the current group and start a new one whenever
|
||||
// there is a large range with no changes.
|
||||
if c.Tag == 'e' && i2-i1 > nn {
|
||||
group = append(group, OpCode{c.Tag, i1, min(i2, i1+n),
|
||||
j1, min(j2, j1+n)})
|
||||
groups = append(groups, group)
|
||||
group = []OpCode{}
|
||||
i1, j1 = max(i1, i2-n), max(j1, j2-n)
|
||||
}
|
||||
group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
|
||||
}
|
||||
if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') {
|
||||
groups = append(groups, group)
|
||||
}
|
||||
return groups
|
||||
}
|
||||
|
||||
// Return a measure of the sequences' similarity (float in [0,1]).
|
||||
//
|
||||
// Where T is the total number of elements in both sequences, and
|
||||
// M is the number of matches, this is 2.0*M / T.
|
||||
// Note that this is 1 if the sequences are identical, and 0 if
|
||||
// they have nothing in common.
|
||||
//
|
||||
// .Ratio() is expensive to compute if you haven't already computed
|
||||
// .GetMatchingBlocks() or .GetOpCodes(), in which case you may
|
||||
// want to try .QuickRatio() or .RealQuickRatio() first to get an
|
||||
// upper bound.
|
||||
func (m *SequenceMatcher) Ratio() float64 {
|
||||
matches := 0
|
||||
for _, m := range m.GetMatchingBlocks() {
|
||||
matches += m.Size
|
||||
}
|
||||
return calculateRatio(matches, len(m.a)+len(m.b))
|
||||
}
|
||||
|
||||
// Return an upper bound on ratio() relatively quickly.
|
||||
//
|
||||
// This isn't defined beyond that it is an upper bound on .Ratio(), and
|
||||
// is faster to compute.
|
||||
func (m *SequenceMatcher) QuickRatio() float64 {
|
||||
// viewing a and b as multisets, set matches to the cardinality
|
||||
// of their intersection; this counts the number of matches
|
||||
// without regard to order, so is clearly an upper bound
|
||||
if m.fullBCount == nil {
|
||||
m.fullBCount = map[string]int{}
|
||||
for _, s := range m.b {
|
||||
m.fullBCount[s] = m.fullBCount[s] + 1
|
||||
}
|
||||
}
|
||||
|
||||
// avail[x] is the number of times x appears in 'b' less the
|
||||
// number of times we've seen it in 'a' so far ... kinda
|
||||
avail := map[string]int{}
|
||||
matches := 0
|
||||
for _, s := range m.a {
|
||||
n, ok := avail[s]
|
||||
if !ok {
|
||||
n = m.fullBCount[s]
|
||||
}
|
||||
avail[s] = n - 1
|
||||
if n > 0 {
|
||||
matches += 1
|
||||
}
|
||||
}
|
||||
return calculateRatio(matches, len(m.a)+len(m.b))
|
||||
}
|
||||
|
||||
// Return an upper bound on ratio() very quickly.
|
||||
//
|
||||
// This isn't defined beyond that it is an upper bound on .Ratio(), and
|
||||
// is faster to compute than either .Ratio() or .QuickRatio().
|
||||
func (m *SequenceMatcher) RealQuickRatio() float64 {
|
||||
la, lb := len(m.a), len(m.b)
|
||||
return calculateRatio(min(la, lb), la+lb)
|
||||
}
|
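Ratio and its two cheaper variants can be used to short-circuit expensive comparisons, since both quick variants are upper bounds on the exact value. A quick sketch (hypothetical):

package main

import (
	"fmt"

	"github.com/pmezard/go-difflib/difflib"
)

func main() {
	a := difflib.SplitLines("alpha\nbeta\ngamma\n")
	b := difflib.SplitLines("alpha\nbeta\ndelta\n")

	m := difflib.NewMatcher(a, b)
	// Both quick variants are upper bounds on the exact ratio.
	fmt.Println("real quick:", m.RealQuickRatio())
	fmt.Println("quick:     ", m.QuickRatio())
	fmt.Println("exact:     ", m.Ratio())
}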
||||
|
||||
// Convert range to the "ed" format
|
||||
func formatRangeUnified(start, stop int) string {
|
||||
// Per the diff spec at http://www.unix.org/single_unix_specification/
|
||||
beginning := start + 1 // lines start numbering with one
|
||||
length := stop - start
|
||||
if length == 1 {
|
||||
return fmt.Sprintf("%d", beginning)
|
||||
}
|
||||
if length == 0 {
|
||||
beginning -= 1 // empty ranges begin at line just before the range
|
||||
}
|
||||
return fmt.Sprintf("%d,%d", beginning, length)
|
||||
}
|
||||
|
||||
// Unified diff parameters
|
||||
type UnifiedDiff struct {
|
||||
A []string // First sequence lines
|
||||
FromFile string // First file name
|
||||
FromDate string // First file time
|
||||
B []string // Second sequence lines
|
||||
ToFile string // Second file name
|
||||
ToDate string // Second file time
|
||||
Eol string // Headers end of line, defaults to LF
|
||||
Context int // Number of context lines
|
||||
}
|
||||
|
||||
// Compare two sequences of lines; generate the delta as a unified diff.
|
||||
//
|
||||
// Unified diffs are a compact way of showing line changes and a few
|
||||
// lines of context. The number of context lines is set by 'n' which
|
||||
// defaults to three.
|
||||
//
|
||||
// By default, the diff control lines (those with ---, +++, or @@) are
|
||||
// created with a trailing newline. This is helpful so that inputs
|
||||
// created from file.readlines() result in diffs that are suitable for
|
||||
// file.writelines() since both the inputs and outputs have trailing
|
||||
// newlines.
|
||||
//
|
||||
// For inputs that do not have trailing newlines, set the lineterm
|
||||
// argument to "" so that the output will be uniformly newline free.
|
||||
//
|
||||
// The unidiff format normally has a header for filenames and modification
|
||||
// times. Any or all of these may be specified using strings for
|
||||
// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
|
||||
// The modification times are normally expressed in the ISO 8601 format.
|
||||
func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
|
||||
buf := bufio.NewWriter(writer)
|
||||
defer buf.Flush()
|
||||
wf := func(format string, args ...interface{}) error {
|
||||
_, err := buf.WriteString(fmt.Sprintf(format, args...))
|
||||
return err
|
||||
}
|
||||
ws := func(s string) error {
|
||||
_, err := buf.WriteString(s)
|
||||
return err
|
||||
}
|
||||
|
||||
if len(diff.Eol) == 0 {
|
||||
diff.Eol = "\n"
|
||||
}
|
||||
|
||||
started := false
|
||||
m := NewMatcher(diff.A, diff.B)
|
||||
for _, g := range m.GetGroupedOpCodes(diff.Context) {
|
||||
if !started {
|
||||
started = true
|
||||
fromDate := ""
|
||||
if len(diff.FromDate) > 0 {
|
||||
fromDate = "\t" + diff.FromDate
|
||||
}
|
||||
toDate := ""
|
||||
if len(diff.ToDate) > 0 {
|
||||
toDate = "\t" + diff.ToDate
|
||||
}
|
||||
if diff.FromFile != "" || diff.ToFile != "" {
|
||||
err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
first, last := g[0], g[len(g)-1]
|
||||
range1 := formatRangeUnified(first.I1, last.I2)
|
||||
range2 := formatRangeUnified(first.J1, last.J2)
|
||||
if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, c := range g {
|
||||
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
|
||||
if c.Tag == 'e' {
|
||||
for _, line := range diff.A[i1:i2] {
|
||||
if err := ws(" " + line); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
if c.Tag == 'r' || c.Tag == 'd' {
|
||||
for _, line := range diff.A[i1:i2] {
|
||||
if err := ws("-" + line); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
if c.Tag == 'r' || c.Tag == 'i' {
|
||||
for _, line := range diff.B[j1:j2] {
|
||||
if err := ws("+" + line); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Like WriteUnifiedDiff but returns the diff as a string.
|
||||
func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
|
||||
w := &bytes.Buffer{}
|
||||
err := WriteUnifiedDiff(w, diff)
|
||||
return string(w.Bytes()), err
|
||||
}
|
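Typical use of this API is to feed SplitLines output into a UnifiedDiff value and render it in one call. A short sketch (hypothetical program using the package as vendored before this removal):

package main

import (
	"fmt"

	"github.com/pmezard/go-difflib/difflib"
)

func main() {
	diff := difflib.UnifiedDiff{
		A:        difflib.SplitLines("one\ntwo\nthree\n"),
		B:        difflib.SplitLines("one\n2\nthree\n"),
		FromFile: "before.txt",
		ToFile:   "after.txt",
		Context:  3,
	}
	text, err := difflib.GetUnifiedDiffString(diff)
	if err != nil {
		panic(err)
	}
	fmt.Print(text)
}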
||||
|
||||
// Convert range to the "ed" format.
|
||||
func formatRangeContext(start, stop int) string {
|
||||
// Per the diff spec at http://www.unix.org/single_unix_specification/
|
||||
beginning := start + 1 // lines start numbering with one
|
||||
length := stop - start
|
||||
if length == 0 {
|
||||
beginning -= 1 // empty ranges begin at line just before the range
|
||||
}
|
||||
if length <= 1 {
|
||||
return fmt.Sprintf("%d", beginning)
|
||||
}
|
||||
return fmt.Sprintf("%d,%d", beginning, beginning+length-1)
|
||||
}
|
||||
|
||||
type ContextDiff UnifiedDiff
|
||||
|
||||
// Compare two sequences of lines; generate the delta as a context diff.
|
||||
//
|
||||
// Context diffs are a compact way of showing line changes and a few
|
||||
// lines of context. The number of context lines is set by diff.Context
|
||||
// which defaults to three.
|
||||
//
|
||||
// By default, the diff control lines (those with *** or ---) are
|
||||
// created with a trailing newline.
|
||||
//
|
||||
// For inputs that do not have trailing newlines, set the diff.Eol
|
||||
// argument to "" so that the output will be uniformly newline free.
|
||||
//
|
||||
// The context diff format normally has a header for filenames and
|
||||
// modification times. Any or all of these may be specified using
|
||||
// strings for diff.FromFile, diff.ToFile, diff.FromDate, diff.ToDate.
|
||||
// The modification times are normally expressed in the ISO 8601 format.
|
||||
// If not specified, the strings default to blanks.
|
||||
func WriteContextDiff(writer io.Writer, diff ContextDiff) error {
|
||||
buf := bufio.NewWriter(writer)
|
||||
defer buf.Flush()
|
||||
var diffErr error
|
||||
wf := func(format string, args ...interface{}) {
|
||||
_, err := buf.WriteString(fmt.Sprintf(format, args...))
|
||||
if diffErr == nil && err != nil {
|
||||
diffErr = err
|
||||
}
|
||||
}
|
||||
ws := func(s string) {
|
||||
_, err := buf.WriteString(s)
|
||||
if diffErr == nil && err != nil {
|
||||
diffErr = err
|
||||
}
|
||||
}
|
||||
|
||||
if len(diff.Eol) == 0 {
|
||||
diff.Eol = "\n"
|
||||
}
|
||||
|
||||
prefix := map[byte]string{
|
||||
'i': "+ ",
|
||||
'd': "- ",
|
||||
'r': "! ",
|
||||
'e': " ",
|
||||
}
|
||||
|
||||
started := false
|
||||
m := NewMatcher(diff.A, diff.B)
|
||||
for _, g := range m.GetGroupedOpCodes(diff.Context) {
|
||||
if !started {
|
||||
started = true
|
||||
fromDate := ""
|
||||
if len(diff.FromDate) > 0 {
|
||||
fromDate = "\t" + diff.FromDate
|
||||
}
|
||||
toDate := ""
|
||||
if len(diff.ToDate) > 0 {
|
||||
toDate = "\t" + diff.ToDate
|
||||
}
|
||||
if diff.FromFile != "" || diff.ToFile != "" {
|
||||
wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol)
|
||||
wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol)
|
||||
}
|
||||
}
|
||||
|
||||
first, last := g[0], g[len(g)-1]
|
||||
ws("***************" + diff.Eol)
|
||||
|
||||
range1 := formatRangeContext(first.I1, last.I2)
|
||||
wf("*** %s ****%s", range1, diff.Eol)
|
||||
for _, c := range g {
|
||||
if c.Tag == 'r' || c.Tag == 'd' {
|
||||
for _, cc := range g {
|
||||
if cc.Tag == 'i' {
|
||||
continue
|
||||
}
|
||||
for _, line := range diff.A[cc.I1:cc.I2] {
|
||||
ws(prefix[cc.Tag] + line)
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
range2 := formatRangeContext(first.J1, last.J2)
|
||||
wf("--- %s ----%s", range2, diff.Eol)
|
||||
for _, c := range g {
|
||||
if c.Tag == 'r' || c.Tag == 'i' {
|
||||
for _, cc := range g {
|
||||
if cc.Tag == 'd' {
|
||||
continue
|
||||
}
|
||||
for _, line := range diff.B[cc.J1:cc.J2] {
|
||||
ws(prefix[cc.Tag] + line)
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
return diffErr
|
||||
}
|
||||
|
||||
// Like WriteContextDiff but returns the diff as a string.
|
||||
func GetContextDiffString(diff ContextDiff) (string, error) {
|
||||
w := &bytes.Buffer{}
|
||||
err := WriteContextDiff(w, diff)
|
||||
return string(w.Bytes()), err
|
||||
}
|
||||
|
||||
// Split a string on "\n" while preserving them. The output can be used
|
||||
// as input for UnifiedDiff and ContextDiff structures.
|
||||
func SplitLines(s string) []string {
|
||||
lines := strings.SplitAfter(s, "\n")
|
||||
lines[len(lines)-1] += "\n"
|
||||
return lines
|
||||
}
|