@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"github.com/dotcloud/docker/api"
 	"github.com/dotcloud/docker/archive"
+	"github.com/dotcloud/docker/daemon"
 	"github.com/dotcloud/docker/daemonconfig"
 	"github.com/dotcloud/docker/dockerversion"
 	"github.com/dotcloud/docker/engine"
@@ -14,7 +15,6 @@ import (
 	"github.com/dotcloud/docker/pkg/signal"
 	"github.com/dotcloud/docker/registry"
 	"github.com/dotcloud/docker/runconfig"
-	"github.com/dotcloud/docker/runtime"
 	"github.com/dotcloud/docker/utils"
 	"io"
 	"io/ioutil"
@@ -43,9 +43,9 @@ func InitServer(job *engine.Job) engine.Status {
 	if err != nil {
 		return job.Error(err)
 	}
-	if srv.runtime.Config().Pidfile != "" {
+	if srv.daemon.Config().Pidfile != "" {
 		job.Logf("Creating pidfile")
-		if err := utils.CreatePidFile(srv.runtime.Config().Pidfile); err != nil {
+		if err := utils.CreatePidFile(srv.daemon.Config().Pidfile); err != nil {
 			// FIXME: do we need fatal here instead of returning a job error?
 			log.Fatal(err)
 		}
@@ -65,7 +65,7 @@ func InitServer(job *engine.Job) engine.Status {
 					interruptCount++
 					// Initiate the cleanup only once
 					if interruptCount == 1 {
-						utils.RemovePidFile(srv.runtime.Config().Pidfile)
+						utils.RemovePidFile(srv.daemon.Config().Pidfile)
 						srv.Close()
 					} else {
 						return
@@ -80,7 +80,7 @@ func InitServer(job *engine.Job) engine.Status {
 		}
 	}()
 	job.Eng.Hack_SetGlobalVar("httpapi.server", srv)
-	job.Eng.Hack_SetGlobalVar("httpapi.runtime", srv.runtime)
+	job.Eng.Hack_SetGlobalVar("httpapi.daemon", srv.daemon)
 
 	// FIXME: 'insert' is deprecated and should be removed in a future version.
 	for name, handler := range map[string]engine.Handler{
@@ -172,13 +172,13 @@ func (srv *Server) ContainerKill(job *engine.Job) engine.Status {
 		}
 	}
 
-	if container := srv.runtime.Get(name); container != nil {
+	if container := srv.daemon.Get(name); container != nil {
 		// If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait())
 		if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL {
 			if err := container.Kill(); err != nil {
 				return job.Errorf("Cannot kill container %s: %s", name, err)
 			}
-			srv.LogEvent("kill", container.ID, srv.runtime.Repositories().ImageName(container.Image))
+			srv.LogEvent("kill", container.ID, srv.daemon.Repositories().ImageName(container.Image))
 		} else {
 			// Otherwise, just send the requested signal
 			if err := container.KillSig(int(sig)); err != nil {
@@ -294,7 +294,7 @@ func (srv *Server) ContainerExport(job *engine.Job) engine.Status {
 		return job.Errorf("Usage: %s container_id", job.Name)
 	}
 	name := job.Args[0]
-	if container := srv.runtime.Get(name); container != nil {
+	if container := srv.daemon.Get(name); container != nil {
 		data, err := container.Export()
 		if err != nil {
 			return job.Errorf("%s: %s", name, err)
@@ -306,7 +306,7 @@ func (srv *Server) ContainerExport(job *engine.Job) engine.Status {
 			return job.Errorf("%s: %s", name, err)
 		}
 		// FIXME: factor job-specific LogEvent to engine.Job.Run()
-		srv.LogEvent("export", container.ID, srv.runtime.Repositories().ImageName(container.Image))
+		srv.LogEvent("export", container.ID, srv.daemon.Repositories().ImageName(container.Image))
 		return engine.StatusOK
 	}
 	return job.Errorf("No such container: %s", name)
@@ -331,7 +331,7 @@ func (srv *Server) ImageExport(job *engine.Job) engine.Status {
 
 	utils.Debugf("Serializing %s", name)
 
-	rootRepo, err := srv.runtime.Repositories().Get(name)
+	rootRepo, err := srv.daemon.Repositories().Get(name)
 	if err != nil {
 		return job.Error(err)
 	}
@@ -510,7 +510,7 @@ func (srv *Server) Build(job *engine.Job) engine.Status {
 		return job.Error(err)
 	}
 	if repoName != "" {
-		srv.runtime.Repositories().Set(repoName, tag, id, false)
+		srv.daemon.Repositories().Set(repoName, tag, id, false)
 	}
 	return engine.StatusOK
 }
@@ -571,7 +571,7 @@ func (srv *Server) ImageLoad(job *engine.Job) engine.Status {
 
 		for imageName, tagMap := range repositories {
 			for tag, address := range tagMap {
-				if err := srv.runtime.Repositories().Set(imageName, tag, address, true); err != nil {
+				if err := srv.daemon.Repositories().Set(imageName, tag, address, true); err != nil {
 					return job.Error(err)
 				}
 			}
@@ -604,13 +604,13 @@ func (srv *Server) recursiveLoad(address, tmpImageDir string) error {
 			return err
 		}
 		if img.Parent != "" {
-			if !srv.runtime.Graph().Exists(img.Parent) {
+			if !srv.daemon.Graph().Exists(img.Parent) {
 				if err := srv.recursiveLoad(img.Parent, tmpImageDir); err != nil {
 					return err
 				}
 			}
 		}
-		if err := srv.runtime.Graph().Register(imageJson, layer, img); err != nil {
+		if err := srv.daemon.Graph().Register(imageJson, layer, img); err != nil {
 			return err
 		}
 	}
@@ -668,7 +668,7 @@ func (srv *Server) ImageInsert(job *engine.Job) engine.Status {
 	sf := utils.NewStreamFormatter(job.GetenvBool("json"))
 
 	out := utils.NewWriteFlusher(job.Stdout)
-	img, err := srv.runtime.Repositories().LookupImage(name)
+	img, err := srv.daemon.Repositories().LookupImage(name)
 	if err != nil {
 		return job.Error(err)
 	}
@@ -679,12 +679,12 @@ func (srv *Server) ImageInsert(job *engine.Job) engine.Status {
 	}
 	defer file.Body.Close()
 
-	config, _, _, err := runconfig.Parse([]string{img.ID, "echo", "insert", url, path}, srv.runtime.SystemConfig())
+	config, _, _, err := runconfig.Parse([]string{img.ID, "echo", "insert", url, path}, srv.daemon.SystemConfig())
 	if err != nil {
 		return job.Error(err)
 	}
 
-	c, _, err := srv.runtime.Create(config, "")
+	c, _, err := srv.daemon.Create(config, "")
 	if err != nil {
 		return job.Error(err)
 	}
@@ -693,7 +693,7 @@ func (srv *Server) ImageInsert(job *engine.Job) engine.Status {
 		return job.Error(err)
 	}
 	// FIXME: Handle custom repo, tag comment, author
-	img, err = srv.runtime.Commit(c, "", "", img.Comment, img.Author, nil)
+	img, err = srv.daemon.Commit(c, "", "", img.Comment, img.Author, nil)
 	if err != nil {
 		out.Write(sf.FormatError(err))
 		return engine.StatusErr
@@ -703,7 +703,7 @@ func (srv *Server) ImageInsert(job *engine.Job) engine.Status {
 }
 
 func (srv *Server) ImagesViz(job *engine.Job) engine.Status {
-	images, _ := srv.runtime.Graph().Map()
+	images, _ := srv.daemon.Graph().Map()
 	if images == nil {
 		return engine.StatusOK
 	}
@@ -727,7 +727,7 @@ func (srv *Server) ImagesViz(job *engine.Job) engine.Status {
 
 	reporefs := make(map[string][]string)
 
-	for name, repository := range srv.runtime.Repositories().Repositories {
+	for name, repository := range srv.daemon.Repositories().Repositories {
 		for tag, id := range repository {
 			reporefs[utils.TruncateID(id)] = append(reporefs[utils.TruncateID(id)], fmt.Sprintf("%s:%s", name, tag))
 		}
@@ -746,22 +746,22 @@ func (srv *Server) Images(job *engine.Job) engine.Status {
 		err       error
 	)
 	if job.GetenvBool("all") {
-		allImages, err = srv.runtime.Graph().Map()
+		allImages, err = srv.daemon.Graph().Map()
 	} else {
-		allImages, err = srv.runtime.Graph().Heads()
+		allImages, err = srv.daemon.Graph().Heads()
 	}
 	if err != nil {
 		return job.Error(err)
 	}
 	lookup := make(map[string]*engine.Env)
-	for name, repository := range srv.runtime.Repositories().Repositories {
+	for name, repository := range srv.daemon.Repositories().Repositories {
 		if job.Getenv("filter") != "" {
 			if match, _ := path.Match(job.Getenv("filter"), name); !match {
 				continue
 			}
 		}
 		for tag, id := range repository {
-			image, err := srv.runtime.Graph().Get(id)
+			image, err := srv.daemon.Graph().Get(id)
 			if err != nil {
 				log.Printf("Warning: couldn't load %s from %s/%s: %s", id, name, tag, err)
 				continue
@@ -811,7 +811,7 @@ func (srv *Server) Images(job *engine.Job) engine.Status {
 }
 
 func (srv *Server) DockerInfo(job *engine.Job) engine.Status {
-	images, _ := srv.runtime.Graph().Map()
+	images, _ := srv.daemon.Graph().Map()
 	var imgcount int
 	if images == nil {
 		imgcount = 0
@@ -826,22 +826,22 @@ func (srv *Server) DockerInfo(job *engine.Job) engine.Status {
 	// if we still have the original dockerinit binary from before we copied it locally, let's return the path to that, since that's more intuitive (the copied path is trivial to derive by hand given VERSION)
 	initPath := utils.DockerInitPath("")
 	if initPath == "" {
-		// if that fails, we'll just return the path from the runtime
-		initPath = srv.runtime.SystemInitPath()
+		// if that fails, we'll just return the path from the daemon
+		initPath = srv.daemon.SystemInitPath()
 	}
 
 	v := &engine.Env{}
-	v.SetInt("Containers", len(srv.runtime.List()))
+	v.SetInt("Containers", len(srv.daemon.List()))
 	v.SetInt("Images", imgcount)
-	v.Set("Driver", srv.runtime.GraphDriver().String())
-	v.SetJson("DriverStatus", srv.runtime.GraphDriver().Status())
-	v.SetBool("MemoryLimit", srv.runtime.SystemConfig().MemoryLimit)
-	v.SetBool("SwapLimit", srv.runtime.SystemConfig().SwapLimit)
-	v.SetBool("IPv4Forwarding", !srv.runtime.SystemConfig().IPv4ForwardingDisabled)
+	v.Set("Driver", srv.daemon.GraphDriver().String())
+	v.SetJson("DriverStatus", srv.daemon.GraphDriver().Status())
+	v.SetBool("MemoryLimit", srv.daemon.SystemConfig().MemoryLimit)
+	v.SetBool("SwapLimit", srv.daemon.SystemConfig().SwapLimit)
+	v.SetBool("IPv4Forwarding", !srv.daemon.SystemConfig().IPv4ForwardingDisabled)
 	v.SetBool("Debug", os.Getenv("DEBUG") != "")
 	v.SetInt("NFd", utils.GetTotalUsedFds())
 	v.SetInt("NGoroutines", goruntime.NumGoroutine())
-	v.Set("ExecutionDriver", srv.runtime.ExecutionDriver().Name())
+	v.Set("ExecutionDriver", srv.daemon.ExecutionDriver().Name())
 	v.SetInt("NEventsListener", len(srv.listeners))
 	v.Set("KernelVersion", kernelVersion)
 	v.Set("IndexServerAddress", registry.IndexServerAddress())
@@ -875,13 +875,13 @@ func (srv *Server) ImageHistory(job *engine.Job) engine.Status {
 		return job.Errorf("Usage: %s IMAGE", job.Name)
 	}
 	name := job.Args[0]
-	foundImage, err := srv.runtime.Repositories().LookupImage(name)
+	foundImage, err := srv.daemon.Repositories().LookupImage(name)
 	if err != nil {
 		return job.Error(err)
 	}
 
 	lookupMap := make(map[string][]string)
-	for name, repository := range srv.runtime.Repositories().Repositories {
+	for name, repository := range srv.daemon.Repositories().Repositories {
 		for tag, id := range repository {
 			// If the ID already has a reverse lookup, do not update it unless for "latest"
 			if _, exists := lookupMap[id]; !exists {
@@ -922,11 +922,11 @@ func (srv *Server) ContainerTop(job *engine.Job) engine.Status {
 		psArgs = job.Args[1]
 	}
 
-	if container := srv.runtime.Get(name); container != nil {
+	if container := srv.daemon.Get(name); container != nil {
 		if !container.State.IsRunning() {
 			return job.Errorf("Container %s is not running", name)
 		}
-		pids, err := srv.runtime.ExecutionDriver().GetPidsForContainer(container.ID)
+		pids, err := srv.daemon.ExecutionDriver().GetPidsForContainer(container.ID)
 		if err != nil {
 			return job.Error(err)
 		}
@@ -984,7 +984,7 @@ func (srv *Server) ContainerChanges(job *engine.Job) engine.Status {
 		return job.Errorf("Usage: %s CONTAINER", job.Name)
 	}
 	name := job.Args[0]
-	if container := srv.runtime.Get(name); container != nil {
+	if container := srv.daemon.Get(name); container != nil {
 		outs := engine.NewTable("", 0)
 		changes, err := container.Changes()
 		if err != nil {
@@ -1019,27 +1019,27 @@ func (srv *Server) Containers(job *engine.Job) engine.Status {
 	outs := engine.NewTable("Created", 0)
 
 	names := map[string][]string{}
-	srv.runtime.ContainerGraph().Walk("/", func(p string, e *graphdb.Entity) error {
+	srv.daemon.ContainerGraph().Walk("/", func(p string, e *graphdb.Entity) error {
 		names[e.ID()] = append(names[e.ID()], p)
 		return nil
 	}, -1)
 
-	var beforeCont, sinceCont *runtime.Container
+	var beforeCont, sinceCont *daemon.Container
 	if before != "" {
-		beforeCont = srv.runtime.Get(before)
+		beforeCont = srv.daemon.Get(before)
 		if beforeCont == nil {
 			return job.Error(fmt.Errorf("Could not find container with name or id %s", before))
 		}
 	}
 
 	if since != "" {
-		sinceCont = srv.runtime.Get(since)
+		sinceCont = srv.daemon.Get(since)
 		if sinceCont == nil {
 			return job.Error(fmt.Errorf("Could not find container with name or id %s", since))
 		}
 	}
 
-	for _, container := range srv.runtime.List() {
+	for _, container := range srv.daemon.List() {
 		if !container.State.IsRunning() && !all && n <= 0 && since == "" && before == "" {
 			continue
 		}
@@ -1061,7 +1061,7 @@ func (srv *Server) Containers(job *engine.Job) engine.Status {
 		out := &engine.Env{}
 		out.Set("Id", container.ID)
 		out.SetList("Names", names[container.ID])
-		out.Set("Image", srv.runtime.Repositories().ImageName(container.Image))
+		out.Set("Image", srv.daemon.Repositories().ImageName(container.Image))
 		if len(container.Args) > 0 {
 			args := []string{}
 			for _, arg := range container.Args {
@@ -1104,7 +1104,7 @@ func (srv *Server) ContainerCommit(job *engine.Job) engine.Status {
 	}
 	name := job.Args[0]
 
-	container := srv.runtime.Get(name)
+	container := srv.daemon.Get(name)
 	if container == nil {
 		return job.Errorf("No such container: %s", name)
 	}
@@ -1118,7 +1118,7 @@ func (srv *Server) ContainerCommit(job *engine.Job) engine.Status {
 		return job.Error(err)
 	}
 
-	img, err := srv.runtime.Commit(container, job.Getenv("repo"), job.Getenv("tag"), job.Getenv("comment"), job.Getenv("author"), &newConfig)
+	img, err := srv.daemon.Commit(container, job.Getenv("repo"), job.Getenv("tag"), job.Getenv("comment"), job.Getenv("author"), &newConfig)
 	if err != nil {
 		return job.Error(err)
 	}
@@ -1134,7 +1134,7 @@ func (srv *Server) ImageTag(job *engine.Job) engine.Status {
 	if len(job.Args) == 3 {
 		tag = job.Args[2]
 	}
-	if err := srv.runtime.Repositories().Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force")); err != nil {
+	if err := srv.daemon.Repositories().Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force")); err != nil {
 		return job.Error(err)
 	}
 	return engine.StatusOK
@@ -1159,7 +1159,7 @@ func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgID, endpoin
 	}
 	defer srv.poolRemove("pull", "layer:"+id)
 
-	if !srv.runtime.Graph().Exists(id) {
+	if !srv.daemon.Graph().Exists(id) {
 		out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling metadata", nil))
 		var (
 			imgJSON []byte
@@ -1197,7 +1197,7 @@ func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgID, endpoin
 			return err
 		}
 		defer layer.Close()
-		if err := srv.runtime.Graph().Register(imgJSON, utils.ProgressReader(layer, imgSize, out, sf, false, utils.TruncateID(id), "Downloading"), img); err != nil {
+		if err := srv.daemon.Graph().Register(imgJSON, utils.ProgressReader(layer, imgSize, out, sf, false, utils.TruncateID(id), "Downloading"), img); err != nil {
 			out.Write(sf.FormatProgress(utils.TruncateID(id), "Error downloading dependent layers", nil))
 			return err
 		}
@@ -1332,11 +1332,11 @@ func (srv *Server) pullRepository(r *registry.Registry, out io.Writer, localName
 			if askedTag != "" && tag != askedTag {
 				continue
 			}
-			if err := srv.runtime.Repositories().Set(localName, tag, id, true); err != nil {
+			if err := srv.daemon.Repositories().Set(localName, tag, id, true); err != nil {
 				return err
 			}
 		}
-	if err := srv.runtime.Repositories().Save(); err != nil {
+	if err := srv.daemon.Repositories().Save(); err != nil {
 		return err
 	}
 
@@ -1467,7 +1467,7 @@ func (srv *Server) getImageList(localRepo map[string]string, requestedTag string
 
 		tagsByImage[id] = append(tagsByImage[id], tag)
 
-		for img, err := srv.runtime.Graph().Get(id); img != nil; img, err = img.GetParent() {
+		for img, err := srv.daemon.Graph().Get(id); img != nil; img, err = img.GetParent() {
 			if err != nil {
 				return nil, nil, err
 			}
@@ -1582,7 +1582,7 @@ func (srv *Server) pushRepository(r *registry.Registry, out io.Writer, localName
 
 func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID, ep string, token []string, sf *utils.StreamFormatter) (checksum string, err error) {
 	out = utils.NewWriteFlusher(out)
-	jsonRaw, err := ioutil.ReadFile(path.Join(srv.runtime.Graph().Root, imgID, "json"))
+	jsonRaw, err := ioutil.ReadFile(path.Join(srv.daemon.Graph().Root, imgID, "json"))
 	if err != nil {
 		return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err)
 	}
@@ -1601,7 +1601,7 @@ func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID,
 		return "", err
 	}
 
-	layerData, err := srv.runtime.Graph().TempLayerArchive(imgID, archive.Uncompressed, sf, out)
+	layerData, err := srv.daemon.Graph().TempLayerArchive(imgID, archive.Uncompressed, sf, out)
 	if err != nil {
 		return "", fmt.Errorf("Failed to generate layer archive: %s", err)
 	}
@@ -1656,7 +1656,7 @@ func (srv *Server) ImagePush(job *engine.Job) engine.Status {
 		return job.Error(err)
 	}
 
-	img, err := srv.runtime.Graph().Get(localName)
+	img, err := srv.daemon.Graph().Get(localName)
 	r, err2 := registry.NewRegistry(authConfig, srv.HTTPRequestFactory(metaHeaders), endpoint)
 	if err2 != nil {
 		return job.Error(err2)
@@ -1665,11 +1665,11 @@ func (srv *Server) ImagePush(job *engine.Job) engine.Status {
 	if err != nil {
 		reposLen := 1
 		if tag == "" {
-			reposLen = len(srv.runtime.Repositories().Repositories[localName])
+			reposLen = len(srv.daemon.Repositories().Repositories[localName])
 		}
 		job.Stdout.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", localName, reposLen))
 		// If it fails, try to get the repository
-		if localRepo, exists := srv.runtime.Repositories().Repositories[localName]; exists {
+		if localRepo, exists := srv.daemon.Repositories().Repositories[localName]; exists {
 			if err := srv.pushRepository(r, job.Stdout, localName, remoteName, localRepo, tag, sf); err != nil {
 				return job.Error(err)
 			}
@@ -1725,13 +1725,13 @@ func (srv *Server) ImageImport(job *engine.Job) engine.Status {
 		defer progressReader.Close()
 		archive = progressReader
 	}
-	img, err := srv.runtime.Graph().Create(archive, "", "", "Imported from "+src, "", nil, nil)
+	img, err := srv.daemon.Graph().Create(archive, "", "", "Imported from "+src, "", nil, nil)
 	if err != nil {
 		return job.Error(err)
 	}
 	// Optionally register the image at REPO/TAG
 	if repo != "" {
-		if err := srv.runtime.Repositories().Set(repo, tag, img.ID, true); err != nil {
+		if err := srv.daemon.Repositories().Set(repo, tag, img.ID, true); err != nil {
 			return job.Error(err)
 		}
 	}
@@ -1750,17 +1750,17 @@ func (srv *Server) ContainerCreate(job *engine.Job) engine.Status {
 	if config.Memory != 0 && config.Memory < 524288 {
 		return job.Errorf("Minimum memory limit allowed is 512k")
 	}
-	if config.Memory > 0 && !srv.runtime.SystemConfig().MemoryLimit {
+	if config.Memory > 0 && !srv.daemon.SystemConfig().MemoryLimit {
 		job.Errorf("Your kernel does not support memory limit capabilities. Limitation discarded.\n")
 		config.Memory = 0
 	}
-	if config.Memory > 0 && !srv.runtime.SystemConfig().SwapLimit {
+	if config.Memory > 0 && !srv.daemon.SystemConfig().SwapLimit {
 		job.Errorf("Your kernel does not support swap limit capabilities. Limitation discarded.\n")
 		config.MemorySwap = -1
 	}
-	container, buildWarnings, err := srv.runtime.Create(config, name)
+	container, buildWarnings, err := srv.daemon.Create(config, name)
 	if err != nil {
-		if srv.runtime.Graph().IsNotExist(err) {
+		if srv.daemon.Graph().IsNotExist(err) {
 			_, tag := utils.ParseRepositoryTag(config.Image)
 			if tag == "" {
 				tag = graph.DEFAULTTAG
@@ -1769,11 +1769,11 @@ func (srv *Server) ContainerCreate(job *engine.Job) engine.Status {
 		}
 		return job.Error(err)
 	}
-	if !container.Config.NetworkDisabled && srv.runtime.SystemConfig().IPv4ForwardingDisabled {
+	if !container.Config.NetworkDisabled && srv.daemon.SystemConfig().IPv4ForwardingDisabled {
 		job.Errorf("IPv4 forwarding is disabled.\n")
 	}
-	srv.LogEvent("create", container.ID, srv.runtime.Repositories().ImageName(container.Image))
-	// FIXME: this is necessary because runtime.Create might return a nil container
+	srv.LogEvent("create", container.ID, srv.daemon.Repositories().ImageName(container.Image))
+	// FIXME: this is necessary because daemon.Create might return a nil container
 	// with a non-nil error. This should not happen! Once it's fixed we
 	// can remove this workaround.
 	if container != nil {
@@ -1796,11 +1796,11 @@ func (srv *Server) ContainerRestart(job *engine.Job) engine.Status {
 	if job.EnvExists("t") {
 		t = job.GetenvInt("t")
 	}
-	if container := srv.runtime.Get(name); container != nil {
+	if container := srv.daemon.Get(name); container != nil {
 		if err := container.Restart(int(t)); err != nil {
 			return job.Errorf("Cannot restart container %s: %s\n", name, err)
 		}
-		srv.LogEvent("restart", container.ID, srv.runtime.Repositories().ImageName(container.Image))
+		srv.LogEvent("restart", container.ID, srv.daemon.Repositories().ImageName(container.Image))
 	} else {
 		return job.Errorf("No such container: %s\n", name)
 	}
@@ -1816,13 +1816,13 @@ func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status {
 	removeLink := job.GetenvBool("removeLink")
 	forceRemove := job.GetenvBool("forceRemove")
 
-	container := srv.runtime.Get(name)
+	container := srv.daemon.Get(name)
 
 	if removeLink {
 		if container == nil {
 			return job.Errorf("No such link: %s", name)
 		}
-		name, err := runtime.GetFullContainerName(name)
+		name, err := daemon.GetFullContainerName(name)
 		if err != nil {
 			job.Error(err)
 		}
@@ -1830,17 +1830,17 @@ func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status {
 		if parent == "/" {
 			return job.Errorf("Conflict, cannot remove the default name of the container")
 		}
-		pe := srv.runtime.ContainerGraph().Get(parent)
+		pe := srv.daemon.ContainerGraph().Get(parent)
 		if pe == nil {
 			return job.Errorf("Cannot get parent %s for name %s", parent, name)
 		}
-		parentContainer := srv.runtime.Get(pe.ID())
+		parentContainer := srv.daemon.Get(pe.ID())
 
 		if parentContainer != nil {
 			parentContainer.DisableLink(n)
 		}
 
-		if err := srv.runtime.ContainerGraph().Delete(name); err != nil {
+		if err := srv.daemon.ContainerGraph().Delete(name); err != nil {
 			return job.Error(err)
 		}
 		return engine.StatusOK
@@ -1856,16 +1856,16 @@ func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status {
 				return job.Errorf("Impossible to remove a running container, please stop it first or use -f")
 			}
 		}
-		if err := srv.runtime.Destroy(container); err != nil {
+		if err := srv.daemon.Destroy(container); err != nil {
 			return job.Errorf("Cannot destroy container %s: %s", name, err)
 		}
-		srv.LogEvent("destroy", container.ID, srv.runtime.Repositories().ImageName(container.Image))
+		srv.LogEvent("destroy", container.ID, srv.daemon.Repositories().ImageName(container.Image))
 
 		if removeVolume {
 			var (
 				volumes     = make(map[string]struct{})
 				binds       = make(map[string]struct{})
-				usedVolumes = make(map[string]*runtime.Container)
+				usedVolumes = make(map[string]*daemon.Container)
 			)
 
 			// the volume id is always the base of the path
@@ -1903,7 +1903,7 @@ func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status {
 			}
 
 			// Retrieve all volumes from all remaining containers
-			for _, container := range srv.runtime.List() {
+			for _, container := range srv.daemon.List() {
 				for _, containerVolumeId := range container.Volumes {
 					containerVolumeId = getVolumeId(containerVolumeId)
 					usedVolumes[containerVolumeId] = container
@@ -1916,7 +1916,7 @@ func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status {
 					log.Printf("The volume %s is used by the container %s. Impossible to remove it. Skipping.\n", volumeId, c.ID)
 					continue
 				}
-				if err := srv.runtime.Volumes().Delete(volumeId); err != nil {
+				if err := srv.daemon.Volumes().Delete(volumeId); err != nil {
 					return job.Errorf("Error calling volumes.Delete(%q): %v", volumeId, err)
 				}
 			}
@@ -1938,9 +1938,9 @@ func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force, no
 		tag = graph.DEFAULTTAG
 	}
 
-	img, err := srv.runtime.Repositories().LookupImage(name)
+	img, err := srv.daemon.Repositories().LookupImage(name)
 	if err != nil {
-		if r, _ := srv.runtime.Repositories().Get(repoName); r != nil {
+		if r, _ := srv.daemon.Repositories().Get(repoName); r != nil {
 			return fmt.Errorf("No such image: %s:%s", repoName, tag)
 		}
 		return fmt.Errorf("No such image: %s", name)
@@ -1951,14 +1951,14 @@ func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force, no
 		tag = ""
 	}
 
-	byParents, err := srv.runtime.Graph().ByParent()
+	byParents, err := srv.daemon.Graph().ByParent()
 	if err != nil {
 		return err
 	}
 
 	//If delete by id, see if the id belong only to one repository
 	if repoName == "" {
-		for _, repoAndTag := range srv.runtime.Repositories().ByID()[img.ID] {
+		for _, repoAndTag := range srv.daemon.Repositories().ByID()[img.ID] {
 			parsedRepo, parsedTag := utils.ParseRepositoryTag(repoAndTag)
 			if repoName == "" || repoName == parsedRepo {
 				repoName = parsedRepo
@@ -1981,7 +1981,7 @@ func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force, no
 
 	//Untag the current image
 	for _, tag := range tags {
-		tagDeleted, err := srv.runtime.Repositories().Delete(repoName, tag)
+		tagDeleted, err := srv.daemon.Repositories().Delete(repoName, tag)
 		if err != nil {
 			return err
 		}
@@ -1992,16 +1992,16 @@ func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force, no
 			srv.LogEvent("untag", img.ID, "")
 		}
 	}
-	tags = srv.runtime.Repositories().ByID()[img.ID]
+	tags = srv.daemon.Repositories().ByID()[img.ID]
 	if (len(tags) <= 1 && repoName == "") || len(tags) == 0 {
 		if len(byParents[img.ID]) == 0 {
 			if err := srv.canDeleteImage(img.ID); err != nil {
 				return err
 			}
-			if err := srv.runtime.Repositories().DeleteAll(img.ID); err != nil {
+			if err := srv.daemon.Repositories().DeleteAll(img.ID); err != nil {
 				return err
 			}
-			if err := srv.runtime.Graph().Delete(img.ID); err != nil {
+			if err := srv.daemon.Graph().Delete(img.ID); err != nil {
 				return err
 			}
 			out := &engine.Env{}
@@ -2039,8 +2039,8 @@ func (srv *Server) ImageDelete(job *engine.Job) engine.Status {
 }
 
 func (srv *Server) canDeleteImage(imgID string) error {
-	for _, container := range srv.runtime.List() {
-		parent, err := srv.runtime.Repositories().LookupImage(container.Image)
+	for _, container := range srv.daemon.List() {
+		parent, err := srv.daemon.Repositories().LookupImage(container.Image)
 		if err != nil {
 			return err
 		}
@@ -2059,7 +2059,7 @@ func (srv *Server) canDeleteImage(imgID string) error {
 
 func (srv *Server) ImageGetCached(imgID string, config *runconfig.Config) (*image.Image, error) {
 	// Retrieve all images
-	images, err := srv.runtime.Graph().Map()
+	images, err := srv.daemon.Graph().Map()
 	if err != nil {
 		return nil, err
 	}
@@ -2076,7 +2076,7 @@ func (srv *Server) ImageGetCached(imgID string, config *runconfig.Config) (*imag
 	// Loop on the children of the given image and check the config
 	var match *image.Image
 	for elem := range imageMap[imgID] {
-		img, err := srv.runtime.Graph().Get(elem)
+		img, err := srv.daemon.Graph().Get(elem)
 		if err != nil {
 			return nil, err
 		}
@@ -2089,8 +2089,8 @@ func (srv *Server) ImageGetCached(imgID string, config *runconfig.Config) (*imag
 	return match, nil
 }
 
-func (srv *Server) RegisterLinks(container *runtime.Container, hostConfig *runconfig.HostConfig) error {
-	runtime := srv.runtime
+func (srv *Server) RegisterLinks(container *daemon.Container, hostConfig *runconfig.HostConfig) error {
+	daemon := srv.daemon
 
 	if hostConfig != nil && hostConfig.Links != nil {
 		for _, l := range hostConfig.Links {
@@ -2098,19 +2098,19 @@ func (srv *Server) RegisterLinks(container *runtime.Container, hostConfig *runco
 			if err != nil {
 				return err
 			}
-			child, err := srv.runtime.GetByName(parts["name"])
+			child, err := srv.daemon.GetByName(parts["name"])
 			if err != nil {
 				return err
 			}
 			if child == nil {
 				return fmt.Errorf("Could not get container for %s", parts["name"])
 			}
-			if err := runtime.RegisterLink(container, child, parts["alias"]); err != nil {
+			if err := daemon.RegisterLink(container, child, parts["alias"]); err != nil {
 				return err
 			}
 		}
 
-		// After we load all the links into the runtime
+		// After we load all the links into the daemon
 		// set them to nil on the hostconfig
 		hostConfig.Links = nil
 		if err := container.WriteHostConfig(); err != nil {
@@ -2126,8 +2126,8 @@ func (srv *Server) ContainerStart(job *engine.Job) engine.Status {
 	}
 	var (
 		name      = job.Args[0]
-		runtime   = srv.runtime
-		container = runtime.Get(name)
+		daemon    = srv.daemon
+		container = daemon.Get(name)
 	)
 
 	if container == nil {
@@ -2169,7 +2169,7 @@ func (srv *Server) ContainerStart(job *engine.Job) engine.Status {
 	if err := container.Start(); err != nil {
 		return job.Errorf("Cannot start container %s: %s", name, err)
 	}
-	srv.LogEvent("start", container.ID, runtime.Repositories().ImageName(container.Image))
+	srv.LogEvent("start", container.ID, daemon.Repositories().ImageName(container.Image))
 
 	return engine.StatusOK
 }
@@ -2185,11 +2185,11 @@ func (srv *Server) ContainerStop(job *engine.Job) engine.Status {
 	if job.EnvExists("t") {
 		t = job.GetenvInt("t")
 	}
-	if container := srv.runtime.Get(name); container != nil {
+	if container := srv.daemon.Get(name); container != nil {
 		if err := container.Stop(int(t)); err != nil {
 			return job.Errorf("Cannot stop container %s: %s\n", name, err)
 		}
-		srv.LogEvent("stop", container.ID, srv.runtime.Repositories().ImageName(container.Image))
+		srv.LogEvent("stop", container.ID, srv.daemon.Repositories().ImageName(container.Image))
 	} else {
 		return job.Errorf("No such container: %s\n", name)
 	}
@@ -2201,7 +2201,7 @@ func (srv *Server) ContainerWait(job *engine.Job) engine.Status {
 		return job.Errorf("Usage: %s", job.Name)
 	}
 	name := job.Args[0]
-	if container := srv.runtime.Get(name); container != nil {
+	if container := srv.daemon.Get(name); container != nil {
 		status := container.Wait()
 		job.Printf("%d\n", status)
 		return engine.StatusOK
@@ -2222,7 +2222,7 @@ func (srv *Server) ContainerResize(job *engine.Job) engine.Status {
 	if err != nil {
 		return job.Error(err)
 	}
-	if container := srv.runtime.Get(name); container != nil {
+	if container := srv.daemon.Get(name); container != nil {
 		if err := container.Resize(height, width); err != nil {
 			return job.Error(err)
 		}
@@ -2245,7 +2245,7 @@ func (srv *Server) ContainerAttach(job *engine.Job) engine.Status {
 		stderr = job.GetenvBool("stderr")
 	)
 
-	container := srv.runtime.Get(name)
+	container := srv.daemon.Get(name)
 	if container == nil {
 		return job.Errorf("No such container: %s", name)
 	}
@@ -2335,15 +2335,15 @@ func (srv *Server) ContainerAttach(job *engine.Job) engine.Status {
 	return engine.StatusOK
 }
 
-func (srv *Server) ContainerInspect(name string) (*runtime.Container, error) {
-	if container := srv.runtime.Get(name); container != nil {
+func (srv *Server) ContainerInspect(name string) (*daemon.Container, error) {
+	if container := srv.daemon.Get(name); container != nil {
 		return container, nil
 	}
 	return nil, fmt.Errorf("No such container: %s", name)
 }
 
 func (srv *Server) ImageInspect(name string) (*image.Image, error) {
-	if image, err := srv.runtime.Repositories().LookupImage(name); err == nil && image != nil {
+	if image, err := srv.daemon.Repositories().LookupImage(name); err == nil && image != nil {
 		return image, nil
 	}
 	return nil, fmt.Errorf("No such image: %s", name)
@@ -2378,7 +2378,7 @@ func (srv *Server) JobInspect(job *engine.Job) engine.Status {
 			return job.Error(errContainer)
 		}
 		object = &struct {
-			*runtime.Container
+			*daemon.Container
 			HostConfig *runconfig.HostConfig
 		}{container, container.HostConfig()}
 	default:
@@ -2403,7 +2403,7 @@ func (srv *Server) ContainerCopy(job *engine.Job) engine.Status {
 		resource = job.Args[1]
 	)
 
-	if container := srv.runtime.Get(name); container != nil {
+	if container := srv.daemon.Get(name); container != nil {
 
 		data, err := container.Copy(resource)
 		if err != nil {
@@ -2420,20 +2420,20 @@ func (srv *Server) ContainerCopy(job *engine.Job) engine.Status {
 }
 
 func NewServer(eng *engine.Engine, config *daemonconfig.Config) (*Server, error) {
-	runtime, err := runtime.NewRuntime(config, eng)
+	daemon, err := daemon.NewDaemon(config, eng)
 	if err != nil {
 		return nil, err
 	}
 	srv := &Server{
 		Eng:         eng,
-		runtime:     runtime,
+		daemon:      daemon,
 		pullingPool: make(map[string]chan struct{}),
 		pushingPool: make(map[string]chan struct{}),
 		events:      make([]utils.JSONMessage, 0, 64), //only keeps the 64 last events
 		listeners:   make(map[string]chan utils.JSONMessage),
 		running:     true,
 	}
-	runtime.SetServer(srv)
+	daemon.SetServer(srv)
 	return srv, nil
 }
 
@@ -2498,15 +2498,15 @@ func (srv *Server) Close() error {
 		return nil
 	}
 	srv.SetRunning(false)
-	if srv.runtime == nil {
+	if srv.daemon == nil {
 		return nil
 	}
-	return srv.runtime.Close()
+	return srv.daemon.Close()
 }
 
 type Server struct {
 	sync.RWMutex
-	runtime     *runtime.Runtime
+	daemon      *daemon.Daemon
 	pullingPool map[string]chan struct{}
 	pushingPool map[string]chan struct{}
 	events      []utils.JSONMessage