moby/server.go

1640 lines
46 KiB
Go
Raw Normal View History

package docker
import (
"bufio"
"encoding/json"
2013-05-30 19:30:21 +00:00
"errors"
"fmt"
2013-10-31 23:57:45 +00:00
"github.com/dotcloud/docker/archive"
"github.com/dotcloud/docker/auth"
2013-10-31 23:57:45 +00:00
"github.com/dotcloud/docker/engine"
2013-11-15 23:55:45 +00:00
"github.com/dotcloud/docker/graphdb"
2013-05-15 01:41:39 +00:00
"github.com/dotcloud/docker/registry"
2013-05-14 22:37:35 +00:00
"github.com/dotcloud/docker/utils"
"io"
2013-05-15 18:30:40 +00:00
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"os/exec"
2013-10-31 23:57:45 +00:00
"os/signal"
2013-05-15 18:30:40 +00:00
"path"
2013-10-08 16:35:47 +00:00
"path/filepath"
"runtime"
"strings"
"sync"
"syscall"
2013-10-31 23:57:45 +00:00
"time"
)
// Close shuts the server down by closing its runtime and everything
// the runtime owns.
func (srv *Server) Close() error {
return srv.runtime.Close()
}
// init registers the "initapi" job with the engine so that a daemon
// can be bootstrapped via engine job "initapi".
func init() {
engine.Register("initapi", jobInitApi)
}
// jobInitApi runs the remote api server `srv` as a daemon,
// Only one api server can run at the same time - this is enforced by a pidfile.
// The signals SIGINT, SIGKILL and SIGTERM are intercepted for cleanup.
// NOTE(review): SIGKILL cannot actually be caught by a user process, so
// the os.Kill entry registered below will never reach the handler —
// confirm whether it is intentional or leftover.
func jobInitApi(job *engine.Job) string {
job.Logf("Creating server")
srv, err := NewServer(job.Eng, ConfigFromJob(job))
if err != nil {
return err.Error()
}
if srv.runtime.config.Pidfile != "" {
job.Logf("Creating pidfile")
if err := utils.CreatePidFile(srv.runtime.config.Pidfile); err != nil {
log.Fatal(err)
}
}
job.Logf("Setting up signal traps")
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt, os.Kill, os.Signal(syscall.SIGTERM))
// On the first intercepted signal: remove the pidfile, close the
// runtime and exit the process.
go func() {
sig := <-c
log.Printf("Received signal '%v', exiting\n", sig)
utils.RemovePidFile(srv.runtime.config.Pidfile)
srv.Close()
os.Exit(0)
}()
// Expose server internals to the HTTP API layer (transitional hack,
// see Hack_SetGlobalVar).
job.Eng.Hack_SetGlobalVar("httpapi.server", srv)
job.Eng.Hack_SetGlobalVar("httpapi.runtime", srv.runtime)
job.Eng.Hack_SetGlobalVar("httpapi.bridgeIP", srv.runtime.networkManager.bridgeNetwork.IP)
// Register the engine jobs served by this daemon.
if err := job.Eng.Register("create", srv.ContainerCreate); err != nil {
return err.Error()
}
if err := job.Eng.Register("start", srv.ContainerStart); err != nil {
return err.Error()
}
if err := job.Eng.Register("serveapi", srv.ListenAndServe); err != nil {
return err.Error()
}
return "0"
}
// ListenAndServe serves the HTTP API on every proto://addr listed in
// job.Args (supported protocols: "unix" and "tcp"), one goroutine per
// address, and blocks until every listener returns. Returns "0" on
// success or the first listener error message otherwise.
func (srv *Server) ListenAndServe(job *engine.Job) string {
protoAddrs := job.Args
chErrors := make(chan error, len(protoAddrs))
for _, protoAddr := range protoAddrs {
protoAddrParts := strings.SplitN(protoAddr, "://", 2)
switch protoAddrParts[0] {
case "unix":
// Remove a stale socket file; "does not exist" is fine.
if err := syscall.Unlink(protoAddrParts[1]); err != nil && !os.IsNotExist(err) {
log.Fatal(err)
}
case "tcp":
// Warn loudly when binding to a non-loopback TCP address.
if !strings.HasPrefix(protoAddrParts[1], "127.0.0.1") {
log.Println("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
}
default:
return "Invalid protocol format."
}
go func() {
// FIXME: merge Server.ListenAndServe with ListenAndServe
chErrors <- ListenAndServe(protoAddrParts[0], protoAddrParts[1], srv, job.GetenvBool("Logging"))
}()
}
// Wait for all listeners; the first failure aborts the job.
for i := 0; i < len(protoAddrs); i += 1 {
err := <-chErrors
if err != nil {
return err.Error()
}
}
return "0"
}
2013-06-04 18:00:22 +00:00
func (srv *Server) DockerVersion() APIVersion {
return APIVersion{
Version: VERSION,
2013-06-04 18:00:22 +00:00
GitCommit: GITCOMMIT,
GoVersion: runtime.Version(),
}
}
// simpleVersionInfo is a minimal implementation of the VersionInfo
// interface: it stores a product name and a version string and hands
// them back from Name() and Version().
type simpleVersionInfo struct {
	name    string
	version string
}

// Name returns the product name this version information describes.
func (v *simpleVersionInfo) Name() string {
	return v.name
}

// Version returns the stored version string.
func (v *simpleVersionInfo) Version() string {
	return v.version
}
2013-07-18 18:22:49 +00:00
// versionCheckers() returns version informations of:
// docker, go, git-commit (of the docker) and the host's kernel.
//
// Such information will be used on call to NewRegistry().
2013-08-02 07:23:46 +00:00
func (srv *Server) versionInfos() []utils.VersionInfo {
v := srv.DockerVersion()
ret := append(make([]utils.VersionInfo, 0, 4), &simpleVersionInfo{"docker", v.Version})
if len(v.GoVersion) > 0 {
2013-07-23 21:05:13 +00:00
ret = append(ret, &simpleVersionInfo{"go", v.GoVersion})
}
if len(v.GitCommit) > 0 {
2013-07-23 21:05:13 +00:00
ret = append(ret, &simpleVersionInfo{"git-commit", v.GitCommit})
}
if kernelVersion, err := utils.GetKernelVersion(); err == nil {
2013-07-23 21:05:13 +00:00
ret = append(ret, &simpleVersionInfo{"kernel", kernelVersion.String()})
2013-06-28 21:48:37 +00:00
}
return ret
}
// ContainerKill send signal to the container
// If no signal is given (sig 0), then Kill with SIGKILL and wait
// for the container to exit.
// If a signal is given, then just send it to the container and return.
func (srv *Server) ContainerKill(name string, sig int) error {
if container := srv.runtime.Get(name); container != nil {
// If no signal is passed, perform regular Kill (SIGKILL + wait())
if sig == 0 {
if err := container.Kill(); err != nil {
return fmt.Errorf("Cannot kill container %s: %s", name, err)
}
srv.LogEvent("kill", container.ID, srv.runtime.repositories.ImageName(container.Image))
} else {
// Otherwise, just send the requested signal
if err := container.kill(sig); err != nil {
2013-10-30 20:18:48 +00:00
return fmt.Errorf("Cannot kill container %s: %s", name, err)
}
// FIXME: Add event for signals
}
} else {
return fmt.Errorf("No such container: %s", name)
}
return nil
}
// ContainerExport streams the entire filesystem of the named container
// to out as a raw archive and logs an "export" event. Returns an error
// when the container does not exist or the copy fails.
func (srv *Server) ContainerExport(name string, out io.Writer) error {
if container := srv.runtime.Get(name); container != nil {
data, err := container.Export()
if err != nil {
return err
}
// Stream the entire contents of the container (basically a volatile snapshot)
if _, err := io.Copy(out, data); err != nil {
return err
}
srv.LogEvent("export", container.ID, srv.runtime.repositories.ImageName(container.Image))
return nil
}
return fmt.Errorf("No such container: %s", name)
}
func (srv *Server) ImagesSearch(term string) ([]registry.SearchResult, error) {
r, err := registry.NewRegistry(srv.runtime.config.Root, nil, srv.HTTPRequestFactory(nil))
if err != nil {
return nil, err
}
results, err := r.SearchRepositories(term)
2013-05-07 18:37:35 +00:00
if err != nil {
2013-05-07 18:59:04 +00:00
return nil, err
}
return results.Results, nil
2013-05-07 18:37:35 +00:00
}
func (srv *Server) ImageInsert(name, url, path string, out io.Writer, sf *utils.StreamFormatter) error {
2013-05-20 17:58:35 +00:00
out = utils.NewWriteFlusher(out)
2013-05-07 17:23:50 +00:00
img, err := srv.runtime.repositories.LookupImage(name)
if err != nil {
return err
2013-05-07 17:23:50 +00:00
}
2013-05-14 22:37:35 +00:00
file, err := utils.Download(url, out)
2013-05-07 17:23:50 +00:00
if err != nil {
return err
2013-05-07 17:23:50 +00:00
}
defer file.Body.Close()
config, _, _, err := ParseRun([]string{img.ID, "echo", "insert", url, path}, srv.runtime.capabilities)
2013-05-07 17:23:50 +00:00
if err != nil {
return err
2013-05-07 17:23:50 +00:00
}
c, _, err := srv.runtime.Create(config, "")
2013-05-07 17:23:50 +00:00
if err != nil {
return err
2013-05-07 17:23:50 +00:00
}
if err := c.Inject(utils.ProgressReader(file.Body, int(file.ContentLength), out, sf.FormatProgress("", "Downloading", "%8v/%v (%v)"), sf, false), path); err != nil {
return err
2013-05-07 17:23:50 +00:00
}
// FIXME: Handle custom repo, tag comment, author
2013-09-07 00:33:05 +00:00
img, err = srv.runtime.Commit(c, "", "", img.Comment, img.Author, nil)
2013-05-07 17:23:50 +00:00
if err != nil {
return err
2013-05-07 17:23:50 +00:00
}
2013-11-11 19:26:24 +00:00
out.Write(sf.FormatStatus(img.ID, ""))
return nil
2013-05-07 17:23:50 +00:00
}
func (srv *Server) ImagesViz(out io.Writer) error {
2013-09-01 03:31:21 +00:00
images, _ := srv.runtime.graph.Map()
2013-05-07 17:23:50 +00:00
if images == nil {
return nil
}
2013-05-09 21:10:26 +00:00
out.Write([]byte("digraph docker {\n"))
2013-05-07 17:23:50 +00:00
var (
parentImage *Image
err error
)
2013-05-07 17:23:50 +00:00
for _, image := range images {
parentImage, err = image.GetParent()
if err != nil {
return fmt.Errorf("Error while getting parent image: %v", err)
2013-05-07 17:23:50 +00:00
}
if parentImage != nil {
out.Write([]byte(" \"" + parentImage.ID + "\" -> \"" + image.ID + "\"\n"))
2013-05-07 17:23:50 +00:00
} else {
out.Write([]byte(" base -> \"" + image.ID + "\" [style=invis]\n"))
2013-05-07 17:23:50 +00:00
}
}
reporefs := make(map[string][]string)
for name, repository := range srv.runtime.repositories.Repositories {
for tag, id := range repository {
2013-06-04 18:00:22 +00:00
reporefs[utils.TruncateID(id)] = append(reporefs[utils.TruncateID(id)], fmt.Sprintf("%s:%s", name, tag))
2013-05-07 17:23:50 +00:00
}
}
for id, repos := range reporefs {
2013-05-09 21:10:26 +00:00
out.Write([]byte(" \"" + id + "\" [label=\"" + id + "\\n" + strings.Join(repos, "\\n") + "\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n"))
2013-05-07 17:23:50 +00:00
}
2013-05-09 21:10:26 +00:00
out.Write([]byte(" base [style=invisible]\n}\n"))
2013-05-07 17:23:50 +00:00
return nil
}
2013-06-04 18:00:22 +00:00
func (srv *Server) Images(all bool, filter string) ([]APIImages, error) {
2013-05-19 17:46:24 +00:00
var (
allImages map[string]*Image
err error
)
2013-05-07 23:47:43 +00:00
if all {
allImages, err = srv.runtime.graph.Map()
} else {
allImages, err = srv.runtime.graph.Heads()
}
if err != nil {
return nil, err
}
2013-10-06 05:44:04 +00:00
lookup := make(map[string]APIImages)
for name, repository := range srv.runtime.repositories.Repositories {
2013-09-06 19:51:49 +00:00
if filter != "" {
2013-09-06 20:16:10 +00:00
if match, _ := path.Match(filter, name); !match {
2013-09-06 19:51:49 +00:00
continue
}
}
for tag, id := range repository {
image, err := srv.runtime.graph.Get(id)
if err != nil {
log.Printf("Warning: couldn't load %s from %s/%s: %s", id, name, tag, err)
continue
}
2013-10-06 05:44:04 +00:00
if out, exists := lookup[id]; exists {
out.RepoTags = append(out.RepoTags, fmt.Sprintf("%s:%s", name, tag))
lookup[id] = out
} else {
var out APIImages
delete(allImages, id)
out.ParentId = image.Parent
out.RepoTags = []string{fmt.Sprintf("%s:%s", name, tag)}
out.ID = image.ID
out.Created = image.Created.Unix()
out.Size = image.Size
out.VirtualSize = image.getParentsSize(0) + image.Size
lookup[id] = out
}
}
}
2013-10-06 05:44:04 +00:00
outs := make([]APIImages, 0, len(lookup))
for _, value := range lookup {
outs = append(outs, value)
}
// Display images which aren't part of a repository/tag
if filter == "" {
for _, image := range allImages {
2013-06-04 18:00:22 +00:00
var out APIImages
out.ID = image.ID
2013-10-08 13:41:44 +00:00
out.ParentId = image.Parent
2013-10-06 16:36:38 +00:00
out.RepoTags = []string{"<none>:<none>"}
out.Created = image.Created.Unix()
2013-05-22 13:41:29 +00:00
out.Size = image.Size
2013-06-14 10:05:01 +00:00
out.VirtualSize = image.getParentsSize(0) + image.Size
outs = append(outs, out)
}
}
sortImagesByCreationAndTag(outs)
return outs, nil
}
2013-06-04 18:00:22 +00:00
func (srv *Server) DockerInfo() *APIInfo {
2013-09-01 03:31:21 +00:00
images, _ := srv.runtime.graph.Map()
var imgcount int
if images == nil {
imgcount = 0
} else {
imgcount = len(images)
}
lxcVersion := ""
if output, err := exec.Command("lxc-version").CombinedOutput(); err == nil {
outputStr := string(output)
if len(strings.SplitN(outputStr, ":", 2)) == 2 {
lxcVersion = strings.TrimSpace(strings.SplitN(string(output), ":", 2)[1])
}
}
2013-07-24 13:35:38 +00:00
kernelVersion := "<unknown>"
if kv, err := utils.GetKernelVersion(); err == nil {
kernelVersion = kv.String()
}
2013-06-04 18:00:22 +00:00
return &APIInfo{
2013-07-22 18:42:31 +00:00
Containers: len(srv.runtime.List()),
Images: imgcount,
Driver: srv.runtime.driver.String(),
DriverStatus: srv.runtime.driver.Status(),
2013-07-22 18:42:31 +00:00
MemoryLimit: srv.runtime.capabilities.MemoryLimit,
SwapLimit: srv.runtime.capabilities.SwapLimit,
2013-08-19 12:34:30 +00:00
IPv4Forwarding: !srv.runtime.capabilities.IPv4ForwardingDisabled,
2013-07-22 18:42:31 +00:00
Debug: os.Getenv("DEBUG") != "",
NFd: utils.GetTotalUsedFds(),
NGoroutines: runtime.NumGoroutine(),
LXCVersion: lxcVersion,
NEventsListener: len(srv.events),
KernelVersion: kernelVersion,
IndexServerAddress: auth.IndexServerAddress(),
}
}
2013-06-04 18:00:22 +00:00
func (srv *Server) ImageHistory(name string) ([]APIHistory, error) {
image, err := srv.runtime.repositories.LookupImage(name)
if err != nil {
return nil, err
}
lookupMap := make(map[string][]string)
for name, repository := range srv.runtime.repositories.Repositories {
for tag, id := range repository {
// If the ID already has a reverse lookup, do not update it unless for "latest"
if _, exists := lookupMap[id]; !exists {
lookupMap[id] = []string{}
}
lookupMap[id] = append(lookupMap[id], name+":"+tag)
}
}
2013-06-04 18:00:22 +00:00
outs := []APIHistory{} //produce [] when empty instead of 'null'
err = image.WalkHistory(func(img *Image) error {
2013-06-04 18:00:22 +00:00
var out APIHistory
2013-10-18 23:39:40 +00:00
out.ID = img.ID
out.Created = img.Created.Unix()
out.CreatedBy = strings.Join(img.ContainerConfig.Cmd, " ")
out.Tags = lookupMap[img.ID]
2013-10-18 23:39:40 +00:00
out.Size = img.Size
outs = append(outs, out)
return nil
})
return outs, nil
}
2013-11-18 23:35:56 +00:00
func (srv *Server) ContainerTop(name, psArgs string) (*APITop, error) {
if container := srv.runtime.Get(name); container != nil {
2013-11-18 23:35:56 +00:00
output, err := exec.Command("lxc-ps", "--name", container.ID, "--", psArgs).CombinedOutput()
if err != nil {
return nil, fmt.Errorf("lxc-ps: %s (%s)", err, output)
}
2013-07-19 10:06:32 +00:00
procs := APITop{}
for i, line := range strings.Split(string(output), "\n") {
2013-07-19 10:06:32 +00:00
if len(line) == 0 {
continue
}
2013-07-19 10:06:32 +00:00
words := []string{}
scanner := bufio.NewScanner(strings.NewReader(line))
scanner.Split(bufio.ScanWords)
if !scanner.Scan() {
return nil, fmt.Errorf("Wrong output using lxc-ps")
}
// no scanner.Text because we skip container id
2013-07-19 10:06:32 +00:00
for scanner.Scan() {
2013-10-30 02:03:41 +00:00
if i != 0 && len(words) == len(procs.Titles) {
words[len(words)-1] = fmt.Sprintf("%s %s", words[len(words)-1], scanner.Text())
} else {
words = append(words, scanner.Text())
}
2013-07-19 10:06:32 +00:00
}
if i == 0 {
procs.Titles = words
} else {
procs.Processes = append(procs.Processes, words)
}
}
2013-07-19 10:06:32 +00:00
return &procs, nil
}
return nil, fmt.Errorf("No such container: %s", name)
}
// ContainerChanges returns the filesystem changes of the named
// container relative to its base image.
func (srv *Server) ContainerChanges(name string) ([]archive.Change, error) {
if container := srv.runtime.Get(name); container != nil {
return container.Changes()
}
return nil, fmt.Errorf("No such container: %s", name)
}
2013-06-20 14:19:50 +00:00
func (srv *Server) Containers(all, size bool, n int, since, before string) []APIContainers {
2013-05-08 16:28:11 +00:00
var foundBefore bool
var displayed int
out := []APIContainers{}
2013-05-08 16:28:11 +00:00
for _, container := range srv.runtime.List() {
if !container.State.Running && !all && n == -1 && since == "" && before == "" {
continue
}
2013-05-08 16:28:11 +00:00
if before != "" {
if container.ID == before || utils.TruncateID(container.ID) == before {
2013-05-08 16:28:11 +00:00
foundBefore = true
continue
}
if !foundBefore {
continue
}
}
if displayed == n {
break
}
if container.ID == since || utils.TruncateID(container.ID) == since {
break
}
displayed++
c := createAPIContainer(container, size, srv.runtime)
out = append(out, c)
}
return out
}
func createAPIContainer(container *Container, size bool, runtime *Runtime) APIContainers {
c := APIContainers{
ID: container.ID,
}
names := []string{}
2013-11-15 23:55:45 +00:00
runtime.containerGraph.Walk("/", func(p string, e *graphdb.Entity) error {
if e.ID() == container.ID {
names = append(names, p)
}
return nil
}, -1)
c.Names = names
c.Image = runtime.repositories.ImageName(container.Image)
c.Command = fmt.Sprintf("%s %s", container.Path, strings.Join(container.Args, " "))
c.Created = container.Created.Unix()
c.Status = container.State.String()
c.Ports = container.NetworkSettings.PortMappingAPI()
if size {
c.SizeRw, c.SizeRootFs = container.GetSize()
}
return c
}
func (srv *Server) ContainerCommit(name, repo, tag, author, comment string, config *Config) (string, error) {
2013-05-07 17:23:50 +00:00
container := srv.runtime.Get(name)
if container == nil {
return "", fmt.Errorf("No such container: %s", name)
}
2013-09-07 00:33:05 +00:00
img, err := srv.runtime.Commit(container, repo, tag, comment, author, config)
if err != nil {
return "", err
}
return img.ID, err
}
// ContainerTag records image `name` under repo:tag; force allows
// overwriting an existing tag.
// FIXME: this should be called ImageTag
func (srv *Server) ContainerTag(name, repo, tag string, force bool) error {
	return srv.runtime.repositories.Set(repo, tag, name, force)
}
func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgID, endpoint string, token []string, sf *utils.StreamFormatter) error {
history, err := r.GetRemoteHistory(imgID, endpoint, token)
2013-05-15 01:41:39 +00:00
if err != nil {
return err
}
out.Write(sf.FormatProgress(utils.TruncateID(imgID), "Pulling", "dependend layers"))
2013-05-15 01:41:39 +00:00
// FIXME: Try to stream the images?
// FIXME: Launch the getRemoteImage() in goroutines
for i := len(history) - 1; i >= 0; i-- {
id := history[i]
// ensure no two downloads of the same layer happen at the same time
if err := srv.poolAdd("pull", "layer:"+id); err != nil {
utils.Errorf("Image (id: %s) pull is already running, skipping: %v", id, err)
return nil
}
defer srv.poolRemove("pull", "layer:"+id)
2013-05-15 01:41:39 +00:00
if !srv.runtime.graph.Exists(id) {
out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling", "metadata"))
imgJSON, imgSize, err := r.GetRemoteImageJSON(id, endpoint, token)
2013-05-15 01:41:39 +00:00
if err != nil {
out.Write(sf.FormatProgress(utils.TruncateID(id), "Error", "pulling dependend layers"))
2013-08-12 17:53:06 +00:00
// FIXME: Keep going in case of error?
2013-05-15 01:41:39 +00:00
return err
}
2013-06-04 18:00:22 +00:00
img, err := NewImgJSON(imgJSON)
2013-05-15 01:41:39 +00:00
if err != nil {
out.Write(sf.FormatProgress(utils.TruncateID(id), "Error", "pulling dependend layers"))
2013-05-15 01:41:39 +00:00
return fmt.Errorf("Failed to parse json: %s", err)
}
// Get the layer
out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling", "fs layer"))
layer, err := r.GetRemoteImageLayer(img.ID, endpoint, token)
2013-05-15 01:41:39 +00:00
if err != nil {
out.Write(sf.FormatProgress(utils.TruncateID(id), "Error", "pulling dependend layers"))
2013-05-15 01:41:39 +00:00
return err
}
defer layer.Close()
2013-08-06 14:31:51 +00:00
if err := srv.runtime.graph.Register(imgJSON, utils.ProgressReader(layer, imgSize, out, sf.FormatProgress(utils.TruncateID(id), "Downloading", "%8v/%v (%v)"), sf, false), img); err != nil {
out.Write(sf.FormatProgress(utils.TruncateID(id), "Error", "downloading dependend layers"))
2013-05-15 01:41:39 +00:00
return err
}
}
out.Write(sf.FormatProgress(utils.TruncateID(id), "Download", "complete"))
2013-05-15 01:41:39 +00:00
}
return nil
}
2013-07-30 12:09:07 +00:00
func (srv *Server) pullRepository(r *registry.Registry, out io.Writer, localName, remoteName, askedTag, indexEp string, sf *utils.StreamFormatter, parallel bool) error {
2013-07-24 17:10:59 +00:00
out.Write(sf.FormatStatus("", "Pulling repository %s", localName))
2013-05-15 01:41:39 +00:00
repoData, err := r.GetRepositoryData(indexEp, remoteName)
2013-05-06 11:34:31 +00:00
if err != nil {
2013-05-15 01:41:39 +00:00
return err
}
utils.Debugf("Retrieving the tag list")
tagsList, err := r.GetRemoteTags(repoData.Endpoints, remoteName, repoData.Tokens)
2013-05-15 01:41:39 +00:00
if err != nil {
utils.Errorf("%v", err)
2013-05-15 01:41:39 +00:00
return err
}
for tag, id := range tagsList {
repoData.ImgList[id] = &registry.ImgData{
ID: id,
Tag: tag,
Checksum: "",
}
}
utils.Debugf("Registering tags")
// If no tag has been specified, pull them all
if askedTag == "" {
for tag, id := range tagsList {
repoData.ImgList[id].Tag = tag
}
} else {
// Otherwise, check that the tag exists and use only that one
2013-06-04 13:51:12 +00:00
id, exists := tagsList[askedTag]
if !exists {
return fmt.Errorf("Tag %s not found in repository %s", askedTag, localName)
}
2013-06-04 13:51:12 +00:00
repoData.ImgList[id].Tag = askedTag
2013-05-15 01:41:39 +00:00
}
errors := make(chan error)
for _, image := range repoData.ImgList {
2013-07-30 12:09:07 +00:00
downloadImage := func(img *registry.ImgData) {
if askedTag != "" && img.Tag != askedTag {
utils.Debugf("(%s) does not match %s (id: %s), skipping", img.Tag, askedTag, img.ID)
if parallel {
errors <- nil
}
return
}
if img.Tag == "" {
utils.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID)
if parallel {
errors <- nil
}
return
}
// ensure no two downloads of the same image happen at the same time
if err := srv.poolAdd("pull", "img:"+img.ID); err != nil {
utils.Errorf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
if parallel {
errors <- nil
}
return
2013-05-15 01:41:39 +00:00
}
defer srv.poolRemove("pull", "img:"+img.ID)
out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Pulling", fmt.Sprintf("image (%s) from %s", img.Tag, localName)))
success := false
var lastErr error
for _, ep := range repoData.Endpoints {
out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Pulling", fmt.Sprintf("image (%s) from %s, endpoint: %s", img.Tag, localName, ep)))
if err := srv.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil {
// Its not ideal that only the last error is returned, it would be better to concatenate the errors.
// As the error is also given to the output stream the user will see the error.
lastErr = err
out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Error pulling", fmt.Sprintf("image (%s) from %s, endpoint: %s, %s", img.Tag, localName, ep, err)))
continue
}
success = true
break
}
if !success {
out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Error pulling", fmt.Sprintf("image (%s) from %s, %s", img.Tag, localName, lastErr)))
if parallel {
errors <- fmt.Errorf("Could not find repository on any of the indexed registries.")
return
}
}
out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download", "complete"))
if parallel {
errors <- nil
}
2013-05-15 01:41:39 +00:00
}
2013-07-30 12:09:07 +00:00
if parallel {
go downloadImage(image)
} else {
downloadImage(image)
}
}
2013-07-30 12:09:07 +00:00
if parallel {
var lastError error
2013-07-30 12:09:07 +00:00
for i := 0; i < len(repoData.ImgList); i++ {
if err := <-errors; err != nil {
lastError = err
2013-05-15 01:41:39 +00:00
}
}
if lastError != nil {
return lastError
}
}
2013-05-15 01:41:39 +00:00
for tag, id := range tagsList {
if askedTag != "" && tag != askedTag {
continue
}
if err := srv.runtime.repositories.Set(localName, tag, id, true); err != nil {
2013-05-15 01:41:39 +00:00
return err
}
}
if err := srv.runtime.repositories.Save(); err != nil {
2013-05-06 11:34:31 +00:00
return err
}
2013-05-15 01:41:39 +00:00
return nil
}
func (srv *Server) poolAdd(kind, key string) error {
srv.Lock()
defer srv.Unlock()
if _, exists := srv.pullingPool[key]; exists {
return fmt.Errorf("pull %s is already in progress", key)
}
if _, exists := srv.pushingPool[key]; exists {
return fmt.Errorf("push %s is already in progress", key)
}
switch kind {
case "pull":
srv.pullingPool[key] = struct{}{}
break
case "push":
srv.pushingPool[key] = struct{}{}
break
default:
2013-08-12 17:53:06 +00:00
return fmt.Errorf("Unknown pool type")
}
return nil
}
func (srv *Server) poolRemove(kind, key string) error {
switch kind {
case "pull":
delete(srv.pullingPool, key)
break
case "push":
delete(srv.pushingPool, key)
break
default:
2013-08-12 17:53:06 +00:00
return fmt.Errorf("Unknown pool type")
}
return nil
}
func (srv *Server) ImagePull(localName string, tag string, out io.Writer, sf *utils.StreamFormatter, authConfig *auth.AuthConfig, metaHeaders map[string][]string, parallel bool) error {
r, err := registry.NewRegistry(srv.runtime.config.Root, authConfig, srv.HTTPRequestFactory(metaHeaders))
if err != nil {
return err
}
if err := srv.poolAdd("pull", localName+":"+tag); err != nil {
return err
}
defer srv.poolRemove("pull", localName+":"+tag)
// Resolve the Repository name from fqn to endpoint + name
endpoint, remoteName, err := registry.ResolveRepositoryName(localName)
if err != nil {
return err
}
if endpoint == auth.IndexServerAddress() {
// If pull "index.docker.io/foo/bar", it's stored locally under "foo/bar"
localName = remoteName
}
2013-05-23 15:16:35 +00:00
out = utils.NewWriteFlusher(out)
2013-07-30 12:09:07 +00:00
err = srv.pullRepository(r, out, localName, remoteName, tag, endpoint, sf, parallel)
if err == registry.ErrLoginRequired {
return err
}
if err != nil {
if err := srv.pullImage(r, out, remoteName, endpoint, nil, sf); err != nil {
return err
}
2013-05-07 17:23:50 +00:00
return nil
}
2013-06-05 22:13:01 +00:00
2013-05-15 01:41:39 +00:00
return nil
}
// Retrieve the all the images to be uploaded in the correct order
// Note: we can't use a map as it is not ordered
func (srv *Server) getImageList(localRepo map[string]string) ([][]*registry.ImgData, error) {
imgList := map[string]*registry.ImgData{}
depGraph := utils.NewDependencyGraph()
for tag, id := range localRepo {
2013-05-15 18:30:40 +00:00
img, err := srv.runtime.graph.Get(id)
if err != nil {
2013-05-15 18:30:40 +00:00
return nil, err
}
depGraph.NewNode(img.ID)
img.WalkHistory(func(current *Image) error {
imgList[current.ID] = &registry.ImgData{
2013-09-06 20:16:10 +00:00
ID: current.ID,
Tag: tag,
}
parent, err := current.GetParent()
if err != nil {
return err
}
if parent == nil {
return nil
}
depGraph.NewNode(parent.ID)
depGraph.AddDependency(current.ID, parent.ID)
return nil
})
}
traversalMap, err := depGraph.GenerateTraversalMap()
if err != nil {
return nil, err
}
utils.Debugf("Traversal map: %v", traversalMap)
result := [][]*registry.ImgData{}
for _, round := range traversalMap {
dataRound := []*registry.ImgData{}
for _, imgID := range round {
dataRound = append(dataRound, imgList[imgID])
}
result = append(result, dataRound)
}
return result, nil
}
// flatten concatenates the per-round image slices into one flat list.
// It always returns a non-nil slice (so JSON encodes [] instead of null).
func flatten(slc [][]*registry.ImgData) []*registry.ImgData {
	result := []*registry.ImgData{}
	for _, round := range slc {
		result = append(result, round...)
	}
	return result
}
func (srv *Server) pushRepository(r *registry.Registry, out io.Writer, localName, remoteName string, localRepo map[string]string, indexEp string, sf *utils.StreamFormatter) error {
2013-05-20 17:58:35 +00:00
out = utils.NewWriteFlusher(out)
2013-05-15 18:30:40 +00:00
imgList, err := srv.getImageList(localRepo)
if err != nil {
return err
}
flattenedImgList := flatten(imgList)
2013-07-24 17:10:59 +00:00
out.Write(sf.FormatStatus("", "Sending image list"))
var repoData *registry.RepositoryData
repoData, err = r.PushImageJSONIndex(indexEp, remoteName, flattenedImgList, false, nil)
if err != nil {
return err
}
for _, ep := range repoData.Endpoints {
2013-07-24 17:10:59 +00:00
out.Write(sf.FormatStatus("", "Pushing repository %s (%d tags)", localName, len(localRepo)))
// This section can not be parallelized (each round depends on the previous one)
for _, round := range imgList {
// FIXME: This section can be parallelized
for _, elem := range round {
var pushTags func() error
pushTags = func() error {
out.Write(sf.FormatStatus("", "Pushing tags for rev [%s] on {%s}", elem.ID, ep+"repositories/"+remoteName+"/tags/"+elem.Tag))
if err := r.PushRegistryTag(remoteName, elem.ID, elem.Tag, ep, repoData.Tokens); err != nil {
return err
}
return nil
}
if _, exists := repoData.ImgList[elem.ID]; exists {
if err := pushTags(); err != nil {
return err
}
out.Write(sf.FormatStatus("", "Image %s already pushed, skipping", elem.ID))
continue
} else if r.LookupRemoteImage(elem.ID, ep, repoData.Tokens) {
if err := pushTags(); err != nil {
return err
}
out.Write(sf.FormatStatus("", "Image %s already pushed, skipping", elem.ID))
continue
}
2013-11-18 23:35:56 +00:00
checksum, err := srv.pushImage(r, out, remoteName, elem.ID, ep, repoData.Tokens, sf)
if err != nil {
// FIXME: Continue on error?
return err
}
2013-11-18 23:35:56 +00:00
elem.Checksum = checksum
2013-09-11 17:39:33 +00:00
if err := pushTags(); err != nil {
return err
}
2013-05-15 18:30:40 +00:00
}
}
}
if _, err := r.PushImageJSONIndex(indexEp, remoteName, flattenedImgList, true, repoData.Endpoints); err != nil {
return err
}
2013-05-15 18:30:40 +00:00
return nil
}
func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID, ep string, token []string, sf *utils.StreamFormatter) (checksum string, err error) {
2013-05-20 17:58:35 +00:00
out = utils.NewWriteFlusher(out)
jsonRaw, err := ioutil.ReadFile(path.Join(srv.runtime.graph.Root, imgID, "json"))
if err != nil {
return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err)
}
2013-07-24 17:10:59 +00:00
out.Write(sf.FormatStatus("", "Pushing %s", imgID))
imgData := &registry.ImgData{
2013-07-17 19:13:22 +00:00
ID: imgID,
}
2013-05-15 20:22:57 +00:00
// Send the json
2013-06-04 18:00:22 +00:00
if err := r.PushImageJSONRegistry(imgData, jsonRaw, ep, token); err != nil {
2013-05-15 20:22:57 +00:00
if err == registry.ErrAlreadyExists {
2013-07-24 17:10:59 +00:00
out.Write(sf.FormatStatus("", "Image %s already pushed, skipping", imgData.ID))
return "", nil
2013-05-15 20:22:57 +00:00
}
return "", err
2013-05-15 20:22:57 +00:00
}
2013-10-31 23:57:45 +00:00
layerData, err := srv.runtime.graph.TempLayerArchive(imgID, archive.Uncompressed, sf, out)
if err != nil {
return "", fmt.Errorf("Failed to generate layer archive: %s", err)
}
2013-11-16 00:23:55 +00:00
defer os.RemoveAll(layerData.Name())
2013-05-15 18:30:40 +00:00
// Send the layer
2013-11-18 23:35:56 +00:00
checksum, err = r.PushImageLayerRegistry(imgData.ID, utils.ProgressReader(layerData, int(layerData.Size), out, sf.FormatProgress("", "Pushing", "%8v/%v (%v)"), sf, false), ep, token, jsonRaw)
if err != nil {
return "", err
}
2013-11-18 23:35:56 +00:00
imgData.Checksum = checksum
out.Write(sf.FormatStatus("", ""))
2013-07-17 19:13:22 +00:00
// Send the checksum
if err := r.PushImageChecksumRegistry(imgData, ep, token); err != nil {
return "", err
2013-07-17 19:13:22 +00:00
}
return imgData.Checksum, nil
}
2013-08-12 17:53:06 +00:00
// FIXME: Allow to interrupt current push when new push of same image is done.
func (srv *Server) ImagePush(localName string, out io.Writer, sf *utils.StreamFormatter, authConfig *auth.AuthConfig, metaHeaders map[string][]string) error {
if err := srv.poolAdd("push", localName); err != nil {
return err
}
defer srv.poolRemove("push", localName)
// Resolve the Repository name from fqn to endpoint + name
endpoint, remoteName, err := registry.ResolveRepositoryName(localName)
2013-07-05 22:26:08 +00:00
if err != nil {
return err
}
2013-05-20 17:58:35 +00:00
out = utils.NewWriteFlusher(out)
img, err := srv.runtime.graph.Get(localName)
r, err2 := registry.NewRegistry(srv.runtime.config.Root, authConfig, srv.HTTPRequestFactory(metaHeaders))
if err2 != nil {
return err2
}
2013-06-05 22:12:50 +00:00
2013-05-06 11:34:31 +00:00
if err != nil {
reposLen := len(srv.runtime.repositories.Repositories[localName])
2013-07-24 17:10:59 +00:00
out.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", localName, reposLen))
// If it fails, try to get the repository
if localRepo, exists := srv.runtime.repositories.Repositories[localName]; exists {
if err := srv.pushRepository(r, out, localName, remoteName, localRepo, endpoint, sf); err != nil {
2013-05-06 11:34:31 +00:00
return err
}
return nil
}
return err
}
var token []string
2013-07-24 17:10:59 +00:00
out.Write(sf.FormatStatus("", "The push refers to an image: [%s]", localName))
if _, err := srv.pushImage(r, out, remoteName, img.ID, endpoint, token, sf); err != nil {
2013-05-06 11:34:31 +00:00
return err
}
return nil
}
2013-05-25 15:09:46 +00:00
func (srv *Server) ImageImport(src, repo, tag string, in io.Reader, out io.Writer, sf *utils.StreamFormatter) error {
var archive io.Reader
var resp *http.Response
if src == "-" {
archive = in
} else {
u, err := url.Parse(src)
if err != nil {
2013-05-25 15:09:46 +00:00
return err
}
if u.Scheme == "" {
u.Scheme = "http"
u.Host = src
u.Path = ""
}
2013-07-24 17:10:59 +00:00
out.Write(sf.FormatStatus("", "Downloading from %s", u))
// Download with curl (pretty progress bar)
// If curl is not available, fallback to http.Get()
2013-05-14 22:37:35 +00:00
resp, err = utils.Download(u.String(), out)
if err != nil {
return err
}
2013-08-06 14:31:51 +00:00
archive = utils.ProgressReader(resp.Body, int(resp.ContentLength), out, sf.FormatProgress("", "Importing", "%8v/%v (%v)"), sf, true)
}
img, err := srv.runtime.graph.Create(archive, nil, "Imported from "+src, "", nil)
if err != nil {
return err
}
// Optionally register the image at REPO/TAG
if repo != "" {
2013-06-04 18:00:22 +00:00
if err := srv.runtime.repositories.Set(repo, tag, img.ID, true); err != nil {
return err
}
}
out.Write(sf.FormatStatus("", img.ID))
return nil
}
func (srv *Server) ContainerCreate(job *engine.Job) string {
var name string
if len(job.Args) == 1 {
name = job.Args[0]
} else if len(job.Args) > 1 {
return fmt.Sprintf("Usage: %s ", job.Name)
}
var config Config
if err := job.ExportEnv(&config); err != nil {
return err.Error()
}
if config.Memory != 0 && config.Memory < 524288 {
return "Minimum memory limit allowed is 512k"
}
if config.Memory > 0 && !srv.runtime.capabilities.MemoryLimit {
config.Memory = 0
}
if config.Memory > 0 && !srv.runtime.capabilities.SwapLimit {
config.MemorySwap = -1
}
container, buildWarnings, err := srv.runtime.Create(&config, name)
if err != nil {
if srv.runtime.graph.IsNotExist(err) {
2013-08-18 03:03:54 +00:00
_, tag := utils.ParseRepositoryTag(config.Image)
if tag == "" {
tag = DEFAULTTAG
}
return fmt.Sprintf("No such image: %s (tag: %s)", config.Image, tag)
}
return err.Error()
}
srv.LogEvent("create", container.ID, srv.runtime.repositories.ImageName(container.Image))
// FIXME: this is necessary because runtime.Create might return a nil container
// with a non-nil error. This should not happen! Once it's fixed we
// can remove this workaround.
if container != nil {
job.Printf("%s\n", container.ID)
}
for _, warning := range buildWarnings {
job.Errorf("%s\n", warning)
}
return "0"
}
// ContainerRestart restarts the named container, allowing it t seconds to
// stop before it is killed.
func (srv *Server) ContainerRestart(name string, t int) error {
	container := srv.runtime.Get(name)
	if container == nil {
		return fmt.Errorf("No such container: %s", name)
	}
	if err := container.Restart(t); err != nil {
		return fmt.Errorf("Cannot restart container %s: %s", name, err)
	}
	srv.LogEvent("restart", container.ID, srv.runtime.repositories.ImageName(container.Image))
	return nil
}
func (srv *Server) ContainerDestroy(name string, removeVolume, removeLink bool) error {
container := srv.runtime.Get(name)
if removeLink {
if container == nil {
return fmt.Errorf("No such link: %s", name)
}
2013-11-04 17:28:40 +00:00
name, err := srv.runtime.getFullName(name)
if err != nil {
return err
}
parent, n := path.Split(name)
if parent == "/" {
return fmt.Errorf("Conflict, cannot remove the default name of the container")
}
pe := srv.runtime.containerGraph.Get(parent)
if pe == nil {
return fmt.Errorf("Cannot get parent %s for name %s", parent, name)
}
parentContainer := srv.runtime.Get(pe.ID())
if parentContainer != nil && parentContainer.activeLinks != nil {
if link, exists := parentContainer.activeLinks[n]; exists {
link.Disable()
} else {
utils.Debugf("Could not find active link for %s", name)
}
}
if err := srv.runtime.containerGraph.Delete(name); err != nil {
return err
}
return nil
}
if container != nil {
if container.State.Running {
return fmt.Errorf("Impossible to remove a running container, please stop it first")
}
2013-05-06 09:52:15 +00:00
volumes := make(map[string]struct{})
// Store all the deleted containers volumes
for _, volumeId := range container.Volumes {
2013-10-08 16:35:47 +00:00
volumeId = strings.TrimRight(volumeId, "/layer")
volumeId = filepath.Base(volumeId)
2013-05-06 09:52:15 +00:00
volumes[volumeId] = struct{}{}
}
if err := srv.runtime.Destroy(container); err != nil {
return fmt.Errorf("Cannot destroy container %s: %s", name, err)
}
srv.LogEvent("destroy", container.ID, srv.runtime.repositories.ImageName(container.Image))
2013-05-06 09:52:15 +00:00
2013-05-10 02:19:55 +00:00
if removeVolume {
2013-05-06 09:52:15 +00:00
// Retrieve all volumes from all remaining containers
usedVolumes := make(map[string]*Container)
for _, container := range srv.runtime.List() {
for _, containerVolumeId := range container.Volumes {
usedVolumes[containerVolumeId] = container
}
}
for volumeId := range volumes {
// If the requested volu
if c, exists := usedVolumes[volumeId]; exists {
2013-06-04 18:00:22 +00:00
log.Printf("The volume %s is used by the container %s. Impossible to remove it. Skipping.\n", volumeId, c.ID)
2013-05-06 09:52:15 +00:00
continue
}
if err := srv.runtime.volumes.Delete(volumeId); err != nil {
return err
}
}
}
} else {
return fmt.Errorf("No such container: %s", name)
}
return nil
}
// ErrImageReferenced is returned by the image-deletion helpers when an
// image (or one of its descendants) cannot be removed because a
// repository still references it.
var ErrImageReferenced = errors.New("Image referenced by a repository")
// deleteImageAndChildren recursively removes image id and its descendants
// from the graph, appending one APIRmi entry to imgs per image actually
// deleted. It returns ErrImageReferenced — without deleting id — when id
// or any descendant is still referenced by a repository; other errors
// abort the walk immediately.
func (srv *Server) deleteImageAndChildren(id string, imgs *[]APIRmi) error {
	// If the image is referenced by a repo, do not delete
	if len(srv.runtime.repositories.ByID()[id]) != 0 {
		return ErrImageReferenced
	}
	// If the image is not referenced but has children, go recursive
	referenced := false
	byParents, err := srv.runtime.graph.ByParent()
	if err != nil {
		return err
	}
	for _, img := range byParents[id] {
		if err := srv.deleteImageAndChildren(img.ID, imgs); err != nil {
			if err != ErrImageReferenced {
				return err
			}
			// A referenced child pins this image too, but keep going so
			// the remaining unreferenced children still get pruned.
			referenced = true
		}
	}
	if referenced {
		return ErrImageReferenced
	}
	// If the image is not referenced and has no children, remove it.
	// Re-read the parent index: the recursion above may have deleted
	// some children, so the earlier snapshot is stale.
	byParents, err = srv.runtime.graph.ByParent()
	if err != nil {
		return err
	}
	if len(byParents[id]) == 0 {
		if err := srv.runtime.repositories.DeleteAll(id); err != nil {
			return err
		}
		err := srv.runtime.graph.Delete(id)
		if err != nil {
			return err
		}
		*imgs = append(*imgs, APIRmi{Deleted: utils.TruncateID(id)})
		srv.LogEvent("delete", utils.TruncateID(id), "")
		return nil
	}
	return nil
}
2013-06-10 21:05:54 +00:00
func (srv *Server) deleteImageParents(img *Image, imgs *[]APIRmi) error {
2013-05-30 19:30:21 +00:00
if img.Parent != "" {
parent, err := srv.runtime.graph.Get(img.Parent)
if err != nil {
return err
}
// Remove all children images
if err := srv.deleteImageAndChildren(img.Parent, imgs); err != nil {
2013-05-30 19:30:21 +00:00
return err
}
return srv.deleteImageParents(parent, imgs)
}
return nil
}
// deleteImage untags img from repoName:tag and, once no repository
// references the image at all, prunes its layer chain (children first,
// then unreferenced parents). It returns one APIRmi entry per untag or
// delete performed. An empty repoName means "delete by image ID": all
// tags are collected from the single repository owning the ID, and an
// empty result is returned (conflict) if the ID spans multiple repos.
func (srv *Server) deleteImage(img *Image, repoName, tag string) ([]APIRmi, error) {
	imgs := []APIRmi{}
	tags := []string{}
	//If delete by id, see if the id belong only to one repository
	if repoName == "" {
		for _, repoAndTag := range srv.runtime.repositories.ByID()[img.ID] {
			parsedRepo, parsedTag := utils.ParseRepositoryTag(repoAndTag)
			// repoName is empty on the first iteration and is then pinned
			// to the first repository seen; every later entry must belong
			// to that same repository.
			if repoName == "" || repoName == parsedRepo {
				repoName = parsedRepo
				if parsedTag != "" {
					tags = append(tags, parsedTag)
				}
			} else if repoName != parsedRepo {
				// the id belongs to multiple repos, like base:latest and user:test,
				// in that case return conflict
				return imgs, nil
			}
		}
	} else {
		tags = append(tags, tag)
	}
	//Untag the current image
	for _, tag := range tags {
		tagDeleted, err := srv.runtime.repositories.Delete(repoName, tag)
		if err != nil {
			return nil, err
		}
		if tagDeleted {
			imgs = append(imgs, APIRmi{Untagged: img.ID})
			srv.LogEvent("untag", img.ID, "")
		}
	}
	// Only prune the layers once no repository references the image any
	// more; ErrImageReferenced from the helpers is not an error here.
	if len(srv.runtime.repositories.ByID()[img.ID]) == 0 {
		if err := srv.deleteImageAndChildren(img.ID, &imgs); err != nil {
			if err != ErrImageReferenced {
				return imgs, err
			}
		} else if err := srv.deleteImageParents(img, &imgs); err != nil {
			if err != ErrImageReferenced {
				return imgs, err
			}
		}
	}
	return imgs, nil
}
func (srv *Server) ImageDelete(name string, autoPrune bool) ([]APIRmi, error) {
2013-05-30 22:53:45 +00:00
img, err := srv.runtime.repositories.LookupImage(name)
if err != nil {
return nil, fmt.Errorf("No such image: %s", name)
}
if !autoPrune {
2013-06-10 21:05:54 +00:00
if err := srv.runtime.graph.Delete(img.ID); err != nil {
return nil, fmt.Errorf("Cannot delete image %s: %s", name, err)
}
return nil, nil
2013-05-30 22:53:45 +00:00
}
// Prevent deletion if image is used by a running container
for _, container := range srv.runtime.List() {
if container.State.Running {
parent, err := srv.runtime.repositories.LookupImage(container.Image)
if err != nil {
return nil, err
}
if err := parent.WalkHistory(func(p *Image) error {
if img.ID == p.ID {
return fmt.Errorf("Conflict, cannot delete %s because the running container %s is using it", name, container.ID)
}
return nil
}); err != nil {
return nil, err
}
}
}
2013-10-21 23:54:02 +00:00
if strings.Contains(img.ID, name) {
//delete via ID
return srv.deleteImage(img, "", "")
}
name, tag := utils.ParseRepositoryTag(name)
2013-05-30 22:53:45 +00:00
return srv.deleteImage(img, name, tag)
}
func (srv *Server) ImageGetCached(imgID string, config *Config) (*Image, error) {
2013-05-19 17:46:24 +00:00
// Retrieve all images
2013-09-01 03:31:21 +00:00
images, err := srv.runtime.graph.Map()
2013-05-19 17:46:24 +00:00
if err != nil {
return nil, err
}
// Store the tree in a map of map (map[parentId][childId])
imageMap := make(map[string]map[string]struct{})
for _, img := range images {
if _, exists := imageMap[img.Parent]; !exists {
imageMap[img.Parent] = make(map[string]struct{})
}
2013-06-04 18:00:22 +00:00
imageMap[img.Parent][img.ID] = struct{}{}
2013-05-19 17:46:24 +00:00
}
// Loop on the children of the given image and check the config
for elem := range imageMap[imgID] {
2013-05-19 17:46:24 +00:00
img, err := srv.runtime.graph.Get(elem)
if err != nil {
return nil, err
}
if CompareConfig(&img.ContainerConfig, config) {
return img, nil
}
}
return nil, nil
}
func (srv *Server) RegisterLinks(name string, hostConfig *HostConfig) error {
runtime := srv.runtime
container := runtime.Get(name)
if container == nil {
return fmt.Errorf("No such container: %s", name)
}
if hostConfig != nil && hostConfig.Links != nil {
for _, l := range hostConfig.Links {
parts, err := parseLink(l)
if err != nil {
return err
}
child, err := srv.runtime.GetByName(parts["name"])
if err != nil {
return err
}
if child == nil {
return fmt.Errorf("Could not get container for %s", parts["name"])
2013-10-18 21:15:24 +00:00
}
if err := runtime.RegisterLink(container, child, parts["alias"]); err != nil {
return err
}
}
// After we load all the links into the runtime
// set them to nil on the hostconfig
hostConfig.Links = nil
if err := container.writeHostConfig(); err != nil {
return err
}
}
return nil
}
func (srv *Server) ContainerStart(job *engine.Job) string {
if len(job.Args) < 1 {
return fmt.Sprintf("Usage: %s container_id", job.Name)
}
name := job.Args[0]
runtime := srv.runtime
container := runtime.Get(name)
if container == nil {
return fmt.Sprintf("No such container: %s", name)
}
// If no environment was set, then no hostconfig was passed.
if len(job.Environ()) > 0 {
var hostConfig HostConfig
if err := job.ExportEnv(&hostConfig); err != nil {
return err.Error()
}
// Validate the HostConfig binds. Make sure that:
2013-11-13 19:25:55 +00:00
// 1) the source of a bind mount isn't /
// The bind mount "/:/foo" isn't allowed.
// 2) Check that the source exists
// The source to be bind mounted must exist.
for _, bind := range hostConfig.Binds {
splitBind := strings.Split(bind, ":")
source := splitBind[0]
// refuse to bind mount "/" to the container
if source == "/" {
2013-11-13 19:25:55 +00:00
return fmt.Sprintf("Invalid bind mount '%s' : source can't be '/'", bind)
}
// ensure the source exists on the host
_, err := os.Stat(source)
if err != nil && os.IsNotExist(err) {
2013-11-13 19:25:55 +00:00
return fmt.Sprintf("Invalid bind mount '%s' : source doesn't exist", bind)
}
}
// Register any links from the host config before starting the container
// FIXME: we could just pass the container here, no need to lookup by name again.
if err := srv.RegisterLinks(name, &hostConfig); err != nil {
return err.Error()
}
container.hostConfig = &hostConfig
container.ToDisk()
}
if err := container.Start(); err != nil {
return fmt.Sprintf("Cannot start container %s: %s", name, err)
}
srv.LogEvent("start", container.ID, runtime.repositories.ImageName(container.Image))
return "0"
}
// ContainerStop stops the named container, giving it t seconds before it
// is killed.
func (srv *Server) ContainerStop(name string, t int) error {
	container := srv.runtime.Get(name)
	if container == nil {
		return fmt.Errorf("No such container: %s", name)
	}
	if err := container.Stop(t); err != nil {
		return fmt.Errorf("Cannot stop container %s: %s", name, err)
	}
	srv.LogEvent("stop", container.ID, srv.runtime.repositories.ImageName(container.Image))
	return nil
}
// ContainerWait blocks until the named container exits and returns its
// exit code.
func (srv *Server) ContainerWait(name string) (int, error) {
	container := srv.runtime.Get(name)
	if container == nil {
		return 0, fmt.Errorf("No such container: %s", name)
	}
	return container.Wait(), nil
}
2013-05-24 02:33:28 +00:00
func (srv *Server) ContainerResize(name string, h, w int) error {
if container := srv.runtime.Get(name); container != nil {
return container.Resize(h, w)
}
return fmt.Errorf("No such container: %s", name)
}
// ContainerAttach attaches to the named container. With logs, previously
// recorded output is replayed onto the requested streams first; with
// stream, stdin/stdout/stderr are then wired live (per the stdin, stdout,
// stderr flags). Log read/replay errors are only logged, never returned.
func (srv *Server) ContainerAttach(name string, logs, stream, stdin, stdout, stderr bool, inStream io.ReadCloser, outStream, errStream io.Writer) error {
	container := srv.runtime.Get(name)
	if container == nil {
		return fmt.Errorf("No such container: %s", name)
	}
	//logs
	if logs {
		cLog, err := container.ReadLog("json")
		if err != nil && os.IsNotExist(err) {
			// Legacy logs: no json log file exists, fall back to the old
			// separate per-stream log files.
			utils.Errorf("Old logs format")
			if stdout {
				cLog, err := container.ReadLog("stdout")
				if err != nil {
					utils.Errorf("Error reading logs (stdout): %s", err)
				} else if _, err := io.Copy(outStream, cLog); err != nil {
					utils.Errorf("Error streaming logs (stdout): %s", err)
				}
			}
			if stderr {
				cLog, err := container.ReadLog("stderr")
				if err != nil {
					utils.Errorf("Error reading logs (stderr): %s", err)
				} else if _, err := io.Copy(errStream, cLog); err != nil {
					utils.Errorf("Error streaming logs (stderr): %s", err)
				}
			}
		} else if err != nil {
			utils.Errorf("Error reading logs (json): %s", err)
		} else {
			// Replay the json log, demultiplexing each entry onto the
			// stream it was originally written to.
			dec := json.NewDecoder(cLog)
			for {
				l := &utils.JSONLog{}
				if err := dec.Decode(l); err == io.EOF {
					break
				} else if err != nil {
					utils.Errorf("Error streaming logs: %s", err)
					break
				}
				if l.Stream == "stdout" && stdout {
					fmt.Fprintf(outStream, "%s", l.Log)
				}
				if l.Stream == "stderr" && stderr {
					fmt.Fprintf(errStream, "%s", l.Log)
				}
			}
		}
	}
	//stream
	if stream {
		if container.State.Ghost {
			return fmt.Errorf("Impossible to attach to a ghost container")
		}
		var (
			cStdin           io.ReadCloser
			cStdout, cStderr io.Writer
			cStdinCloser     io.Closer
		)
		if stdin {
			// Buffer the client's stdin through a pipe; the attach
			// machinery reads from the pipe while a goroutine pumps the
			// real input stream into it.
			r, w := io.Pipe()
			go func() {
				defer w.Close()
				defer utils.Debugf("Closing buffered stdin pipe")
				io.Copy(w, inStream)
			}()
			cStdin = r
			cStdinCloser = inStream
		}
		if stdout {
			cStdout = outStream
		}
		if stderr {
			cStderr = errStream
		}

		// Block until the attach ends.
		<-container.Attach(cStdin, cStdinCloser, cStdout, cStderr)

		// If we are in stdinonce mode, wait for the process to end
		// otherwise, simply return
		if container.Config.StdinOnce && !container.Config.Tty {
			container.Wait()
		}
	}
	return nil
}
// ContainerInspect returns the container registered under name.
func (srv *Server) ContainerInspect(name string) (*Container, error) {
	container := srv.runtime.Get(name)
	if container == nil {
		return nil, fmt.Errorf("No such container: %s", name)
	}
	return container, nil
}
// ImageInspect resolves name (repo[:tag] or image ID) to an image.
func (srv *Server) ImageInspect(name string) (*Image, error) {
	image, err := srv.runtime.repositories.LookupImage(name)
	if err != nil || image == nil {
		return nil, fmt.Errorf("No such image: %s", name)
	}
	return image, nil
}
// ContainerCopy streams the archive of resource inside the named
// container to out.
func (srv *Server) ContainerCopy(name string, resource string, out io.Writer) error {
	container := srv.runtime.Get(name)
	if container == nil {
		return fmt.Errorf("No such container: %s", name)
	}
	data, err := container.Copy(resource)
	if err != nil {
		return err
	}
	_, err = io.Copy(out, data)
	return err
}
func NewServer(eng *engine.Engine, config *DaemonConfig) (*Server, error) {
runtime, err := NewRuntime(config)
if err != nil {
return nil, err
}
srv := &Server{
Eng: eng,
runtime: runtime,
pullingPool: make(map[string]struct{}),
pushingPool: make(map[string]struct{}),
events: make([]utils.JSONMessage, 0, 64), //only keeps the 64 last events
listeners: make(map[string]chan utils.JSONMessage),
2013-08-02 07:23:46 +00:00
reqFactory: nil,
}
runtime.srv = srv
return srv, nil
}
func (srv *Server) HTTPRequestFactory(metaHeaders map[string][]string) *utils.HTTPRequestFactory {
2013-08-02 07:30:45 +00:00
if srv.reqFactory == nil {
ud := utils.NewHTTPUserAgentDecorator(srv.versionInfos()...)
md := &utils.HTTPMetaHeadersDecorator{
Headers: metaHeaders,
}
factory := utils.NewHTTPRequestFactory(ud, md)
2013-08-02 07:30:45 +00:00
srv.reqFactory = factory
}
return srv.reqFactory
}
func (srv *Server) LogEvent(action, id, from string) *utils.JSONMessage {
now := time.Now().Unix()
jm := utils.JSONMessage{Status: action, ID: id, From: from, Time: now}
srv.events = append(srv.events, jm)
for _, c := range srv.listeners {
select { // non blocking channel
case c <- jm:
default:
}
2013-07-10 12:55:05 +00:00
}
return &jm
2013-07-10 12:55:05 +00:00
}
// Server is the top-level daemon object: it owns the Runtime and backs
// the operations exposed through the HTTP API and engine jobs.
type Server struct {
	sync.Mutex
	runtime     *Runtime
	pullingPool map[string]struct{} // image names with a pull in progress
	pushingPool map[string]struct{} // image names with a push in progress
	events      []utils.JSONMessage // in-memory event history
	listeners   map[string]chan utils.JSONMessage // per-client event channels
	reqFactory  *utils.HTTPRequestFactory // built lazily by HTTPRequestFactory
	Eng         *engine.Engine
}