Merge branch 'master' into shykes-0.6.5-dm-plugin

Conflicts:
	utils.go
	utils_test.go
commit 7cf60da388
Guillaume J. Charmes, 2013-11-14 14:02:44 -08:00
70 changed files with 2050 additions and 829 deletions

.gitignore
@@ -18,3 +18,4 @@ bundles/
 .hg/
 .git/
 vendor/pkg/
+pyenv

AUTHORS
@@ -94,6 +94,7 @@ Jonathan Rudenberg <jonathan@titanous.com>
 Joost Cassee <joost@cassee.net>
 Jordan Arentsen <blissdev@gmail.com>
 Joseph Anthony Pasquale Holsten <joseph@josephholsten.com>
+Josh Poimboeuf <jpoimboe@redhat.com>
 Julien Barbier <write0@gmail.com>
 Jérôme Petazzoni <jerome.petazzoni@dotcloud.com>
 Karan Lyons <karan@karanlyons.com>
@@ -165,6 +166,7 @@ Sridatta Thatipamala <sthatipamala@gmail.com>
 Sridhar Ratnakumar <sridharr@activestate.com>
 Steeve Morin <steeve.morin@gmail.com>
 Stefan Praszalowicz <stefan@greplin.com>
+Sven Dowideit <SvenDowideit@home.org.au>
 Thatcher Peskens <thatcher@dotcloud.com>
 Thermionix <bond711@gmail.com>
 Thijs Terlouw <thijsterlouw@gmail.com>

CHANGELOG.md
@@ -17,7 +17,6 @@
 + Prevent DNS server conflicts in CreateBridgeIface
 + Validate bind mounts on the server side
 + Use parent image config in docker build
-* Fix regression in /etc/hosts
 
 #### Client

CONTRIBUTING.md
@@ -1,11 +1,14 @@
 # Contributing to Docker
 
-Want to hack on Docker? Awesome! Here are instructions to get you started. They are probably not perfect, please let us know if anything feels wrong or incomplete.
+Want to hack on Docker? Awesome! Here are instructions to get you
+started. They are probably not perfect, please let us know if anything
+feels wrong or incomplete.
 
 ## Build Environment
 
-For instructions on setting up your development environment, please see our dedicated [dev environment setup docs](http://docs.docker.io/en/latest/contributing/devenvironment/).
+For instructions on setting up your development environment, please
+see our dedicated [dev environment setup
+docs](http://docs.docker.io/en/latest/contributing/devenvironment/).
 
 ## Contribution guidelines

Dockerfile
@@ -36,7 +36,7 @@ run apt-get install -y -q mercurial
 run apt-get install -y -q build-essential libsqlite3-dev
 
 # Install Go
-run curl -s https://go.googlecode.com/files/go1.2rc3.src.tar.gz | tar -v -C /usr/local -xz
+run curl -s https://go.googlecode.com/files/go1.2rc4.src.tar.gz | tar -v -C /usr/local -xz
 env PATH /usr/local/go/bin:/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin
 env GOPATH /go:/go/src/github.com/dotcloud/docker/vendor
 run cd /usr/local/go/src && ./make.bash && go install -ldflags '-w -linkmode external -extldflags "-static -Wl,--unresolved-symbols=ignore-in-shared-libs"' -tags netgo -a std

NOTICE
@@ -8,35 +8,12 @@ by Keith Rarick, licensed under the MIT License.
 The following is courtesy of our legal counsel:
 
-Transfers of Docker shall be in accordance with applicable export
-controls of any country and all other applicable legal requirements.
-Docker shall not be distributed or downloaded to or in Cuba, Iran,
-North Korea, Sudan or Syria and shall not be distributed or downloaded
-to any person on the Denied Persons List administered by the U.S.
-Department of Commerce.
-
-What does that mean?
-Here is a further explanation from our legal counsel:
-
-Like all software products that utilize cryptography, the export and
-use of Docker is subject to the U.S. Commerce Department's Export
-Administration Regulations (EAR) because it uses or contains
-cryptography (see
-http://www.bis.doc.gov/index.php/policy-guidance/encryption). Certain
-free and open source software projects have a lightweight set of
-requirements, which can generally be met by providing email notice to
-the appropriate U.S. government agencies that their source code is
-available on a publicly available repository and making the
-appropriate statements in the README.
-
-The restrictions of the EAR apply to certain denied locations
-(currently Iran, Sudan, Syria, North Korea, or Cuba) and those
-individuals on the Denied Persons List, which is available here:
-http://www.bis.doc.gov/index.php/policy-guidance/lists-of-parties-of-concern/denied-persons-list.
-
-If you are incorporating Docker into a new open source project, the
-EAR restrictions apply to your incorporation of Docker into your
-project in the same manner as other cryptography-enabled projects,
-such as OpenSSL, almost all Linux distributions, etc.
-
-For more information, see http://www.apache.org/dev/crypto.html and/or
-seek legal counsel.
+Use and transfer of Docker may be subject to certain restrictions by the
+United States and other governments.
+It is your responsibility to ensure that your use and/or transfer does not
+violate applicable laws.
+
+For more information, please see http://www.bis.doc.gov
+
+See also http://www.apache.org/dev/crypto.html and/or seek legal counsel.

README.md
@@ -193,10 +193,9 @@ wrong or incomplete.
 *Brought to you courtesy of our legal counsel. For more context,
 please see the Notice document.*
 
-Transfers of Docker shall be in accordance with applicable export controls
-of any country and all other applicable legal requirements. Without limiting the
-foregoing, Docker shall not be distributed or downloaded to any individual or
-location if such distribution or download would violate the applicable US
-government export regulations.
+Use and transfer of Docker may be subject to certain restrictions by the
+United States and other governments.
+It is your responsibility to ensure that your use and/or transfer does not
+violate applicable laws.
 
 For more information, please see http://www.bis.doc.gov

Vagrantfile
@@ -4,65 +4,135 @@
 BOX_NAME = ENV['BOX_NAME'] || "ubuntu"
 BOX_URI = ENV['BOX_URI'] || "http://files.vagrantup.com/precise64.box"
 VF_BOX_URI = ENV['BOX_URI'] || "http://files.vagrantup.com/precise64_vmware_fusion.box"
+AWS_BOX_URI = ENV['BOX_URI'] || "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box"
 AWS_REGION = ENV['AWS_REGION'] || "us-east-1"
-AWS_AMI = ENV['AWS_AMI'] || "ami-d0f89fb9"
+AWS_AMI = ENV['AWS_AMI'] || "ami-69f5a900"
+AWS_INSTANCE_TYPE = ENV['AWS_INSTANCE_TYPE'] || 't1.micro'
 FORWARD_DOCKER_PORTS = ENV['FORWARD_DOCKER_PORTS']
+SSH_PRIVKEY_PATH = ENV["SSH_PRIVKEY_PATH"]
+
+# A script to upgrade from the 12.04 kernel to the raring backport kernel (3.8)
+# and install docker.
+$script = <<SCRIPT
+# The username to add to the docker group will be passed as the first argument
+# to the script. If nothing is passed, default to "vagrant".
+user="$1"
+if [ -z "$user" ]; then
+    user=vagrant
+fi
+
+# Adding an apt gpg key is idempotent.
+wget -q -O - https://get.docker.io/gpg | apt-key add -
+
+# Creating the docker.list file is idempotent, but it may overwrite desired
+# settings if it already exists. This could be solved with md5sum but it
+# doesn't seem worth it.
+echo 'deb http://get.docker.io/ubuntu docker main' > \
+    /etc/apt/sources.list.d/docker.list
+
+# Update remote package metadata. 'apt-get update' is idempotent.
+apt-get update -q
+
+# Install docker. 'apt-get install' is idempotent.
+apt-get install -q -y lxc-docker
+
+usermod -a -G docker "$user"
+
+tmp=`mktemp -q` && {
+    # Only install the backport kernel, don't bother upgrading if the backport
+    # is already installed. We want to parse the output of apt so we need to
+    # save it with 'tee'. NOTE: The installation of the kernel will trigger
+    # dkms to install vboxguest if needed.
+    apt-get install -q -y --no-upgrade linux-image-generic-lts-raring | \
+        tee "$tmp"
+
+    # Parse the number of installed packages from the output
+    NUM_INST=`awk '$2 == "upgraded," && $4 == "newly" { print $3 }' "$tmp"`
+    rm "$tmp"
+}
+
+# If the number of installed packages is greater than 0, we want to reboot (the
+# backport kernel was installed but is not running).
+if [ "$NUM_INST" -gt 0 ];
+then
+    echo "Rebooting to activate the new kernel."
+    echo "/vagrant will not be mounted. Use 'vagrant halt' followed by"
+    echo "'vagrant up' to ensure /vagrant is mounted."
+    shutdown -r now
+fi
+SCRIPT
+
+# We need to install the virtualbox guest additions *before* we do the normal
+# docker installation. As such this script is prepended to the common docker
+# install script above. This allows the install of the backport kernel to
+# trigger dkms to build the virtualbox guest module install.
+$vbox_script = <<VBOX_SCRIPT + $script
+# Install the VirtualBox guest additions if they aren't already installed.
+if [ ! -d /opt/VBoxGuestAdditions-4.2.12/ ]; then
+    # Update remote package metadata. 'apt-get update' is idempotent.
+    apt-get update -q
+
+    # Kernel Headers and dkms are required to build the vbox guest kernel
+    # modules.
+    apt-get install -q -y linux-headers-generic-lts-raring dkms
+
+    echo 'Downloading VBox Guest Additions...'
+    wget -cq http://dlc.sun.com.edgesuite.net/virtualbox/4.2.12/VBoxGuestAdditions_4.2.12.iso
+
+    mount -o loop,ro /home/vagrant/VBoxGuestAdditions_4.2.12.iso /mnt
+    /mnt/VBoxLinuxAdditions.run --nox11
+    umount /mnt
+fi
+VBOX_SCRIPT
 
 Vagrant::Config.run do |config|
   # Setup virtual machine box. This VM configuration code is always executed.
   config.vm.box = BOX_NAME
   config.vm.box_url = BOX_URI
-  config.ssh.forward_agent = true
-
-  # Provision docker and new kernel if deployment was not done.
-  # It is assumed Vagrant can successfully launch the provider instance.
-  if Dir.glob("#{File.dirname(__FILE__)}/.vagrant/machines/default/*/id").empty?
-    # Add lxc-docker package
-    pkg_cmd = "wget -q -O - https://get.docker.io/gpg | apt-key add -;" \
-      "echo deb http://get.docker.io/ubuntu docker main > /etc/apt/sources.list.d/docker.list;" \
-      "apt-get update -qq; apt-get install -q -y --force-yes lxc-docker; "
-    # Add Ubuntu raring backported kernel
-    pkg_cmd << "apt-get update -qq; apt-get install -q -y linux-image-generic-lts-raring; "
-    # Add guest additions if local vbox VM. As virtualbox is the default provider,
-    # it is assumed it won't be explicitly stated.
-    if ENV["VAGRANT_DEFAULT_PROVIDER"].nil? && ARGV.none? { |arg| arg.downcase.start_with?("--provider") }
-      pkg_cmd << "apt-get install -q -y linux-headers-generic-lts-raring dkms; " \
-        "echo 'Downloading VBox Guest Additions...'; " \
-        "wget -q http://dlc.sun.com.edgesuite.net/virtualbox/4.2.12/VBoxGuestAdditions_4.2.12.iso; "
-      # Prepare the VM to add guest additions after reboot
-      pkg_cmd << "echo -e 'mount -o loop,ro /home/vagrant/VBoxGuestAdditions_4.2.12.iso /mnt\n" \
-        "echo yes | /mnt/VBoxLinuxAdditions.run\numount /mnt\n" \
-        "rm /root/guest_additions.sh; ' > /root/guest_additions.sh; " \
-        "chmod 700 /root/guest_additions.sh; " \
-        "sed -i -E 's#^exit 0#[ -x /root/guest_additions.sh ] \\&\\& /root/guest_additions.sh#' /etc/rc.local; " \
-        "echo 'Installation of VBox Guest Additions is proceeding in the background.'; " \
-        "echo '\"vagrant reload\" can be used in about 2 minutes to activate the new guest additions.'; "
-    end
-    # Add vagrant user to the docker group
-    pkg_cmd << "usermod -a -G docker vagrant; "
-    # Activate new kernel
-    pkg_cmd << "shutdown -r +1; "
-    config.vm.provision :shell, :inline => pkg_cmd
+  # Use the specified private key path if it is specified and not empty.
+  if SSH_PRIVKEY_PATH
+    config.ssh.private_key_path = SSH_PRIVKEY_PATH
   end
+  config.ssh.forward_agent = true
 end
 
 # Providers were added on Vagrant >= 1.1.0
+#
+# NOTE: The vagrant "vm.provision" appends its arguments to a list and executes
+# them in order. If you invoke "vm.provision :shell, :inline => $script"
+# twice then vagrant will run the script two times. Unfortunately when you use
+# providers and the override argument to set up provisioners (like the vbox
+# guest extensions) they 1) don't replace the other provisioners (they append
+# to the end of the list) and 2) you can't control the order the provisioners
+# are executed (you can only append to the list). If you want the virtualbox
+# only script to run before the other script, you have to jump through a lot of
+# hoops.
+#
+# Here is my only repeatable solution: make one script that is common ($script)
+# and another script that is the virtual box guest *prepended* to the common
+# script. Only ever use "vm.provision" *one time* per provider. That means
+# every single provider has an override, and every single one configures
+# "vm.provision". Much sadness, but such is life.
 Vagrant::VERSION >= "1.1.0" and Vagrant.configure("2") do |config|
   config.vm.provider :aws do |aws, override|
-    aws.access_key_id = ENV["AWS_ACCESS_KEY_ID"]
-    aws.secret_access_key = ENV["AWS_SECRET_ACCESS_KEY"]
+    username = "ubuntu"
+    override.vm.box_url = AWS_BOX_URI
+    override.vm.provision :shell, :inline => $script, :args => username
+    aws.access_key_id = ENV["AWS_ACCESS_KEY"]
+    aws.secret_access_key = ENV["AWS_SECRET_KEY"]
     aws.keypair_name = ENV["AWS_KEYPAIR_NAME"]
-    override.ssh.private_key_path = ENV["AWS_SSH_PRIVKEY"]
-    override.ssh.username = "ubuntu"
+    override.ssh.username = username
     aws.region = AWS_REGION
     aws.ami = AWS_AMI
-    aws.instance_type = "t1.micro"
+    aws.instance_type = AWS_INSTANCE_TYPE
   end
 
-  config.vm.provider :rackspace do |rs|
-    config.ssh.private_key_path = ENV["RS_PRIVATE_KEY"]
+  config.vm.provider :rackspace do |rs, override|
+    override.vm.provision :shell, :inline => $script
     rs.username = ENV["RS_USERNAME"]
     rs.api_key = ENV["RS_API_KEY"]
     rs.public_key_path = ENV["RS_PUBLIC_KEY"]
@@ -71,20 +141,25 @@ Vagrant::VERSION >= "1.1.0" and Vagrant.configure("2") do |config|
   end
 
   config.vm.provider :vmware_fusion do |f, override|
-    override.vm.box = BOX_NAME
     override.vm.box_url = VF_BOX_URI
     override.vm.synced_folder ".", "/vagrant", disabled: true
+    override.vm.provision :shell, :inline => $script
     f.vmx["displayName"] = "docker"
   end
 
-  config.vm.provider :virtualbox do |vb|
-    config.vm.box = BOX_NAME
-    config.vm.box_url = BOX_URI
+  config.vm.provider :virtualbox do |vb, override|
+    override.vm.provision :shell, :inline => $vbox_script
     vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
    vb.customize ["modifyvm", :id, "--natdnsproxy1", "on"]
   end
 end
 
+# If this is a version 1 config, virtualbox is the only option. A version 2
+# config would have already been set in the above provider section.
+Vagrant::VERSION < "1.1.0" and Vagrant::Config.run do |config|
+  config.vm.provision :shell, :inline => $vbox_script
+end
+
 if !FORWARD_DOCKER_PORTS.nil?
   Vagrant::VERSION < "1.1.0" and Vagrant::Config.run do |config|
     (49000..49900).each do |port|

api.go
@@ -479,15 +479,16 @@ func postImagesInsert(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 		w.Header().Set("Content-Type", "application/json")
 	}
 	sf := utils.NewStreamFormatter(version > 1.0)
-	imgID, err := srv.ImageInsert(name, url, path, w, sf)
+	err := srv.ImageInsert(name, url, path, w, sf)
 	if err != nil {
 		if sf.Used() {
 			w.Write(sf.FormatError(err))
 			return nil
 		}
+		return err
 	}
-	return writeJSON(w, http.StatusOK, &APIID{ID: imgID})
+	return nil
 }
 
 func postImagesPush(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@@ -540,43 +541,36 @@ func postContainersCreate(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if err := parseForm(r); err != nil {
 		return nil
 	}
-	config := &Config{}
 	out := &APIRun{}
-	name := r.Form.Get("name")
-
-	if err := json.NewDecoder(r.Body).Decode(config); err != nil {
+	job := srv.Eng.Job("create", r.Form.Get("name"))
+	if err := job.DecodeEnv(r.Body); err != nil {
 		return err
 	}
 	resolvConf, err := utils.GetResolvConf()
 	if err != nil {
 		return err
 	}
-
-	if !config.NetworkDisabled && len(config.Dns) == 0 && len(srv.runtime.config.Dns) == 0 && utils.CheckLocalDns(resolvConf) {
+	if !job.GetenvBool("NetworkDisabled") && len(job.Getenv("Dns")) == 0 && len(srv.runtime.config.Dns) == 0 && utils.CheckLocalDns(resolvConf) {
 		out.Warnings = append(out.Warnings, fmt.Sprintf("Docker detected local DNS server on resolv.conf. Using default external servers: %v", defaultDns))
-		config.Dns = defaultDns
+		job.SetenvList("Dns", defaultDns)
 	}
 
-	id, warnings, err := srv.ContainerCreate(config, name)
-	if err != nil {
+	// Read container ID from the first line of stdout
+	job.StdoutParseString(&out.ID)
+	// Read warnings from stderr
+	job.StderrParseLines(&out.Warnings, 0)
+	if err := job.Run(); err != nil {
 		return err
 	}
-	out.ID = id
-	for _, warning := range warnings {
-		out.Warnings = append(out.Warnings, warning)
-	}
-
-	if config.Memory > 0 && !srv.runtime.capabilities.MemoryLimit {
+	if job.GetenvInt("Memory") > 0 && !srv.runtime.capabilities.MemoryLimit {
 		log.Println("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.")
 		out.Warnings = append(out.Warnings, "Your kernel does not support memory limit capabilities. Limitation discarded.")
 	}
-	if config.Memory > 0 && !srv.runtime.capabilities.SwapLimit {
+	if job.GetenvInt("Memory") > 0 && !srv.runtime.capabilities.SwapLimit {
 		log.Println("WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.")
 		out.Warnings = append(out.Warnings, "Your kernel does not support memory swap capabilities. Limitation discarded.")
 	}
-	if !config.NetworkDisabled && srv.runtime.capabilities.IPv4ForwardingDisabled {
+	if !job.GetenvBool("NetworkDisabled") && srv.runtime.capabilities.IPv4ForwardingDisabled {
 		log.Println("Warning: IPv4 forwarding is disabled.")
 		out.Warnings = append(out.Warnings, "IPv4 forwarding is disabled.")
 	}
@@ -653,26 +647,23 @@ func deleteImages(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 }
 
 func postContainersStart(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	var hostConfig *HostConfig
-	// allow a nil body for backwards compatibility
-	if r.Body != nil {
-		if matchesContentType(r.Header.Get("Content-Type"), "application/json") {
-			hostConfig = &HostConfig{}
-			if err := json.NewDecoder(r.Body).Decode(hostConfig); err != nil {
-				return err
-			}
-		}
-	}
 	if vars == nil {
 		return fmt.Errorf("Missing parameter")
 	}
 	name := vars["name"]
-	// Register any links from the host config before starting the container
-	if err := srv.RegisterLinks(name, hostConfig); err != nil {
-		return err
+	job := srv.Eng.Job("start", name)
+	if err := job.ImportEnv(HostConfig{}); err != nil {
+		return fmt.Errorf("Couldn't initialize host configuration")
 	}
-	if err := srv.ContainerStart(name, hostConfig); err != nil {
+	// allow a nil body for backwards compatibility
+	if r.Body != nil {
+		if matchesContentType(r.Header.Get("Content-Type"), "application/json") {
+			if err := job.DecodeEnv(r.Body); err != nil {
+				return err
+			}
+		}
+	}
+	if err := job.Run(); err != nil {
 		return err
 	}
 	w.WriteHeader(http.StatusNoContent)

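A note on the pattern above: postContainersCreate and postContainersStart no longer call the server directly; each builds a named engine job, feeds it the request environment, and reads results back from the job's output streams. The following is a minimal, hedged sketch of that data flow using a hypothetical stand-in Job type, not the real docker/engine API:

```go
package main

import "fmt"

// Job is a hypothetical stand-in for an engine job such as "create".
type Job struct {
	name string
	out  *string // destination wired up by StdoutParseString
}

// StdoutParseString asks the job to store the first line of its stdout in dst.
func (j *Job) StdoutParseString(dst *string) { j.out = dst }

// Run executes the job. A real "create" job would build the container and
// print its ID; here we fake a fixed ID to show how the handler gets it back.
func (j *Job) Run() error {
	if j.out != nil {
		*j.out = "2bf44ea18873"
	}
	return nil
}

func main() {
	job := &Job{name: "create"}
	var id string
	job.StdoutParseString(&id) // the handler binds out.ID the same way
	if err := job.Run(); err != nil {
		panic(err)
	}
	fmt.Println("created container", id)
}
```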
api_test.go
@@ -609,11 +609,11 @@ func TestPostCommit(t *testing.T) {
 }
 
 func TestPostContainersCreate(t *testing.T) {
-	runtime := mkRuntime(t)
+	eng := NewTestEngine(t)
+	srv := mkServerFromEngine(eng, t)
+	runtime := srv.runtime
 	defer nuke(runtime)
-	srv := &Server{runtime: runtime}
 
 	configJSON, err := json.Marshal(&Config{
 		Image:  GetTestImage(runtime).ID,
 		Memory: 33554432,
@@ -756,27 +756,23 @@ func TestPostContainersRestart(t *testing.T) {
 }
 
 func TestPostContainersStart(t *testing.T) {
-	runtime := mkRuntime(t)
+	eng := NewTestEngine(t)
+	srv := mkServerFromEngine(eng, t)
+	runtime := srv.runtime
 	defer nuke(runtime)
-	srv := &Server{runtime: runtime}
-
-	container, _, err := runtime.Create(
+	id := createTestContainer(
+		eng,
 		&Config{
 			Image:     GetTestImage(runtime).ID,
 			Cmd:       []string{"/bin/cat"},
 			OpenStdin: true,
 		},
-		"",
-	)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer runtime.Destroy(container)
+		t)
 
 	hostConfigJSON, err := json.Marshal(&HostConfig{})
 
-	req, err := http.NewRequest("POST", "/containers/"+container.ID+"/start", bytes.NewReader(hostConfigJSON))
+	req, err := http.NewRequest("POST", "/containers/"+id+"/start", bytes.NewReader(hostConfigJSON))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -784,22 +780,26 @@ func TestPostContainersStart(t *testing.T) {
 	req.Header.Set("Content-Type", "application/json")
 
 	r := httptest.NewRecorder()
-	if err := postContainersStart(srv, APIVERSION, r, req, map[string]string{"name": container.ID}); err != nil {
+	if err := postContainersStart(srv, APIVERSION, r, req, map[string]string{"name": id}); err != nil {
 		t.Fatal(err)
 	}
 	if r.Code != http.StatusNoContent {
 		t.Fatalf("%d NO CONTENT expected, received %d\n", http.StatusNoContent, r.Code)
 	}
 
+	container := runtime.Get(id)
+	if container == nil {
+		t.Fatalf("Container %s was not created", id)
+	}
+
 	// Give some time to the process to start
+	// FIXME: use Wait once it's available as a job
 	container.WaitTimeout(500 * time.Millisecond)
 
 	if !container.State.Running {
 		t.Errorf("Container should be running")
 	}
 
 	r = httptest.NewRecorder()
-	if err = postContainersStart(srv, APIVERSION, r, req, map[string]string{"name": container.ID}); err == nil {
+	if err = postContainersStart(srv, APIVERSION, r, req, map[string]string{"name": id}); err == nil {
 		t.Fatalf("A running container should not be able to be started")
 	}

buildfile_test.go
@@ -544,10 +544,7 @@ func TestBuildADDFileNotFound(t *testing.T) {
 }
 
 func TestBuildInheritance(t *testing.T) {
-	runtime, err := newTestRuntime("")
-	if err != nil {
-		t.Fatal(err)
-	}
+	runtime := mkRuntime(t)
 	defer nuke(runtime)
 
 	srv := &Server{

commands.go
@@ -130,10 +130,7 @@ func (cli *DockerCli) CmdInsert(args ...string) error {
 	v.Set("url", cmd.Arg(1))
 	v.Set("path", cmd.Arg(2))
 
-	if err := cli.stream("POST", "/images/"+cmd.Arg(0)+"/insert?"+v.Encode(), nil, cli.out, nil); err != nil {
-		return err
-	}
-	return nil
+	return cli.stream("POST", "/images/"+cmd.Arg(0)+"/insert?"+v.Encode(), nil, cli.out, nil)
 }
 
 // mkBuildContext returns an archive of an empty context with the contents
@@ -376,15 +373,17 @@ func (cli *DockerCli) CmdWait(args ...string) error {
 		cmd.Usage()
 		return nil
 	}
+	var encounteredError error
 	for _, name := range cmd.Args() {
 		status, err := waitForExit(cli, name)
 		if err != nil {
-			fmt.Fprintf(cli.err, "%s", err)
+			fmt.Fprintf(cli.err, "%s\n", err)
+			encounteredError = fmt.Errorf("Error: failed to wait one or more containers")
 		} else {
 			fmt.Fprintf(cli.out, "%d\n", status)
 		}
 	}
-	return nil
+	return encounteredError
 }
 
 // 'docker version': show version information
@@ -505,15 +504,17 @@ func (cli *DockerCli) CmdStop(args ...string) error {
 	v := url.Values{}
 	v.Set("t", strconv.Itoa(*nSeconds))
 
+	var encounteredError error
 	for _, name := range cmd.Args() {
 		_, _, err := cli.call("POST", "/containers/"+name+"/stop?"+v.Encode(), nil)
 		if err != nil {
 			fmt.Fprintf(cli.err, "%s\n", err)
+			encounteredError = fmt.Errorf("Error: failed to stop one or more containers")
 		} else {
 			fmt.Fprintf(cli.out, "%s\n", name)
 		}
 	}
-	return nil
+	return encounteredError
 }
 
 func (cli *DockerCli) CmdRestart(args ...string) error {
@@ -530,15 +531,17 @@ func (cli *DockerCli) CmdRestart(args ...string) error {
 	v := url.Values{}
 	v.Set("t", strconv.Itoa(*nSeconds))
 
+	var encounteredError error
 	for _, name := range cmd.Args() {
 		_, _, err := cli.call("POST", "/containers/"+name+"/restart?"+v.Encode(), nil)
 		if err != nil {
 			fmt.Fprintf(cli.err, "%s\n", err)
+			encounteredError = fmt.Errorf("Error: failed to restart one or more containers")
 		} else {
 			fmt.Fprintf(cli.out, "%s\n", name)
 		}
 	}
-	return nil
+	return encounteredError
 }
 
 func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal {
@@ -772,15 +775,19 @@ func (cli *DockerCli) CmdRmi(args ...string) error {
 		return nil
 	}
 
+	var encounteredError error
 	for _, name := range cmd.Args() {
 		body, _, err := cli.call("DELETE", "/images/"+name, nil)
 		if err != nil {
-			fmt.Fprintf(cli.err, "%s", err)
+			fmt.Fprintf(cli.err, "%s\n", err)
+			encounteredError = fmt.Errorf("Error: failed to remove one or more images")
 		} else {
 			var outs []APIRmi
 			err = json.Unmarshal(body, &outs)
 			if err != nil {
-				return err
+				fmt.Fprintf(cli.err, "%s\n", err)
+				encounteredError = fmt.Errorf("Error: failed to remove one or more images")
+				continue
 			}
 			for _, out := range outs {
 				if out.Deleted != "" {
@@ -791,7 +798,7 @@ func (cli *DockerCli) CmdRmi(args ...string) error {
 			}
 		}
 	}
-	return nil
+	return encounteredError
 }
 
 func (cli *DockerCli) CmdHistory(args ...string) error {
@@ -870,15 +877,18 @@ func (cli *DockerCli) CmdRm(args ...string) error {
 	if *link {
 		val.Set("link", "1")
 	}
+	var encounteredError error
 	for _, name := range cmd.Args() {
 		_, _, err := cli.call("DELETE", "/containers/"+name+"?"+val.Encode(), nil)
 		if err != nil {
 			fmt.Fprintf(cli.err, "%s\n", err)
+			encounteredError = fmt.Errorf("Error: failed to remove one or more containers")
 		} else {
 			fmt.Fprintf(cli.out, "%s\n", name)
 		}
 	}
-	return nil
+	return encounteredError
 }
 
 // 'docker kill NAME' kills a running container
@@ -892,15 +902,16 @@ func (cli *DockerCli) CmdKill(args ...string) error {
 		return nil
 	}
 
+	var encounteredError error
 	for _, name := range args {
-		_, _, err := cli.call("POST", "/containers/"+name+"/kill", nil)
-		if err != nil {
+		if _, _, err := cli.call("POST", "/containers/"+name+"/kill", nil); err != nil {
 			fmt.Fprintf(cli.err, "%s\n", err)
+			encounteredError = fmt.Errorf("Error: failed to kill one or more containers")
 		} else {
 			fmt.Fprintf(cli.out, "%s\n", name)
 		}
 	}
-	return nil
+	return encounteredError
 }
 
 func (cli *DockerCli) CmdImport(args ...string) error {
@@ -913,8 +924,16 @@ func (cli *DockerCli) CmdImport(args ...string) error {
 		cmd.Usage()
 		return nil
 	}
-	src := cmd.Arg(0)
-	repository, tag := utils.ParseRepositoryTag(cmd.Arg(1))
+
+	var src, repository, tag string
+	if cmd.NArg() == 3 {
+		fmt.Fprintf(cli.err, "[DEPRECATED] The format 'URL|- [REPOSITORY [TAG]]' has been deprecated. Please use URL|- [REPOSITORY[:TAG]]\n")
+		src, repository, tag = cmd.Arg(0), cmd.Arg(1), cmd.Arg(2)
+	} else {
+		src = cmd.Arg(0)
+		repository, tag = utils.ParseRepositoryTag(cmd.Arg(1))
+	}
 	v := url.Values{}
 	v.Set("repo", repository)
 	v.Set("tag", tag)
@@ -1166,14 +1185,10 @@ func (cli *DockerCli) CmdImages(args ...string) error {
 		fmt.Fprintln(w, "REPOSITORY\tTAG\tIMAGE ID\tCREATED\tSIZE")
 	}
 
-	var repo string
-	var tag string
 	for _, out := range outs {
 		for _, repotag := range out.RepoTags {
-			components := strings.SplitN(repotag, ":", 2)
-			repo = components[0]
-			tag = components[1]
+			repo, tag := utils.ParseRepositoryTag(repotag)
 
 			if !*noTrunc {
 				out.ID = utils.TruncateID(out.ID)
@@ -1235,7 +1250,7 @@ func PrintTreeNode(cli *DockerCli, noTrunc *bool, image APIImages, prefix string) {
 	fmt.Fprintf(cli.out, "%s%s Size: %s (virtual %s)", prefix, imageID, utils.HumanSize(image.Size), utils.HumanSize(image.VirtualSize))
 	if image.RepoTags[0] != "<none>:<none>" {
-		fmt.Fprintf(cli.out, " Tags: %s\n", strings.Join(image.RepoTags, ","))
+		fmt.Fprintf(cli.out, " Tags: %s\n", strings.Join(image.RepoTags, ", "))
 	} else {
 		fmt.Fprint(cli.out, "\n")
 	}
@@ -1351,8 +1366,16 @@ func (cli *DockerCli) CmdCommit(args ...string) error {
 	if err := cmd.Parse(args); err != nil {
 		return nil
 	}
-	name := cmd.Arg(0)
-	repository, tag := utils.ParseRepositoryTag(cmd.Arg(1))
+
+	var name, repository, tag string
+	if cmd.NArg() == 3 {
+		fmt.Fprintf(cli.err, "[DEPRECATED] The format 'CONTAINER [REPOSITORY [TAG]]' has been deprecated. Please use CONTAINER [REPOSITORY[:TAG]]\n")
+		name, repository, tag = cmd.Arg(0), cmd.Arg(1), cmd.Arg(2)
+	} else {
+		name = cmd.Arg(0)
+		repository, tag = utils.ParseRepositoryTag(cmd.Arg(1))
+	}
 
 	if name == "" {
 		cmd.Usage()
@@ -1389,7 +1412,7 @@ func (cli *DockerCli) CmdCommit(args ...string) error {
 
 func (cli *DockerCli) CmdEvents(args ...string) error {
 	cmd := Subcmd("events", "[OPTIONS]", "Get real time events from the server")
-	since := cmd.String("since", "", "Show events previously created (used for polling).")
+	since := cmd.String("since", "", "Show previously created events and then stream.")
 	if err := cmd.Parse(args); err != nil {
 		return nil
 	}
@@ -1401,7 +1424,17 @@ func (cli *DockerCli) CmdEvents(args ...string) error {
 	v := url.Values{}
 
 	if *since != "" {
-		v.Set("since", *since)
+		loc := time.FixedZone(time.Now().Zone())
+		format := "2006-01-02 15:04:05 -0700 MST"
+		if len(*since) < len(format) {
+			format = format[:len(*since)]
+		}
+		if t, err := time.ParseInLocation(format, *since, loc); err == nil {
+			v.Set("since", strconv.FormatInt(t.Unix(), 10))
+		} else {
+			v.Set("since", *since)
+		}
 	}
 
 	if err := cli.stream("GET", "/events?"+v.Encode(), nil, cli.out, nil); err != nil {
@@ -1658,9 +1691,16 @@ func (cli *DockerCli) CmdTag(args ...string) error {
 		return nil
 	}
 
-	v := url.Values{}
-	repository, tag := utils.ParseRepositoryTag(cmd.Arg(1))
+	var repository, tag string
+	if cmd.NArg() == 3 {
+		fmt.Fprintf(cli.err, "[DEPRECATED] The format 'IMAGE [REPOSITORY [TAG]]' has been deprecated. Please use IMAGE [REPOSITORY[:TAG]]\n")
+		repository, tag = cmd.Arg(1), cmd.Arg(2)
+	} else {
+		repository, tag = utils.ParseRepositoryTag(cmd.Arg(1))
+	}
+
+	v := url.Values{}
 	v.Set("repo", repository)
 	v.Set("tag", tag)
@@ -1971,7 +2011,7 @@ func (cli *DockerCli) call(method, path string, data interface{}) ([]byte, int, error) {
 		if len(body) == 0 {
 			return nil, resp.StatusCode, fmt.Errorf("Error: %s", http.StatusText(resp.StatusCode))
 		}
-		return nil, resp.StatusCode, fmt.Errorf("Error: %s", body)
+		return nil, resp.StatusCode, fmt.Errorf("Error: %s", bytes.TrimSpace(body))
 	}
 	return body, resp.StatusCode, nil
 }
@@ -2027,7 +2067,7 @@ func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer, headers map[string][]string) error {
 		if len(body) == 0 {
 			return fmt.Errorf("Error :%s", http.StatusText(resp.StatusCode))
 		}
-		return fmt.Errorf("Error: %s", body)
+		return fmt.Errorf("Error: %s", bytes.TrimSpace(body))
 	}
 
 	if matchesContentType(resp.Header.Get("Content-Type"), "application/json") {

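The wait, stop, restart, rm, rmi and kill commands above all switch to the same error-aggregation pattern: keep looping over every argument, print each failure to stderr as it happens, and return one summary error at the end so the command exits non-zero if anything failed. A self-contained sketch of the pattern, with a hypothetical callOne standing in for the per-container API call:

```go
package main

import (
	"fmt"
	"os"
)

// callOne stands in for a per-container API call (hypothetical).
func callOne(name string) error {
	if name == "missing" {
		return fmt.Errorf("No such container: %s", name)
	}
	return nil
}

// stopAll keeps going after individual failures and reports them at the end.
func stopAll(names []string) error {
	var encounteredError error
	for _, name := range names {
		if err := callOne(name); err != nil {
			fmt.Fprintf(os.Stderr, "%s\n", err)
			encounteredError = fmt.Errorf("Error: failed to stop one or more containers")
		} else {
			fmt.Println(name)
		}
	}
	return encounteredError
}

func main() {
	// "missing" fails, but "web" and "db" are still processed.
	if err := stopAll([]string{"web", "missing", "db"}); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```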
commands_test.go
@@ -6,6 +6,8 @@ import (
 	"github.com/dotcloud/docker/utils"
 	"io"
 	"io/ioutil"
+	"os"
+	"path"
 	"regexp"
 	"strings"
 	"testing"
@@ -381,8 +383,8 @@ func TestRunAttachStdin(t *testing.T) {
 		if err != nil {
 			t.Fatal(err)
 		}
-		if cmdOutput != container.ShortID()+"\n" {
-			t.Fatalf("Wrong output: should be '%s', not '%s'\n", container.ShortID()+"\n", cmdOutput)
+		if cmdOutput != container.ID+"\n" {
+			t.Fatalf("Wrong output: should be '%s', not '%s'\n", container.ID+"\n", cmdOutput)
 		}
 	})
@@ -459,7 +461,7 @@ func TestRunDetach(t *testing.T) {
 	})
 }
 
-// TestAttachDetach checks that attach in tty mode can be detached
+// TestAttachDetach checks that attach in tty mode can be detached using the long container ID
 func TestAttachDetach(t *testing.T) {
 	stdin, stdinPipe := io.Pipe()
 	stdout, stdoutPipe := io.Pipe()
@@ -486,8 +488,8 @@ func TestAttachDetach(t *testing.T) {
 		container = globalRuntime.List()[0]
 
-		if strings.Trim(string(buf[:n]), " \r\n") != container.ShortID() {
-			t.Fatalf("Wrong ID received. Expect %s, received %s", container.ShortID(), buf[:n])
+		if strings.Trim(string(buf[:n]), " \r\n") != container.ID {
+			t.Fatalf("Wrong ID received. Expect %s, received %s", container.ID, buf[:n])
 		}
 	})
 
 	setTimeout(t, "Starting container timed out", 10*time.Second, func() {
@@ -501,7 +503,69 @@ func TestAttachDetach(t *testing.T) {
 	ch = make(chan struct{})
 	go func() {
 		defer close(ch)
-		if err := cli.CmdAttach(container.ShortID()); err != nil {
+		if err := cli.CmdAttach(container.ID); err != nil {
+			if err != io.ErrClosedPipe {
+				t.Fatal(err)
+			}
+		}
+	}()
+
+	setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() {
+		if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 15); err != nil {
+			if err != io.ErrClosedPipe {
+				t.Fatal(err)
+			}
+		}
+	})
+
+	setTimeout(t, "Escape sequence timeout", 5*time.Second, func() {
+		stdinPipe.Write([]byte{16, 17})
+		if err := stdinPipe.Close(); err != nil {
+			t.Fatal(err)
+		}
+	})
+
+	closeWrap(stdin, stdinPipe, stdout, stdoutPipe)
+
+	// wait for CmdRun to return
+	setTimeout(t, "Waiting for CmdAttach timed out", 15*time.Second, func() {
+		<-ch
+	})
+
+	time.Sleep(500 * time.Millisecond)
+	if !container.State.Running {
+		t.Fatal("The detached container should be still running")
+	}
+
+	setTimeout(t, "Waiting for container to die timed out", 5*time.Second, func() {
+		container.Kill()
+	})
+}
+
+// TestAttachDetachTruncatedID checks that attach in tty mode can be detached
+func TestAttachDetachTruncatedID(t *testing.T) {
+	stdin, stdinPipe := io.Pipe()
+	stdout, stdoutPipe := io.Pipe()
+
+	cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+	defer cleanup(globalRuntime)
+
+	go stdout.Read(make([]byte, 1024))
+	setTimeout(t, "Starting container timed out", 2*time.Second, func() {
+		if err := cli.CmdRun("-i", "-t", "-d", unitTestImageID, "cat"); err != nil {
+			t.Fatal(err)
+		}
+	})
+
+	container := globalRuntime.List()[0]
+
+	stdin, stdinPipe = io.Pipe()
+	stdout, stdoutPipe = io.Pipe()
+	cli = NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+
+	ch := make(chan struct{})
+	go func() {
+		defer close(ch)
+		if err := cli.CmdAttach(utils.TruncateID(container.ID)); err != nil {
 			if err != io.ErrClosedPipe {
 				t.Fatal(err)
 			}
@@ -824,3 +888,55 @@ run [ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]
 	return image
 }
+
+// #2098 - Docker cidFiles only contain short version of the containerId
+// sudo docker run -cidfile /tmp/docker_test.cid ubuntu echo "test"
+// TestRunCidFile tests that run -cidfile returns the long id
+func TestRunCidFile(t *testing.T) {
+	stdout, stdoutPipe := io.Pipe()
+
+	tmpDir, err := ioutil.TempDir("", "TestRunCidFile")
+	if err != nil {
+		t.Fatal(err)
+	}
+	tmpCidFile := path.Join(tmpDir, "cid")
+
+	cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+	defer cleanup(globalRuntime)
+
+	c := make(chan struct{})
+	go func() {
+		defer close(c)
+		if err := cli.CmdRun("-cidfile", tmpCidFile, unitTestImageID, "ls"); err != nil {
+			t.Fatal(err)
+		}
+	}()
+
+	defer os.RemoveAll(tmpDir)
+	setTimeout(t, "Reading command output time out", 2*time.Second, func() {
+		cmdOutput, err := bufio.NewReader(stdout).ReadString('\n')
+		if err != nil {
+			t.Fatal(err)
+		}
+		if len(cmdOutput) < 1 {
+			t.Fatalf("'ls' should return something, not '%s'", cmdOutput)
+		}
+		// read the tmpCidFile
+		buffer, err := ioutil.ReadFile(tmpCidFile)
+		if err != nil {
+			t.Fatal(err)
+		}
+		id := string(buffer)
+
+		if len(id) != len("2bf44ea18873287bd9ace8a4cb536a7cbe134bed67e805fdf2f58a57f69b320c") {
+			t.Fatalf("-cidfile should be a long id, not '%s'", id)
+		}
+		// test that it's a valid cid? (though the container is gone..)
+		// remove the file and dir.
+	})
+
+	setTimeout(t, "CmdRun timed out", 5*time.Second, func() {
+		<-c
+	})
+}

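TestRunCidFile above pins the regression from #2098: the file written by -cidfile must contain the full 64-character container ID, not the truncated display form. A small sketch of the distinction, assuming utils.TruncateID keeps the first 12 characters of the ID:

```go
package main

import "fmt"

// truncateID mirrors what utils.TruncateID is assumed to do: keep a short
// 12-character prefix of the full container ID for display purposes.
func truncateID(id string) string {
	const shortLen = 12
	if len(id) < shortLen {
		return id
	}
	return id[:shortLen]
}

func main() {
	full := "2bf44ea18873287bd9ace8a4cb536a7cbe134bed67e805fdf2f58a57f69b320c"
	short := truncateID(full)
	// The cidfile must hold the 64-char form; only display output may use the short one.
	fmt.Println(len(full), len(short)) // 64 12
}
```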
config.go
@@ -9,7 +9,6 @@ import (
 type DaemonConfig struct {
 	Pidfile        string
 	Root           string
-	ProtoAddresses []string
 	AutoRestart    bool
 	EnableCors     bool
 	Dns            []string
@@ -36,7 +35,6 @@ func ConfigFromJob(job *engine.Job) *DaemonConfig {
 	} else {
 		config.BridgeIface = DefaultNetworkBridge
 	}
-	config.ProtoAddresses = job.GetenvList("ProtoAddresses")
 	config.DefaultIp = net.ParseIP(job.Getenv("DefaultIp"))
 	config.InterContainerCommunication = job.GetenvBool("InterContainerCommunication")
 	return &config

container.go
@@ -134,7 +134,11 @@ type PortBinding struct {
 type Port string
 
 func (p Port) Proto() string {
-	return strings.Split(string(p), "/")[1]
+	parts := strings.Split(string(p), "/")
+	if len(parts) == 1 {
+		return "tcp"
+	}
+	return parts[1]
 }
 
 func (p Port) Port() string {
@@ -168,7 +172,7 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig, *flag.FlagSet, error) {
 	cmd.Var(flAttach, "a", "Attach to stdin, stdout or stderr.")
 	flStdin := cmd.Bool("i", false, "Keep stdin open even if not attached")
 	flTty := cmd.Bool("t", false, "Allocate a pseudo-tty")
-	flMemory := cmd.Int64("m", 0, "Memory limit (in bytes)")
+	flMemoryString := cmd.String("m", "", "Memory limit (format: <number><optional unit>, where unit = b, k, m or g)")
 	flContainerIDFile := cmd.String("cidfile", "", "Write the container ID to the file")
 	flNetwork := cmd.Bool("n", true, "Enable networking for this container")
 	flPrivileged := cmd.Bool("privileged", false, "Give extended privileges to this container")
@@ -177,9 +181,9 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig, *flag.FlagSet, error) {
 	cmd.String("name", "", "Assign a name to the container")
 	flPublishAll := cmd.Bool("P", false, "Publish all exposed ports to the host interfaces")
 
-	if capabilities != nil && *flMemory > 0 && !capabilities.MemoryLimit {
+	if capabilities != nil && *flMemoryString != "" && !capabilities.MemoryLimit {
 		//fmt.Fprintf(stdout, "WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
-		*flMemory = 0
+		*flMemoryString = ""
 	}
 
 	flCpuShares := cmd.Int64("c", 0, "CPU shares (relative weight)")
@@ -200,7 +204,7 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig, *flag.FlagSet, error) {
 	cmd.Var(flVolumes, "v", "Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)")
 
 	var flVolumesFrom utils.ListOpts
-	cmd.Var(&flVolumesFrom, "volumes-from", "Mount volumes from the specified container")
+	cmd.Var(&flVolumesFrom, "volumes-from", "Mount volumes from the specified container(s)")
 
 	flEntrypoint := cmd.String("entrypoint", "", "Overwrite the default entrypoint of the image")
@@ -246,6 +250,18 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig, *flag.FlagSet, error) {
 		}
 	}
 
+	var flMemory int64
+	if *flMemoryString != "" {
+		parsedMemory, err := utils.RAMInBytes(*flMemoryString)
+		if err != nil {
+			return nil, nil, cmd, err
+		}
+		flMemory = parsedMemory
+	}
+
 	var binds []string
 	// add any bind targets to the list of container volumes
@@ -316,7 +332,7 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig, *flag.FlagSet, error) {
 		Tty:             *flTty,
 		NetworkDisabled: !*flNetwork,
 		OpenStdin:       *flStdin,
-		Memory:          *flMemory,
+		Memory:          flMemory,
 		CpuShares:       *flCpuShares,
 		AttachStdin:     flAttach.Get("stdin"),
 		AttachStdout:    flAttach.Get("stdout"),
@@ -341,7 +357,7 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig, *flag.FlagSet, error) {
 		PublishAllPorts: *flPublishAll,
 	}
 
-	if capabilities != nil && *flMemory > 0 && !capabilities.SwapLimit {
+	if capabilities != nil && flMemory > 0 && !capabilities.SwapLimit {
 		//fmt.Fprintf(stdout, "WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
 		config.MemorySwap = -1
 	}
@@ -694,24 +710,25 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, stdout io.Writer, stderr io.Writer) chan error {
 func (container *Container) Start() (err error) {
 	container.State.Lock()
 	defer container.State.Unlock()
+
+	if container.State.Running {
+		return fmt.Errorf("The container %s is already running.", container.ID)
+	}
+
 	defer func() {
 		if err != nil {
 			container.cleanup()
 		}
 	}()
-
-	if container.State.Running {
-		return fmt.Errorf("The container %s is already running.", container.ID)
-	}
 
 	if err := container.EnsureMounted(); err != nil {
 		return err
 	}
 
 	if container.runtime.networkManager.disabled {
 		container.Config.NetworkDisabled = true
+		container.buildHostnameAndHostsFiles("127.0.1.1")
 	} else {
 		if err := container.allocateNetwork(); err != nil {
 			return err
 		}
+		container.buildHostnameAndHostsFiles(container.NetworkSettings.IPAddress)
 	}
 
 	// Make sure the config is compatible with the current kernel
@@ -771,9 +788,23 @@ func (container *Container) Start() (err error) {
 	// Apply volumes from another container if requested
 	if container.Config.VolumesFrom != "" {
-		volumes := strings.Split(container.Config.VolumesFrom, ",")
-		for _, v := range volumes {
-			c := container.runtime.Get(v)
+		containerSpecs := strings.Split(container.Config.VolumesFrom, ",")
+		for _, containerSpec := range containerSpecs {
+			mountRW := true
+			specParts := strings.SplitN(containerSpec, ":", 2)
+			switch len(specParts) {
+			case 0:
+				return fmt.Errorf("Malformed volumes-from specification: %s", container.Config.VolumesFrom)
+			case 2:
+				switch specParts[1] {
+				case "ro":
+					mountRW = false
+				case "rw": // mountRW is already true
+				default:
+					return fmt.Errorf("Malformed volumes-from specification: %s", containerSpec)
+				}
+			}
+			c := container.runtime.Get(specParts[0])
 			if c == nil {
 				return fmt.Errorf("Container %s not found. Impossible to mount its volumes", container.ID)
 			}
@@ -786,7 +817,7 @@ func (container *Container) Start() (err error) {
 			}
 			container.Volumes[volPath] = id
 			if isRW, exists := c.VolumesRW[volPath]; exists {
-				container.VolumesRW[volPath] = isRW
+				container.VolumesRW[volPath] = isRW && mountRW
 			}
 		}
@@ -832,7 +863,7 @@ func (container *Container) Start() (err error) {
 		// Create the mountpoint
 		rootVolPath := path.Join(container.RootfsPath(), volPath)
 		if err := os.MkdirAll(rootVolPath, 0755); err != nil {
-			return nil
+			return err
 		}
 
 		// Do not copy or change permissions if we are mounting from the host
@@ -876,7 +907,13 @@ func (container *Container) Start() (err error) {
 		return err
 	}
 
+	var lxcStart string = "lxc-start"
+	if container.hostConfig.Privileged && container.runtime.capabilities.AppArmor {
+		lxcStart = path.Join(container.runtime.config.Root, "lxc-start-unconfined")
+	}
+
 	params := []string{
+		lxcStart,
 		"-n", container.ID,
 		"-f", container.lxcConfigPath(),
 		"--",
@@ -969,11 +1006,24 @@ func (container *Container) Start() (err error) {
 	params = append(params, "--", container.Path)
 	params = append(params, container.Args...)
 
-	var lxcStart string = "lxc-start"
-	if container.hostConfig.Privileged && container.runtime.capabilities.AppArmor {
-		lxcStart = path.Join(container.runtime.config.Root, "lxc-start-unconfined")
+	if RootIsShared() {
+		// lxc-start really needs / to be non-shared, or all kinds of stuff break
+		// when lxc-start unmounts things and those unmounts propagate to the main
+		// mount namespace.
+		// What we really want is to clone into a new namespace and then
+		// mount / MS_REC|MS_SLAVE, but since we can't really clone or fork
+		// without exec in go we have to do this horrible shell hack...
+		shellString :=
+			"mount --make-rslave /; exec " +
+				utils.ShellQuoteArguments(params)
+
+		params = []string{
+			"unshare", "-m", "--", "/bin/sh", "-c", shellString,
+		}
 	}
-	container.cmd = exec.Command(lxcStart, params...)
+
+	container.cmd = exec.Command(params[0], params[1:]...)
 
 	// Setup logging of stdout and stderr to disk
 	if err := container.runtime.LogToDisk(container.stdout, container.logPath("json"), "stdout"); err != nil {
 		return err
@@ -1082,6 +1132,30 @@ func (container *Container) StderrPipe() (io.ReadCloser, error) {
 	return utils.NewBufReader(reader), nil
 }
 
+func (container *Container) buildHostnameAndHostsFiles(IP string) {
+	container.HostnamePath = path.Join(container.root, "hostname")
+	ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644)
+
+	hostsContent := []byte(`
+127.0.0.1	localhost
+::1		localhost ip6-localhost ip6-loopback
+fe00::0		ip6-localnet
+ff00::0		ip6-mcastprefix
+ff02::1		ip6-allnodes
+ff02::2		ip6-allrouters
+`)
+
+	container.HostsPath = path.Join(container.root, "hosts")
+
+	if container.Config.Domainname != "" {
+		hostsContent = append([]byte(fmt.Sprintf("%s\t%s.%s %s\n", IP, container.Config.Hostname, container.Config.Domainname, container.Config.Hostname)), hostsContent...)
+	} else {
+		hostsContent = append([]byte(fmt.Sprintf("%s\t%s\n", IP, container.Config.Hostname)), hostsContent...)
+	}
+
+	ioutil.WriteFile(container.HostsPath, hostsContent, 0644)
+}
+
 func (container *Container) allocateNetwork() error {
 	if container.Config.NetworkDisabled {
 		return nil
@@ -1230,7 +1304,7 @@ func (container *Container) monitor() {
 	container.State.setStopped(exitCode)
 
 	if container.runtime != nil && container.runtime.srv != nil {
-		container.runtime.srv.LogEvent("die", container.ShortID(), container.runtime.repositories.ImageName(container.Image))
+		container.runtime.srv.LogEvent("die", container.ID, container.runtime.repositories.ImageName(container.Image))
 	}
 
 	// Cleanup
@@ -1297,7 +1371,7 @@ func (container *Container) kill(sig int) error {
 	}
 
 	if output, err := exec.Command("lxc-kill", "-n", container.ID, strconv.Itoa(sig)).CombinedOutput(); err != nil {
-		log.Printf("error killing container %s (%s, %s)", container.ShortID(), output, err)
+		log.Printf("error killing container %s (%s, %s)", utils.TruncateID(container.ID), output, err)
 		return err
 	}
@@ -1317,9 +1391,9 @@ func (container *Container) Kill() error {
 	// 2. Wait for the process to die, in last resort, try to kill the process directly
 	if err := container.WaitTimeout(10 * time.Second); err != nil {
 		if container.cmd == nil {
-			return fmt.Errorf("lxc-kill failed, impossible to kill the container %s", container.ShortID())
+			return fmt.Errorf("lxc-kill failed, impossible to kill the container %s", utils.TruncateID(container.ID))
 		}
-		log.Printf("Container %s failed to exit within 10 seconds of lxc-kill %s - trying direct SIGKILL", "SIGKILL", container.ShortID())
+		log.Printf("Container %s failed to exit within 10 seconds of lxc-kill %s - trying direct SIGKILL", "SIGKILL", utils.TruncateID(container.ID))
 		if err := container.cmd.Process.Kill(); err != nil {
 			return err
 		}
@@ -1433,14 +1507,6 @@ func (container *Container) Unmount() error {
 	return container.runtime.Unmount(container)
 }
 
-// ShortID returns a shorthand version of the container's id for convenience.
-// A collision with other container shorthands is very unlikely, but possible.
-// In case of a collision a lookup with Runtime.Get() will fail, and the caller
-// will need to use a longer prefix, or the full-length container Id.
-func (container *Container) ShortID() string {
-	return utils.TruncateID(container.ID)
-}
-
 func (container *Container) logPath(name string) string {
 	return path.Join(container.root, fmt.Sprintf("%s-%s.log", container.ID, name))
 }

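Two parsing changes in container.go are worth calling out: Port.Proto() now defaults to "tcp" instead of indexing past the end of the split when a port has no "/proto" suffix, and -volumes-from accepts an optional ":ro" or ":rw" suffix per source container. A standalone sketch of both, mirroring the diff but outside the real docker package:

```go
package main

import (
	"fmt"
	"strings"
)

type Port string

// Proto returns the protocol half of "port/proto", defaulting to "tcp"
// when no explicit protocol is given (previously this indexed out of range).
func (p Port) Proto() string {
	parts := strings.Split(string(p), "/")
	if len(parts) == 1 {
		return "tcp"
	}
	return parts[1]
}

// parseVolumesFromSpec splits "<container>[:ro|:rw]" into a container name
// and a read-write flag, rejecting any other suffix.
func parseVolumesFromSpec(spec string) (name string, mountRW bool, err error) {
	mountRW = true
	parts := strings.SplitN(spec, ":", 2)
	if len(parts) == 2 {
		switch parts[1] {
		case "ro":
			mountRW = false
		case "rw": // already the default
		default:
			return "", false, fmt.Errorf("Malformed volumes-from specification: %s", spec)
		}
	}
	return parts[0], mountRW, nil
}

func main() {
	fmt.Println(Port("80").Proto(), Port("53/udp").Proto()) // tcp udp
	name, rw, _ := parseVolumesFromSpec("data:ro")
	fmt.Println(name, rw) // data false
}
```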
View file

@ -3,6 +3,7 @@ package docker
import ( import (
"bufio" "bufio"
"fmt" "fmt"
"github.com/dotcloud/docker/utils"
"io" "io"
"io/ioutil" "io/ioutil"
"math/rand" "math/rand"
@ -1005,7 +1006,7 @@ func TestEnv(t *testing.T) {
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"HOME=/", "HOME=/",
"container=lxc", "container=lxc",
"HOSTNAME=" + container.ShortID(), "HOSTNAME=" + utils.TruncateID(container.ID),
"FALSE=true", "FALSE=true",
"TRUE=false", "TRUE=false",
"TRICKY=tri", "TRICKY=tri",
@ -1338,6 +1339,67 @@ func TestBindMounts(t *testing.T) {
} }
} }
// Test that -volumes-from supports mounting volumes in read-only mode
func TestFromVolumesInReadonlyMode(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(
&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/echo", "-n", "foobar"},
Volumes: map[string]struct{}{"/test": {}},
},
"",
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
_, err = container.Output()
if err != nil {
t.Fatal(err)
}
if !container.VolumesRW["/test"] {
t.Fail()
}
container2, _, err := runtime.Create(
&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/echo", "-n", "foobar"},
VolumesFrom: container.ID + ":ro",
},
"",
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container2)
_, err = container2.Output()
if err != nil {
t.Fatal(err)
}
if container.Volumes["/test"] != container2.Volumes["/test"] {
t.Logf("container volumes do not match: %s | %s ",
container.Volumes["/test"],
container2.Volumes["/test"])
t.Fail()
}
_, exists := container2.VolumesRW["/test"]
if !exists {
t.Logf("container2 is missing '/test' volume: %s", container2.VolumesRW)
t.Fail()
}
if container2.VolumesRW["/test"] != false {
t.Log("'/test' volume mounted in read-write mode, expected read-only")
t.Fail()
}
}
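// Editorial aside, not part of this commit: the ":ro" / ":rw" suffix the test
// above exercises could be parsed along these lines. parseVolumesFromSpec is
// an invented name (assumes "fmt" and "strings" are imported); Docker's real
// implementation may differ.
func parseVolumesFromSpec(spec string) (id string, writable bool, err error) {
	parts := strings.SplitN(spec, ":", 2) // "container[:ro|rw]"
	id = parts[0]
	writable = true // volumes default to read-write
	if len(parts) == 2 {
		switch parts[1] {
		case "ro":
			writable = false
		case "rw":
			writable = true
		default:
			return "", false, fmt.Errorf("invalid mode for -volumes-from: %s", parts[1])
		}
	}
	return id, writable, nil
}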
// Test that VolumesRW values are copied to the new container. Regression test for #1201 // Test that VolumesRW values are copied to the new container. Regression test for #1201
func TestVolumesFromReadonlyMount(t *testing.T) { func TestVolumesFromReadonlyMount(t *testing.T) {
runtime := mkRuntime(t) runtime := mkRuntime(t)


@ -29,7 +29,9 @@ if [ -f /etc/default/$BASE ]; then
. /etc/default/$BASE . /etc/default/$BASE
fi fi
if [ "$1" = start ] && which initctl >/dev/null && initctl version | grep -q upstart; then # see also init_is_upstart in /lib/lsb/init-functions (which isn't available in Ubuntu 12.04, or we'd use it)
if [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | /bin/grep -q upstart; then
log_failure_msg "Docker is managed via upstart, try using service $BASE $1"
exit 1 exit 1
fi fi


@ -6,5 +6,10 @@ stop on runlevel [!2345]
respawn respawn
script script
/usr/bin/docker -d DOCKER=/usr/bin/$UPSTART_JOB
DOCKER_OPTS=
if [ -f /etc/default/$UPSTART_JOB ]; then
. /etc/default/$UPSTART_JOB
fi
"$DOCKER" -d $DOCKER_OPTS
end script end script


@ -1,3 +1,19 @@
# Vagrant-docker # Vagrant integration
This is a placeholder for the official vagrant-docker, a plugin for Vagrant (http://vagrantup.com) which exposes Docker as a provider. Currently there are at least 4 different projects that we are aware of that deal
with integration with [Vagrant](http://vagrantup.com/) at different levels. One
approach is to use Docker as a [provisioner](http://docs.vagrantup.com/v2/provisioning/index.html),
which means you can create containers and pull base images on VMs using Docker's
CLI; the other is to use Docker as a [provider](http://docs.vagrantup.com/v2/providers/index.html),
meaning you can use Vagrant to control Docker containers.
### Provisioners
* [Vocker](https://github.com/fgrehm/vocker)
* [Ventriloquist](https://github.com/fgrehm/ventriloquist)
### Providers
* [docker-provider](https://github.com/fgrehm/docker-provider)
* [vagrant-shell](https://github.com/destructuring/vagrant-shell)


@ -71,7 +71,8 @@ func main() {
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
job := eng.Job("serveapi") // Load plugin: httpapi
job := eng.Job("initapi")
job.Setenv("Pidfile", *pidfile) job.Setenv("Pidfile", *pidfile)
job.Setenv("Root", *flRoot) job.Setenv("Root", *flRoot)
job.SetenvBool("AutoRestart", *flAutoRestart) job.SetenvBool("AutoRestart", *flAutoRestart)
@ -79,12 +80,17 @@ func main() {
job.Setenv("Dns", *flDns) job.Setenv("Dns", *flDns)
job.SetenvBool("EnableIptables", *flEnableIptables) job.SetenvBool("EnableIptables", *flEnableIptables)
job.Setenv("BridgeIface", *bridgeName) job.Setenv("BridgeIface", *bridgeName)
job.SetenvList("ProtoAddresses", flHosts)
job.Setenv("DefaultIp", *flDefaultIp) job.Setenv("DefaultIp", *flDefaultIp)
job.SetenvBool("InterContainerCommunication", *flInterContainerComm) job.SetenvBool("InterContainerCommunication", *flInterContainerComm)
if err := job.Run(); err != nil { if err := job.Run(); err != nil {
log.Fatal(err) log.Fatal(err)
} }
// Serve api
job = eng.Job("serveapi", flHosts...)
job.SetenvBool("Logging", true)
if err := job.Run(); err != nil {
log.Fatal(err)
}
} else { } else {
if len(flHosts) > 1 { if len(flHosts) > 1 {
log.Fatal("Please specify only one -H") log.Fatal("Please specify only one -H")


@ -121,8 +121,7 @@ Create a container
"AttachStdin":false, "AttachStdin":false,
"AttachStdout":true, "AttachStdout":true,
"AttachStderr":true, "AttachStderr":true,
"PortSpecs":null, "ExposedPorts":{},
"Privileged": false,
"Tty":false, "Tty":false,
"OpenStdin":false, "OpenStdin":false,
"StdinOnce":false, "StdinOnce":false,
@ -135,7 +134,6 @@ Create a container
"Volumes":{}, "Volumes":{},
"VolumesFrom":"", "VolumesFrom":"",
"WorkingDir":"" "WorkingDir":""
} }
**Example response**: **Example response**:
@ -242,7 +240,7 @@ Inspect a container
"AttachStdin": false, "AttachStdin": false,
"AttachStdout": true, "AttachStdout": true,
"AttachStderr": true, "AttachStderr": true,
"PortSpecs": null, "ExposedPorts": {},
"Tty": false, "Tty": false,
"OpenStdin": false, "OpenStdin": false,
"StdinOnce": false, "StdinOnce": false,
@ -413,7 +411,12 @@ Start a container
{ {
"Binds":["/tmp:/tmp"], "Binds":["/tmp:/tmp"],
"LxcConf":{"lxc.utsname":"docker"} "LxcConf":{"lxc.utsname":"docker"},
"ContainerIDFile": "",
"Privileged": false,
"PortBindings": {"22/tcp": [{HostIp:"", HostPort:""}]},
"Links": [],
"PublishAllPorts": false
} }
**Example response**: **Example response**:
@ -846,7 +849,7 @@ Inspect an image
"AttachStdin":false, "AttachStdin":false,
"AttachStdout":false, "AttachStdout":false,
"AttachStderr":false, "AttachStderr":false,
"PortSpecs":null, "ExposedPorts":{},
"Tty":true, "Tty":true,
"OpenStdin":true, "OpenStdin":true,
"StdinOnce":false, "StdinOnce":false,
@ -1192,7 +1195,7 @@ Create a new image from a container's changes
{ {
"Cmd": ["cat", "/world"], "Cmd": ["cat", "/world"],
"PortSpecs":["22"] "ExposedPorts":{"22/tcp":{}}
} }
**Example response**: **Example response**:


@ -914,7 +914,12 @@ Search images
.. http:get:: /images/search .. http:get:: /images/search
Search for an image in the docker index Search for an image in the docker index.
.. note::
The response keys have changed from API v1.6 to reflect the JSON
sent by the registry server to the docker daemon's request.
**Example request**: **Example request**:
@ -930,18 +935,28 @@ Search images
Content-Type: application/json Content-Type: application/json
[
     {
         "description": "",
         "is_official": false,
         "is_trusted": false,
         "name": "wma55/u1210sshd",
         "star_count": 0
     },
     {
         "description": "",
         "is_official": false,
         "is_trusted": false,
         "name": "jdswinbank/sshd",
         "star_count": 0
     },
     {
         "description": "",
         "is_official": false,
         "is_trusted": false,
         "name": "vgauthier/sshd",
         "star_count": 0
     }
     ...
]
:query term: term to search :query term: term to search


@ -12,26 +12,28 @@ compatibility. Please file issues with the library owners. If you
find more library implementations, please list them in Docker doc bugs find more library implementations, please list them in Docker doc bugs
and we will add the libraries here. and we will add the libraries here.
+----------------------+----------------+--------------------------------------------+----------+
| Language/Framework   | Name           | Repository                                 | Status   |
+======================+================+============================================+==========+
| Python               | docker-py      | https://github.com/dotcloud/docker-py      | Active   |
+----------------------+----------------+--------------------------------------------+----------+
| Ruby                 | docker-client  | https://github.com/geku/docker-client      | Outdated |
+----------------------+----------------+--------------------------------------------+----------+
| Ruby                 | docker-api     | https://github.com/swipely/docker-api      | Active   |
+----------------------+----------------+--------------------------------------------+----------+
| Javascript (NodeJS)  | docker.io      | https://github.com/appersonlabs/docker.io  | Active   |
|                      |                | Install via NPM: `npm install docker.io`   |          |
+----------------------+----------------+--------------------------------------------+----------+
| Javascript           | docker-js      | https://github.com/dgoujard/docker-js      | Active   |
+----------------------+----------------+--------------------------------------------+----------+
| Javascript (Angular) | dockerui       | https://github.com/crosbymichael/dockerui  | Active   |
| **WebUI**            |                |                                            |          |
+----------------------+----------------+--------------------------------------------+----------+
| Java                 | docker-java    | https://github.com/kpelykh/docker-java     | Active   |
+----------------------+----------------+--------------------------------------------+----------+
| Erlang               | erldocker      | https://github.com/proger/erldocker        | Active   |
+----------------------+----------------+--------------------------------------------+----------+
| Go                   | go-dockerclient| https://github.com/fsouza/go-dockerclient  | Active   |
+----------------------+----------------+--------------------------------------------+----------+
| PHP                  | Alvine         | http://pear.alvine.io/ (alpha)             | Active   |
+----------------------+----------------+--------------------------------------------+----------+


@ -245,6 +245,9 @@ Full -run example
Usage: docker events Usage: docker events
Get real time events from the server Get real time events from the server
-since="": Show previously created events and then stream.
(either seconds since epoch, or date string as below)
.. _cli_events_example: .. _cli_events_example:
@ -277,6 +280,23 @@ Shell 1: (Again .. now showing events)
[2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die
[2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop
Show events in the past from a specified time
.............................................
.. code-block:: bash
$ sudo docker events -since 1378216169
[2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die
[2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop
$ sudo docker events -since '2013-09-03'
[2013-09-03 15:49:26 +0200 CEST] 4386fb97867d: (from 12de384bfb10) start
[2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die
[2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop
$ sudo docker events -since '2013-09-03 15:49:29 +0200 CEST'
[2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die
[2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop
.. _cli_export: .. _cli_export:
@ -460,6 +480,12 @@ Insert file from github
The main process inside the container will be sent SIGKILL. The main process inside the container will be sent SIGKILL.
Known Issues (kill)
~~~~~~~~~~~~~~~~~~~
* :issue:`197` indicates that ``docker kill`` may leave directories
behind and make it difficult to remove the container.
.. _cli_login: .. _cli_login:
``login`` ``login``
@ -568,6 +594,12 @@ The main process inside the container will be sent SIGKILL.
Remove one or more containers Remove one or more containers
-link="": Remove the link instead of the actual container -link="": Remove the link instead of the actual container
Known Issues (rm)
~~~~~~~~~~~~~~~~~~~
* :issue:`197` indicates that ``docker kill`` may leave directories
behind and make it difficult to remove the container.
Examples: Examples:
~~~~~~~~~ ~~~~~~~~~
@ -590,6 +622,15 @@ This will remove the container referenced under the link ``/redis``.
This will remove the underlying link between ``/webapp`` and the ``/redis`` containers removing all This will remove the underlying link between ``/webapp`` and the ``/redis`` containers removing all
network communication. network communication.
.. code-block:: bash
$ docker rm `docker ps -a -q`
This command will delete all stopped containers. The command ``docker ps -a -q`` will return all
existing container IDs and pass them to the ``rm`` command which will delete them. Any running
containers will not be deleted.
.. _cli_rmi: .. _cli_rmi:
``rmi`` ``rmi``
@ -620,7 +661,7 @@ network communication.
-h="": Container host name -h="": Container host name
-i=false: Keep stdin open even if not attached -i=false: Keep stdin open even if not attached
-privileged=false: Give extended privileges to this container -privileged=false: Give extended privileges to this container
-m=0: Memory limit (in bytes) -m="": Memory limit (format: <number><optional unit>, where unit = b, k, m or g)
-n=true: Enable networking for this container -n=true: Enable networking for this container
-p=[]: Map a network port to the container -p=[]: Map a network port to the container
-rm=false: Automatically remove the container when it exits (incompatible with -d) -rm=false: Automatically remove the container when it exits (incompatible with -d)
@ -628,7 +669,7 @@ network communication.
-u="": Username or UID -u="": Username or UID
-dns=[]: Set custom dns servers for the container -dns=[]: Set custom dns servers for the container
-v=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro]. If "container-dir" is missing, then docker creates a new volume. -v=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro]. If "container-dir" is missing, then docker creates a new volume.
-volumes-from="": Mount all volumes from the given container -volumes-from="": Mount all volumes from the given container(s)
-entrypoint="": Overwrite the default entrypoint set by the image -entrypoint="": Overwrite the default entrypoint set by the image
-w="": Working directory inside the container -w="": Working directory inside the container
-lxc-conf=[]: Add custom lxc options -lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" -lxc-conf=[]: Add custom lxc options -lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"
@ -720,6 +761,17 @@ can access the network and environment of the redis container via
environment variables. The ``-name`` flag will assign the name ``console`` environment variables. The ``-name`` flag will assign the name ``console``
to the newly created container. to the newly created container.
.. code-block:: bash
docker run -volumes-from 777f7dc92da7,ba8c0c54f0f2:ro -i -t ubuntu pwd
The ``-volumes-from`` flag mounts all the defined volumes from the
reference containers. Containers can be specified by a comma-separated
list or by repetitions of the ``-volumes-from`` argument. The container
id may be optionally suffixed with ``:ro`` or ``:rw`` to mount the volumes in
read-only or read-write mode, respectively. By default, the volumes are mounted
in the same mode (rw or ro) as the reference container.
.. _cli_search: .. _cli_search:
``search`` ``search``


@ -40,7 +40,11 @@ html_additional_pages = {
# Add any Sphinx extension module names here, as strings. They can be extensions # Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinxcontrib.httpdomain'] extensions = ['sphinxcontrib.httpdomain', 'sphinx.ext.extlinks']
# Configure extlinks
extlinks = { 'issue': ('https://github.com/dotcloud/docker/issues/%s',
'Issue ') }
# Add any paths that contain templates here, relative to this directory. # Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates'] templates_path = ['_templates']


@ -10,13 +10,16 @@ Want to hack on Docker? Awesome!
The repository includes `all the instructions you need to get The repository includes `all the instructions you need to get
started <https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md>`_. started <https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md>`_.
The developer environment `Dockerfile <https://github.com/dotcloud/docker/blob/master/Dockerfile>`_ The `developer environment Dockerfile
<https://github.com/dotcloud/docker/blob/master/Dockerfile>`_
specifies the tools and versions used to test and build Docker. specifies the tools and versions used to test and build Docker.
If you're making changes to the documentation, see the If you're making changes to the documentation, see the
`README.md <https://github.com/dotcloud/docker/blob/master/docs/README.md>`_. `README.md <https://github.com/dotcloud/docker/blob/master/docs/README.md>`_.
The documentation environment `Dockerfile <https://github.com/dotcloud/docker/blob/master/docs/Dockerfile>`_ The `documentation environment Dockerfile
<https://github.com/dotcloud/docker/blob/master/docs/Dockerfile>`_
specifies the tools and versions used to build the Documentation. specifies the tools and versions used to build the Documentation.
Further interesting details can be found in the `Packaging hints <https://github.com/dotcloud/docker/blob/master/hack/PACKAGERS.md>`_. Further interesting details can be found in the `Packaging hints
<https://github.com/dotcloud/docker/blob/master/hack/PACKAGERS.md>`_.


@ -86,7 +86,7 @@ http://0.0.0.0:5000/`` in the log output.
.. code-block:: bash .. code-block:: bash
WEB_PORT=$(sudo docker port $WEB_WORKER 5000) WEB_PORT=$(sudo docker port $WEB_WORKER 5000 | awk -F: '{ print $2 }')
Look up the public-facing port which is NAT-ed. Find the private port Look up the public-facing port which is NAT-ed. Find the private port
used by the container and store it inside of the ``WEB_PORT`` variable. used by the container and store it inside of the ``WEB_PORT`` variable.


@ -102,26 +102,45 @@ Docker that way too. Vagrant 1.1 or higher is required.
we need to set them there first. Make sure you have everything on
Amazon AWS set up so you can (manually) deploy a new image to EC2.

Note that where possible these variables are the same as those honored by
the EC2 API tools.

::

   export AWS_ACCESS_KEY=xxx
   export AWS_SECRET_KEY=xxx
   export AWS_KEYPAIR_NAME=xxx
   export SSH_PRIVKEY_PATH=xxx
   export BOX_NAME=xxx
   export AWS_REGION=xxx
   export AWS_AMI=xxx
   export AWS_INSTANCE_TYPE=xxx

The required environment variables are:

* ``AWS_ACCESS_KEY`` - The API key used to make requests to AWS
* ``AWS_SECRET_KEY`` - The secret key to make AWS API requests
* ``AWS_KEYPAIR_NAME`` - The name of the keypair used for this EC2 instance
* ``SSH_PRIVKEY_PATH`` - The path to the private key for the named
  keypair, for example ``~/.ssh/docker.pem``

There are a number of optional environment variables:

* ``BOX_NAME`` - The name of the Vagrant box to use. Defaults to
  ``ubuntu``.
* ``AWS_REGION`` - The AWS region to spawn the VM in. Defaults to
  ``us-east-1``.
* ``AWS_AMI`` - The AWS AMI to start with as a base. This must be
  an Ubuntu 12.04 (precise) image. You must change this value if
  ``AWS_REGION`` is set to a value other than ``us-east-1``,
  because AMIs are region-specific. Defaults to ``ami-69f5a900``.
* ``AWS_INSTANCE_TYPE`` - The AWS instance type. Defaults to ``t1.micro``.

You can check if they are set correctly by doing something like

::

    echo $AWS_ACCESS_KEY
6. Do the magic! 6. Do the magic!


@ -38,3 +38,10 @@ was when the container was stopped.
You can promote a container to an :ref:`image_def` with ``docker You can promote a container to an :ref:`image_def` with ``docker
commit``. Once a container is an image, you can use it as a parent for commit``. Once a container is an image, you can use it as a parent for
new containers. new containers.
Container IDs
.............
All containers are identified by a 64-character hexadecimal string (internally a
256-bit value). To simplify their use, a short ID consisting of the first 12
characters can be used on the command line. There is a small possibility of
short-ID collisions, so the docker server will always return the long ID.
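As a hedged illustration (not code from this repository), producing the short
form is plain string truncation:

.. code-block:: go

    // truncateID mimics the 12-character short form described above.
    func truncateID(id string) string {
        if len(id) <= 12 {
            return id
        }
        return id[:12]
    }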


@ -36,3 +36,11 @@ Base Image
.......... ..........
An image that has no parent is a **base image**. An image that has no parent is a **base image**.
Image IDs
.........
All images are identified by a 64-character hexadecimal string (internally a
256-bit value). To simplify their use, a short ID consisting of the first 12
characters can be used on the command line. There is a small possibility of
short-ID collisions, so the docker server will always return the long ID.


@ -22,22 +22,37 @@ specify the path to it and manually start it.
# Run docker in daemon mode # Run docker in daemon mode
sudo <path to>/docker -d & sudo <path to>/docker -d &
Download a pre-built image
--------------------------

.. code-block:: bash

    # Download an ubuntu image
    sudo docker pull ubuntu

This will find the ``ubuntu`` image by name in the :ref:`Central Index
<searching_central_index>` and download it from the top-level Central
Repository to a local image cache.

.. NOTE:: When the image has successfully downloaded, you will see a 12
   character hash ``539c0211cd76: Download complete`` which is the short
   form of the image ID. These short image IDs are the first 12 characters
   of the full image ID - which can be found using ``docker inspect`` or
   ``docker images -notrunc=true``

.. _dockergroup:

Running an interactive shell
----------------------------

.. code-block:: bash

    # Run an interactive shell in the ubuntu image,
    # allocate a tty, attach stdin and stdout
    # To detach the tty without exiting the shell,
    # use the escape sequence Ctrl-p + Ctrl-q
    sudo docker run -i -t ubuntu /bin/bash
Why ``sudo``? Why ``sudo``?
------------- -------------


@ -116,6 +116,16 @@ core concepts of Docker where commits are cheap and containers can be
created from any point in an image's history, much like source created from any point in an image's history, much like source
control. control.
Known Issues (RUN)
..................
* :issue:`783` is about file permissions problems that can occur when
using the AUFS file system. You might notice it during an attempt to
``rm`` a file, for example. The issue describes a workaround.
* :issue:`2424` Locale will not be set automatically.
3.4 CMD 3.4 CMD
------- -------
@ -211,8 +221,16 @@ destination container.
All new files and directories are created with mode 0755, uid and gid All new files and directories are created with mode 0755, uid and gid
0. 0.
.. note::
If you build using STDIN (``docker build - < somefile``), there is no build
context, so the Dockerfile can only contain a URL-based ADD statement.
The copy obeys the following rules: The copy obeys the following rules:
* The ``<src>`` path must be inside the *context* of the build; you cannot
``ADD ../something /something``, because the first step of a
``docker build`` is to send the context directory (and subdirectories) to
the docker daemon.
* If ``<src>`` is a URL and ``<dest>`` does not end with a trailing slash, * If ``<src>`` is a URL and ``<dest>`` does not end with a trailing slash,
then a file is downloaded from the URL and copied to ``<dest>``. then a file is downloaded from the URL and copied to ``<dest>``.
* If ``<src>`` is a URL and ``<dest>`` does end with a trailing slash, * If ``<src>`` is a URL and ``<dest>`` does end with a trailing slash,


@ -20,3 +20,4 @@ Contents:
puppet puppet
host_integration host_integration
working_with_volumes working_with_volumes
working_with_links_names


@ -0,0 +1,104 @@
:title: Working with Links and Names
:description: How to create and use links and names
:keywords: Examples, Usage, links, docker, documentation, examples, names, name, container naming
.. _working_with_links_names:
Working with Links and Names
============================
As of version 0.6.5 you can ``name`` a container and ``link`` it to another
container by referring to its name. This creates a parent -> child relationship
where the parent container can see selected information about its child.
.. _run_name:
Container Naming
----------------
.. versionadded:: v0.6.5
You can now name your container by using the ``-name`` flag. If no name is provided, Docker
will automatically generate a name. You can see this name using the ``docker ps`` command.
.. code-block:: bash
# format is "sudo docker run -name <container_name> <image_name> <command>"
$ sudo docker run -name test ubuntu /bin/bash
# the -a flag shows all containers (only running containers are shown by default)
$ sudo docker ps -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
2522602a0d99 ubuntu:12.04 /bin/bash 14 seconds ago Exit 0 test
.. _run_link:
Links: service discovery for docker
-----------------------------------
.. versionadded:: v0.6.5
Links allow containers to discover and securely communicate with each other by using the
flag ``-link name:alias``. Inter-container communication can be disabled with the daemon
flag ``-icc=false``. With this flag set to false, Container A cannot access Container B
unless explicitly allowed via a link. This is a huge win for securing your containers.
When two containers are linked together, Docker creates a parent-child relationship
between the containers. The parent container will be able to access information via
environment variables of the child such as name, exposed ports, IP and other selected
environment variables.
When linking two containers, Docker will use the exposed ports of the container to create
a secure tunnel for the parent to access. If a database container only exposes port 8080,
and inter-container communication is set to false, the linked container will only be
allowed to access port 8080 and nothing else.
.. code-block:: bash
# Example: there is an image called redis-2.6 that exposes the port 6379 and starts redis-server.
# Let's name the container as "redis" based on that image and run it as daemon.
$ sudo docker run -d -name redis redis-2.6
We can issue all the commands that you would expect using the name "redis"; start, stop,
attach, using the name for our container. The name also allows us to link other containers
into this one.
Next, we can start a new web application that has a dependency on Redis and apply a link
to connect both containers. Note that when running our Redis server we did not use
the -p flag to publish the Redis port to the host system. Redis exposed port 6379,
and this is all we need to establish a link.
.. code-block:: bash
# Linking the redis container as a child
$ sudo docker run -t -i -link redis:db -name webapp ubuntu bash
By specifying -link redis:db you are telling Docker to link the container named redis
into this new container with the alias db. Environment variables are prefixed with the alias
so that the parent container can access network and environment information from the containers
that are linked into it.
If we inspect the environment variables of the second container, we can see all the information
about the child container.
.. code-block:: bash
root@4c01db0b339c:/# env
HOSTNAME=4c01db0b339c
DB_NAME=/webapp/db
TERM=xterm
DB_PORT=tcp://172.17.0.8:6379
DB_PORT_6379_TCP=tcp://172.17.0.8:6379
DB_PORT_6379_TCP_PROTO=tcp
DB_PORT_6379_TCP_ADDR=172.17.0.8
DB_PORT_6379_TCP_PORT=6379
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
PWD=/
SHLVL=1
HOME=/
container=lxc
_=/usr/bin/env
root@4c01db0b339c:/#
Accessing the network information along with the environment of the child container allows
us to easily connect to the Redis service on the specific IP and port in the environment.
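As a hedged illustration (an invented example, not from the Docker docs), a
program in the ``webapp`` container could consume the injected variables
directly; the variable names follow the env output above:

.. code-block:: go

    package main

    import (
        "fmt"
        "net"
        "os"
    )

    func main() {
        addr := os.Getenv("DB_PORT_6379_TCP_ADDR") // e.g. 172.17.0.8
        port := os.Getenv("DB_PORT_6379_TCP_PORT") // e.g. 6379
        conn, err := net.Dial("tcp", net.JoinHostPort(addr, port))
        if err != nil {
            fmt.Fprintln(os.Stderr, "cannot reach linked redis:", err)
            os.Exit(1)
        }
        defer conn.Close()
        fmt.Println("connected to", conn.RemoteAddr())
    }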


@ -129,7 +129,8 @@
<div class="row footer"> <div class="row footer">
<div class="span12 tbox"> <div class="span12 tbox">
<div class="tbox"> <div class="tbox">
<p>Docker is an open source project, sponsored by <a href="https://dotcloud.com">dotCloud</a>, under the <a href="https://github.com/dotcloud/docker/blob/master/LICENSE" title="Docker licence, hosted in the Github repository">apache 2.0 licence</a></p> <p>Docker is an open source project, sponsored by <a href="https://www.docker.com">Docker Inc.</a>, under the <a href="https://github.com/dotcloud/docker/blob/master/LICENSE" title="Docker licence, hosted in the Github repository">apache 2.0 licence</a></p>
<p>Documentation proudly hosted by <a href="http://www.readthedocs.org">Read the Docs</a></p>
</div> </div>
<div class="social links"> <div class="social links">


@ -6,15 +6,21 @@ import (
"log" "log"
"os" "os"
"runtime" "runtime"
"strings"
) )
type Handler func(*Job) string type Handler func(*Job) string
var globalHandlers map[string]Handler var globalHandlers map[string]Handler
func init() {
globalHandlers = make(map[string]Handler)
}
func Register(name string, handler Handler) error { func Register(name string, handler Handler) error {
if globalHandlers == nil { _, exists := globalHandlers[name]
globalHandlers = make(map[string]Handler) if exists {
return fmt.Errorf("Can't overwrite global handler for command %s", name)
} }
globalHandlers[name] = handler globalHandlers[name] = handler
return nil return nil
@ -26,6 +32,22 @@ func Register(name string, handler Handler) error {
type Engine struct { type Engine struct {
root string root string
handlers map[string]Handler handlers map[string]Handler
hack Hack // data for temporary hackery (see hack.go)
id string
}
func (eng *Engine) Root() string {
return eng.root
}
func (eng *Engine) Register(name string, handler Handler) error {
eng.Logf("Register(%s) (handlers=%v)", name, eng.handlers)
_, exists := eng.handlers[name]
if exists {
return fmt.Errorf("Can't overwrite handler for command %s", name)
}
eng.handlers[name] = handler
return nil
} }
// New initializes a new engine managing the directory specified at `root`. // New initializes a new engine managing the directory specified at `root`.
@ -56,16 +78,25 @@ func New(root string) (*Engine, error) {
} }
eng := &Engine{ eng := &Engine{
root: root, root: root,
handlers: globalHandlers, handlers: make(map[string]Handler),
id: utils.RandomString(),
}
// Copy existing global handlers
for k, v := range globalHandlers {
eng.handlers[k] = v
} }
return eng, nil return eng, nil
} }
func (eng *Engine) String() string {
return fmt.Sprintf("%s|%s", eng.Root(), eng.id[:8])
}
// Job creates a new job which can later be executed. // Job creates a new job which can later be executed.
// This function mimics `Command` from the standard os/exec package. // This function mimics `Command` from the standard os/exec package.
func (eng *Engine) Job(name string, args ...string) *Job { func (eng *Engine) Job(name string, args ...string) *Job {
job := &Job{ job := &Job{
eng: eng, Eng: eng,
Name: name, Name: name,
Args: args, Args: args,
Stdin: os.Stdin, Stdin: os.Stdin,
@ -78,3 +109,8 @@ func (eng *Engine) Job(name string, args ...string) *Job {
} }
return job return job
} }
func (eng *Engine) Logf(format string, args ...interface{}) (n int, err error) {
prefixedFormat := fmt.Sprintf("[%s] %s\n", eng, strings.TrimRight(format, "\n"))
return fmt.Fprintf(os.Stderr, prefixedFormat, args...)
}
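// Editorial usage sketch, not part of this commit: registering a global
// handler, creating an engine, and running a job through it. The "hello"
// handler, root path, and output are invented examples; assumes returning
// "0" reports success to Job.Run.
package main

import (
	"log"

	"github.com/dotcloud/docker/engine"
)

func init() {
	engine.Register("hello", func(job *engine.Job) string {
		job.Printf("hello %s\n", job.Args[0])
		return "0" // success status
	})
}

func main() {
	eng, err := engine.New("/var/lib/docker-example") // root dir, invented
	if err != nil {
		log.Fatal(err)
	}
	if err := eng.Job("hello", "world").Run(); err != nil {
		log.Fatal(err)
	}
}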

engine/hack.go Normal file

@ -0,0 +1,21 @@
package engine
type Hack map[string]interface{}
func (eng *Engine) Hack_GetGlobalVar(key string) interface{} {
if eng.hack == nil {
return nil
}
val, exists := eng.hack[key]
if !exists {
return nil
}
return val
}
func (eng *Engine) Hack_SetGlobalVar(key string, val interface{}) {
if eng.hack == nil {
eng.hack = make(Hack)
}
eng.hack[key] = val
}
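// Editorial sketch, not part of this commit: the Hack_* escape hatch simply
// stashes arbitrary values on the engine. Key and value are invented; the
// caller must know the stored type when reading it back.
//
//	eng.Hack_SetGlobalVar("httpapi.bridgeIP", "172.17.42.1")
//	if v := eng.Hack_GetGlobalVar("httpapi.bridgeIP"); v != nil {
//		ip := v.(string) // type assertion is the caller's responsibility
//		fmt.Println("bridge ip:", ip)
//	}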


@ -1,11 +1,16 @@
package engine package engine
import ( import (
"bufio"
"bytes"
"encoding/json" "encoding/json"
"fmt" "fmt"
"github.com/dotcloud/docker/utils"
"io" "io"
"io/ioutil"
"os"
"strconv"
"strings" "strings"
"sync"
) )
// A job is the fundamental unit of work in the docker engine. // A job is the fundamental unit of work in the docker engine.
@ -22,24 +27,43 @@ import (
// This allows for richer error reporting. // This allows for richer error reporting.
// //
type Job struct { type Job struct {
eng *Engine Eng *Engine
Name string Name string
Args []string Args []string
env []string env []string
Stdin io.ReadCloser Stdin io.Reader
Stdout io.WriteCloser Stdout io.Writer
Stderr io.WriteCloser Stderr io.Writer
handler func(*Job) string handler func(*Job) string
status string status string
onExit []func()
} }
// Run executes the job and blocks until the job completes. // Run executes the job and blocks until the job completes.
// If the job returns a failure status, an error is returned // If the job returns a failure status, an error is returned
// which includes the status. // which includes the status.
func (job *Job) Run() error { func (job *Job) Run() error {
randId := utils.RandomString()[:4] defer func() {
fmt.Printf("Job #%s: %s\n", randId, job) var wg sync.WaitGroup
defer fmt.Printf("Job #%s: %s = '%s'", randId, job, job.status) for _, f := range job.onExit {
wg.Add(1)
go func(f func()) {
f()
wg.Done()
}(f)
}
wg.Wait()
}()
if job.Stdout != nil && job.Stdout != os.Stdout {
job.Stdout = io.MultiWriter(job.Stdout, os.Stdout)
}
if job.Stderr != nil && job.Stderr != os.Stderr {
job.Stderr = io.MultiWriter(job.Stderr, os.Stderr)
}
job.Eng.Logf("+job %s", job.CallString())
defer func() {
job.Eng.Logf("-job %s%s", job.CallString(), job.StatusString())
}()
if job.handler == nil { if job.handler == nil {
job.status = "command not found" job.status = "command not found"
} else { } else {
@ -51,9 +75,87 @@ func (job *Job) Run() error {
return nil return nil
} }
func (job *Job) StdoutParseLines(dst *[]string, limit int) {
job.parseLines(job.StdoutPipe(), dst, limit)
}
func (job *Job) StderrParseLines(dst *[]string, limit int) {
job.parseLines(job.StderrPipe(), dst, limit)
}
func (job *Job) parseLines(src io.Reader, dst *[]string, limit int) {
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
scanner := bufio.NewScanner(src)
for scanner.Scan() {
// If the limit is reached, flush the rest of the source and return
if limit > 0 && len(*dst) >= limit {
io.Copy(ioutil.Discard, src)
return
}
line := scanner.Text()
// Append the line (with delimiter removed)
*dst = append(*dst, line)
}
}()
job.onExit = append(job.onExit, wg.Wait)
}
func (job *Job) StdoutParseString(dst *string) {
lines := make([]string, 0, 1)
job.StdoutParseLines(&lines, 1)
job.onExit = append(job.onExit, func() {
if len(lines) >= 1 {
*dst = lines[0]
}
})
}
func (job *Job) StderrParseString(dst *string) {
lines := make([]string, 0, 1)
job.StderrParseLines(&lines, 1)
job.onExit = append(job.onExit, func() { if len(lines) >= 1 { *dst = lines[0] } }) // guard against empty output, mirroring StdoutParseString
}
func (job *Job) StdoutPipe() io.ReadCloser {
r, w := io.Pipe()
job.Stdout = w
job.onExit = append(job.onExit, func() { w.Close() })
return r
}
func (job *Job) StderrPipe() io.ReadCloser {
r, w := io.Pipe()
job.Stderr = w
job.onExit = append(job.onExit, func() { w.Close() })
return r
}
func (job *Job) CallString() string {
return fmt.Sprintf("%s(%s)", job.Name, strings.Join(job.Args, ", "))
}
func (job *Job) StatusString() string {
// FIXME: if a job returns the empty string, it will be printed
// as not having returned.
// (this only affects String which is a convenience function).
if job.status != "" {
var okerr string
if job.status == "0" {
okerr = "OK"
} else {
okerr = "ERR"
}
return fmt.Sprintf(" = %s (%s)", okerr, job.status)
}
return ""
}
// String returns a human-readable description of `job` // String returns a human-readable description of `job`
func (job *Job) String() string { func (job *Job) String() string {
return strings.Join(append([]string{job.Name}, job.Args...), " ") return fmt.Sprintf("%s.%s%s", job.Eng, job.CallString(), job.StatusString())
} }
func (job *Job) Getenv(key string) (value string) { func (job *Job) Getenv(key string) (value string) {
@ -90,6 +192,19 @@ func (job *Job) SetenvBool(key string, value bool) {
} }
} }
func (job *Job) GetenvInt(key string) int64 {
s := strings.Trim(job.Getenv(key), " \t")
val, err := strconv.ParseInt(s, 10, 64)
if err != nil {
return -1
}
return val
}
func (job *Job) SetenvInt(key string, value int64) {
job.Setenv(key, fmt.Sprintf("%d", value))
}
func (job *Job) GetenvList(key string) []string { func (job *Job) GetenvList(key string) []string {
sval := job.Getenv(key) sval := job.Getenv(key)
l := make([]string, 0, 1) l := make([]string, 0, 1)
@ -111,3 +226,109 @@ func (job *Job) SetenvList(key string, value []string) error {
func (job *Job) Setenv(key, value string) { func (job *Job) Setenv(key, value string) {
job.env = append(job.env, key+"="+value) job.env = append(job.env, key+"="+value)
} }
// DecodeEnv decodes `src` as a json dictionary, and adds
// each decoded key-value pair to the environment.
//
// If `text` cannot be decoded as a json dictionary, an error
// is returned.
func (job *Job) DecodeEnv(src io.Reader) error {
m := make(map[string]interface{})
if err := json.NewDecoder(src).Decode(&m); err != nil {
return err
}
for k, v := range m {
// FIXME: we fix-convert float values to int, because
// encoding/json decodes integers to float64, but cannot encode them back.
// (See http://golang.org/src/pkg/encoding/json/decode.go#L46)
if fval, ok := v.(float64); ok {
job.SetenvInt(k, int64(fval))
} else if sval, ok := v.(string); ok {
job.Setenv(k, sval)
} else if val, err := json.Marshal(v); err == nil {
job.Setenv(k, string(val))
} else {
job.Setenv(k, fmt.Sprintf("%v", v))
}
}
return nil
}
func (job *Job) EncodeEnv(dst io.Writer) error {
m := make(map[string]interface{})
for k, v := range job.Environ() {
var val interface{}
if err := json.Unmarshal([]byte(v), &val); err == nil {
// FIXME: we fix-convert float values to int, because
// encoding/json decodes integers to float64, but cannot encode them back.
// (See http://golang.org/src/pkg/encoding/json/decode.go#L46)
if fval, isFloat := val.(float64); isFloat {
val = int(fval)
}
m[k] = val
} else {
m[k] = v
}
}
if err := json.NewEncoder(dst).Encode(&m); err != nil {
return err
}
return nil
}
func (job *Job) ExportEnv(dst interface{}) (err error) {
defer func() {
if err != nil {
err = fmt.Errorf("ExportEnv %s", err)
}
}()
var buf bytes.Buffer
// step 1: encode/marshal the env to an intermediary json representation
if err := job.EncodeEnv(&buf); err != nil {
return err
}
// step 2: decode/unmarshal the intermediary json into the destination object
if err := json.NewDecoder(&buf).Decode(dst); err != nil {
return err
}
return nil
}
func (job *Job) ImportEnv(src interface{}) (err error) {
defer func() {
if err != nil {
err = fmt.Errorf("ImportEnv: %s", err)
}
}()
var buf bytes.Buffer
if err := json.NewEncoder(&buf).Encode(src); err != nil {
return err
}
if err := job.DecodeEnv(&buf); err != nil {
return err
}
return nil
}
func (job *Job) Environ() map[string]string {
m := make(map[string]string)
for _, kv := range job.env {
parts := strings.SplitN(kv, "=", 2)
m[parts[0]] = parts[1]
}
return m
}
func (job *Job) Logf(format string, args ...interface{}) (n int, err error) {
prefixedFormat := fmt.Sprintf("[%s] %s\n", job, strings.TrimRight(format, "\n"))
return fmt.Fprintf(job.Stderr, prefixedFormat, args...)
}
func (job *Job) Printf(format string, args ...interface{}) (n int, err error) {
return fmt.Fprintf(job.Stdout, format, args...)
}
func (job *Job) Errorf(format string, args ...interface{}) (n int, err error) {
return fmt.Fprintf(job.Stderr, format, args...)
}
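// Editorial sketch, not part of this commit: round-tripping typed values
// through the env helpers above. The job name, keys, and struct fields are
// invented for illustration.
package main

import (
	"fmt"
	"log"

	"github.com/dotcloud/docker/engine"
)

type apiConfig struct {
	Pidfile string
	Timeout int64
}

func main() {
	eng, err := engine.New("/var/lib/docker-example")
	if err != nil {
		log.Fatal(err)
	}
	job := eng.Job("initapi")
	job.Setenv("Pidfile", "/var/run/docker.pid")
	job.SetenvInt("Timeout", 30)

	var cfg apiConfig
	// ExportEnv encodes the job env to JSON, then decodes it into cfg.
	if err := job.ExportEnv(&cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", cfg) // {Pidfile:/var/run/docker.pid Timeout:30}
}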


@ -15,7 +15,7 @@ func init() {
Register("dummy", func(job *Job) string { return "" }) Register("dummy", func(job *Job) string { return "" })
} }
func mkEngine(t *testing.T) *Engine { func newTestEngine(t *testing.T) *Engine {
// Use the caller function name as a prefix. // Use the caller function name as a prefix.
// This helps trace temp directories back to their test. // This helps trace temp directories back to their test.
pc, _, _, _ := runtime.Caller(1) pc, _, _, _ := runtime.Caller(1)
@ -38,5 +38,5 @@ func mkEngine(t *testing.T) *Engine {
} }
func mkJob(t *testing.T, name string, args ...string) *Job { func mkJob(t *testing.T, name string, args ...string) *Job {
return mkEngine(t).Job(name, args...) return newTestEngine(t).Job(name, args...)
} }


@ -48,7 +48,7 @@ type WalkFunc func(fullPath string, entity *Entity) error
// Graph database for storing entities and their relationships // Graph database for storing entities and their relationships
type Database struct { type Database struct {
conn *sql.DB conn *sql.DB
mux sync.Mutex mux sync.RWMutex
} }
// Create a new graph database initialized with a root entity // Create a new graph database initialized with a root entity
@ -138,7 +138,14 @@ func (db *Database) Set(fullPath, id string) (*Entity, error) {
// Return true if a name already exists in the database // Return true if a name already exists in the database
func (db *Database) Exists(name string) bool { func (db *Database) Exists(name string) bool {
return db.Get(name) != nil db.mux.RLock()
defer db.mux.RUnlock()
e, err := db.get(name)
if err != nil {
return false
}
return e != nil
} }
func (db *Database) setEdge(parentPath, name string, e *Entity) error { func (db *Database) setEdge(parentPath, name string, e *Entity) error {
@ -165,6 +172,9 @@ func (db *Database) RootEntity() *Entity {
// Return the entity for a given path // Return the entity for a given path
func (db *Database) Get(name string) *Entity { func (db *Database) Get(name string) *Entity {
db.mux.RLock()
defer db.mux.RUnlock()
e, err := db.get(name) e, err := db.get(name)
if err != nil { if err != nil {
return nil return nil
@ -200,23 +210,36 @@ func (db *Database) get(name string) (*Entity, error) {
// List all entities by from the name // List all entities by from the name
// The key will be the full path of the entity // The key will be the full path of the entity
func (db *Database) List(name string, depth int) Entities { func (db *Database) List(name string, depth int) Entities {
db.mux.RLock()
defer db.mux.RUnlock()
out := Entities{} out := Entities{}
e, err := db.get(name) e, err := db.get(name)
if err != nil { if err != nil {
return out return out
} }
for c := range db.children(e, name, depth) {
children, err := db.children(e, name, depth, nil)
if err != nil {
return out
}
for _, c := range children {
out[c.FullPath] = c.Entity out[c.FullPath] = c.Entity
} }
return out return out
} }
// Walk through the child graph of an entity, calling walkFunc for each child entity.
// It is safe for walkFunc to call graph functions.
func (db *Database) Walk(name string, walkFunc WalkFunc, depth int) error { func (db *Database) Walk(name string, walkFunc WalkFunc, depth int) error {
e, err := db.get(name) children, err := db.Children(name, depth)
if err != nil { if err != nil {
return err return err
} }
for c := range db.children(e, name, depth) {
// Note: the database lock must not be held while calling walkFunc
for _, c := range children {
if err := walkFunc(c.FullPath, c.Entity); err != nil { if err := walkFunc(c.FullPath, c.Entity); err != nil {
return err return err
} }
@ -224,8 +247,24 @@ func (db *Database) Walk(name string, walkFunc WalkFunc, depth int) error {
return nil return nil
} }
// Return the children of the specified entity
func (db *Database) Children(name string, depth int) ([]WalkMeta, error) {
db.mux.RLock()
defer db.mux.RUnlock()
e, err := db.get(name)
if err != nil {
return nil, err
}
return db.children(e, name, depth, nil)
}
// Return the reference count for a specified id // Return the reference count for a specified id
func (db *Database) Refs(id string) int { func (db *Database) Refs(id string) int {
db.mux.RLock()
defer db.mux.RUnlock()
var count int var count int
if err := db.conn.QueryRow("SELECT COUNT(*) FROM edge WHERE entity_id = ?;", id).Scan(&count); err != nil { if err := db.conn.QueryRow("SELECT COUNT(*) FROM edge WHERE entity_id = ?;", id).Scan(&count); err != nil {
return 0 return 0
@ -235,6 +274,9 @@ func (db *Database) Refs(id string) int {
// Return all the id's path references // Return all the id's path references
func (db *Database) RefPaths(id string) Edges { func (db *Database) RefPaths(id string) Edges {
db.mux.RLock()
defer db.mux.RUnlock()
refs := Edges{} refs := Edges{}
rows, err := db.conn.Query("SELECT name, parent_id FROM edge WHERE entity_id = ?;", id) rows, err := db.conn.Query("SELECT name, parent_id FROM edge WHERE entity_id = ?;", id)
@ -356,56 +398,51 @@ type WalkMeta struct {
Edge *Edge Edge *Edge
} }
func (db *Database) children(e *Entity, name string, depth int) <-chan WalkMeta { func (db *Database) children(e *Entity, name string, depth int, entities []WalkMeta) ([]WalkMeta, error) {
out := make(chan WalkMeta)
if e == nil { if e == nil {
close(out) return entities, nil
return out
} }
go func() { rows, err := db.conn.Query("SELECT entity_id, name FROM edge where parent_id = ?;", e.id)
rows, err := db.conn.Query("SELECT entity_id, name FROM edge where parent_id = ?;", e.id) if err != nil {
if err != nil { return nil, err
close(out) }
defer rows.Close()
for rows.Next() {
var entityId, entityName string
if err := rows.Scan(&entityId, &entityName); err != nil {
return nil, err
}
child := &Entity{entityId}
edge := &Edge{
ParentID: e.id,
Name: entityName,
EntityID: child.id,
} }
defer rows.Close()
for rows.Next() { meta := WalkMeta{
var entityId, entityName string Parent: e,
if err := rows.Scan(&entityId, &entityName); err != nil { Entity: child,
// Log error FullPath: path.Join(name, edge.Name),
continue Edge: edge,
} }
child := &Entity{entityId}
edge := &Edge{
ParentID: e.id,
Name: entityName,
EntityID: child.id,
}
meta := WalkMeta{ entities = append(entities, meta)
Parent: e,
Entity: child,
FullPath: path.Join(name, edge.Name),
Edge: edge,
}
out <- meta if depth != 0 {
if depth == 0 {
continue
}
nDepth := depth nDepth := depth
if depth != -1 { if depth != -1 {
nDepth -= 1 nDepth -= 1
} }
sc := db.children(child, meta.FullPath, nDepth) entities, err = db.children(child, meta.FullPath, nDepth, entities)
for c := range sc { if err != nil {
out <- c return nil, err
} }
} }
close(out) }
}()
return out return entities, nil
} }
// Return the entity based on the parent path and name // Return the entity based on the parent path and name
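// Editorial sketch, not part of this commit: because the rewritten Walk
// collects children up front and releases the read lock before invoking
// walkFunc, the callback may safely re-enter the database. listAll and the
// root path are invented; assumes "fmt" is imported alongside this package.
//
//	func listAll(db *Database) error {
//		return db.Walk("/", func(fullPath string, e *Entity) error {
//			// Re-entrant call: Exists takes the read lock Walk no longer holds.
//			fmt.Printf("%s exists=%v\n", fullPath, db.Exists(fullPath))
//			return nil
//		}, -1) // depth -1 recurses without limit
//	}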


@ -5,7 +5,7 @@ It is a curated selection of planned improvements which are either important, di
For a more complete view of planned and requested improvements, see [the Github issues](https://github.com/dotcloud/docker/issues). For a more complete view of planned and requested improvements, see [the Github issues](https://github.com/dotcloud/docker/issues).
Tu suggest changes to the roadmap, including additions, please write the change as if it were already in effect, and make a pull request. To suggest changes to the roadmap, including additions, please write the change as if it were already in effect, and make a pull request.
## Container wiring and service discovery ## Container wiring and service discovery


@ -1,14 +1,16 @@
# VERSION: 0.22 # VERSION: 0.25
# DOCKER-VERSION 0.6.3 # DOCKER-VERSION 0.6.6
# AUTHOR: Daniel Mizyrycki <daniel@dotcloud.com> # AUTHOR: Daniel Mizyrycki <daniel@docker.com>
# DESCRIPTION: Deploy docker-ci on Amazon EC2 # DESCRIPTION: Deploy docker-ci on Digital Ocean
# COMMENTS: # COMMENTS:
# CONFIG_JSON is an environment variable json string loaded as: # CONFIG_JSON is an environment variable json string loaded as:
# #
# export CONFIG_JSON=' # export CONFIG_JSON='
# { "AWS_TAG": "EC2_instance_name", # { "DROPLET_NAME": "docker-ci",
# "AWS_ACCESS_KEY": "EC2_access_key", # "DO_CLIENT_ID": "Digital_Ocean_client_id",
# "AWS_SECRET_KEY": "EC2_secret_key", # "DO_API_KEY": "Digital_Ocean_api_key",
# "DOCKER_KEY_ID": "Digital_Ocean_ssh_key_id",
# "DOCKER_CI_KEY_PATH": "docker-ci_private_key_path",
# "DOCKER_CI_PUB": "$(cat docker-ci_ssh_public_key.pub)", # "DOCKER_CI_PUB": "$(cat docker-ci_ssh_public_key.pub)",
# "DOCKER_CI_KEY": "$(cat docker-ci_ssh_private_key.key)", # "DOCKER_CI_KEY": "$(cat docker-ci_ssh_private_key.key)",
# "BUILDBOT_PWD": "Buildbot_server_password", # "BUILDBOT_PWD": "Buildbot_server_password",
@ -33,9 +35,11 @@
from ubuntu:12.04 from ubuntu:12.04
run echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > /etc/apt/sources.list run echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' \
run apt-get update; apt-get install -y python2.7 python-dev python-pip ssh rsync less vim > /etc/apt/sources.list
run pip install boto fabric run apt-get update; apt-get install -y git python2.7 python-dev libevent-dev \
python-pip ssh rsync less vim
run pip install requests fabric
# Add deployment code and set default container command # Add deployment code and set default container command
add . /docker-ci add . /docker-ci


@ -0,0 +1 @@
0.4.5


@ -43,7 +43,7 @@ c['slavePortnum'] = PORT_MASTER
# Schedulers # Schedulers
c['schedulers'] = [ForceScheduler(name='trigger', builderNames=['docker', c['schedulers'] = [ForceScheduler(name='trigger', builderNames=['docker',
'index','registry','coverage','nightlyrelease'])] 'index','registry','docker-coverage','registry-coverage','nightlyrelease'])]
c['schedulers'] += [SingleBranchScheduler(name="all", treeStableTimer=None, c['schedulers'] += [SingleBranchScheduler(name="all", treeStableTimer=None,
change_filter=filter.ChangeFilter(branch='master', change_filter=filter.ChangeFilter(branch='master',
repository='https://github.com/dotcloud/docker'), builderNames=['docker'])] repository='https://github.com/dotcloud/docker'), builderNames=['docker'])]
@ -51,7 +51,7 @@ c['schedulers'] += [SingleBranchScheduler(name='pullrequest',
change_filter=filter.ChangeFilter(category='github_pullrequest'), treeStableTimer=None, change_filter=filter.ChangeFilter(category='github_pullrequest'), treeStableTimer=None,
builderNames=['pullrequest'])] builderNames=['pullrequest'])]
c['schedulers'] += [Nightly(name='daily', branch=None, builderNames=['nightlyrelease', c['schedulers'] += [Nightly(name='daily', branch=None, builderNames=['nightlyrelease',
'coverage'], hour=7, minute=00)] 'docker-coverage','registry-coverage'], hour=7, minute=00)]
c['schedulers'] += [Nightly(name='every4hrs', branch=None, builderNames=['registry','index'], c['schedulers'] += [Nightly(name='every4hrs', branch=None, builderNames=['registry','index'],
hour=range(0,24,4), minute=15)] hour=range(0,24,4), minute=15)]
@ -76,17 +76,25 @@ c['builders'] += [BuilderConfig(name='pullrequest',slavenames=['buildworker'],
# Docker coverage test # Docker coverage test
factory = BuildFactory() factory = BuildFactory()
factory.addStep(ShellCommand(description='Coverage', logEnviron=False, factory.addStep(ShellCommand(description='docker-coverage', logEnviron=False,
usePTY=True, command='{0}/docker-coverage/coverage-docker.sh'.format( usePTY=True, command='{0}/docker-coverage/coverage-docker.sh'.format(
DOCKER_CI_PATH))) DOCKER_CI_PATH)))
c['builders'] += [BuilderConfig(name='coverage',slavenames=['buildworker'], c['builders'] += [BuilderConfig(name='docker-coverage',slavenames=['buildworker'],
factory=factory)]
# Docker registry coverage test
factory = BuildFactory()
factory.addStep(ShellCommand(description='registry-coverage', logEnviron=False,
usePTY=True, command='docker run registry_coverage'.format(
DOCKER_CI_PATH)))
c['builders'] += [BuilderConfig(name='registry-coverage',slavenames=['buildworker'],
factory=factory)] factory=factory)]
# Registry functional test # Registry functional test
factory = BuildFactory() factory = BuildFactory()
factory.addStep(ShellCommand(description='registry', logEnviron=False, factory.addStep(ShellCommand(description='registry', logEnviron=False,
command='. {0}/master/credentials.cfg; ' command='. {0}/master/credentials.cfg; '
'/docker-ci/functionaltests/test_registry.sh'.format(BUILDBOT_PATH), '{1}/functionaltests/test_registry.sh'.format(BUILDBOT_PATH, DOCKER_CI_PATH),
usePTY=True)) usePTY=True))
c['builders'] += [BuilderConfig(name='registry',slavenames=['buildworker'], c['builders'] += [BuilderConfig(name='registry',slavenames=['buildworker'],
factory=factory)] factory=factory)]
@ -95,16 +103,17 @@ c['builders'] += [BuilderConfig(name='registry',slavenames=['buildworker'],
factory = BuildFactory() factory = BuildFactory()
factory.addStep(ShellCommand(description='index', logEnviron=False, factory.addStep(ShellCommand(description='index', logEnviron=False,
command='. {0}/master/credentials.cfg; ' command='. {0}/master/credentials.cfg; '
'/docker-ci/functionaltests/test_index.py'.format(BUILDBOT_PATH), '{1}/functionaltests/test_index.py'.format(BUILDBOT_PATH, DOCKER_CI_PATH),
usePTY=True)) usePTY=True))
c['builders'] += [BuilderConfig(name='index',slavenames=['buildworker'], c['builders'] += [BuilderConfig(name='index',slavenames=['buildworker'],
factory=factory)] factory=factory)]
# Docker nightly release # Docker nightly release
nightlyrelease_cmd = ('docker version; docker run -i -t -privileged -e AWS_S3_BUCKET='
'test.docker.io dockerbuilder hack/dind dockerbuild.sh')
factory = BuildFactory() factory = BuildFactory()
factory.addStep(ShellCommand(description='NightlyRelease', logEnviron=False, factory.addStep(ShellCommand(description='NightlyRelease',logEnviron=False,
usePTY=True, command='docker run -privileged' usePTY=True, command=nightlyrelease_cmd))
' -e AWS_S3_BUCKET=test.docker.io dockerbuilder'))
c['builders'] += [BuilderConfig(name='nightlyrelease',slavenames=['buildworker'], c['builders'] += [BuilderConfig(name='nightlyrelease',slavenames=['buildworker'],
factory=factory)] factory=factory)]


@ -1,11 +1,11 @@
#!/usr/bin/env python #!/usr/bin/env python
import os, sys, re, json, base64 import os, sys, re, json, requests, base64
from boto.ec2.connection import EC2Connection
from subprocess import call from subprocess import call
from fabric import api from fabric import api
from fabric.api import cd, run, put, sudo from fabric.api import cd, run, put, sudo
from os import environ as env from os import environ as env
from datetime import datetime
from time import sleep from time import sleep
# Remove SSH private key as it needs more processing # Remove SSH private key as it needs more processing
@ -20,42 +20,41 @@ for key in CONFIG:
env['DOCKER_CI_KEY'] = re.sub('^.+"DOCKER_CI_KEY".+?"(.+?)".+','\\1', env['DOCKER_CI_KEY'] = re.sub('^.+"DOCKER_CI_KEY".+?"(.+?)".+','\\1',
env['CONFIG_JSON'],flags=re.DOTALL) env['CONFIG_JSON'],flags=re.DOTALL)
DROPLET_NAME = env.get('DROPLET_NAME','docker-ci')
AWS_TAG = env.get('AWS_TAG','docker-ci') TIMEOUT = 120 # Seconds before droplet creation times out
AWS_KEY_NAME = 'dotcloud-dev' # Same as CONFIG_JSON['DOCKER_CI_PUB'] IMAGE_ID = 1004145 # Docker on Ubuntu 13.04
AWS_AMI = 'ami-d582d6bc' # Ubuntu 13.04 REGION_ID = 4 # New York 2
AWS_REGION = 'us-east-1' SIZE_ID = 62 # memory 2GB
AWS_TYPE = 'm1.small' DO_IMAGE_USER = 'root' # Image user on Digital Ocean
AWS_SEC_GROUPS = 'gateway' API_URL = 'https://api.digitalocean.com/'
AWS_IMAGE_USER = 'ubuntu'
DOCKER_PATH = '/go/src/github.com/dotcloud/docker' DOCKER_PATH = '/go/src/github.com/dotcloud/docker'
DOCKER_CI_PATH = '/docker-ci' DOCKER_CI_PATH = '/docker-ci'
CFG_PATH = '{}/buildbot'.format(DOCKER_CI_PATH) CFG_PATH = '{}/buildbot'.format(DOCKER_CI_PATH)
class AWS_EC2: class DigitalOcean():
'''Amazon EC2'''
def __init__(self, access_key, secret_key): def __init__(self, key, client):
'''Set default API parameters''' '''Set default API parameters'''
self.handler = EC2Connection(access_key, secret_key) self.key = key
def create_instance(self, tag, instance_type): self.client = client
reservation = self.handler.run_instances(**instance_type) self.api_url = API_URL
instance = reservation.instances[0]
sleep(10) def api(self, cmd_path, api_arg={}):
while instance.state != 'running': '''Make api call'''
sleep(5) api_arg.update({'api_key':self.key, 'client_id':self.client})
instance.update() resp = requests.get(self.api_url + cmd_path, params=api_arg).text
print "Instance state: %s" % (instance.state) resp = json.loads(resp)
instance.add_tag("Name",tag) if resp['status'] != 'OK':
print "instance %s done!" % (instance.id) raise Exception(resp['error_message'])
return instance.ip_address return resp
def get_instances(self):
return self.handler.get_all_instances() def droplet_data(self, name):
def get_tags(self): '''Get droplet data'''
return dict([(i.instances[0].id, i.instances[0].tags['Name']) data = self.api('droplets')
for i in self.handler.get_all_instances() if i.instances[0].tags]) data = [droplet for droplet in data['droplets']
def del_instance(self, instance_id): if droplet['name'] == name]
self.handler.terminate_instances(instance_ids=[instance_id]) return data[0] if data else {}
def json_fmt(data): def json_fmt(data):
@ -63,20 +62,36 @@ def json_fmt(data):
return json.dumps(data, sort_keys = True, indent = 2) return json.dumps(data, sort_keys = True, indent = 2)
# Create EC2 API handler do = DigitalOcean(env['DO_API_KEY'], env['DO_CLIENT_ID'])
ec2 = AWS_EC2(env['AWS_ACCESS_KEY'], env['AWS_SECRET_KEY'])
# Stop processing if AWS_TAG exists on EC2 # Get DROPLET_NAME data
if AWS_TAG in ec2.get_tags().values(): data = do.droplet_data(DROPLET_NAME)
print ('Instance: {} already deployed. Not further processing.'
.format(AWS_TAG)) # Stop processing if DROPLET_NAME exists on Digital Ocean
if data:
print ('Droplet: {} already deployed. Not further processing.'
.format(DROPLET_NAME))
exit(1) exit(1)
ip = ec2.create_instance(AWS_TAG, {'image_id':AWS_AMI, 'instance_type':AWS_TYPE, # Create droplet
'security_groups':[AWS_SEC_GROUPS], 'key_name':AWS_KEY_NAME}) do.api('droplets/new', {'name':DROPLET_NAME, 'region_id':REGION_ID,
'image_id':IMAGE_ID, 'size_id':SIZE_ID,
'ssh_key_ids':[env['DOCKER_KEY_ID']]})
# Wait 30 seconds for the machine to boot # Wait for droplet to be created.
sleep(30) start_time = datetime.now()
while (data.get('status','') != 'active' and (
datetime.now()-start_time).seconds < TIMEOUT):
data = do.droplet_data(DROPLET_NAME)
print data['status']
sleep(3)
# Wait for the machine to boot
sleep(15)
# Get droplet IP
ip = str(data['ip_address'])
print 'droplet: {} ip: {}'.format(DROPLET_NAME, ip)
# Create docker-ci ssh private key so docker-ci docker container can communicate # Create docker-ci ssh private key so docker-ci docker container can communicate
# with its EC2 instance # with its droplet
@ -86,7 +101,7 @@ os.chmod('/root/.ssh/id_rsa',0600)
open('/root/.ssh/config','w').write('StrictHostKeyChecking no\n') open('/root/.ssh/config','w').write('StrictHostKeyChecking no\n')
api.env.host_string = ip api.env.host_string = ip
api.env.user = AWS_IMAGE_USER api.env.user = DO_IMAGE_USER
api.env.key_filename = '/root/.ssh/id_rsa' api.env.key_filename = '/root/.ssh/id_rsa'
# Correct timezone # Correct timezone
@ -100,20 +115,17 @@ sudo("echo '{}' >> /root/.ssh/authorized_keys".format(env['DOCKER_CI_PUB']))
credentials = { credentials = {
'AWS_ACCESS_KEY': env['PKG_ACCESS_KEY'], 'AWS_ACCESS_KEY': env['PKG_ACCESS_KEY'],
'AWS_SECRET_KEY': env['PKG_SECRET_KEY'], 'AWS_SECRET_KEY': env['PKG_SECRET_KEY'],
'GPG_PASSPHRASE': env['PKG_GPG_PASSPHRASE'], 'GPG_PASSPHRASE': env['PKG_GPG_PASSPHRASE']}
'INDEX_AUTH': env['INDEX_AUTH']}
open(DOCKER_CI_PATH + '/nightlyrelease/release_credentials.json', 'w').write( open(DOCKER_CI_PATH + '/nightlyrelease/release_credentials.json', 'w').write(
base64.b64encode(json.dumps(credentials))) base64.b64encode(json.dumps(credentials)))
# Transfer docker # Transfer docker
sudo('mkdir -p ' + DOCKER_CI_PATH) sudo('mkdir -p ' + DOCKER_CI_PATH)
sudo('chown {}.{} {}'.format(AWS_IMAGE_USER, AWS_IMAGE_USER, DOCKER_CI_PATH)) sudo('chown {}.{} {}'.format(DO_IMAGE_USER, DO_IMAGE_USER, DOCKER_CI_PATH))
call('/usr/bin/rsync -aH {} {}@{}:{}'.format(DOCKER_CI_PATH, AWS_IMAGE_USER, ip, call('/usr/bin/rsync -aH {} {}@{}:{}'.format(DOCKER_CI_PATH, DO_IMAGE_USER, ip,
os.path.dirname(DOCKER_CI_PATH)), shell=True) os.path.dirname(DOCKER_CI_PATH)), shell=True)
# Install Docker and Buildbot dependencies # Install Docker and Buildbot dependencies
sudo('addgroup docker')
sudo('usermod -a -G docker ubuntu')
sudo('mkdir /mnt/docker; ln -s /mnt/docker /var/lib/docker') sudo('mkdir /mnt/docker; ln -s /mnt/docker /var/lib/docker')
sudo('wget -q -O - https://get.docker.io/gpg | apt-key add -') sudo('wget -q -O - https://get.docker.io/gpg | apt-key add -')
sudo('echo deb https://get.docker.io/ubuntu docker main >' sudo('echo deb https://get.docker.io/ubuntu docker main >'
@ -123,7 +135,7 @@ sudo('echo -e "deb http://archive.ubuntu.com/ubuntu raring main universe\n'
' > /etc/apt/sources.list; apt-get update') ' > /etc/apt/sources.list; apt-get update')
sudo('DEBIAN_FRONTEND=noninteractive apt-get install -q -y wget python-dev' sudo('DEBIAN_FRONTEND=noninteractive apt-get install -q -y wget python-dev'
' python-pip supervisor git mercurial linux-image-extra-$(uname -r)' ' python-pip supervisor git mercurial linux-image-extra-$(uname -r)'
' aufs-tools make libfontconfig libevent-dev') ' aufs-tools make libfontconfig libevent-dev libsqlite3-dev libssl-dev')
sudo('wget -O - https://go.googlecode.com/files/go1.1.2.linux-amd64.tar.gz | ' sudo('wget -O - https://go.googlecode.com/files/go1.1.2.linux-amd64.tar.gz | '
'tar -v -C /usr/local -xz; ln -s /usr/local/go/bin/go /usr/bin/go') 'tar -v -C /usr/local -xz; ln -s /usr/local/go/bin/go /usr/bin/go')
sudo('GOPATH=/go go get -d github.com/dotcloud/docker') sudo('GOPATH=/go go get -d github.com/dotcloud/docker')
@ -135,13 +147,13 @@ sudo('curl -s https://phantomjs.googlecode.com/files/'
'phantomjs-1.9.1-linux-x86_64.tar.bz2 | tar jx -C /usr/bin' 'phantomjs-1.9.1-linux-x86_64.tar.bz2 | tar jx -C /usr/bin'
' --strip-components=2 phantomjs-1.9.1-linux-x86_64/bin/phantomjs') ' --strip-components=2 phantomjs-1.9.1-linux-x86_64/bin/phantomjs')
# Preventively reboot docker-ci daily
sudo('ln -s /sbin/reboot /etc/cron.daily')
# Build docker-ci containers # Build docker-ci containers
sudo('cd {}; docker build -t docker .'.format(DOCKER_PATH)) sudo('cd {}; docker build -t docker .'.format(DOCKER_PATH))
sudo('cd {}; docker build -t docker-ci .'.format(DOCKER_CI_PATH))
sudo('cd {}/nightlyrelease; docker build -t dockerbuilder .'.format( sudo('cd {}/nightlyrelease; docker build -t dockerbuilder .'.format(
DOCKER_CI_PATH)) DOCKER_CI_PATH))
sudo('cd {}/registry-coverage; docker build -t registry_coverage .'.format(
DOCKER_CI_PATH))
# Download docker-ci testing container # Download docker-ci testing container
sudo('docker pull mzdaniel/test_docker') sudo('docker pull mzdaniel/test_docker')
@ -154,3 +166,6 @@ sudo('{0}/setup.sh root {0} {1} {2} {3} {4} {5} {6} {7} {8} {9} {10}'
env['SMTP_PWD'], env['EMAIL_RCP'], env['REGISTRY_USER'], env['SMTP_PWD'], env['EMAIL_RCP'], env['REGISTRY_USER'],
env['REGISTRY_PWD'], env['REGISTRY_BUCKET'], env['REGISTRY_ACCESS_KEY'], env['REGISTRY_PWD'], env['REGISTRY_BUCKET'], env['REGISTRY_ACCESS_KEY'],
env['REGISTRY_SECRET_KEY'])) env['REGISTRY_SECRET_KEY']))
# Preventively reboot docker-ci daily
sudo('ln -s /sbin/reboot /etc/cron.daily')

View file

@ -1,6 +1,6 @@
# VERSION: 0.3 # VERSION: 0.4
# DOCKER-VERSION 0.6.3 # DOCKER-VERSION 0.6.6
# AUTHOR: Daniel Mizyrycki <daniel@dotcloud.com> # AUTHOR: Daniel Mizyrycki <daniel@docker.com>
# DESCRIPTION: Testing docker PRs and commits on top of master using # DESCRIPTION: Testing docker PRs and commits on top of master using
# REFERENCES: This code reuses the excellent implementation of # REFERENCES: This code reuses the excellent implementation of
# Docker in Docker made by Jerome Petazzoni. # Docker in Docker made by Jerome Petazzoni.
@ -15,15 +15,10 @@
# TO_RUN: docker run -privileged test_docker hack/dind test_docker.sh [commit] [repo] [branch] # TO_RUN: docker run -privileged test_docker hack/dind test_docker.sh [commit] [repo] [branch]
from docker from docker
maintainer Daniel Mizyrycki <daniel@dotcloud.com> maintainer Daniel Mizyrycki <daniel@docker.com>
# Setup go environment. Extracted from /Dockerfile # Setup go in PATH. Extracted from /Dockerfile
env CGO_ENABLED 0 env PATH /usr/local/go/bin:$PATH
env GOROOT /goroot
env PATH $PATH:/goroot/bin
env GOPATH /go:/go/src/github.com/dotcloud/docker/vendor
volume /var/lib/docker
workdir /go/src/github.com/dotcloud/docker
# Add test_docker.sh # Add test_docker.sh
add test_docker.sh /usr/bin/test_docker.sh add test_docker.sh /usr/bin/test_docker.sh

View file

@ -8,31 +8,26 @@ BRANCH=${3-master}
# Compute test paths # Compute test paths
DOCKER_PATH=/go/src/github.com/dotcloud/docker DOCKER_PATH=/go/src/github.com/dotcloud/docker
# Timestamp
echo
date; echo
# Fetch latest master # Fetch latest master
cd /
rm -rf /go rm -rf /go
mkdir -p $DOCKER_PATH git clone -q -b master http://github.com/dotcloud/docker $DOCKER_PATH
cd $DOCKER_PATH cd $DOCKER_PATH
git init .
git fetch -q http://github.com/dotcloud/docker master
git reset --hard FETCH_HEAD
# Merge commit # Merge commit
#echo FIXME. Temporarily skip TestPrivilegedCanMount until DinD works reliable on AWS
git pull -q https://github.com/mzdaniel/docker.git dind-aws || exit 1
# Merge commit in top of master
git fetch -q "$REPO" "$BRANCH" git fetch -q "$REPO" "$BRANCH"
git merge --no-edit $COMMIT || exit 1 git merge --no-edit $COMMIT || exit 255
# Test commit # Test commit
go test -v; exit_status=$? ./hack/make.sh test; exit_status=$?
# Display load if test fails # Display load if test fails
if [ $exit_status -eq 1 ] ; then if [ $exit_status -ne 0 ] ; then
uptime; echo; free uptime; echo; free
fi fi
# Cleanup testing directory
rm -rf $BASE_PATH
exit $exit_status exit $exit_status

View file

@ -8,10 +8,12 @@ rm -rf docker-registry
# Setup the environment # Setup the environment
export SETTINGS_FLAVOR=test export SETTINGS_FLAVOR=test
export DOCKER_REGISTRY_CONFIG=config_test.yml export DOCKER_REGISTRY_CONFIG=config_test.yml
export PYTHONPATH=$(pwd)/docker-registry/test
# Get latest docker registry # Get latest docker registry
git clone -q https://github.com/dotcloud/docker-registry.git git clone -q https://github.com/dotcloud/docker-registry.git
cd docker-registry cd docker-registry
sed -Ei "s#(boto_bucket: ).+#\1_env:S3_BUCKET#" config_test.yml
# Get dependencies # Get dependencies
pip install -q -r requirements.txt pip install -q -r requirements.txt
@ -20,7 +22,6 @@ pip install -q tox
# Run registry tests # Run registry tests
tox || exit 1 tox || exit 1
export PYTHONPATH=$(pwd)/docker-registry
python -m unittest discover -p s3.py -s test || exit 1 python -m unittest discover -p s3.py -s test || exit 1
python -m unittest discover -p workflow.py -s test python -m unittest discover -p workflow.py -s test

View file

@ -1,20 +1,19 @@
# VERSION: 1.2 # VERSION: 1.6
# DOCKER-VERSION 0.6.3 # DOCKER-VERSION 0.6.6
# AUTHOR: Daniel Mizyrycki <daniel@dotcloud.com> # AUTHOR: Daniel Mizyrycki <daniel@docker.com>
# DESCRIPTION: Build docker nightly release using Docker in Docker. # DESCRIPTION: Build docker nightly release using Docker in Docker.
# REFERENCES: This code reuses the excellent implementation of docker in docker # REFERENCES: This code reuses the excellent implementation of docker in docker
# made by Jerome Petazzoni. https://github.com/jpetazzo/dind # made by Jerome Petazzoni. https://github.com/jpetazzo/dind
# COMMENTS: # COMMENTS:
# release_credentials.json is a base64 json encoded file containing: # release_credentials.json is a base64 json encoded file containing:
# { "AWS_ACCESS_KEY": "Test_docker_AWS_S3_bucket_id", # { "AWS_ACCESS_KEY": "Test_docker_AWS_S3_bucket_id",
# "AWS_SECRET_KEY='Test_docker_AWS_S3_bucket_key' # "AWS_SECRET_KEY": "Test_docker_AWS_S3_bucket_key",
# "GPG_PASSPHRASE='Test_docker_GPG_passphrase_signature' # "GPG_PASSPHRASE": "Test_docker_GPG_passphrase_signature" }
# "INDEX_AUTH='Encripted_index_authentication' }
# TO_BUILD: docker build -t dockerbuilder . # TO_BUILD: docker build -t dockerbuilder .
# TO_RELEASE: docker run -i -t -privileged -e AWS_S3_BUCKET="test.docker.io" dockerbuilder # TO_RELEASE: docker run -i -t -privileged -e AWS_S3_BUCKET="test.docker.io" dockerbuilder hack/dind dockerbuild.sh
from docker from docker
maintainer Daniel Mizyrycki <daniel@dotcloud.com> maintainer Daniel Mizyrycki <daniel@docker.com>
# Add docker dependencies and downloading packages # Add docker dependencies and downloading packages
run echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > /etc/apt/sources.list run echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > /etc/apt/sources.list
@ -24,11 +23,8 @@ run apt-get update; apt-get install -y -q wget python2.7
run wget -q -O /usr/bin/docker http://get.docker.io/builds/Linux/x86_64/docker-latest; chmod +x /usr/bin/docker run wget -q -O /usr/bin/docker http://get.docker.io/builds/Linux/x86_64/docker-latest; chmod +x /usr/bin/docker
# Add proto docker builder # Add proto docker builder
add ./dockerbuild /usr/bin/dockerbuild add ./dockerbuild.sh /usr/bin/dockerbuild.sh
run chmod +x /usr/bin/dockerbuild run chmod +x /usr/bin/dockerbuild.sh
# Add release credentials # Add release credentials
add ./release_credentials.json /root/release_credentials.json add ./release_credentials.json /root/release_credentials.json
# Launch build process in a container
cmd dockerbuild

View file

@ -1,50 +0,0 @@
#!/bin/bash
# Variables AWS_ACCESS_KEY, AWS_SECRET_KEY, GPG_PASSPHRASE and INDEX_AUTH
# are decoded from /root/release_credentials.json
# Variable AWS_S3_BUCKET is passed to the environment from docker run -e
# Enable debugging
set -x
# Fetch docker master branch
rm -rf /go/src/github.com/dotcloud/docker
cd /
git clone -q http://github.com/dotcloud/docker /go/src/github.com/dotcloud/docker
cd /go/src/github.com/dotcloud/docker
# Launch docker daemon using dind inside the container
./hack/dind /usr/bin/docker -d &
sleep 5
# Add an uncommitted change to generate a timestamped release
date > timestamp
# Build the docker package using /Dockerfile
docker build -t docker .
# Run Docker unittests binary and Ubuntu package
docker run -privileged docker hack/make.sh
exit_status=$?
# Display load if test fails
if [ $exit_status -eq 1 ] ; then
uptime; echo; free
exit 1
fi
# Commit binary and ubuntu bundles for release
docker commit -run '{"Env": ["PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin"], "WorkingDir": "/go/src/github.com/dotcloud/docker"}' $(docker ps -l -q) release
# Turn debug off to load credentials from the environment
set +x
eval $(cat /root/release_credentials.json | python -c '
import sys,json,base64;
d=json.loads(base64.b64decode(sys.stdin.read()));
exec("""for k in d: print "export {0}=\\"{1}\\"".format(k,d[k])""")')
set -x
# Push docker nightly
echo docker run -i -t -privileged -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=XXXXX -e AWS_SECRET_KEY=XXXXX -e GPG_PASSPHRASE=XXXXX release hack/release.sh
set +x
docker run -i -t -privileged -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=$AWS_ACCESS_KEY -e AWS_SECRET_KEY=$AWS_SECRET_KEY -e GPG_PASSPHRASE=$GPG_PASSPHRASE release hack/release.sh

View file

@ -0,0 +1,40 @@
#!/bin/bash
# Variables AWS_ACCESS_KEY, AWS_SECRET_KEY and GPG_PASSPHRASE are decoded
# from /root/release_credentials.json
# Variable AWS_S3_BUCKET is passed to the environment from docker run -e
# Turn debug off to load credentials from the environment
set +x
eval $(cat /root/release_credentials.json | python -c '
import sys,json,base64;
d=json.loads(base64.b64decode(sys.stdin.read()));
exec("""for k in d: print "export {0}=\\"{1}\\"".format(k,d[k])""")')
# Fetch docker master branch
set -x
cd /
rm -rf /go
git clone -q -b master http://github.com/dotcloud/docker /go/src/github.com/dotcloud/docker
cd /go/src/github.com/dotcloud/docker
# Launch docker daemon using dind inside the container
/usr/bin/docker version
/usr/bin/docker -d &
sleep 5
# Build Docker release container
docker build -t docker .
# Test docker and if everything works well, release
echo docker run -i -t -privileged -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=XXXXX -e AWS_SECRET_KEY=XXXXX -e GPG_PASSPHRASE=XXXXX docker hack/release.sh
set +x
docker run -privileged -i -t -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=$AWS_ACCESS_KEY -e AWS_SECRET_KEY=$AWS_SECRET_KEY -e GPG_PASSPHRASE=$GPG_PASSPHRASE docker hack/release.sh
exit_status=$?
# Display load if test fails
set -x
if [ $exit_status -ne 0 ] ; then
uptime; echo; free
exit 1
fi
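The credential handling above assumes /root/release_credentials.json holds a base64-encoded JSON object whose keys become exported shell variables. A minimal sketch of the same decode in Go, offered only to illustrate the format (not part of the diff):

    package main

    import (
    	"encoding/base64"
    	"encoding/json"
    	"fmt"
    	"os"
    )

    func main() {
    	// read the base64 blob written at deployment time
    	raw, err := os.ReadFile("/root/release_credentials.json")
    	if err != nil {
    		panic(err)
    	}
    	// the std decoder ignores a trailing newline, if any
    	dec, err := base64.StdEncoding.DecodeString(string(raw))
    	if err != nil {
    		panic(err)
    	}
    	// a flat string-to-string object: AWS_ACCESS_KEY, AWS_SECRET_KEY, GPG_PASSPHRASE
    	creds := map[string]string{}
    	if err := json.Unmarshal(dec, &creds); err != nil {
    		panic(err)
    	}
    	for k, v := range creds {
    		fmt.Printf("export %s=%q\n", k, v)
    	}
    }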

View file

@ -1 +0,0 @@
eyAiQVdTX0FDQ0VTU19LRVkiOiAiIiwKICAiQVdTX1NFQ1JFVF9LRVkiOiAiIiwKICAiR1BHX1BBU1NQSFJBU0UiOiAiIiwKICAiSU5ERVhfQVVUSCI6ICIiIH0=

View file

@ -0,0 +1,18 @@
# VERSION: 0.1
# DOCKER-VERSION 0.6.4
# AUTHOR: Daniel Mizyrycki <daniel@dotcloud.com>
# DESCRIPTION: Docker registry coverage
# COMMENTS: Add registry coverage into the docker-ci image
# TO_BUILD: docker build -t registry_coverage .
# TO_RUN: docker run registry_coverage
from docker-ci
maintainer Daniel Mizyrycki <daniel@dotcloud.com>
# Add registry_coverage.sh and dependencies
run pip install coverage flask pyyaml requests simplejson python-glanceclient \
blinker redis boto gevent rsa mock
add registry_coverage.sh /usr/bin/registry_coverage.sh
run chmod +x /usr/bin/registry_coverage.sh
cmd "/usr/bin/registry_coverage.sh"

View file

@ -0,0 +1,18 @@
#!/bin/bash
set -x
# Setup the environment
REGISTRY_PATH=/data/docker-registry
export SETTINGS_FLAVOR=test
export DOCKER_REGISTRY_CONFIG=config_test.yml
export PYTHONPATH=$REGISTRY_PATH/test
# Fetch latest docker-registry master
rm -rf $REGISTRY_PATH
git clone https://github.com/dotcloud/docker-registry -b master $REGISTRY_PATH
cd $REGISTRY_PATH
# Generate coverage
coverage run -m unittest discover test || exit 1
coverage report --include='./*' --omit='./test/*'

View file

@ -34,7 +34,7 @@ env['DOCKER_CI_KEY'] = open(env['DOCKER_CI_KEY_PATH']).read()
DROPLET_NAME = env.get('DROPLET_NAME','report') DROPLET_NAME = env.get('DROPLET_NAME','report')
TIMEOUT = 120 # Seconds before droplet creation times out TIMEOUT = 120 # Seconds before droplet creation times out
IMAGE_ID = 894856 # Docker on Ubuntu 13.04 IMAGE_ID = 1004145 # Docker on Ubuntu 13.04
REGION_ID = 4 # New York 2 REGION_ID = 4 # New York 2
SIZE_ID = 66 # memory 512MB SIZE_ID = 66 # memory 512MB
DO_IMAGE_USER = 'root' # Image user on Digital Ocean DO_IMAGE_USER = 'root' # Image user on Digital Ocean

View file

@ -22,7 +22,12 @@ bundle_test() {
for test_dir in $(find_test_dirs); do ( for test_dir in $(find_test_dirs); do (
set -x set -x
cd $test_dir cd $test_dir
# Install packages that are dependencies of the tests.
# Note: Does not run the tests.
go test -i -ldflags "$LDFLAGS" $BUILDFLAGS go test -i -ldflags "$LDFLAGS" $BUILDFLAGS
# Run the tests with the optional $TESTFLAGS.
export TEST_DOCKERINIT_PATH=$DEST/../dynbinary/dockerinit-$VERSION export TEST_DOCKERINIT_PATH=$DEST/../dynbinary/dockerinit-$VERSION
go test -v -ldflags "$LDFLAGS -X github.com/dotcloud/docker/utils.INITSHA1 \"$DOCKER_INITSHA1\"" $BUILDFLAGS $TESTFLAGS go test -v -ldflags "$LDFLAGS -X github.com/dotcloud/docker/utils.INITSHA1 \"$DOCKER_INITSHA1\"" $BUILDFLAGS $TESTFLAGS
) done ) done

View file

@ -16,7 +16,12 @@ bundle_test() {
for test_dir in $(find_test_dirs); do ( for test_dir in $(find_test_dirs); do (
set -x set -x
cd $test_dir cd $test_dir
# Install packages that are dependencies of the tests.
# Note: Does not run the tests.
go test -i -ldflags "$LDFLAGS $LDFLAGS_STATIC" $BUILDFLAGS go test -i -ldflags "$LDFLAGS $LDFLAGS_STATIC" $BUILDFLAGS
# Run the tests with the optional $TESTFLAGS.
go test -v -ldflags "$LDFLAGS $LDFLAGS_STATIC" $BUILDFLAGS $TESTFLAGS go test -v -ldflags "$LDFLAGS $LDFLAGS_STATIC" $BUILDFLAGS $TESTFLAGS
) done ) done
} 2>&1 | tee $DEST/test.log } 2>&1 | tee $DEST/test.log

View file

@ -10,7 +10,7 @@ fi
PACKAGE_ARCHITECTURE="$(dpkg-architecture -qDEB_HOST_ARCH)" PACKAGE_ARCHITECTURE="$(dpkg-architecture -qDEB_HOST_ARCH)"
PACKAGE_URL="http://www.docker.io/" PACKAGE_URL="http://www.docker.io/"
PACKAGE_MAINTAINER="docker@dotcloud.com" PACKAGE_MAINTAINER="docker@dotcloud.com"
PACKAGE_DESCRIPTION="lxc-docker is a Linux container runtime PACKAGE_DESCRIPTION="Linux container runtime
Docker complements LXC with a high-level API which operates at the process Docker complements LXC with a high-level API which operates at the process
level. It runs unix processes with strong guarantees of isolation and level. It runs unix processes with strong guarantees of isolation and
repeatability across servers. repeatability across servers.
@ -37,27 +37,51 @@ bundle_ubuntu() {
# This will fail if the binary bundle hasn't been built # This will fail if the binary bundle hasn't been built
cp $DEST/../binary/docker-$VERSION $DIR/usr/bin/docker cp $DEST/../binary/docker-$VERSION $DIR/usr/bin/docker
# Generate postinst/prerm scripts # Generate postinst/prerm/postrm scripts
cat >/tmp/postinst <<'EOF' cat > /tmp/postinst <<'EOF'
#!/bin/sh #!/bin/sh
service docker stop || true set -e
grep -q '^docker:' /etc/group || groupadd --system docker || true set -u
service docker start
EOF
cat >/tmp/prerm <<'EOF'
#!/bin/sh
service docker stop || true
case "$1" in getent group docker > /dev/null || groupadd --system docker || true
purge|remove|abort-install)
groupdel docker || true update-rc.d docker defaults > /dev/null || true
;; if [ -n "$2" ]; then
_dh_action=restart
upgrade|failed-upgrade|abort-upgrade) else
# don't touch docker group _dh_action=start
;; fi
esac service docker $_dh_action 2>/dev/null || true
#DEBHELPER#
EOF EOF
cat > /tmp/prerm <<'EOF'
#!/bin/sh
set -e
set -u
service docker stop 2>/dev/null || true
#DEBHELPER#
EOF
cat > /tmp/postrm <<'EOF'
#!/bin/sh
set -e
set -u
if [ "$1" = "purge" ] ; then
update-rc.d docker remove > /dev/null || true
fi
# In case this system is running systemd, we make systemd reload the unit files
# to pick up changes.
if [ -d /run/systemd/system ] ; then
systemctl --system daemon-reload > /dev/null || true
fi
#DEBHELPER#
EOF
# TODO swaths of these were borrowed from debhelper's auto-inserted stuff, because we're still using fpm - we need to use debhelper instead, and somehow reconcile Ubuntu that way
chmod +x /tmp/postinst /tmp/prerm chmod +x /tmp/postinst /tmp/prerm /tmp/postrm
( (
@ -66,6 +90,7 @@ EOF
--name lxc-docker-$VERSION --version $PKGVERSION \ --name lxc-docker-$VERSION --version $PKGVERSION \
--after-install /tmp/postinst \ --after-install /tmp/postinst \
--before-remove /tmp/prerm \ --before-remove /tmp/prerm \
--after-remove /tmp/postrm \
--architecture "$PACKAGE_ARCHITECTURE" \ --architecture "$PACKAGE_ARCHITECTURE" \
--prefix / \ --prefix / \
--depends lxc \ --depends lxc \
@ -82,6 +107,8 @@ EOF
--vendor "$PACKAGE_VENDOR" \ --vendor "$PACKAGE_VENDOR" \
--config-files /etc/init/docker.conf \ --config-files /etc/init/docker.conf \
--config-files /etc/init.d/docker \ --config-files /etc/init.d/docker \
--config-files /etc/default/docker \
--deb-compression xz \
-t deb . -t deb .
mkdir empty mkdir empty
fpm -s dir -C empty \ fpm -s dir -C empty \
@ -92,7 +119,12 @@ EOF
--maintainer "$PACKAGE_MAINTAINER" \ --maintainer "$PACKAGE_MAINTAINER" \
--url "$PACKAGE_URL" \ --url "$PACKAGE_URL" \
--vendor "$PACKAGE_VENDOR" \ --vendor "$PACKAGE_VENDOR" \
--config-files /etc/init/docker.conf \
--config-files /etc/init.d/docker \
--config-files /etc/default/docker \
--deb-compression xz \
-t deb . -t deb .
# note: the --config-files lines have to be duplicated to stop overwrite on package upgrade (since we have to use this funky virtual package)
) )
} }

View file

@ -97,7 +97,7 @@ write_to_s3() {
DEST=$1 DEST=$1
F=`mktemp` F=`mktemp`
cat > $F cat > $F
s3cmd --acl-public put $F $DEST s3cmd --acl-public --mime-type='text/plain' put $F $DEST
rm -f $F rm -f $F
} }
@ -107,14 +107,14 @@ s3_url() {
echo "https://$BUCKET" echo "https://$BUCKET"
;; ;;
*) *)
echo "http://$BUCKET.s3.amazonaws.com" s3cmd ws-info s3://$BUCKET | awk -v 'FS=: +' '/http:\/\/'$BUCKET'/ { gsub(/\/+$/, "", $2); print $2 }'
;; ;;
esac esac
} }
# Upload the 'ubuntu' bundle to S3: # Upload the 'ubuntu' bundle to S3:
# 1. A full APT repository is published at $BUCKET/ubuntu/ # 1. A full APT repository is published at $BUCKET/ubuntu/
# 2. Instructions for using the APT repository are uploaded at $BUCKET/ubuntu/info # 2. Instructions for using the APT repository are uploaded at $BUCKET/ubuntu/index
release_ubuntu() { release_ubuntu() {
[ -e bundles/$VERSION/ubuntu ] || { [ -e bundles/$VERSION/ubuntu ] || {
echo >&2 './hack/make.sh must be run before release_ubuntu' echo >&2 './hack/make.sh must be run before release_ubuntu'
@ -168,7 +168,7 @@ EOF
# Upload repo # Upload repo
s3cmd --acl-public sync $APTDIR/ s3://$BUCKET/ubuntu/ s3cmd --acl-public sync $APTDIR/ s3://$BUCKET/ubuntu/
cat <<EOF | write_to_s3 s3://$BUCKET/ubuntu/info cat <<EOF | write_to_s3 s3://$BUCKET/ubuntu/index
# Add the repository to your APT sources # Add the repository to your APT sources
echo deb $(s3_url)/ubuntu docker main > /etc/apt/sources.list.d/docker.list echo deb $(s3_url)/ubuntu docker main > /etc/apt/sources.list.d/docker.list
# Then import the repository key # Then import the repository key
@ -180,7 +180,12 @@ apt-get update ; apt-get install -y lxc-docker
# Alternatively, just use the curl-able install.sh script provided at $(s3_url) # Alternatively, just use the curl-able install.sh script provided at $(s3_url)
# #
EOF EOF
echo "APT repository uploaded. Instructions available at $(s3_url)/ubuntu/info"
# Add redirect at /ubuntu/info for URL-backwards-compatibility
rm -rf /tmp/emptyfile && touch /tmp/emptyfile
s3cmd --acl-public --add-header='x-amz-website-redirect-location:/ubuntu/' --mime-type='text/plain' put /tmp/emptyfile s3://$BUCKET/ubuntu/info
echo "APT repository uploaded. Instructions available at $(s3_url)/ubuntu"
} }
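The empty /ubuntu/info object relies on S3's x-amz-website-redirect-location header, which is honored only when the bucket is fetched through its website endpoint. A quick way to check the redirect from Go; the endpoint URL below is a made-up example, not a value from the script:

    package main

    import (
    	"fmt"
    	"net/http"
    )

    func main() {
    	// stop at the first response so the redirect itself is visible
    	client := &http.Client{
    		CheckRedirect: func(req *http.Request, via []*http.Request) error {
    			return http.ErrUseLastResponse
    		},
    	}
    	// hypothetical website endpoint for the bucket
    	resp, err := client.Head("http://get.docker.io.s3-website-us-east-1.amazonaws.com/ubuntu/info")
    	if err != nil {
    		panic(err)
    	}
    	defer resp.Body.Close()
    	// expect a 301 pointing at /ubuntu/
    	fmt.Println(resp.StatusCode, resp.Header.Get("Location"))
    }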
# Upload a static binary to S3 # Upload a static binary to S3
@ -189,14 +194,20 @@ release_binary() {
echo >&2 './hack/make.sh must be run before release_binary' echo >&2 './hack/make.sh must be run before release_binary'
exit 1 exit 1
} }
S3DIR=s3://$BUCKET/builds/Linux/x86_64 S3DIR=s3://$BUCKET/builds/Linux/x86_64
s3cmd --acl-public put bundles/$VERSION/binary/docker-$VERSION $S3DIR/docker-$VERSION s3cmd --acl-public put bundles/$VERSION/binary/docker-$VERSION $S3DIR/docker-$VERSION
cat <<EOF | write_to_s3 s3://$BUCKET/builds/info cat <<EOF | write_to_s3 s3://$BUCKET/builds/index
# To install, run the following command as root: # To install, run the following command as root:
curl -O $(s3_url)/builds/Linux/x86_64/docker-$VERSION && chmod +x docker-$VERSION && sudo mv docker-$VERSION /usr/local/bin/docker curl -O $(s3_url)/builds/Linux/x86_64/docker-$VERSION && chmod +x docker-$VERSION && sudo mv docker-$VERSION /usr/local/bin/docker
# Then start docker in daemon mode: # Then start docker in daemon mode:
sudo /usr/local/bin/docker -d sudo /usr/local/bin/docker -d
EOF EOF
# Add redirect at /builds/info for URL-backwards-compatibility
rm -rf /tmp/emptyfile && touch /tmp/emptyfile
s3cmd --acl-public --add-header='x-amz-website-redirect-location:/builds/' --mime-type='text/plain' put /tmp/emptyfile s3://$BUCKET/builds/info
if [ -z "$NOLATEST" ]; then if [ -z "$NOLATEST" ]; then
echo "Copying docker-$VERSION to docker-latest" echo "Copying docker-$VERSION to docker-latest"
s3cmd --acl-public cp $S3DIR/docker-$VERSION $S3DIR/docker-latest s3cmd --acl-public cp $S3DIR/docker-$VERSION $S3DIR/docker-latest

View file

@ -134,10 +134,6 @@ func (image *Image) TarLayer(compression archive.Compression) (archive.Archive,
return archive.Tar(layerPath, compression) return archive.Tar(layerPath, compression)
} }
func (image *Image) ShortID() string {
return utils.TruncateID(image.ID)
}
func ValidateID(id string) error { func ValidateID(id string) error {
if id == "" { if id == "" {
return fmt.Errorf("Image id can't be empty") return fmt.Errorf("Image id can't be empty")

View file

@ -55,9 +55,16 @@ func RemoveExistingChain(name string) error {
} }
func (c *Chain) Forward(action Action, ip net.IP, port int, proto, dest_addr string, dest_port int) error { func (c *Chain) Forward(action Action, ip net.IP, port int, proto, dest_addr string, dest_port int) error {
daddr := ip.String()
if ip.IsUnspecified() {
// iptables interprets "0.0.0.0" as "0.0.0.0/32", whereas we
// want "0.0.0.0/0". "0/0" is correctly interpreted as "any
// value" by both iptables and ip6tables.
daddr = "0/0"
}
if output, err := Raw("-t", "nat", fmt.Sprint(action), c.Name, if output, err := Raw("-t", "nat", fmt.Sprint(action), c.Name,
"-p", proto, "-p", proto,
"-d", ip.String(), "-d", daddr,
"--dport", strconv.Itoa(port), "--dport", strconv.Itoa(port),
"!", "-i", c.Bridge, "!", "-i", c.Bridge,
"-j", "DNAT", "-j", "DNAT",

View file

@ -168,12 +168,28 @@ func CreateBridgeIface(config *DaemonConfig) error {
} }
if config.EnableIptables { if config.EnableIptables {
// Enable NAT
if output, err := iptables.Raw("-t", "nat", "-A", "POSTROUTING", "-s", ifaceAddr, if output, err := iptables.Raw("-t", "nat", "-A", "POSTROUTING", "-s", ifaceAddr,
"!", "-d", ifaceAddr, "-j", "MASQUERADE"); err != nil { "!", "-d", ifaceAddr, "-j", "MASQUERADE"); err != nil {
return fmt.Errorf("Unable to enable network bridge NAT: %s", err) return fmt.Errorf("Unable to enable network bridge NAT: %s", err)
} else if len(output) != 0 { } else if len(output) != 0 {
return fmt.Errorf("Error iptables postrouting: %s", output) return fmt.Errorf("Error iptables postrouting: %s", output)
} }
// Accept incoming packets for existing connections
if output, err := iptables.Raw("-I", "FORWARD", "-o", config.BridgeIface, "-m", "conntrack", "--ctstate", "RELATED,ESTABLISHED", "-j", "ACCEPT"); err != nil {
return fmt.Errorf("Unable to allow incoming packets: %s", err)
} else if len(output) != 0 {
return fmt.Errorf("Error iptables allow incoming: %s", output)
}
// Accept all non-intercontainer outgoing packets
if output, err := iptables.Raw("-I", "FORWARD", "-i", config.BridgeIface, "!", "-o", config.BridgeIface, "-j", "ACCEPT"); err != nil {
return fmt.Errorf("Unable to allow outgoing packets: %s", err)
} else if len(output) != 0 {
return fmt.Errorf("Error iptables allow outgoing: %s", output)
}
} }
return nil return nil
} }
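Spelled out as plain iptables invocations, CreateBridgeIface now installs the pre-existing MASQUERADE rule plus the two new FORWARD rules. The bridge name and subnet in this sketch are illustrative stand-ins, not values from the diff:

    package main

    import (
    	"fmt"
    	"strings"
    )

    func main() {
    	// example values; the real ones come from DaemonConfig
    	bridge, subnet := "docker0", "172.17.0.0/16"
    	rules := [][]string{
    		// NAT traffic leaving the subnet for a non-subnet destination
    		{"-t", "nat", "-A", "POSTROUTING", "-s", subnet, "!", "-d", subnet, "-j", "MASQUERADE"},
    		// let replies on established connections back in
    		{"-I", "FORWARD", "-o", bridge, "-m", "conntrack", "--ctstate", "RELATED,ESTABLISHED", "-j", "ACCEPT"},
    		// allow outbound traffic that is not container-to-container
    		{"-I", "FORWARD", "-i", bridge, "!", "-o", bridge, "-j", "ACCEPT"},
    	}
    	for _, r := range rules {
    		fmt.Println("iptables " + strings.Join(r, " "))
    	}
    }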
@ -680,20 +696,30 @@ func newNetworkManager(config *DaemonConfig) (*NetworkManager, error) {
// Configure iptables for link support // Configure iptables for link support
if config.EnableIptables { if config.EnableIptables {
args := []string{"FORWARD", "-i", config.BridgeIface, "-o", config.BridgeIface, "-j", "DROP"} args := []string{"FORWARD", "-i", config.BridgeIface, "-o", config.BridgeIface, "-j"}
acceptArgs := append(args, "ACCEPT")
dropArgs := append(args, "DROP")
if !config.InterContainerCommunication { if !config.InterContainerCommunication {
if !iptables.Exists(args...) { iptables.Raw(append([]string{"-D"}, acceptArgs...)...)
if !iptables.Exists(dropArgs...) {
utils.Debugf("Disable inter-container communication") utils.Debugf("Disable inter-container communication")
if output, err := iptables.Raw(append([]string{"-A"}, args...)...); err != nil { if output, err := iptables.Raw(append([]string{"-I"}, dropArgs...)...); err != nil {
return nil, fmt.Errorf("Unable to prevent intercontainer communication: %s", err) return nil, fmt.Errorf("Unable to prevent intercontainer communication: %s", err)
} else if len(output) != 0 { } else if len(output) != 0 {
return nil, fmt.Errorf("Error enabling iptables: %s", output) return nil, fmt.Errorf("Error disabling intercontainer communication: %s", output)
} }
} }
} else { } else {
utils.Debugf("Enable inter-container communication") iptables.Raw(append([]string{"-D"}, dropArgs...)...)
iptables.Raw(append([]string{"-D"}, args...)...) if !iptables.Exists(acceptArgs...) {
utils.Debugf("Enable inter-container communication")
if output, err := iptables.Raw(append([]string{"-I"}, acceptArgs...)...); err != nil {
return nil, fmt.Errorf("Unable to allow intercontainer communication: %s", err)
} else if len(output) != 0 {
return nil, fmt.Errorf("Error enabling intercontainer communication: %s", output)
}
}
} }
} }
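The reworked toggle is delete-then-insert: unconditionally remove the opposite rule, ignoring the error when it is absent, then insert the wanted rule only if iptables.Exists reports it missing, so repeated daemon starts don't stack duplicates. A condensed sketch of the pattern, with exists and raw standing in for iptables.Exists and iptables.Raw:

    package main

    import "fmt"

    // ensureRule flips a FORWARD policy the way newNetworkManager does:
    // remove the opposite rule (best effort), then insert the wanted one
    // only when it is not already present.
    func ensureRule(wanted, opposite []string,
    	exists func(args ...string) bool,
    	raw func(args ...string) ([]byte, error)) error {
    	raw(append([]string{"-D"}, opposite...)...) // ignore "no such rule"
    	if exists(wanted...) {
    		return nil
    	}
    	output, err := raw(append([]string{"-I"}, wanted...)...)
    	if err != nil {
    		return err
    	}
    	if len(output) != 0 {
    		return fmt.Errorf("iptables: %s", output)
    	}
    	return nil
    }

    func main() {
    	accept := []string{"FORWARD", "-i", "docker0", "-o", "docker0", "-j", "ACCEPT"}
    	drop := []string{"FORWARD", "-i", "docker0", "-o", "docker0", "-j", "DROP"}
    	// fake backends so the sketch runs anywhere
    	exists := func(args ...string) bool { return false }
    	raw := func(args ...string) ([]byte, error) { fmt.Println("iptables", args); return nil, nil }
    	if err := ensureRule(drop, accept, exists, raw); err != nil {
    		fmt.Println(err)
    	}
    }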

View file

@ -186,6 +186,7 @@ func (runtime *Runtime) Register(container *Container) error {
if !container.State.Running { if !container.State.Running {
close(container.waitLock) close(container.waitLock)
} else if !nomonitor { } else if !nomonitor {
container.allocateNetwork()
go container.monitor() go container.monitor()
} }
return nil return nil
@ -195,7 +196,7 @@ func (runtime *Runtime) ensureName(container *Container) error {
if container.Name == "" { if container.Name == "" {
name, err := generateRandomName(runtime) name, err := generateRandomName(runtime)
if err != nil { if err != nil {
name = container.ShortID() name = utils.TruncateID(container.ID)
} }
container.Name = name container.Name = name
@ -298,7 +299,7 @@ func (runtime *Runtime) restore() error {
// Try to set the default name for a container if it exists prior to links // Try to set the default name for a container if it exists prior to links
container.Name, err = generateRandomName(runtime) container.Name, err = generateRandomName(runtime)
if err != nil { if err != nil {
container.Name = container.ShortID() container.Name = utils.TruncateID(container.ID)
} }
if _, err := runtime.containerGraph.Set(container.Name, container.ID); err != nil { if _, err := runtime.containerGraph.Set(container.Name, container.ID); err != nil {
@ -506,32 +507,7 @@ func (runtime *Runtime) Create(config *Config, name string) (*Container, []strin
return nil, nil, err return nil, nil, err
} }
// Step 3: if hostname, build hostname and hosts files // Step 3: register the container
container.HostnamePath = path.Join(container.root, "hostname")
ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644)
hostsContent := []byte(`
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
`)
container.HostsPath = path.Join(container.root, "hosts")
if container.Config.Domainname != "" {
hostsContent = append([]byte(fmt.Sprintf("::1\t\t%s.%s %s\n", container.Config.Hostname, container.Config.Domainname, container.Config.Hostname)), hostsContent...)
hostsContent = append([]byte(fmt.Sprintf("127.0.0.1\t%s.%s %s\n", container.Config.Hostname, container.Config.Domainname, container.Config.Hostname)), hostsContent...)
} else {
hostsContent = append([]byte(fmt.Sprintf("::1\t\t%s\n", container.Config.Hostname)), hostsContent...)
hostsContent = append([]byte(fmt.Sprintf("127.0.0.1\t%s\n", container.Config.Hostname)), hostsContent...)
}
ioutil.WriteFile(container.HostsPath, hostsContent, 0644)
// Step 4: register the container
if err := runtime.Register(container); err != nil { if err := runtime.Register(container); err != nil {
return nil, nil, err return nil, nil, err
} }

View file

@ -3,11 +3,13 @@ package docker
import ( import (
"bytes" "bytes"
"fmt" "fmt"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/sysinit" "github.com/dotcloud/docker/sysinit"
"github.com/dotcloud/docker/utils" "github.com/dotcloud/docker/utils"
"io" "io"
"log" "log"
"net" "net"
"net/url"
"os" "os"
"path/filepath" "path/filepath"
"runtime" "runtime"
@ -122,22 +124,19 @@ func init() {
} }
func setupBaseImage() { func setupBaseImage() {
config := &DaemonConfig{ eng, err := engine.New(unitTestStoreBase)
Root: unitTestStoreBase,
AutoRestart: false,
BridgeIface: unitTestNetworkBridge,
}
runtime, err := NewRuntimeFromDirectory(config)
if err != nil { if err != nil {
log.Fatalf("Can't initialize engine at %s: %s", unitTestStoreBase, err)
}
job := eng.Job("initapi")
job.Setenv("Root", unitTestStoreBase)
job.SetenvBool("Autorestart", false)
job.Setenv("BridgeIface", unitTestNetworkBridge)
if err := job.Run(); err != nil {
log.Fatalf("Unable to create a runtime for tests:", err) log.Fatalf("Unable to create a runtime for tests:", err)
} }
srv := mkServerFromEngine(eng, log.New(os.Stderr, "", 0))
// Create the "Server" runtime := srv.runtime
srv := &Server{
runtime: runtime,
pullingPool: make(map[string]struct{}),
pushingPool: make(map[string]struct{}),
}
// If the unit test is not found, try to download it. // If the unit test is not found, try to download it.
if img, err := runtime.repositories.LookupImage(unitTestImageName); err != nil || img.ID != unitTestImageID { if img, err := runtime.repositories.LookupImage(unitTestImageName); err != nil || img.ID != unitTestImageID {
@ -153,18 +152,22 @@ func spawnGlobalDaemon() {
utils.Debugf("Global runtime already exists. Skipping.") utils.Debugf("Global runtime already exists. Skipping.")
return return
} }
globalRuntime = mkRuntime(log.New(os.Stderr, "", 0)) t := log.New(os.Stderr, "", 0)
srv := &Server{ eng := NewTestEngine(t)
runtime: globalRuntime, srv := mkServerFromEngine(eng, t)
pullingPool: make(map[string]struct{}), globalRuntime = srv.runtime
pushingPool: make(map[string]struct{}),
}
// Spawn a Daemon // Spawn a Daemon
go func() { go func() {
utils.Debugf("Spawning global daemon for integration tests") utils.Debugf("Spawning global daemon for integration tests")
if err := ListenAndServe(testDaemonProto, testDaemonAddr, srv, os.Getenv("DEBUG") != ""); err != nil { listenURL := &url.URL{
log.Fatalf("Unable to spawn the test daemon:", err) Scheme: testDaemonProto,
Host: testDaemonAddr,
}
job := eng.Job("serveapi", listenURL.String())
job.SetenvBool("Logging", os.Getenv("DEBUG") != "")
if err := job.Run(); err != nil {
log.Fatalf("Unable to spawn the test daemon: %s", err)
} }
}() }()
// Give some time to ListenAndServe to actually start
@ -184,7 +187,7 @@ func GetTestImage(runtime *Runtime) *Image {
return image return image
} }
} }
log.Fatalf("Test image %v not found", unitTestImageID) log.Fatalf("Test image %v not found in %s: %s", unitTestImageID, runtime.graph.Root, imgs)
return nil return nil
} }
@ -646,20 +649,17 @@ func TestReloadContainerLinks(t *testing.T) {
} }
func TestDefaultContainerName(t *testing.T) { func TestDefaultContainerName(t *testing.T) {
runtime := mkRuntime(t) eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
runtime := srv.runtime
defer nuke(runtime) defer nuke(runtime)
srv := &Server{runtime: runtime}
config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil) config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
shortId, _, err := srv.ContainerCreate(config, "some_name") container := runtime.Get(createNamedTestContainer(eng, config, t, "some_name"))
if err != nil {
t.Fatal(err)
}
container := runtime.Get(shortId)
containerID := container.ID containerID := container.ID
if container.Name != "/some_name" { if container.Name != "/some_name" {
@ -683,20 +683,17 @@ func TestDefaultContainerName(t *testing.T) {
} }
func TestRandomContainerName(t *testing.T) { func TestRandomContainerName(t *testing.T) {
runtime := mkRuntime(t) eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
runtime := srv.runtime
defer nuke(runtime) defer nuke(runtime)
srv := &Server{runtime: runtime}
config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil) config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
shortId, _, err := srv.ContainerCreate(config, "") container := runtime.Get(createTestContainer(eng, config, t))
if err != nil {
t.Fatal(err)
}
container := runtime.Get(shortId)
containerID := container.ID containerID := container.ID
if container.Name == "" { if container.Name == "" {
@ -720,20 +717,17 @@ func TestRandomContainerName(t *testing.T) {
} }
func TestLinkChildContainer(t *testing.T) { func TestLinkChildContainer(t *testing.T) {
runtime := mkRuntime(t) eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
runtime := srv.runtime
defer nuke(runtime) defer nuke(runtime)
srv := &Server{runtime: runtime}
config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil) config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
shortId, _, err := srv.ContainerCreate(config, "/webapp") container := runtime.Get(createNamedTestContainer(eng, config, t, "/webapp"))
if err != nil {
t.Fatal(err)
}
container := runtime.Get(shortId)
webapp, err := runtime.GetByName("/webapp") webapp, err := runtime.GetByName("/webapp")
if err != nil { if err != nil {
@ -749,12 +743,7 @@ func TestLinkChildContainer(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
shortId, _, err = srv.ContainerCreate(config, "") childContainer := runtime.Get(createTestContainer(eng, config, t))
if err != nil {
t.Fatal(err)
}
childContainer := runtime.Get(shortId)
if err := runtime.RegisterLink(webapp, childContainer, "db"); err != nil { if err := runtime.RegisterLink(webapp, childContainer, "db"); err != nil {
t.Fatal(err) t.Fatal(err)
@ -771,20 +760,17 @@ func TestLinkChildContainer(t *testing.T) {
} }
func TestGetAllChildren(t *testing.T) { func TestGetAllChildren(t *testing.T) {
runtime := mkRuntime(t) eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
runtime := srv.runtime
defer nuke(runtime) defer nuke(runtime)
srv := &Server{runtime: runtime}
config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil) config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
shortId, _, err := srv.ContainerCreate(config, "/webapp") container := runtime.Get(createNamedTestContainer(eng, config, t, "/webapp"))
if err != nil {
t.Fatal(err)
}
container := runtime.Get(shortId)
webapp, err := runtime.GetByName("/webapp") webapp, err := runtime.GetByName("/webapp")
if err != nil { if err != nil {
@ -800,12 +786,7 @@ func TestGetAllChildren(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
shortId, _, err = srv.ContainerCreate(config, "") childContainer := runtime.Get(createTestContainer(eng, config, t))
if err != nil {
t.Fatal(err)
}
childContainer := runtime.Get(shortId)
if err := runtime.RegisterLink(webapp, childContainer, "db"); err != nil { if err := runtime.RegisterLink(webapp, childContainer, "db"); err != nil {
t.Fatal(err) t.Fatal(err)

205
server.go
View file

@ -33,30 +33,25 @@ func (srv *Server) Close() error {
} }
func init() { func init() {
engine.Register("serveapi", JobServeApi) engine.Register("initapi", jobInitApi)
} }
func JobServeApi(job *engine.Job) string { // jobInitApi runs the remote api server `srv` as a daemon,
srv, err := NewServer(ConfigFromJob(job)) // Only one api server can run at the same time - this is enforced by a pidfile.
// The signals SIGINT, SIGKILL and SIGTERM are intercepted for cleanup.
func jobInitApi(job *engine.Job) string {
job.Logf("Creating server")
srv, err := NewServer(job.Eng, ConfigFromJob(job))
if err != nil { if err != nil {
return err.Error() return err.Error()
} }
defer srv.Close() if srv.runtime.config.Pidfile != "" {
if err := srv.Daemon(); err != nil { job.Logf("Creating pidfile")
return err.Error() if err := utils.CreatePidFile(srv.runtime.config.Pidfile); err != nil {
log.Fatal(err)
}
} }
return "0" job.Logf("Setting up signal traps")
}
// Daemon runs the remote api server `srv` as a daemon,
// Only one api server can run at the same time - this is enforced by a pidfile.
// The signals SIGINT, SIGKILL and SIGTERM are intercepted for cleanup.
func (srv *Server) Daemon() error {
if err := utils.CreatePidFile(srv.runtime.config.Pidfile); err != nil {
log.Fatal(err)
}
defer utils.RemovePidFile(srv.runtime.config.Pidfile)
c := make(chan os.Signal, 1) c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt, os.Kill, os.Signal(syscall.SIGTERM)) signal.Notify(c, os.Interrupt, os.Kill, os.Signal(syscall.SIGTERM))
go func() { go func() {
@ -66,8 +61,21 @@ func (srv *Server) Daemon() error {
srv.Close() srv.Close()
os.Exit(0) os.Exit(0)
}() }()
job.Eng.Hack_SetGlobalVar("httpapi.server", srv)
if err := job.Eng.Register("create", srv.ContainerCreate); err != nil {
return err.Error()
}
if err := job.Eng.Register("start", srv.ContainerStart); err != nil {
return err.Error()
}
if err := job.Eng.Register("serveapi", srv.ListenAndServe); err != nil {
return err.Error()
}
return "0"
}
protoAddrs := srv.runtime.config.ProtoAddresses func (srv *Server) ListenAndServe(job *engine.Job) string {
protoAddrs := job.Args
chErrors := make(chan error, len(protoAddrs)) chErrors := make(chan error, len(protoAddrs))
for _, protoAddr := range protoAddrs { for _, protoAddr := range protoAddrs {
protoAddrParts := strings.SplitN(protoAddr, "://", 2) protoAddrParts := strings.SplitN(protoAddr, "://", 2)
@ -81,19 +89,20 @@ func (srv *Server) Daemon() error {
log.Println("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\") log.Println("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
} }
default: default:
return fmt.Errorf("Invalid protocol format.") return "Invalid protocol format."
} }
go func() { go func() {
chErrors <- ListenAndServe(protoAddrParts[0], protoAddrParts[1], srv, true) // FIXME: merge Server.ListenAndServe with ListenAndServe
chErrors <- ListenAndServe(protoAddrParts[0], protoAddrParts[1], srv, job.GetenvBool("Logging"))
}() }()
} }
for i := 0; i < len(protoAddrs); i += 1 { for i := 0; i < len(protoAddrs); i += 1 {
err := <-chErrors err := <-chErrors
if err != nil { if err != nil {
return err return err.Error()
} }
} }
return nil return "0"
} }
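The refactor moves server entry points onto the engine's job convention: a handler takes a job, reads inputs from Args and the environment, emits output through the job, and returns "0" on success or an error string. A self-contained sketch of that shape; Job here is a stand-in with only the pieces the sketch needs, not the real engine API:

    package main

    import "fmt"

    // Job is a stand-in for engine.Job with only what this sketch needs:
    // a name, positional args, and a string environment.
    type Job struct {
    	Name string
    	Args []string
    	env  map[string]string
    }

    func (j *Job) Getenv(key string) string { return j.env[key] }

    // containerStartSketch mimics the handler signature registered above:
    // it returns "0" on success, anything else is treated as an error.
    func containerStartSketch(job *Job) string {
    	if len(job.Args) < 1 {
    		return fmt.Sprintf("Usage: %s container_id", job.Name)
    	}
    	fmt.Printf("starting %s (logging=%s)\n", job.Args[0], job.Getenv("Logging"))
    	return "0"
    }

    func main() {
    	job := &Job{Name: "start", Args: []string{"abc123"}, env: map[string]string{"Logging": "true"}}
    	if status := containerStartSketch(job); status != "0" {
    		fmt.Println("job failed:", status)
    	}
    }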
func (srv *Server) DockerVersion() APIVersion { func (srv *Server) DockerVersion() APIVersion {
@ -154,7 +163,7 @@ func (srv *Server) ContainerKill(name string, sig int) error {
if err := container.Kill(); err != nil { if err := container.Kill(); err != nil {
return fmt.Errorf("Cannot kill container %s: %s", name, err) return fmt.Errorf("Cannot kill container %s: %s", name, err)
} }
srv.LogEvent("kill", container.ShortID(), srv.runtime.repositories.ImageName(container.Image)) srv.LogEvent("kill", container.ID, srv.runtime.repositories.ImageName(container.Image))
} else { } else {
// Otherwise, just send the requested signal // Otherwise, just send the requested signal
if err := container.kill(sig); err != nil { if err := container.kill(sig); err != nil {
@ -180,7 +189,7 @@ func (srv *Server) ContainerExport(name string, out io.Writer) error {
if _, err := io.Copy(out, data); err != nil { if _, err := io.Copy(out, data); err != nil {
return err return err
} }
srv.LogEvent("export", container.ShortID(), srv.runtime.repositories.ImageName(container.Image)) srv.LogEvent("export", container.ID, srv.runtime.repositories.ImageName(container.Image))
return nil return nil
} }
return fmt.Errorf("No such container: %s", name) return fmt.Errorf("No such container: %s", name)
@ -198,39 +207,39 @@ func (srv *Server) ImagesSearch(term string) ([]registry.SearchResult, error) {
return results.Results, nil return results.Results, nil
} }
func (srv *Server) ImageInsert(name, url, path string, out io.Writer, sf *utils.StreamFormatter) (string, error) { func (srv *Server) ImageInsert(name, url, path string, out io.Writer, sf *utils.StreamFormatter) error {
out = utils.NewWriteFlusher(out) out = utils.NewWriteFlusher(out)
img, err := srv.runtime.repositories.LookupImage(name) img, err := srv.runtime.repositories.LookupImage(name)
if err != nil { if err != nil {
return "", err return err
} }
file, err := utils.Download(url, out) file, err := utils.Download(url, out)
if err != nil { if err != nil {
return "", err return err
} }
defer file.Body.Close() defer file.Body.Close()
config, _, _, err := ParseRun([]string{img.ID, "echo", "insert", url, path}, srv.runtime.capabilities) config, _, _, err := ParseRun([]string{img.ID, "echo", "insert", url, path}, srv.runtime.capabilities)
if err != nil { if err != nil {
return "", err return err
} }
c, _, err := srv.runtime.Create(config, "") c, _, err := srv.runtime.Create(config, "")
if err != nil { if err != nil {
return "", err return err
} }
if err := c.Inject(utils.ProgressReader(file.Body, int(file.ContentLength), out, sf.FormatProgress("", "Downloading", "%8v/%v (%v)"), sf, true), path); err != nil { if err := c.Inject(utils.ProgressReader(file.Body, int(file.ContentLength), out, sf.FormatProgress("", "Downloading", "%8v/%v (%v)"), sf, false), path); err != nil {
return "", err return err
} }
// FIXME: Handle custom repo, tag comment, author // FIXME: Handle custom repo, tag comment, author
img, err = srv.runtime.Commit(c, "", "", img.Comment, img.Author, nil) img, err = srv.runtime.Commit(c, "", "", img.Comment, img.Author, nil)
if err != nil { if err != nil {
return "", err return err
} }
out.Write(sf.FormatStatus("", img.ID)) out.Write(sf.FormatStatus(img.ID, ""))
return img.ShortID(), nil return nil
} }
func (srv *Server) ImagesViz(out io.Writer) error { func (srv *Server) ImagesViz(out io.Writer) error {
@ -250,9 +259,9 @@ func (srv *Server) ImagesViz(out io.Writer) error {
return fmt.Errorf("Error while getting parent image: %v", err) return fmt.Errorf("Error while getting parent image: %v", err)
} }
if parentImage != nil { if parentImage != nil {
out.Write([]byte(" \"" + parentImage.ShortID() + "\" -> \"" + image.ShortID() + "\"\n")) out.Write([]byte(" \"" + parentImage.ID + "\" -> \"" + image.ID + "\"\n"))
} else { } else {
out.Write([]byte(" base -> \"" + image.ShortID() + "\" [style=invis]\n")) out.Write([]byte(" base -> \"" + image.ID + "\" [style=invis]\n"))
} }
} }
@ -465,7 +474,7 @@ func (srv *Server) Containers(all, size bool, n int, since, before string) []API
continue continue
} }
if before != "" { if before != "" {
if container.ShortID() == before { if container.ID == before || utils.TruncateID(container.ID) == before {
foundBefore = true foundBefore = true
continue continue
} }
@ -476,7 +485,7 @@ func (srv *Server) Containers(all, size bool, n int, since, before string) []API
if displayed == n { if displayed == n {
break break
} }
if container.ShortID() == since { if container.ID == since || utils.TruncateID(container.ID) == since {
break break
} }
displayed++ displayed++
@ -518,7 +527,7 @@ func (srv *Server) ContainerCommit(name, repo, tag, author, comment string, conf
if err != nil { if err != nil {
return "", err return "", err
} }
return img.ShortID(), err return img.ID, err
} }
func (srv *Server) ContainerTag(name, repo, tag string, force bool) error { func (srv *Server) ContainerTag(name, repo, tag string, force bool) error {
@ -1018,37 +1027,47 @@ func (srv *Server) ImageImport(src, repo, tag string, in io.Reader, out io.Write
return err return err
} }
} }
out.Write(sf.FormatStatus("", img.ShortID())) out.Write(sf.FormatStatus("", img.ID))
return nil return nil
} }
func (srv *Server) ContainerCreate(config *Config, name string) (string, []string, error) { func (srv *Server) ContainerCreate(job *engine.Job) string {
if config.Memory != 0 && config.Memory < 524288 { var name string
return "", nil, fmt.Errorf("Memory limit must be given in bytes (minimum 524288 bytes)") if len(job.Args) == 1 {
name = job.Args[0]
} else if len(job.Args) > 1 {
return fmt.Sprintf("Usage: %s ", job.Name)
}
var config Config
if err := job.ExportEnv(&config); err != nil {
return err.Error()
}
if config.Memory != 0 && config.Memory < 524288 {
return "Minimum memory limit allowed is 512k"
} }
if config.Memory > 0 && !srv.runtime.capabilities.MemoryLimit { if config.Memory > 0 && !srv.runtime.capabilities.MemoryLimit {
config.Memory = 0 config.Memory = 0
} }
if config.Memory > 0 && !srv.runtime.capabilities.SwapLimit { if config.Memory > 0 && !srv.runtime.capabilities.SwapLimit {
config.MemorySwap = -1 config.MemorySwap = -1
} }
container, buildWarnings, err := srv.runtime.Create(config, name) container, buildWarnings, err := srv.runtime.Create(&config, name)
if err != nil { if err != nil {
if srv.runtime.graph.IsNotExist(err) { if srv.runtime.graph.IsNotExist(err) {
_, tag := utils.ParseRepositoryTag(config.Image) _, tag := utils.ParseRepositoryTag(config.Image)
if tag == "" { if tag == "" {
tag = DEFAULTTAG tag = DEFAULTTAG
} }
return fmt.Sprintf("No such image: %s (tag: %s)", config.Image, tag)
return "", nil, fmt.Errorf("No such image: %s (tag: %s)", config.Image, tag)
} }
return "", nil, err return err.Error()
} }
srv.LogEvent("create", container.ShortID(), srv.runtime.repositories.ImageName(container.Image)) srv.LogEvent("create", container.ID, srv.runtime.repositories.ImageName(container.Image))
return container.ShortID(), buildWarnings, nil job.Printf("%s\n", container.ID)
for _, warning := range buildWarnings {
job.Errorf("%s\n", warning)
}
return "0"
} }
func (srv *Server) ContainerRestart(name string, t int) error { func (srv *Server) ContainerRestart(name string, t int) error {
@ -1056,7 +1075,7 @@ func (srv *Server) ContainerRestart(name string, t int) error {
if err := container.Restart(t); err != nil { if err := container.Restart(t); err != nil {
return fmt.Errorf("Cannot restart container %s: %s", name, err) return fmt.Errorf("Cannot restart container %s: %s", name, err)
} }
srv.LogEvent("restart", container.ShortID(), srv.runtime.repositories.ImageName(container.Image)) srv.LogEvent("restart", container.ID, srv.runtime.repositories.ImageName(container.Image))
} else { } else {
return fmt.Errorf("No such container: %s", name) return fmt.Errorf("No such container: %s", name)
} }
@ -1112,7 +1131,7 @@ func (srv *Server) ContainerDestroy(name string, removeVolume, removeLink bool)
if err := srv.runtime.Destroy(container); err != nil { if err := srv.runtime.Destroy(container); err != nil {
return fmt.Errorf("Cannot destroy container %s: %s", name, err) return fmt.Errorf("Cannot destroy container %s: %s", name, err)
} }
srv.LogEvent("destroy", container.ShortID(), srv.runtime.repositories.ImageName(container.Image)) srv.LogEvent("destroy", container.ID, srv.runtime.repositories.ImageName(container.Image))
if removeVolume { if removeVolume {
// Retrieve all volumes from all remaining containers // Retrieve all volumes from all remaining containers
@ -1229,8 +1248,8 @@ func (srv *Server) deleteImage(img *Image, repoName, tag string) ([]APIRmi, erro
return nil, err return nil, err
} }
if tagDeleted { if tagDeleted {
imgs = append(imgs, APIRmi{Untagged: img.ShortID()}) imgs = append(imgs, APIRmi{Untagged: img.ID})
srv.LogEvent("untag", img.ShortID(), "") srv.LogEvent("untag", img.ID, "")
} }
} }
if len(srv.runtime.repositories.ByID()[img.ID]) == 0 { if len(srv.runtime.repositories.ByID()[img.ID]) == 0 {
@@ -1258,6 +1277,26 @@ func (srv *Server) ImageDelete(name string, autoPrune bool) ([]APIRmi, error) {
 		}
 		return nil, nil
 	}
+	// Prevent deletion if image is used by a running container
+	for _, container := range srv.runtime.List() {
+		if container.State.Running {
+			parent, err := srv.runtime.repositories.LookupImage(container.Image)
+			if err != nil {
+				return nil, err
+			}
+			if err := parent.WalkHistory(func(p *Image) error {
+				if img.ID == p.ID {
+					return fmt.Errorf("Conflict, cannot delete %s because the running container %s is using it", name, container.ID)
+				}
+				return nil
+			}); err != nil {
+				return nil, err
+			}
+		}
+	}
 	if strings.Contains(img.ID, name) {
 		//delete via ID
 		return srv.deleteImage(img, "", "")
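The new guard above refuses to delete an image while any running container's image, or one of that image's ancestors, matches it. A simplified, self-contained sketch of the same walk (toy types, not the real Image struct):

    package main

    import "fmt"

    // image is a toy stand-in: each image points at its parent, forming
    // the history chain that WalkHistory traverses in the hunk above.
    type image struct {
    	ID     string
    	Parent *image
    }

    // walkHistory visits the image and each of its ancestors in turn.
    func (img *image) walkHistory(handler func(*image) error) error {
    	for p := img; p != nil; p = p.Parent {
    		if err := handler(p); err != nil {
    			return err
    		}
    	}
    	return nil
    }

    // inUse reports whether target appears in containerImage's history.
    func inUse(target, containerImage *image) bool {
    	err := containerImage.walkHistory(func(p *image) error {
    		if p.ID == target.ID {
    			return fmt.Errorf("conflict: %s is in use", target.ID)
    		}
    		return nil
    	})
    	return err != nil
    }

    func main() {
    	base := &image{ID: "base"}
    	child := &image{ID: "child", Parent: base}
    	fmt.Println(inUse(base, child)) // true: a running container uses a descendant of base
    	fmt.Println(inUse(child, base)) // false: base's history does not contain child
    }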
@@ -1303,7 +1342,6 @@ func (srv *Server) RegisterLinks(name string, hostConfig *HostConfig) error {
 		return fmt.Errorf("No such container: %s", name)
 	}
 
-	// Register links
 	if hostConfig != nil && hostConfig.Links != nil {
 		for _, l := range hostConfig.Links {
 			parts, err := parseLink(l)
@@ -1317,7 +1355,6 @@ func (srv *Server) RegisterLinks(name string, hostConfig *HostConfig) error {
 			if child == nil {
 				return fmt.Errorf("Could not get container for %s", parts["name"])
 			}
-
 			if err := runtime.RegisterLink(container, child, parts["alias"]); err != nil {
 				return err
 			}
@@ -1333,41 +1370,57 @@ func (srv *Server) RegisterLinks(name string, hostConfig *HostConfig) error {
 	return nil
 }
 
-func (srv *Server) ContainerStart(name string, hostConfig *HostConfig) error {
+func (srv *Server) ContainerStart(job *engine.Job) string {
+	if len(job.Args) < 1 {
+		return fmt.Sprintf("Usage: %s container_id", job.Name)
+	}
+	name := job.Args[0]
 	runtime := srv.runtime
 	container := runtime.Get(name)
-	if hostConfig != nil {
+	if container == nil {
+		return fmt.Sprintf("No such container: %s", name)
+	}
+	// If no environment was set, then no hostconfig was passed.
+	if len(job.Environ()) > 0 {
+		var hostConfig HostConfig
+		if err := job.ExportEnv(&hostConfig); err != nil {
+			return err.Error()
+		}
+		// Validate the HostConfig binds. Make sure that:
+		// 1) the source of a bind mount isn't /
+		//    The bind mount "/:/foo" isn't allowed.
+		// 2) Check that the source exists
+		//    The source to be bind mounted must exist.
 		for _, bind := range hostConfig.Binds {
 			splitBind := strings.Split(bind, ":")
 			source := splitBind[0]
 			// refuse to bind mount "/" to the container
 			if source == "/" {
-				return fmt.Errorf("Invalid bind mount '%s' : source can't be '/'", bind)
+				return fmt.Sprintf("Invalid bind mount '%s' : source can't be '/'", bind)
 			}
 			// ensure the source exists on the host
 			_, err := os.Stat(source)
 			if err != nil && os.IsNotExist(err) {
-				return fmt.Errorf("Invalid bind mount '%s' : source doesn't exist", bind)
+				return fmt.Sprintf("Invalid bind mount '%s' : source doesn't exist", bind)
 			}
 		}
-	}
-	if container == nil {
-		return fmt.Errorf("No such container: %s", name)
-	}
-	if hostConfig != nil {
-		container.hostConfig = hostConfig
+		// Register any links from the host config before starting the container
+		// FIXME: we could just pass the container here, no need to lookup by name again.
+		if err := srv.RegisterLinks(name, &hostConfig); err != nil {
+			return err.Error()
+		}
+		container.hostConfig = &hostConfig
 		container.ToDisk()
 	}
 	if err := container.Start(); err != nil {
-		return fmt.Errorf("Cannot start container %s: %s", name, err)
+		return fmt.Sprintf("Cannot start container %s: %s", name, err)
 	}
-	srv.LogEvent("start", container.ShortID(), runtime.repositories.ImageName(container.Image))
-	return nil
+	srv.LogEvent("start", container.ID, runtime.repositories.ImageName(container.Image))
+	return "0"
 }
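The bind validation embedded in the new ContainerStart is self-contained enough to lift out. This standalone sketch applies the same two rules from the hunk above (reject "/" as a source, require that the source path exists):

    package main

    import (
    	"fmt"
    	"os"
    	"strings"
    )

    // validateBind checks one "source:destination" bind spec the same way
    // the server hunk above does.
    func validateBind(bind string) error {
    	source := strings.Split(bind, ":")[0]
    	// refuse to bind mount "/" into the container
    	if source == "/" {
    		return fmt.Errorf("Invalid bind mount '%s' : source can't be '/'", bind)
    	}
    	// the source must exist on the host
    	if _, err := os.Stat(source); err != nil && os.IsNotExist(err) {
    		return fmt.Errorf("Invalid bind mount '%s' : source doesn't exist", bind)
    	}
    	return nil
    }

    func main() {
    	for _, bind := range []string{"/:/foo", "/no/such/dir:/foo", "/tmp:/foo"} {
    		fmt.Println(bind, "->", validateBind(bind))
    	}
    }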
 
 func (srv *Server) ContainerStop(name string, t int) error {
@@ -1375,7 +1428,7 @@ func (srv *Server) ContainerStop(name string, t int) error {
 		if err := container.Stop(t); err != nil {
 			return fmt.Errorf("Cannot stop container %s: %s", name, err)
 		}
-		srv.LogEvent("stop", container.ShortID(), srv.runtime.repositories.ImageName(container.Image))
+		srv.LogEvent("stop", container.ID, srv.runtime.repositories.ImageName(container.Image))
 	} else {
 		return fmt.Errorf("No such container: %s", name)
 	}
@@ -1518,12 +1571,13 @@ func (srv *Server) ContainerCopy(name string, resource string, out io.Writer) er
 }
 
-func NewServer(config *DaemonConfig) (*Server, error) {
+func NewServer(eng *engine.Engine, config *DaemonConfig) (*Server, error) {
 	runtime, err := NewRuntime(config)
 	if err != nil {
 		return nil, err
 	}
 	srv := &Server{
+		Eng:         eng,
 		runtime:     runtime,
 		pullingPool: make(map[string]struct{}),
 		pushingPool: make(map[string]struct{}),
@@ -1567,4 +1621,5 @@ type Server struct {
 	events      []utils.JSONMessage
 	listeners   map[string]chan utils.JSONMessage
 	reqFactory  *utils.HTTPRequestFactory
+	Eng         *engine.Engine
 }

View file

@@ -2,6 +2,7 @@ package docker
 
 import (
 	"github.com/dotcloud/docker/utils"
+	"io/ioutil"
 	"strings"
 	"testing"
 	"time"
@@ -79,20 +80,17 @@ func TestContainerTagImageDelete(t *testing.T) {
 }
 
 func TestCreateRm(t *testing.T) {
-	runtime := mkRuntime(t)
+	eng := NewTestEngine(t)
+	srv := mkServerFromEngine(eng, t)
+	runtime := srv.runtime
 	defer nuke(runtime)
-	srv := &Server{runtime: runtime}
 
 	config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	id, _, err := srv.ContainerCreate(config, "")
-	if err != nil {
-		t.Fatal(err)
-	}
+	id := createTestContainer(eng, config, t)
 
 	if len(runtime.List()) != 1 {
 		t.Errorf("Expected 1 container, %v found", len(runtime.List()))
@@ -109,27 +107,28 @@ func TestCreateRm(t *testing.T) {
 }
 
 func TestCreateRmVolumes(t *testing.T) {
-	runtime := mkRuntime(t)
-	defer nuke(runtime)
-	srv := &Server{runtime: runtime}
+	eng := NewTestEngine(t)
+	srv := mkServerFromEngine(eng, t)
+	runtime := srv.runtime
+	defer nuke(runtime)
 
 	config, hostConfig, _, err := ParseRun([]string{"-v", "/srv", GetTestImage(runtime).ID, "echo test"}, nil)
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	id, _, err := srv.ContainerCreate(config, "")
-	if err != nil {
-		t.Fatal(err)
-	}
+	id := createTestContainer(eng, config, t)
 
 	if len(runtime.List()) != 1 {
 		t.Errorf("Expected 1 container, %v found", len(runtime.List()))
 	}
 
-	err = srv.ContainerStart(id, hostConfig)
-	if err != nil {
+	job := eng.Job("start", id)
+	if err := job.ImportEnv(hostConfig); err != nil {
+		t.Fatal(err)
+	}
+	if err := job.Run(); err != nil {
 		t.Fatal(err)
 	}
@@ -148,20 +147,17 @@ func TestCreateRmVolumes(t *testing.T) {
 }
 
 func TestCommit(t *testing.T) {
-	runtime := mkRuntime(t)
+	eng := NewTestEngine(t)
+	srv := mkServerFromEngine(eng, t)
+	runtime := srv.runtime
 	defer nuke(runtime)
-	srv := &Server{runtime: runtime}
 
 	config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "/bin/cat"}, nil)
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	id, _, err := srv.ContainerCreate(config, "")
-	if err != nil {
-		t.Fatal(err)
-	}
+	id := createTestContainer(eng, config, t)
 
 	if _, err := srv.ContainerCommit(id, "testrepo", "testtag", "", "", config); err != nil {
 		t.Fatal(err)
@@ -169,26 +165,27 @@ func TestCommit(t *testing.T) {
 }
 
 func TestCreateStartRestartStopStartKillRm(t *testing.T) {
-	runtime := mkRuntime(t)
+	eng := NewTestEngine(t)
+	srv := mkServerFromEngine(eng, t)
+	runtime := srv.runtime
 	defer nuke(runtime)
-	srv := &Server{runtime: runtime}
 
 	config, hostConfig, _, err := ParseRun([]string{GetTestImage(runtime).ID, "/bin/cat"}, nil)
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	id, _, err := srv.ContainerCreate(config, "")
-	if err != nil {
-		t.Fatal(err)
-	}
+	id := createTestContainer(eng, config, t)
 
 	if len(runtime.List()) != 1 {
 		t.Errorf("Expected 1 container, %v found", len(runtime.List()))
 	}
 
-	if err := srv.ContainerStart(id, hostConfig); err != nil {
+	job := eng.Job("start", id)
+	if err := job.ImportEnv(hostConfig); err != nil {
+		t.Fatal(err)
+	}
+	if err := job.Run(); err != nil {
 		t.Fatal(err)
 	}
 
@@ -200,7 +197,11 @@ func TestCreateStartRestartStopStartKillRm(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	if err := srv.ContainerStart(id, hostConfig); err != nil {
+	job = eng.Job("start", id)
+	if err := job.ImportEnv(hostConfig); err != nil {
+		t.Fatal(err)
+	}
+	if err := job.Run(); err != nil {
 		t.Fatal(err)
 	}
@@ -220,22 +221,22 @@ func TestCreateStartRestartStopStartKillRm(t *testing.T) {
 }
 
 func TestRunWithTooLowMemoryLimit(t *testing.T) {
-	runtime := mkRuntime(t)
+	eng := NewTestEngine(t)
+	srv := mkServerFromEngine(eng, t)
+	runtime := srv.runtime
 	defer nuke(runtime)
 
 	// Try to create a container with a memory limit of 1 byte less than the minimum allowed limit.
-	if _, _, err := (*Server).ContainerCreate(&Server{runtime: runtime},
-		&Config{
-			Image:     GetTestImage(runtime).ID,
-			Memory:    524287,
-			CpuShares: 1000,
-			Cmd:       []string{"/bin/cat"},
-		},
-		"",
-	); err == nil {
+	job := eng.Job("create")
+	job.Setenv("Image", GetTestImage(runtime).ID)
+	job.Setenv("Memory", "524287")
+	job.Setenv("CpuShares", "1000")
+	job.SetenvList("Cmd", []string{"/bin/cat"})
+	var id string
+	job.StdoutParseString(&id)
+	if err := job.Run(); err == nil {
 		t.Errorf("Memory limit is smaller than the allowed limit. Container creation should've failed!")
 	}
 }
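The test above configures the job through string environment entries (Setenv) that the handler later decodes with ExportEnv; 524287 is one byte under the 524288 (512k) minimum enforced in the server hunk at the top of this diff. A toy model of that round trip, with hypothetical stand-ins for the real engine types:

    package main

    import (
    	"fmt"
    	"strconv"
    )

    // job is a toy stand-in: config travels as string key/values.
    type job struct{ env map[string]string }

    func (j *job) Setenv(k, v string) { j.env[k] = v }

    type config struct {
    	Image  string
    	Memory int64
    }

    // exportEnv decodes the string environment back into a struct,
    // roughly what ExportEnv does for the handler.
    func (j *job) exportEnv(c *config) error {
    	c.Image = j.env["Image"]
    	if m, ok := j.env["Memory"]; ok {
    		n, err := strconv.ParseInt(m, 10, 64)
    		if err != nil {
    			return err
    		}
    		c.Memory = n
    	}
    	return nil
    }

    func main() {
    	j := &job{env: map[string]string{}}
    	j.Setenv("Image", "busybox")
    	j.Setenv("Memory", "524287") // 1 byte under the 512k minimum
    	var c config
    	if err := j.exportEnv(&c); err != nil {
    		panic(err)
    	}
    	fmt.Printf("%+v\n", c) // {Image:busybox Memory:524287}
    }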
 
 func TestContainerTop(t *testing.T) {
@@ -384,9 +385,10 @@ func TestLogEvent(t *testing.T) {
 }
 
 func TestRmi(t *testing.T) {
-	runtime := mkRuntime(t)
+	eng := NewTestEngine(t)
+	srv := mkServerFromEngine(eng, t)
+	runtime := srv.runtime
 	defer nuke(runtime)
-	srv := &Server{runtime: runtime}
 
 	initialImages, err := srv.Images(false, "")
 	if err != nil {
@@ -398,14 +400,14 @@ func TestRmi(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	containerID, _, err := srv.ContainerCreate(config, "")
-	if err != nil {
-		t.Fatal(err)
-	}
+	containerID := createTestContainer(eng, config, t)
 
 	//To remove
-	err = srv.ContainerStart(containerID, hostConfig)
-	if err != nil {
+	job := eng.Job("start", containerID)
+	if err := job.ImportEnv(hostConfig); err != nil {
+		t.Fatal(err)
+	}
+	if err := job.Run(); err != nil {
 		t.Fatal(err)
 	}
 
@@ -419,14 +421,14 @@ func TestRmi(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	containerID, _, err = srv.ContainerCreate(config, "")
-	if err != nil {
-		t.Fatal(err)
-	}
+	containerID = createTestContainer(eng, config, t)
 
 	//To remove
-	err = srv.ContainerStart(containerID, hostConfig)
-	if err != nil {
+	job = eng.Job("start", containerID)
+	if err := job.ImportEnv(hostConfig); err != nil {
+		t.Fatal(err)
+	}
+	if err := job.Run(); err != nil {
 		t.Fatal(err)
 	}
@@ -521,3 +523,25 @@ func TestImagesFilter(t *testing.T) {
 		t.Fatal("incorrect number of matches returned")
 	}
 }
+
+func TestImageInsert(t *testing.T) {
+	runtime := mkRuntime(t)
+	defer nuke(runtime)
+	srv := &Server{runtime: runtime}
+	sf := utils.NewStreamFormatter(true)
+
+	// bad image name fails
+	if err := srv.ImageInsert("foo", "https://www.docker.io/static/img/docker-top-logo.png", "/foo", ioutil.Discard, sf); err == nil {
+		t.Fatal("expected an error and got none")
+	}
+
+	// bad url fails
+	if err := srv.ImageInsert(GetTestImage(runtime).ID, "http://bad_host_name_that_will_totally_fail.com/", "/foo", ioutil.Discard, sf); err == nil {
+		t.Fatal("expected an error and got none")
+	}
+
+	// success returns nil
+	if err := srv.ImageInsert(GetTestImage(runtime).ID, "https://www.docker.io/static/img/docker-top-logo.png", "/foo", ioutil.Discard, sf); err != nil {
+		t.Fatalf("expected no error, but got %v", err)
+	}
+}

View file

@@ -119,6 +119,15 @@ func MergeConfig(userConf, imageConf *Config) error {
 	}
 	if userConf.ExposedPorts == nil || len(userConf.ExposedPorts) == 0 {
 		userConf.ExposedPorts = imageConf.ExposedPorts
+	} else if imageConf.ExposedPorts != nil {
+		if userConf.ExposedPorts == nil {
+			userConf.ExposedPorts = make(map[Port]struct{})
+		}
+		for port := range imageConf.ExposedPorts {
+			if _, exists := userConf.ExposedPorts[port]; !exists {
+				userConf.ExposedPorts[port] = struct{}{}
+			}
+		}
 	}
 
 	if userConf.PortSpecs != nil && len(userConf.PortSpecs) > 0 {
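Previously, image-exposed ports were dropped whenever the user config already exposed any port; the new branch unions the two sets. A sketch of the rule with plain strings standing in for the Port type:

    package main

    import "fmt"

    // mergeExposedPorts mirrors the hunk above: take the image's set when
    // the user exposes nothing, otherwise add any image ports the user's
    // set is missing.
    func mergeExposedPorts(user, image map[string]struct{}) map[string]struct{} {
    	if len(user) == 0 {
    		return image
    	}
    	for port := range image {
    		if _, exists := user[port]; !exists {
    			user[port] = struct{}{}
    		}
    	}
    	return user
    }

    func main() {
    	user := map[string]struct{}{"1111/tcp": {}}
    	image := map[string]struct{}{"2222/tcp": {}}
    	fmt.Println(len(mergeExposedPorts(user, image))) // 2: the union of both sets
    }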
@@ -325,20 +334,6 @@ func migratePortMappings(config *Config, hostConfig *HostConfig) error {
 	return nil
 }
 
-func RootIsShared() bool {
-	if data, err := ioutil.ReadFile("/proc/self/mountinfo"); err == nil {
-		for _, line := range strings.Split(string(data), "\n") {
-			cols := strings.Split(line, " ")
-			if len(cols) >= 6 && cols[4] == "/" {
-				return strings.HasPrefix(cols[6], "shared")
-			}
-		}
-	}
-
-	// No idea, probably safe to assume so
-	return true
-}
-
 func BtrfsReflink(fd_out, fd_in uintptr) error {
 	res := C.btrfs_reflink(C.int(fd_out), C.int(fd_in))
 	if res != 0 {
@@ -353,6 +348,20 @@ func parseLink(rawLink string) (map[string]string, error) {
 	return utils.PartParser("name:alias", rawLink)
 }
 
+func RootIsShared() bool {
+	if data, err := ioutil.ReadFile("/proc/self/mountinfo"); err == nil {
+		for _, line := range strings.Split(string(data), "\n") {
+			cols := strings.Split(line, " ")
+			if len(cols) >= 6 && cols[4] == "/" {
+				return strings.HasPrefix(cols[6], "shared")
+			}
+		}
+	}
+
+	// No idea, probably safe to assume so
+	return true
+}
+
 type checker struct {
 	runtime *Runtime
 }
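RootIsShared is only moved below parseLink in these two hunks, not changed. For reference, it treats "/" as shared when the optional-fields column (index 6) of the root entry in /proc/self/mountinfo starts with "shared". A standalone sketch against a sample line; note the moved code's own length check (>= 6) is one short of the index it reads, so the sketch uses >= 7:

    package main

    import (
    	"fmt"
    	"strings"
    )

    // rootIsSharedLine applies the same column test to one mountinfo line:
    // cols[4] is the mount point, cols[6] holds optional fields like "shared:1".
    func rootIsSharedLine(line string) bool {
    	cols := strings.Split(line, " ")
    	return len(cols) >= 7 && cols[4] == "/" && strings.HasPrefix(cols[6], "shared")
    }

    func main() {
    	sample := "19 1 8:1 / / rw,relatime shared:1 - ext4 /dev/sda1 rw"
    	fmt.Println(rootIsSharedLine(sample)) // true
    }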

View file

@@ -28,6 +28,12 @@ var (
 	INITSHA1 string // sha1sum of separate static dockerinit, if Docker itself was compiled dynamically via ./hack/make.sh dynbinary
 )
 
+// A common interface to access the Fatal method of
+// both testing.B and testing.T.
+type Fataler interface {
+	Fatal(args ...interface{})
+}
+
 // ListOpts type
 type ListOpts []string
@@ -177,6 +183,40 @@ func HumanSize(size int64) string {
 	return fmt.Sprintf("%.4g %s", sizef, units[i])
 }
 
+// Parses a human-readable string representing an amount of RAM
+// in bytes, kibibytes, mebibytes or gibibytes, and returns the
+// number of bytes, or -1 if the string is unparseable.
+// Units are case-insensitive, and the 'b' suffix is optional.
+func RAMInBytes(size string) (bytes int64, err error) {
+	re, error := regexp.Compile("^(\\d+)([kKmMgG])?[bB]?$")
+	if error != nil {
+		return -1, error
+	}
+
+	matches := re.FindStringSubmatch(size)
+
+	if len(matches) != 3 {
+		return -1, fmt.Errorf("Invalid size: '%s'", size)
+	}
+
+	memLimit, error := strconv.ParseInt(matches[1], 10, 0)
+	if error != nil {
+		return -1, error
+	}
+
+	unit := strings.ToLower(matches[2])
+
+	if unit == "k" {
+		memLimit *= 1024
+	} else if unit == "m" {
+		memLimit *= 1024 * 1024
+	} else if unit == "g" {
+		memLimit *= 1024 * 1024 * 1024
+	}
+
+	return memLimit, nil
+}
+
 func Trunc(s string, maxlen int) string {
 	if len(s) <= maxlen {
 		return s
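A usage sketch of the new RAMInBytes: the same logic as above, condensed into a switch and compiled with MustCompile. "512k" parses to 524288, the minimum memory limit the server enforces earlier in this diff:

    package main

    import (
    	"fmt"
    	"regexp"
    	"strconv"
    	"strings"
    )

    // ramInBytes restates the parser added above: digits, an optional
    // case-insensitive k/m/g multiplier, an optional trailing 'b'.
    func ramInBytes(size string) (int64, error) {
    	re := regexp.MustCompile(`^(\d+)([kKmMgG])?[bB]?$`)
    	matches := re.FindStringSubmatch(size)
    	if len(matches) != 3 {
    		return -1, fmt.Errorf("Invalid size: '%s'", size)
    	}
    	n, err := strconv.ParseInt(matches[1], 10, 0)
    	if err != nil {
    		return -1, err
    	}
    	switch strings.ToLower(matches[2]) {
    	case "k":
    		n *= 1024
    	case "m":
    		n *= 1024 * 1024
    	case "g":
    		n *= 1024 * 1024 * 1024
    	}
    	return n, nil
    }

    func main() {
    	for _, s := range []string{"512k", "1G", "32", "32 mb"} {
    		n, err := ramInBytes(s)
    		fmt.Println(s, n, err) // "512k" -> 524288; "32 mb" fails (inner space)
    	}
    }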
@@ -910,7 +950,7 @@ func StripComments(input []byte, commentMarker []byte) []byte {
 func GetNameserversAsCIDR(resolvConf []byte) []string {
 	var parsedResolvConf = StripComments(resolvConf, []byte("#"))
 	nameservers := []string{}
-	re := regexp.MustCompile(`^\s*nameserver\s*(([0-9]\.){3}([0-9]))\s*$`)
+	re := regexp.MustCompile(`^\s*nameserver\s*(([0-9]+\.){3}([0-9]+))\s*$`)
 	for _, line := range bytes.Split(parsedResolvConf, []byte("\n")) {
 		var ns = re.FindSubmatch(line)
 		if len(ns) > 0 {
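The one-character change per octet above ('[0-9]' to '[0-9]+') is why the test data later in this diff switches to multi-digit addresses like 40.3.200.10: the old pattern silently skipped any nameserver whose octets had more than one digit. A quick demonstration:

    package main

    import (
    	"fmt"
    	"regexp"
    )

    func main() {
    	oldRe := regexp.MustCompile(`^\s*nameserver\s*(([0-9]\.){3}([0-9]))\s*$`)
    	newRe := regexp.MustCompile(`^\s*nameserver\s*(([0-9]+\.){3}([0-9]+))\s*$`)
    	line := "nameserver 40.3.200.10"
    	fmt.Println(oldRe.MatchString(line)) // false: only single-digit octets matched
    	fmt.Println(newRe.MatchString(line)) // true
    }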

View file

@@ -265,6 +265,39 @@ func TestHumanSize(t *testing.T) {
 	}
 }
 
+func TestRAMInBytes(t *testing.T) {
+	assertRAMInBytes(t, "32", false, 32)
+	assertRAMInBytes(t, "32b", false, 32)
+	assertRAMInBytes(t, "32B", false, 32)
+	assertRAMInBytes(t, "32k", false, 32*1024)
+	assertRAMInBytes(t, "32K", false, 32*1024)
+	assertRAMInBytes(t, "32kb", false, 32*1024)
+	assertRAMInBytes(t, "32Kb", false, 32*1024)
+	assertRAMInBytes(t, "32Mb", false, 32*1024*1024)
+	assertRAMInBytes(t, "32Gb", false, 32*1024*1024*1024)
+
+	assertRAMInBytes(t, "", true, -1)
+	assertRAMInBytes(t, "hello", true, -1)
+	assertRAMInBytes(t, "-32", true, -1)
+	assertRAMInBytes(t, " 32 ", true, -1)
+	assertRAMInBytes(t, "32 mb", true, -1)
+	assertRAMInBytes(t, "32m b", true, -1)
+	assertRAMInBytes(t, "32bm", true, -1)
+}
+
+func assertRAMInBytes(t *testing.T, size string, expectError bool, expectedBytes int64) {
+	actualBytes, err := RAMInBytes(size)
+	if (err != nil) && !expectError {
+		t.Errorf("Unexpected error parsing '%s': %s", size, err)
+	}
+	if (err == nil) && expectError {
+		t.Errorf("Expected to get an error parsing '%s', but got none (bytes=%d)", size, actualBytes)
+	}
+	if actualBytes != expectedBytes {
+		t.Errorf("Expected '%s' to parse as %d bytes, got %d", size, expectedBytes, actualBytes)
+	}
+}
+
 func TestParseHost(t *testing.T) {
 	if addr, err := ParseHost("127.0.0.1", 4243, "0.0.0.0"); err != nil || addr != "tcp://0.0.0.0:4243" {
 		t.Errorf("0.0.0.0 -> expected tcp://0.0.0.0:4243, got %s", addr)
@@ -448,12 +481,12 @@ func TestParsePortMapping(t *testing.T) {
 func TestGetNameserversAsCIDR(t *testing.T) {
 	for resolv, result := range map[string][]string{`
 nameserver 1.2.3.4
-nameserver 4.3.2.1
-search example.com`: {"1.2.3.4/32", "4.3.2.1/32"},
+nameserver 40.3.200.10
+search example.com`: {"1.2.3.4/32", "40.3.200.10/32"},
 		`search example.com`: {},
 		`nameserver 1.2.3.4
 search example.com
-nameserver 4.3.2.1`: {"1.2.3.4/32", "4.3.2.1/32"},
+nameserver 4.30.20.100`: {"1.2.3.4/32", "4.30.20.100/32"},
 		``: {},
 		` nameserver 1.2.3.4   `: {"1.2.3.4/32"},
 		`search example.com

View file

@@ -2,6 +2,7 @@ package docker
 
 import (
 	"fmt"
+	"github.com/dotcloud/docker/engine"
 	"github.com/dotcloud/docker/utils"
 	"io"
 	"io/ioutil"
@@ -20,64 +21,97 @@ var globalTestID string
 
 // Create a temporary runtime suitable for unit testing.
 // Call t.Fatal() at the first error.
-func mkRuntime(f Fataler) *Runtime {
-	// Use the caller function name as a prefix.
-	// This helps trace temp directories back to their test.
-	pc, _, _, _ := runtime.Caller(1)
-	callerLongName := runtime.FuncForPC(pc).Name()
-	parts := strings.Split(callerLongName, ".")
-	callerShortName := parts[len(parts)-1]
-	if globalTestID == "" {
-		globalTestID = GenerateID()[:4]
-	}
-	prefix := fmt.Sprintf("docker-test%s-%s-", globalTestID, callerShortName)
-	utils.Debugf("prefix = '%s'", prefix)
-	runtime, err := newTestRuntime(prefix)
+func mkRuntime(f utils.Fataler) *Runtime {
+	root, err := newTestDirectory(unitTestStoreBase)
 	if err != nil {
 		f.Fatal(err)
 	}
-	return runtime
-}
-
-// A common interface to access the Fatal method of
-// both testing.B and testing.T.
-type Fataler interface {
-	Fatal(args ...interface{})
-}
-
-func newTestRuntime(prefix string) (runtime *Runtime, err error) {
-	if prefix == "" {
-		prefix = "docker-test-"
-	}
-	utils.Debugf("prefix = %s", prefix)
-	utils.Debugf("newTestRuntime start")
-	root, err := ioutil.TempDir("", prefix)
-	defer func() {
-		utils.Debugf("newTestRuntime: %s", root)
-	}()
-	if err != nil {
-		return nil, err
-	}
-	if err := os.Remove(root); err != nil {
-		return nil, err
-	}
-	utils.Debugf("Copying %s to %s", unitTestStoreBase, root)
-	if err := utils.CopyDirectory(unitTestStoreBase, root); err != nil {
-		utils.Debugf("ERROR: Copying %s to %s returned %s", unitTestStoreBase, root, err)
-		return nil, err
-	}
 	config := &DaemonConfig{
 		Root:        root,
 		AutoRestart: false,
 	}
-	runtime, err = NewRuntimeFromDirectory(config)
+	r, err := NewRuntimeFromDirectory(config)
 	if err != nil {
-		return nil, err
+		f.Fatal(err)
 	}
-	runtime.UpdateCapabilities(true)
-	return runtime, nil
+	r.UpdateCapabilities(true)
+	return r
+}
+
+func createNamedTestContainer(eng *engine.Engine, config *Config, f utils.Fataler, name string) (shortId string) {
+	job := eng.Job("create", name)
+	if err := job.ImportEnv(config); err != nil {
+		f.Fatal(err)
+	}
+	job.StdoutParseString(&shortId)
+	if err := job.Run(); err != nil {
+		f.Fatal(err)
+	}
+	return
+}
+
+func createTestContainer(eng *engine.Engine, config *Config, f utils.Fataler) (shortId string) {
+	return createNamedTestContainer(eng, config, f, "")
+}
+
+func mkServerFromEngine(eng *engine.Engine, t utils.Fataler) *Server {
+	iSrv := eng.Hack_GetGlobalVar("httpapi.server")
+	if iSrv == nil {
+		panic("Legacy server field not set in engine")
+	}
+	srv, ok := iSrv.(*Server)
+	if !ok {
+		panic("Legacy server field in engine does not cast to *Server")
+	}
+	return srv
+}
+
+func NewTestEngine(t utils.Fataler) *engine.Engine {
+	root, err := newTestDirectory(unitTestStoreBase)
+	if err != nil {
+		t.Fatal(err)
+	}
+	eng, err := engine.New(root)
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Load default plugins
+	// (This is manually copied and modified from main() until we have a more generic plugin system)
+	job := eng.Job("initapi")
+	job.Setenv("Root", root)
+	job.SetenvBool("AutoRestart", false)
+	if err := job.Run(); err != nil {
+		t.Fatal(err)
+	}
+	return eng
+}
+
+func newTestDirectory(templateDir string) (dir string, err error) {
+	if globalTestID == "" {
+		globalTestID = GenerateID()[:4]
+	}
+	prefix := fmt.Sprintf("docker-test%s-%s-", globalTestID, getCallerName(2))
+	if prefix == "" {
+		prefix = "docker-test-"
+	}
+	dir, err = ioutil.TempDir("", prefix)
+	if err = os.Remove(dir); err != nil {
+		return
+	}
+	if err = utils.CopyDirectory(templateDir, dir); err != nil {
+		return
+	}
+	return
+}
+
+func getCallerName(depth int) string {
+	// Use the caller function name as a prefix.
+	// This helps trace temp directories back to their test.
+	pc, _, _, _ := runtime.Caller(depth + 1)
+	callerLongName := runtime.FuncForPC(pc).Name()
+	parts := strings.Split(callerLongName, ".")
+	callerShortName := parts[len(parts)-1]
+	return callerShortName
+}
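getCallerName keeps the temp-directory naming trick from the old mkRuntime: walk depth+1 frames up the stack and keep the last dot-separated segment of the function name, so temp directories can be traced back to the test that created them. A standalone sketch of the same mechanism:

    package main

    import (
    	"fmt"
    	"runtime"
    	"strings"
    )

    // callerName skips depth+1 frames (its own frame plus depth more) and
    // returns the short name of the function it lands on.
    func callerName(depth int) string {
    	pc, _, _, ok := runtime.Caller(depth + 1)
    	if !ok {
    		return "unknown"
    	}
    	parts := strings.Split(runtime.FuncForPC(pc).Name(), ".")
    	return parts[len(parts)-1]
    }

    // whoAmI asks for its own name: depth 0 means "my direct caller of callerName".
    func whoAmI() string { return callerName(0) }

    func main() {
    	fmt.Println(whoAmI()) // prints "whoAmI"
    }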
// Write `content` to the file at path `dst`, creating it if necessary, // Write `content` to the file at path `dst`, creating it if necessary,
@@ -249,7 +283,9 @@ func TestMergeConfig(t *testing.T) {
 		Volumes:     volumesUser,
 	}
 
-	MergeConfig(configUser, configImage)
+	if err := MergeConfig(configUser, configImage); err != nil {
+		t.Error(err)
+	}
 
 	if len(configUser.Dns) != 3 {
 		t.Fatalf("Expected 3 dns, 1.1.1.1, 2.2.2.2 and 3.3.3.3, found %d", len(configUser.Dns))
@@ -261,7 +297,7 @@ func TestMergeConfig(t *testing.T) {
 	}
 
 	if len(configUser.ExposedPorts) != 3 {
-		t.Fatalf("Expected 3 portSpecs, 1111, 2222 and 3333, found %d", len(configUser.PortSpecs))
+		t.Fatalf("Expected 3 ExposedPorts, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts))
 	}
 	for portSpecs := range configUser.ExposedPorts {
 		if portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" {
@@ -289,6 +325,28 @@ func TestMergeConfig(t *testing.T) {
 	if configUser.VolumesFrom != "1111" {
 		t.Fatalf("Expected VolumesFrom to be 1111, found %s", configUser.VolumesFrom)
 	}
+
+	ports, _, err := parsePortSpecs([]string{"0000"})
+	if err != nil {
+		t.Error(err)
+	}
+	configImage2 := &Config{
+		ExposedPorts: ports,
+	}
+
+	if err := MergeConfig(configUser, configImage2); err != nil {
+		t.Error(err)
+	}
+
+	if len(configUser.ExposedPorts) != 4 {
+		t.Fatalf("Expected 4 ExposedPorts, 0000, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts))
+	}
+	for portSpecs := range configUser.ExposedPorts {
+		if portSpecs.Port() != "0000" && portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" {
+			t.Fatalf("Expected 0000 or 1111 or 2222 or 3333, found %s", portSpecs)
+		}
+	}
 }
 
 func TestParseLxcConfOpt(t *testing.T) {