Merge branch 'master' into shykes-0.6.5-dm-plugin
Conflicts: utils.go utils_test.go
commit 7cf60da388
70 changed files with 2050 additions and 829 deletions
.gitignore (vendored, 1 change)
@@ -18,3 +18,4 @@ bundles/
 .hg/
 .git/
 vendor/pkg/
+pyenv
AUTHORS (2 changes)
@@ -94,6 +94,7 @@ Jonathan Rudenberg <jonathan@titanous.com>
 Joost Cassee <joost@cassee.net>
 Jordan Arentsen <blissdev@gmail.com>
+Joseph Anthony Pasquale Holsten <joseph@josephholsten.com>
 Josh Poimboeuf <jpoimboe@redhat.com>
 Julien Barbier <write0@gmail.com>
 Jérôme Petazzoni <jerome.petazzoni@dotcloud.com>
 Karan Lyons <karan@karanlyons.com>
@@ -165,6 +166,7 @@ Sridatta Thatipamala <sthatipamala@gmail.com>
 Sridhar Ratnakumar <sridharr@activestate.com>
 Steeve Morin <steeve.morin@gmail.com>
+Stefan Praszalowicz <stefan@greplin.com>
 Sven Dowideit <SvenDowideit@home.org.au>
 Thatcher Peskens <thatcher@dotcloud.com>
 Thermionix <bond711@gmail.com>
 Thijs Terlouw <thijsterlouw@gmail.com>
@@ -17,7 +17,6 @@
 + Prevent DNS server conflicts in CreateBridgeIface
 + Validate bind mounts on the server side
 + Use parent image config in docker build
 * Fix regression in /etc/hosts
 
 #### Client
 
@@ -1,11 +1,14 @@
 # Contributing to Docker
 
-Want to hack on Docker? Awesome! Here are instructions to get you started. They are probably not perfect, please let us know if anything feels
-wrong or incomplete.
+Want to hack on Docker? Awesome! Here are instructions to get you
+started. They are probably not perfect, please let us know if anything
+feels wrong or incomplete.
 
 ## Build Environment
 
-For instructions on setting up your development environment, please see our dedicated [dev environment setup docs](http://docs.docker.io/en/latest/contributing/devenvironment/).
+For instructions on setting up your development environment, please
+see our dedicated [dev environment setup
+docs](http://docs.docker.io/en/latest/contributing/devenvironment/).
 
 ## Contribution guidelines
 
@@ -36,7 +36,7 @@ run apt-get install -y -q mercurial
 run apt-get install -y -q build-essential libsqlite3-dev
 
 # Install Go
-run curl -s https://go.googlecode.com/files/go1.2rc3.src.tar.gz | tar -v -C /usr/local -xz
+run curl -s https://go.googlecode.com/files/go1.2rc4.src.tar.gz | tar -v -C /usr/local -xz
 env PATH /usr/local/go/bin:/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin
 env GOPATH /go:/go/src/github.com/dotcloud/docker/vendor
 run cd /usr/local/go/src && ./make.bash && go install -ldflags '-w -linkmode external -extldflags "-static -Wl,--unresolved-symbols=ignore-in-shared-libs"' -tags netgo -a std
NOTICE (35 changes)
@@ -8,35 +8,12 @@ by Keith Rarick, licensed under the MIT License.
 
 The following is courtesy of our legal counsel:
 
-Transfers of Docker shall be in accordance with applicable export
-controls of any country and all other applicable legal requirements.
-Docker shall not be distributed or downloaded to or in Cuba, Iran,
-North Korea, Sudan or Syria and shall not be distributed or downloaded
-to any person on the Denied Persons List administered by the U.S.
-Department of Commerce.
-
-What does that mean?
-Here is a further explanation from our legal counsel:
+Use and transfer of Docker may be subject to certain restrictions by the
+United States and other governments.
+It is your responsibility to ensure that your use and/or transfer does not
+violate applicable laws.
 
-Like all software products that utilize cryptography, the export and
-use of Docker is subject to the U.S. Commerce Department's Export
-Administration Regulations (EAR) because it uses or contains
-cryptography (see
-http://www.bis.doc.gov/index.php/policy-guidance/encryption). Certain
-free and open source software projects have a lightweight set of
-requirements, which can generally be met by providing email notice to
-the appropriate U.S. government agencies that their source code is
-available on a publicly available repository and making the
-appropriate statements in the README.
 For more information, please see http://www.bis.doc.gov
 
-The restrictions of the EAR apply to certain denied locations
-(currently Iran, Sudan, Syria, North Korea, or Cuba) and those
-individuals on the Denied Persons List, which is available here:
-http://www.bis.doc.gov/index.php/policy-guidance/lists-of-parties-of-concern/denied-persons-list.
-If you are incorporating Docker into a new open source project, the
-EAR restrictions apply to your incorporation of Docker into your
-project in the same manner as other cryptography-enabled projects,
-such as OpenSSL, almost all Linux distributions, etc.
-
-For more information, see http://www.apache.org/dev/crypto.html and/or
-seek legal counsel.
+See also http://www.apache.org/dev/crypto.html and/or seek legal counsel.
@@ -193,10 +193,9 @@ wrong or incomplete.
 *Brought to you courtesy of our legal counsel. For more context,
 please see the Notice document.*
 
-Transfers of Docker shall be in accordance with applicable export controls
-of any country and all other applicable legal requirements. Without limiting the
-foregoing, Docker shall not be distributed or downloaded to any individual or
-location if such distribution or download would violate the applicable US
-government export regulations.
+Use and transfer of Docker may be subject to certain restrictions by the
+United States and other governments.
+It is your responsibility to ensure that your use and/or transfer does not
+violate applicable laws.
 
 For more information, please see http://www.bis.doc.gov
Vagrantfile (vendored, 163 changes)
@@ -4,65 +4,135 @@
 BOX_NAME = ENV['BOX_NAME'] || "ubuntu"
 BOX_URI = ENV['BOX_URI'] || "http://files.vagrantup.com/precise64.box"
 VF_BOX_URI = ENV['BOX_URI'] || "http://files.vagrantup.com/precise64_vmware_fusion.box"
 AWS_BOX_URI = ENV['BOX_URI'] || "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box"
 AWS_REGION = ENV['AWS_REGION'] || "us-east-1"
-AWS_AMI = ENV['AWS_AMI'] || "ami-d0f89fb9"
+AWS_AMI = ENV['AWS_AMI'] || "ami-69f5a900"
+AWS_INSTANCE_TYPE = ENV['AWS_INSTANCE_TYPE'] || 't1.micro'
 
 FORWARD_DOCKER_PORTS = ENV['FORWARD_DOCKER_PORTS']
 
+SSH_PRIVKEY_PATH = ENV["SSH_PRIVKEY_PATH"]
+
+# A script to upgrade from the 12.04 kernel to the raring backport kernel (3.8)
+# and install docker.
+$script = <<SCRIPT
+# The username to add to the docker group will be passed as the first argument
+# to the script. If nothing is passed, default to "vagrant".
+user="$1"
+if [ -z "$user" ]; then
+    user=vagrant
+fi
+
+# Adding an apt gpg key is idempotent.
+wget -q -O - https://get.docker.io/gpg | apt-key add -
+
+# Creating the docker.list file is idempotent, but it may overrite desired
+# settings if it already exists. This could be solved with md5sum but it
+# doesn't seem worth it.
+echo 'deb http://get.docker.io/ubuntu docker main' > \
+    /etc/apt/sources.list.d/docker.list
+
+# Update remote package metadata. 'apt-get update' is idempotent.
+apt-get update -q
+
+# Install docker. 'apt-get install' is idempotent.
+apt-get install -q -y lxc-docker
+
+usermod -a -G docker "$user"
+
+tmp=`mktemp -q` && {
+    # Only install the backport kernel, don't bother upgrade if the backport is
+    # already installed. We want parse the output of apt so we need to save it
+    # with 'tee'. NOTE: The installation of the kernel will trigger dkms to
+    # install vboxguest if needed.
+    apt-get install -q -y --no-upgrade linux-image-generic-lts-raring | \
+        tee "$tmp"
+
+    # Parse the number of installed packages from the output
+    NUM_INST=`awk '$2 == "upgraded," && $4 == "newly" { print $3 }' "$tmp"`
+    rm "$tmp"
+}
+
+# If the number of installed packages is greater than 0, we want to reboot (the
+# backport kernel was installed but is not running).
+if [ "$NUM_INST" -gt 0 ];
+then
+    echo "Rebooting down to activate new kernel."
+    echo "/vagrant will not be mounted. Use 'vagrant halt' followed by"
+    echo "'vagrant up' to ensure /vagrant is mounted."
+    shutdown -r now
+fi
+SCRIPT
+
+# We need to install the virtualbox guest additions *before* we do the normal
+# docker installation. As such this script is prepended to the common docker
+# install script above. This allows the install of the backport kernel to
+# trigger dkms to build the virtualbox guest module install.
+$vbox_script = <<VBOX_SCRIPT + $script
+# Install the VirtualBox guest additions if they aren't already installed.
+if [ ! -d /opt/VBoxGuestAdditions-4.2.12/ ]; then
+    # Update remote package metadata. 'apt-get update' is idempotent.
+    apt-get update -q
+
+    # Kernel Headers and dkms are required to build the vbox guest kernel
+    # modules.
+    apt-get install -q -y linux-headers-generic-lts-raring dkms
+
+    echo 'Downloading VBox Guest Additions...'
+    wget -cq http://dlc.sun.com.edgesuite.net/virtualbox/4.2.12/VBoxGuestAdditions_4.2.12.iso
+
+    mount -o loop,ro /home/vagrant/VBoxGuestAdditions_4.2.12.iso /mnt
+    /mnt/VBoxLinuxAdditions.run --nox11
+    umount /mnt
+fi
+VBOX_SCRIPT
+
 Vagrant::Config.run do |config|
+  # Setup virtual machine box. This VM configuration code is always executed.
   config.vm.box = BOX_NAME
   config.vm.box_url = BOX_URI
 
+  config.ssh.forward_agent = true
-
-  # Provision docker and new kernel if deployment was not done.
-  # It is assumed Vagrant can successfully launch the provider instance.
-  if Dir.glob("#{File.dirname(__FILE__)}/.vagrant/machines/default/*/id").empty?
-    # Add lxc-docker package
-    pkg_cmd = "wget -q -O - https://get.docker.io/gpg | apt-key add -;" \
-      "echo deb http://get.docker.io/ubuntu docker main > /etc/apt/sources.list.d/docker.list;" \
-      "apt-get update -qq; apt-get install -q -y --force-yes lxc-docker; "
-    # Add Ubuntu raring backported kernel
-    pkg_cmd << "apt-get update -qq; apt-get install -q -y linux-image-generic-lts-raring; "
-    # Add guest additions if local vbox VM. As virtualbox is the default provider,
-    # it is assumed it won't be explicitly stated.
-    if ENV["VAGRANT_DEFAULT_PROVIDER"].nil? && ARGV.none? { |arg| arg.downcase.start_with?("--provider") }
-      pkg_cmd << "apt-get install -q -y linux-headers-generic-lts-raring dkms; " \
-        "echo 'Downloading VBox Guest Additions...'; " \
-        "wget -q http://dlc.sun.com.edgesuite.net/virtualbox/4.2.12/VBoxGuestAdditions_4.2.12.iso; "
-      # Prepare the VM to add guest additions after reboot
-      pkg_cmd << "echo -e 'mount -o loop,ro /home/vagrant/VBoxGuestAdditions_4.2.12.iso /mnt\n" \
-        "echo yes | /mnt/VBoxLinuxAdditions.run\numount /mnt\n" \
-        "rm /root/guest_additions.sh; ' > /root/guest_additions.sh; " \
-        "chmod 700 /root/guest_additions.sh; " \
-        "sed -i -E 's#^exit 0#[ -x /root/guest_additions.sh ] \\&\\& /root/guest_additions.sh#' /etc/rc.local; " \
-        "echo 'Installation of VBox Guest Additions is proceeding in the background.'; " \
-        "echo '\"vagrant reload\" can be used in about 2 minutes to activate the new guest additions.'; "
-    end
-    # Add vagrant user to the docker group
-    pkg_cmd << "usermod -a -G docker vagrant; "
-    # Activate new kernel
-    pkg_cmd << "shutdown -r +1; "
-    config.vm.provision :shell, :inline => pkg_cmd
+  # Use the specified private key path if it is specified and not empty.
+  if SSH_PRIVKEY_PATH
+    config.ssh.private_key_path = SSH_PRIVKEY_PATH
   end
-
-  config.ssh.forward_agent = true
 end
 
 
 # Providers were added on Vagrant >= 1.1.0
+#
+# NOTE: The vagrant "vm.provision" appends its arguments to a list and executes
+# them in order. If you invoke "vm.provision :shell, :inline => $script"
+# twice then vagrant will run the script two times. Unfortunately when you use
+# providers and the override argument to set up provisioners (like the vbox
+# guest extensions) they 1) don't replace the other provisioners (they append
+# to the end of the list) and 2) you can't control the order the provisioners
+# are executed (you can only append to the list). If you want the virtualbox
+# only script to run before the other script, you have to jump through a lot of
+# hoops.
+#
+# Here is my only repeatable solution: make one script that is common ($script)
+# and another script that is the virtual box guest *prepended* to the common
+# script. Only ever use "vm.provision" *one time* per provider. That means
+# every single provider has an override, and every single one configures
+# "vm.provision". Much saddness, but such is life.
 Vagrant::VERSION >= "1.1.0" and Vagrant.configure("2") do |config|
   config.vm.provider :aws do |aws, override|
+    aws.access_key_id = ENV["AWS_ACCESS_KEY_ID"]
+    aws.secret_access_key = ENV["AWS_SECRET_ACCESS_KEY"]
+    username = "ubuntu"
+    override.vm.box_url = AWS_BOX_URI
+    override.vm.provision :shell, :inline => $script, :args => username
-    aws.access_key_id = ENV["AWS_ACCESS_KEY"]
-    aws.secret_access_key = ENV["AWS_SECRET_KEY"]
     aws.keypair_name = ENV["AWS_KEYPAIR_NAME"]
     override.ssh.private_key_path = ENV["AWS_SSH_PRIVKEY"]
-    override.ssh.username = "ubuntu"
+    override.ssh.username = username
     aws.region = AWS_REGION
     aws.ami = AWS_AMI
-    aws.instance_type = "t1.micro"
+    aws.instance_type = AWS_INSTANCE_TYPE
   end
 
-  config.vm.provider :rackspace do |rs|
-    config.ssh.private_key_path = ENV["RS_PRIVATE_KEY"]
+  config.vm.provider :rackspace do |rs, override|
+    override.vm.provision :shell, :inline => $script
     rs.username = ENV["RS_USERNAME"]
     rs.api_key = ENV["RS_API_KEY"]
     rs.public_key_path = ENV["RS_PUBLIC_KEY"]
@@ -71,20 +141,25 @@ Vagrant::VERSION >= "1.1.0" and Vagrant.configure("2") do |config|
   end
 
   config.vm.provider :vmware_fusion do |f, override|
     override.vm.box = BOX_NAME
     override.vm.box_url = VF_BOX_URI
     override.vm.synced_folder ".", "/vagrant", disabled: true
+    override.vm.provision :shell, :inline => $script
     f.vmx["displayName"] = "docker"
   end
 
-  config.vm.provider :virtualbox do |vb|
-    config.vm.box = BOX_NAME
-    config.vm.box_url = BOX_URI
+  config.vm.provider :virtualbox do |vb, override|
+    override.vm.provision :shell, :inline => $vbox_script
     vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
    vb.customize ["modifyvm", :id, "--natdnsproxy1", "on"]
   end
 end
 
+# If this is a version 1 config, virtualbox is the only option. A version 2
+# config would have already been set in the above provider section.
+Vagrant::VERSION < "1.1.0" and Vagrant::Config.run do |config|
+  config.vm.provision :shell, :inline => $vbox_script
+end
+
 if !FORWARD_DOCKER_PORTS.nil?
   Vagrant::VERSION < "1.1.0" and Vagrant::Config.run do |config|
     (49000..49900).each do |port|
api.go (63 changes)
@@ -479,15 +479,16 @@ func postImagesInsert(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 		w.Header().Set("Content-Type", "application/json")
 	}
 	sf := utils.NewStreamFormatter(version > 1.0)
-	imgID, err := srv.ImageInsert(name, url, path, w, sf)
+	err := srv.ImageInsert(name, url, path, w, sf)
 	if err != nil {
 		if sf.Used() {
 			w.Write(sf.FormatError(err))
 			return nil
 		}
 		return err
 	}
 
-	return writeJSON(w, http.StatusOK, &APIID{ID: imgID})
+	return nil
 }
 
 func postImagesPush(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@@ -540,43 +541,36 @@ func postContainersCreate(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if err := parseForm(r); err != nil {
 		return nil
 	}
-	config := &Config{}
 	out := &APIRun{}
-	name := r.Form.Get("name")
 
-	if err := json.NewDecoder(r.Body).Decode(config); err != nil {
+	job := srv.Eng.Job("create", r.Form.Get("name"))
+	if err := job.DecodeEnv(r.Body); err != nil {
 		return err
 	}
 
 	resolvConf, err := utils.GetResolvConf()
 	if err != nil {
 		return err
 	}
-	if !config.NetworkDisabled && len(config.Dns) == 0 && len(srv.runtime.config.Dns) == 0 && utils.CheckLocalDns(resolvConf) {
+	if !job.GetenvBool("NetworkDisabled") && len(job.Getenv("Dns")) == 0 && len(srv.runtime.config.Dns) == 0 && utils.CheckLocalDns(resolvConf) {
 		out.Warnings = append(out.Warnings, fmt.Sprintf("Docker detected local DNS server on resolv.conf. Using default external servers: %v", defaultDns))
-		config.Dns = defaultDns
+		job.SetenvList("Dns", defaultDns)
 	}
 
-	id, warnings, err := srv.ContainerCreate(config, name)
-	if err != nil {
+	// Read container ID from the first line of stdout
+	job.StdoutParseString(&out.ID)
+	// Read warnings from stderr
+	job.StderrParseLines(&out.Warnings, 0)
+	if err := job.Run(); err != nil {
 		return err
 	}
-	out.ID = id
-	for _, warning := range warnings {
-		out.Warnings = append(out.Warnings, warning)
-	}
 
-	if config.Memory > 0 && !srv.runtime.capabilities.MemoryLimit {
+	if job.GetenvInt("Memory") > 0 && !srv.runtime.capabilities.MemoryLimit {
 		log.Println("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.")
 		out.Warnings = append(out.Warnings, "Your kernel does not support memory limit capabilities. Limitation discarded.")
 	}
-	if config.Memory > 0 && !srv.runtime.capabilities.SwapLimit {
+	if job.GetenvInt("Memory") > 0 && !srv.runtime.capabilities.SwapLimit {
 		log.Println("WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.")
 		out.Warnings = append(out.Warnings, "Your kernel does not support memory swap capabilities. Limitation discarded.")
 	}
 
-	if !config.NetworkDisabled && srv.runtime.capabilities.IPv4ForwardingDisabled {
+	if !job.GetenvBool("NetworkDisabled") && srv.runtime.capabilities.IPv4ForwardingDisabled {
 		log.Println("Warning: IPv4 forwarding is disabled.")
 		out.Warnings = append(out.Warnings, "IPv4 forwarding is disabled.")
 	}
@@ -653,26 +647,23 @@ func deleteImages(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 }
 
 func postContainersStart(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	var hostConfig *HostConfig
-	// allow a nil body for backwards compatibility
-	if r.Body != nil {
-		if matchesContentType(r.Header.Get("Content-Type"), "application/json") {
-			hostConfig = &HostConfig{}
-			if err := json.NewDecoder(r.Body).Decode(hostConfig); err != nil {
-				return err
-			}
-		}
-	}
-
 	if vars == nil {
 		return fmt.Errorf("Missing parameter")
 	}
 	name := vars["name"]
-	// Register any links from the host config before starting the container
-	if err := srv.RegisterLinks(name, hostConfig); err != nil {
-		return err
+	job := srv.Eng.Job("start", name)
+	if err := job.ImportEnv(HostConfig{}); err != nil {
+		return fmt.Errorf("Couldn't initialize host configuration")
 	}
-	if err := srv.ContainerStart(name, hostConfig); err != nil {
+	// allow a nil body for backwards compatibility
+	if r.Body != nil {
+		if matchesContentType(r.Header.Get("Content-Type"), "application/json") {
+			if err := job.DecodeEnv(r.Body); err != nil {
+				return err
+			}
+		}
+	}
+	if err := job.Run(); err != nil {
 		return err
 	}
 	w.WriteHeader(http.StatusNoContent)
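The api.go changes above stop calling server methods directly and instead route requests through named engine jobs ("create", "start"), with env vars for configuration and stdout/stderr for results. A minimal sketch of that calling convention, using a hypothetical simplified Job type (the real engine.Job also carries env encoding, line parsing, and exit status):

```go
package main

import (
	"fmt"
	"strings"
)

// Job is a simplified stand-in for docker's engine.Job: a named action
// with string-typed env vars and a line-oriented stdout.
type Job struct {
	Name   string
	env    map[string]string
	stdout strings.Builder
	action func(*Job) error
}

func (j *Job) Setenv(k, v string)    { j.env[k] = v }
func (j *Job) Getenv(k string) string { return j.env[k] }

// Run executes the job and returns its error, mirroring job.Run() above.
func (j *Job) Run() error { return j.action(j) }

func main() {
	create := &Job{Name: "create", env: map[string]string{}}
	create.action = func(j *Job) error {
		// A real "create" job would build the container here and print
		// its ID on stdout, which the API handler parses back out
		// (compare job.StdoutParseString(&out.ID) above).
		fmt.Fprintln(&j.stdout, "deadbeef1234")
		return nil
	}
	create.Setenv("Image", "ubuntu")
	if err := create.Run(); err != nil {
		panic(err)
	}
	id := strings.TrimSpace(create.stdout.String())
	fmt.Println("container id:", id) // container id: deadbeef1234
}
```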
api_test.go (34 changes)
@@ -609,11 +609,11 @@ func TestPostCommit(t *testing.T) {
 }
 
 func TestPostContainersCreate(t *testing.T) {
-	runtime := mkRuntime(t)
+	eng := NewTestEngine(t)
+	srv := mkServerFromEngine(eng, t)
+	runtime := srv.runtime
 	defer nuke(runtime)
 
-	srv := &Server{runtime: runtime}
-
 	configJSON, err := json.Marshal(&Config{
 		Image:  GetTestImage(runtime).ID,
 		Memory: 33554432,
@@ -756,27 +756,23 @@ func TestPostContainersRestart(t *testing.T) {
 }
 
 func TestPostContainersStart(t *testing.T) {
-	runtime := mkRuntime(t)
+	eng := NewTestEngine(t)
+	srv := mkServerFromEngine(eng, t)
+	runtime := srv.runtime
 	defer nuke(runtime)
 
-	srv := &Server{runtime: runtime}
-
-	container, _, err := runtime.Create(
+	id := createTestContainer(
+		eng,
 		&Config{
 			Image:     GetTestImage(runtime).ID,
 			Cmd:       []string{"/bin/cat"},
 			OpenStdin: true,
 		},
-		"",
-	)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer runtime.Destroy(container)
+		t)
 
 	hostConfigJSON, err := json.Marshal(&HostConfig{})
 
-	req, err := http.NewRequest("POST", "/containers/"+container.ID+"/start", bytes.NewReader(hostConfigJSON))
+	req, err := http.NewRequest("POST", "/containers/"+id+"/start", bytes.NewReader(hostConfigJSON))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -784,22 +780,26 @@ func TestPostContainersStart(t *testing.T) {
 	req.Header.Set("Content-Type", "application/json")
 
 	r := httptest.NewRecorder()
-	if err := postContainersStart(srv, APIVERSION, r, req, map[string]string{"name": container.ID}); err != nil {
+	if err := postContainersStart(srv, APIVERSION, r, req, map[string]string{"name": id}); err != nil {
 		t.Fatal(err)
 	}
 	if r.Code != http.StatusNoContent {
 		t.Fatalf("%d NO CONTENT expected, received %d\n", http.StatusNoContent, r.Code)
 	}
 
+	container := runtime.Get(id)
+	if container == nil {
+		t.Fatalf("Container %s was not created", id)
+	}
 	// Give some time to the process to start
+	// FIXME: use Wait once it's available as a job
 	container.WaitTimeout(500 * time.Millisecond)
 
 	if !container.State.Running {
 		t.Errorf("Container should be running")
 	}
 
 	r = httptest.NewRecorder()
-	if err = postContainersStart(srv, APIVERSION, r, req, map[string]string{"name": container.ID}); err == nil {
+	if err = postContainersStart(srv, APIVERSION, r, req, map[string]string{"name": id}); err == nil {
 		t.Fatalf("A running container should be able to be started")
 	}
@@ -544,10 +544,7 @@ func TestBuildADDFileNotFound(t *testing.T) {
 }
 
 func TestBuildInheritance(t *testing.T) {
-	runtime, err := newTestRuntime("")
-	if err != nil {
-		t.Fatal(err)
-	}
+	runtime := mkRuntime(t)
 	defer nuke(runtime)
 
 	srv := &Server{
commands.go (102 changes)
@@ -130,10 +130,7 @@ func (cli *DockerCli) CmdInsert(args ...string) error {
 	v.Set("url", cmd.Arg(1))
 	v.Set("path", cmd.Arg(2))
 
-	if err := cli.stream("POST", "/images/"+cmd.Arg(0)+"/insert?"+v.Encode(), nil, cli.out, nil); err != nil {
-		return err
-	}
-	return nil
+	return cli.stream("POST", "/images/"+cmd.Arg(0)+"/insert?"+v.Encode(), nil, cli.out, nil)
 }
 
 // mkBuildContext returns an archive of an empty context with the contents
@@ -376,15 +373,17 @@ func (cli *DockerCli) CmdWait(args ...string) error {
 		cmd.Usage()
 		return nil
 	}
+	var encounteredError error
 	for _, name := range cmd.Args() {
 		status, err := waitForExit(cli, name)
 		if err != nil {
-			fmt.Fprintf(cli.err, "%s", err)
+			fmt.Fprintf(cli.err, "%s\n", err)
+			encounteredError = fmt.Errorf("Error: failed to wait one or more containers")
 		} else {
 			fmt.Fprintf(cli.out, "%d\n", status)
 		}
 	}
-	return nil
+	return encounteredError
 }
 
 // 'docker version': show version information
@@ -505,15 +504,17 @@ func (cli *DockerCli) CmdStop(args ...string) error {
 	v := url.Values{}
 	v.Set("t", strconv.Itoa(*nSeconds))
 
+	var encounteredError error
 	for _, name := range cmd.Args() {
 		_, _, err := cli.call("POST", "/containers/"+name+"/stop?"+v.Encode(), nil)
 		if err != nil {
 			fmt.Fprintf(cli.err, "%s\n", err)
+			encounteredError = fmt.Errorf("Error: failed to stop one or more containers")
 		} else {
 			fmt.Fprintf(cli.out, "%s\n", name)
 		}
 	}
-	return nil
+	return encounteredError
 }
 
 func (cli *DockerCli) CmdRestart(args ...string) error {
@@ -530,15 +531,17 @@ func (cli *DockerCli) CmdRestart(args ...string) error {
 	v := url.Values{}
 	v.Set("t", strconv.Itoa(*nSeconds))
 
+	var encounteredError error
 	for _, name := range cmd.Args() {
 		_, _, err := cli.call("POST", "/containers/"+name+"/restart?"+v.Encode(), nil)
 		if err != nil {
 			fmt.Fprintf(cli.err, "%s\n", err)
+			encounteredError = fmt.Errorf("Error: failed to restart one or more containers")
 		} else {
 			fmt.Fprintf(cli.out, "%s\n", name)
 		}
 	}
-	return nil
+	return encounteredError
 }
 
 func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal {
@@ -772,15 +775,19 @@ func (cli *DockerCli) CmdRmi(args ...string) error {
 		return nil
 	}
 
+	var encounteredError error
 	for _, name := range cmd.Args() {
 		body, _, err := cli.call("DELETE", "/images/"+name, nil)
 		if err != nil {
-			fmt.Fprintf(cli.err, "%s", err)
+			fmt.Fprintf(cli.err, "%s\n", err)
+			encounteredError = fmt.Errorf("Error: failed to remove one or more images")
 		} else {
 			var outs []APIRmi
 			err = json.Unmarshal(body, &outs)
 			if err != nil {
-				return err
+				fmt.Fprintf(cli.err, "%s\n", err)
+				encounteredError = fmt.Errorf("Error: failed to remove one or more images")
+				continue
 			}
 			for _, out := range outs {
 				if out.Deleted != "" {
@@ -791,7 +798,7 @@ func (cli *DockerCli) CmdRmi(args ...string) error {
 			}
 		}
 	}
-	return nil
+	return encounteredError
 }
 
 func (cli *DockerCli) CmdHistory(args ...string) error {
@@ -870,15 +877,18 @@ func (cli *DockerCli) CmdRm(args ...string) error {
 	if *link {
 		val.Set("link", "1")
 	}
 
+	var encounteredError error
 	for _, name := range cmd.Args() {
 		_, _, err := cli.call("DELETE", "/containers/"+name+"?"+val.Encode(), nil)
 		if err != nil {
 			fmt.Fprintf(cli.err, "%s\n", err)
+			encounteredError = fmt.Errorf("Error: failed to remove one or more containers")
 		} else {
 			fmt.Fprintf(cli.out, "%s\n", name)
 		}
 	}
-	return nil
+	return encounteredError
 }
 
 // 'docker kill NAME' kills a running container
@@ -892,15 +902,16 @@ func (cli *DockerCli) CmdKill(args ...string) error {
 		return nil
 	}
 
+	var encounteredError error
 	for _, name := range args {
-		_, _, err := cli.call("POST", "/containers/"+name+"/kill", nil)
-		if err != nil {
+		if _, _, err := cli.call("POST", "/containers/"+name+"/kill", nil); err != nil {
 			fmt.Fprintf(cli.err, "%s\n", err)
+			encounteredError = fmt.Errorf("Error: failed to kill one or more containers")
 		} else {
 			fmt.Fprintf(cli.out, "%s\n", name)
 		}
 	}
-	return nil
+	return encounteredError
 }
 
 func (cli *DockerCli) CmdImport(args ...string) error {
@@ -913,8 +924,16 @@ func (cli *DockerCli) CmdImport(args ...string) error {
 		cmd.Usage()
 		return nil
 	}
-	src := cmd.Arg(0)
-	repository, tag := utils.ParseRepositoryTag(cmd.Arg(1))
+
+	var src, repository, tag string
+
+	if cmd.NArg() == 3 {
+		fmt.Fprintf(cli.err, "[DEPRECATED] The format 'URL|- [REPOSITORY [TAG]]' as been deprecated. Please use URL|- [REPOSITORY[:TAG]]\n")
+		src, repository, tag = cmd.Arg(0), cmd.Arg(1), cmd.Arg(2)
+	} else {
+		src = cmd.Arg(0)
+		repository, tag = utils.ParseRepositoryTag(cmd.Arg(1))
+	}
 	v := url.Values{}
 	v.Set("repo", repository)
 	v.Set("tag", tag)
@@ -1166,14 +1185,10 @@ func (cli *DockerCli) CmdImages(args ...string) error {
 		fmt.Fprintln(w, "REPOSITORY\tTAG\tIMAGE ID\tCREATED\tSIZE")
 	}
 
-	var repo string
-	var tag string
 	for _, out := range outs {
 		for _, repotag := range out.RepoTags {
-
-			components := strings.SplitN(repotag, ":", 2)
-			repo = components[0]
-			tag = components[1]
+			repo, tag := utils.ParseRepositoryTag(repotag)
 
 			if !*noTrunc {
 				out.ID = utils.TruncateID(out.ID)
@@ -1235,7 +1250,7 @@ func PrintTreeNode(cli *DockerCli, noTrunc *bool, image APIImages, prefix string
 
 	fmt.Fprintf(cli.out, "%s%s Size: %s (virtual %s)", prefix, imageID, utils.HumanSize(image.Size), utils.HumanSize(image.VirtualSize))
 	if image.RepoTags[0] != "<none>:<none>" {
-		fmt.Fprintf(cli.out, " Tags: %s\n", strings.Join(image.RepoTags, ","))
+		fmt.Fprintf(cli.out, " Tags: %s\n", strings.Join(image.RepoTags, ", "))
 	} else {
 		fmt.Fprint(cli.out, "\n")
 	}
@@ -1351,8 +1366,16 @@ func (cli *DockerCli) CmdCommit(args ...string) error {
 	if err := cmd.Parse(args); err != nil {
 		return nil
 	}
-	name := cmd.Arg(0)
-	repository, tag := utils.ParseRepositoryTag(cmd.Arg(1))
+
+	var name, repository, tag string
+
+	if cmd.NArg() == 3 {
+		fmt.Fprintf(cli.err, "[DEPRECATED] The format 'CONTAINER [REPOSITORY [TAG]]' as been deprecated. Please use CONTAINER [REPOSITORY[:TAG]]\n")
+		name, repository, tag = cmd.Arg(0), cmd.Arg(1), cmd.Arg(2)
+	} else {
+		name = cmd.Arg(0)
+		repository, tag = utils.ParseRepositoryTag(cmd.Arg(1))
+	}
 
 	if name == "" {
 		cmd.Usage()
@@ -1389,7 +1412,7 @@ func (cli *DockerCli) CmdCommit(args ...string) error {
 
 func (cli *DockerCli) CmdEvents(args ...string) error {
 	cmd := Subcmd("events", "[OPTIONS]", "Get real time events from the server")
-	since := cmd.String("since", "", "Show events previously created (used for polling).")
+	since := cmd.String("since", "", "Show previously created events and then stream.")
 	if err := cmd.Parse(args); err != nil {
 		return nil
 	}
@@ -1401,7 +1424,17 @@ func (cli *DockerCli) CmdEvents(args ...string) error {
 
 	v := url.Values{}
 	if *since != "" {
-		v.Set("since", *since)
+		loc := time.FixedZone(time.Now().Zone())
+		format := "2006-01-02 15:04:05 -0700 MST"
+		if len(*since) < len(format) {
+			format = format[:len(*since)]
+		}
+
+		if t, err := time.ParseInLocation(format, *since, loc); err == nil {
+			v.Set("since", strconv.FormatInt(t.Unix(), 10))
+		} else {
+			v.Set("since", *since)
+		}
 	}
 
 	if err := cli.stream("GET", "/events?"+v.Encode(), nil, cli.out, nil); err != nil {
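The new -since handling is worth a closer look: it truncates the reference layout to however much of the timestamp the user supplied, so partial dates parse, and anything unparseable (such as a raw Unix timestamp) is passed through unchanged. A standalone sketch of the same conversion:

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

// sinceToUnix converts a (possibly partial) "2006-01-02 15:04:05 -0700 MST"
// style timestamp to Unix seconds, falling back to the raw string.
func sinceToUnix(since string) string {
	loc := time.FixedZone(time.Now().Zone())
	format := "2006-01-02 15:04:05 -0700 MST"
	// Truncate the layout so "2013-10-24" or "2013-10-24 17:00" both parse.
	if len(since) < len(format) {
		format = format[:len(since)]
	}
	if t, err := time.ParseInLocation(format, since, loc); err == nil {
		return strconv.FormatInt(t.Unix(), 10)
	}
	return since
}

func main() {
	fmt.Println(sinceToUnix("2013-10-24")) // midnight local time, as Unix seconds
	fmt.Println(sinceToUnix("1382572800")) // not a date in that layout: passed through
}
```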
@@ -1658,9 +1691,16 @@ func (cli *DockerCli) CmdTag(args ...string) error {
 		return nil
 	}
 
-	v := url.Values{}
-	repository, tag := utils.ParseRepositoryTag(cmd.Arg(1))
+	var repository, tag string
+
+	if cmd.NArg() == 3 {
+		fmt.Fprintf(cli.err, "[DEPRECATED] The format 'IMAGE [REPOSITORY [TAG]]' as been deprecated. Please use IMAGE [REPOSITORY[:TAG]]\n")
+		repository, tag = cmd.Arg(1), cmd.Arg(2)
+	} else {
+		repository, tag = utils.ParseRepositoryTag(cmd.Arg(1))
+	}
+
+	v := url.Values{}
 	v.Set("repo", repository)
 	v.Set("tag", tag)
 
@@ -1971,7 +2011,7 @@ func (cli *DockerCli) call(method, path string, data interface{}) ([]byte, int, error) {
 		if len(body) == 0 {
 			return nil, resp.StatusCode, fmt.Errorf("Error: %s", http.StatusText(resp.StatusCode))
 		}
-		return nil, resp.StatusCode, fmt.Errorf("Error: %s", body)
+		return nil, resp.StatusCode, fmt.Errorf("Error: %s", bytes.TrimSpace(body))
 	}
 	return body, resp.StatusCode, nil
 }
@@ -2027,7 +2067,7 @@ func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer, headers map[string][]string) error {
 		if len(body) == 0 {
 			return fmt.Errorf("Error :%s", http.StatusText(resp.StatusCode))
 		}
-		return fmt.Errorf("Error: %s", body)
+		return fmt.Errorf("Error: %s", bytes.TrimSpace(body))
 	}
 
 	if matchesContentType(resp.Header.Get("Content-Type"), "application/json") {
commands_test.go (128 changes)
@@ -6,6 +6,8 @@ import (
 	"github.com/dotcloud/docker/utils"
 	"io"
 	"io/ioutil"
+	"os"
+	"path"
 	"regexp"
 	"strings"
 	"testing"
@@ -381,8 +383,8 @@ func TestRunAttachStdin(t *testing.T) {
 		if err != nil {
 			t.Fatal(err)
 		}
-		if cmdOutput != container.ShortID()+"\n" {
-			t.Fatalf("Wrong output: should be '%s', not '%s'\n", container.ShortID()+"\n", cmdOutput)
+		if cmdOutput != container.ID+"\n" {
+			t.Fatalf("Wrong output: should be '%s', not '%s'\n", container.ID+"\n", cmdOutput)
 		}
 	})
@@ -459,7 +461,7 @@ func TestRunDetach(t *testing.T) {
 	})
 }
 
-// TestAttachDetach checks that attach in tty mode can be detached
+// TestAttachDetach checks that attach in tty mode can be detached using the long container ID
 func TestAttachDetach(t *testing.T) {
 	stdin, stdinPipe := io.Pipe()
 	stdout, stdoutPipe := io.Pipe()
@@ -486,8 +488,8 @@ func TestAttachDetach(t *testing.T) {
 
 	container = globalRuntime.List()[0]
 
-	if strings.Trim(string(buf[:n]), " \r\n") != container.ShortID() {
-		t.Fatalf("Wrong ID received. Expect %s, received %s", container.ShortID(), buf[:n])
+	if strings.Trim(string(buf[:n]), " \r\n") != container.ID {
+		t.Fatalf("Wrong ID received. Expect %s, received %s", container.ID, buf[:n])
 	}
 	})
 	setTimeout(t, "Starting container timed out", 10*time.Second, func() {
@@ -501,7 +503,69 @@ func TestAttachDetach(t *testing.T) {
 	ch = make(chan struct{})
 	go func() {
 		defer close(ch)
-		if err := cli.CmdAttach(container.ShortID()); err != nil {
+		if err := cli.CmdAttach(container.ID); err != nil {
 			if err != io.ErrClosedPipe {
 				t.Fatal(err)
 			}
 		}
 	}()
+
+	setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() {
+		if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 15); err != nil {
+			if err != io.ErrClosedPipe {
+				t.Fatal(err)
+			}
+		}
+	})
+
+	setTimeout(t, "Escape sequence timeout", 5*time.Second, func() {
+		stdinPipe.Write([]byte{16, 17})
+		if err := stdinPipe.Close(); err != nil {
+			t.Fatal(err)
+		}
+	})
+	closeWrap(stdin, stdinPipe, stdout, stdoutPipe)
+
+	// wait for CmdRun to return
+	setTimeout(t, "Waiting for CmdAttach timed out", 15*time.Second, func() {
+		<-ch
+	})
+
+	time.Sleep(500 * time.Millisecond)
+	if !container.State.Running {
+		t.Fatal("The detached container should be still running")
+	}
+
+	setTimeout(t, "Waiting for container to die timedout", 5*time.Second, func() {
+		container.Kill()
+	})
+}
+
+// TestAttachDetachTruncatedID checks that attach in tty mode can be detached
+func TestAttachDetachTruncatedID(t *testing.T) {
+	stdin, stdinPipe := io.Pipe()
+	stdout, stdoutPipe := io.Pipe()
+
+	cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+	defer cleanup(globalRuntime)
+
+	go stdout.Read(make([]byte, 1024))
+	setTimeout(t, "Starting container timed out", 2*time.Second, func() {
+		if err := cli.CmdRun("-i", "-t", "-d", unitTestImageID, "cat"); err != nil {
+			t.Fatal(err)
+		}
+	})
+
+	container := globalRuntime.List()[0]
+
+	stdin, stdinPipe = io.Pipe()
+	stdout, stdoutPipe = io.Pipe()
+	cli = NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+
+	ch := make(chan struct{})
+	go func() {
+		defer close(ch)
+		if err := cli.CmdAttach(utils.TruncateID(container.ID)); err != nil {
+			if err != io.ErrClosedPipe {
+				t.Fatal(err)
+			}
+		}
+	}()
@@ -824,3 +888,55 @@ run [ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]
 
 	return image
 }
+
+// #2098 - Docker cidFiles only contain short version of the containerId
+//sudo docker run -cidfile /tmp/docker_test.cid ubuntu echo "test"
+// TestRunCidFile tests that run -cidfile returns the longid
+func TestRunCidFile(t *testing.T) {
+	stdout, stdoutPipe := io.Pipe()
+
+	tmpDir, err := ioutil.TempDir("", "TestRunCidFile")
+	if err != nil {
+		t.Fatal(err)
+	}
+	tmpCidFile := path.Join(tmpDir, "cid")
+
+	cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+	defer cleanup(globalRuntime)
+
+	c := make(chan struct{})
+	go func() {
+		defer close(c)
+		if err := cli.CmdRun("-cidfile", tmpCidFile, unitTestImageID, "ls"); err != nil {
+			t.Fatal(err)
+		}
+	}()
+
+	defer os.RemoveAll(tmpDir)
+	setTimeout(t, "Reading command output time out", 2*time.Second, func() {
+		cmdOutput, err := bufio.NewReader(stdout).ReadString('\n')
+		if err != nil {
+			t.Fatal(err)
+		}
+		if len(cmdOutput) < 1 {
+			t.Fatalf("'ls' should return something , not '%s'", cmdOutput)
+		}
+		//read the tmpCidFile
+		buffer, err := ioutil.ReadFile(tmpCidFile)
+		if err != nil {
+			t.Fatal(err)
+		}
+		id := string(buffer)
+
+		if len(id) != len("2bf44ea18873287bd9ace8a4cb536a7cbe134bed67e805fdf2f58a57f69b320c") {
+			t.Fatalf("-cidfile should be a long id, not '%s'", id)
+		}
+		//test that its a valid cid? (though the container is gone..)
+		//remove the file and dir.
+	})
+
+	setTimeout(t, "CmdRun timed out", 5*time.Second, func() {
+		<-c
+	})
+
+}
@@ -9,7 +9,6 @@ import (
 type DaemonConfig struct {
 	Pidfile        string
 	Root           string
-	ProtoAddresses []string
 	AutoRestart    bool
 	EnableCors     bool
 	Dns            []string
@@ -36,7 +35,6 @@ func ConfigFromJob(job *engine.Job) *DaemonConfig {
 	} else {
 		config.BridgeIface = DefaultNetworkBridge
 	}
-	config.ProtoAddresses = job.GetenvList("ProtoAddresses")
 	config.DefaultIp = net.ParseIP(job.Getenv("DefaultIp"))
 	config.InterContainerCommunication = job.GetenvBool("InterContainerCommunication")
 	return &config
container.go (130 changes)
@@ -134,7 +134,11 @@ type PortBinding struct {
 type Port string
 
 func (p Port) Proto() string {
-	return strings.Split(string(p), "/")[1]
+	parts := strings.Split(string(p), "/")
+	if len(parts) == 1 {
+		return "tcp"
+	}
+	return parts[1]
 }
 
 func (p Port) Port() string {
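With this change a Port value without an explicit "/proto" suffix defaults to TCP instead of indexing past the end of the split result. A quick check of the new behavior (the method reproduced verbatim on a local type):

```go
package main

import (
	"fmt"
	"strings"
)

type Port string

// Proto reproduces the patched method: default to "tcp" when the
// port string carries no "/proto" suffix.
func (p Port) Proto() string {
	parts := strings.Split(string(p), "/")
	if len(parts) == 1 {
		return "tcp"
	}
	return parts[1]
}

func main() {
	fmt.Println(Port("80").Proto())     // tcp (previously an index-out-of-range panic)
	fmt.Println(Port("53/udp").Proto()) // udp
}
```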
@@ -168,7 +172,7 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig, *flag.FlagSet, error) {
 	cmd.Var(flAttach, "a", "Attach to stdin, stdout or stderr.")
 	flStdin := cmd.Bool("i", false, "Keep stdin open even if not attached")
 	flTty := cmd.Bool("t", false, "Allocate a pseudo-tty")
-	flMemory := cmd.Int64("m", 0, "Memory limit (in bytes)")
+	flMemoryString := cmd.String("m", "", "Memory limit (format: <number><optional unit>, where unit = b, k, m or g)")
 	flContainerIDFile := cmd.String("cidfile", "", "Write the container ID to the file")
 	flNetwork := cmd.Bool("n", true, "Enable networking for this container")
 	flPrivileged := cmd.Bool("privileged", false, "Give extended privileges to this container")
@@ -177,9 +181,9 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig, *flag.FlagSet, error) {
 	cmd.String("name", "", "Assign a name to the container")
 	flPublishAll := cmd.Bool("P", false, "Publish all exposed ports to the host interfaces")
 
-	if capabilities != nil && *flMemory > 0 && !capabilities.MemoryLimit {
+	if capabilities != nil && *flMemoryString != "" && !capabilities.MemoryLimit {
 		//fmt.Fprintf(stdout, "WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
-		*flMemory = 0
+		*flMemoryString = ""
 	}
 
 	flCpuShares := cmd.Int64("c", 0, "CPU shares (relative weight)")
@@ -200,7 +204,7 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig, *flag.FlagSet, error) {
 	cmd.Var(flVolumes, "v", "Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)")
 
 	var flVolumesFrom utils.ListOpts
-	cmd.Var(&flVolumesFrom, "volumes-from", "Mount volumes from the specified container")
+	cmd.Var(&flVolumesFrom, "volumes-from", "Mount volumes from the specified container(s)")
 
 	flEntrypoint := cmd.String("entrypoint", "", "Overwrite the default entrypoint of the image")
@@ -246,6 +250,18 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig, *flag.FlagSet, error) {
 		}
 	}
 
+	var flMemory int64
+
+	if *flMemoryString != "" {
+		parsedMemory, err := utils.RAMInBytes(*flMemoryString)
+
+		if err != nil {
+			return nil, nil, cmd, err
+		}
+
+		flMemory = parsedMemory
+	}
+
 	var binds []string
 
 	// add any bind targets to the list of container volumes
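The -m flag becomes a string so human-friendly sizes can be accepted and converted once via utils.RAMInBytes. A minimal sketch of that conversion, assuming only the b/k/m/g units named in the flag help (the real helper is more permissive):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// ramInBytes is a sketch of utils.RAMInBytes: parse "<number><unit>"
// where unit is one of b, k, m, g (optional, case-insensitive).
func ramInBytes(size string) (int64, error) {
	size = strings.ToLower(strings.TrimSpace(size))
	mul := int64(1)
	switch {
	case strings.HasSuffix(size, "g"):
		mul, size = 1024*1024*1024, strings.TrimSuffix(size, "g")
	case strings.HasSuffix(size, "m"):
		mul, size = 1024*1024, strings.TrimSuffix(size, "m")
	case strings.HasSuffix(size, "k"):
		mul, size = 1024, strings.TrimSuffix(size, "k")
	case strings.HasSuffix(size, "b"):
		size = strings.TrimSuffix(size, "b")
	}
	n, err := strconv.ParseInt(size, 10, 64)
	if err != nil {
		return 0, err
	}
	return n * mul, nil
}

func main() {
	n, err := ramInBytes("512m")
	fmt.Println(n, err) // 536870912 <nil>
}
```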
@@ -316,7 +332,7 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig, *flag.FlagSet, error) {
 		Tty:             *flTty,
 		NetworkDisabled: !*flNetwork,
 		OpenStdin:       *flStdin,
-		Memory:          *flMemory,
+		Memory:          flMemory,
 		CpuShares:       *flCpuShares,
 		AttachStdin:     flAttach.Get("stdin"),
 		AttachStdout:    flAttach.Get("stdout"),
@@ -341,7 +357,7 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig, *flag.FlagSet, error) {
 		PublishAllPorts: *flPublishAll,
 	}
 
-	if capabilities != nil && *flMemory > 0 && !capabilities.SwapLimit {
+	if capabilities != nil && flMemory > 0 && !capabilities.SwapLimit {
 		//fmt.Fprintf(stdout, "WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
 		config.MemorySwap = -1
 	}
@@ -694,24 +710,25 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, stdout io.Writer, stderr io.Writer) chan error {
 func (container *Container) Start() (err error) {
 	container.State.Lock()
 	defer container.State.Unlock()
+	if container.State.Running {
+		return fmt.Errorf("The container %s is already running.", container.ID)
+	}
 	defer func() {
 		if err != nil {
 			container.cleanup()
 		}
 	}()
-
-	if container.State.Running {
-		return fmt.Errorf("The container %s is already running.", container.ID)
-	}
 	if err := container.EnsureMounted(); err != nil {
 		return err
 	}
 	if container.runtime.networkManager.disabled {
 		container.Config.NetworkDisabled = true
+		container.buildHostnameAndHostsFiles("127.0.1.1")
 	} else {
 		if err := container.allocateNetwork(); err != nil {
 			return err
 		}
+		container.buildHostnameAndHostsFiles(container.NetworkSettings.IPAddress)
 	}
 
 	// Make sure the config is compatible with the current kernel
@@ -771,9 +788,23 @@ func (container *Container) Start() (err error) {
 
 	// Apply volumes from another container if requested
 	if container.Config.VolumesFrom != "" {
-		volumes := strings.Split(container.Config.VolumesFrom, ",")
-		for _, v := range volumes {
-			c := container.runtime.Get(v)
+		containerSpecs := strings.Split(container.Config.VolumesFrom, ",")
+		for _, containerSpec := range containerSpecs {
+			mountRW := true
+			specParts := strings.SplitN(containerSpec, ":", 2)
+			switch len(specParts) {
+			case 0:
+				return fmt.Errorf("Malformed volumes-from specification: %s", container.Config.VolumesFrom)
+			case 2:
+				switch specParts[1] {
+				case "ro":
+					mountRW = false
+				case "rw": // mountRW is already true
+				default:
+					return fmt.Errorf("Malformed volumes-from speficication: %s", containerSpec)
+				}
+			}
+			c := container.runtime.Get(specParts[0])
 			if c == nil {
 				return fmt.Errorf("Container %s not found. Impossible to mount its volumes", container.ID)
 			}
@@ -786,7 +817,7 @@ func (container *Container) Start() (err error) {
 			}
 			container.Volumes[volPath] = id
 			if isRW, exists := c.VolumesRW[volPath]; exists {
-				container.VolumesRW[volPath] = isRW
+				container.VolumesRW[volPath] = isRW && mountRW
 			}
 		}
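The new -volumes-from syntax allows an optional :ro / :rw suffix per source container, and the effective writability of an inherited volume is the AND of the source volume's mode and the requested mode. A standalone sketch of the spec parsing introduced above:

```go
package main

import (
	"fmt"
	"strings"
)

// parseVolumesFromSpec splits "container[:ro|:rw]" the way the Start()
// code above does, defaulting to read-write. The case 0 branch mirrors
// the original's defensive check.
func parseVolumesFromSpec(spec string) (name string, mountRW bool, err error) {
	mountRW = true
	parts := strings.SplitN(spec, ":", 2)
	switch len(parts) {
	case 0:
		return "", false, fmt.Errorf("malformed volumes-from specification: %s", spec)
	case 2:
		switch parts[1] {
		case "ro":
			mountRW = false
		case "rw": // mountRW is already true
		default:
			return "", false, fmt.Errorf("malformed volumes-from specification: %s", spec)
		}
	}
	return parts[0], mountRW, nil
}

func main() {
	name, rw, _ := parseVolumesFromSpec("data:ro")
	fmt.Println(name, rw) // data false

	// Writability of the resulting mount: sourceRW && requestedRW.
	sourceRW := true
	fmt.Println(sourceRW && rw) // false
}
```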
@@ -832,7 +863,7 @@ func (container *Container) Start() (err error) {
 			// Create the mountpoint
 			rootVolPath := path.Join(container.RootfsPath(), volPath)
 			if err := os.MkdirAll(rootVolPath, 0755); err != nil {
-				return nil
+				return err
 			}
 
 			// Do not copy or change permissions if we are mounting from the host
@@ -876,7 +907,13 @@ func (container *Container) Start() (err error) {
 		return err
 	}
 
+	var lxcStart string = "lxc-start"
+	if container.hostConfig.Privileged && container.runtime.capabilities.AppArmor {
+		lxcStart = path.Join(container.runtime.config.Root, "lxc-start-unconfined")
+	}
+
 	params := []string{
+		lxcStart,
 		"-n", container.ID,
 		"-f", container.lxcConfigPath(),
 		"--",
@@ -969,11 +1006,24 @@ func (container *Container) Start() (err error) {
 	params = append(params, "--", container.Path)
 	params = append(params, container.Args...)
 
-	var lxcStart string = "lxc-start"
-	if container.hostConfig.Privileged && container.runtime.capabilities.AppArmor {
-		lxcStart = path.Join(container.runtime.config.Root, "lxc-start-unconfined")
+	if RootIsShared() {
+		// lxc-start really needs / to be non-shared, or all kinds of stuff break
+		// when lxc-start unmount things and those unmounts propagate to the main
+		// mount namespace.
+		// What we really want is to clone into a new namespace and then
+		// mount / MS_REC|MS_SLAVE, but since we can't really clone or fork
+		// without exec in go we have to do this horrible shell hack...
+		shellString :=
+			"mount --make-rslave /; exec " +
+				utils.ShellQuoteArguments(params)
+
+		params = []string{
+			"unshare", "-m", "--", "/bin/sh", "-c", shellString,
+		}
 	}
-	container.cmd = exec.Command(lxcStart, params...)
+
+	container.cmd = exec.Command(params[0], params[1:]...)
 
 	// Setup logging of stdout and stderr to disk
 	if err := container.runtime.LogToDisk(container.stdout, container.logPath("json"), "stdout"); err != nil {
 		return err
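Because Go cannot fork, adjust mount propagation, and then exec, the patch wraps the entire lxc-start command line in unshare(1) plus a shell that first makes / a recursive slave mount. A sketch of how that command line is assembled, with utils.ShellQuoteArguments approximated by a hypothetical single-quoting helper:

```go
package main

import (
	"fmt"
	"strings"
)

// shellQuote is a minimal stand-in for utils.ShellQuoteArguments:
// single-quote each argument so the shell passes it through verbatim.
func shellQuote(args []string) string {
	quoted := make([]string, len(args))
	for i, a := range args {
		quoted[i] = "'" + strings.Replace(a, "'", `'\''`, -1) + "'"
	}
	return strings.Join(quoted, " ")
}

func main() {
	params := []string{"lxc-start", "-n", "deadbeef", "-f", "/var/lib/docker/config.lxc", "--"}

	// When / is a shared mount, re-exec under a private mount namespace
	// so lxc-start's unmounts cannot propagate back to the host.
	rootIsShared := true
	if rootIsShared {
		shellString := "mount --make-rslave /; exec " + shellQuote(params)
		params = []string{"unshare", "-m", "--", "/bin/sh", "-c", shellString}
	}
	fmt.Println(strings.Join(params, " "))
}
```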
@@ -1082,6 +1132,30 @@ func (container *Container) StderrPipe() (io.ReadCloser, error) {
 	return utils.NewBufReader(reader), nil
 }
 
+func (container *Container) buildHostnameAndHostsFiles(IP string) {
+	container.HostnamePath = path.Join(container.root, "hostname")
+	ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644)
+
+	hostsContent := []byte(`
+127.0.0.1	localhost
+::1		localhost ip6-localhost ip6-loopback
+fe00::0		ip6-localnet
+ff00::0		ip6-mcastprefix
+ff02::1		ip6-allnodes
+ff02::2		ip6-allrouters
+`)
+
+	container.HostsPath = path.Join(container.root, "hosts")
+
+	if container.Config.Domainname != "" {
+		hostsContent = append([]byte(fmt.Sprintf("%s\t%s.%s %s\n", IP, container.Config.Hostname, container.Config.Domainname, container.Config.Hostname)), hostsContent...)
+	} else {
+		hostsContent = append([]byte(fmt.Sprintf("%s\t%s\n", IP, container.Config.Hostname)), hostsContent...)
+	}
+
+	ioutil.WriteFile(container.HostsPath, hostsContent, 0644)
+}
+
 func (container *Container) allocateNetwork() error {
 	if container.Config.NetworkDisabled {
 		return nil
@@ -1230,7 +1304,7 @@ func (container *Container) monitor() {
 	container.State.setStopped(exitCode)
 
 	if container.runtime != nil && container.runtime.srv != nil {
-		container.runtime.srv.LogEvent("die", container.ShortID(), container.runtime.repositories.ImageName(container.Image))
+		container.runtime.srv.LogEvent("die", container.ID, container.runtime.repositories.ImageName(container.Image))
 	}
 
 	// Cleanup
@@ -1297,7 +1371,7 @@ func (container *Container) kill(sig int) error {
 	}
 
 	if output, err := exec.Command("lxc-kill", "-n", container.ID, strconv.Itoa(sig)).CombinedOutput(); err != nil {
-		log.Printf("error killing container %s (%s, %s)", container.ShortID(), output, err)
+		log.Printf("error killing container %s (%s, %s)", utils.TruncateID(container.ID), output, err)
 		return err
 	}
 
@@ -1317,9 +1391,9 @@ func (container *Container) Kill() error {
 	// 2. Wait for the process to die, in last resort, try to kill the process directly
 	if err := container.WaitTimeout(10 * time.Second); err != nil {
 		if container.cmd == nil {
-			return fmt.Errorf("lxc-kill failed, impossible to kill the container %s", container.ShortID())
+			return fmt.Errorf("lxc-kill failed, impossible to kill the container %s", utils.TruncateID(container.ID))
 		}
-		log.Printf("Container %s failed to exit within 10 seconds of lxc-kill %s - trying direct SIGKILL", "SIGKILL", container.ShortID())
+		log.Printf("Container %s failed to exit within 10 seconds of lxc-kill %s - trying direct SIGKILL", "SIGKILL", utils.TruncateID(container.ID))
 		if err := container.cmd.Process.Kill(); err != nil {
 			return err
 		}
@@ -1433,14 +1507,6 @@ func (container *Container) Unmount() error {
 	return container.runtime.Unmount(container)
 }
 
-// ShortID returns a shorthand version of the container's id for convenience.
-// A collision with other container shorthands is very unlikely, but possible.
-// In case of a collision a lookup with Runtime.Get() will fail, and the caller
-// will need to use a langer prefix, or the full-length container Id.
-func (container *Container) ShortID() string {
-	return utils.TruncateID(container.ID)
-}
-
 func (container *Container) logPath(name string) string {
 	return path.Join(container.root, fmt.Sprintf("%s-%s.log", container.ID, name))
 }
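Container.ShortID() is removed throughout this commit: internal code now passes the full ID (events, cidfile, attach), and anywhere a short ID is still wanted for display, callers invoke utils.TruncateID directly. The helper is essentially a fixed-length prefix, roughly:

```go
package main

import "fmt"

// truncateID sketches utils.TruncateID: the first characters of a full
// container/image ID. The 12-character length is an assumption for
// illustration; only the "prefix of the full ID" shape matters here.
func truncateID(id string) string {
	const shortLen = 12
	if len(id) < shortLen {
		return id
	}
	return id[:shortLen]
}

func main() {
	full := "2bf44ea18873287bd9ace8a4cb536a7cbe134bed67e805fdf2f58a57f69b320c"
	fmt.Println(truncateID(full)) // 2bf44ea18873
}
```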
@@ -3,6 +3,7 @@ package docker
 import (
 	"bufio"
 	"fmt"
+	"github.com/dotcloud/docker/utils"
 	"io"
 	"io/ioutil"
 	"math/rand"
@@ -1005,7 +1006,7 @@ func TestEnv(t *testing.T) {
 	"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
 	"HOME=/",
 	"container=lxc",
-	"HOSTNAME=" + container.ShortID(),
+	"HOSTNAME=" + utils.TruncateID(container.ID),
 	"FALSE=true",
 	"TRUE=false",
 	"TRICKY=tri",
@@ -1338,6 +1339,67 @@ func TestBindMounts(t *testing.T) {
 	}
 }
 
+// Test that -volumes-from supports both read-only mounts
+func TestFromVolumesInReadonlyMode(t *testing.T) {
+	runtime := mkRuntime(t)
+	defer nuke(runtime)
+	container, _, err := runtime.Create(
+		&Config{
+			Image:   GetTestImage(runtime).ID,
+			Cmd:     []string{"/bin/echo", "-n", "foobar"},
+			Volumes: map[string]struct{}{"/test": {}},
+		},
+		"",
+	)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer runtime.Destroy(container)
+	_, err = container.Output()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !container.VolumesRW["/test"] {
+		t.Fail()
+	}
+
+	container2, _, err := runtime.Create(
+		&Config{
+			Image:       GetTestImage(runtime).ID,
+			Cmd:         []string{"/bin/echo", "-n", "foobar"},
+			VolumesFrom: container.ID + ":ro",
+		},
+		"",
+	)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer runtime.Destroy(container2)
+
+	_, err = container2.Output()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if container.Volumes["/test"] != container2.Volumes["/test"] {
+		t.Logf("container volumes do not match: %s | %s ",
+			container.Volumes["/test"],
+			container2.Volumes["/test"])
+		t.Fail()
+	}
+
+	_, exists := container2.VolumesRW["/test"]
+	if !exists {
+		t.Logf("container2 is missing '/test' volume: %s", container2.VolumesRW)
+		t.Fail()
+	}
+
+	if container2.VolumesRW["/test"] != false {
+		t.Log("'/test' volume mounted in read-write mode, expected read-only")
+		t.Fail()
+	}
+}
+
 // Test that VolumesRW values are copied to the new container. Regression test for #1201
 func TestVolumesFromReadonlyMount(t *testing.T) {
 	runtime := mkRuntime(t)
@@ -29,7 +29,9 @@ if [ -f /etc/default/$BASE ]; then
 	. /etc/default/$BASE
 fi
 
-if [ "$1" = start ] && which initctl >/dev/null && initctl version | grep -q upstart; then
+# see also init_is_upstart in /lib/lsb/init-functions (which isn't available in Ubuntu 12.04, or we'd use it)
+if [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | /bin/grep -q upstart; then
 	log_failure_msg "Docker is managed via upstart, try using service $BASE $1"
 	exit 1
 fi
@@ -6,5 +6,10 @@ stop on runlevel [!2345]
 respawn
 
 script
-	/usr/bin/docker -d
+	DOCKER=/usr/bin/$UPSTART_JOB
+	DOCKER_OPTS=
+	if [ -f /etc/default/$UPSTART_JOB ]; then
+		. /etc/default/$UPSTART_JOB
+	fi
+	"$DOCKER" -d $DOCKER_OPTS
 end script
@@ -1,3 +1,19 @@
-# Vagrant-docker
+# Vagrant integration
 
-This is a placeholder for the official vagrant-docker, a plugin for Vagrant (http://vagrantup.com) which exposes Docker as a provider.
+Currently there are at least 4 different projects that we are aware of that deals
+with integration with [Vagrant](http://vagrantup.com/) at different levels. One
+approach is to use Docker as a [provisioner](http://docs.vagrantup.com/v2/provisioning/index.html)
+which means you can create containers and pull base images on VMs using Docker's
+CLI and the other is to use Docker as a [provider](http://docs.vagrantup.com/v2/providers/index.html),
+meaning you can use Vagrant to control Docker containers.
+
+
+### Provisioners
+
+* [Vocker](https://github.com/fgrehm/vocker)
+* [Ventriloquist](https://github.com/fgrehm/ventriloquist)
+
+### Providers
+
+* [docker-provider](https://github.com/fgrehm/docker-provider)
+* [vagrant-shell](https://github.com/destructuring/vagrant-shell)
@@ -71,7 +71,8 @@ func main() {
	if err != nil {
		log.Fatal(err)
	}
	job := eng.Job("serveapi")
	// Load plugin: httpapi
	job := eng.Job("initapi")
	job.Setenv("Pidfile", *pidfile)
	job.Setenv("Root", *flRoot)
	job.SetenvBool("AutoRestart", *flAutoRestart)

@@ -79,12 +80,17 @@ func main() {
	job.Setenv("Dns", *flDns)
	job.SetenvBool("EnableIptables", *flEnableIptables)
	job.Setenv("BridgeIface", *bridgeName)
	job.SetenvList("ProtoAddresses", flHosts)
	job.Setenv("DefaultIp", *flDefaultIp)
	job.SetenvBool("InterContainerCommunication", *flInterContainerComm)
	if err := job.Run(); err != nil {
		log.Fatal(err)
	}
	// Serve api
	job = eng.Job("serveapi", flHosts...)
	job.SetenvBool("Logging", true)
	if err := job.Run(); err != nil {
		log.Fatal(err)
	}
} else {
	if len(flHosts) > 1 {
		log.Fatal("Please specify only one -H")
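The hunk above replaces the single ``serveapi`` job with a two-step sequence: an ``initapi`` job that configures the daemon, then a ``serveapi`` job that binds the listeners. Below is a minimal sketch of that call pattern, assuming the ``initapi`` and ``serveapi`` handlers are registered as in this commit; the root path, env keys, and listen address are placeholders, not a definitive invocation.

.. code-block:: go

    package main

    import (
        "log"

        "github.com/dotcloud/docker/engine"
    )

    func main() {
        eng, err := engine.New("/var/lib/docker")
        if err != nil {
            log.Fatal(err)
        }
        // Step 1: configure the daemon through job environment variables.
        initJob := eng.Job("initapi")
        initJob.SetenvBool("EnableIptables", true)
        if err := initJob.Run(); err != nil {
            log.Fatal(err)
        }
        // Step 2: expose the HTTP API on the requested addresses.
        serve := eng.Job("serveapi", "unix:///var/run/docker.sock")
        serve.SetenvBool("Logging", true)
        if err := serve.Run(); err != nil {
            log.Fatal(err)
        }
    }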
@@ -121,8 +121,7 @@ Create a container

	"AttachStdin":false,
	"AttachStdout":true,
	"AttachStderr":true,
	"PortSpecs":null,
	"Privileged": false,
	"ExposedPorts":{},
	"Tty":false,
	"OpenStdin":false,
	"StdinOnce":false,

@@ -135,7 +134,6 @@ Create a container

	"Volumes":{},
	"VolumesFrom":"",
	"WorkingDir":""
	}

**Example response**:

@@ -242,7 +240,7 @@ Inspect a container

	"AttachStdin": false,
	"AttachStdout": true,
	"AttachStderr": true,
	"PortSpecs": null,
	"ExposedPorts": {},
	"Tty": false,
	"OpenStdin": false,
	"StdinOnce": false,

@@ -413,7 +411,12 @@ Start a container

	{
	"Binds":["/tmp:/tmp"],
	"LxcConf":{"lxc.utsname":"docker"}
	"LxcConf":{"lxc.utsname":"docker"},
	"ContainerIDFile": "",
	"Privileged": false,
	"PortBindings": {"22/tcp": [{HostIp:"", HostPort:""}]},
	"Links": [],
	"PublishAllPorts": false
	}

**Example response**:

@@ -846,7 +849,7 @@ Inspect an image

	"AttachStdin":false,
	"AttachStdout":false,
	"AttachStderr":false,
	"PortSpecs":null,
	"ExposedPorts":{},
	"Tty":true,
	"OpenStdin":true,
	"StdinOnce":false,

@@ -1192,7 +1195,7 @@ Create a new image from a container's changes

	{
	"Cmd": ["cat", "/world"],
	"PortSpecs":["22"]
	"ExposedPorts":{"22/tcp":{}}
	}

**Example response**:
@@ -914,7 +914,12 @@ Search images

.. http:get:: /images/search

	Search for an image in the docker index
	Search for an image in the docker index.

	.. note::

		The response keys have changed from API v1.6 to reflect the JSON
		sent by the registry server to the docker daemon's request.

	**Example request**:

@@ -930,18 +935,28 @@ Search images

	Content-Type: application/json

	[
		{
			"Name":"cespare/sshd",
			"Description":""
		},
		{
			"Name":"johnfuller/sshd",
			"Description":""
		},
		{
			"Name":"dhrp/mongodb-sshd",
			"Description":""
		}
		{
			"description": "",
			"is_official": false,
			"is_trusted": false,
			"name": "wma55/u1210sshd",
			"star_count": 0
		},
		{
			"description": "",
			"is_official": false,
			"is_trusted": false,
			"name": "jdswinbank/sshd",
			"star_count": 0
		},
		{
			"description": "",
			"is_official": false,
			"is_trusted": false,
			"name": "vgauthier/sshd",
			"star_count": 0
		}
		...
	]

	:query term: term to search
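Because this commit changes the response keys, clients must decode the new lowercase field names. A sketch of a Go struct matching the new payload; only the JSON keys come from the example above, the struct and program around them are ours.

.. code-block:: go

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // SearchResult mirrors the post-v1.6 /images/search response keys.
    type SearchResult struct {
        Description string `json:"description"`
        IsOfficial  bool   `json:"is_official"`
        IsTrusted   bool   `json:"is_trusted"`
        Name        string `json:"name"`
        StarCount   int    `json:"star_count"`
    }

    func main() {
        payload := `[{"description":"","is_official":false,"is_trusted":false,"name":"wma55/u1210sshd","star_count":0}]`
        var results []SearchResult
        if err := json.Unmarshal([]byte(payload), &results); err != nil {
            panic(err)
        }
        fmt.Printf("%+v\n", results[0])
    }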
@@ -12,26 +12,28 @@ compatibility. Please file issues with the library owners. If you
find more library implementations, please list them in Docker doc bugs
and we will add the libraries here.

+----------------------+----------------+--------------------------------------------+
| Language/Framework   | Name           | Repository                                 |
+======================+================+============================================+
| Python               | docker-py      | https://github.com/dotcloud/docker-py      |
+----------------------+----------------+--------------------------------------------+
| Ruby                 | docker-client  | https://github.com/geku/docker-client      |
+----------------------+----------------+--------------------------------------------+
| Ruby                 | docker-api     | https://github.com/swipely/docker-api      |
+----------------------+----------------+--------------------------------------------+
| Javascript (NodeJS)  | docker.io      | https://github.com/appersonlabs/docker.io  |
|                      |                | Install via NPM: `npm install docker.io`   |
+----------------------+----------------+--------------------------------------------+
| Javascript           | docker-js      | https://github.com/dgoujard/docker-js      |
+----------------------+----------------+--------------------------------------------+
| Javascript (Angular) | dockerui       | https://github.com/crosbymichael/dockerui  |
| **WebUI**            |                |                                            |
+----------------------+----------------+--------------------------------------------+
| Java                 | docker-java    | https://github.com/kpelykh/docker-java     |
+----------------------+----------------+--------------------------------------------+
| Erlang               | erldocker      | https://github.com/proger/erldocker        |
+----------------------+----------------+--------------------------------------------+
| Go                   | go-dockerclient| https://github.com/fsouza/go-dockerclient  |
+----------------------+----------------+--------------------------------------------+
+----------------------+----------------+--------------------------------------------+----------+
| Language/Framework   | Name           | Repository                                 | Status   |
+======================+================+============================================+==========+
| Python               | docker-py      | https://github.com/dotcloud/docker-py      | Active   |
+----------------------+----------------+--------------------------------------------+----------+
| Ruby                 | docker-client  | https://github.com/geku/docker-client      | Outdated |
+----------------------+----------------+--------------------------------------------+----------+
| Ruby                 | docker-api     | https://github.com/swipely/docker-api      | Active   |
+----------------------+----------------+--------------------------------------------+----------+
| Javascript (NodeJS)  | docker.io      | https://github.com/appersonlabs/docker.io  | Active   |
|                      |                | Install via NPM: `npm install docker.io`   |          |
+----------------------+----------------+--------------------------------------------+----------+
| Javascript           | docker-js      | https://github.com/dgoujard/docker-js      | Active   |
+----------------------+----------------+--------------------------------------------+----------+
| Javascript (Angular) | dockerui       | https://github.com/crosbymichael/dockerui  | Active   |
| **WebUI**            |                |                                            |          |
+----------------------+----------------+--------------------------------------------+----------+
| Java                 | docker-java    | https://github.com/kpelykh/docker-java     | Active   |
+----------------------+----------------+--------------------------------------------+----------+
| Erlang               | erldocker      | https://github.com/proger/erldocker        | Active   |
+----------------------+----------------+--------------------------------------------+----------+
| Go                   | go-dockerclient| https://github.com/fsouza/go-dockerclient  | Active   |
+----------------------+----------------+--------------------------------------------+----------+
| PHP                  | Alvine         | http://pear.alvine.io/ (alpha)             | Active   |
+----------------------+----------------+--------------------------------------------+----------+
@@ -245,6 +245,9 @@ Full -run example

    Usage: docker events

    Get real time events from the server

    -since="": Show previously created events and then stream.
    (either seconds since epoch, or date string as below)

.. _cli_events_example:

@@ -277,6 +280,23 @@ Shell 1: (Again .. now showing events)

    [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die
    [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop

Show events in the past from a specified time
.............................................

.. code-block:: bash

    $ sudo docker events -since 1378216169
    [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die
    [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop

    $ sudo docker events -since '2013-09-03'
    [2013-09-03 15:49:26 +0200 CEST] 4386fb97867d: (from 12de384bfb10) start
    [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die
    [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop

    $ sudo docker events -since '2013-09-03 15:49:29 +0200 CEST'
    [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die
    [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop
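Since ``-since`` accepts either seconds since the epoch or a date string, a client has to try both interpretations. A sketch of that dual parse in Go; the accepted layouts here are illustrative assumptions, not Docker's exact list.

.. code-block:: go

    package main

    import (
        "fmt"
        "strconv"
        "time"
    )

    // parseSince interprets a -since value as either seconds since the
    // epoch or one of the date formats shown in the examples above.
    func parseSince(value string) (time.Time, error) {
        if secs, err := strconv.ParseInt(value, 10, 64); err == nil {
            return time.Unix(secs, 0), nil
        }
        for _, layout := range []string{"2006-01-02", "2006-01-02 15:04:05 -0700 MST"} {
            if t, err := time.Parse(layout, value); err == nil {
                return t, nil
            }
        }
        return time.Time{}, fmt.Errorf("cannot parse %q as a time", value)
    }

    func main() {
        t, _ := parseSince("1378216169")
        fmt.Println(t.UTC())
    }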
.. _cli_export:

@@ -460,6 +480,12 @@ Insert file from github

The main process inside the container will be sent SIGKILL.

Known Issues (kill)
~~~~~~~~~~~~~~~~~~~

* :issue:`197` indicates that ``docker kill`` may leave directories
  behind and make it difficult to remove the container.

.. _cli_login:

``login``

@@ -568,6 +594,12 @@ The main process inside the container will be sent SIGKILL.

    Remove one or more containers
    -link="": Remove the link instead of the actual container

Known Issues (rm)
~~~~~~~~~~~~~~~~~~~

* :issue:`197` indicates that ``docker kill`` may leave directories
  behind and make it difficult to remove the container.


Examples:
~~~~~~~~~

@@ -590,6 +622,15 @@ This will remove the container referenced under the link ``/redis``.

This will remove the underlying link between ``/webapp`` and the ``/redis`` containers removing all
network communication.

.. code-block:: bash

    $ docker rm `docker ps -a -q`


This command will delete all stopped containers. The command ``docker ps -a -q`` will return all
existing container IDs and pass them to the ``rm`` command which will delete them. Any running
containers will not be deleted.

.. _cli_rmi:

``rmi``
@@ -620,7 +661,7 @@ network communication.

    -h="": Container host name
    -i=false: Keep stdin open even if not attached
    -privileged=false: Give extended privileges to this container
    -m=0: Memory limit (in bytes)
    -m="": Memory limit (format: <number><optional unit>, where unit = b, k, m or g)
    -n=true: Enable networking for this container
    -p=[]: Map a network port to the container
    -rm=false: Automatically remove the container when it exits (incompatible with -d)

@@ -628,7 +669,7 @@ network communication.

    -u="": Username or UID
    -dns=[]: Set custom dns servers for the container
    -v=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro]. If "container-dir" is missing, then docker creates a new volume.
    -volumes-from="": Mount all volumes from the given container
    -volumes-from="": Mount all volumes from the given container(s)
    -entrypoint="": Overwrite the default entrypoint set by the image
    -w="": Working directory inside the container
    -lxc-conf=[]: Add custom lxc options -lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"

@@ -720,6 +761,17 @@ can access the network and environment of the redis container via
environment variables. The ``-name`` flag will assign the name ``console``
to the newly created container.

.. code-block:: bash

    docker run -volumes-from 777f7dc92da7,ba8c0c54f0f2:ro -i -t ubuntu pwd

The ``-volumes-from`` flag mounts all the defined volumes from the
reference containers. Containers can be specified by a comma-separated
list or by repetitions of the ``-volumes-from`` argument. The container
ID may be optionally suffixed with ``:ro`` or ``:rw`` to mount the volumes in
read-only or read-write mode, respectively. By default, the volumes are mounted
in the same mode (rw or ro) as the reference container.

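A sketch of how a ``-volumes-from`` value like the one above might be split into container IDs and modes. The type and function names are hypothetical, not Docker's internal API, and for simplicity the default mode here is read-write, whereas the real behavior inherits the reference container's mode.

.. code-block:: go

    package main

    import (
        "fmt"
        "strings"
    )

    // mount describes one volumes-from source (hypothetical type).
    type mount struct {
        ContainerID string
        Writable    bool
    }

    // parseVolumesFrom splits a -volumes-from value into container IDs
    // with their access mode, validating the :ro/:rw suffix.
    func parseVolumesFrom(value string) ([]mount, error) {
        var mounts []mount
        for _, spec := range strings.Split(value, ",") {
            id, mode := spec, "rw" // simplification: real docker inherits the source mode
            if i := strings.LastIndex(spec, ":"); i >= 0 {
                id, mode = spec[:i], spec[i+1:]
            }
            if mode != "ro" && mode != "rw" {
                return nil, fmt.Errorf("invalid mode %q for container %q", mode, id)
            }
            mounts = append(mounts, mount{ContainerID: id, Writable: mode == "rw"})
        }
        return mounts, nil
    }

    func main() {
        m, _ := parseVolumesFrom("777f7dc92da7,ba8c0c54f0f2:ro")
        fmt.Printf("%+v\n", m)
    }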
.. _cli_search:

``search``
@@ -40,7 +40,11 @@ html_additional_pages = {

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinxcontrib.httpdomain']
extensions = ['sphinxcontrib.httpdomain', 'sphinx.ext.extlinks']

# Configure extlinks
extlinks = { 'issue': ('https://github.com/dotcloud/docker/issues/%s',
                       'Issue ') }

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
@@ -10,13 +10,16 @@ Want to hack on Docker? Awesome!

The repository includes `all the instructions you need to get
started <https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md>`_.

The developer environment `Dockerfile <https://github.com/dotcloud/docker/blob/master/Dockerfile>`_
The `developer environment Dockerfile
<https://github.com/dotcloud/docker/blob/master/Dockerfile>`_
specifies the tools and versions used to test and build Docker.

If you're making changes to the documentation, see the
`README.md <https://github.com/dotcloud/docker/blob/master/docs/README.md>`_.

The documentation environment `Dockerfile <https://github.com/dotcloud/docker/blob/master/docs/Dockerfile>`_
The `documentation environment Dockerfile
<https://github.com/dotcloud/docker/blob/master/docs/Dockerfile>`_
specifies the tools and versions used to build the Documentation.

Further interesting details can be found in the `Packaging hints <https://github.com/dotcloud/docker/blob/master/hack/PACKAGERS.md>`_.
Further interesting details can be found in the `Packaging hints
<https://github.com/dotcloud/docker/blob/master/hack/PACKAGERS.md>`_.

@@ -86,7 +86,7 @@ http://0.0.0.0:5000/`` in the log output.

.. code-block:: bash

    WEB_PORT=$(sudo docker port $WEB_WORKER 5000)
    WEB_PORT=$(sudo docker port $WEB_WORKER 5000 | awk -F: '{ print $2 }')

Look up the public-facing port which is NAT-ed. Find the private port
used by the container and store it inside of the ``WEB_PORT`` variable.
@@ -102,26 +102,45 @@ Docker that way too. Vagrant 1.1 or higher is required.

   we need to set them there first. Make sure you have everything on
   amazon aws setup so you can (manually) deploy a new image to EC2.

   Note that where possible these variables are the same as those honored by
   the ec2 api tools.
   ::

       export AWS_ACCESS_KEY_ID=xxx
       export AWS_SECRET_ACCESS_KEY=xxx
       export AWS_ACCESS_KEY=xxx
       export AWS_SECRET_KEY=xxx
       export AWS_KEYPAIR_NAME=xxx
       export AWS_SSH_PRIVKEY=xxx
       export SSH_PRIVKEY_PATH=xxx

   The environment variables are:
       export BOX_NAME=xxx
       export AWS_REGION=xxx
       export AWS_AMI=xxx
       export AWS_INSTANCE_TYPE=xxx

   * ``AWS_ACCESS_KEY_ID`` - The API key used to make requests to AWS
   * ``AWS_SECRET_ACCESS_KEY`` - The secret key to make AWS API requests
   The required environment variables are:

   * ``AWS_ACCESS_KEY`` - The API key used to make requests to AWS
   * ``AWS_SECRET_KEY`` - The secret key to make AWS API requests
   * ``AWS_KEYPAIR_NAME`` - The name of the keypair used for this EC2 instance
   * ``AWS_SSH_PRIVKEY`` - The path to the private key for the named
   * ``SSH_PRIVKEY_PATH`` - The path to the private key for the named
     keypair, for example ``~/.ssh/docker.pem``

   There are a number of optional environment variables:

   * ``BOX_NAME`` - The name of the vagrant box to use. Defaults to
     ``ubuntu``.
   * ``AWS_REGION`` - The aws region to spawn the vm in. Defaults to
     ``us-east-1``.
   * ``AWS_AMI`` - The aws AMI to start with as a base. This must be
     an ubuntu 12.04 precise image. You must change this value if
     ``AWS_REGION`` is set to a value other than ``us-east-1``.
     This is because AMIs are region specific. Defaults to ``ami-69f5a900``.
   * ``AWS_INSTANCE_TYPE`` - The aws instance type. Defaults to ``t1.micro``.

   You can check if they are set correctly by doing something like

   ::

       echo $AWS_ACCESS_KEY_ID
       echo $AWS_ACCESS_KEY

6. Do the magic!

@@ -38,3 +38,10 @@ was when the container was stopped.

You can promote a container to an :ref:`image_def` with ``docker
commit``. Once a container is an image, you can use it as a parent for
new containers.

Container IDs
.............
All containers are identified by a 64 hexadecimal digit string (internally a 256bit
value). To simplify their use, a short ID of the first 12 characters can be used
on the commandline. There is a small possibility of short id collisions, so the
docker server will always return the long ID.

@@ -36,3 +36,11 @@ Base Image
..........

An image that has no parent is a **base image**.

Image IDs
.........
All images are identified by a 64 hexadecimal digit string (internally a 256bit
value). To simplify their use, a short ID of the first 12 characters can be used
on the command line. There is a small possibility of short id collisions, so the
docker server will always return the long ID.

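The 12-character short form is a simple prefix of the full ID, which is what the switch from ``container.ShortID()`` to ``utils.TruncateID(container.ID)`` earlier in this commit computes. A minimal sketch of that truncation, assuming only that the input ID is hex and at least 12 characters; the function name is ours.

.. code-block:: go

    package main

    import "fmt"

    // truncateID shortens a full 64-character ID to the 12-character
    // form shown on the command line.
    func truncateID(id string) string {
        if len(id) < 12 {
            return id
        }
        return id[:12]
    }

    func main() {
        full := "4386fb97867d4386fb97867d4386fb97867d4386fb97867d4386fb97867d12"
        fmt.Println(truncateID(full)) // 4386fb97867d
    }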
@@ -22,22 +22,37 @@ specify the path to it and manually start it.

    # Run docker in daemon mode
    sudo <path to>/docker -d &

Running an interactive shell
----------------------------
Download a pre-built image
--------------------------

.. code-block:: bash

    # Download an ubuntu image
    sudo docker pull ubuntu

This will find the ``ubuntu`` image by name in the :ref:`Central Index
<searching_central_index>` and download it from the top-level Central
Repository to a local image cache.

.. NOTE:: When the image has successfully downloaded, you will see a 12
   character hash ``539c0211cd76: Download complete`` which is the short
   form of the image ID. These short image IDs are the first 12 characters
   of the full image ID - which can be found using ``docker inspect`` or
   ``docker images -notrunc=true``

.. _dockergroup:

Running an interactive shell
----------------------------

.. code-block:: bash

    # Run an interactive shell in the ubuntu image,
    # allocate a tty, attach stdin and stdout
    # To detach the tty without exiting the shell,
    # use the escape sequence Ctrl-p + Ctrl-q
    sudo docker run -i -t ubuntu /bin/bash

.. _dockergroup:

Why ``sudo``?
-------------
@@ -116,6 +116,16 @@ core concepts of Docker where commits are cheap and containers can be
created from any point in an image's history, much like source
control.

Known Issues (RUN)
..................

* :issue:`783` is about file permissions problems that can occur when
  using the AUFS file system. You might notice it during an attempt to
  ``rm`` a file, for example. The issue describes a workaround.
* :issue:`2424` Locale will not be set automatically.


3.4 CMD
-------

@@ -211,8 +221,16 @@ destination container.

All new files and directories are created with mode 0755, uid and gid
0.

.. note::
   if you build using STDIN (``docker build - < somefile``), there is no build
   context, so the Dockerfile can only contain a URL-based ADD statement.

The copy obeys the following rules:

* The ``<src>`` path must be inside the *context* of the build; you cannot
  ``ADD ../something /something``, because the first step of a
  ``docker build`` is to send the context directory (and subdirectories) to
  the docker daemon.
* If ``<src>`` is a URL and ``<dest>`` does not end with a trailing slash,
  then a file is downloaded from the URL and copied to ``<dest>``.
* If ``<src>`` is a URL and ``<dest>`` does end with a trailing slash,

@@ -20,3 +20,4 @@ Contents:

    puppet
    host_integration
    working_with_volumes
    working_with_links_names

docs/sources/use/working_with_links_names.rst (new file, 104 lines)
@@ -0,0 +1,104 @@
:title: Working with Links and Names
:description: How to create and use links and names
:keywords: Examples, Usage, links, docker, documentation, examples, names, name, container naming

.. _working_with_links_names:

Working with Links and Names
============================

From version 0.6.5 you are now able to ``name`` a container and ``link`` it to another
container by referring to its name. This will create a parent -> child relationship
where the parent container can see selected information about its child.

.. _run_name:

Container Naming
----------------

.. versionadded:: v0.6.5

You can now name your container by using the ``-name`` flag. If no name is provided, Docker
will automatically generate a name. You can see this name using the ``docker ps`` command.

.. code-block:: bash

    # format is "sudo docker run -name <container_name> <image_name> <command>"
    $ sudo docker run -name test ubuntu /bin/bash

    # the "-a" flag shows all containers; only running containers are shown by default
    $ sudo docker ps -a
    CONTAINER ID        IMAGE          COMMAND     CREATED          STATUS    PORTS    NAMES
    2522602a0d99        ubuntu:12.04   /bin/bash   14 seconds ago   Exit 0             test

.. _run_link:

Links: service discovery for docker
-----------------------------------

.. versionadded:: v0.6.5

Links allow containers to discover and securely communicate with each other by using the
flag ``-link name:alias``. Inter-container communication can be disabled with the daemon
flag ``-icc=false``. With this flag set to false, Container A cannot access Container B
unless explicitly allowed via a link. This is a huge win for securing your containers.
When two containers are linked together Docker creates a parent child relationship
between the containers. The parent container will be able to access information via
environment variables of the child such as name, exposed ports, IP and other selected
environment variables.

When linking two containers Docker will use the exposed ports of the container to create
a secure tunnel for the parent to access. If a database container only exposes port 8080
then the linked container will only be allowed to access port 8080 and nothing else if
inter-container communication is set to false.

.. code-block:: bash

    # Example: there is an image called redis-2.6 that exposes the port 6379 and starts redis-server.
    # Let's name the container as "redis" based on that image and run it as daemon.
    $ sudo docker run -d -name redis redis-2.6

We can issue all the commands that you would expect using the name "redis": start, stop,
and attach all work by name. The name also allows us to link other containers
into this one.

Next, we can start a new web application that has a dependency on Redis and apply a link
to connect both containers. If you noticed when running our Redis server we did not use
the -p flag to publish the Redis port to the host system. Redis exposed port 6379 and
this is all we need to establish a link.

.. code-block:: bash

    # Linking the redis container as a child
    $ sudo docker run -t -i -link redis:db -name webapp ubuntu bash

When you specify -link redis:db you are telling Docker to link the container named redis
into this new container with the alias db. Environment variables are prefixed with the alias
so that the parent container can access network and environment information from the containers
that are linked into it.

If we inspect the environment variables of the second container, we would see all the information
about the child container.

.. code-block:: bash

    $ root@4c01db0b339c:/# env

    HOSTNAME=4c01db0b339c
    DB_NAME=/webapp/db
    TERM=xterm
    DB_PORT=tcp://172.17.0.8:6379
    DB_PORT_6379_TCP=tcp://172.17.0.8:6379
    DB_PORT_6379_TCP_PROTO=tcp
    DB_PORT_6379_TCP_ADDR=172.17.0.8
    DB_PORT_6379_TCP_PORT=6379
    PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
    PWD=/
    SHLVL=1
    HOME=/
    container=lxc
    _=/usr/bin/env
    root@4c01db0b339c:/#

Accessing the network information along with the environment of the child container allows
us to easily connect to the Redis service on the specific IP and port in the environment.
docs/theme/docker/layout.html (vendored)
@@ -129,7 +129,8 @@

    <div class="row footer">
        <div class="span12 tbox">
        <div class="tbox">
            <p>Docker is an open source project, sponsored by <a href="https://dotcloud.com">dotCloud</a>, under the <a href="https://github.com/dotcloud/docker/blob/master/LICENSE" title="Docker licence, hosted in the Github repository">apache 2.0 licence</a></p>
            <p>Docker is an open source project, sponsored by <a href="https://www.docker.com">Docker Inc.</a>, under the <a href="https://github.com/dotcloud/docker/blob/master/LICENSE" title="Docker licence, hosted in the Github repository">apache 2.0 licence</a></p>
            <p>Documentation proudly hosted by <a href="http://www.readthedocs.org">Read the Docs</a></p>
        </div>

        <div class="social links">
@@ -6,15 +6,21 @@ import (
	"log"
	"os"
	"runtime"
	"strings"
)

type Handler func(*Job) string

var globalHandlers map[string]Handler

func init() {
	globalHandlers = make(map[string]Handler)
}

func Register(name string, handler Handler) error {
	if globalHandlers == nil {
		globalHandlers = make(map[string]Handler)
	_, exists := globalHandlers[name]
	if exists {
		return fmt.Errorf("Can't overwrite global handler for command %s", name)
	}
	globalHandlers[name] = handler
	return nil

@@ -26,6 +32,22 @@ func Register(name string, handler Handler) error {

type Engine struct {
	root     string
	handlers map[string]Handler
	hack     Hack // data for temporary hackery (see hack.go)
	id       string
}

func (eng *Engine) Root() string {
	return eng.root
}

func (eng *Engine) Register(name string, handler Handler) error {
	eng.Logf("Register(%s) (handlers=%v)", name, eng.handlers)
	_, exists := eng.handlers[name]
	if exists {
		return fmt.Errorf("Can't overwrite handler for command %s", name)
	}
	eng.handlers[name] = handler
	return nil
}

// New initializes a new engine managing the directory specified at `root`.

@@ -56,16 +78,25 @@ func New(root string) (*Engine, error) {
	}
	eng := &Engine{
		root:     root,
		handlers: globalHandlers,
		handlers: make(map[string]Handler),
		id:       utils.RandomString(),
	}
	// Copy existing global handlers
	for k, v := range globalHandlers {
		eng.handlers[k] = v
	}
	return eng, nil
}

func (eng *Engine) String() string {
	return fmt.Sprintf("%s|%s", eng.Root(), eng.id[:8])
}

// Job creates a new job which can later be executed.
// This function mimics `Command` from the standard os/exec package.
func (eng *Engine) Job(name string, args ...string) *Job {
	job := &Job{
		eng:    eng,
		Eng:    eng,
		Name:   name,
		Args:   args,
		Stdin:  os.Stdin,

@@ -78,3 +109,8 @@ func (eng *Engine) Job(name string, args ...string) *Job {
	}
	return job
}

func (eng *Engine) Logf(format string, args ...interface{}) (n int, err error) {
	prefixedFormat := fmt.Sprintf("[%s] %s\n", eng, strings.TrimRight(format, "\n"))
	return fmt.Fprintf(os.Stderr, prefixedFormat, args...)
}

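The change above makes ``engine.New`` copy ``globalHandlers`` into a per-engine map, so handlers registered later on one engine no longer leak into others. A minimal sketch of the registration flow under that model; the ``hello`` handler, root path, and success status string are our assumptions, only the ``Register``/``New``/``Job`` calls come from the diff.

.. code-block:: go

    package main

    import (
        "fmt"
        "log"

        "github.com/dotcloud/docker/engine"
    )

    // A hypothetical plugin registers a global handler at init time.
    func init() {
        engine.Register("hello", func(job *engine.Job) string {
            fmt.Fprintf(job.Stdout, "hello %s\n", job.Args[0])
            return "0" // "0" appears to be the success status (see StatusString)
        })
    }

    func main() {
        // New snapshots the global handlers into this engine's own map.
        eng, err := engine.New("/tmp/engine-demo")
        if err != nil {
            log.Fatal(err)
        }
        if err := eng.Job("hello", "world").Run(); err != nil {
            log.Fatal(err)
        }
    }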
engine/hack.go (new file, 21 lines)
@@ -0,0 +1,21 @@
package engine

type Hack map[string]interface{}

func (eng *Engine) Hack_GetGlobalVar(key string) interface{} {
	if eng.hack == nil {
		return nil
	}
	val, exists := eng.hack[key]
	if !exists {
		return nil
	}
	return val
}

func (eng *Engine) Hack_SetGlobalVar(key string, val interface{}) {
	if eng.hack == nil {
		eng.hack = make(Hack)
	}
	eng.hack[key] = val
}
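A short sketch of this temporary "hack" store: any code holding the same ``*Engine`` sees values set on it. The ``answer`` key is illustrative, not a documented contract, and callers must know the concrete type they stored.

.. code-block:: go

    package main

    import (
        "fmt"
        "log"

        "github.com/dotcloud/docker/engine"
    )

    func main() {
        eng, err := engine.New("/tmp/engine-demo")
        if err != nil {
            log.Fatal(err)
        }
        // Stash an arbitrary value on the engine...
        eng.Hack_SetGlobalVar("answer", 42)
        // ...and read it back, asserting the concrete type ourselves.
        if v := eng.Hack_GetGlobalVar("answer"); v != nil {
            fmt.Println(v.(int))
        }
    }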
engine/job.go
@@ -1,11 +1,16 @@
package engine

import (
	"bufio"
	"bytes"
	"encoding/json"
	"fmt"
	"github.com/dotcloud/docker/utils"
	"io"
	"io/ioutil"
	"os"
	"strconv"
	"strings"
	"sync"
)

// A job is the fundamental unit of work in the docker engine.

@@ -22,24 +27,43 @@ import (
// This allows for richer error reporting.
//
type Job struct {
	eng     *Engine
	Eng     *Engine
	Name    string
	Args    []string
	env     []string
	Stdin   io.ReadCloser
	Stdout  io.WriteCloser
	Stderr  io.WriteCloser
	Stdin   io.Reader
	Stdout  io.Writer
	Stderr  io.Writer
	handler func(*Job) string
	status  string
	onExit  []func()
}

// Run executes the job and blocks until the job completes.
// If the job returns a failure status, an error is returned
// which includes the status.
func (job *Job) Run() error {
	randId := utils.RandomString()[:4]
	fmt.Printf("Job #%s: %s\n", randId, job)
	defer fmt.Printf("Job #%s: %s = '%s'", randId, job, job.status)
	defer func() {
		var wg sync.WaitGroup
		for _, f := range job.onExit {
			wg.Add(1)
			go func(f func()) {
				f()
				wg.Done()
			}(f)
		}
		wg.Wait()
	}()
	if job.Stdout != nil && job.Stdout != os.Stdout {
		job.Stdout = io.MultiWriter(job.Stdout, os.Stdout)
	}
	if job.Stderr != nil && job.Stderr != os.Stderr {
		job.Stderr = io.MultiWriter(job.Stderr, os.Stderr)
	}
	job.Eng.Logf("+job %s", job.CallString())
	defer func() {
		job.Eng.Logf("-job %s%s", job.CallString(), job.StatusString())
	}()
	if job.handler == nil {
		job.status = "command not found"
	} else {

@@ -51,9 +75,87 @@ func (job *Job) Run() error {
	return nil
}

func (job *Job) StdoutParseLines(dst *[]string, limit int) {
	job.parseLines(job.StdoutPipe(), dst, limit)
}

func (job *Job) StderrParseLines(dst *[]string, limit int) {
	job.parseLines(job.StderrPipe(), dst, limit)
}

func (job *Job) parseLines(src io.Reader, dst *[]string, limit int) {
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		scanner := bufio.NewScanner(src)
		for scanner.Scan() {
			// If the limit is reached, flush the rest of the source and return
			if limit > 0 && len(*dst) >= limit {
				io.Copy(ioutil.Discard, src)
				return
			}
			line := scanner.Text()
			// Append the line (with delimiter removed)
			*dst = append(*dst, line)
		}
	}()
	job.onExit = append(job.onExit, wg.Wait)
}

func (job *Job) StdoutParseString(dst *string) {
	lines := make([]string, 0, 1)
	job.StdoutParseLines(&lines, 1)
	job.onExit = append(job.onExit, func() {
		if len(lines) >= 1 {
			*dst = lines[0]
		}
	})
}

func (job *Job) StderrParseString(dst *string) {
	lines := make([]string, 0, 1)
	job.StderrParseLines(&lines, 1)
	job.onExit = append(job.onExit, func() { *dst = lines[0] })
}

func (job *Job) StdoutPipe() io.ReadCloser {
	r, w := io.Pipe()
	job.Stdout = w
	job.onExit = append(job.onExit, func() { w.Close() })
	return r
}

func (job *Job) StderrPipe() io.ReadCloser {
	r, w := io.Pipe()
	job.Stderr = w
	job.onExit = append(job.onExit, func() { w.Close() })
	return r
}

func (job *Job) CallString() string {
	return fmt.Sprintf("%s(%s)", job.Name, strings.Join(job.Args, ", "))
}

func (job *Job) StatusString() string {
	// FIXME: if a job returns the empty string, it will be printed
	// as not having returned.
	// (this only affects String which is a convenience function).
	if job.status != "" {
		var okerr string
		if job.status == "0" {
			okerr = "OK"
		} else {
			okerr = "ERR"
		}
		return fmt.Sprintf(" = %s (%s)", okerr, job.status)
	}
	return ""
}

// String returns a human-readable description of `job`
func (job *Job) String() string {
	return strings.Join(append([]string{job.Name}, job.Args...), " ")
	return fmt.Sprintf("%s.%s%s", job.Eng, job.CallString(), job.StatusString())
}

func (job *Job) Getenv(key string) (value string) {

@@ -90,6 +192,19 @@ func (job *Job) SetenvBool(key string, value bool) {
	}
}

func (job *Job) GetenvInt(key string) int64 {
	s := strings.Trim(job.Getenv(key), " \t")
	val, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		return -1
	}
	return val
}

func (job *Job) SetenvInt(key string, value int64) {
	job.Setenv(key, fmt.Sprintf("%d", value))
}

func (job *Job) GetenvList(key string) []string {
	sval := job.Getenv(key)
	l := make([]string, 0, 1)

@@ -111,3 +226,109 @@ func (job *Job) SetenvList(key string, value []string) error {

func (job *Job) Setenv(key, value string) {
	job.env = append(job.env, key+"="+value)
}

// DecodeEnv decodes `src` as a json dictionary, and adds
// each decoded key-value pair to the environment.
//
// If `text` cannot be decoded as a json dictionary, an error
// is returned.
func (job *Job) DecodeEnv(src io.Reader) error {
	m := make(map[string]interface{})
	if err := json.NewDecoder(src).Decode(&m); err != nil {
		return err
	}
	for k, v := range m {
		// FIXME: we fix-convert float values to int, because
		// encoding/json decodes integers to float64, but cannot encode them back.
		// (See http://golang.org/src/pkg/encoding/json/decode.go#L46)
		if fval, ok := v.(float64); ok {
			job.SetenvInt(k, int64(fval))
		} else if sval, ok := v.(string); ok {
			job.Setenv(k, sval)
		} else if val, err := json.Marshal(v); err == nil {
			job.Setenv(k, string(val))
		} else {
			job.Setenv(k, fmt.Sprintf("%v", v))
		}
	}
	return nil
}

func (job *Job) EncodeEnv(dst io.Writer) error {
	m := make(map[string]interface{})
	for k, v := range job.Environ() {
		var val interface{}
		if err := json.Unmarshal([]byte(v), &val); err == nil {
			// FIXME: we fix-convert float values to int, because
			// encoding/json decodes integers to float64, but cannot encode them back.
			// (See http://golang.org/src/pkg/encoding/json/decode.go#L46)
			if fval, isFloat := val.(float64); isFloat {
				val = int(fval)
			}
			m[k] = val
		} else {
			m[k] = v
		}
	}
	if err := json.NewEncoder(dst).Encode(&m); err != nil {
		return err
	}
	return nil
}

func (job *Job) ExportEnv(dst interface{}) (err error) {
	defer func() {
		if err != nil {
			err = fmt.Errorf("ExportEnv %s", err)
		}
	}()
	var buf bytes.Buffer
	// step 1: encode/marshal the env to an intermediary json representation
	if err := job.EncodeEnv(&buf); err != nil {
		return err
	}
	// step 2: decode/unmarshal the intermediary json into the destination object
	if err := json.NewDecoder(&buf).Decode(dst); err != nil {
		return err
	}
	return nil
}

func (job *Job) ImportEnv(src interface{}) (err error) {
	defer func() {
		if err != nil {
			err = fmt.Errorf("ImportEnv: %s", err)
		}
	}()
	var buf bytes.Buffer
	if err := json.NewEncoder(&buf).Encode(src); err != nil {
		return err
	}
	if err := job.DecodeEnv(&buf); err != nil {
		return err
	}
	return nil
}

func (job *Job) Environ() map[string]string {
	m := make(map[string]string)
	for _, kv := range job.env {
		parts := strings.SplitN(kv, "=", 2)
		m[parts[0]] = parts[1]
	}
	return m
}

func (job *Job) Logf(format string, args ...interface{}) (n int, err error) {
	prefixedFormat := fmt.Sprintf("[%s] %s\n", job, strings.TrimRight(format, "\n"))
	return fmt.Fprintf(job.Stderr, prefixedFormat, args...)
}

func (job *Job) Printf(format string, args ...interface{}) (n int, err error) {
	return fmt.Fprintf(job.Stdout, format, args...)
}

func (job *Job) Errorf(format string, args ...interface{}) (n int, err error) {
	return fmt.Fprintf(job.Stderr, format, args...)
}

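A caller-side sketch of the new job API: ``StdoutParseString`` captures the first line of a job's output via a pipe that is closed and drained by the ``onExit`` hooks, and ``ExportEnv`` round-trips the job environment through JSON into a struct. The ``whoami`` handler and the ``Root`` env key are our inventions so the sketch is self-contained; only the ``Job`` methods come from the diff.

.. code-block:: go

    package main

    import (
        "fmt"
        "log"

        "github.com/dotcloud/docker/engine"
    )

    func main() {
        // Toy handler: prints a line and records a value in the job env.
        engine.Register("whoami", func(job *engine.Job) string {
            job.Printf("engine %s\n", job.Eng.Root())
            job.Setenv("Root", job.Eng.Root())
            return "0"
        })
        eng, err := engine.New("/tmp/engine-demo")
        if err != nil {
            log.Fatal(err)
        }
        job := eng.Job("whoami")
        var firstLine string
        job.StdoutParseString(&firstLine) // must be wired up before Run
        if err := job.Run(); err != nil {
            log.Fatal(err)
        }
        fmt.Println("stdout:", firstLine)

        // ExportEnv copies the env the handler set into a struct via JSON.
        var cfg struct{ Root string }
        if err := job.ExportEnv(&cfg); err != nil {
            log.Fatal(err)
        }
        fmt.Println("root:", cfg.Root)
    }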
@@ -15,7 +15,7 @@ func init() {
	Register("dummy", func(job *Job) string { return "" })
}

func mkEngine(t *testing.T) *Engine {
func newTestEngine(t *testing.T) *Engine {
	// Use the caller function name as a prefix.
	// This helps trace temp directories back to their test.
	pc, _, _, _ := runtime.Caller(1)

@@ -38,5 +38,5 @@ func mkEngine(t *testing.T) *Engine {
}

func mkJob(t *testing.T, name string, args ...string) *Job {
	return mkEngine(t).Job(name, args...)
	return newTestEngine(t).Job(name, args...)
}
@@ -48,7 +48,7 @@ type WalkFunc func(fullPath string, entity *Entity) error

// Graph database for storing entities and their relationships
type Database struct {
	conn *sql.DB
	mux  sync.Mutex
	mux  sync.RWMutex
}

// Create a new graph database initialized with a root entity

@@ -138,7 +138,14 @@ func (db *Database) Set(fullPath, id string) (*Entity, error) {

// Return true if a name already exists in the database
func (db *Database) Exists(name string) bool {
	return db.Get(name) != nil
	db.mux.RLock()
	defer db.mux.RUnlock()

	e, err := db.get(name)
	if err != nil {
		return false
	}
	return e != nil
}

func (db *Database) setEdge(parentPath, name string, e *Entity) error {

@@ -165,6 +172,9 @@ func (db *Database) RootEntity() *Entity {

// Return the entity for a given path
func (db *Database) Get(name string) *Entity {
	db.mux.RLock()
	defer db.mux.RUnlock()

	e, err := db.get(name)
	if err != nil {
		return nil

@@ -200,23 +210,36 @@ func (db *Database) get(name string) (*Entity, error) {

// List all entities from the name
// The key will be the full path of the entity
func (db *Database) List(name string, depth int) Entities {
	db.mux.RLock()
	defer db.mux.RUnlock()

	out := Entities{}
	e, err := db.get(name)
	if err != nil {
		return out
	}
	for c := range db.children(e, name, depth) {

	children, err := db.children(e, name, depth, nil)
	if err != nil {
		return out
	}

	for _, c := range children {
		out[c.FullPath] = c.Entity
	}
	return out
}

// Walk through the child graph of an entity, calling walkFunc for each child entity.
// It is safe for walkFunc to call graph functions.
func (db *Database) Walk(name string, walkFunc WalkFunc, depth int) error {
	e, err := db.get(name)
	children, err := db.Children(name, depth)
	if err != nil {
		return err
	}
	for c := range db.children(e, name, depth) {

	// Note: the database lock must not be held while calling walkFunc
	for _, c := range children {
		if err := walkFunc(c.FullPath, c.Entity); err != nil {
			return err
		}

@@ -224,8 +247,24 @@ func (db *Database) Walk(name string, walkFunc WalkFunc, depth int) error {
	return nil
}

// Return the children of the specified entity
func (db *Database) Children(name string, depth int) ([]WalkMeta, error) {
	db.mux.RLock()
	defer db.mux.RUnlock()

	e, err := db.get(name)
	if err != nil {
		return nil, err
	}

	return db.children(e, name, depth, nil)
}

// Return the reference count for a specified id
func (db *Database) Refs(id string) int {
	db.mux.RLock()
	defer db.mux.RUnlock()

	var count int
	if err := db.conn.QueryRow("SELECT COUNT(*) FROM edge WHERE entity_id = ?;", id).Scan(&count); err != nil {
		return 0

@@ -235,6 +274,9 @@ func (db *Database) Refs(id string) int {

// Return all the id's path references
func (db *Database) RefPaths(id string) Edges {
	db.mux.RLock()
	defer db.mux.RUnlock()

	refs := Edges{}

	rows, err := db.conn.Query("SELECT name, parent_id FROM edge WHERE entity_id = ?;", id)

@@ -356,56 +398,51 @@ type WalkMeta struct {
	Edge *Edge
}

func (db *Database) children(e *Entity, name string, depth int) <-chan WalkMeta {
	out := make(chan WalkMeta)
func (db *Database) children(e *Entity, name string, depth int, entities []WalkMeta) ([]WalkMeta, error) {
	if e == nil {
		close(out)
		return out
		return entities, nil
	}

	go func() {
		rows, err := db.conn.Query("SELECT entity_id, name FROM edge where parent_id = ?;", e.id)
		if err != nil {
			close(out)
	rows, err := db.conn.Query("SELECT entity_id, name FROM edge where parent_id = ?;", e.id)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	for rows.Next() {
		var entityId, entityName string
		if err := rows.Scan(&entityId, &entityName); err != nil {
			return nil, err
		}
		child := &Entity{entityId}
		edge := &Edge{
			ParentID: e.id,
			Name:     entityName,
			EntityID: child.id,
		}
		defer rows.Close()

		for rows.Next() {
			var entityId, entityName string
			if err := rows.Scan(&entityId, &entityName); err != nil {
				// Log error
				continue
			}
			child := &Entity{entityId}
			edge := &Edge{
				ParentID: e.id,
				Name:     entityName,
				EntityID: child.id,
			}
			meta := WalkMeta{
				Parent:   e,
				Entity:   child,
				FullPath: path.Join(name, edge.Name),
				Edge:     edge,
			}

		meta := WalkMeta{
			Parent:   e,
			Entity:   child,
			FullPath: path.Join(name, edge.Name),
			Edge:     edge,
		}
		entities = append(entities, meta)

			out <- meta
			if depth == 0 {
				continue
			}
		if depth != 0 {
			nDepth := depth
			if depth != -1 {
				nDepth -= 1
			}
			sc := db.children(child, meta.FullPath, nDepth)
			for c := range sc {
				out <- c
			entities, err = db.children(child, meta.FullPath, nDepth, entities)
			if err != nil {
				return nil, err
			}
		}
	}
		close(out)
	}()
	return out
}

	return entities, nil
}

// Return the entity based on the parent path and name

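The rewrite above replaces the channel-based traversal with a slice: ``Children`` gathers matches under the read lock, and ``Walk`` then invokes the callback with no lock held, so the callback may safely call back into the database. A sketch built only on the API shown in this diff; the package name is an assumption.

.. code-block:: go

    package gograph // assumed package name for the graph database above

    import "fmt"

    // printTree walks the whole subtree under root and prints each
    // child's full path; a depth of -1 recurses without limit.
    func printTree(db *Database, root string) error {
        return db.Walk(root, func(fullPath string, e *Entity) error {
            fmt.Println(fullPath)
            return nil
        }, -1)
    }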
@@ -5,7 +5,7 @@ It is a curated selection of planned improvements which are either important, difficult, or both.

For a more complete view of planned and requested improvements, see [the Github issues](https://github.com/dotcloud/docker/issues).

Tu suggest changes to the roadmap, including additions, please write the change as if it were already in effect, and make a pull request.
To suggest changes to the roadmap, including additions, please write the change as if it were already in effect, and make a pull request.


## Container wiring and service discovery
@@ -1,14 +1,16 @@
# VERSION:        0.22
# DOCKER-VERSION  0.6.3
# AUTHOR:         Daniel Mizyrycki <daniel@dotcloud.com>
# DESCRIPTION:    Deploy docker-ci on Amazon EC2
# VERSION:        0.25
# DOCKER-VERSION  0.6.6
# AUTHOR:         Daniel Mizyrycki <daniel@docker.com>
# DESCRIPTION:    Deploy docker-ci on Digital Ocean
# COMMENTS:
#     CONFIG_JSON is an environment variable json string loaded as:
#
# export CONFIG_JSON='
#     { "AWS_TAG": "EC2_instance_name",
#       "AWS_ACCESS_KEY": "EC2_access_key",
#       "AWS_SECRET_KEY": "EC2_secret_key",
#     { "DROPLET_NAME":       "docker-ci",
#       "DO_CLIENT_ID":       "Digital_Ocean_client_id",
#       "DO_API_KEY":         "Digital_Ocean_api_key",
#       "DOCKER_KEY_ID":      "Digital_Ocean_ssh_key_id",
#       "DOCKER_CI_KEY_PATH": "docker-ci_private_key_path",
#       "DOCKER_CI_PUB":      "$(cat docker-ci_ssh_public_key.pub)",
#       "DOCKER_CI_KEY":      "$(cat docker-ci_ssh_private_key.key)",
#       "BUILDBOT_PWD":       "Buildbot_server_password",

@@ -33,9 +35,11 @@

from ubuntu:12.04

run echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > /etc/apt/sources.list
run apt-get update; apt-get install -y python2.7 python-dev python-pip ssh rsync less vim
run pip install boto fabric
run echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' \
    > /etc/apt/sources.list
run apt-get update; apt-get install -y git python2.7 python-dev libevent-dev \
    python-pip ssh rsync less vim
run pip install requests fabric

# Add deployment code and set default container command
add . /docker-ci
hack/infrastructure/docker-ci/VERSION (new file)

@@ -0,0 +1 @@
0.4.5
@@ -43,7 +43,7 @@ c['slavePortnum'] = PORT_MASTER

# Schedulers
c['schedulers'] = [ForceScheduler(name='trigger', builderNames=['docker',
    'index','registry','coverage','nightlyrelease'])]
    'index','registry','docker-coverage','registry-coverage','nightlyrelease'])]
c['schedulers'] += [SingleBranchScheduler(name="all", treeStableTimer=None,
    change_filter=filter.ChangeFilter(branch='master',
    repository='https://github.com/dotcloud/docker'), builderNames=['docker'])]

@@ -51,7 +51,7 @@ c['schedulers'] += [SingleBranchScheduler(name='pullrequest',
    change_filter=filter.ChangeFilter(category='github_pullrequest'), treeStableTimer=None,
    builderNames=['pullrequest'])]
c['schedulers'] += [Nightly(name='daily', branch=None, builderNames=['nightlyrelease',
    'coverage'], hour=7, minute=00)]
    'docker-coverage','registry-coverage'], hour=7, minute=00)]
c['schedulers'] += [Nightly(name='every4hrs', branch=None, builderNames=['registry','index'],
    hour=range(0,24,4), minute=15)]

@@ -76,17 +76,25 @@ c['builders'] += [BuilderConfig(name='pullrequest',slavenames=['buildworker'],

# Docker coverage test
factory = BuildFactory()
factory.addStep(ShellCommand(description='Coverage', logEnviron=False,
factory.addStep(ShellCommand(description='docker-coverage', logEnviron=False,
    usePTY=True, command='{0}/docker-coverage/coverage-docker.sh'.format(
    DOCKER_CI_PATH)))
c['builders'] += [BuilderConfig(name='coverage',slavenames=['buildworker'],
c['builders'] += [BuilderConfig(name='docker-coverage',slavenames=['buildworker'],
    factory=factory)]

# Docker registry coverage test
factory = BuildFactory()
factory.addStep(ShellCommand(description='registry-coverage', logEnviron=False,
    usePTY=True, command='docker run registry_coverage'.format(
    DOCKER_CI_PATH)))
c['builders'] += [BuilderConfig(name='registry-coverage',slavenames=['buildworker'],
    factory=factory)]

# Registry functional test
factory = BuildFactory()
factory.addStep(ShellCommand(description='registry', logEnviron=False,
    command='. {0}/master/credentials.cfg; '
    '/docker-ci/functionaltests/test_registry.sh'.format(BUILDBOT_PATH),
    '{1}/functionaltests/test_registry.sh'.format(BUILDBOT_PATH, DOCKER_CI_PATH),
    usePTY=True))
c['builders'] += [BuilderConfig(name='registry',slavenames=['buildworker'],
    factory=factory)]

@@ -95,16 +103,17 @@ c['builders'] += [BuilderConfig(name='registry',slavenames=['buildworker'],

factory = BuildFactory()
factory.addStep(ShellCommand(description='index', logEnviron=False,
    command='. {0}/master/credentials.cfg; '
    '/docker-ci/functionaltests/test_index.py'.format(BUILDBOT_PATH),
    '{1}/functionaltests/test_index.py'.format(BUILDBOT_PATH, DOCKER_CI_PATH),
    usePTY=True))
c['builders'] += [BuilderConfig(name='index',slavenames=['buildworker'],
    factory=factory)]

# Docker nightly release
nightlyrelease_cmd = ('docker version; docker run -i -t -privileged -e AWS_S3_BUCKET='
    'test.docker.io dockerbuilder hack/dind dockerbuild.sh')
factory = BuildFactory()
factory.addStep(ShellCommand(description='NightlyRelease', logEnviron=False,
    usePTY=True, command='docker run -privileged'
    ' -e AWS_S3_BUCKET=test.docker.io dockerbuilder'))
factory.addStep(ShellCommand(description='NightlyRelease',logEnviron=False,
    usePTY=True, command=nightlyrelease_cmd))
c['builders'] += [BuilderConfig(name='nightlyrelease',slavenames=['buildworker'],
    factory=factory)]

@ -1,11 +1,11 @@
|
|||
#!/usr/bin/env python
|
||||
|
||||
import os, sys, re, json, base64
|
||||
from boto.ec2.connection import EC2Connection
|
||||
import os, sys, re, json, requests, base64
|
||||
from subprocess import call
|
||||
from fabric import api
|
||||
from fabric.api import cd, run, put, sudo
|
||||
from os import environ as env
|
||||
from datetime import datetime
|
||||
from time import sleep
|
||||
|
||||
# Remove SSH private key as it needs more processing
|
||||
|
@ -20,42 +20,41 @@ for key in CONFIG:
|
|||
env['DOCKER_CI_KEY'] = re.sub('^.+"DOCKER_CI_KEY".+?"(.+?)".+','\\1',
|
||||
env['CONFIG_JSON'],flags=re.DOTALL)
|
||||
|
||||
|
||||
AWS_TAG = env.get('AWS_TAG','docker-ci')
|
||||
AWS_KEY_NAME = 'dotcloud-dev' # Same as CONFIG_JSON['DOCKER_CI_PUB']
|
||||
AWS_AMI = 'ami-d582d6bc' # Ubuntu 13.04
|
||||
AWS_REGION = 'us-east-1'
|
||||
AWS_TYPE = 'm1.small'
|
||||
AWS_SEC_GROUPS = 'gateway'
|
||||
AWS_IMAGE_USER = 'ubuntu'
|
||||
DROPLET_NAME = env.get('DROPLET_NAME','docker-ci')
|
||||
TIMEOUT = 120 # Seconds before timeout droplet creation
|
||||
IMAGE_ID = 1004145 # Docker on Ubuntu 13.04
|
||||
REGION_ID = 4 # New York 2
|
||||
SIZE_ID = 62 # memory 2GB
|
||||
DO_IMAGE_USER = 'root' # Image user on Digital Ocean
|
||||
API_URL = 'https://api.digitalocean.com/'
|
||||
DOCKER_PATH = '/go/src/github.com/dotcloud/docker'
|
||||
DOCKER_CI_PATH = '/docker-ci'
|
||||
CFG_PATH = '{}/buildbot'.format(DOCKER_CI_PATH)
|
||||
|
||||
|
||||
class AWS_EC2:
|
||||
'''Amazon EC2'''
|
||||
def __init__(self, access_key, secret_key):
|
||||
class DigitalOcean():
|
||||
|
||||
def __init__(self, key, client):
|
||||
'''Set default API parameters'''
|
||||
self.handler = EC2Connection(access_key, secret_key)
|
||||
def create_instance(self, tag, instance_type):
|
||||
reservation = self.handler.run_instances(**instance_type)
|
||||
instance = reservation.instances[0]
|
||||
sleep(10)
|
||||
while instance.state != 'running':
|
||||
sleep(5)
|
||||
instance.update()
|
||||
print "Instance state: %s" % (instance.state)
|
||||
instance.add_tag("Name",tag)
|
||||
print "instance %s done!" % (instance.id)
|
||||
return instance.ip_address
|
||||
def get_instances(self):
|
||||
return self.handler.get_all_instances()
|
||||
def get_tags(self):
|
||||
return dict([(i.instances[0].id, i.instances[0].tags['Name'])
|
||||
for i in self.handler.get_all_instances() if i.instances[0].tags])
|
||||
def del_instance(self, instance_id):
|
||||
self.handler.terminate_instances(instance_ids=[instance_id])
|
||||
self.key = key
|
||||
self.client = client
|
||||
self.api_url = API_URL
|
||||
|
||||
def api(self, cmd_path, api_arg={}):
|
||||
'''Make api call'''
|
||||
api_arg.update({'api_key':self.key, 'client_id':self.client})
|
||||
resp = requests.get(self.api_url + cmd_path, params=api_arg).text
|
||||
resp = json.loads(resp)
|
||||
if resp['status'] != 'OK':
|
||||
raise Exception(resp['error_message'])
|
||||
return resp
|
||||
|
||||
def droplet_data(self, name):
|
||||
'''Get droplet data'''
|
||||
data = self.api('droplets')
|
||||
data = [droplet for droplet in data['droplets']
|
||||
if droplet['name'] == name]
|
||||
return data[0] if data else {}
|
||||
|
||||
|
||||
def json_fmt(data):

@ -63,20 +62,36 @@ def json_fmt(data):
    return json.dumps(data, sort_keys = True, indent = 2)


-# Create EC2 API handler
-ec2 = AWS_EC2(env['AWS_ACCESS_KEY'], env['AWS_SECRET_KEY'])
+do = DigitalOcean(env['DO_API_KEY'], env['DO_CLIENT_ID'])

-# Stop processing if AWS_TAG exists on EC2
-if AWS_TAG in ec2.get_tags().values():
-    print ('Instance: {} already deployed. Not further processing.'
-        .format(AWS_TAG))
+# Get DROPLET_NAME data
+data = do.droplet_data(DROPLET_NAME)
+
+# Stop processing if DROPLET_NAME exists on Digital Ocean
+if data:
+    print ('Droplet: {} already deployed. Not further processing.'
+        .format(DROPLET_NAME))
    exit(1)

-ip = ec2.create_instance(AWS_TAG, {'image_id':AWS_AMI, 'instance_type':AWS_TYPE,
-    'security_groups':[AWS_SEC_GROUPS], 'key_name':AWS_KEY_NAME})
+# Create droplet
+do.api('droplets/new', {'name':DROPLET_NAME, 'region_id':REGION_ID,
+    'image_id':IMAGE_ID, 'size_id':SIZE_ID,
+    'ssh_key_ids':[env['DOCKER_KEY_ID']]})

-# Wait 30 seconds for the machine to boot
-sleep(30)
+# Wait for droplet to be created.
+start_time = datetime.now()
+while (data.get('status','') != 'active' and (
+ datetime.now()-start_time).seconds < TIMEOUT):
+    data = do.droplet_data(DROPLET_NAME)
+    print data['status']
+    sleep(3)
+
+# Wait for the machine to boot
+sleep(15)
+
+# Get droplet IP
+ip = str(data['ip_address'])
+print 'droplet: {} ip: {}'.format(DROPLET_NAME, ip)
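The droplet bring-up above is a create-then-poll loop: re-read droplet_data every 3 seconds until the status is 'active', and give up after TIMEOUT. The same loop in Go, with fetchStatus as a hypothetical stand-in for droplet_data:

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitForActive polls fetchStatus until it reports "active" or the
// timeout elapses, mirroring the TIMEOUT loop in the script above.
func waitForActive(fetchStatus func() string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		status := fetchStatus()
		fmt.Println(status)
		if status == "active" {
			return nil
		}
		time.Sleep(3 * time.Second)
	}
	return errors.New("timed out waiting for droplet to become active")
}

func main() {
	calls := 0
	err := waitForActive(func() string {
		calls++
		if calls > 2 {
			return "active"
		}
		return "new"
	}, 120*time.Second)
	fmt.Println("done, err =", err)
}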
# Create docker-ci ssh private key so docker-ci docker container can communicate
# with its EC2 instance

@ -86,7 +101,7 @@ os.chmod('/root/.ssh/id_rsa',0600)
open('/root/.ssh/config','w').write('StrictHostKeyChecking no\n')

api.env.host_string = ip
-api.env.user = AWS_IMAGE_USER
+api.env.user = DO_IMAGE_USER
api.env.key_filename = '/root/.ssh/id_rsa'

# Correct timezone

@ -100,20 +115,17 @@ sudo("echo '{}' >> /root/.ssh/authorized_keys".format(env['DOCKER_CI_PUB']))
credentials = {
    'AWS_ACCESS_KEY': env['PKG_ACCESS_KEY'],
    'AWS_SECRET_KEY': env['PKG_SECRET_KEY'],
-    'GPG_PASSPHRASE': env['PKG_GPG_PASSPHRASE'],
-    'INDEX_AUTH': env['INDEX_AUTH']}
+    'GPG_PASSPHRASE': env['PKG_GPG_PASSPHRASE']}
open(DOCKER_CI_PATH + '/nightlyrelease/release_credentials.json', 'w').write(
    base64.b64encode(json.dumps(credentials)))
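release_credentials.json is simply base64(json.dumps(credentials)); the dockerbuild.sh script further down reverses both steps to export the keys into its environment. A Go sketch of that round trip, using the key names from the dict above (the values are placeholders):

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

func main() {
	creds := map[string]string{
		"AWS_ACCESS_KEY": "xxx",
		"AWS_SECRET_KEY": "yyy",
		"GPG_PASSPHRASE": "zzz",
	}
	// Encode: JSON first, then base64 — the file format written above.
	raw, _ := json.Marshal(creds)
	encoded := base64.StdEncoding.EncodeToString(raw)

	// Decode: reverse the two steps, as dockerbuild.sh does with python -c.
	decoded, _ := base64.StdEncoding.DecodeString(encoded)
	out := map[string]string{}
	json.Unmarshal(decoded, &out)
	for k, v := range out {
		fmt.Printf("export %s=%q\n", k, v)
	}
}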
# Transfer docker
sudo('mkdir -p ' + DOCKER_CI_PATH)
-sudo('chown {}.{} {}'.format(AWS_IMAGE_USER, AWS_IMAGE_USER, DOCKER_CI_PATH))
-call('/usr/bin/rsync -aH {} {}@{}:{}'.format(DOCKER_CI_PATH, AWS_IMAGE_USER, ip,
+sudo('chown {}.{} {}'.format(DO_IMAGE_USER, DO_IMAGE_USER, DOCKER_CI_PATH))
+call('/usr/bin/rsync -aH {} {}@{}:{}'.format(DOCKER_CI_PATH, DO_IMAGE_USER, ip,
    os.path.dirname(DOCKER_CI_PATH)), shell=True)

# Install Docker and Buildbot dependencies
sudo('addgroup docker')
sudo('usermod -a -G docker ubuntu')
sudo('mkdir /mnt/docker; ln -s /mnt/docker /var/lib/docker')
sudo('wget -q -O - https://get.docker.io/gpg | apt-key add -')
sudo('echo deb https://get.docker.io/ubuntu docker main >'

@ -123,7 +135,7 @@ sudo('echo -e "deb http://archive.ubuntu.com/ubuntu raring main universe\n'
    ' > /etc/apt/sources.list; apt-get update')
sudo('DEBIAN_FRONTEND=noninteractive apt-get install -q -y wget python-dev'
    ' python-pip supervisor git mercurial linux-image-extra-$(uname -r)'
-    ' aufs-tools make libfontconfig libevent-dev')
+    ' aufs-tools make libfontconfig libevent-dev libsqlite3-dev libssl-dev')
sudo('wget -O - https://go.googlecode.com/files/go1.1.2.linux-amd64.tar.gz | '
    'tar -v -C /usr/local -xz; ln -s /usr/local/go/bin/go /usr/bin/go')
sudo('GOPATH=/go go get -d github.com/dotcloud/docker')

@ -135,13 +147,13 @@ sudo('curl -s https://phantomjs.googlecode.com/files/'
    'phantomjs-1.9.1-linux-x86_64.tar.bz2 | tar jx -C /usr/bin'
    ' --strip-components=2 phantomjs-1.9.1-linux-x86_64/bin/phantomjs')

-# Preventively reboot docker-ci daily
-sudo('ln -s /sbin/reboot /etc/cron.daily')
-
# Build docker-ci containers
sudo('cd {}; docker build -t docker .'.format(DOCKER_PATH))
sudo('cd {}; docker build -t docker-ci .'.format(DOCKER_CI_PATH))
sudo('cd {}/nightlyrelease; docker build -t dockerbuilder .'.format(
    DOCKER_CI_PATH))
+sudo('cd {}/registry-coverage; docker build -t registry_coverage .'.format(
+    DOCKER_CI_PATH))

# Download docker-ci testing container
sudo('docker pull mzdaniel/test_docker')

@ -154,3 +166,6 @@ sudo('{0}/setup.sh root {0} {1} {2} {3} {4} {5} {6} {7} {8} {9} {10}'
    env['SMTP_PWD'], env['EMAIL_RCP'], env['REGISTRY_USER'],
    env['REGISTRY_PWD'], env['REGISTRY_BUCKET'], env['REGISTRY_ACCESS_KEY'],
    env['REGISTRY_SECRET_KEY']))
+
+# Preventively reboot docker-ci daily
+sudo('ln -s /sbin/reboot /etc/cron.daily')
@ -1,6 +1,6 @@
-# VERSION: 0.3
-# DOCKER-VERSION 0.6.3
-# AUTHOR: Daniel Mizyrycki <daniel@dotcloud.com>
+# VERSION: 0.4
+# DOCKER-VERSION 0.6.6
+# AUTHOR: Daniel Mizyrycki <daniel@docker.com>
# DESCRIPTION: Testing docker PRs and commits on top of master using
# REFERENCES: This code reuses the excellent implementation of
#             Docker in Docker made by Jerome Petazzoni.

@ -15,15 +15,10 @@
# TO_RUN: docker run -privileged test_docker hack/dind test_docker.sh [commit] [repo] [branch]

from docker
-maintainer Daniel Mizyrycki <daniel@dotcloud.com>
+maintainer Daniel Mizyrycki <daniel@docker.com>

-# Setup go environment. Extracted from /Dockerfile
-env CGO_ENABLED 0
-env GOROOT /goroot
-env PATH $PATH:/goroot/bin
-env GOPATH /go:/go/src/github.com/dotcloud/docker/vendor
-volume /var/lib/docker
-workdir /go/src/github.com/dotcloud/docker
+# Setup go in PATH. Extracted from /Dockerfile
+env PATH /usr/local/go/bin:$PATH

# Add test_docker.sh
add test_docker.sh /usr/bin/test_docker.sh
@ -8,31 +8,26 @@ BRANCH=${3-master}
# Compute test paths
DOCKER_PATH=/go/src/github.com/dotcloud/docker

# Timestamp
echo
date; echo

# Fetch latest master
cd /
+rm -rf /go
mkdir -p $DOCKER_PATH
-git clone -q -b master http://github.com/dotcloud/docker $DOCKER_PATH
cd $DOCKER_PATH
+git init .
+git fetch -q http://github.com/dotcloud/docker master
+git reset --hard FETCH_HEAD

-# Merge commit
-#echo FIXME. Temporarily skip TestPrivilegedCanMount until DinD works reliable on AWS
-git pull -q https://github.com/mzdaniel/docker.git dind-aws || exit 1
-
+# Merge commit in top of master
git fetch -q "$REPO" "$BRANCH"
-git merge --no-edit $COMMIT || exit 1
+git merge --no-edit $COMMIT || exit 255

# Test commit
-go test -v; exit_status=$?
+./hack/make.sh test; exit_status=$?

# Display load if test fails
-if [ $exit_status -eq 1 ] ; then
+if [ $exit_status -ne 0 ] ; then
    uptime; echo; free
fi

# Cleanup testing directory
rm -rf $BASE_PATH

exit $exit_status
@ -8,10 +8,12 @@ rm -rf docker-registry
# Setup the environment
export SETTINGS_FLAVOR=test
+export DOCKER_REGISTRY_CONFIG=config_test.yml
export PYTHONPATH=$(pwd)/docker-registry/test

# Get latest docker registry
git clone -q https://github.com/dotcloud/docker-registry.git
cd docker-registry
+sed -Ei "s#(boto_bucket: ).+#\1_env:S3_BUCKET#" config_test.yml

# Get dependencies
pip install -q -r requirements.txt

@ -20,7 +22,6 @@ pip install -q tox

# Run registry tests
tox || exit 1
-export PYTHONPATH=$(pwd)/docker-registry
python -m unittest discover -p s3.py -s test || exit 1
python -m unittest discover -p workflow.py -s test
@ -1,20 +1,19 @@
-# VERSION: 1.2
-# DOCKER-VERSION 0.6.3
-# AUTHOR: Daniel Mizyrycki <daniel@dotcloud.com>
+# VERSION: 1.6
+# DOCKER-VERSION 0.6.6
+# AUTHOR: Daniel Mizyrycki <daniel@docker.com>
# DESCRIPTION: Build docker nightly release using Docker in Docker.
# REFERENCES: This code reuses the excellent implementation of docker in docker
#             made by Jerome Petazzoni. https://github.com/jpetazzo/dind
# COMMENTS:
#   release_credentials.json is a base64 json encoded file containing:
#       { "AWS_ACCESS_KEY": "Test_docker_AWS_S3_bucket_id",
-#         "AWS_SECRET_KEY='Test_docker_AWS_S3_bucket_key'
-#         "GPG_PASSPHRASE='Test_docker_GPG_passphrase_signature'
-#         "INDEX_AUTH='Encripted_index_authentication' }
+#         "AWS_SECRET_KEY": "Test_docker_AWS_S3_bucket_key",
+#         "GPG_PASSPHRASE": "Test_docker_GPG_passphrase_signature" }
# TO_BUILD: docker build -t dockerbuilder .
-# TO_RELEASE: docker run -i -t -privileged -e AWS_S3_BUCKET="test.docker.io" dockerbuilder
+# TO_RELEASE: docker run -i -t -privileged -e AWS_S3_BUCKET="test.docker.io" dockerbuilder hack/dind dockerbuild.sh

from docker
-maintainer Daniel Mizyrycki <daniel@dotcloud.com>
+maintainer Daniel Mizyrycki <daniel@docker.com>

# Add docker dependencies and downloading packages
run echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > /etc/apt/sources.list

@ -24,11 +23,8 @@ run apt-get update; apt-get install -y -q wget python2.7
run wget -q -O /usr/bin/docker http://get.docker.io/builds/Linux/x86_64/docker-latest; chmod +x /usr/bin/docker

# Add proto docker builder
-add ./dockerbuild /usr/bin/dockerbuild
-run chmod +x /usr/bin/dockerbuild
+add ./dockerbuild.sh /usr/bin/dockerbuild.sh
+run chmod +x /usr/bin/dockerbuild.sh

# Add release credentials
add ./release_credentials.json /root/release_credentials.json
-
-# Launch build process in a container
-cmd dockerbuild
@ -1,50 +0,0 @@
-#!/bin/bash
-
-# Variables AWS_ACCESS_KEY, AWS_SECRET_KEY, PG_PASSPHRASE and INDEX_AUTH
-# are decoded from /root/release_credentials.json
-# Variable AWS_S3_BUCKET is passed to the environment from docker run -e
-
-# Enable debugging
-set -x
-
-# Fetch docker master branch
-rm -rf /go/src/github.com/dotcloud/docker
-cd /
-git clone -q http://github.com/dotcloud/docker /go/src/github.com/dotcloud/docker
-cd /go/src/github.com/dotcloud/docker
-
-# Launch docker daemon using dind inside the container
-./hack/dind /usr/bin/docker -d &
-sleep 5
-
-# Add an uncommitted change to generate a timestamped release
-date > timestamp
-
-# Build the docker package using /Dockerfile
-docker build -t docker .
-
-# Run Docker unittests binary and Ubuntu package
-docker run -privileged docker hack/make.sh
-exit_status=$?
-
-# Display load if test fails
-if [ $exit_status -eq 1 ] ; then
-    uptime; echo; free
-    exit 1
-fi
-
-# Commit binary and ubuntu bundles for release
-docker commit -run '{"Env": ["PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin"], "WorkingDir": "/go/src/github.com/dotcloud/docker"}' $(docker ps -l -q) release
-
-# Turn debug off to load credentials from the environment
-set +x
-eval $(cat /root/release_credentials.json | python -c '
-import sys,json,base64;
-d=json.loads(base64.b64decode(sys.stdin.read()));
-exec("""for k in d: print "export {0}=\\"{1}\\"".format(k,d[k])""")')
-set -x
-
-# Push docker nightly
-echo docker run -i -t -privileged -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=XXXXX -e AWS_SECRET_KEY=XXXXX -e GPG_PASSPHRASE=XXXXX release hack/release.sh
-set +x
-docker run -i -t -privileged -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=$AWS_ACCESS_KEY -e AWS_SECRET_KEY=$AWS_SECRET_KEY -e GPG_PASSPHRASE=$GPG_PASSPHRASE release hack/release.sh
40 hack/infrastructure/docker-ci/nightlyrelease/dockerbuild.sh Normal file
@ -0,0 +1,40 @@
+#!/bin/bash
+
+# Variables AWS_ACCESS_KEY, AWS_SECRET_KEY and PG_PASSPHRASE are decoded
+# from /root/release_credentials.json
+# Variable AWS_S3_BUCKET is passed to the environment from docker run -e
+
+# Turn debug off to load credentials from the environment
+set +x
+eval $(cat /root/release_credentials.json | python -c '
+import sys,json,base64;
+d=json.loads(base64.b64decode(sys.stdin.read()));
+exec("""for k in d: print "export {0}=\\"{1}\\"".format(k,d[k])""")')
+
+# Fetch docker master branch
+set -x
+cd /
+rm -rf /go
+git clone -q -b master http://github.com/dotcloud/docker /go/src/github.com/dotcloud/docker
+cd /go/src/github.com/dotcloud/docker
+
+# Launch docker daemon using dind inside the container
+/usr/bin/docker version
+/usr/bin/docker -d &
+sleep 5
+
+# Build Docker release container
+docker build -t docker .
+
+# Test docker and if everything works well, release
+echo docker run -i -t -privileged -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=XXXXX -e AWS_SECRET_KEY=XXXXX -e GPG_PASSPHRASE=XXXXX docker hack/release.sh
+set +x
+docker run -privileged -i -t -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=$AWS_ACCESS_KEY -e AWS_SECRET_KEY=$AWS_SECRET_KEY -e GPG_PASSPHRASE=$GPG_PASSPHRASE docker hack/release.sh
+exit_status=$?
+
+# Display load if test fails
+set -x
+if [ $exit_status -ne 0 ] ; then
+    uptime; echo; free
+    exit 1
+fi
@ -1 +0,0 @@
-eyAiQVdTX0FDQ0VTU19LRVkiOiAiIiwKICAiQVdTX1NFQ1JFVF9LRVkiOiAiIiwKICAiR1BHX1BBU1NQSFJBU0UiOiAiIiwKICAiSU5ERVhfQVVUSCI6ICIiIH0=
18 hack/infrastructure/docker-ci/registry-coverage/Dockerfile Normal file
@ -0,0 +1,18 @@
+# VERSION: 0.1
+# DOCKER-VERSION 0.6.4
+# AUTHOR: Daniel Mizyrycki <daniel@dotcloud.com>
+# DESCRIPTION: Docker registry coverage
+# COMMENTS: Add registry coverage into the docker-ci image
+# TO_BUILD: docker build -t registry_coverage .
+# TO_RUN: docker run registry_coverage
+
+from docker-ci
+maintainer Daniel Mizyrycki <daniel@dotcloud.com>
+
+# Add registry_coverage.sh and dependencies
+run pip install coverage flask pyyaml requests simplejson python-glanceclient \
+    blinker redis boto gevent rsa mock
+add registry_coverage.sh /usr/bin/registry_coverage.sh
+run chmod +x /usr/bin/registry_coverage.sh
+
+cmd "/usr/bin/registry_coverage.sh"
18 hack/infrastructure/docker-ci/registry-coverage/registry_coverage.sh Executable file
@ -0,0 +1,18 @@
+#!/bin/bash
+
+set -x
+
+# Setup the environment
+REGISTRY_PATH=/data/docker-registry
+export SETTINGS_FLAVOR=test
+export DOCKER_REGISTRY_CONFIG=config_test.yml
+export PYTHONPATH=$REGISTRY_PATH/test
+
+# Fetch latest docker-registry master
+rm -rf $REGISTRY_PATH
+git clone https://github.com/dotcloud/docker-registry -b master $REGISTRY_PATH
+cd $REGISTRY_PATH
+
+# Generate coverage
+coverage run -m unittest discover test || exit 1
+coverage report --include='./*' --omit='./test/*'
@ -34,7 +34,7 @@ env['DOCKER_CI_KEY'] = open(env['DOCKER_CI_KEY_PATH']).read()

DROPLET_NAME = env.get('DROPLET_NAME','report')
TIMEOUT = 120            # Seconds before timeout droplet creation
-IMAGE_ID = 894856        # Docker on Ubuntu 13.04
+IMAGE_ID = 1004145       # Docker on Ubuntu 13.04
REGION_ID = 4            # New York 2
SIZE_ID = 66             # memory 512MB
DO_IMAGE_USER = 'root'   # Image user on Digital Ocean
@ -22,7 +22,12 @@ bundle_test() {
	for test_dir in $(find_test_dirs); do (
		set -x
		cd $test_dir
+
+		# Install packages that are dependencies of the tests.
+		# Note: Does not run the tests.
+		go test -i -ldflags "$LDFLAGS" $BUILDFLAGS
+
+		# Run the tests with the optional $TESTFLAGS.
		export TEST_DOCKERINIT_PATH=$DEST/../dynbinary/dockerinit-$VERSION
		go test -v -ldflags "$LDFLAGS -X github.com/dotcloud/docker/utils.INITSHA1 \"$DOCKER_INITSHA1\"" $BUILDFLAGS $TESTFLAGS
	) done
@ -16,7 +16,12 @@ bundle_test() {
	for test_dir in $(find_test_dirs); do (
		set -x
		cd $test_dir
+
+		# Install packages that are dependencies of the tests.
+		# Note: Does not run the tests.
+		go test -i -ldflags "$LDFLAGS $LDFLAGS_STATIC" $BUILDFLAGS
+
+		# Run the tests with the optional $TESTFLAGS.
		go test -v -ldflags "$LDFLAGS $LDFLAGS_STATIC" $BUILDFLAGS $TESTFLAGS
	) done
} 2>&1 | tee $DEST/test.log
@ -10,7 +10,7 @@ fi
PACKAGE_ARCHITECTURE="$(dpkg-architecture -qDEB_HOST_ARCH)"
PACKAGE_URL="http://www.docker.io/"
PACKAGE_MAINTAINER="docker@dotcloud.com"
-PACKAGE_DESCRIPTION="lxc-docker is a Linux container runtime
+PACKAGE_DESCRIPTION="Linux container runtime
Docker complements LXC with a high-level API which operates at the process
level. It runs unix processes with strong guarantees of isolation and
repeatability across servers.
@ -37,27 +37,51 @@ bundle_ubuntu() {
	# This will fail if the binary bundle hasn't been built
	cp $DEST/../binary/docker-$VERSION $DIR/usr/bin/docker

-	# Generate postinst/prerm scripts
-	cat >/tmp/postinst <<'EOF'
+	# Generate postinst/prerm/postrm scripts
+	cat > /tmp/postinst <<'EOF'
#!/bin/sh
-service docker stop || true
-grep -q '^docker:' /etc/group || groupadd --system docker || true
-service docker start
-EOF
-cat >/tmp/prerm <<'EOF'
-#!/bin/sh
-service docker stop || true
+set -e
+set -u

-case "$1" in
-	purge|remove|abort-install)
-		groupdel docker || true
-		;;
-
-	upgrade|failed-upgrade|abort-upgrade)
-		# don't touch docker group
-		;;
-esac
+getent group docker > /dev/null || groupadd --system docker || true
+
+update-rc.d docker defaults > /dev/null || true
+if [ -n "$2" ]; then
+	_dh_action=restart
+else
+	_dh_action=start
+fi
+service docker $_dh_action 2>/dev/null || true
+
+#DEBHELPER#
+EOF
+cat > /tmp/prerm <<'EOF'
+#!/bin/sh
+set -e
+set -u
+
+service docker stop 2>/dev/null || true
+
+#DEBHELPER#
+EOF
+cat > /tmp/postrm <<'EOF'
+#!/bin/sh
+set -e
+set -u
+
+if [ "$1" = "purge" ] ; then
+	update-rc.d docker remove > /dev/null || true
+fi
+
+# In case this system is running systemd, we make systemd reload the unit files
+# to pick up changes.
+if [ -d /run/systemd/system ] ; then
+	systemctl --system daemon-reload > /dev/null || true
+fi
+
+#DEBHELPER#
+EOF
+# TODO swaths of these were borrowed from debhelper's auto-inserted stuff, because we're still using fpm - we need to use debhelper instead, and somehow reconcile Ubuntu that way
	chmod +x /tmp/postinst /tmp/prerm

	(

@ -66,6 +90,7 @@ EOF
	   --name lxc-docker-$VERSION --version $PKGVERSION \
	   --after-install /tmp/postinst \
	   --before-remove /tmp/prerm \
+	   --after-remove /tmp/postrm \
	   --architecture "$PACKAGE_ARCHITECTURE" \
	   --prefix / \
	   --depends lxc \

@ -82,6 +107,8 @@ EOF
	   --vendor "$PACKAGE_VENDOR" \
	   --config-files /etc/init/docker.conf \
+	   --config-files /etc/init.d/docker \
+	   --config-files /etc/default/docker \
	   --deb-compression xz \
	   -t deb .
	mkdir empty
	fpm -s dir -C empty \

@ -92,7 +119,12 @@ EOF
	   --maintainer "$PACKAGE_MAINTAINER" \
	   --url "$PACKAGE_URL" \
	   --vendor "$PACKAGE_VENDOR" \
+	   --config-files /etc/init/docker.conf \
+	   --config-files /etc/init.d/docker \
+	   --config-files /etc/default/docker \
+	   --deb-compression xz \
	   -t deb .
+	# note: the --config-files lines have to be duplicated to stop overwrite on package upgrade (since we have to use this funky virtual package)
	)
}
@ -97,7 +97,7 @@ write_to_s3() {
	DEST=$1
	F=`mktemp`
	cat > $F
-	s3cmd --acl-public put $F $DEST
+	s3cmd --acl-public --mime-type='text/plain' put $F $DEST
	rm -f $F
}

@ -107,14 +107,14 @@ s3_url() {
		echo "https://$BUCKET"
		;;
	*)
-		echo "http://$BUCKET.s3.amazonaws.com"
+		s3cmd ws-info s3://$BUCKET | awk -v 'FS=: +' '/http:\/\/'$BUCKET'/ { gsub(/\/+$/, "", $2); print $2 }'
		;;
	esac
}

# Upload the 'ubuntu' bundle to S3:
# 1. A full APT repository is published at $BUCKET/ubuntu/
-# 2. Instructions for using the APT repository are uploaded at $BUCKET/ubuntu/info
+# 2. Instructions for using the APT repository are uploaded at $BUCKET/ubuntu/index
release_ubuntu() {
	[ -e bundles/$VERSION/ubuntu ] || {
		echo >&2 './hack/make.sh must be run before release_ubuntu'

@ -168,7 +168,7 @@ EOF

	# Upload repo
	s3cmd --acl-public sync $APTDIR/ s3://$BUCKET/ubuntu/
-	cat <<EOF | write_to_s3 s3://$BUCKET/ubuntu/info
+	cat <<EOF | write_to_s3 s3://$BUCKET/ubuntu/index
# Add the repository to your APT sources
echo deb $(s3_url)/ubuntu docker main > /etc/apt/sources.list.d/docker.list
# Then import the repository key

@ -180,7 +180,12 @@ apt-get update ; apt-get install -y lxc-docker
# Alternatively, just use the curl-able install.sh script provided at $(s3_url)
#
EOF
-	echo "APT repository uploaded. Instructions available at $(s3_url)/ubuntu/info"
+
+	# Add redirect at /ubuntu/info for URL-backwards-compatibility
+	rm -rf /tmp/emptyfile && touch /tmp/emptyfile
+	s3cmd --acl-public --add-header='x-amz-website-redirect-location:/ubuntu/' --mime-type='text/plain' put /tmp/emptyfile s3://$BUCKET/ubuntu/info
+
+	echo "APT repository uploaded. Instructions available at $(s3_url)/ubuntu"
}

# Upload a static binary to S3

@ -189,14 +194,20 @@ release_binary() {
		echo >&2 './hack/make.sh must be run before release_binary'
		exit 1
	}

	S3DIR=s3://$BUCKET/builds/Linux/x86_64
	s3cmd --acl-public put bundles/$VERSION/binary/docker-$VERSION $S3DIR/docker-$VERSION
-	cat <<EOF | write_to_s3 s3://$BUCKET/builds/info
+	cat <<EOF | write_to_s3 s3://$BUCKET/builds/index
# To install, run the following command as root:
curl -O $(s3_url)/builds/Linux/x86_64/docker-$VERSION && chmod +x docker-$VERSION && sudo mv docker-$VERSION /usr/local/bin/docker
# Then start docker in daemon mode:
sudo /usr/local/bin/docker -d
EOF
+
+	# Add redirect at /builds/info for URL-backwards-compatibility
+	rm -rf /tmp/emptyfile && touch /tmp/emptyfile
+	s3cmd --acl-public --add-header='x-amz-website-redirect-location:/builds/' --mime-type='text/plain' put /tmp/emptyfile s3://$BUCKET/builds/info
+
	if [ -z "$NOLATEST" ]; then
		echo "Copying docker-$VERSION to docker-latest"
		s3cmd --acl-public cp $S3DIR/docker-$VERSION $S3DIR/docker-latest
4 image.go
@ -134,10 +134,6 @@ func (image *Image) TarLayer(compression archive.Compression) (archive.Archive,
|
|||
return archive.Tar(layerPath, compression)
|
||||
}
|
||||
|
||||
func (image *Image) ShortID() string {
|
||||
return utils.TruncateID(image.ID)
|
||||
}
|
||||
|
||||
func ValidateID(id string) error {
|
||||
if id == "" {
|
||||
return fmt.Errorf("Image id can't be empty")
|
||||
|
|
|
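With Image.ShortID gone, the call sites below switch to utils.TruncateID on the full ID. A sketch of what such a helper amounts to — the 12-character prefix is an assumption based on docker's usual short IDs, not a copy of the real utils.TruncateID:

package main

import "fmt"

// truncateID shortens a full hex ID to a short prefix, which is
// normally enough to identify an image or container unambiguously.
func truncateID(id string) string {
	const shortLen = 12
	if len(id) < shortLen {
		return id
	}
	return id[:shortLen]
}

func main() {
	fmt.Println(truncateID("4e38e38c8ce0b8d9041a9c4fefe786631d1416225e13b0bfe8cfa2503006da8c"))
	// Output: 4e38e38c8ce0
}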
@ -55,9 +55,16 @@ func RemoveExistingChain(name string) error {
}

func (c *Chain) Forward(action Action, ip net.IP, port int, proto, dest_addr string, dest_port int) error {
+	daddr := ip.String()
+	if ip.IsUnspecified() {
+		// iptables interprets "0.0.0.0" as "0.0.0.0/32", whereas we
+		// want "0.0.0.0/0". "0/0" is correctly interpreted as "any
+		// value" by both iptables and ip6tables.
+		daddr = "0/0"
+	}
	if output, err := Raw("-t", "nat", fmt.Sprint(action), c.Name,
		"-p", proto,
-		"-d", ip.String(),
+		"-d", daddr,
		"--dport", strconv.Itoa(port),
		"!", "-i", c.Bridge,
		"-j", "DNAT",
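The fix relies on net.IP.IsUnspecified, which reports true for both 0.0.0.0 and ::. A small, runnable demo of the substitution the comment describes:

package main

import (
	"fmt"
	"net"
)

// destArg reproduces the daddr logic above: unspecified addresses
// become "0/0" so iptables matches any destination, not a /32 host.
func destArg(ip net.IP) string {
	if ip.IsUnspecified() {
		return "0/0"
	}
	return ip.String()
}

func main() {
	fmt.Println(destArg(net.ParseIP("0.0.0.0")))  // 0/0
	fmt.Println(destArg(net.ParseIP("::")))       // 0/0
	fmt.Println(destArg(net.ParseIP("10.0.0.5"))) // 10.0.0.5
}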
38 network.go
@ -168,12 +168,28 @@ func CreateBridgeIface(config *DaemonConfig) error {
	}

	if config.EnableIptables {
		// Enable NAT
		if output, err := iptables.Raw("-t", "nat", "-A", "POSTROUTING", "-s", ifaceAddr,
			"!", "-d", ifaceAddr, "-j", "MASQUERADE"); err != nil {
			return fmt.Errorf("Unable to enable network bridge NAT: %s", err)
		} else if len(output) != 0 {
			return fmt.Errorf("Error iptables postrouting: %s", output)
		}
+
+		// Accept incoming packets for existing connections
+		if output, err := iptables.Raw("-I", "FORWARD", "-o", config.BridgeIface, "-m", "conntrack", "--ctstate", "RELATED,ESTABLISHED", "-j", "ACCEPT"); err != nil {
+			return fmt.Errorf("Unable to allow incoming packets: %s", err)
+		} else if len(output) != 0 {
+			return fmt.Errorf("Error iptables allow incoming: %s", output)
+		}
+
+		// Accept all non-intercontainer outgoing packets
+		if output, err := iptables.Raw("-I", "FORWARD", "-i", config.BridgeIface, "!", "-o", config.BridgeIface, "-j", "ACCEPT"); err != nil {
+			return fmt.Errorf("Unable to allow outgoing packets: %s", err)
+		} else if len(output) != 0 {
+			return fmt.Errorf("Error iptables allow outgoing: %s", output)
+		}
	}
	return nil
}
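iptables.Raw, used throughout these hunks, behaves like a thin wrapper over the iptables binary whose non-empty output is treated as an error by callers. A rough sketch of such a wrapper, assuming nothing about the real helper beyond what the call sites show:

package iptablesdemo

import (
	"fmt"
	"os/exec"
)

// raw runs iptables with the given arguments and hands back whatever
// the command printed; callers treat non-empty output as a problem,
// matching the `len(output) != 0` checks in the hunks above.
func raw(args ...string) ([]byte, error) {
	out, err := exec.Command("iptables", args...).CombinedOutput()
	if err != nil {
		return nil, fmt.Errorf("iptables %v failed: %s (%s)", args, out, err)
	}
	return out, nil
}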
@ -680,20 +696,30 @@ func newNetworkManager(config *DaemonConfig) (*NetworkManager, error) {

	// Configure iptables for link support
	if config.EnableIptables {
-		args := []string{"FORWARD", "-i", config.BridgeIface, "-o", config.BridgeIface, "-j", "DROP"}
+		args := []string{"FORWARD", "-i", config.BridgeIface, "-o", config.BridgeIface, "-j"}
+		acceptArgs := append(args, "ACCEPT")
+		dropArgs := append(args, "DROP")
+
		if !config.InterContainerCommunication {
-			if !iptables.Exists(args...) {
+			iptables.Raw(append([]string{"-D"}, acceptArgs...)...)
+			if !iptables.Exists(dropArgs...) {
				utils.Debugf("Disable inter-container communication")
-				if output, err := iptables.Raw(append([]string{"-A"}, args...)...); err != nil {
+				if output, err := iptables.Raw(append([]string{"-I"}, dropArgs...)...); err != nil {
					return nil, fmt.Errorf("Unable to prevent intercontainer communication: %s", err)
				} else if len(output) != 0 {
-					return nil, fmt.Errorf("Error enabling iptables: %s", output)
+					return nil, fmt.Errorf("Error disabling intercontainer communication: %s", output)
				}
			}
		} else {
-			utils.Debugf("Enable inter-container communication")
-			iptables.Raw(append([]string{"-D"}, args...)...)
+			iptables.Raw(append([]string{"-D"}, dropArgs...)...)
+			if !iptables.Exists(acceptArgs...) {
+				utils.Debugf("Enable inter-container communication")
+				if output, err := iptables.Raw(append([]string{"-I"}, acceptArgs...)...); err != nil {
+					return nil, fmt.Errorf("Unable to allow intercontainer communication: %s", err)
+				} else if len(output) != 0 {
+					return nil, fmt.Errorf("Error enabling intercontainer communication: %s", output)
+				}
+			}
		}
	}
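A Go subtlety in the hunk above: acceptArgs and dropArgs are both built by appending to args. That is only safe because a slice literal has len == cap, so each append copies into a fresh backing array; had args been built with spare capacity, the second append would clobber the element written by the first. A demo of the difference:

package main

import "fmt"

func main() {
	// Slice literal: len == cap, so each append reallocates.
	// This is what the network.go change above relies on.
	args := []string{"FORWARD", "-j"}
	accept := append(args, "ACCEPT")
	drop := append(args, "DROP")
	fmt.Println(accept, drop) // [FORWARD -j ACCEPT] [FORWARD -j DROP]

	// Spare capacity: both appends write into the same backing array.
	shared := make([]string, 2, 3)
	copy(shared, args)
	a := append(shared, "ACCEPT")
	b := append(shared, "DROP")
	fmt.Println(a, b) // [FORWARD -j DROP] [FORWARD -j DROP]
}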
32 runtime.go
@ -186,6 +186,7 @@ func (runtime *Runtime) Register(container *Container) error {
	if !container.State.Running {
		close(container.waitLock)
	} else if !nomonitor {
+		container.allocateNetwork()
		go container.monitor()
	}
	return nil

@ -195,7 +196,7 @@ func (runtime *Runtime) ensureName(container *Container) error {
	if container.Name == "" {
		name, err := generateRandomName(runtime)
		if err != nil {
-			name = container.ShortID()
+			name = utils.TruncateID(container.ID)
		}
		container.Name = name

@ -298,7 +299,7 @@ func (runtime *Runtime) restore() error {
		// Try to set the default name for a container if it exists prior to links
		container.Name, err = generateRandomName(runtime)
		if err != nil {
-			container.Name = container.ShortID()
+			container.Name = utils.TruncateID(container.ID)
		}

		if _, err := runtime.containerGraph.Set(container.Name, container.ID); err != nil {
@ -506,32 +507,7 @@ func (runtime *Runtime) Create(config *Config, name string) (*Container, []strin
		return nil, nil, err
	}

-	// Step 3: if hostname, build hostname and hosts files
-	container.HostnamePath = path.Join(container.root, "hostname")
-	ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644)
-
-	hostsContent := []byte(`
-127.0.0.1	localhost
-::1		localhost ip6-localhost ip6-loopback
-fe00::0		ip6-localnet
-ff00::0		ip6-mcastprefix
-ff02::1		ip6-allnodes
-ff02::2		ip6-allrouters
-`)
-
-	container.HostsPath = path.Join(container.root, "hosts")
-
-	if container.Config.Domainname != "" {
-		hostsContent = append([]byte(fmt.Sprintf("::1\t\t%s.%s %s\n", container.Config.Hostname, container.Config.Domainname, container.Config.Hostname)), hostsContent...)
-		hostsContent = append([]byte(fmt.Sprintf("127.0.0.1\t%s.%s %s\n", container.Config.Hostname, container.Config.Domainname, container.Config.Hostname)), hostsContent...)
-	} else {
-		hostsContent = append([]byte(fmt.Sprintf("::1\t\t%s\n", container.Config.Hostname)), hostsContent...)
-		hostsContent = append([]byte(fmt.Sprintf("127.0.0.1\t%s\n", container.Config.Hostname)), hostsContent...)
-	}
-
-	ioutil.WriteFile(container.HostsPath, hostsContent, 0644)
-
-	// Step 4: register the container
+	// Step 3: register the container
	if err := runtime.Register(container); err != nil {
		return nil, nil, err
	}
105 runtime_test.go
@ -3,11 +3,13 @@ package docker
import (
	"bytes"
	"fmt"
+	"github.com/dotcloud/docker/engine"
	"github.com/dotcloud/docker/sysinit"
	"github.com/dotcloud/docker/utils"
	"io"
	"log"
	"net"
+	"net/url"
	"os"
	"path/filepath"
	"runtime"

@ -122,22 +124,19 @@ func init() {
}

func setupBaseImage() {
-	config := &DaemonConfig{
-		Root:        unitTestStoreBase,
-		AutoRestart: false,
-		BridgeIface: unitTestNetworkBridge,
-	}
-	runtime, err := NewRuntimeFromDirectory(config)
+	eng, err := engine.New(unitTestStoreBase)
	if err != nil {
+		log.Fatalf("Can't initialize engine at %s: %s", unitTestStoreBase, err)
+	}
+	job := eng.Job("initapi")
+	job.Setenv("Root", unitTestStoreBase)
+	job.SetenvBool("Autorestart", false)
+	job.Setenv("BridgeIface", unitTestNetworkBridge)
+	if err := job.Run(); err != nil {
		log.Fatalf("Unable to create a runtime for tests:", err)
	}

-	// Create the "Server"
-	srv := &Server{
-		runtime:     runtime,
-		pullingPool: make(map[string]struct{}),
-		pushingPool: make(map[string]struct{}),
-	}
+	srv := mkServerFromEngine(eng, log.New(os.Stderr, "", 0))
+	runtime := srv.runtime

	// If the unit test is not found, try to download it.
	if img, err := runtime.repositories.LookupImage(unitTestImageName); err != nil || img.ID != unitTestImageID {

@ -153,18 +152,22 @@ func spawnGlobalDaemon() {
		utils.Debugf("Global runtime already exists. Skipping.")
		return
	}
-	globalRuntime = mkRuntime(log.New(os.Stderr, "", 0))
-	srv := &Server{
-		runtime:     globalRuntime,
-		pullingPool: make(map[string]struct{}),
-		pushingPool: make(map[string]struct{}),
-	}
+	t := log.New(os.Stderr, "", 0)
+	eng := NewTestEngine(t)
+	srv := mkServerFromEngine(eng, t)
+	globalRuntime = srv.runtime

	// Spawn a Daemon
	go func() {
		utils.Debugf("Spawning global daemon for integration tests")
-		if err := ListenAndServe(testDaemonProto, testDaemonAddr, srv, os.Getenv("DEBUG") != ""); err != nil {
-			log.Fatalf("Unable to spawn the test daemon:", err)
+		listenURL := &url.URL{
+			Scheme: testDaemonProto,
+			Host:   testDaemonAddr,
+		}
+		job := eng.Job("serveapi", listenURL.String())
+		job.SetenvBool("Logging", os.Getenv("DEBUG") != "")
+		if err := job.Run(); err != nil {
+			log.Fatalf("Unable to spawn the test daemon: %s", err)
		}
	}()
	// Give some time to ListenAndServer to actually start
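These tests now drive the daemon through engine jobs: a handler registered under a name, a job built from the engine, environment set with Setenv, then Run. A self-contained toy version of that pattern — the real engine package surely differs in detail; only the handler signature func(*engine.Job) string, returning "0" on success, is taken from this diff:

package main

import "fmt"

type Engine struct {
	handlers map[string]func(*Job) string
}

type Job struct {
	Eng  *Engine
	Name string
	Args []string
	env  map[string]string
}

func New() *Engine {
	return &Engine{handlers: make(map[string]func(*Job) string)}
}

func (e *Engine) Register(name string, h func(*Job) string) {
	e.handlers[name] = h
}

func (e *Engine) Job(name string, args ...string) *Job {
	return &Job{Eng: e, Name: name, Args: args, env: make(map[string]string)}
}

func (j *Job) Setenv(key, value string) { j.env[key] = value }

// Run looks up the handler and treats any status other than "0" as a
// failure, mirroring the convention visible in jobInitApi below.
func (j *Job) Run() error {
	h, ok := j.Eng.handlers[j.Name]
	if !ok {
		return fmt.Errorf("no such job: %s", j.Name)
	}
	if status := h(j); status != "0" {
		return fmt.Errorf("job %s failed: %s", j.Name, status)
	}
	return nil
}

func main() {
	eng := New()
	eng.Register("initapi", func(job *Job) string {
		fmt.Println("Root =", job.env["Root"])
		return "0"
	})
	job := eng.Job("initapi")
	job.Setenv("Root", "/var/lib/docker")
	if err := job.Run(); err != nil {
		fmt.Println(err)
	}
}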
@ -184,7 +187,7 @@ func GetTestImage(runtime *Runtime) *Image {
			return image
		}
	}
-	log.Fatalf("Test image %v not found", unitTestImageID)
+	log.Fatalf("Test image %v not found in %s: %s", unitTestImageID, runtime.graph.Root, imgs)
	return nil
}

@ -646,20 +649,17 @@ func TestReloadContainerLinks(t *testing.T) {
}

func TestDefaultContainerName(t *testing.T) {
-	runtime := mkRuntime(t)
+	eng := NewTestEngine(t)
+	srv := mkServerFromEngine(eng, t)
+	runtime := srv.runtime
	defer nuke(runtime)
-	srv := &Server{runtime: runtime}

	config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
	if err != nil {
		t.Fatal(err)
	}

-	shortId, _, err := srv.ContainerCreate(config, "some_name")
-	if err != nil {
-		t.Fatal(err)
-	}
-	container := runtime.Get(shortId)
+	container := runtime.Get(createNamedTestContainer(eng, config, t, "some_name"))
	containerID := container.ID

	if container.Name != "/some_name" {

@ -683,20 +683,17 @@ func TestDefaultContainerName(t *testing.T) {
}

func TestRandomContainerName(t *testing.T) {
-	runtime := mkRuntime(t)
+	eng := NewTestEngine(t)
+	srv := mkServerFromEngine(eng, t)
+	runtime := srv.runtime
	defer nuke(runtime)
-	srv := &Server{runtime: runtime}

	config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
	if err != nil {
		t.Fatal(err)
	}

-	shortId, _, err := srv.ContainerCreate(config, "")
-	if err != nil {
-		t.Fatal(err)
-	}
-	container := runtime.Get(shortId)
+	container := runtime.Get(createTestContainer(eng, config, t))
	containerID := container.ID

	if container.Name == "" {

@ -720,20 +717,17 @@ func TestRandomContainerName(t *testing.T) {
}

func TestLinkChildContainer(t *testing.T) {
-	runtime := mkRuntime(t)
+	eng := NewTestEngine(t)
+	srv := mkServerFromEngine(eng, t)
+	runtime := srv.runtime
	defer nuke(runtime)
-	srv := &Server{runtime: runtime}

	config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
	if err != nil {
		t.Fatal(err)
	}

-	shortId, _, err := srv.ContainerCreate(config, "/webapp")
-	if err != nil {
-		t.Fatal(err)
-	}
-	container := runtime.Get(shortId)
+	container := runtime.Get(createNamedTestContainer(eng, config, t, "/webapp"))

	webapp, err := runtime.GetByName("/webapp")
	if err != nil {

@ -749,12 +743,7 @@ func TestLinkChildContainer(t *testing.T) {
		t.Fatal(err)
	}

-	shortId, _, err = srv.ContainerCreate(config, "")
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	childContainer := runtime.Get(shortId)
+	childContainer := runtime.Get(createTestContainer(eng, config, t))

	if err := runtime.RegisterLink(webapp, childContainer, "db"); err != nil {
		t.Fatal(err)

@ -771,20 +760,17 @@ func TestLinkChildContainer(t *testing.T) {
}

func TestGetAllChildren(t *testing.T) {
-	runtime := mkRuntime(t)
+	eng := NewTestEngine(t)
+	srv := mkServerFromEngine(eng, t)
+	runtime := srv.runtime
	defer nuke(runtime)
-	srv := &Server{runtime: runtime}

	config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
	if err != nil {
		t.Fatal(err)
	}

-	shortId, _, err := srv.ContainerCreate(config, "/webapp")
-	if err != nil {
-		t.Fatal(err)
-	}
-	container := runtime.Get(shortId)
+	container := runtime.Get(createNamedTestContainer(eng, config, t, "/webapp"))

	webapp, err := runtime.GetByName("/webapp")
	if err != nil {

@ -800,12 +786,7 @@ func TestGetAllChildren(t *testing.T) {
		t.Fatal(err)
	}

-	shortId, _, err = srv.ContainerCreate(config, "")
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	childContainer := runtime.Get(shortId)
+	childContainer := runtime.Get(createTestContainer(eng, config, t))

	if err := runtime.RegisterLink(webapp, childContainer, "db"); err != nil {
		t.Fatal(err)
205 server.go
@ -33,30 +33,25 @@ func (srv *Server) Close() error {
}

func init() {
-	engine.Register("serveapi", JobServeApi)
+	engine.Register("initapi", jobInitApi)
}

-func JobServeApi(job *engine.Job) string {
-	srv, err := NewServer(ConfigFromJob(job))
+// jobInitApi runs the remote api server `srv` as a daemon,
+// Only one api server can run at the same time - this is enforced by a pidfile.
+// The signals SIGINT, SIGKILL and SIGTERM are intercepted for cleanup.
+func jobInitApi(job *engine.Job) string {
+	job.Logf("Creating server")
+	srv, err := NewServer(job.Eng, ConfigFromJob(job))
	if err != nil {
		return err.Error()
	}
-	defer srv.Close()
-	if err := srv.Daemon(); err != nil {
-		return err.Error()
+	if srv.runtime.config.Pidfile != "" {
+		job.Logf("Creating pidfile")
+		if err := utils.CreatePidFile(srv.runtime.config.Pidfile); err != nil {
+			log.Fatal(err)
+		}
	}
-	return "0"
-}
-
-// Daemon runs the remote api server `srv` as a daemon,
-// Only one api server can run at the same time - this is enforced by a pidfile.
-// The signals SIGINT, SIGKILL and SIGTERM are intercepted for cleanup.
-func (srv *Server) Daemon() error {
-	if err := utils.CreatePidFile(srv.runtime.config.Pidfile); err != nil {
-		log.Fatal(err)
-	}
	defer utils.RemovePidFile(srv.runtime.config.Pidfile)

+	job.Logf("Setting up signal traps")
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, os.Kill, os.Signal(syscall.SIGTERM))
	go func() {

@ -66,8 +61,21 @@ func (srv *Server) Daemon() error {
		srv.Close()
		os.Exit(0)
	}()
+	job.Eng.Hack_SetGlobalVar("httpapi.server", srv)
+	if err := job.Eng.Register("create", srv.ContainerCreate); err != nil {
+		return err.Error()
+	}
+	if err := job.Eng.Register("start", srv.ContainerStart); err != nil {
+		return err.Error()
+	}
+	if err := job.Eng.Register("serveapi", srv.ListenAndServe); err != nil {
+		return err.Error()
+	}
+	return "0"
+}

-	protoAddrs := srv.runtime.config.ProtoAddresses
+func (srv *Server) ListenAndServe(job *engine.Job) string {
+	protoAddrs := job.Args
	chErrors := make(chan error, len(protoAddrs))
	for _, protoAddr := range protoAddrs {
		protoAddrParts := strings.SplitN(protoAddr, "://", 2)

@ -81,19 +89,20 @@ func (srv *Server) Daemon() error {
			log.Println("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
		}
	default:
-		return fmt.Errorf("Invalid protocol format.")
+		return "Invalid protocol format."
	}
	go func() {
-		chErrors <- ListenAndServe(protoAddrParts[0], protoAddrParts[1], srv, true)
+		// FIXME: merge Server.ListenAndServe with ListenAndServe
+		chErrors <- ListenAndServe(protoAddrParts[0], protoAddrParts[1], srv, job.GetenvBool("Logging"))
	}()
	}
	for i := 0; i < len(protoAddrs); i += 1 {
		err := <-chErrors
		if err != nil {
-			return err
+			return err.Error()
		}
	}
-	return nil
+	return "0"
}

func (srv *Server) DockerVersion() APIVersion {
@ -154,7 +163,7 @@ func (srv *Server) ContainerKill(name string, sig int) error {
		if err := container.Kill(); err != nil {
			return fmt.Errorf("Cannot kill container %s: %s", name, err)
		}
-		srv.LogEvent("kill", container.ShortID(), srv.runtime.repositories.ImageName(container.Image))
+		srv.LogEvent("kill", container.ID, srv.runtime.repositories.ImageName(container.Image))
	} else {
		// Otherwise, just send the requested signal
		if err := container.kill(sig); err != nil {

@ -180,7 +189,7 @@ func (srv *Server) ContainerExport(name string, out io.Writer) error {
		if _, err := io.Copy(out, data); err != nil {
			return err
		}
-		srv.LogEvent("export", container.ShortID(), srv.runtime.repositories.ImageName(container.Image))
+		srv.LogEvent("export", container.ID, srv.runtime.repositories.ImageName(container.Image))
		return nil
	}
	return fmt.Errorf("No such container: %s", name)

@ -198,39 +207,39 @@ func (srv *Server) ImagesSearch(term string) ([]registry.SearchResult, error) {
	return results.Results, nil
}

-func (srv *Server) ImageInsert(name, url, path string, out io.Writer, sf *utils.StreamFormatter) (string, error) {
+func (srv *Server) ImageInsert(name, url, path string, out io.Writer, sf *utils.StreamFormatter) error {
	out = utils.NewWriteFlusher(out)
	img, err := srv.runtime.repositories.LookupImage(name)
	if err != nil {
-		return "", err
+		return err
	}

	file, err := utils.Download(url, out)
	if err != nil {
-		return "", err
+		return err
	}
	defer file.Body.Close()

	config, _, _, err := ParseRun([]string{img.ID, "echo", "insert", url, path}, srv.runtime.capabilities)
	if err != nil {
-		return "", err
+		return err
	}

	c, _, err := srv.runtime.Create(config, "")
	if err != nil {
-		return "", err
+		return err
	}

-	if err := c.Inject(utils.ProgressReader(file.Body, int(file.ContentLength), out, sf.FormatProgress("", "Downloading", "%8v/%v (%v)"), sf, true), path); err != nil {
-		return "", err
+	if err := c.Inject(utils.ProgressReader(file.Body, int(file.ContentLength), out, sf.FormatProgress("", "Downloading", "%8v/%v (%v)"), sf, false), path); err != nil {
+		return err
	}
	// FIXME: Handle custom repo, tag comment, author
	img, err = srv.runtime.Commit(c, "", "", img.Comment, img.Author, nil)
	if err != nil {
-		return "", err
+		return err
	}
-	out.Write(sf.FormatStatus("", img.ID))
-	return img.ShortID(), nil
+	out.Write(sf.FormatStatus(img.ID, ""))
+	return nil
}

func (srv *Server) ImagesViz(out io.Writer) error {

@ -250,9 +259,9 @@ func (srv *Server) ImagesViz(out io.Writer) error {
		return fmt.Errorf("Error while getting parent image: %v", err)
	}
	if parentImage != nil {
-		out.Write([]byte(" \"" + parentImage.ShortID() + "\" -> \"" + image.ShortID() + "\"\n"))
+		out.Write([]byte(" \"" + parentImage.ID + "\" -> \"" + image.ID + "\"\n"))
	} else {
-		out.Write([]byte(" base -> \"" + image.ShortID() + "\" [style=invis]\n"))
+		out.Write([]byte(" base -> \"" + image.ID + "\" [style=invis]\n"))
	}
}

@ -465,7 +474,7 @@ func (srv *Server) Containers(all, size bool, n int, since, before string) []API
			continue
		}
		if before != "" {
-			if container.ShortID() == before {
+			if container.ID == before || utils.TruncateID(container.ID) == before {
				foundBefore = true
				continue
			}

@ -476,7 +485,7 @@ func (srv *Server) Containers(all, size bool, n int, since, before string) []API
		if displayed == n {
			break
		}
-		if container.ShortID() == since {
+		if container.ID == since || utils.TruncateID(container.ID) == since {
			break
		}
		displayed++

@ -518,7 +527,7 @@ func (srv *Server) ContainerCommit(name, repo, tag, author, comment string, conf
	if err != nil {
		return "", err
	}
-	return img.ShortID(), err
+	return img.ID, err
}

func (srv *Server) ContainerTag(name, repo, tag string, force bool) error {
@ -1018,37 +1027,47 @@ func (srv *Server) ImageImport(src, repo, tag string, in io.Reader, out io.Write
			return err
		}
	}
-	out.Write(sf.FormatStatus("", img.ShortID()))
+	out.Write(sf.FormatStatus("", img.ID))
	return nil
}

-func (srv *Server) ContainerCreate(config *Config, name string) (string, []string, error) {
-	if config.Memory != 0 && config.Memory < 524288 {
-		return "", nil, fmt.Errorf("Memory limit must be given in bytes (minimum 524288 bytes)")
+func (srv *Server) ContainerCreate(job *engine.Job) string {
+	var name string
+	if len(job.Args) == 1 {
+		name = job.Args[0]
+	} else if len(job.Args) > 1 {
+		return fmt.Sprintf("Usage: %s ", job.Name)
+	}
+	var config Config
+	if err := job.ExportEnv(&config); err != nil {
+		return err.Error()
+	}
+	if config.Memory != 0 && config.Memory < 524288 {
+		return "Minimum memory limit allowed is 512k"
	}

	if config.Memory > 0 && !srv.runtime.capabilities.MemoryLimit {
		config.Memory = 0
	}

	if config.Memory > 0 && !srv.runtime.capabilities.SwapLimit {
		config.MemorySwap = -1
	}
-	container, buildWarnings, err := srv.runtime.Create(config, name)
+	container, buildWarnings, err := srv.runtime.Create(&config, name)
	if err != nil {
		if srv.runtime.graph.IsNotExist(err) {

			_, tag := utils.ParseRepositoryTag(config.Image)
			if tag == "" {
				tag = DEFAULTTAG
			}

-			return "", nil, fmt.Errorf("No such image: %s (tag: %s)", config.Image, tag)
+			return fmt.Sprintf("No such image: %s (tag: %s)", config.Image, tag)
		}
-		return "", nil, err
+		return err.Error()
	}
-	srv.LogEvent("create", container.ShortID(), srv.runtime.repositories.ImageName(container.Image))
-	return container.ShortID(), buildWarnings, nil
+	srv.LogEvent("create", container.ID, srv.runtime.repositories.ImageName(container.Image))
+	job.Printf("%s\n", container.ID)
+	for _, warning := range buildWarnings {
+		job.Errorf("%s\n", warning)
+	}
+	return "0"
}

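Note the new calling convention: instead of returning the ID, the handler writes the container ID as the first line of the job's stdout (job.Printf) and one warning per line on stderr (job.Errorf). A sketch of how a caller would recover the ID from that stream — readContainerID and the literal input are illustrative, not part of the diff:

package main

import (
	"bufio"
	"fmt"
	"strings"
)

// readContainerID pulls the first line out of the job's stdout, which
// under the convention above is the newly created container's ID.
func readContainerID(stdout string) (string, error) {
	line, err := bufio.NewReader(strings.NewReader(stdout)).ReadString('\n')
	if err != nil {
		return "", err
	}
	id := strings.TrimSpace(line)
	if id == "" {
		return "", fmt.Errorf("create returned no container ID")
	}
	return id, nil
}

func main() {
	id, err := readContainerID("4e38e38c8ce0\n")
	fmt.Println(id, err)
}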
func (srv *Server) ContainerRestart(name string, t int) error {

@ -1056,7 +1075,7 @@ func (srv *Server) ContainerRestart(name string, t int) error {
		if err := container.Restart(t); err != nil {
			return fmt.Errorf("Cannot restart container %s: %s", name, err)
		}
-		srv.LogEvent("restart", container.ShortID(), srv.runtime.repositories.ImageName(container.Image))
+		srv.LogEvent("restart", container.ID, srv.runtime.repositories.ImageName(container.Image))
	} else {
		return fmt.Errorf("No such container: %s", name)
	}

@ -1112,7 +1131,7 @@ func (srv *Server) ContainerDestroy(name string, removeVolume, removeLink bool)
		if err := srv.runtime.Destroy(container); err != nil {
			return fmt.Errorf("Cannot destroy container %s: %s", name, err)
		}
-		srv.LogEvent("destroy", container.ShortID(), srv.runtime.repositories.ImageName(container.Image))
+		srv.LogEvent("destroy", container.ID, srv.runtime.repositories.ImageName(container.Image))

		if removeVolume {
			// Retrieve all volumes from all remaining containers

@ -1229,8 +1248,8 @@ func (srv *Server) deleteImage(img *Image, repoName, tag string) ([]APIRmi, erro
			return nil, err
		}
		if tagDeleted {
-			imgs = append(imgs, APIRmi{Untagged: img.ShortID()})
-			srv.LogEvent("untag", img.ShortID(), "")
+			imgs = append(imgs, APIRmi{Untagged: img.ID})
+			srv.LogEvent("untag", img.ID, "")
		}
	}
	if len(srv.runtime.repositories.ByID()[img.ID]) == 0 {

@ -1258,6 +1277,26 @@ func (srv *Server) ImageDelete(name string, autoPrune bool) ([]APIRmi, error) {
		}
		return nil, nil
	}

+	// Prevent deletion if image is used by a running container
+	for _, container := range srv.runtime.List() {
+		if container.State.Running {
+			parent, err := srv.runtime.repositories.LookupImage(container.Image)
+			if err != nil {
+				return nil, err
+			}
+
+			if err := parent.WalkHistory(func(p *Image) error {
+				if img.ID == p.ID {
+					return fmt.Errorf("Conflict, cannot delete %s because the running container %s is using it", name, container.ID)
+				}
+				return nil
+			}); err != nil {
+				return nil, err
+			}
+		}
+	}
+
	if strings.Contains(img.ID, name) {
		//delete via ID
		return srv.deleteImage(img, "", "")

@ -1303,7 +1342,6 @@ func (srv *Server) RegisterLinks(name string, hostConfig *HostConfig) error {
		return fmt.Errorf("No such container: %s", name)
	}

-	// Register links
	if hostConfig != nil && hostConfig.Links != nil {
		for _, l := range hostConfig.Links {
			parts, err := parseLink(l)

@ -1317,7 +1355,6 @@ func (srv *Server) RegisterLinks(name string, hostConfig *HostConfig) error {
			if child == nil {
				return fmt.Errorf("Could not get container for %s", parts["name"])
			}
-
			if err := runtime.RegisterLink(container, child, parts["alias"]); err != nil {
				return err
			}

@ -1333,41 +1370,57 @@ func (srv *Server) RegisterLinks(name string, hostConfig *HostConfig) error {
	return nil
}

-func (srv *Server) ContainerStart(name string, hostConfig *HostConfig) error {
+func (srv *Server) ContainerStart(job *engine.Job) string {
+	if len(job.Args) < 1 {
+		return fmt.Sprintf("Usage: %s container_id", job.Name)
+	}
+	name := job.Args[0]
	runtime := srv.runtime
	container := runtime.Get(name)

-	if hostConfig != nil {
+	if container == nil {
+		return fmt.Sprintf("No such container: %s", name)
+	}
+	// If no environment was set, then no hostconfig was passed.
+	if len(job.Environ()) > 0 {
+		var hostConfig HostConfig
+		if err := job.ExportEnv(&hostConfig); err != nil {
+			return err.Error()
+		}
		// Validate the HostConfig binds. Make sure that:
		// 1) the source of a bind mount isn't /
		//    The bind mount "/:/foo" isn't allowed.
		// 2) Check that the source exists
		//    The source to be bind mounted must exist.
		for _, bind := range hostConfig.Binds {
			splitBind := strings.Split(bind, ":")
			source := splitBind[0]

			// refuse to bind mount "/" to the container
			if source == "/" {
-				return fmt.Errorf("Invalid bind mount '%s' : source can't be '/'", bind)
+				return fmt.Sprintf("Invalid bind mount '%s' : source can't be '/'", bind)
			}

			// ensure the source exists on the host
			_, err := os.Stat(source)
			if err != nil && os.IsNotExist(err) {
-				return fmt.Errorf("Invalid bind mount '%s' : source doesn't exist", bind)
+				return fmt.Sprintf("Invalid bind mount '%s' : source doesn't exist", bind)
			}
		}
-	}
-
-	if container == nil {
-		return fmt.Errorf("No such container: %s", name)
-	}
-	if hostConfig != nil {
-		container.hostConfig = hostConfig
+		// Register any links from the host config before starting the container
+		// FIXME: we could just pass the container here, no need to lookup by name again.
+		if err := srv.RegisterLinks(name, &hostConfig); err != nil {
+			return err.Error()
+		}
+		container.hostConfig = &hostConfig
		container.ToDisk()
	}
	if err := container.Start(); err != nil {
-		return fmt.Errorf("Cannot start container %s: %s", name, err)
+		return fmt.Sprintf("Cannot start container %s: %s", name, err)
	}
-	srv.LogEvent("start", container.ShortID(), runtime.repositories.ImageName(container.Image))
+	srv.LogEvent("start", container.ID, runtime.repositories.ImageName(container.Image))

-	return nil
+	return "0"
}

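The client half of this contract appears in the tests further down: the HostConfig travels through the job environment via ImportEnv and is rebuilt here with ExportEnv whenever the environment is non-empty. One plausible way to implement that struct-to-env round trip is JSON per field — an assumption for illustration; the real engine may encode differently:

package main

import (
	"encoding/json"
	"fmt"
)

type HostConfig struct {
	Binds []string
}

// importEnv flattens a struct into string key/values (the job env).
func importEnv(v interface{}) (map[string]string, error) {
	raw, err := json.Marshal(v)
	if err != nil {
		return nil, err
	}
	var fields map[string]json.RawMessage
	if err := json.Unmarshal(raw, &fields); err != nil {
		return nil, err
	}
	env := map[string]string{}
	for k, f := range fields {
		env[k] = string(f)
	}
	return env, nil
}

// exportEnv rebuilds the struct on the handler side.
func exportEnv(env map[string]string, v interface{}) error {
	fields := map[string]json.RawMessage{}
	for k, val := range env {
		fields[k] = json.RawMessage(val)
	}
	raw, err := json.Marshal(fields)
	if err != nil {
		return err
	}
	return json.Unmarshal(raw, v)
}

func main() {
	env, _ := importEnv(&HostConfig{Binds: []string{"/tmp:/data"}})
	var hc HostConfig
	if err := exportEnv(env, &hc); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("%+v\n", hc) // {Binds:[/tmp:/data]}
}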
func (srv *Server) ContainerStop(name string, t int) error {

@ -1375,7 +1428,7 @@ func (srv *Server) ContainerStop(name string, t int) error {
		if err := container.Stop(t); err != nil {
			return fmt.Errorf("Cannot stop container %s: %s", name, err)
		}
-		srv.LogEvent("stop", container.ShortID(), srv.runtime.repositories.ImageName(container.Image))
+		srv.LogEvent("stop", container.ID, srv.runtime.repositories.ImageName(container.Image))
	} else {
		return fmt.Errorf("No such container: %s", name)
	}

@ -1518,12 +1571,13 @@ func (srv *Server) ContainerCopy(name string, resource string, out io.Writer) er

}

-func NewServer(config *DaemonConfig) (*Server, error) {
+func NewServer(eng *engine.Engine, config *DaemonConfig) (*Server, error) {
	runtime, err := NewRuntime(config)
	if err != nil {
		return nil, err
	}
	srv := &Server{
+		Eng:         eng,
		runtime:     runtime,
		pullingPool: make(map[string]struct{}),
		pushingPool: make(map[string]struct{}),

@ -1567,4 +1621,5 @@ type Server struct {
	events      []utils.JSONMessage
	listeners   map[string]chan utils.JSONMessage
	reqFactory  *utils.HTTPRequestFactory
+	Eng         *engine.Engine
}
138 server_test.go
@ -2,6 +2,7 @@ package docker
|
|||
|
||||
import (
|
||||
"github.com/dotcloud/docker/utils"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
@ -79,20 +80,17 @@ func TestContainerTagImageDelete(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestCreateRm(t *testing.T) {
|
||||
runtime := mkRuntime(t)
|
||||
eng := NewTestEngine(t)
|
||||
srv := mkServerFromEngine(eng, t)
|
||||
runtime := srv.runtime
|
||||
defer nuke(runtime)
|
||||
|
||||
srv := &Server{runtime: runtime}
|
||||
|
||||
config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
id, _, err := srv.ContainerCreate(config, "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
id := createTestContainer(eng, config, t)
|
||||
|
||||
if len(runtime.List()) != 1 {
|
||||
t.Errorf("Expected 1 container, %v found", len(runtime.List()))
|
||||
|
@@ -109,27 +107,28 @@ func TestCreateRm(t *testing.T) {
 }

 func TestCreateRmVolumes(t *testing.T) {
-	runtime := mkRuntime(t)
-	defer nuke(runtime)
+	eng := NewTestEngine(t)

-	srv := &Server{runtime: runtime}
+	srv := mkServerFromEngine(eng, t)
+	runtime := srv.runtime
+	defer nuke(runtime)

 	config, hostConfig, _, err := ParseRun([]string{"-v", "/srv", GetTestImage(runtime).ID, "echo test"}, nil)
 	if err != nil {
 		t.Fatal(err)
 	}

-	id, _, err := srv.ContainerCreate(config, "")
-	if err != nil {
-		t.Fatal(err)
-	}
+	id := createTestContainer(eng, config, t)

 	if len(runtime.List()) != 1 {
 		t.Errorf("Expected 1 container, %v found", len(runtime.List()))
 	}

-	err = srv.ContainerStart(id, hostConfig)
-	if err != nil {
+	job := eng.Job("start", id)
+	if err := job.ImportEnv(hostConfig); err != nil {
+		t.Fatal(err)
+	}
+	if err := job.Run(); err != nil {
 		t.Fatal(err)
 	}
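The same four-line start sequence recurs in several tests below: build a "start" job, copy the host config into its environment, then run it. A helper one could factor out looks like this; startTestContainer is hypothetical, since the commit inlines the pattern in each test:

package docker

import (
	"github.com/dotcloud/docker/engine"
	"github.com/dotcloud/docker/utils"
)

// startTestContainer is a hypothetical helper for the repeated
// start-job pattern used by these tests.
func startTestContainer(eng *engine.Engine, id string, hostConfig *HostConfig, f utils.Fataler) {
	job := eng.Job("start", id)
	if err := job.ImportEnv(hostConfig); err != nil {
		f.Fatal(err)
	}
	if err := job.Run(); err != nil {
		f.Fatal(err)
	}
}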
@@ -148,20 +147,17 @@ func TestCreateRmVolumes(t *testing.T) {
 }

 func TestCommit(t *testing.T) {
-	runtime := mkRuntime(t)
+	eng := NewTestEngine(t)
+	srv := mkServerFromEngine(eng, t)
+	runtime := srv.runtime
 	defer nuke(runtime)

-	srv := &Server{runtime: runtime}
-
 	config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "/bin/cat"}, nil)
 	if err != nil {
 		t.Fatal(err)
 	}

-	id, _, err := srv.ContainerCreate(config, "")
-	if err != nil {
-		t.Fatal(err)
-	}
+	id := createTestContainer(eng, config, t)

 	if _, err := srv.ContainerCommit(id, "testrepo", "testtag", "", "", config); err != nil {
 		t.Fatal(err)
@@ -169,26 +165,27 @@ func TestCommit(t *testing.T) {
 }

 func TestCreateStartRestartStopStartKillRm(t *testing.T) {
-	runtime := mkRuntime(t)
+	eng := NewTestEngine(t)
+	srv := mkServerFromEngine(eng, t)
+	runtime := srv.runtime
 	defer nuke(runtime)

-	srv := &Server{runtime: runtime}
-
 	config, hostConfig, _, err := ParseRun([]string{GetTestImage(runtime).ID, "/bin/cat"}, nil)
 	if err != nil {
 		t.Fatal(err)
 	}

-	id, _, err := srv.ContainerCreate(config, "")
-	if err != nil {
-		t.Fatal(err)
-	}
+	id := createTestContainer(eng, config, t)

 	if len(runtime.List()) != 1 {
 		t.Errorf("Expected 1 container, %v found", len(runtime.List()))
 	}

-	if err := srv.ContainerStart(id, hostConfig); err != nil {
+	job := eng.Job("start", id)
+	if err := job.ImportEnv(hostConfig); err != nil {
+		t.Fatal(err)
+	}
+	if err := job.Run(); err != nil {
 		t.Fatal(err)
 	}
@@ -200,7 +197,11 @@ func TestCreateStartRestartStopStartKillRm(t *testing.T) {
 		t.Fatal(err)
 	}

-	if err := srv.ContainerStart(id, hostConfig); err != nil {
+	job = eng.Job("start", id)
+	if err := job.ImportEnv(hostConfig); err != nil {
+		t.Fatal(err)
+	}
+	if err := job.Run(); err != nil {
 		t.Fatal(err)
 	}
@@ -220,22 +221,22 @@ func TestCreateStartRestartStopStartKillRm(t *testing.T) {
 }

 func TestRunWithTooLowMemoryLimit(t *testing.T) {
-	runtime := mkRuntime(t)
+	eng := NewTestEngine(t)
+	srv := mkServerFromEngine(eng, t)
+	runtime := srv.runtime
 	defer nuke(runtime)

 	// Try to create a container with a memory limit of 1 byte less than the minimum allowed limit.
-	if _, _, err := (*Server).ContainerCreate(&Server{runtime: runtime},
-		&Config{
-			Image:     GetTestImage(runtime).ID,
-			Memory:    524287,
-			CpuShares: 1000,
-			Cmd:       []string{"/bin/cat"},
-		},
-		"",
-	); err == nil {
+	job := eng.Job("create")
+	job.Setenv("Image", GetTestImage(runtime).ID)
+	job.Setenv("Memory", "524287")
+	job.Setenv("CpuShares", "1000")
+	job.SetenvList("Cmd", []string{"/bin/cat"})
+	var id string
+	job.StdoutParseString(&id)
+	if err := job.Run(); err == nil {
 		t.Errorf("Memory limit is smaller than the allowed limit. Container creation should've failed!")
 	}
 }

 func TestContainerTop(t *testing.T) {
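The magic number 524287 in the test above is one byte below 512 KiB, the minimum limit its comment refers to. The constant names below are illustrative, not from the source:

const (
	minMemoryLimit = 512 * 1024         // 524288 bytes (512 KiB)
	justBelowLimit = minMemoryLimit - 1 // 524287, the value the test passes
)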
@@ -384,9 +385,10 @@ func TestLogEvent(t *testing.T) {
 }

 func TestRmi(t *testing.T) {
-	runtime := mkRuntime(t)
+	eng := NewTestEngine(t)
+	srv := mkServerFromEngine(eng, t)
+	runtime := srv.runtime
 	defer nuke(runtime)
-	srv := &Server{runtime: runtime}

 	initialImages, err := srv.Images(false, "")
 	if err != nil {
@@ -398,14 +400,14 @@ func TestRmi(t *testing.T) {
 		t.Fatal(err)
 	}

-	containerID, _, err := srv.ContainerCreate(config, "")
-	if err != nil {
-		t.Fatal(err)
-	}
+	containerID := createTestContainer(eng, config, t)

 	//To remove
-	err = srv.ContainerStart(containerID, hostConfig)
-	if err != nil {
+	job := eng.Job("start", containerID)
+	if err := job.ImportEnv(hostConfig); err != nil {
+		t.Fatal(err)
+	}
+	if err := job.Run(); err != nil {
 		t.Fatal(err)
 	}
@@ -419,14 +421,14 @@ func TestRmi(t *testing.T) {
 		t.Fatal(err)
 	}

-	containerID, _, err = srv.ContainerCreate(config, "")
-	if err != nil {
-		t.Fatal(err)
-	}
+	containerID = createTestContainer(eng, config, t)

 	//To remove
-	err = srv.ContainerStart(containerID, hostConfig)
-	if err != nil {
+	job = eng.Job("start", containerID)
+	if err := job.ImportEnv(hostConfig); err != nil {
+		t.Fatal(err)
+	}
+	if err := job.Run(); err != nil {
 		t.Fatal(err)
 	}
@@ -521,3 +523,25 @@ func TestImagesFilter(t *testing.T) {
 		t.Fatal("incorrect number of matches returned")
 	}
 }
+
+func TestImageInsert(t *testing.T) {
+	runtime := mkRuntime(t)
+	defer nuke(runtime)
+	srv := &Server{runtime: runtime}
+	sf := utils.NewStreamFormatter(true)
+
+	// bad image name fails
+	if err := srv.ImageInsert("foo", "https://www.docker.io/static/img/docker-top-logo.png", "/foo", ioutil.Discard, sf); err == nil {
+		t.Fatal("expected an error and got none")
+	}
+
+	// bad url fails
+	if err := srv.ImageInsert(GetTestImage(runtime).ID, "http://bad_host_name_that_will_totally_fail.com/", "/foo", ioutil.Discard, sf); err == nil {
+		t.Fatal("expected an error and got none")
+	}
+
+	// success returns nil
+	if err := srv.ImageInsert(GetTestImage(runtime).ID, "https://www.docker.io/static/img/docker-top-logo.png", "/foo", ioutil.Discard, sf); err != nil {
+		t.Fatalf("expected no error, but got %v", err)
+	}
+}
37 utils.go
@@ -119,6 +119,15 @@ func MergeConfig(userConf, imageConf *Config) error {
 	}
 	if userConf.ExposedPorts == nil || len(userConf.ExposedPorts) == 0 {
 		userConf.ExposedPorts = imageConf.ExposedPorts
-	}
+	} else if imageConf.ExposedPorts != nil {
+		if userConf.ExposedPorts == nil {
+			userConf.ExposedPorts = make(map[Port]struct{})
+		}
+		for port := range imageConf.ExposedPorts {
+			if _, exists := userConf.ExposedPorts[port]; !exists {
+				userConf.ExposedPorts[port] = struct{}{}
+			}
+		}
+	}

 	if userConf.PortSpecs != nil && len(userConf.PortSpecs) > 0 {
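Concretely, the new branch keeps every port the user exposed and adds any image ports not already present, instead of letting one side win. A standalone sketch of the same loop, using plain string keys in place of the Port type:

package main

import "fmt"

func main() {
	user := map[string]struct{}{"1111/tcp": {}}
	image := map[string]struct{}{"1111/tcp": {}, "2222/tcp": {}}
	// Same shape as the loop above: add image ports the user lacks.
	for port := range image {
		if _, exists := user[port]; !exists {
			user[port] = struct{}{}
		}
	}
	fmt.Println(len(user)) // 2: both 1111/tcp and 2222/tcp survive the merge
}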
@@ -325,20 +334,6 @@ func migratePortMappings(config *Config, hostConfig *HostConfig) error {
 	return nil
 }

-func RootIsShared() bool {
-	if data, err := ioutil.ReadFile("/proc/self/mountinfo"); err == nil {
-		for _, line := range strings.Split(string(data), "\n") {
-			cols := strings.Split(line, " ")
-			if len(cols) >= 6 && cols[4] == "/" {
-				return strings.HasPrefix(cols[6], "shared")
-			}
-		}
-	}
-
-	// No idea, probably safe to assume so
-	return true
-}
-
 func BtrfsReflink(fd_out, fd_in uintptr) error {
 	res := C.btrfs_reflink(C.int(fd_out), C.int(fd_in))
 	if res != 0 {
@@ -353,6 +348,20 @@ func parseLink(rawLink string) (map[string]string, error) {
 	return utils.PartParser("name:alias", rawLink)
 }

+func RootIsShared() bool {
+	if data, err := ioutil.ReadFile("/proc/self/mountinfo"); err == nil {
+		for _, line := range strings.Split(string(data), "\n") {
+			cols := strings.Split(line, " ")
+			if len(cols) >= 6 && cols[4] == "/" {
+				return strings.HasPrefix(cols[6], "shared")
+			}
+		}
+	}
+
+	// No idea, probably safe to assume so
+	return true
+}
+
 type checker struct {
 	runtime *Runtime
 }
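RootIsShared (merely moved in this hunk) splits each /proc/self/mountinfo line on spaces: per proc(5), cols[4] is the mount point and cols[6] is the first optional field, which carries a shared:N tag on shared mounts. Note that indexing cols[6] strictly requires len(cols) >= 7; the >= 6 guard is safe only because well-formed mountinfo lines always carry at least the "-" separator and filesystem fields after the options. A worked example with an illustrative sample line:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// A well-formed mountinfo line for a shared root mount, per proc(5):
	// id parent major:minor root mount-point options [optional...] - fstype source super-options
	line := "19 1 8:1 / / rw,relatime shared:1 - ext4 /dev/sda1 rw"
	cols := strings.Split(line, " ")
	fmt.Println(cols[4])                              // "/": this entry is the root mount
	fmt.Println(strings.HasPrefix(cols[6], "shared")) // true: the root mount is shared
}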
utils/utils.go

@@ -28,6 +28,12 @@
 	INITSHA1 string // sha1sum of separate static dockerinit, if Docker itself was compiled dynamically via ./hack/make.sh dynbinary
 )

+// A common interface to access the Fatal method of
+// both testing.B and testing.T.
+type Fataler interface {
+	Fatal(args ...interface{})
+}
+
 // ListOpts type
 type ListOpts []string
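Because both *testing.T and *testing.B have a Fatal method, they satisfy this interface implicitly, so one helper can serve tests and benchmarks alike. A usage sketch; mustNotFail is hypothetical:

package utils

// mustNotFail is a hypothetical helper showing how Fataler lets a
// single function accept *testing.T and *testing.B callers.
func mustNotFail(f Fataler, err error) {
	if err != nil {
		f.Fatal(err)
	}
}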
@@ -177,6 +183,40 @@ func HumanSize(size int64) string {
 	return fmt.Sprintf("%.4g %s", sizef, units[i])
 }

+// Parses a human-readable string representing an amount of RAM
+// in bytes, kibibytes, mebibytes or gibibytes, and returns the
+// number of bytes, or -1 if the string is unparseable.
+// Units are case-insensitive, and the 'b' suffix is optional.
+func RAMInBytes(size string) (bytes int64, err error) {
+	re, error := regexp.Compile("^(\\d+)([kKmMgG])?[bB]?$")
+	if error != nil {
+		return -1, error
+	}
+
+	matches := re.FindStringSubmatch(size)
+
+	if len(matches) != 3 {
+		return -1, fmt.Errorf("Invalid size: '%s'", size)
+	}
+
+	memLimit, error := strconv.ParseInt(matches[1], 10, 0)
+	if error != nil {
+		return -1, error
+	}
+
+	unit := strings.ToLower(matches[2])
+
+	if unit == "k" {
+		memLimit *= 1024
+	} else if unit == "m" {
+		memLimit *= 1024 * 1024
+	} else if unit == "g" {
+		memLimit *= 1024 * 1024 * 1024
+	}
+
+	return memLimit, nil
+}
+
 func Trunc(s string, maxlen int) string {
 	if len(s) <= maxlen {
 		return s
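The units are binary: "512m" parses to 512 * 1024 * 1024 = 536870912 bytes, a bare number is taken as bytes, and the anchored regexp rejects negatives and embedded spaces, matching the test cases further down. A usage sketch, assuming the function lands in the utils package as the surrounding hunks suggest:

package utils

import "fmt"

// ExampleRAMInBytes sketches typical inputs and results.
func ExampleRAMInBytes() {
	v, _ := RAMInBytes("512m")
	fmt.Println(v) // 536870912 (512 * 1024 * 1024)
	if _, err := RAMInBytes("32 mb"); err != nil {
		fmt.Println(err) // embedded space fails: Invalid size: '32 mb'
	}
}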
@@ -910,7 +950,7 @@ func StripComments(input []byte, commentMarker []byte) []byte {
 func GetNameserversAsCIDR(resolvConf []byte) []string {
 	var parsedResolvConf = StripComments(resolvConf, []byte("#"))
 	nameservers := []string{}
-	re := regexp.MustCompile(`^\s*nameserver\s*(([0-9]\.){3}([0-9]))\s*$`)
+	re := regexp.MustCompile(`^\s*nameserver\s*(([0-9]+\.){3}([0-9]+))\s*$`)
 	for _, line := range bytes.Split(parsedResolvConf, []byte("\n")) {
 		var ns = re.FindSubmatch(line)
 		if len(ns) > 0 {
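The one-character fix matters: the old pattern allowed only single-digit octets, so a resolver such as 40.3.200.10 was silently dropped; the added + quantifiers accept multi-digit octets. A quick check, with oldRe and newRe as local names:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	oldRe := regexp.MustCompile(`^\s*nameserver\s*(([0-9]\.){3}([0-9]))\s*$`)
	newRe := regexp.MustCompile(`^\s*nameserver\s*(([0-9]+\.){3}([0-9]+))\s*$`)
	line := "nameserver 40.3.200.10"
	fmt.Println(oldRe.MatchString(line)) // false: single-digit octets only
	fmt.Println(newRe.MatchString(line)) // true
}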
utils/utils_test.go

@@ -265,6 +265,39 @@ func TestHumanSize(t *testing.T) {
 	}
 }

+func TestRAMInBytes(t *testing.T) {
+	assertRAMInBytes(t, "32", false, 32)
+	assertRAMInBytes(t, "32b", false, 32)
+	assertRAMInBytes(t, "32B", false, 32)
+	assertRAMInBytes(t, "32k", false, 32*1024)
+	assertRAMInBytes(t, "32K", false, 32*1024)
+	assertRAMInBytes(t, "32kb", false, 32*1024)
+	assertRAMInBytes(t, "32Kb", false, 32*1024)
+	assertRAMInBytes(t, "32Mb", false, 32*1024*1024)
+	assertRAMInBytes(t, "32Gb", false, 32*1024*1024*1024)
+
+	assertRAMInBytes(t, "", true, -1)
+	assertRAMInBytes(t, "hello", true, -1)
+	assertRAMInBytes(t, "-32", true, -1)
+	assertRAMInBytes(t, " 32 ", true, -1)
+	assertRAMInBytes(t, "32 mb", true, -1)
+	assertRAMInBytes(t, "32m b", true, -1)
+	assertRAMInBytes(t, "32bm", true, -1)
+}
+
+func assertRAMInBytes(t *testing.T, size string, expectError bool, expectedBytes int64) {
+	actualBytes, err := RAMInBytes(size)
+	if (err != nil) && !expectError {
+		t.Errorf("Unexpected error parsing '%s': %s", size, err)
+	}
+	if (err == nil) && expectError {
+		t.Errorf("Expected to get an error parsing '%s', but got none (bytes=%d)", size, actualBytes)
+	}
+	if actualBytes != expectedBytes {
+		t.Errorf("Expected '%s' to parse as %d bytes, got %d", size, expectedBytes, actualBytes)
+	}
+}
+
 func TestParseHost(t *testing.T) {
 	if addr, err := ParseHost("127.0.0.1", 4243, "0.0.0.0"); err != nil || addr != "tcp://0.0.0.0:4243" {
 		t.Errorf("0.0.0.0 -> expected tcp://0.0.0.0:4243, got %s", addr)
@@ -448,12 +481,12 @@ func TestParsePortMapping(t *testing.T) {
 func TestGetNameserversAsCIDR(t *testing.T) {
 	for resolv, result := range map[string][]string{`
nameserver 1.2.3.4
-nameserver 4.3.2.1
-search example.com`: {"1.2.3.4/32", "4.3.2.1/32"},
+nameserver 40.3.200.10
+search example.com`: {"1.2.3.4/32", "40.3.200.10/32"},
 		`search example.com`: {},
 		`nameserver 1.2.3.4
search example.com
-nameserver 4.3.2.1`: {"1.2.3.4/32", "4.3.2.1/32"},
+nameserver 4.30.20.100`: {"1.2.3.4/32", "4.30.20.100/32"},
 		``: {},
 		` nameserver 1.2.3.4 `: {"1.2.3.4/32"},
 		`search example.com
160 utils_test.go
@@ -2,6 +2,7 @@ package docker

 import (
 	"fmt"
+	"github.com/dotcloud/docker/engine"
 	"github.com/dotcloud/docker/utils"
 	"io"
 	"io/ioutil"
@@ -20,64 +21,97 @@ var globalTestID string

 // Create a temporary runtime suitable for unit testing.
 // Call t.Fatal() at the first error.
-func mkRuntime(f Fataler) *Runtime {
-	// Use the caller function name as a prefix.
-	// This helps trace temp directories back to their test.
-	pc, _, _, _ := runtime.Caller(1)
-	callerLongName := runtime.FuncForPC(pc).Name()
-	parts := strings.Split(callerLongName, ".")
-	callerShortName := parts[len(parts)-1]
-	if globalTestID == "" {
-		globalTestID = GenerateID()[:4]
-	}
-	prefix := fmt.Sprintf("docker-test%s-%s-", globalTestID, callerShortName)
-	utils.Debugf("prefix = '%s'", prefix)
-
-	runtime, err := newTestRuntime(prefix)
+func mkRuntime(f utils.Fataler) *Runtime {
+	root, err := newTestDirectory(unitTestStoreBase)
 	if err != nil {
 		f.Fatal(err)
 	}
-	return runtime
-}
-
-// A common interface to access the Fatal method of
-// both testing.B and testing.T.
-type Fataler interface {
-	Fatal(args ...interface{})
-}
-
-func newTestRuntime(prefix string) (runtime *Runtime, err error) {
-	if prefix == "" {
-		prefix = "docker-test-"
-	}
-	utils.Debugf("prefix = %s", prefix)
-	utils.Debugf("newTestRuntime start")
-	root, err := ioutil.TempDir("", prefix)
-	defer func() {
-		utils.Debugf("newTestRuntime: %s", root)
-	}()
-	if err != nil {
-		return nil, err
-	}
-	if err := os.Remove(root); err != nil {
-		return nil, err
-	}
-	utils.Debugf("Copying %s to %s", unitTestStoreBase, root)
-	if err := utils.CopyDirectory(unitTestStoreBase, root); err != nil {
-		utils.Debugf("ERROR: Copying %s to %s returned %s", unitTestStoreBase, root, err)
-		return nil, err
-	}
-
 	config := &DaemonConfig{
 		Root:        root,
 		AutoRestart: false,
 	}
-	runtime, err = NewRuntimeFromDirectory(config)
+	r, err := NewRuntimeFromDirectory(config)
 	if err != nil {
-		return nil, err
+		f.Fatal(err)
 	}
-	runtime.UpdateCapabilities(true)
-	return runtime, nil
+	r.UpdateCapabilities(true)
+	return r
 }
+func createNamedTestContainer(eng *engine.Engine, config *Config, f utils.Fataler, name string) (shortId string) {
+	job := eng.Job("create", name)
+	if err := job.ImportEnv(config); err != nil {
+		f.Fatal(err)
+	}
+	job.StdoutParseString(&shortId)
+	if err := job.Run(); err != nil {
+		f.Fatal(err)
+	}
+	return
+}
+
+func createTestContainer(eng *engine.Engine, config *Config, f utils.Fataler) (shortId string) {
+	return createNamedTestContainer(eng, config, f, "")
+}
+
+func mkServerFromEngine(eng *engine.Engine, t utils.Fataler) *Server {
+	iSrv := eng.Hack_GetGlobalVar("httpapi.server")
+	if iSrv == nil {
+		panic("Legacy server field not set in engine")
+	}
+	srv, ok := iSrv.(*Server)
+	if !ok {
+		panic("Legacy server field in engine does not cast to *Server")
+	}
+	return srv
+}
+
+func NewTestEngine(t utils.Fataler) *engine.Engine {
+	root, err := newTestDirectory(unitTestStoreBase)
+	if err != nil {
+		t.Fatal(err)
+	}
+	eng, err := engine.New(root)
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Load default plugins
+	// (This is manually copied and modified from main() until we have a more generic plugin system)
+	job := eng.Job("initapi")
+	job.Setenv("Root", root)
+	job.SetenvBool("AutoRestart", false)
+	if err := job.Run(); err != nil {
+		t.Fatal(err)
+	}
+	return eng
+}
+
+func newTestDirectory(templateDir string) (dir string, err error) {
+	if globalTestID == "" {
+		globalTestID = GenerateID()[:4]
+	}
+	prefix := fmt.Sprintf("docker-test%s-%s-", globalTestID, getCallerName(2))
+	if prefix == "" {
+		prefix = "docker-test-"
+	}
+	dir, err = ioutil.TempDir("", prefix)
+	if err = os.Remove(dir); err != nil {
+		return
+	}
+	if err = utils.CopyDirectory(templateDir, dir); err != nil {
+		return
+	}
+	return
+}
+
+func getCallerName(depth int) string {
+	// Use the caller function name as a prefix.
+	// This helps trace temp directories back to their test.
+	pc, _, _, _ := runtime.Caller(depth + 1)
+	callerLongName := runtime.FuncForPC(pc).Name()
+	parts := strings.Split(callerLongName, ".")
+	callerShortName := parts[len(parts)-1]
+	return callerShortName
+}

 // Write `content` to the file at path `dst`, creating it if necessary,
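Taken together, these helpers give the test suite a uniform bootstrap: engine first, server recovered from it, containers created via jobs. A hypothetical test written against them (TestEngineBootstrap is illustrative, not part of the commit):

package docker

import "testing"

// TestEngineBootstrap sketches the new test shape built on the
// helpers above.
func TestEngineBootstrap(t *testing.T) {
	eng := NewTestEngine(t)
	srv := mkServerFromEngine(eng, t)
	defer nuke(srv.runtime)

	config, _, _, err := ParseRun([]string{GetTestImage(srv.runtime).ID, "echo test"}, nil)
	if err != nil {
		t.Fatal(err)
	}
	if id := createTestContainer(eng, config, t); id == "" {
		t.Fatal("expected a container id")
	}
}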
@@ -249,7 +283,9 @@ func TestMergeConfig(t *testing.T) {
 		Volumes:   volumesUser,
 	}

-	MergeConfig(configUser, configImage)
+	if err := MergeConfig(configUser, configImage); err != nil {
+		t.Error(err)
+	}

 	if len(configUser.Dns) != 3 {
 		t.Fatalf("Expected 3 dns, 1.1.1.1, 2.2.2.2 and 3.3.3.3, found %d", len(configUser.Dns))
@@ -261,7 +297,7 @@ func TestMergeConfig(t *testing.T) {
 	}

 	if len(configUser.ExposedPorts) != 3 {
-		t.Fatalf("Expected 3 portSpecs, 1111, 2222 and 3333, found %d", len(configUser.PortSpecs))
+		t.Fatalf("Expected 3 ExposedPorts, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts))
 	}
 	for portSpecs := range configUser.ExposedPorts {
 		if portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" {
@@ -289,6 +325,28 @@ func TestMergeConfig(t *testing.T) {
 	if configUser.VolumesFrom != "1111" {
 		t.Fatalf("Expected VolumesFrom to be 1111, found %s", configUser.VolumesFrom)
 	}
+
+	ports, _, err := parsePortSpecs([]string{"0000"})
+	if err != nil {
+		t.Error(err)
+	}
+	configImage2 := &Config{
+		ExposedPorts: ports,
+	}
+
+	if err := MergeConfig(configUser, configImage2); err != nil {
+		t.Error(err)
+	}
+
+	if len(configUser.ExposedPorts) != 4 {
+		t.Fatalf("Expected 4 ExposedPorts, 0000, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts))
+	}
+	for portSpecs := range configUser.ExposedPorts {
+		if portSpecs.Port() != "0000" && portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" {
+			t.Fatalf("Expected 0000 or 1111 or 2222 or 3333, found %s", portSpecs)
+		}
+	}
+
 }

 func TestParseLxcConfOpt(t *testing.T) {