Merge branch 'master' of https://github.com/dotcloud/docker
Commit c24c5c3b60
75 changed files with 3410 additions and 1000 deletions
AUTHORS (4 changes)

@@ -17,6 +17,7 @@ Antony Messerli <amesserl@rackspace.com>
Barry Allard <barry.allard@gmail.com>
Brandon Liu <bdon@bdon.org>
Brian McCallister <brianm@skife.org>
Brian Olsen <brian@maven-group.org>
Bruno Bigras <bigras.bruno@gmail.com>
Caleb Spare <cespare@gmail.com>
Calen Pennington <cale@edx.org>

@@ -34,6 +35,7 @@ Dominik Honnef <dominik@honnef.co>
Don Spaulding <donspauldingii@gmail.com>
Dr Nic Williams <drnicwilliams@gmail.com>
Elias Probst <mail@eliasprobst.eu>
Emily Rose <emily@contactvibe.com>
Eric Hanchrow <ehanchrow@ine.com>
Eric Myhre <hash@exultant.us>
Erno Hopearuoho <erno.hopearuoho@gmail.com>

@@ -73,6 +75,7 @@ Louis Opter <kalessin@kalessin.fr>
Marco Hennings <marco.hennings@freiheit.com>
Marcus Farkas <toothlessgear@finitebox.com>
Mark McGranaghan <mmcgrana@gmail.com>
Martin Redmond <mrtodo@gmail.com>
Maxim Treskin <zerthurd@gmail.com>
meejah <meejah@meejah.ca>
Michael Crosby <crosby.michael@gmail.com>

@@ -103,6 +106,7 @@ Solomon Hykes <solomon@dotcloud.com>
Sridhar Ratnakumar <sridharr@activestate.com>
Stefan Praszalowicz <stefan@greplin.com>
Thatcher Peskens <thatcher@dotcloud.com>
Thijs Terlouw <thijsterlouw@gmail.com>
Thomas Bikeev <thomas.bikeev@mac.com>
Thomas Hansen <thomas.hansen@gmail.com>
Tianon Gravi <admwiggin@gmail.com>
@@ -27,7 +27,7 @@ run /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_
run PKG=github.com/kr/pty REV=27435c699; git clone http://$PKG /go/src/$PKG && cd /go/src/$PKG && git checkout -f $REV
run PKG=github.com/gorilla/context/ REV=708054d61e5; git clone http://$PKG /go/src/$PKG && cd /go/src/$PKG && git checkout -f $REV
run PKG=github.com/gorilla/mux/ REV=9b36453141c; git clone http://$PKG /go/src/$PKG && cd /go/src/$PKG && git checkout -f $REV
run PKG=github.com/dotcloud/tar/ REV=d06045a6d9; git clone http://$PKG /go/src/$PKG && cd /go/src/$PKG && git checkout -f $REV
run PKG=github.com/dotcloud/tar/ REV=e5ea6bb21a3294; git clone http://$PKG /go/src/$PKG && cd /go/src/$PKG && git checkout -f $REV
run PKG=code.google.com/p/go.net/ REV=84a4013f96e0; hg clone http://$PKG /go/src/$PKG && cd /go/src/$PKG && hg checkout $REV
# Upload docker source
add . /go/src/github.com/dotcloud/docker
README.md (260 changes)

@@ -1,8 +1,8 @@
Docker: the Linux container engine
==================================

Docker is an open-source engine which automates the deployment of
applications as highly portable, self-sufficient containers.
Docker is an open source project to pack, ship and run any application
as a lightweight container

Docker containers are both *hardware-agnostic* and
*platform-agnostic*. This means that they can run anywhere, from your

@@ -18,7 +18,7 @@ Platform-as-a-Service. It benefits directly from the experience
accumulated over several years of large-scale operation and support of
hundreds of thousands of applications and databases.




## Better than VMs

@@ -140,125 +140,25 @@ Note that Docker doesn't care *how* dependencies are built - as long
as they can be built by running a Unix command in a container.

Install instructions
==================
Getting started
===============

Quick install on Ubuntu 12.04 and 12.10
---------------------------------------
Docker can be installed on your local machine as well as servers - both bare metal and virtualized.
It is available as a binary on most modern Linux systems, or as a VM on Windows, Mac and other systems.

```bash
curl https://get.docker.io | sudo sh -x
```
We also offer an interactive tutorial for quickly learning the basics of using Docker.

Binary installs
----------------

Docker supports the following binary installation methods. Note that
some methods are community contributions and not yet officially
supported.
For up-to-date install instructions and online tutorials, see the [Getting Started page](http://www.docker.io/gettingstarted/).

* [Ubuntu 12.04 and 12.10 (officially supported)](http://docs.docker.io/en/latest/installation/ubuntulinux/)
* [Arch Linux](http://docs.docker.io/en/latest/installation/archlinux/)
* [Mac OS X (with Vagrant)](http://docs.docker.io/en/latest/installation/vagrant/)
* [Windows (with Vagrant)](http://docs.docker.io/en/latest/installation/windows/)
* [Amazon EC2 (with Vagrant)](http://docs.docker.io/en/latest/installation/amazon/)

Usage examples
==============

First run the ``docker`` daemon
-------------------------------
Docker can be used to run short-lived commands, long-running daemons (app servers, databases etc.),
interactive shell sessions, etc.

All the examples assume your machine is running the ``docker``
daemon. To run the ``docker`` daemon in the background, simply type:

```bash
# On a production system you want this running in an init script
sudo docker -d &
```

Now you can run ``docker`` in client mode: all commands will be
forwarded to the ``docker`` daemon, so the client can run from any
account.

```bash
# Now you can run docker commands from any account.
docker help
```

Throwaway shell in a base Ubuntu image
--------------------------------------

```bash
docker pull ubuntu:12.10

# Run an interactive shell, allocate a tty, attach stdin and stdout
# To detach the tty without exiting the shell, use the escape sequence Ctrl-p + Ctrl-q
docker run -i -t ubuntu:12.10 /bin/bash
```

Starting a long-running worker process
--------------------------------------

```bash
# Start a very useful long-running process
JOB=$(docker run -d ubuntu /bin/sh -c "while true; do echo Hello world; sleep 1; done")

# Collect the output of the job so far
docker logs $JOB

# Kill the job
docker kill $JOB
```

Running an irc bouncer
----------------------

```bash
BOUNCER_ID=$(docker run -d -p 6667 -u irc shykes/znc zncrun $USER $PASSWORD)
echo "Configure your irc client to connect to port $(docker port $BOUNCER_ID 6667) of this machine"
```

Running Redis
-------------

```bash
REDIS_ID=$(docker run -d -p 6379 shykes/redis redis-server)
echo "Configure your redis client to connect to port $(docker port $REDIS_ID 6379) of this machine"
```

Share your own image!
---------------------

```bash
CONTAINER=$(docker run -d ubuntu:12.10 apt-get install -y curl)
docker commit -m "Installed curl" $CONTAINER $USER/betterbase
docker push $USER/betterbase
```

A list of publicly available images is [available
here](https://github.com/dotcloud/docker/wiki/Public-docker-images).

Expose a service on a TCP port
------------------------------

```bash
# Expose port 4444 of this container, and tell netcat to listen on it
JOB=$(docker run -d -p 4444 base /bin/nc -l -p 4444)

# Which public port is NATed to my container?
PORT=$(docker port $JOB 4444)

# Connect to the public port via the host's public address
# Please note that because of how routing works connecting to localhost or 127.0.0.1 $PORT will not work.
# Replace *eth0* according to your local interface name.
IP=$(ip -o -4 addr list eth0 | perl -n -e 'if (m{inet\s([\d\.]+)\/\d+\s}xms) { print $1 }')
echo hello world | nc $IP $PORT

# Verify that the network connection worked
echo "Daemon received: $(docker logs $JOB)"
```
You can find a [list of real-world examples](http://docs.docker.io/en/latest/examples/) in the documentation.

Under the hood
--------------

@@ -288,142 +188,6 @@ They are probably not perfect, please let us know if anything feels
wrong or incomplete.

Note
----

We also keep the documentation in this repository. The website
documentation is generated using Sphinx using these sources. Please
find it under docs/sources/ and read more about it
https://github.com/dotcloud/docker/tree/master/docs/README.md

Please feel free to fix / update the documentation and send us pull
requests. More tutorials are also welcome.

Setting up a dev environment
----------------------------

Instructions that have been verified to work on Ubuntu 12.10,

```bash
sudo apt-get -y install lxc curl xz-utils golang git

export GOPATH=~/go/
export PATH=$GOPATH/bin:$PATH

mkdir -p $GOPATH/src/github.com/dotcloud
cd $GOPATH/src/github.com/dotcloud
git clone https://github.com/dotcloud/docker.git
cd docker

go get -v github.com/dotcloud/docker/...
go install -v github.com/dotcloud/docker/...
```

Then run the docker daemon,

```bash
sudo $GOPATH/bin/docker -d
```

Run the `go install` command (above) to recompile docker.

What is a Standard Container?
=============================

Docker defines a unit of software delivery called a Standard
Container. The goal of a Standard Container is to encapsulate a
software component and all its dependencies in a format that is
self-describing and portable, so that any compliant runtime can run it
without extra dependencies, regardless of the underlying machine and
the contents of the container.

The spec for Standard Containers is currently a work in progress, but
it is very straightforward. It mostly defines 1) an image format, 2) a
set of standard operations, and 3) an execution environment.

A great analogy for this is the shipping container. Just like how
Standard Containers are a fundamental unit of software delivery,
shipping containers are a fundamental unit of physical delivery.

### 1. STANDARD OPERATIONS

Just like shipping containers, Standard Containers define a set of
STANDARD OPERATIONS. Shipping containers can be lifted, stacked,
locked, loaded, unloaded and labelled. Similarly, Standard Containers
can be started, stopped, copied, snapshotted, downloaded, uploaded and
tagged.

### 2. CONTENT-AGNOSTIC

Just like shipping containers, Standard Containers are
CONTENT-AGNOSTIC: all standard operations have the same effect
regardless of the contents. A shipping container will be stacked in
exactly the same way whether it contains Vietnamese powder coffee or
spare Maserati parts. Similarly, Standard Containers are started or
uploaded in the same way whether they contain a postgres database, a
php application with its dependencies and application server, or Java
build artifacts.

### 3. INFRASTRUCTURE-AGNOSTIC

Both types of containers are INFRASTRUCTURE-AGNOSTIC: they can be
transported to thousands of facilities around the world, and
manipulated by a wide variety of equipment. A shipping container can
be packed in a factory in Ukraine, transported by truck to the nearest
routing center, stacked onto a train, loaded into a German boat by an
Australian-built crane, stored in a warehouse at a US facility,
etc. Similarly, a standard container can be bundled on my laptop,
uploaded to S3, downloaded, run and snapshotted by a build server at
Equinix in Virginia, uploaded to 10 staging servers in a home-made
Openstack cluster, then sent to 30 production instances across 3 EC2
regions.

### 4. DESIGNED FOR AUTOMATION

Because they offer the same standard operations regardless of content
and infrastructure, Standard Containers, just like their physical
counterparts, are extremely well-suited for automation. In fact, you
could say automation is their secret weapon.

Many things that once required time-consuming and error-prone human
effort can now be programmed. Before shipping containers, a bag of
powder coffee was hauled, dragged, dropped, rolled and stacked by 10
different people in 10 different locations by the time it reached its
destination. 1 out of 50 disappeared. 1 out of 20 was damaged. The
process was slow, inefficient and cost a fortune - and was entirely
different depending on the facility and the type of goods.

Similarly, before Standard Containers, by the time a software
component ran in production, it had been individually built,
configured, bundled, documented, patched, vendored, templated, tweaked
and instrumented by 10 different people on 10 different
computers. Builds failed, libraries conflicted, mirrors crashed,
post-it notes were lost, logs were misplaced, cluster updates were
half-broken. The process was slow, inefficient and cost a fortune -
and was entirely different depending on the language and
infrastructure provider.

### 5. INDUSTRIAL-GRADE DELIVERY

There are 17 million shipping containers in existence, packed with
every physical good imaginable. Every single one of them can be loaded
onto the same boats, by the same cranes, in the same facilities, and
sent anywhere in the World with incredible efficiency. It is
embarrassing to think that a 30 ton shipment of coffee can safely
travel half-way across the World in *less time* than it takes a
software team to deliver its code from one datacenter to another
sitting 10 miles away.

With Standard Containers we can put an end to that embarrassment, by
making INDUSTRIAL-GRADE DELIVERY of software a reality.

### Legal

Transfers of Docker shall be in accordance with applicable export
Vagrantfile (vendored, 23 changes)

@@ -12,23 +12,20 @@ Vagrant::Config.run do |config|
# Setup virtual machine box. This VM configuration code is always executed.
config.vm.box = BOX_NAME
config.vm.box_url = BOX_URI
config.vm.forward_port 4243, 4243

# Provision docker and new kernel if deployment was not done
# Provision docker and new kernel if deployment was not done.
# It is assumed Vagrant can successfully launch the provider instance.
if Dir.glob("#{File.dirname(__FILE__)}/.vagrant/machines/default/*/id").empty?
# Add lxc-docker package
pkg_cmd = "wget -q -O - http://get.docker.io/gpg | apt-key add -;" \
"echo deb https://get.docker.io/ubuntu docker main > /etc/apt/sources.list.d/docker.list;" \
pkg_cmd = "wget -q -O - https://get.docker.io/gpg | apt-key add -;" \
"echo deb http://get.docker.io/ubuntu docker main > /etc/apt/sources.list.d/docker.list;" \
"apt-get update -qq; apt-get install -q -y --force-yes lxc-docker; "
# Add X.org Ubuntu backported 3.8 kernel
pkg_cmd << "apt-get update -qq; apt-get install -q -y python-software-properties; " \
"add-apt-repository -y ppa:ubuntu-x-swat/r-lts-backport; " \
"apt-get update -qq; apt-get install -q -y linux-image-3.8.0-19-generic; "
# Add guest additions if local vbox VM
is_vbox = true
ARGV.each do |arg| is_vbox &&= !arg.downcase.start_with?("--provider") end
if is_vbox
pkg_cmd << "apt-get install -q -y linux-headers-3.8.0-19-generic dkms; " \
# Add Ubuntu raring backported kernel
pkg_cmd << "apt-get update -qq; apt-get install -q -y linux-image-generic-lts-raring; "
# Add guest additions if local vbox VM. As virtualbox is the default provider,
# it is assumed it won't be explicitly stated.
if ENV["VAGRANT_DEFAULT_PROVIDER"].nil? && ARGV.none? { |arg| arg.downcase.start_with?("--provider") }
pkg_cmd << "apt-get install -q -y linux-headers-generic-lts-raring dkms; " \
"echo 'Downloading VBox Guest Additions...'; " \
"wget -q http://dlc.sun.com.edgesuite.net/virtualbox/4.2.12/VBoxGuestAdditions_4.2.12.iso; "
# Prepare the VM to add guest additions after reboot
api.go (82 changes)

@@ -2,6 +2,7 @@ package docker
import (
"code.google.com/p/go.net/websocket"
"encoding/base64"
"encoding/json"
"fmt"
"github.com/dotcloud/docker/auth"

@@ -20,7 +21,7 @@ import (
"strings"
)
const APIVERSION = 1.4
const APIVERSION = 1.5
const DEFAULTHTTPHOST = "127.0.0.1"
const DEFAULTHTTPPORT = 4243
const DEFAULTUNIXSOCKET = "/var/run/docker.sock"

@@ -326,8 +327,18 @@ func getContainersJSON(srv *Server, version float64, w http.ResponseWriter, r *h
n = -1
}
var b []byte
outs := srv.Containers(all, size, n, since, before)
b, err := json.Marshal(outs)
if version < 1.5 {
outs2 := []APIContainersOld{}
for _, ctnr := range outs {
outs2 = append(outs2, ctnr.ToLegacy())
}
b, err = json.Marshal(outs2)
} else {
b, err = json.Marshal(outs)
}
if err != nil {
return err
}

@@ -394,6 +405,16 @@ func postImagesCreate(srv *Server, version float64, w http.ResponseWriter, r *ht
tag := r.Form.Get("tag")
repo := r.Form.Get("repo")
authEncoded := r.Header.Get("X-Registry-Auth")
authConfig := &auth.AuthConfig{}
if authEncoded != "" {
authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
if err := json.NewDecoder(authJson).Decode(authConfig); err != nil {
// for a pull it is not an error if no auth was given
// to increase compatibility with the existing api it is defaulting to be empty
authConfig = &auth.AuthConfig{}
}
}
if version > 1.0 {
w.Header().Set("Content-Type", "application/json")
}

@@ -405,7 +426,7 @@ func postImagesCreate(srv *Server, version float64, w http.ResponseWriter, r *ht
metaHeaders[k] = v
}
}
if err := srv.ImagePull(image, tag, w, sf, &auth.AuthConfig{}, metaHeaders, version > 1.3); err != nil {
if err := srv.ImagePull(image, tag, w, sf, authConfig, metaHeaders, version > 1.3); err != nil {
if sf.Used() {
w.Write(sf.FormatError(err))
return nil

@@ -473,19 +494,32 @@ func postImagesInsert(srv *Server, version float64, w http.ResponseWriter, r *ht
}
func postImagesPush(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
authConfig := &auth.AuthConfig{}
metaHeaders := map[string][]string{}
for k, v := range r.Header {
if strings.HasPrefix(k, "X-Meta-") {
metaHeaders[k] = v
}
}
if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil {
return err
}
if err := parseForm(r); err != nil {
return err
}
authConfig := &auth.AuthConfig{}
authEncoded := r.Header.Get("X-Registry-Auth")
if authEncoded != "" {
// the new format is to handle the authConfig as a header
authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
if err := json.NewDecoder(authJson).Decode(authConfig); err != nil {
// to increase compatibility to existing api it is defaulting to be empty
authConfig = &auth.AuthConfig{}
}
} else {
// the old format is supported for compatibility if there was no authConfig header
if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil {
return err
}
}
if vars == nil {
return fmt.Errorf("Missing parameter")

@@ -819,6 +853,11 @@ func getContainersByName(srv *Server, version float64, w http.ResponseWriter, r
if err != nil {
return err
}
_, err = srv.ImageInspect(name)
if err == nil {
return fmt.Errorf("Conflict between containers and images")
}
b, err := json.Marshal(container)
if err != nil {
return err

@@ -837,6 +876,11 @@ func getImagesByName(srv *Server, version float64, w http.ResponseWriter, r *htt
if err != nil {
return err
}
_, err = srv.ContainerInspect(name)
if err == nil {
return fmt.Errorf("Conflict between containers and images")
}
b, err := json.Marshal(image)
if err != nil {
return err

@@ -845,29 +889,6 @@ func getImagesByName(srv *Server, version float64, w http.ResponseWriter, r *htt
return nil
}
func postImagesGetCache(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
apiConfig := &APIImageConfig{}
if err := json.NewDecoder(r.Body).Decode(apiConfig); err != nil {
return err
}
image, err := srv.ImageGetCached(apiConfig.ID, apiConfig.Config)
if err != nil {
return err
}
if image == nil {
w.WriteHeader(http.StatusNotFound)
return nil
}
apiID := &APIID{ID: image.ID}
b, err := json.Marshal(apiID)
if err != nil {
return err
}
writeJSON(w, b)
return nil
}
func postBuild(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if version < 1.3 {
return fmt.Errorf("Multipart upload for build is no longer supported. Please upgrade your docker client.")

@@ -1043,7 +1064,6 @@ func createRouter(srv *Server, logging bool) (*mux.Router, error) {
"/images/{name:.*}/insert": postImagesInsert,
"/images/{name:.*}/push": postImagesPush,
"/images/{name:.*}/tag": postImagesTag,
"/images/getCache": postImagesGetCache,
"/containers/create": postContainersCreate,
"/containers/{name:.*}/kill": postContainersKill,
"/containers/{name:.*}/restart": postContainersRestart,
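With this change, registry credentials can reach the daemon as an `X-Registry-Auth` header instead of the request body, as the `postImagesCreate` and `postImagesPush` handlers above show. Below is a minimal client-side sketch of producing that header; it mirrors what the CLI does later in this commit (`json.Marshal` of the auth config followed by URL-safe base64). The struct here is a simplified stand-in for `auth.AuthConfig`, and the credentials and daemon URL are hypothetical placeholders.

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"net/http"
	"strings"
)

// authConfig is a sketch mirroring the fields the daemon decodes from the
// X-Registry-Auth header; it is not the real auth.AuthConfig type.
type authConfig struct {
	Username      string `json:"username,omitempty"`
	Password      string `json:"password,omitempty"`
	Email         string `json:"email"`
	ServerAddress string `json:"serveraddress,omitempty"`
}

func main() {
	cfg := authConfig{Username: "demo", Password: "secret", Email: "demo@example.com"}

	// Same encoding the CLI uses: JSON, then URL-safe base64.
	buf, err := json.Marshal(cfg)
	if err != nil {
		panic(err)
	}
	header := base64.URLEncoding.EncodeToString(buf)

	// Hypothetical daemon address; /images/create is the pull endpoint handled above.
	req, err := http.NewRequest("POST", "http://127.0.0.1:4243/images/create?fromImage=ubuntu", strings.NewReader(""))
	if err != nil {
		panic(err)
	}
	req.Header.Set("X-Registry-Auth", header)
	fmt.Println("X-Registry-Auth:", header)
}
```

If the header is missing or cannot be decoded, the handlers above fall back to an empty auth config for pulls and to the legacy body format for pushes, so older clients keep working.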
@@ -1,5 +1,7 @@
package docker
import "encoding/json"
type APIHistory struct {
ID string `json:"Id"`
Tags []string `json:",omitempty"`

@@ -42,6 +44,30 @@ type APIRmi struct {
}
type APIContainers struct {
ID string `json:"Id"`
Image string
Command string
Created int64
Status string
Ports []APIPort
SizeRw int64
SizeRootFs int64
}
func (self *APIContainers) ToLegacy() APIContainersOld {
return APIContainersOld{
ID: self.ID,
Image: self.Image,
Command: self.Command,
Created: self.Created,
Status: self.Status,
Ports: displayablePorts(self.Ports),
SizeRw: self.SizeRw,
SizeRootFs: self.SizeRootFs,
}
}
type APIContainersOld struct {
ID string `json:"Id"`
Image string
Command string

@@ -67,7 +93,17 @@ type APIRun struct {
}
type APIPort struct {
Port string
PrivatePort int64
PublicPort int64
Type string
}
func (port *APIPort) MarshalJSON() ([]byte, error) {
return json.Marshal(map[string]interface{}{
"PrivatePort": port.PrivatePort,
"PublicPort": port.PublicPort,
"Type": port.Type,
})
}
type APIVersion struct {
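The custom `MarshalJSON` above keeps the deprecated `Port` string out of the new wire format, so API 1.5 clients see each port as an object with `PrivatePort`, `PublicPort` and `Type`. A small self-contained sketch of that effect follows; the type is re-declared here purely for illustration and is not the real package type, and the port numbers are made up.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// APIPort re-declared from the diff above, for illustration only.
type APIPort struct {
	Port        string
	PrivatePort int64
	PublicPort  int64
	Type        string
}

// Same idea as the MarshalJSON in the diff: emit only the new fields.
func (port APIPort) MarshalJSON() ([]byte, error) {
	return json.Marshal(map[string]interface{}{
		"PrivatePort": port.PrivatePort,
		"PublicPort":  port.PublicPort,
		"Type":        port.Type,
	})
}

func main() {
	p := APIPort{Port: "6379", PrivatePort: 6379, PublicPort: 49153, Type: "tcp"}
	out, _ := json.Marshal(p)
	// Prints {"PrivatePort":6379,"PublicPort":49153,"Type":"tcp"}; the legacy Port field is dropped.
	fmt.Println(string(out))
}
```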
api_test.go (42 changes)

@@ -68,7 +68,7 @@ func TestGetInfo(t *testing.T) {
srv := &Server{runtime: runtime}
initialImages, err := srv.runtime.graph.All()
initialImages, err := srv.runtime.graph.Map()
if err != nil {
t.Fatal(err)
}

@@ -321,7 +321,7 @@ func TestGetContainersJSON(t *testing.T) {
srv := &Server{runtime: runtime}
container, err := NewBuilder(runtime).Create(&Config{
container, err := runtime.Create(&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"echo", "test"},
})

@@ -357,10 +357,8 @@ func TestGetContainersExport(t *testing.T) {
srv := &Server{runtime: runtime}
builder := NewBuilder(runtime)
// Create a container and remove a file
container, err := builder.Create(
container, err := runtime.Create(
&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"touch", "/test"},

@@ -409,10 +407,8 @@ func TestGetContainersChanges(t *testing.T) {
srv := &Server{runtime: runtime}
builder := NewBuilder(runtime)
// Create a container and remove a file
container, err := builder.Create(
container, err := runtime.Create(
&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/rm", "/etc/passwd"},

@@ -449,6 +445,7 @@ func TestGetContainersChanges(t *testing.T) {
}
func TestGetContainersTop(t *testing.T) {
t.Skip("Fixme. Skipping test for now. Reported error when testing using dind: 'api_test.go:527: Expected 2 processes, found 0.'")
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)

@@ -457,9 +454,7 @@ func TestGetContainersTop(t *testing.T) {
srv := &Server{runtime: runtime}
builder := NewBuilder(runtime)
container, err := builder.Create(
container, err := runtime.Create(
&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/sh", "-c", "cat"},

@@ -540,10 +535,8 @@ func TestGetContainersByName(t *testing.T) {
srv := &Server{runtime: runtime}
builder := NewBuilder(runtime)
// Create a container and remove a file
container, err := builder.Create(
container, err := runtime.Create(
&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"echo", "test"},

@@ -573,10 +566,9 @@ func TestPostCommit(t *testing.T) {
srv := &Server{runtime: runtime}
builder := NewBuilder(runtime)
// Create a container and remove a file
container, err := builder.Create(
container, err := runtime.Create(
&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"touch", "/test"},

@@ -670,7 +662,7 @@ func TestPostContainersKill(t *testing.T) {
srv := &Server{runtime: runtime}
container, err := NewBuilder(runtime).Create(
container, err := runtime.Create(
&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/cat"},

@@ -712,7 +704,7 @@ func TestPostContainersRestart(t *testing.T) {
srv := &Server{runtime: runtime}
container, err := NewBuilder(runtime).Create(
container, err := runtime.Create(
&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/cat"},

@@ -766,7 +758,7 @@ func TestPostContainersStart(t *testing.T) {
srv := &Server{runtime: runtime}
container, err := NewBuilder(runtime).Create(
container, err := runtime.Create(
&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/cat"},

@@ -816,7 +808,7 @@ func TestPostContainersStop(t *testing.T) {
srv := &Server{runtime: runtime}
container, err := NewBuilder(runtime).Create(
container, err := runtime.Create(
&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/cat"},

@@ -863,7 +855,7 @@ func TestPostContainersWait(t *testing.T) {
srv := &Server{runtime: runtime}
container, err := NewBuilder(runtime).Create(
container, err := runtime.Create(
&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/sleep", "1"},

@@ -905,7 +897,7 @@ func TestPostContainersAttach(t *testing.T) {
srv := &Server{runtime: runtime}
container, err := NewBuilder(runtime).Create(
container, err := runtime.Create(
&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/cat"},

@@ -997,7 +989,7 @@ func TestDeleteContainers(t *testing.T) {
srv := &Server{runtime: runtime}
container, err := NewBuilder(runtime).Create(&Config{
container, err := runtime.Create(&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"touch", "/test"},
})

@@ -1184,10 +1176,8 @@ func TestPostContainersCopy(t *testing.T) {
srv := &Server{runtime: runtime}
builder := NewBuilder(runtime)
// Create a container and remove a file
container, err := builder.Create(
container, err := runtime.Create(
&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"touch", "/test.txt"},
auth/auth.go (97 changes)

@@ -26,10 +26,11 @@ var (
)
type AuthConfig struct {
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
Auth string `json:"auth"`
Email string `json:"email"`
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
Auth string `json:"auth"`
Email string `json:"email"`
ServerAddress string `json:"serveraddress,omitempty"`
}
type ConfigFile struct {

@@ -96,6 +97,7 @@ func LoadConfig(rootPath string) (*ConfigFile, error) {
}
origEmail := strings.Split(arr[1], " = ")
authConfig.Email = origEmail[1]
authConfig.ServerAddress = IndexServerAddress()
configFile.Configs[IndexServerAddress()] = authConfig
} else {
for k, authConfig := range configFile.Configs {

@@ -105,6 +107,7 @@ func LoadConfig(rootPath string) (*ConfigFile, error) {
}
authConfig.Auth = ""
configFile.Configs[k] = authConfig
authConfig.ServerAddress = k
}
}
return &configFile, nil

@@ -125,7 +128,7 @@ func SaveConfig(configFile *ConfigFile) error {
authCopy.Auth = encodeAuth(&authCopy)
authCopy.Username = ""
authCopy.Password = ""
authCopy.ServerAddress = ""
configs[k] = authCopy
}

@@ -146,14 +149,26 @@ func Login(authConfig *AuthConfig, factory *utils.HTTPRequestFactory) (string, e
reqStatusCode := 0
var status string
var reqBody []byte
jsonBody, err := json.Marshal(authConfig)
serverAddress := authConfig.ServerAddress
if serverAddress == "" {
serverAddress = IndexServerAddress()
}
loginAgainstOfficialIndex := serverAddress == IndexServerAddress()
// to avoid sending the server address to the server it should be removed before marshalled
authCopy := *authConfig
authCopy.ServerAddress = ""
jsonBody, err := json.Marshal(authCopy)
if err != nil {
return "", fmt.Errorf("Config Error: %s", err)
}
// using `bytes.NewReader(jsonBody)` here causes the server to respond with a 411 status.
b := strings.NewReader(string(jsonBody))
req1, err := http.Post(IndexServerAddress()+"users/", "application/json; charset=utf-8", b)
req1, err := http.Post(serverAddress+"users/", "application/json; charset=utf-8", b)
if err != nil {
return "", fmt.Errorf("Server Error: %s", err)
}

@@ -165,14 +180,23 @@ func Login(authConfig *AuthConfig, factory *utils.HTTPRequestFactory) (string, e
}
if reqStatusCode == 201 {
status = "Account created. Please use the confirmation link we sent" +
" to your e-mail to activate it."
if loginAgainstOfficialIndex {
status = "Account created. Please use the confirmation link we sent" +
" to your e-mail to activate it."
} else {
status = "Account created. Please see the documentation of the registry " + serverAddress + " for instructions how to activate it."
}
} else if reqStatusCode == 403 {
return "", fmt.Errorf("Login: Your account hasn't been activated. " +
"Please check your e-mail for a confirmation link.")
if loginAgainstOfficialIndex {
return "", fmt.Errorf("Login: Your account hasn't been activated. " +
"Please check your e-mail for a confirmation link.")
} else {
return "", fmt.Errorf("Login: Your account hasn't been activated. " +
"Please see the documentation of the registry " + serverAddress + " for instructions how to activate it.")
}
} else if reqStatusCode == 400 {
if string(reqBody) == "\"Username or email already exists\"" {
req, err := factory.NewRequest("GET", IndexServerAddress()+"users/", nil)
req, err := factory.NewRequest("GET", serverAddress+"users/", nil)
req.SetBasicAuth(authConfig.Username, authConfig.Password)
resp, err := client.Do(req)
if err != nil {

@@ -199,3 +223,52 @@ func Login(authConfig *AuthConfig, factory *utils.HTTPRequestFactory) (string, e
}
return status, nil
}
// this method matches a auth configuration to a server address or a url
func (config *ConfigFile) ResolveAuthConfig(registry string) AuthConfig {
if registry == IndexServerAddress() || len(registry) == 0 {
// default to the index server
return config.Configs[IndexServerAddress()]
}
// if its not the index server there are three cases:
//
// 1. this is a full config url -> it should be used as is
// 2. it could be a full url, but with the wrong protocol
// 3. it can be the hostname optionally with a port
//
// as there is only one auth entry which is fully qualified we need to start
// parsing and matching
swapProtocoll := func(url string) string {
if strings.HasPrefix(url, "http:") {
return strings.Replace(url, "http:", "https:", 1)
}
if strings.HasPrefix(url, "https:") {
return strings.Replace(url, "https:", "http:", 1)
}
return url
}
resolveIgnoringProtocol := func(url string) AuthConfig {
if c, found := config.Configs[url]; found {
return c
}
registrySwappedProtocoll := swapProtocoll(url)
// now try to match with the different protocol
if c, found := config.Configs[registrySwappedProtocoll]; found {
return c
}
return AuthConfig{}
}
// match both protocols as it could also be a server name like httpfoo
if strings.HasPrefix(registry, "http:") || strings.HasPrefix(registry, "https:") {
return resolveIgnoringProtocol(registry)
}
url := "https://" + registry
if !strings.Contains(registry, "/") {
url = url + "/v1/"
}
return resolveIgnoringProtocol(url)
}
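`ResolveAuthConfig` above tries the exact key first, then the same URL with the protocol swapped, and for a bare hostname it normalizes to an `https://<host>/v1/` key before matching. The standalone sketch below reproduces that lookup order for illustration only; the registry host and the simplified string map standing in for `ConfigFile.Configs` are hypothetical.

```go
package main

import (
	"fmt"
	"strings"
)

// candidateKeys reproduces the lookup order of ResolveAuthConfig as a sketch:
// the normalized key first, then the protocol-swapped variant.
func candidateKeys(registry string) []string {
	swap := func(url string) string {
		if strings.HasPrefix(url, "http:") {
			return strings.Replace(url, "http:", "https:", 1)
		}
		if strings.HasPrefix(url, "https:") {
			return strings.Replace(url, "https:", "http:", 1)
		}
		return url
	}
	if !strings.HasPrefix(registry, "http:") && !strings.HasPrefix(registry, "https:") {
		// Bare hostname (optionally with a port): default to https and /v1/.
		host := registry
		registry = "https://" + host
		if !strings.Contains(host, "/") {
			registry += "/v1/"
		}
	}
	return []string{registry, swap(registry)}
}

func main() {
	// Hypothetical entry such as one written by `docker login myregistry.example.com:5000`.
	configs := map[string]string{"https://myregistry.example.com:5000/v1/": "demo-user"}

	for _, key := range candidateKeys("myregistry.example.com:5000") {
		if user, ok := configs[key]; ok {
			fmt.Printf("matched %s -> %s\n", key, user)
			return
		}
	}
	fmt.Println("no credentials found, falling back to an empty AuthConfig")
}
```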
builder.go (154 changes)

@@ -1,154 +0,0 @@
package docker
import (
"fmt"
"github.com/dotcloud/docker/utils"
"os"
"path"
"time"
)
var defaultDns = []string{"8.8.8.8", "8.8.4.4"}
type Builder struct {
runtime *Runtime
repositories *TagStore
graph *Graph
config *Config
image *Image
}
func NewBuilder(runtime *Runtime) *Builder {
return &Builder{
runtime: runtime,
graph: runtime.graph,
repositories: runtime.repositories,
}
}
func (builder *Builder) Create(config *Config) (*Container, error) {
// Lookup image
img, err := builder.repositories.LookupImage(config.Image)
if err != nil {
return nil, err
}
if img.Config != nil {
MergeConfig(config, img.Config)
}
if len(config.Entrypoint) != 0 && config.Cmd == nil {
config.Cmd = []string{}
} else if config.Cmd == nil || len(config.Cmd) == 0 {
return nil, fmt.Errorf("No command specified")
}
// Generate id
id := GenerateID()
// Generate default hostname
// FIXME: the lxc template no longer needs to set a default hostname
if config.Hostname == "" {
config.Hostname = id[:12]
}
var args []string
var entrypoint string
if len(config.Entrypoint) != 0 {
entrypoint = config.Entrypoint[0]
args = append(config.Entrypoint[1:], config.Cmd...)
} else {
entrypoint = config.Cmd[0]
args = config.Cmd[1:]
}
container := &Container{
// FIXME: we should generate the ID here instead of receiving it as an argument
ID: id,
Created: time.Now(),
Path: entrypoint,
Args: args, //FIXME: de-duplicate from config
Config: config,
Image: img.ID, // Always use the resolved image id
NetworkSettings: &NetworkSettings{},
// FIXME: do we need to store this in the container?
SysInitPath: sysInitPath,
}
container.root = builder.runtime.containerRoot(container.ID)
// Step 1: create the container directory.
// This doubles as a barrier to avoid race conditions.
if err := os.Mkdir(container.root, 0700); err != nil {
return nil, err
}
resolvConf, err := utils.GetResolvConf()
if err != nil {
return nil, err
}
if len(config.Dns) == 0 && len(builder.runtime.Dns) == 0 && utils.CheckLocalDns(resolvConf) {
//"WARNING: Docker detected local DNS server on resolv.conf. Using default external servers: %v", defaultDns
builder.runtime.Dns = defaultDns
}
// If custom dns exists, then create a resolv.conf for the container
if len(config.Dns) > 0 || len(builder.runtime.Dns) > 0 {
var dns []string
if len(config.Dns) > 0 {
dns = config.Dns
} else {
dns = builder.runtime.Dns
}
container.ResolvConfPath = path.Join(container.root, "resolv.conf")
f, err := os.Create(container.ResolvConfPath)
if err != nil {
return nil, err
}
defer f.Close()
for _, dns := range dns {
if _, err := f.Write([]byte("nameserver " + dns + "\n")); err != nil {
return nil, err
}
}
} else {
container.ResolvConfPath = "/etc/resolv.conf"
}
// Step 2: save the container json
if err := container.ToDisk(); err != nil {
return nil, err
}
// Step 3: register the container
if err := builder.runtime.Register(container); err != nil {
return nil, err
}
return container, nil
}
// Commit creates a new filesystem image from the current state of a container.
// The image can optionally be tagged into a repository
func (builder *Builder) Commit(container *Container, repository, tag, comment, author string, config *Config) (*Image, error) {
// FIXME: freeze the container before copying it to avoid data corruption?
// FIXME: this shouldn't be in commands.
if err := container.EnsureMounted(); err != nil {
return nil, err
}
rwTar, err := container.ExportRw()
if err != nil {
return nil, err
}
// Create a new image from the container's base layers + a new layer from container changes
img, err := builder.graph.Create(rwTar, container, comment, author, config)
if err != nil {
return nil, err
}
// Register the image if needed
if repository != "" {
if err := builder.repositories.Set(repository, tag, img.ID, true); err != nil {
return img, err
}
}
return img, nil
}
buildfile.go (12 changes)

@@ -23,7 +23,6 @@ type BuildFile interface {
type buildFile struct {
runtime *Runtime
builder *Builder
srv *Server
image string

@@ -293,7 +292,7 @@ func (b *buildFile) addContext(container *Container, orig, dest string) error {
}
fi, err := os.Stat(origPath)
if err != nil {
return err
return fmt.Errorf("%s: no such file or directory", orig)
}
if fi.IsDir() {
if err := CopyWithTar(origPath, destPath); err != nil {

@@ -337,7 +336,7 @@ func (b *buildFile) CmdAdd(args string) error {
b.config.Image = b.image
// Create the container and start it
container, err := b.builder.Create(b.config)
container, err := b.runtime.Create(b.config)
if err != nil {
return err
}

@@ -372,7 +371,7 @@ func (b *buildFile) run() (string, error) {
b.config.Image = b.image
// Create the container and start it
c, err := b.builder.Create(b.config)
c, err := b.runtime.Create(b.config)
if err != nil {
return "", err
}

@@ -428,7 +427,7 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
}
}
container, err := b.builder.Create(b.config)
container, err := b.runtime.Create(b.config)
if err != nil {
return err
}

@@ -450,7 +449,7 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
autoConfig := *b.config
autoConfig.Cmd = autoCmd
// Commit the container
image, err := b.builder.Commit(container, "", "", "", b.maintainer, &autoConfig)
image, err := b.runtime.Commit(container, "", "", "", b.maintainer, &autoConfig)
if err != nil {
return err
}

@@ -524,7 +523,6 @@ func (b *buildFile) Build(context io.Reader) (string, error) {
func NewBuildFile(srv *Server, out io.Writer, verbose, utilizeCache bool) BuildFile {
return &buildFile{
builder: NewBuilder(srv.runtime),
runtime: srv.runtime,
srv: srv,
config: &Config{},
@@ -483,3 +483,51 @@ func TestForbiddenContextPath(t *testing.T) {
t.Fail()
}
}
func TestBuildADDFileNotFound(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
srv := &Server{
runtime: runtime,
pullingPool: make(map[string]struct{}),
pushingPool: make(map[string]struct{}),
}
context := testContextTemplate{`
from {IMAGE}
add foo /usr/local/bar
`,
nil, nil}
httpServer, err := mkTestingFileServer(context.remoteFiles)
if err != nil {
t.Fatal(err)
}
defer httpServer.Close()
idx := strings.LastIndex(httpServer.URL, ":")
if idx < 0 {
t.Fatalf("could not get port from test http server address %s", httpServer.URL)
}
port := httpServer.URL[idx+1:]
ip := srv.runtime.networkManager.bridgeNetwork.IP
dockerfile := constructDockerfile(context.dockerfile, ip, port)
buildfile := NewBuildFile(srv, ioutil.Discard, false, true)
_, err = buildfile.Build(mkTestContext(dockerfile, context.files, t))
if err == nil {
t.Log("Error should not be nil")
t.Fail()
}
if err.Error() != "foo: no such file or directory" {
t.Logf("Error message is not expected: %s", err.Error())
t.Fail()
}
}
@@ -99,7 +99,7 @@ func Changes(layers []string, rw string) ([]Change, error) {
changes = append(changes, change)
return nil
})
if err != nil {
if err != nil && !os.IsNotExist(err) {
return nil, err
}
return changes, nil
289
commands.go
289
commands.go
|
@ -2,11 +2,14 @@ package docker
|
|||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"github.com/dotcloud/docker/auth"
|
||||
"github.com/dotcloud/docker/registry"
|
||||
"github.com/dotcloud/docker/term"
|
||||
"github.com/dotcloud/docker/utils"
|
||||
"io"
|
||||
|
@ -20,17 +23,17 @@ import (
|
|||
"path/filepath"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"text/tabwriter"
|
||||
"time"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
var (
|
||||
GITCOMMIT string
|
||||
VERSION string
|
||||
VERSION string
|
||||
)
|
||||
|
||||
func (cli *DockerCli) getMethod(name string) (reflect.Method, bool) {
|
||||
|
@ -126,7 +129,7 @@ func (cli *DockerCli) CmdInsert(args ...string) error {
|
|||
v.Set("url", cmd.Arg(1))
|
||||
v.Set("path", cmd.Arg(2))
|
||||
|
||||
if err := cli.stream("POST", "/images/"+cmd.Arg(0)+"/insert?"+v.Encode(), nil, cli.out); err != nil {
|
||||
if err := cli.stream("POST", "/images/"+cmd.Arg(0)+"/insert?"+v.Encode(), nil, cli.out, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
@ -252,75 +255,27 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
|
|||
|
||||
// 'docker login': login / register a user to registry service.
|
||||
func (cli *DockerCli) CmdLogin(args ...string) error {
|
||||
var readStringOnRawTerminal = func(stdin io.Reader, stdout io.Writer, echo bool) string {
|
||||
char := make([]byte, 1)
|
||||
buffer := make([]byte, 64)
|
||||
var i = 0
|
||||
for i < len(buffer) {
|
||||
n, err := stdin.Read(char)
|
||||
if n > 0 {
|
||||
if char[0] == '\r' || char[0] == '\n' {
|
||||
stdout.Write([]byte{'\r', '\n'})
|
||||
break
|
||||
} else if char[0] == 127 || char[0] == '\b' {
|
||||
if i > 0 {
|
||||
if echo {
|
||||
stdout.Write([]byte{'\b', ' ', '\b'})
|
||||
}
|
||||
i--
|
||||
}
|
||||
} else if !unicode.IsSpace(rune(char[0])) &&
|
||||
!unicode.IsControl(rune(char[0])) {
|
||||
if echo {
|
||||
stdout.Write(char)
|
||||
}
|
||||
buffer[i] = char[0]
|
||||
i++
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
if err != io.EOF {
|
||||
fmt.Fprintf(stdout, "Read error: %v\r\n", err)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
return string(buffer[:i])
|
||||
}
|
||||
var readAndEchoString = func(stdin io.Reader, stdout io.Writer) string {
|
||||
return readStringOnRawTerminal(stdin, stdout, true)
|
||||
}
|
||||
var readString = func(stdin io.Reader, stdout io.Writer) string {
|
||||
return readStringOnRawTerminal(stdin, stdout, false)
|
||||
}
|
||||
cmd := Subcmd("login", "[OPTIONS] [SERVER]", "Register or Login to a docker registry server, if no server is specified \""+auth.IndexServerAddress()+"\" is the default.")
|
||||
|
||||
cmd := Subcmd("login", "[OPTIONS]", "Register or Login to the docker registry server")
|
||||
flUsername := cmd.String("u", "", "username")
|
||||
flPassword := cmd.String("p", "", "password")
|
||||
flEmail := cmd.String("e", "", "email")
|
||||
var username, password, email string
|
||||
|
||||
cmd.StringVar(&username, "u", "", "username")
|
||||
cmd.StringVar(&password, "p", "", "password")
|
||||
cmd.StringVar(&email, "e", "", "email")
|
||||
err := cmd.Parse(args)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
cli.LoadConfigFile()
|
||||
|
||||
var oldState *term.State
|
||||
if *flUsername == "" || *flPassword == "" || *flEmail == "" {
|
||||
oldState, err = term.SetRawTerminal(cli.terminalFd)
|
||||
serverAddress := auth.IndexServerAddress()
|
||||
if len(cmd.Args()) > 0 {
|
||||
serverAddress, err = registry.ExpandAndVerifyRegistryUrl(cmd.Arg(0))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer term.RestoreTerminal(cli.terminalFd, oldState)
|
||||
fmt.Fprintf(cli.out, "Login against server at %s\n", serverAddress)
|
||||
}
|
||||
|
||||
var (
|
||||
username string
|
||||
password string
|
||||
email string
|
||||
)
|
||||
|
||||
var promptDefault = func(prompt string, configDefault string) {
|
||||
promptDefault := func(prompt string, configDefault string) {
|
||||
if configDefault == "" {
|
||||
fmt.Fprintf(cli.out, "%s: ", prompt)
|
||||
} else {
|
||||
|
@ -328,55 +283,64 @@ func (cli *DockerCli) CmdLogin(args ...string) error {
|
|||
}
|
||||
}
|
||||
|
||||
readInput := func(in io.Reader, out io.Writer) string {
|
||||
reader := bufio.NewReader(in)
|
||||
line, _, err := reader.ReadLine()
|
||||
if err != nil {
|
||||
fmt.Fprintln(out, err.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
return string(line)
|
||||
}
|
||||
|
||||
cli.LoadConfigFile()
|
||||
authconfig, ok := cli.configFile.Configs[auth.IndexServerAddress()]
|
||||
if !ok {
|
||||
authconfig = auth.AuthConfig{}
|
||||
}
|
||||
|
||||
if *flUsername == "" {
|
||||
if username == "" {
|
||||
promptDefault("Username", authconfig.Username)
|
||||
username = readAndEchoString(cli.in, cli.out)
|
||||
username = readInput(cli.in, cli.out)
|
||||
if username == "" {
|
||||
username = authconfig.Username
|
||||
}
|
||||
} else {
|
||||
username = *flUsername
|
||||
}
|
||||
if username != authconfig.Username {
|
||||
if *flPassword == "" {
|
||||
if password == "" {
|
||||
oldState, _ := term.SaveState(cli.terminalFd)
|
||||
fmt.Fprintf(cli.out, "Password: ")
|
||||
password = readString(cli.in, cli.out)
|
||||
term.DisableEcho(cli.terminalFd, oldState)
|
||||
|
||||
password = readInput(cli.in, cli.out)
|
||||
fmt.Fprint(cli.out, "\n")
|
||||
|
||||
term.RestoreTerminal(cli.terminalFd, oldState)
|
||||
if password == "" {
|
||||
return fmt.Errorf("Error : Password Required")
|
||||
}
|
||||
} else {
|
||||
password = *flPassword
|
||||
}
|
||||
|
||||
if *flEmail == "" {
|
||||
if email == "" {
|
||||
promptDefault("Email", authconfig.Email)
|
||||
email = readAndEchoString(cli.in, cli.out)
|
||||
email = readInput(cli.in, cli.out)
|
||||
if email == "" {
|
||||
email = authconfig.Email
|
||||
}
|
||||
} else {
|
||||
email = *flEmail
|
||||
}
|
||||
} else {
|
||||
password = authconfig.Password
|
||||
email = authconfig.Email
|
||||
}
|
||||
if oldState != nil {
|
||||
term.RestoreTerminal(cli.terminalFd, oldState)
|
||||
}
|
||||
authconfig.Username = username
|
||||
authconfig.Password = password
|
||||
authconfig.Email = email
|
||||
cli.configFile.Configs[auth.IndexServerAddress()] = authconfig
|
||||
authconfig.ServerAddress = serverAddress
|
||||
cli.configFile.Configs[serverAddress] = authconfig
|
||||
|
||||
body, statusCode, err := cli.call("POST", "/auth", cli.configFile.Configs[auth.IndexServerAddress()])
|
||||
body, statusCode, err := cli.call("POST", "/auth", cli.configFile.Configs[serverAddress])
|
||||
if statusCode == 401 {
|
||||
delete(cli.configFile.Configs, auth.IndexServerAddress())
|
||||
delete(cli.configFile.Configs, serverAddress)
|
||||
auth.SaveConfig(cli.configFile)
|
||||
return err
|
||||
}
|
||||
|
@ -408,16 +372,11 @@ func (cli *DockerCli) CmdWait(args ...string) error {
|
|||
return nil
|
||||
}
|
||||
for _, name := range cmd.Args() {
|
||||
body, _, err := cli.call("POST", "/containers/"+name+"/wait", nil)
|
||||
status, err := waitForExit(cli, name)
|
||||
if err != nil {
|
||||
fmt.Fprintf(cli.err, "%s", err)
|
||||
} else {
|
||||
var out APIWait
|
||||
err = json.Unmarshal(body, &out)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintf(cli.out, "%d\n", out.StatusCode)
|
||||
fmt.Fprintf(cli.out, "%d\n", status)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
@ -434,8 +393,9 @@ func (cli *DockerCli) CmdVersion(args ...string) error {
|
|||
cmd.Usage()
|
||||
return nil
|
||||
}
|
||||
|
||||
fmt.Fprintf(cli.out, "Client version: %s\n", VERSION)
|
||||
if VERSION != "" {
|
||||
fmt.Fprintf(cli.out, "Client version: %s\n", VERSION)
|
||||
}
|
||||
fmt.Fprintf(cli.out, "Go version (client): %s\n", runtime.Version())
|
||||
if GITCOMMIT != "" {
|
||||
fmt.Fprintf(cli.out, "Git commit (client): %s\n", GITCOMMIT)
|
||||
|
@ -452,7 +412,9 @@ func (cli *DockerCli) CmdVersion(args ...string) error {
|
|||
utils.Debugf("Error unmarshal: body: %s, err: %s\n", body, err)
|
||||
return err
|
||||
}
|
||||
fmt.Fprintf(cli.out, "Server version: %s\n", out.Version)
|
||||
if out.Version != "" {
|
||||
fmt.Fprintf(cli.out, "Server version: %s\n", out.Version)
|
||||
}
|
||||
if out.GitCommit != "" {
|
||||
fmt.Fprintf(cli.out, "Git commit (server): %s\n", out.GitCommit)
|
||||
}
|
||||
|
@ -463,7 +425,7 @@ func (cli *DockerCli) CmdVersion(args ...string) error {
|
|||
release := utils.GetReleaseVersion()
|
||||
if release != "" {
|
||||
fmt.Fprintf(cli.out, "Last stable version: %s", release)
|
||||
if strings.Trim(VERSION, "-dev") != release || strings.Trim(out.Version, "-dev") != release {
|
||||
if (VERSION != "" || out.Version != "") && (strings.Trim(VERSION, "-dev") != release || strings.Trim(out.Version, "-dev") != release) {
|
||||
fmt.Fprintf(cli.out, ", please update docker")
|
||||
}
|
||||
fmt.Fprintf(cli.out, "\n")
|
||||
|
@ -615,7 +577,7 @@ func (cli *DockerCli) CmdInspect(args ...string) error {
|
|||
if err != nil {
|
||||
obj, _, err = cli.call("GET", "/images/"+name+"/json", nil)
|
||||
if err != nil {
|
||||
fmt.Fprintf(cli.err, "%s\n", err)
|
||||
fmt.Fprintf(cli.err, "No such image or container: %s\n", name)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
@ -829,7 +791,7 @@ func (cli *DockerCli) CmdImport(args ...string) error {
|
|||
v.Set("tag", tag)
|
||||
v.Set("fromSrc", src)
|
||||
|
||||
err := cli.stream("POST", "/images/create?"+v.Encode(), cli.in, cli.out)
|
||||
err := cli.stream("POST", "/images/create?"+v.Encode(), cli.in, cli.out, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@@ -850,6 +812,13 @@ func (cli *DockerCli) CmdPush(args ...string) error {

	cli.LoadConfigFile()

	// Resolve the Repository name from fqn to endpoint + name
	endpoint, _, err := registry.ResolveRepositoryName(name)
	if err != nil {
		return err
	}
	// Resolve the Auth config relevant for this server
	authConfig := cli.configFile.ResolveAuthConfig(endpoint)
	// If we're not using a custom registry, we know the restrictions
	// applied to repository names and can warn the user in advance.
	// Custom repositories can have different rules, and we must also

@@ -863,22 +832,28 @@ func (cli *DockerCli) CmdPush(args ...string) error {
	}

	v := url.Values{}
	push := func() error {
		buf, err := json.Marshal(cli.configFile.Configs[auth.IndexServerAddress()])
	push := func(authConfig auth.AuthConfig) error {
		buf, err := json.Marshal(authConfig)
		if err != nil {
			return err
		}
		registryAuthHeader := []string{
			base64.URLEncoding.EncodeToString(buf),
		}

		return cli.stream("POST", "/images/"+name+"/push?"+v.Encode(), bytes.NewBuffer(buf), cli.out)
		return cli.stream("POST", "/images/"+name+"/push?"+v.Encode(), nil, cli.out, map[string][]string{
			"X-Registry-Auth": registryAuthHeader,
		})
	}

	if err := push(); err != nil {
		if err.Error() == "Authentication is required." {
	if err := push(authConfig); err != nil {
		if err.Error() == registry.ErrLoginRequired.Error() {
			fmt.Fprintln(cli.out, "\nPlease login prior to push:")
			if err := cli.CmdLogin(""); err != nil {
			if err := cli.CmdLogin(endpoint); err != nil {
				return err
			}
			return push()
			authConfig := cli.configFile.ResolveAuthConfig(endpoint)
			return push(authConfig)
		}
		return err
	}
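For reference, the header value the new push/pull closures send is simply the JSON-marshalled auth config, base64 URL-encoded. A minimal, self-contained sketch (not part of this changeset; the struct below is a stand-in for auth.AuthConfig, with JSON keys taken from the /auth documentation):

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// Stand-in for auth.AuthConfig; JSON keys follow the /auth documentation.
type authConfig struct {
	Username      string `json:"username"`
	Password      string `json:"password"`
	Email         string `json:"email"`
	ServerAddress string `json:"serveraddress"`
}

func main() {
	cfg := authConfig{
		Username:      "hannibal",
		Password:      "xxxx",
		Email:         "hannibal@a-team.com",
		ServerAddress: "https://index.docker.io/v1/",
	}
	buf, err := json.Marshal(cfg)
	if err != nil {
		panic(err)
	}
	// This is the value sent in the X-Registry-Auth request header.
	fmt.Println(base64.URLEncoding.EncodeToString(buf))
}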
@ -902,11 +877,43 @@ func (cli *DockerCli) CmdPull(args ...string) error {
|
|||
*tag = parsedTag
|
||||
}
|
||||
|
||||
// Resolve the Repository name from fqn to endpoint + name
|
||||
endpoint, _, err := registry.ResolveRepositoryName(remote)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cli.LoadConfigFile()
|
||||
|
||||
// Resolve the Auth config relevant for this server
|
||||
authConfig := cli.configFile.ResolveAuthConfig(endpoint)
|
||||
v := url.Values{}
|
||||
v.Set("fromImage", remote)
|
||||
v.Set("tag", *tag)
|
||||
|
||||
if err := cli.stream("POST", "/images/create?"+v.Encode(), nil, cli.out); err != nil {
|
||||
pull := func(authConfig auth.AuthConfig) error {
|
||||
buf, err := json.Marshal(authConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
registryAuthHeader := []string{
|
||||
base64.URLEncoding.EncodeToString(buf),
|
||||
}
|
||||
|
||||
return cli.stream("POST", "/images/create?"+v.Encode(), nil, cli.out, map[string][]string{
|
||||
"X-Registry-Auth": registryAuthHeader,
|
||||
})
|
||||
}
|
||||
|
||||
if err := pull(authConfig); err != nil {
|
||||
if err.Error() == registry.ErrLoginRequired.Error() {
|
||||
fmt.Fprintln(cli.out, "\nPlease login prior to push:")
|
||||
if err := cli.CmdLogin(endpoint); err != nil {
|
||||
return err
|
||||
}
|
||||
authConfig := cli.configFile.ResolveAuthConfig(endpoint)
|
||||
return pull(authConfig)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
|
@@ -996,6 +1003,19 @@ func (cli *DockerCli) CmdImages(args ...string) error {
	return nil
}

func displayablePorts(ports []APIPort) string {
	result := []string{}
	for _, port := range ports {
		if port.Type == "tcp" {
			result = append(result, fmt.Sprintf("%d->%d", port.PublicPort, port.PrivatePort))
		} else {
			result = append(result, fmt.Sprintf("%d->%d/%s", port.PublicPort, port.PrivatePort, port.Type))
		}
	}
	sort.Strings(result)
	return strings.Join(result, ", ")
}
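For reference, a self-contained sketch (not part of this changeset) of the PORTS column format the new displayablePorts helper produces; APIPort is re-declared locally as a stand-in so the snippet runs on its own:

package main

import (
	"fmt"
	"sort"
	"strings"
)

// Local stand-in for the APIPort type used above.
type APIPort struct {
	PrivatePort int64
	PublicPort  int64
	Type        string
}

func displayablePorts(ports []APIPort) string {
	result := []string{}
	for _, port := range ports {
		if port.Type == "tcp" {
			result = append(result, fmt.Sprintf("%d->%d", port.PublicPort, port.PrivatePort))
		} else {
			result = append(result, fmt.Sprintf("%d->%d/%s", port.PublicPort, port.PrivatePort, port.Type))
		}
	}
	sort.Strings(result)
	return strings.Join(result, ", ")
}

func main() {
	ports := []APIPort{
		{PrivatePort: 80, PublicPort: 49153, Type: "tcp"},
		{PrivatePort: 53, PublicPort: 49154, Type: "udp"},
	}
	fmt.Println(displayablePorts(ports)) // 49153->80, 49154->53/udp
}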

func (cli *DockerCli) CmdPs(args ...string) error {
	cmd := Subcmd("ps", "[OPTIONS]", "List containers")
	quiet := cmd.Bool("q", false, "Only display numeric IDs")
@ -1053,9 +1073,9 @@ func (cli *DockerCli) CmdPs(args ...string) error {
|
|||
for _, out := range outs {
|
||||
if !*quiet {
|
||||
if *noTrunc {
|
||||
fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\t%s\t", out.ID, out.Image, out.Command, utils.HumanDuration(time.Now().Sub(time.Unix(out.Created, 0))), out.Status, out.Ports)
|
||||
fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\t%s\t", out.ID, out.Image, out.Command, utils.HumanDuration(time.Now().Sub(time.Unix(out.Created, 0))), out.Status, displayablePorts(out.Ports))
|
||||
} else {
|
||||
fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\t%s\t", utils.TruncateID(out.ID), out.Image, utils.Trunc(out.Command, 20), utils.HumanDuration(time.Now().Sub(time.Unix(out.Created, 0))), out.Status, out.Ports)
|
||||
fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\t%s\t", utils.TruncateID(out.ID), out.Image, utils.Trunc(out.Command, 20), utils.HumanDuration(time.Now().Sub(time.Unix(out.Created, 0))), out.Status, displayablePorts(out.Ports))
|
||||
}
|
||||
if *size {
|
||||
if out.SizeRootFs > 0 {
|
||||
|
@ -1140,7 +1160,7 @@ func (cli *DockerCli) CmdEvents(args ...string) error {
|
|||
v.Set("since", *since)
|
||||
}
|
||||
|
||||
if err := cli.stream("GET", "/events?"+v.Encode(), nil, cli.out); err != nil {
|
||||
if err := cli.stream("GET", "/events?"+v.Encode(), nil, cli.out, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
@ -1157,7 +1177,7 @@ func (cli *DockerCli) CmdExport(args ...string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
if err := cli.stream("GET", "/containers/"+cmd.Arg(0)+"/export", nil, cli.out); err != nil {
|
||||
if err := cli.stream("GET", "/containers/"+cmd.Arg(0)+"/export", nil, cli.out, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
@ -1423,13 +1443,36 @@ func (cli *DockerCli) CmdRun(args ...string) error {
|
|||
tag = DEFAULTTAG
|
||||
}
|
||||
|
||||
fmt.Printf("Unable to find image '%s' (tag: %s) locally\n", config.Image, tag)
|
||||
fmt.Fprintf(cli.err, "Unable to find image '%s' (tag: %s) locally\n", config.Image, tag)
|
||||
|
||||
v := url.Values{}
|
||||
repos, tag := utils.ParseRepositoryTag(config.Image)
|
||||
v.Set("fromImage", repos)
|
||||
v.Set("tag", tag)
|
||||
err = cli.stream("POST", "/images/create?"+v.Encode(), nil, cli.err)
|
||||
|
||||
// Resolve the Repository name from fqn to endpoint + name
|
||||
var endpoint string
|
||||
endpoint, _, err = registry.ResolveRepositoryName(repos)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Load the auth config file, to be able to pull the image
|
||||
cli.LoadConfigFile()
|
||||
|
||||
// Resolve the Auth config relevant for this server
|
||||
authConfig := cli.configFile.ResolveAuthConfig(endpoint)
|
||||
buf, err := json.Marshal(authConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
registryAuthHeader := []string{
|
||||
base64.URLEncoding.EncodeToString(buf),
|
||||
}
|
||||
err = cli.stream("POST", "/images/create?"+v.Encode(), nil, cli.err, map[string][]string{
|
||||
"X-Registry-Auth": registryAuthHeader,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -1515,8 +1558,18 @@ func (cli *DockerCli) CmdRun(args ...string) error {
|
|||
}
|
||||
|
||||
if !config.AttachStdout && !config.AttachStderr {
|
||||
// Detached mode
|
||||
<-wait
|
||||
} else {
|
||||
status, err := waitForExit(cli, runResult.ID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if status != 0 {
|
||||
return &utils.StatusError{status}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -1606,7 +1659,7 @@ func (cli *DockerCli) call(method, path string, data interface{}) ([]byte, int,
|
|||
return body, resp.StatusCode, nil
|
||||
}
|
||||
|
||||
func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer) error {
|
||||
func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer, headers map[string][]string) error {
|
||||
if (method == "POST" || method == "PUT") && in == nil {
|
||||
in = bytes.NewReader([]byte{})
|
||||
}
|
||||
|
@ -1619,6 +1672,13 @@ func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer) e
|
|||
if method == "POST" {
|
||||
req.Header.Set("Content-Type", "plain/text")
|
||||
}
|
||||
|
||||
if headers != nil {
|
||||
for k, v := range headers {
|
||||
req.Header[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
dial, err := net.Dial(cli.proto, cli.addr)
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "connection refused") {
|
||||
|
@@ -1797,6 +1857,19 @@ func (cli *DockerCli) LoadConfigFile() (err error) {
	return err
}

func waitForExit(cli *DockerCli, containerId string) (int, error) {
	body, _, err := cli.call("POST", "/containers/"+containerId+"/wait", nil)
	if err != nil {
		return -1, err
	}

	var out APIWait
	if err := json.Unmarshal(body, &out); err != nil {
		return -1, err
	}
	return out.StatusCode, nil
}
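For reference, a small sketch (not part of this changeset) of the JSON body that waitForExit decodes from POST /containers/(id)/wait; the local struct stands in for APIWait and assumes only the StatusCode field used above:

package main

import (
	"encoding/json"
	"fmt"
)

// Stand-in for APIWait; only the field used by waitForExit is assumed here.
type apiWait struct {
	StatusCode int
}

func main() {
	body := []byte(`{"StatusCode": 3}`) // example response from /containers/(id)/wait
	var out apiWait
	if err := json.Unmarshal(body, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.StatusCode) // 3 -- CmdRun now propagates this as the CLI exit status
}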

func NewDockerCli(in io.ReadCloser, out, err io.Writer, proto, addr string) *DockerCli {
	var (
		isTerminal = false
@ -152,7 +152,6 @@ func TestRunWorkdirExists(t *testing.T) {
|
|||
|
||||
}
|
||||
|
||||
|
||||
func TestRunExit(t *testing.T) {
|
||||
stdin, stdinPipe := io.Pipe()
|
||||
stdout, stdoutPipe := io.Pipe()
|
||||
|
|
85 container.go
|
@ -11,16 +11,15 @@ import (
|
|||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
"net"
|
||||
)
|
||||
|
||||
type Container struct {
|
||||
|
@ -42,6 +41,8 @@ type Container struct {
|
|||
|
||||
SysInitPath string
|
||||
ResolvConfPath string
|
||||
HostnamePath string
|
||||
HostsPath string
|
||||
|
||||
cmd *exec.Cmd
|
||||
stdout *utils.WriteBroadcaster
|
||||
|
@ -61,6 +62,7 @@ type Container struct {
|
|||
|
||||
type Config struct {
|
||||
Hostname string
|
||||
Domainname string
|
||||
User string
|
||||
Memory int64 // Memory limit (in bytes)
|
||||
MemorySwap int64 // Total memory usage (memory + swap); set `-1' to disable swap
|
||||
|
@ -107,7 +109,7 @@ type KeyValuePair struct {
|
|||
|
||||
func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig, *flag.FlagSet, error) {
|
||||
cmd := Subcmd("run", "[OPTIONS] IMAGE [COMMAND] [ARG...]", "Run a command in a new container")
|
||||
if len(args) > 0 && args[0] != "--help" {
|
||||
if os.Getenv("TEST") != "" {
|
||||
cmd.SetOutput(ioutil.Discard)
|
||||
cmd.Usage = nil
|
||||
}
|
||||
|
@@ -203,8 +205,17 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig,
		return nil, nil, cmd, err
	}

	hostname := *flHostname
	domainname := ""

	parts := strings.SplitN(hostname, ".", 2)
	if len(parts) > 1 {
		hostname = parts[0]
		domainname = parts[1]
	}
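For reference, a tiny standalone example (not part of this changeset) of the split performed above: with a limit of 2, everything after the first dot becomes the domain name:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// A hostname value such as "db1.example.com" now yields
	// Hostname "db1" and Domainname "example.com".
	parts := strings.SplitN("db1.example.com", ".", 2)
	fmt.Println(parts[0]) // db1
	fmt.Println(parts[1]) // example.com
}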
	config := &Config{
		Hostname:   *flHostname,
		Hostname:   hostname,
		Domainname: domainname,
		PortSpecs:  flPorts,
		User:       *flUser,
		Tty:        *flTty,
@@ -253,17 +264,28 @@ type NetworkSettings struct {
	PortMapping map[string]PortMapping
}

// String returns a human-readable description of the port mapping defined in the settings
func (settings *NetworkSettings) PortMappingHuman() string {
	var mapping []string
// returns a more easy to process description of the port mapping defined in the settings
func (settings *NetworkSettings) PortMappingAPI() []APIPort {
	var mapping []APIPort
	for private, public := range settings.PortMapping["Tcp"] {
		mapping = append(mapping, fmt.Sprintf("%s->%s", public, private))
		pubint, _ := strconv.ParseInt(public, 0, 0)
		privint, _ := strconv.ParseInt(private, 0, 0)
		mapping = append(mapping, APIPort{
			PrivatePort: privint,
			PublicPort:  pubint,
			Type:        "tcp",
		})
	}
	for private, public := range settings.PortMapping["Udp"] {
		mapping = append(mapping, fmt.Sprintf("%s->%s/udp", public, private))
		pubint, _ := strconv.ParseInt(public, 0, 0)
		privint, _ := strconv.ParseInt(private, 0, 0)
		mapping = append(mapping, APIPort{
			PrivatePort: privint,
			PublicPort:  pubint,
			Type:        "udp",
		})
	}
	sort.Strings(mapping)
	return strings.Join(mapping, ", ")
	return mapping
}
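For reference, a self-contained sketch (not part of this changeset) of what the []APIPort returned by PortMappingAPI serializes to in /containers/json; the struct is a local stand-in whose field names match the Ports entry documented for API v1.5:

package main

import (
	"encoding/json"
	"fmt"
)

// Local stand-in for APIPort; field names match the Ports entry in the v1.5 API notes.
type APIPort struct {
	PrivatePort int64
	PublicPort  int64
	Type        string
}

func main() {
	ports := []APIPort{{PrivatePort: 80, PublicPort: 49153, Type: "tcp"}}
	buf, err := json.Marshal(ports)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf)) // [{"PrivatePort":80,"PublicPort":49153,"Type":"tcp"}]
}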

// Inject the io.Reader at the given path. Note: do not close the reader
@ -642,11 +664,13 @@ func (container *Container) Start(hostConfig *HostConfig) error {
|
|||
if _, exists := container.Volumes[volPath]; exists {
|
||||
continue
|
||||
}
|
||||
var srcPath string
|
||||
srcRW := false
|
||||
// If an external bind is defined for this volume, use that as a source
|
||||
if bindMap, exists := binds[volPath]; exists {
|
||||
container.Volumes[volPath] = bindMap.SrcPath
|
||||
srcPath = bindMap.SrcPath
|
||||
if strings.ToLower(bindMap.Mode) == "rw" {
|
||||
container.VolumesRW[volPath] = true
|
||||
srcRW = true
|
||||
}
|
||||
// Otherwise create an directory in $ROOT/volumes/ and use that
|
||||
} else {
|
||||
|
@ -654,17 +678,36 @@ func (container *Container) Start(hostConfig *HostConfig) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
srcPath, err := c.layer()
|
||||
srcPath, err = c.layer()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
container.Volumes[volPath] = srcPath
|
||||
container.VolumesRW[volPath] = true // RW by default
|
||||
srcRW = true // RW by default
|
||||
}
|
||||
container.Volumes[volPath] = srcPath
|
||||
container.VolumesRW[volPath] = srcRW
|
||||
// Create the mountpoint
|
||||
if err := os.MkdirAll(path.Join(container.RootfsPath(), volPath), 0755); err != nil {
|
||||
rootVolPath := path.Join(container.RootfsPath(), volPath)
|
||||
if err := os.MkdirAll(rootVolPath, 0755); err != nil {
|
||||
return nil
|
||||
}
|
||||
if srcRW {
|
||||
volList, err := ioutil.ReadDir(rootVolPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(volList) > 0 {
|
||||
srcList, err := ioutil.ReadDir(srcPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(srcList) == 0 {
|
||||
if err := CopyWithTar(rootVolPath, srcPath); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := container.generateLXCConfig(hostConfig); err != nil {
|
||||
|
@ -813,10 +856,10 @@ func (container *Container) allocateNetwork() error {
|
|||
iface = &NetworkInterface{disabled: true}
|
||||
} else {
|
||||
iface = &NetworkInterface{
|
||||
IPNet: net.IPNet{IP: net.ParseIP(container.NetworkSettings.IPAddress), Mask: manager.bridgeNetwork.Mask},
|
||||
IPNet: net.IPNet{IP: net.ParseIP(container.NetworkSettings.IPAddress), Mask: manager.bridgeNetwork.Mask},
|
||||
Gateway: manager.bridgeNetwork.IP,
|
||||
manager: manager,
|
||||
}
|
||||
}
|
||||
ipNum := ipToInt(iface.IPNet.IP)
|
||||
manager.ipAllocator.inUse[ipNum] = struct{}{}
|
||||
}
|
||||
|
@ -827,10 +870,10 @@ func (container *Container) allocateNetwork() error {
|
|||
portSpecs = container.Config.PortSpecs
|
||||
} else {
|
||||
for backend, frontend := range container.NetworkSettings.PortMapping["Tcp"] {
|
||||
portSpecs = append(portSpecs, fmt.Sprintf("%s:%s/tcp",frontend, backend))
|
||||
portSpecs = append(portSpecs, fmt.Sprintf("%s:%s/tcp", frontend, backend))
|
||||
}
|
||||
for backend, frontend := range container.NetworkSettings.PortMapping["Udp"] {
|
||||
portSpecs = append(portSpecs, fmt.Sprintf("%s:%s/udp",frontend, backend))
|
||||
portSpecs = append(portSpecs, fmt.Sprintf("%s:%s/udp", frontend, backend))
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -18,7 +18,7 @@ import (
|
|||
func TestIDFormat(t *testing.T) {
|
||||
runtime := mkRuntime(t)
|
||||
defer nuke(runtime)
|
||||
container1, err := NewBuilder(runtime).Create(
|
||||
container1, err := runtime.Create(
|
||||
&Config{
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"/bin/sh", "-c", "echo hello world"},
|
||||
|
@ -138,12 +138,21 @@ func TestDiff(t *testing.T) {
|
|||
container1, _, _ := mkContainer(runtime, []string{"_", "/bin/rm", "/etc/passwd"}, t)
|
||||
defer runtime.Destroy(container1)
|
||||
|
||||
// The changelog should be empty and not fail before run. See #1705
|
||||
c, err := container1.Changes()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(c) != 0 {
|
||||
t.Fatalf("Changelog should be empty before run")
|
||||
}
|
||||
|
||||
if err := container1.Run(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Check the changelog
|
||||
c, err := container1.Changes()
|
||||
c, err = container1.Changes()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -379,7 +388,7 @@ func TestRun(t *testing.T) {
|
|||
func TestOutput(t *testing.T) {
|
||||
runtime := mkRuntime(t)
|
||||
defer nuke(runtime)
|
||||
container, err := NewBuilder(runtime).Create(
|
||||
container, err := runtime.Create(
|
||||
&Config{
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"echo", "-n", "foobar"},
|
||||
|
@ -402,7 +411,7 @@ func TestKillDifferentUser(t *testing.T) {
|
|||
runtime := mkRuntime(t)
|
||||
defer nuke(runtime)
|
||||
|
||||
container, err := NewBuilder(runtime).Create(&Config{
|
||||
container, err := runtime.Create(&Config{
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"cat"},
|
||||
OpenStdin: true,
|
||||
|
@ -462,7 +471,7 @@ func TestCreateVolume(t *testing.T) {
|
|||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
c, err := NewBuilder(runtime).Create(config)
|
||||
c, err := runtime.Create(config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -477,7 +486,7 @@ func TestCreateVolume(t *testing.T) {
|
|||
func TestKill(t *testing.T) {
|
||||
runtime := mkRuntime(t)
|
||||
defer nuke(runtime)
|
||||
container, err := NewBuilder(runtime).Create(&Config{
|
||||
container, err := runtime.Create(&Config{
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"sleep", "2"},
|
||||
},
|
||||
|
@ -521,9 +530,7 @@ func TestExitCode(t *testing.T) {
|
|||
runtime := mkRuntime(t)
|
||||
defer nuke(runtime)
|
||||
|
||||
builder := NewBuilder(runtime)
|
||||
|
||||
trueContainer, err := builder.Create(&Config{
|
||||
trueContainer, err := runtime.Create(&Config{
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"/bin/true", ""},
|
||||
})
|
||||
|
@ -538,7 +545,7 @@ func TestExitCode(t *testing.T) {
|
|||
t.Errorf("Unexpected exit code %d (expected 0)", trueContainer.State.ExitCode)
|
||||
}
|
||||
|
||||
falseContainer, err := builder.Create(&Config{
|
||||
falseContainer, err := runtime.Create(&Config{
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"/bin/false", ""},
|
||||
})
|
||||
|
@ -557,7 +564,7 @@ func TestExitCode(t *testing.T) {
|
|||
func TestRestart(t *testing.T) {
|
||||
runtime := mkRuntime(t)
|
||||
defer nuke(runtime)
|
||||
container, err := NewBuilder(runtime).Create(&Config{
|
||||
container, err := runtime.Create(&Config{
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"echo", "-n", "foobar"},
|
||||
},
|
||||
|
@ -587,7 +594,7 @@ func TestRestart(t *testing.T) {
|
|||
func TestRestartStdin(t *testing.T) {
|
||||
runtime := mkRuntime(t)
|
||||
defer nuke(runtime)
|
||||
container, err := NewBuilder(runtime).Create(&Config{
|
||||
container, err := runtime.Create(&Config{
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"cat"},
|
||||
|
||||
|
@ -664,10 +671,8 @@ func TestUser(t *testing.T) {
|
|||
runtime := mkRuntime(t)
|
||||
defer nuke(runtime)
|
||||
|
||||
builder := NewBuilder(runtime)
|
||||
|
||||
// Default user must be root
|
||||
container, err := builder.Create(&Config{
|
||||
container, err := runtime.Create(&Config{
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"id"},
|
||||
},
|
||||
|
@ -685,7 +690,7 @@ func TestUser(t *testing.T) {
|
|||
}
|
||||
|
||||
// Set a username
|
||||
container, err = builder.Create(&Config{
|
||||
container, err = runtime.Create(&Config{
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"id"},
|
||||
|
||||
|
@ -705,7 +710,7 @@ func TestUser(t *testing.T) {
|
|||
}
|
||||
|
||||
// Set a UID
|
||||
container, err = builder.Create(&Config{
|
||||
container, err = runtime.Create(&Config{
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"id"},
|
||||
|
||||
|
@ -725,7 +730,7 @@ func TestUser(t *testing.T) {
|
|||
}
|
||||
|
||||
// Set a different user by uid
|
||||
container, err = builder.Create(&Config{
|
||||
container, err = runtime.Create(&Config{
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"id"},
|
||||
|
||||
|
@ -747,7 +752,7 @@ func TestUser(t *testing.T) {
|
|||
}
|
||||
|
||||
// Set a different user by username
|
||||
container, err = builder.Create(&Config{
|
||||
container, err = runtime.Create(&Config{
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"id"},
|
||||
|
||||
|
@ -767,7 +772,7 @@ func TestUser(t *testing.T) {
|
|||
}
|
||||
|
||||
// Test an wrong username
|
||||
container, err = builder.Create(&Config{
|
||||
container, err = runtime.Create(&Config{
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"id"},
|
||||
|
||||
|
@ -788,9 +793,7 @@ func TestMultipleContainers(t *testing.T) {
|
|||
runtime := mkRuntime(t)
|
||||
defer nuke(runtime)
|
||||
|
||||
builder := NewBuilder(runtime)
|
||||
|
||||
container1, err := builder.Create(&Config{
|
||||
container1, err := runtime.Create(&Config{
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"sleep", "2"},
|
||||
},
|
||||
|
@ -800,7 +803,7 @@ func TestMultipleContainers(t *testing.T) {
|
|||
}
|
||||
defer runtime.Destroy(container1)
|
||||
|
||||
container2, err := builder.Create(&Config{
|
||||
container2, err := runtime.Create(&Config{
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"sleep", "2"},
|
||||
},
|
||||
|
@ -844,7 +847,7 @@ func TestMultipleContainers(t *testing.T) {
|
|||
func TestStdin(t *testing.T) {
|
||||
runtime := mkRuntime(t)
|
||||
defer nuke(runtime)
|
||||
container, err := NewBuilder(runtime).Create(&Config{
|
||||
container, err := runtime.Create(&Config{
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"cat"},
|
||||
|
||||
|
@ -889,7 +892,7 @@ func TestStdin(t *testing.T) {
|
|||
func TestTty(t *testing.T) {
|
||||
runtime := mkRuntime(t)
|
||||
defer nuke(runtime)
|
||||
container, err := NewBuilder(runtime).Create(&Config{
|
||||
container, err := runtime.Create(&Config{
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"cat"},
|
||||
|
||||
|
@ -934,7 +937,7 @@ func TestTty(t *testing.T) {
|
|||
func TestEnv(t *testing.T) {
|
||||
runtime := mkRuntime(t)
|
||||
defer nuke(runtime)
|
||||
container, err := NewBuilder(runtime).Create(&Config{
|
||||
container, err := runtime.Create(&Config{
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"env"},
|
||||
},
|
||||
|
@ -983,7 +986,7 @@ func TestEnv(t *testing.T) {
|
|||
func TestEntrypoint(t *testing.T) {
|
||||
runtime := mkRuntime(t)
|
||||
defer nuke(runtime)
|
||||
container, err := NewBuilder(runtime).Create(
|
||||
container, err := runtime.Create(
|
||||
&Config{
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Entrypoint: []string{"/bin/echo"},
|
||||
|
@ -1006,7 +1009,7 @@ func TestEntrypoint(t *testing.T) {
|
|||
func TestEntrypointNoCmd(t *testing.T) {
|
||||
runtime := mkRuntime(t)
|
||||
defer nuke(runtime)
|
||||
container, err := NewBuilder(runtime).Create(
|
||||
container, err := runtime.Create(
|
||||
&Config{
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Entrypoint: []string{"/bin/echo", "foobar"},
|
||||
|
@ -1057,7 +1060,7 @@ func TestLXCConfig(t *testing.T) {
|
|||
cpuMin := 100
|
||||
cpuMax := 10000
|
||||
cpu := cpuMin + rand.Intn(cpuMax-cpuMin)
|
||||
container, err := NewBuilder(runtime).Create(&Config{
|
||||
container, err := runtime.Create(&Config{
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"/bin/true"},
|
||||
|
||||
|
@ -1081,7 +1084,7 @@ func TestLXCConfig(t *testing.T) {
|
|||
func TestCustomLxcConfig(t *testing.T) {
|
||||
runtime := mkRuntime(t)
|
||||
defer nuke(runtime)
|
||||
container, err := NewBuilder(runtime).Create(&Config{
|
||||
container, err := runtime.Create(&Config{
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"/bin/true"},
|
||||
|
||||
|
@ -1112,7 +1115,7 @@ func BenchmarkRunSequencial(b *testing.B) {
|
|||
runtime := mkRuntime(b)
|
||||
defer nuke(runtime)
|
||||
for i := 0; i < b.N; i++ {
|
||||
container, err := NewBuilder(runtime).Create(&Config{
|
||||
container, err := runtime.Create(&Config{
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"echo", "-n", "foo"},
|
||||
},
|
||||
|
@ -1144,7 +1147,7 @@ func BenchmarkRunParallel(b *testing.B) {
|
|||
complete := make(chan error)
|
||||
tasks = append(tasks, complete)
|
||||
go func(i int, complete chan error) {
|
||||
container, err := NewBuilder(runtime).Create(&Config{
|
||||
container, err := runtime.Create(&Config{
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"echo", "-n", "foo"},
|
||||
},
|
||||
|
@ -1193,6 +1196,60 @@ func tempDir(t *testing.T) string {
|
|||
return tmpDir
|
||||
}
|
||||
|
||||
// Test for #1582
|
||||
func TestCopyVolumeContent(t *testing.T) {
|
||||
r := mkRuntime(t)
|
||||
defer nuke(r)
|
||||
|
||||
// Put some content in a directory of a container and commit it
|
||||
container1, _, _ := mkContainer(r, []string{"_", "/bin/sh", "-c", "mkdir -p /hello/local && echo hello > /hello/local/world"}, t)
|
||||
defer r.Destroy(container1)
|
||||
|
||||
if container1.State.Running {
|
||||
t.Errorf("Container shouldn't be running")
|
||||
}
|
||||
if err := container1.Run(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if container1.State.Running {
|
||||
t.Errorf("Container shouldn't be running")
|
||||
}
|
||||
|
||||
rwTar, err := container1.ExportRw()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
img, err := r.graph.Create(rwTar, container1, "unit test commited image", "", nil)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
// Test that the content is copied from the image to the volume
|
||||
tmpDir1 := tempDir(t)
|
||||
defer os.RemoveAll(tmpDir1)
|
||||
stdout1, _ := runContainer(r, []string{"-v", fmt.Sprintf("%s:/hello", tmpDir1), img.ID, "find", "/hello"}, t)
|
||||
if !(strings.Contains(stdout1, "/hello/local/world") && strings.Contains(stdout1, "/hello/local")) {
|
||||
t.Fatal("Container failed to transfer content to volume")
|
||||
}
|
||||
|
||||
// Test that the content is not copied when the volume is readonly
|
||||
tmpDir2 := tempDir(t)
|
||||
defer os.RemoveAll(tmpDir2)
|
||||
stdout2, _ := runContainer(r, []string{"-v", fmt.Sprintf("%s:/hello:ro", tmpDir2), img.ID, "find", "/hello"}, t)
|
||||
if strings.Contains(stdout2, "/hello/local/world") || strings.Contains(stdout2, "/hello/local") {
|
||||
t.Fatal("Container transfered content to readonly volume")
|
||||
}
|
||||
|
||||
// Test that the content is not copied when the volume is non-empty
|
||||
tmpDir3 := tempDir(t)
|
||||
defer os.RemoveAll(tmpDir3)
|
||||
writeFile(path.Join(tmpDir3, "touch-me"), "", t)
|
||||
stdout3, _ := runContainer(r, []string{"-v", fmt.Sprintf("%s:/hello:rw", tmpDir3), img.ID, "find", "/hello"}, t)
|
||||
if strings.Contains(stdout3, "/hello/local/world") || strings.Contains(stdout3, "/hello/local") || !strings.Contains(stdout3, "/hello/touch-me") {
|
||||
t.Fatal("Container transfered content to non-empty volume")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBindMounts(t *testing.T) {
|
||||
r := mkRuntime(t)
|
||||
defer nuke(r)
|
||||
|
@ -1220,7 +1277,7 @@ func TestBindMounts(t *testing.T) {
|
|||
func TestVolumesFromReadonlyMount(t *testing.T) {
|
||||
runtime := mkRuntime(t)
|
||||
defer nuke(runtime)
|
||||
container, err := NewBuilder(runtime).Create(
|
||||
container, err := runtime.Create(
|
||||
&Config{
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"/bin/echo", "-n", "foobar"},
|
||||
|
@ -1239,7 +1296,7 @@ func TestVolumesFromReadonlyMount(t *testing.T) {
|
|||
t.Fail()
|
||||
}
|
||||
|
||||
container2, err := NewBuilder(runtime).Create(
|
||||
container2, err := runtime.Create(
|
||||
&Config{
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"/bin/echo", "-n", "foobar"},
|
||||
|
@ -1275,7 +1332,7 @@ func TestRestartWithVolumes(t *testing.T) {
|
|||
runtime := mkRuntime(t)
|
||||
defer nuke(runtime)
|
||||
|
||||
container, err := NewBuilder(runtime).Create(&Config{
|
||||
container, err := runtime.Create(&Config{
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"echo", "-n", "foobar"},
|
||||
Volumes: map[string]struct{}{"/test": {}},
|
||||
|
@ -1318,7 +1375,7 @@ func TestVolumesFromWithVolumes(t *testing.T) {
|
|||
runtime := mkRuntime(t)
|
||||
defer nuke(runtime)
|
||||
|
||||
container, err := NewBuilder(runtime).Create(&Config{
|
||||
container, err := runtime.Create(&Config{
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"sh", "-c", "echo -n bar > /test/foo"},
|
||||
Volumes: map[string]struct{}{"/test": {}},
|
||||
|
@ -1345,7 +1402,7 @@ func TestVolumesFromWithVolumes(t *testing.T) {
|
|||
t.Fail()
|
||||
}
|
||||
|
||||
container2, err := NewBuilder(runtime).Create(
|
||||
container2, err := runtime.Create(
|
||||
&Config{
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"cat", "/test/foo"},
|
||||
|
@ -1386,7 +1443,7 @@ func TestOnlyLoopbackExistsWhenUsingDisableNetworkOption(t *testing.T) {
|
|||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
c, err := NewBuilder(runtime).Create(config)
|
||||
c, err := runtime.Create(config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
|
|
@ -1 +1 @@
|
|||
# Maintainer wanted! Enroll on #docker@freenode
|
||||
Kawsar Saiyeed <kawsar.saiyeed@projiris.com>
|
||||
|
|
|
@ -21,7 +21,6 @@
|
|||
# If the docker daemon is using a unix socket for communication your user
|
||||
# must have access to the socket for the completions to function correctly
|
||||
|
||||
have docker && {
|
||||
__docker_containers_all()
|
||||
{
|
||||
local containers
|
||||
|
@ -542,4 +541,3 @@ _docker()
|
|||
}
|
||||
|
||||
complete -F _docker docker
|
||||
}
|
|
@ -47,7 +47,7 @@ else
|
|||
echo "Creating /etc/init/dockerd.conf..."
|
||||
cat >/etc/init/dockerd.conf <<EOF
|
||||
description "Docker daemon"
|
||||
start on filesystem or runlevel [2345]
|
||||
start on filesystem and started lxc-net
|
||||
stop on runlevel [!2345]
|
||||
respawn
|
||||
exec /usr/local/bin/docker -d
|
||||
|
|
|
@ -16,7 +16,7 @@ import (
|
|||
|
||||
var (
|
||||
GITCOMMIT string
|
||||
VERSION string
|
||||
VERSION string
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
@ -75,6 +75,9 @@ func main() {
|
|||
}
|
||||
protoAddrParts := strings.SplitN(flHosts[0], "://", 2)
|
||||
if err := docker.ParseCommands(protoAddrParts[0], protoAddrParts[1], flag.Args()...); err != nil {
|
||||
if sterr, ok := err.(*utils.StatusError); ok {
|
||||
os.Exit(sterr.Status)
|
||||
}
|
||||
log.Fatal(err)
|
||||
os.Exit(-1)
|
||||
}
|
||||
|
|
15 docs/Dockerfile (new file)

@@ -0,0 +1,15 @@
from ubuntu:12.04
maintainer Nick Stinemates

run apt-get update
run apt-get install -y python-setuptools make
run easy_install pip
add . /docs
run pip install -r /docs/requirements.txt
run cd /docs; make docs

expose 8000

workdir /docs/_build/html

entrypoint ["python", "-m", "SimpleHTTPServer"]
@@ -27,14 +27,36 @@ Docker Remote API
2. Versions
===========

The current version of the API is 1.4
The current version of the API is 1.5

Calling /images/<name>/insert is the same as calling
/v1.4/images/<name>/insert
/v1.5/images/<name>/insert

You can still call an old version of the api using
/v1.0/images/<name>/insert

:doc:`docker_remote_api_v1.5`
*****************************

What's new
----------

.. http:post:: /images/create

   **New!** You can now pass registry credentials (via an AuthConfig object)
   through the `X-Registry-Auth` header

.. http:post:: /images/(name)/push

   **New!** The AuthConfig object now needs to be passed through
   the `X-Registry-Auth` header

.. http:get:: /containers/json

   **New!** The format of the `Ports` entry has been changed to a list of
   dicts each containing `PublicPort`, `PrivatePort` and `Type` describing a
   port mapping.
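A minimal client sketch of the new credential passing on ``/images/create``, assuming the daemon is reachable over TCP at 127.0.0.1:4243 (adjust to your ``-H`` setting); the header value is the base64 URL-encoded JSON AuthConfig, built the same way as in the CLI changes earlier in this diff:

.. code-block:: go

   package main

   import (
       "encoding/base64"
       "encoding/json"
       "fmt"
       "io"
       "net/http"
       "net/url"
       "os"
   )

   func main() {
       // AuthConfig keys as documented for /auth.
       auth := map[string]string{
           "username":      "hannibal",
           "password":      "xxxx",
           "email":         "hannibal@a-team.com",
           "serveraddress": "https://index.docker.io/v1/",
       }
       buf, _ := json.Marshal(auth)

       v := url.Values{}
       v.Set("fromImage", "ubuntu")
       v.Set("tag", "latest")

       req, err := http.NewRequest("POST", "http://127.0.0.1:4243/v1.5/images/create?"+v.Encode(), nil)
       if err != nil {
           fmt.Fprintln(os.Stderr, err)
           return
       }
       req.Header.Set("X-Registry-Auth", base64.URLEncoding.EncodeToString(buf))

       resp, err := http.DefaultClient.Do(req)
       if err != nil {
           fmt.Fprintln(os.Stderr, err)
           return
       }
       defer resp.Body.Close()
       io.Copy(os.Stdout, resp.Body) // pull progress is streamed back as JSON messages
   }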
:doc:`docker_remote_api_v1.4`
*****************************
@ -175,12 +197,13 @@ and we will add the libraries here.
|
|||
+======================+================+============================================+
|
||||
| Python | docker-py | https://github.com/dotcloud/docker-py |
|
||||
+----------------------+----------------+--------------------------------------------+
|
||||
| Ruby | docker-ruby | https://github.com/ActiveState/docker-ruby |
|
||||
+----------------------+----------------+--------------------------------------------+
|
||||
| Ruby | docker-client | https://github.com/geku/docker-client |
|
||||
+----------------------+----------------+--------------------------------------------+
|
||||
| Ruby | docker-api | https://github.com/swipely/docker-api |
|
||||
+----------------------+----------------+--------------------------------------------+
|
||||
| Javascript (NodeJS) | docker.io | https://github.com/appersonlabs/docker.io |
|
||||
| | | Install via NPM: `npm install docker.io` |
|
||||
+----------------------+----------------+--------------------------------------------+
|
||||
| Javascript | docker-js | https://github.com/dgoujard/docker-js |
|
||||
+----------------------+----------------+--------------------------------------------+
|
||||
| Javascript (Angular) | dockerui | https://github.com/crosbymichael/dockerui |
|
||||
|
@ -188,4 +211,7 @@ and we will add the libraries here.
|
|||
+----------------------+----------------+--------------------------------------------+
|
||||
| Java | docker-java | https://github.com/kpelykh/docker-java |
|
||||
+----------------------+----------------+--------------------------------------------+
|
||||
|
||||
| Erlang | erldocker | https://github.com/proger/erldocker |
|
||||
+----------------------+----------------+--------------------------------------------+
|
||||
| Go | go-dockerclient| https://github.com/fsouza/go-dockerclient |
|
||||
+----------------------+----------------+--------------------------------------------+
|
||||
|
|
|
@ -119,6 +119,7 @@ Create a container
|
|||
"AttachStdout":true,
|
||||
"AttachStderr":true,
|
||||
"PortSpecs":null,
|
||||
"Privileged": false,
|
||||
"Tty":false,
|
||||
"OpenStdin":false,
|
||||
"StdinOnce":false,
|
||||
|
@ -223,6 +224,7 @@ Inspect a container
|
|||
|
||||
:statuscode 200: no error
|
||||
:statuscode 404: no such container
|
||||
:statuscode 409: conflict between containers and images
|
||||
:statuscode 500: server error
|
||||
|
||||
|
||||
|
@ -368,7 +370,7 @@ Start a container
|
|||
Content-Type: text/plain
|
||||
|
||||
:jsonparam hostConfig: the container's host configuration (optional)
|
||||
:statuscode 200: no error
|
||||
:statuscode 204: no error
|
||||
:statuscode 404: no such container
|
||||
:statuscode 500: server error
|
||||
|
||||
|
@ -679,8 +681,8 @@ Create an image
|
|||
:statuscode 500: server error
|
||||
|
||||
|
||||
Insert a file in a image
|
||||
************************
|
||||
Insert a file in an image
|
||||
*************************
|
||||
|
||||
.. http:post:: /images/(name)/insert
|
||||
|
||||
|
@ -759,7 +761,8 @@ Inspect an image
|
|||
|
||||
:statuscode 200: no error
|
||||
:statuscode 404: no such image
|
||||
:statuscode 500: server error
|
||||
:statuscode 409: conflict between containers and images
|
||||
:statuscode 500: server error
|
||||
|
||||
|
||||
Get the history of an image
|
||||
|
@ -990,7 +993,8 @@ Check auth configuration
|
|||
{
|
||||
"username":"hannibal",
|
||||
"password:"xxxx",
|
||||
"email":"hannibal@a-team.com"
|
||||
"email":"hannibal@a-team.com",
|
||||
"serveraddress":"https://index.docker.io/v1/"
|
||||
}
|
||||
|
||||
**Example response**:
|
||||
|
@ -1082,7 +1086,7 @@ Create a new image from a container's changes
|
|||
|
||||
POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1
|
||||
|
||||
**Example response**:
|
||||
**Example response**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
|
@ -1091,15 +1095,15 @@ Create a new image from a container's changes
|
|||
|
||||
{"Id":"596069db4bf5"}
|
||||
|
||||
:query container: source container
|
||||
:query repo: repository
|
||||
:query tag: tag
|
||||
:query m: commit message
|
||||
:query author: author (eg. "John Hannibal Smith <hannibal@a-team.com>")
|
||||
:query run: config automatically applied when the image is run. (ex: {"Cmd": ["cat", "/world"], "PortSpecs":["22"]})
|
||||
:statuscode 201: no error
|
||||
:statuscode 404: no such container
|
||||
:statuscode 500: server error
|
||||
:query container: source container
|
||||
:query repo: repository
|
||||
:query tag: tag
|
||||
:query m: commit message
|
||||
:query author: author (eg. "John Hannibal Smith <hannibal@a-team.com>")
|
||||
:query run: config automatically applied when the image is run. (ex: {"Cmd": ["cat", "/world"], "PortSpecs":["22"]})
|
||||
:statuscode 201: no error
|
||||
:statuscode 404: no such container
|
||||
:statuscode 500: server error
|
||||
|
||||
|
||||
Monitor Docker's events
|
||||
|
|
1175 docs/sources/api/docker_remote_api_v1.5.rst (new file)
File diff suppressed because it is too large
|
@ -32,11 +32,13 @@ Available Commands
|
|||
command/commit
|
||||
command/cp
|
||||
command/diff
|
||||
command/events
|
||||
command/export
|
||||
command/history
|
||||
command/images
|
||||
command/import
|
||||
command/info
|
||||
command/insert
|
||||
command/inspect
|
||||
command/kill
|
||||
command/login
|
||||
|
|
34
docs/sources/commandline/command/events.rst
Normal file
34
docs/sources/commandline/command/events.rst
Normal file
|
@ -0,0 +1,34 @@
|
|||
:title: Events Command
|
||||
:description: Get real time events from the server
|
||||
:keywords: events, docker, documentation
|
||||
|
||||
=================================================================
|
||||
``events`` -- Get real time events from the server
|
||||
=================================================================
|
||||
|
||||
::
|
||||
|
||||
Usage: docker events
|
||||
|
||||
Get real time events from the server
|
||||
|
||||
Examples
|
||||
--------
|
||||
|
||||
Starting and stopping a container
|
||||
.................................
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ sudo docker start 4386fb97867d
|
||||
$ sudo docker stop 4386fb97867d
|
||||
|
||||
In another shell
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ sudo docker events
|
||||
[2013-09-03 15:49:26 +0200 CEST] 4386fb97867d: (from 12de384bfb10) start
|
||||
[2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die
|
||||
[2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop
|
||||
|
23
docs/sources/commandline/command/insert.rst
Normal file
23
docs/sources/commandline/command/insert.rst
Normal file
|
@ -0,0 +1,23 @@
|
|||
:title: Insert Command
|
||||
:description: Insert a file in an image
|
||||
:keywords: insert, image, docker, documentation
|
||||
|
||||
==========================================================================
|
||||
``insert`` -- Insert a file in an image
|
||||
==========================================================================
|
||||
|
||||
::
|
||||
|
||||
Usage: docker insert IMAGE URL PATH
|
||||
|
||||
Insert a file from URL in the IMAGE at PATH
|
||||
|
||||
Examples
|
||||
--------
|
||||
|
||||
Insert file from github
|
||||
.......................
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ sudo docker insert 8283e18b24bc https://raw.github.com/metalivedev/django/master/postinstall /tmp/postinstall.sh
|
|
@@ -8,10 +8,17 @@

::

    Usage: docker login [OPTIONS]
    Usage: docker login [OPTIONS] [SERVER]

    Register or Login to the docker registry server

    -e="": email
    -p="": password
    -u="": username

    If you want to login to a private registry you can
    specify this by adding the server name.

    example:
    docker login localhost:8080
@@ -67,7 +67,7 @@ use-cases, like running Docker within Docker.

    docker run -w /path/to/dir/ -i -t ubuntu pwd

The ``-w`` lets the command beeing executed inside directory given,
The ``-w`` lets the command being executed inside the given directory,
here /path/to/dir/. If the path does not exist, it is created inside the
container.

@@ -76,8 +76,8 @@ container.

    docker run -v `pwd`:`pwd` -w `pwd` -i -t ubuntu pwd

The ``-v`` flag mounts the current working directory into the container.
The ``-w`` lets the command beeing executed inside the current
working directory, by changeing into the directory to the value
The ``-w`` lets the command being executed inside the current
working directory, by changing into the directory to the value
returned by ``pwd``. So this combination executes the command
using the container, but inside the current working directory.
@ -17,11 +17,13 @@ Contents:
|
|||
commit <command/commit>
|
||||
cp <command/cp>
|
||||
diff <command/diff>
|
||||
events <command/events>
|
||||
export <command/export>
|
||||
history <command/history>
|
||||
images <command/images>
|
||||
import <command/import>
|
||||
info <command/info>
|
||||
insert <command/insert>
|
||||
inspect <command/inspect>
|
||||
kill <command/kill>
|
||||
login <command/login>
|
||||
|
|
|
@ -11,8 +11,8 @@ Hello World Daemon
|
|||
|
||||
The most boring daemon ever written.
|
||||
|
||||
This example assumes you have Docker installed and with the Ubuntu
|
||||
image already imported ``docker pull ubuntu``. We will use the Ubuntu
|
||||
This example assumes you have Docker installed and the Ubuntu
|
||||
image already imported with ``docker pull ubuntu``. We will use the Ubuntu
|
||||
image to run a simple hello world daemon that will just print hello
|
||||
world to standard out every second. It will continue to do this until
|
||||
we stop it.
|
||||
|
@ -56,6 +56,8 @@ Attach to the container to see the results in realtime.
|
|||
process to see what is going on.
|
||||
- **$CONTAINER_ID** The Id of the container we want to attach too.
|
||||
|
||||
Exit from the container attachment by pressing Control-C.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo docker ps
|
||||
|
|
|
@ -22,3 +22,4 @@ Contents:
|
|||
couchdb_data_volumes
|
||||
postgresql_service
|
||||
mongodb
|
||||
running_riak_service
|
||||
|
|
|
@ -86,7 +86,7 @@ http://0.0.0.0:5000/" in the log output.
|
|||
|
||||
.. code-block:: bash
|
||||
|
||||
WEB_PORT=$(docker port $WEB_WORKER 5000)
|
||||
WEB_PORT=$(sudo docker port $WEB_WORKER 5000)
|
||||
|
||||
Look up the public-facing port which is NAT-ed. Find the private port
|
||||
used by the container and store it inside of the WEB_PORT variable.
|
||||
|
|
151
docs/sources/examples/running_riak_service.rst
Normal file
151
docs/sources/examples/running_riak_service.rst
Normal file
|
@ -0,0 +1,151 @@
|
|||
:title: Running a Riak service
|
||||
:description: Build a Docker image with Riak pre-installed
|
||||
:keywords: docker, example, package installation, networking, riak
|
||||
|
||||
Riak Service
|
||||
==============================
|
||||
|
||||
.. include:: example_header.inc
|
||||
|
||||
The goal of this example is to show you how to build a Docker image with Riak
|
||||
pre-installed.
|
||||
|
||||
Creating a ``Dockerfile``
|
||||
+++++++++++++++++++++++++
|
||||
|
||||
Create an empty file called ``Dockerfile``:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
touch Dockerfile
|
||||
|
||||
Next, define the parent image you want to use to build your image on top of.
|
||||
We’ll use `Ubuntu <https://index.docker.io/_/ubuntu/>`_ (tag: ``latest``),
|
||||
which is available on the `docker index <http://index.docker.io>`_:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
# Riak
|
||||
#
|
||||
# VERSION 0.1.0
|
||||
|
||||
# Use the Ubuntu base image provided by dotCloud
|
||||
FROM ubuntu:latest
|
||||
MAINTAINER Hector Castro hector@basho.com
|
||||
|
||||
Next, we update the APT cache and apply any updates:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
# Update the APT cache
|
||||
RUN sed -i.bak 's/main$/main universe/' /etc/apt/sources.list
|
||||
RUN apt-get update
|
||||
RUN apt-get upgrade -y
|
||||
|
||||
After that, we install and setup a few dependencies:
|
||||
|
||||
- ``curl`` is used to download Basho's APT repository key
|
||||
- ``lsb-release`` helps us derive the Ubuntu release codename
|
||||
- ``openssh-server`` allows us to login to containers remotely and join Riak
|
||||
nodes to form a cluster
|
||||
- ``supervisor`` is used manage the OpenSSH and Riak processes
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
# Install and setup project dependencies
|
||||
RUN apt-get install -y curl lsb-release supervisor openssh-server
|
||||
|
||||
RUN mkdir -p /var/run/sshd
|
||||
RUN mkdir -p /var/log/supervisor
|
||||
|
||||
RUN locale-gen en_US en_US.UTF-8
|
||||
|
||||
ADD supervisord.conf /etc/supervisor/conf.d/supervisord.conf
|
||||
|
||||
RUN echo 'root:basho' | chpasswd
|
||||
|
||||
Next, we add Basho's APT repository:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
RUN curl -s http://apt.basho.com/gpg/basho.apt.key | apt-key add --
|
||||
RUN echo "deb http://apt.basho.com $(lsb_release -cs) main" > /etc/apt/sources.list.d/basho.list
|
||||
RUN apt-get update
|
||||
|
||||
After that, we install Riak and alter a few defaults:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
# Install Riak and prepare it to run
|
||||
RUN apt-get install -y riak
|
||||
RUN sed -i.bak 's/127.0.0.1/0.0.0.0/' /etc/riak/app.config
|
||||
RUN echo "ulimit -n 4096" >> /etc/default/riak
|
||||
|
||||
Almost there. Next, we add a hack to get us by the lack of ``initctl``:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
# Hack for initctl
|
||||
# See: https://github.com/dotcloud/docker/issues/1024
|
||||
RUN dpkg-divert --local --rename --add /sbin/initctl
|
||||
RUN ln -s /bin/true /sbin/initctl
|
||||
|
||||
Then, we expose the Riak Protocol Buffers and HTTP interfaces, along with SSH:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
# Expose Riak Protocol Buffers and HTTP interfaces, along with SSH
|
||||
EXPOSE 8087 8098 22
|
||||
|
||||
Finally, run ``supervisord`` so that Riak and OpenSSH are started:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
CMD ["/usr/bin/supervisord"]
|
||||
|
||||
Create a ``supervisord`` configuration file
|
||||
+++++++++++++++++++++++++++++++++++++++++++
|
||||
|
||||
Create an empty file called ``supervisord.conf``. Make sure it's at the same
|
||||
level as your ``Dockerfile``:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
touch supervisord.conf
|
||||
|
||||
Populate it with the following program definitions:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
[supervisord]
|
||||
nodaemon=true
|
||||
|
||||
[program:sshd]
|
||||
command=/usr/sbin/sshd -D
|
||||
stdout_logfile=/var/log/supervisor/%(program_name)s.log
|
||||
stderr_logfile=/var/log/supervisor/%(program_name)s.log
|
||||
autorestart=true
|
||||
|
||||
[program:riak]
|
||||
command=bash -c ". /etc/default/riak && /usr/sbin/riak console"
|
||||
pidfile=/var/log/riak/riak.pid
|
||||
stdout_logfile=/var/log/supervisor/%(program_name)s.log
|
||||
stderr_logfile=/var/log/supervisor/%(program_name)s.log
|
||||
|
||||
Build the Docker image for Riak
|
||||
+++++++++++++++++++++++++++++++
|
||||
|
||||
Now you should be able to build a Docker image for Riak:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
docker build -t "<yourname>/riak" .
|
||||
|
||||
Next steps
|
||||
++++++++++
|
||||
|
||||
Riak is a distributed database. Many production deployments consist of `at
|
||||
least five nodes <http://basho.com/why-your-riak-cluster-should-have-at-least-
|
||||
five-nodes/>`_. See the `docker-riak <https://github.com/hectcastro /docker-
|
||||
riak>`_ project details on how to deploy a Riak cluster using Docker and
|
||||
Pipework.
|
|
@ -5,18 +5,20 @@
|
|||
Using Vagrant (Amazon EC2)
|
||||
==========================
|
||||
|
||||
This page explains how to setup and run an Amazon EC2 instance from your local machine.
|
||||
Vagrant is not necessary to run Docker on EC2. You can follow the :ref:`ubuntu_linux` instructions
|
||||
installing Docker on any EC2 instance running Ubuntu
|
||||
This page explains how to setup and run an Amazon EC2 instance from
|
||||
your local machine. **Vagrant is not necessary to run Docker on
|
||||
EC2.** You can follow the :ref:`ubuntu_linux` instructions installing
|
||||
Docker on any EC2 instance running Ubuntu.
|
||||
|
||||
Please note this is a community contributed installation path. The only 'official' installation is using the
|
||||
:ref:`ubuntu_linux` installation path. This version may sometimes be out of date.
|
||||
|
||||
|
||||
Installation
|
||||
------------
|
||||
|
||||
Docker can now be installed on Amazon EC2 with a single vagrant command. Vagrant 1.1 or higher is required.
|
||||
.. include:: install_header.inc
|
||||
|
||||
.. include:: install_unofficial.inc
|
||||
|
||||
Docker can now be installed on Amazon EC2 with a single vagrant
|
||||
command. Vagrant 1.1 or higher is required.
|
||||
|
||||
1. Install vagrant from http://www.vagrantup.com/ (or use your package manager)
|
||||
2. Install the vagrant aws plugin
|
||||
|
|
|
@ -7,10 +7,6 @@
|
|||
Arch Linux
|
||||
==========
|
||||
|
||||
Please note this is a community contributed installation path. The only 'official' installation is using the
|
||||
:ref:`ubuntu_linux` installation path. This version may sometimes be out of date.
|
||||
|
||||
|
||||
Installing on Arch Linux is not officially supported but can be handled via
|
||||
either of the following AUR packages:
|
||||
|
||||
|
@ -36,6 +32,10 @@ either AUR package.
|
|||
Installation
|
||||
------------
|
||||
|
||||
.. include:: install_header.inc
|
||||
|
||||
.. include:: install_unofficial.inc
|
||||
|
||||
The instructions here assume **yaourt** is installed. See
|
||||
`Arch User Repository <https://wiki.archlinux.org/index.php/Arch_User_Repository#Installing_packages>`_
|
||||
for information on building and installing packages from the AUR if you have not
|
||||
|
|
|
@ -7,9 +7,10 @@
|
|||
Binaries
|
||||
========
|
||||
|
||||
**Please note this project is currently under heavy development. It should not be used in production.**
|
||||
.. include:: install_header.inc
|
||||
|
||||
**This instruction set is meant for hackers who want to try out Docker on a variety of environments.**
|
||||
**This instruction set is meant for hackers who want to try out Docker
|
||||
on a variety of environments.**
|
||||
|
||||
Right now, the officially supported distributions are:
|
||||
|
||||
|
@ -23,22 +24,18 @@ But we know people have had success running it under
|
|||
- Suse
|
||||
- :ref:`arch_linux`
|
||||
|
||||
Check Your Kernel
|
||||
-----------------
|
||||
|
||||
Dependencies:
|
||||
-------------
|
||||
|
||||
* 3.8 Kernel (read more about :ref:`kernel`)
|
||||
* AUFS filesystem support
|
||||
* lxc
|
||||
* xz-utils
|
||||
Your host's Linux kernel must meet the Docker :ref:`kernel`
|
||||
|
||||
Get the docker binary:
|
||||
----------------------
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
wget http://get.docker.io/builds/Linux/x86_64/docker-latest.tgz
|
||||
tar -xf docker-latest.tgz
|
||||
wget --output-document=docker https://get.docker.io/builds/Linux/x86_64/docker-latest
|
||||
chmod +x docker
|
||||
|
||||
|
||||
Run the docker daemon
|
||||
|
|
125
docs/sources/installation/gentoolinux.rst
Normal file
125
docs/sources/installation/gentoolinux.rst
Normal file
|
@ -0,0 +1,125 @@
|
|||
:title: Installation on Gentoo Linux
|
||||
:description: Docker installation instructions and nuances for Gentoo Linux.
|
||||
:keywords: gentoo linux, virtualization, docker, documentation, installation
|
||||
|
||||
.. _gentoo_linux:
|
||||
|
||||
Gentoo Linux
|
||||
============
|
||||
|
||||
.. include:: install_header.inc
|
||||
|
||||
.. include:: install_unofficial.inc
|
||||
|
||||
Installing Docker on Gentoo Linux can be accomplished by using the overlay
|
||||
provided at https://github.com/tianon/docker-overlay. The most up-to-date
|
||||
documentation for properly installing the overlay can be found in the overlay
|
||||
README. The information here is provided for reference, and may be out of date.
|
||||
|
||||
Installation
|
||||
^^^^^^^^^^^^
|
||||
|
||||
Ensure that layman is installed:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo emerge -av app-portage/layman
|
||||
|
||||
Using your favorite editor, add
|
||||
``https://raw.github.com/tianon/docker-overlay/master/repositories.xml`` to the
|
||||
``overlays`` section in ``/etc/layman/layman.cfg`` (as per instructions on the
|
||||
`Gentoo Wiki <http://wiki.gentoo.org/wiki/Layman#Adding_custom_overlays>`_),
|
||||
then invoke the following:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo layman -f -a docker
|
||||
|
||||
Once that completes, the ``app-emulation/docker`` package will be available
|
||||
for emerge:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo emerge -av app-emulation/docker
|
||||
|
||||
If you prefer to use the official binaries, or just do not wish to compile
|
||||
docker, emerge ``app-emulation/docker-bin`` instead. It is important to
|
||||
remember that Gentoo is still an unsupported platform, even when using the
|
||||
official binaries.
|
||||
|
||||
The package should already include all the necessary dependencies. For the
|
||||
simplest installation experience, use ``sys-kernel/aufs-sources`` directly as
|
||||
your kernel sources. If you prefer not to use ``sys-kernel/aufs-sources``, the
|
||||
portage tree also contains ``sys-fs/aufs3``, which contains the patches
|
||||
necessary for adding AUFS support to other kernel source packages (and a
|
||||
``kernel-patch`` use flag to perform the patching automatically).
|
||||
|
||||
Between ``app-emulation/lxc`` and ``app-emulation/docker``, all the
|
||||
necessary kernel configuration flags should be checked for and warned about in
|
||||
the standard manner.
|
||||
|
||||
If any issues arise from this ebuild or the resulting binary, including and
|
||||
especially missing kernel configuration flags and/or dependencies, `open an
|
||||
issue <https://github.com/tianon/docker-overlay/issues>`_ on the docker-overlay
|
||||
repository or ping tianon in the #docker IRC channel.
|
||||
|
||||
Starting Docker
|
||||
^^^^^^^^^^^^^^^
|
||||
|
||||
Ensure that you are running a kernel that includes the necessary AUFS support
|
||||
and includes all the necessary modules and/or configuration for LXC.
|
||||
|
||||
OpenRC
|
||||
------
|
||||
|
||||
To start the docker daemon:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo /etc/init.d/docker start
|
||||
|
||||
To start on system boot:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo rc-update add docker default
|
||||
|
||||
systemd
|
||||
-------
|
||||
|
||||
To start the docker daemon:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo systemctl start docker.service
|
||||
|
||||
To start on system boot:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo systemctl enable docker.service
|
||||
|
||||
Network Configuration
|
||||
^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
IPv4 packet forwarding is disabled by default, so internet access from inside
|
||||
the container will not work unless ``net.ipv4.ip_forward`` is enabled:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo sysctl -w net.ipv4.ip_forward=1
|
||||
|
||||
Or, to enable it more permanently:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
echo net.ipv4.ip_forward = 1 | sudo tee /etc/sysctl.d/docker.conf
|
||||
|
||||
fork/exec /usr/sbin/lxc-start: operation not permitted
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Unfortunately, Gentoo suffers from `issue #1422
|
||||
<https://github.com/dotcloud/docker/issues/1422>`_, meaning that after every
|
||||
fresh start of docker, the first docker run fails due to some tricky terminal
|
||||
issues, so be sure to run something trivial (such as ``docker run -i -t busybox
|
||||
echo hi``) before attempting to run anything important.
|
|
@ -24,5 +24,6 @@ Contents:
|
|||
amazon
|
||||
rackspace
|
||||
archlinux
|
||||
gentoolinux
|
||||
upgrading
|
||||
kernel
|
||||
|
|
7
docs/sources/installation/install_header.inc
Normal file
7
docs/sources/installation/install_header.inc
Normal file
|
@ -0,0 +1,7 @@
|
|||
|
||||
.. note::
|
||||
|
||||
Docker is still under heavy development! We don't recommend using
|
||||
it in production yet, but we're getting closer with each
|
||||
release. Please see our blog post, `"Getting to Docker 1.0"
|
||||
<http://blog.docker.io/2013/08/getting-to-docker-1-0/>`_
|
7
docs/sources/installation/install_unofficial.inc
Normal file
7
docs/sources/installation/install_unofficial.inc
Normal file
|
@ -0,0 +1,7 @@
|
|||
|
||||
.. note::
|
||||
|
||||
This is a community contributed installation path. The only
|
||||
'official' installation is using the :ref:`ubuntu_linux`
|
||||
installation path. This version may be out of date because it
|
||||
depends on some binaries to be updated and published
|
|
@ -6,21 +6,22 @@

Rackspace Cloud
===============

Please note this is a community contributed installation path. The only 'official' installation is using the
:ref:`ubuntu_linux` installation path. This version may sometimes be out of date.
.. include:: install_unofficial.inc


Installing Docker on Ubuntu provided by Rackspace is pretty straightforward, and you should mostly be able to follow the
Installing Docker on Ubuntu provided by Rackspace is pretty
straightforward, and you should mostly be able to follow the
:ref:`ubuntu_linux` installation guide.

**However, there is one caveat:**

If you are using any linux not already shipping with the 3.8 kernel you will need to install it. And this is a little
more difficult on Rackspace.
If you are using any Linux not already shipping with the 3.8 kernel
you will need to install it. This is a little more difficult on
Rackspace.

Rackspace boots their servers using grub's menu.lst and does not like non 'virtual' packages (e.g. xen compatible)
kernels there, although they do work. This makes ``update-grub`` to not have the expected result, and you need to
set the kernel manually.
Rackspace boots their servers using grub's ``menu.lst`` and does not
like non 'virtual' packages (e.g. xen compatible) kernels there,
although they do work. This means ``update-grub`` will not have the
expected result, and you need to set the kernel manually.

**Do not attempt this on a production machine!**


@ -33,7 +34,8 @@ set the kernel manually.

    apt-get install linux-generic-lts-raring


Great, now you have kernel installed in /boot/, next is to make it boot next time.
Great, now you have the kernel installed in ``/boot/``; next is to make it
boot next time.

.. code-block:: bash


@ -43,9 +45,10 @@ Great, now you have kernel installed in /boot/, next is to make it boot next tim

    # this should return some results


Now you need to manually edit /boot/grub/menu.lst, you will find a section at the bottom with the existing options.
Copy the top one and substitute the new kernel into that. Make sure the new kernel is on top, and double check kernel
and initrd point to the right files.
Now you need to manually edit ``/boot/grub/menu.lst``; you will find a
section at the bottom with the existing options. Copy the top one and
substitute the new kernel into that. Make sure the new kernel is on
top, and double-check that the kernel and initrd entries point to the right files.

Take special care to double-check the kernel and initrd entries.


@ -92,4 +95,4 @@ Verify the kernel was updated

    # nice! 3.8.


Now you can finish with the :ref:`ubuntu_linux` instructions.
Now you can finish with the :ref:`ubuntu_linux` instructions.

@ -2,15 +2,17 @@

:description: Please note this project is currently under heavy development. It should not be used in production.
:keywords: Docker, Docker documentation, requirements, virtualbox, vagrant, git, ssh, putty, cygwin, linux

**These instructions have changed for 0.6. If you are upgrading from an earlier version, you will need to follow them again.**

.. _ubuntu_linux:

Ubuntu Linux
============

**Please note this project is currently under heavy development. It should not be used in production.**
.. warning::

   These instructions have changed for 0.6. If you are upgrading from
   an earlier version, you will need to follow them again.

.. include:: install_header.inc

Right now, the officially supported distributions are:

@ -22,7 +24,8 @@ Docker has the following dependencies

* Linux kernel 3.8 (read more about :ref:`kernel`)
* AUFS file system support (we are working on BTRFS support as an alternative)

Please read :ref:`ufw`, if you plan to use `UFW (Uncomplicated Firewall) <https://help.ubuntu.com/community/UFW>`_
Please read :ref:`ufw`, if you plan to use `UFW (Uncomplicated
Firewall) <https://help.ubuntu.com/community/UFW>`_

.. _ubuntu_precise:

@ -38,12 +41,13 @@ Dependencies

**Linux kernel 3.8**

Due to a bug in LXC, docker works best on the 3.8 kernel. Precise
comes with a 3.2 kernel, so we need to upgrade it. The kernel you'll install when following these steps
comes with AUFS built in. We also include the generic headers
to enable packages that depend on them, like ZFS and the VirtualBox
guest additions. If you didn't install the headers for your "precise"
kernel, then you can skip these headers for the "raring" kernel. But
it is safer to include them if you're not sure.
comes with a 3.2 kernel, so we need to upgrade it. The kernel you'll
install when following these steps comes with AUFS built in. We also
include the generic headers to enable packages that depend on them,
like ZFS and the VirtualBox guest additions. If you didn't install the
headers for your "precise" kernel, then you can skip these headers for
the "raring" kernel. But it is safer to include them if you're not
sure.


.. code-block:: bash

@ -59,14 +63,18 @@ it is safer to include them if you're not sure.

Installation
------------

.. warning::

   These instructions have changed for 0.6. If you are upgrading from
   an earlier version, you will need to follow them again.

Docker is available as a Debian package, which makes installation easy.

*Please note that these instructions have changed for 0.6. If you are upgrading from an earlier version, you will need
to follow them again.*

.. code-block:: bash

    # Add the Docker repository key to your local keychain
    # using apt-key finger you can check the fingerprint matches 36A1 D786 9245 C895 0F96 6E92 D857 6A8B A88D 21E9
    sudo sh -c "curl https://get.docker.io/gpg | apt-key add -"

    # Add the Docker repository to your apt sources list.

@ -120,6 +128,7 @@ to follow them again.*

.. code-block:: bash

    # Add the Docker repository key to your local keychain
    # using apt-key finger you can check the fingerprint matches 36A1 D786 9245 C895 0F96 6E92 D857 6A8B A88D 21E9
    sudo sh -c "curl http://get.docker.io/gpg | apt-key add -"

    # Add the Docker repository to your apt sources list.

@ -136,7 +145,8 @@ Verify it worked

.. code-block:: bash

    # download the base 'ubuntu' container and run bash inside it while setting up an interactive shell
    # download the base 'ubuntu' container
    # and run bash inside it while setting up an interactive shell
    sudo docker run -i -t ubuntu /bin/bash

    # type exit to exit

@ -150,7 +160,8 @@ Verify it worked

Docker and UFW
^^^^^^^^^^^^^^

Docker uses a bridge to manage containers networking, by default UFW drop all `forwarding`, a first step is to enable forwarding:
Docker uses a bridge to manage container networking; by default UFW
drops all `forwarding`, so a first step is to enable forwarding:

.. code-block:: bash


@ -168,8 +179,9 @@ Then reload UFW:

    sudo ufw reload


UFW's default set of rules denied all `incoming`, so if you want to be able to reach your containers from another host,
you should allow incoming connections on the docker port (default 4243):
UFW's default set of rules denies all `incoming`, so if you want to be
able to reach your containers from another host, you should allow
incoming connections on the docker port (default 4243):

.. code-block:: bash

@ -5,18 +5,32 @@

.. _upgrading:

Upgrading
============
=========

**These instructions are for upgrading Docker**
The technique for upgrading ``docker`` to a newer version depends on
how you installed ``docker``.

.. versionadded:: 0.5.3
   You may wish to add a ``docker`` group to your system to avoid using sudo with ``docker``. (see :ref:`dockergroup`)


After normal installation
-------------------------
After ``apt-get``
-----------------

If you installed Docker normally using apt-get or used Vagrant, use apt-get to upgrade.
If you installed Docker using ``apt-get`` or Vagrant, then you should
use ``apt-get`` to upgrade.

.. versionadded:: 0.6
   Add Docker repository information to your system first.

.. code-block:: bash

    # Add the Docker repository key to your local keychain
    sudo sh -c "curl https://get.docker.io/gpg | apt-key add -"

    # Add the Docker repository to your apt sources list.
    sudo sh -c "echo deb https://get.docker.io/ubuntu docker main > /etc/apt/sources.list.d/docker.list"

    # update your sources list
    sudo apt-get update


@ -27,7 +41,7 @@ If you installed Docker normally using apt-get or used Vagrant, use apt-get to u

After manual installation
-------------------------

If you installed the Docker binary
If you installed the Docker :ref:`binaries` then follow these steps:

.. code-block:: bash

@ -48,8 +62,10 @@ If you installed the Docker binary

    tar -xf docker-latest.tgz


Start docker in daemon mode (-d) and disconnect (&) starting ./docker will start the version in your current dir rather than a version which
might reside in your path.
Start docker in daemon mode (``-d``) and disconnect, running the
daemon in the background (``&``). Starting as ``./docker`` guarantees
that you run the version in your current directory rather than a
version which might reside in your path.

.. code-block:: bash

@ -2,37 +2,48 @@

:description: This guide will setup a new virtualbox virtual machine with docker installed on your computer.
:keywords: Docker, Docker documentation, virtualbox, vagrant, git, ssh, putty, cygwin

**Vagrant installation is temporarily out of date, it will be updated for 0.6 soon.**

.. _install_using_vagrant:

Using Vagrant (Mac, Linux)
==========================

This guide will setup a new virtualbox virtual machine with docker installed on your computer. This works on most operating
systems, including MacOX, Windows, Linux, FreeBSD and others. If you can install these and have at least 400Mb RAM
to spare you should be good.

This guide will set up a new VirtualBox virtual machine with docker
installed on your computer. This works on most operating systems,
including Mac OS X, Windows, Linux, FreeBSD and others. If you can
install these and have at least 400MB RAM to spare you should be good.

Install Vagrant and Virtualbox
------------------------------

1. Install virtualbox from https://www.virtualbox.org/ (or use your package manager)
2. Install vagrant from http://www.vagrantup.com/ (or use your package manager)
3. Install git if you had not installed it before, check if it is installed by running
``git`` in a terminal window
.. include:: install_header.inc

.. include:: install_unofficial.inc

#. Install virtualbox from https://www.virtualbox.org/ (or use your
   package manager)
#. Install vagrant from http://www.vagrantup.com/ (or use your package
   manager)
#. Install git if you have not installed it before; check if it is
   installed by running ``git`` in a terminal window


Spin it up
----------

1. Fetch the docker sources (this includes the Vagrantfile for machine setup).
1. Fetch the docker sources (this includes the ``Vagrantfile`` for
   machine setup).

   .. code-block:: bash

      git clone https://github.com/dotcloud/docker.git

2. Run vagrant from the sources directory
2. Change directory to docker

   .. code-block:: bash

      cd docker

3. Run vagrant from the sources directory

   .. code-block:: bash

@ -2,21 +2,21 @@

:description: Docker's tutorial to run docker on Windows
:keywords: Docker, Docker documentation, Windows, requirements, virtualbox, vagrant, git, ssh, putty, cygwin

**Vagrant installation is temporarily out of date, it will be updated for 0.6 soon.**

.. _windows:

Using Vagrant (Windows)
=======================

Please note this is a community contributed installation path. The only 'official' installation is using the :ref:`ubuntu_linux` installation path. This version
may be out of date because it depends on some binaries to be updated and published
Docker can run on Windows using a VM like VirtualBox. You then run
Linux within the VM.


Requirements
Installation
------------

.. include:: install_header.inc

.. include:: install_unofficial.inc

1. Install virtualbox from https://www.virtualbox.org - or follow this tutorial__

.. __: http://www.slideshare.net/julienbarbier42/install-virtualbox-on-windows-7

@ -35,7 +35,10 @@ We recommend having at least 2Gb of free disk space and 2Gb of RAM (or more).

Opening a command prompt
------------------------

First open a cmd prompt. Press Windows key and then press “R” key. This will open the RUN dialog box for you. Type “cmd” and press Enter. Or you can click on Start, type “cmd” in the “Search programs and files” field, and click on cmd.exe.
First open a cmd prompt. Press the Windows key and then press the “R”
key. This will open the RUN dialog box for you. Type “cmd” and press
Enter. Or you can click on Start, type “cmd” in the “Search programs
and files” field, and click on cmd.exe.

.. image:: images/win/_01.gif
   :alt: Git install

@ -47,14 +50,17 @@ This should open a cmd prompt window.

   :alt: run docker
   :align: center

Alternatively, you can also use a Cygwin terminal, or Git Bash (or any other command line program you are usually using). The next steps would be the same.
Alternatively, you can also use a Cygwin terminal, or Git Bash (or any
other command line program you are usually using). The next steps
would be the same.

.. _launch_ubuntu:

Launch an Ubuntu virtual server
-------------------------------

Let’s download and run an Ubuntu image with docker binaries already installed.
Let’s download and run an Ubuntu image with docker binaries already
installed.

.. code-block:: bash

@ -66,7 +72,9 @@ Let’s download and run an Ubuntu image with docker binaries already installed.

   :alt: run docker
   :align: center

Congratulations! You are running an Ubuntu server with docker installed on it. You do not see it though, because it is running in the background.
Congratulations! You are running an Ubuntu server with docker
installed on it. You do not see it though, because it is running in
the background.

Log onto your Ubuntu server
---------------------------

@ -85,7 +93,12 @@ Run the following command

    vagrant ssh

You may see an error message starting with “`ssh` executable not found”. In this case it means that you do not have SSH in your PATH. If you do not have SSH in your PATH you can set it up with the “set” command. For instance, if your ssh.exe is in the folder named “C:\Program Files (x86)\Git\bin”, then you can run the following command:
You may see an error message starting with “`ssh` executable not
found”. In this case it means that you do not have SSH in your
PATH. If you do not have SSH in your PATH you can set it up with the
“set” command. For instance, if your ssh.exe is in the folder named
“C:\Program Files (x86)\Git\bin”, then you can run the following
command:

.. code-block:: bash


@ -104,13 +117,16 @@ First step is to get the IP and port of your Ubuntu server. Simply run:

    vagrant ssh-config

You should see an output with HostName and Port information. In this example, HostName is 127.0.0.1 and port is 2222. And the User is “vagrant”. The password is not shown, but it is also “vagrant”.
You should see an output with HostName and Port information. In this
example, HostName is 127.0.0.1 and port is 2222. And the User is
“vagrant”. The password is not shown, but it is also “vagrant”.

.. image:: images/win/ssh-config.gif
   :alt: run docker
   :align: center

You can now use this information for connecting via SSH to your server. To do so you can:
You can now use this information for connecting via SSH to your
server. To do so you can:

- Use putty.exe OR
- Use SSH from a terminal

@ -118,8 +134,9 @@ You can now use this information for connecting via SSH to your server. To do so

Use putty.exe
'''''''''''''

You can download putty.exe from this page http://www.chiark.greenend.org.uk/~sgtatham/putty/download.html
Launch putty.exe and simply enter the information you got from last step.
You can download putty.exe from this page
http://www.chiark.greenend.org.uk/~sgtatham/putty/download.html Launch
putty.exe and simply enter the information you got from last step.

.. image:: images/win/putty.gif
   :alt: run docker

@ -134,7 +151,9 @@ Open, and enter user = vagrant and password = vagrant.

SSH from a terminal
'''''''''''''''''''

You can also run this command on your favorite terminal (windows prompt, cygwin, git-bash, …). Make sure to adapt the IP and port from what you got from the vagrant ssh-config command.
You can also run this command on your favorite terminal (windows
prompt, cygwin, git-bash, …). Make sure to adapt the IP and port from
what you got from the vagrant ssh-config command.

.. code-block:: bash


@ -146,12 +165,14 @@ Enter user = vagrant and password = vagrant.

   :alt: run docker
   :align: center

Congratulations, you are now logged onto your Ubuntu Server, running on top of your Windows machine !
Congratulations, you are now logged onto your Ubuntu Server, running
on top of your Windows machine!

Running Docker
--------------

First you have to be root in order to run docker. Simply run the following command:
First you have to be root in order to run docker. Simply run the
following command:

.. code-block:: bash


@ -179,10 +200,11 @@ VM does not boot

.. image:: images/win/ts_go_bios.JPG

If you run into this error message "The VM failed to remain in the 'running'
state while attempting to boot", please check that your computer has virtualization
technology available and activated by going to the BIOS. Here's an example for an HP
computer (System configuration / Device configuration)
If you run into this error message "The VM failed to remain in the
'running' state while attempting to boot", please check that your
computer has virtualization technology available and activated by
going to the BIOS. Here's an example for an HP computer (System
configuration / Device configuration).

.. image:: images/win/hp_bios_vm.JPG


@ -192,5 +214,6 @@ Docker is not installed

.. image:: images/win/ts_no_docker.JPG

If you run into this error message "The program 'docker' is currently not installed",
try deleting the docker folder and restart from :ref:`launch_ubuntu`
If you run into this error message "The program 'docker' is currently
not installed", try deleting the docker folder and restart from
:ref:`launch_ubuntu`.

@ -37,6 +37,8 @@ Running an interactive shell

    # use the escape sequence Ctrl-p + Ctrl-q
    sudo docker run -i -t ubuntu /bin/bash

.. _dockergroup:

Why ``sudo``?
-------------

@ -140,7 +142,7 @@ Expose a service on a TCP port

.. code-block:: bash

    # Expose port 4444 of this container, and tell netcat to listen on it
    JOB=$(sudo docker run -d -p 4444 ubuntu /bin/nc -l -p 4444)
    JOB=$(sudo docker run -d -p 4444 ubuntu:12.10 /bin/nc -l -p 4444)

    # Which public port is NATed to my container?
    PORT=$(sudo docker port $JOB 4444)

|

``FROM <image>``

Or

``FROM <image>:<tag>``

The ``FROM`` instruction sets the :ref:`base_image_def` for subsequent
instructions. As such, a valid Dockerfile must have ``FROM`` as its
first instruction. The image can be any valid image -- it is
@ -81,6 +85,9 @@ especially easy to start by **pulling an image** from the
to create multiple images. Simply make a note of the last image id
output by the commit before each new ``FROM`` command.

If no ``tag`` is given to the ``FROM`` instruction, ``latest`` is
assumed. If the used tag does not exist, an error will be returned.

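To make the tag defaulting concrete, here is a minimal Go sketch of how an
``image[:tag]`` reference can be split, with ``latest`` filled in when no tag
is present. This is an illustrative helper only, not Docker's actual parser.

.. code-block:: go

   package main

   import (
   	"fmt"
   	"strings"
   )

   // parseImageName splits an "image[:tag]" reference and falls back to
   // "latest" when no tag is given (hypothetical helper for illustration).
   func parseImageName(name string) (repo, tag string) {
   	// Ignore a colon that belongs to a registry host:port prefix.
   	if i := strings.LastIndex(name, ":"); i >= 0 && !strings.Contains(name[i+1:], "/") {
   		return name[:i], name[i+1:]
   	}
   	return name, "latest"
   }

   func main() {
   	fmt.Println(parseImageName("ubuntu"))       // ubuntu latest
   	fmt.Println(parseImageName("ubuntu:12.10")) // ubuntu 12.10
   }
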
3.2 MAINTAINER
--------------

@ -8,7 +8,7 @@

Port redirection
================

Docker can redirect public TCP ports to your container, so it can be
Docker can redirect public TCP and UDP ports to your container, so it can be
reached over the network. Port redirection is done on ``docker run``
using the -p flag.

@ -25,6 +25,12 @@ will be allocated.

    # PUBLIC port 80 is redirected to PRIVATE port 80
    sudo docker run -p 80:80 <image> <cmd>

To redirect a UDP port the redirection must be expressed as *PUBLIC:PRIVATE/udp*:

.. code-block:: bash

    # PUBLIC port 5300 is redirected to the PRIVATE port 53 using UDP
    sudo docker run -p 5300:53/udp <image> <cmd>

Default port redirects can be built into a container with the
``EXPOSE`` build command.
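
To illustrate the *PUBLIC:PRIVATE/proto* syntax above, here is a small Go
sketch of how such a port spec can be parsed, defaulting the protocol to
``tcp``. This is an illustrative helper, not Docker's actual implementation.

.. code-block:: go

   package main

   import (
   	"fmt"
   	"strings"
   )

   // parsePortSpec splits a "PUBLIC:PRIVATE[/proto]" mapping of the kind
   // accepted by `docker run -p`. The proto defaults to "tcp" when omitted.
   // (Illustrative helper only.)
   func parsePortSpec(spec string) (public, private, proto string, err error) {
   	proto = "tcp"
   	if i := strings.LastIndex(spec, "/"); i >= 0 {
   		proto = spec[i+1:]
   		spec = spec[:i]
   	}
   	parts := strings.SplitN(spec, ":", 2)
   	if len(parts) != 2 {
   		return "", "", "", fmt.Errorf("invalid port spec: %s", spec)
   	}
   	return parts[0], parts[1], proto, nil
   }

   func main() {
   	fmt.Println(parsePortSpec("5300:53/udp")) // 5300 53 udp <nil>
   	fmt.Println(parsePortSpec("80:80"))       // 80 80 tcp <nil>
   }
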
27
docs/theme/docker/layout.html
vendored
27
docs/theme/docker/layout.html
vendored
|
@ -111,6 +111,13 @@
|
|||
|
||||
<div class="span3 sidebar bs-docs-sidebar">
|
||||
{{ toctree(collapse=False, maxdepth=3) }}
|
||||
<form>
|
||||
<input type="text" id="st-search-input" class="st-search-input span3" style="width:160px;" />
|
||||
</form>
|
||||
<a href="http://swiftype.com?ref=pb">
|
||||
<img id="swiftype-img" src="http://swiftype.com/assets/media/swiftype-logo-lightbg-small.png"
|
||||
alt="Search by Swiftype" />
|
||||
</a>
|
||||
</div>
|
||||
|
||||
<!-- body block -->
|
||||
|
@ -122,6 +129,26 @@
|
|||
{% block body %}{% endblock %}
|
||||
</section>
|
||||
|
||||
<!-- Swiftype search -->
|
||||
<div id="st-results-container"></div>
|
||||
<script type="text/javascript">
|
||||
var Swiftype = window.Swiftype || {};
|
||||
(function() {
|
||||
Swiftype.key = 'pWPnnyvwcfpcrw1o51Sz';
|
||||
Swiftype.inputElement = '#st-search-input';
|
||||
Swiftype.resultContainingElement = '#st-results-container';
|
||||
Swiftype.attachElement = '#st-search-input';
|
||||
Swiftype.renderStyle = "overlay";
|
||||
|
||||
var script = document.createElement('script');
|
||||
script.type = 'text/javascript';
|
||||
script.async = true;
|
||||
script.src = "//swiftype.com/embed.js";
|
||||
var entry = document.getElementsByTagName('script')[0];
|
||||
entry.parentNode.insertBefore(script, entry);
|
||||
}());
|
||||
</script>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
|
18
docs/theme/docker/static/css/main.css
vendored
18
docs/theme/docker/static/css/main.css
vendored
|
@ -391,3 +391,21 @@ dt:hover > a.headerlink {
|
|||
float: right;
|
||||
visibility: hidden;
|
||||
}
|
||||
|
||||
/* Swiftype style */
|
||||
|
||||
#st-search-input {
|
||||
margin-right: 14px;
|
||||
margin-left: 9px;
|
||||
height: 19px;
|
||||
width: 120px;
|
||||
|
||||
}
|
||||
#swiftype-img {
|
||||
border: none;
|
||||
width: 145px;
|
||||
height: auto;
|
||||
margin: 0px auto;
|
||||
margin-left: 13px;
|
||||
margin-top: -30px;
|
||||
}
|
29
graph.go
29
graph.go
|
@ -202,6 +202,8 @@ func (graph *Graph) getDockerInitLayer() (string, error) {
|
|||
"/sys": "dir",
|
||||
"/.dockerinit": "file",
|
||||
"/etc/resolv.conf": "file",
|
||||
"/etc/hosts": "file",
|
||||
"/etc/hostname": "file",
|
||||
// "var/run": "dir",
|
||||
// "var/lock": "dir",
|
||||
} {
|
||||
|
@ -272,30 +274,19 @@ func (graph *Graph) Delete(name string) error {
|
|||
|
||||
// Map returns a list of all images in the graph, addressable by ID.
|
||||
func (graph *Graph) Map() (map[string]*Image, error) {
|
||||
// FIXME: this should replace All()
|
||||
all, err := graph.All()
|
||||
images := make(map[string]*Image)
|
||||
err := graph.walkAll(func(image *Image) {
|
||||
images[image.ID] = image
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
images := make(map[string]*Image, len(all))
|
||||
for _, image := range all {
|
||||
images[image.ID] = image
|
||||
}
|
||||
return images, nil
|
||||
}
|
||||
|
||||
// All returns a list of all images in the graph.
|
||||
func (graph *Graph) All() ([]*Image, error) {
|
||||
var images []*Image
|
||||
err := graph.WalkAll(func(image *Image) {
|
||||
images = append(images, image)
|
||||
})
|
||||
return images, err
|
||||
}
|
||||
|
||||
// WalkAll iterates over each image in the graph, and passes it to a handler.
|
||||
// walkAll iterates over each image in the graph, and passes it to a handler.
|
||||
// The walking order is undetermined.
|
||||
func (graph *Graph) WalkAll(handler func(*Image)) error {
|
||||
func (graph *Graph) walkAll(handler func(*Image)) error {
|
||||
files, err := ioutil.ReadDir(graph.Root)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -317,7 +308,7 @@ func (graph *Graph) WalkAll(handler func(*Image)) error {
|
|||
// If an image has no children, it will not have an entry in the table.
|
||||
func (graph *Graph) ByParent() (map[string][]*Image, error) {
|
||||
byParent := make(map[string][]*Image)
|
||||
err := graph.WalkAll(func(image *Image) {
|
||||
err := graph.walkAll(func(image *Image) {
|
||||
parent, err := graph.Get(image.Parent)
|
||||
if err != nil {
|
||||
return
|
||||
|
@ -339,7 +330,7 @@ func (graph *Graph) Heads() (map[string]*Image, error) {
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = graph.WalkAll(func(image *Image) {
|
||||
err = graph.walkAll(func(image *Image) {
|
||||
// If it's not in the byParent lookup table, then
|
||||
// it's not a parent -> so it's a head!
|
||||
if _, exists := byParent[image.ID]; !exists {
|
||||
|
|
|
@ -20,11 +20,11 @@ func TestInit(t *testing.T) {
|
|||
if _, err := os.Stat(graph.Root); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// All() should be empty
|
||||
if l, err := graph.All(); err != nil {
|
||||
// Map() should be empty
|
||||
if l, err := graph.Map(); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if len(l) != 0 {
|
||||
t.Fatalf("List() should return %d, not %d", 0, len(l))
|
||||
t.Fatalf("len(Map()) should return %d, not %d", 0, len(l))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -76,11 +76,15 @@ func TestGraphCreate(t *testing.T) {
|
|||
if image.DockerVersion != VERSION {
|
||||
t.Fatalf("Wrong docker_version: should be '%s', not '%s'", VERSION, image.DockerVersion)
|
||||
}
|
||||
if images, err := graph.All(); err != nil {
|
||||
images, err := graph.Map()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
} else if l := len(images); l != 1 {
|
||||
t.Fatalf("Wrong number of images. Should be %d, not %d", 1, l)
|
||||
}
|
||||
if images[image.ID] == nil {
|
||||
t.Fatalf("Could not find image with id %s", image.ID)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRegister(t *testing.T) {
|
||||
|
@ -99,7 +103,7 @@ func TestRegister(t *testing.T) {
|
|||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if images, err := graph.All(); err != nil {
|
||||
if images, err := graph.Map(); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if l := len(images); l != 1 {
|
||||
t.Fatalf("Wrong number of images. Should be %d, not %d", 1, l)
|
||||
|
@ -274,7 +278,7 @@ func TestByParent(t *testing.T) {
|
|||
}
|
||||
|
||||
func assertNImages(graph *Graph, t *testing.T, n int) {
|
||||
if images, err := graph.All(); err != nil {
|
||||
if images, err := graph.Map(); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if actualN := len(images); actualN != n {
|
||||
t.Fatalf("Expected %d images, found %d", n, actualN)
|
||||
|
|
|
@ -7,99 +7,39 @@ For a more complete view of planned and requested improvements, see [the Github
|
|||
|
||||
To suggest changes to the roadmap, including additions, please write the change as if it were already in effect, and make a pull request.
|
||||
|
||||
Broader kernel support
|
||||
----------------------
|
||||
|
||||
Our goal is to make Docker run everywhere, but currently Docker requires [Linux version 3.8 or higher with lxc and aufs support](http://docs.docker.io/en/latest/installation/kernel.html). If you're deploying new machines for the purpose of running Docker, this is a fairly easy requirement to meet.
|
||||
However, if you're adding Docker to an existing deployment, you may not have the flexibility to update and patch the kernel.
|
||||
## Container wiring and service discovery
|
||||
|
||||
Expanding Docker's kernel support is a priority. This includes running on older kernel versions,
|
||||
but also on kernels with no AUFS support, or with incomplete lxc capabilities.
|
||||
In its current version, docker doesn’t make it very easy to manipulate multiple containers as a cohesive group (ie. orchestration), and it doesn’t make it seamless for containers to connect to each other as network services (ie. wiring).
|
||||
|
||||
To achieve wiring and orchestration with docker today, you need to write glue scripts yourself, or use one of several companion tools available, like Orchestra, Shipper, Deis, Pipeworks, etc.
|
||||
|
||||
We want the Docker API to support orchestration and wiring natively, so that these tools can cleanly and seamlessly integrate into the Docker user experience, and remain interoperable with each other.
|
||||
|
||||
|
||||
Cross-architecture support
|
||||
--------------------------
|
||||
## Better integration with process supervisors
|
||||
|
||||
Our goal is to make Docker run everywhere. However currently Docker only runs on x86_64 systems.
|
||||
We plan on expanding architecture support, so that Docker containers can be created and used on more architectures.
|
||||
For docker to be fully usable in production, it needs to cleanly integrate with the host machine’s process supervisor of choice. Whether it’s sysV-init, upstart, systemd, runit or supervisord, we want to make sure docker plays nice with your existing system. This will be a major focus of the 0.7 release.
|
||||
|
||||
|
||||
Even more integrations
|
||||
----------------------
|
||||
## Plugin API
|
||||
|
||||
We want Docker to be the secret ingredient that makes your existing tools more awesome.
|
||||
Thanks to this philosophy, Docker has already been integrated with
|
||||
[Puppet](http://forge.puppetlabs.com/garethr/docker), [Chef](http://www.opscode.com/chef),
|
||||
[Openstack Nova](https://github.com/dotcloud/openstack-docker), [Jenkins](https://github.com/georgebashi/jenkins-docker-plugin),
|
||||
[DotCloud sandbox](http://github.com/dotcloud/sandbox), [Pallet](https://github.com/pallet/pallet-docker),
|
||||
[Strider CI](http://blog.frozenridge.co/next-generation-continuous-integration-deployment-with-dotclouds-docker-and-strider/)
|
||||
and even [Heroku buildpacks](https://github.com/progrium/buildstep).
|
||||
We want Docker to run everywhere, and to integrate with every devops tool. Those are ambitious goals, and the only way to reach them is with the Docker community. For the community to participate fully, we need an API which allows Docker to be deeply and easily customized.
|
||||
|
||||
Expect Docker to integrate with even more of your favorite tools going forward, including:
|
||||
|
||||
* Alternative storage backends such as ZFS, LVM or [BTRFS](github.com/dotcloud/docker/issues/443)
|
||||
* Alternative containerization backends such as [OpenVZ](http://openvz.org), Solaris Zones, BSD Jails and even plain Chroot.
|
||||
* Process managers like [Supervisord](http://supervisord.org/), [Runit](http://smarden.org/runit/), [Gaffer](https://gaffer.readthedocs.org/en/latest/#gaffer) and [Systemd](http://www.freedesktop.org/wiki/Software/systemd/)
|
||||
* Build and integration tools like Make, Maven, Scons, Jenkins, Buildbot and Cruise Control.
|
||||
* Configuration management tools like [Puppet](http://puppetlabs.com), [Chef](http://www.opscode.com/chef/) and [Salt](http://saltstack.org)
|
||||
* Personal development environments like [Vagrant](http://vagrantup.com), [Boxen](http://boxen.github.com/), [Koding](http://koding.com) and [Cloud9](http://c9.io).
|
||||
* Orchestration tools like [Zookeeper](http://zookeeper.apache.org/), [Mesos](http://incubator.apache.org/mesos/) and [Galaxy](https://github.com/ning/galaxy)
|
||||
* Infrastructure deployment tools like [Openstack](http://openstack.org), [Apache Cloudstack](http://apache.cloudstack.org), [Ganeti](https://code.google.com/p/ganeti/)
|
||||
We are working on a plugin API which will make Docker very, very customization-friendly. We believe it will facilitate the integrations listed above – and many more we didn’t even think about.
|
||||
|
||||
|
||||
Plugin API
|
||||
----------
|
||||
## Broader kernel support
|
||||
|
||||
We want Docker to run everywhere, and to integrate with every devops tool.
|
||||
Those are ambitious goals, and the only way to reach them is with the Docker community.
|
||||
For the community to participate fully, we need an API which allows Docker to be deeply and easily customized.
|
||||
Our goal is to make Docker run everywhere, but currently Docker requires Linux version 3.8 or higher with lxc and aufs support. If you’re deploying new machines for the purpose of running Docker, this is a fairly easy requirement to meet. However, if you’re adding Docker to an existing deployment, you may not have the flexibility to update and patch the kernel.
|
||||
|
||||
We are working on a plugin API which will make Docker very, very customization-friendly.
|
||||
We believe it will facilitate the integrations listed above - and many more we didn't even think about.
|
||||
|
||||
Let us know if you want to start playing with the API before it's generally available.
|
||||
Expanding Docker’s kernel support is a priority. This includes running on older kernel versions, but also on kernels with no AUFS support, or with incomplete lxc capabilities.
|
||||
|
||||
|
||||
Externally mounted volumes
|
||||
--------------------------
|
||||
## Cross-architecture support
|
||||
|
||||
In 0.3 we [introduced data volumes](https://github.com/dotcloud/docker/wiki/Docker-0.3.0-release-note%2C-May-6-2013#data-volumes),
|
||||
a great mechanism for manipulating persistent data such as database files, log files, etc.
|
||||
Our goal is to make Docker run everywhere. However currently Docker only runs on x86_64 systems. We plan on expanding architecture support, so that Docker containers can be created and used on more architectures.
|
||||
|
||||
Data volumes can be shared between containers, a powerful capability [which allows many advanced use cases](http://docs.docker.io/en/latest/examples/couchdb_data_volumes.html). In the future it will also be possible to share volumes between a container and the underlying host. This will make certain scenarios much easier, such as using a high-performance storage backend for your production database,
|
||||
making live development changes available to a container, etc.
|
||||
## Production-ready
|
||||
|
||||
|
||||
Better documentation
|
||||
--------------------
|
||||
|
||||
We believe that great documentation is worth 10 features. We are often told that "Docker's documentation is great for a 2-month old project".
|
||||
Our goal is to make it great, period.
|
||||
|
||||
If you have feedback on how to improve our documentation, please get in touch by replying to this email,
|
||||
or by [filing an issue](https://github.com/dotcloud/docker/issues). We always appreciate it!
|
||||
|
||||
|
||||
Production-ready
|
||||
----------------
|
||||
|
||||
Docker is still alpha software, and not suited for production.
|
||||
We are working hard to get there, and we are confident that it will be possible within a few months.
|
||||
|
||||
|
||||
Advanced port redirections
|
||||
--------------------------
|
||||
|
||||
Docker currently supports 2 flavors of port redirection: STATIC->STATIC (eg. "redirect public port 80 to private port 80")
|
||||
and RANDOM->STATIC (eg. "redirect any public port to private port 80").
|
||||
|
||||
With these 2 flavors, docker can support the majority of backend programs out there. But some applications have more exotic
|
||||
requirements, generally to implement custom clustering techniques. These applications include Hadoop, MongoDB, Riak, RabbitMQ,
|
||||
Disco, and all programs relying on Erlang's OTP.
|
||||
|
||||
To support these applications, Docker needs to support more advanced redirection flavors, including:
|
||||
|
||||
* RANDOM->RANDOM
|
||||
* STATIC1->STATIC2
|
||||
|
||||
These flavors should be implemented without breaking existing semantics, if at all possible.
|
||||
Docker is still beta software, and not suited for production. We are working hard to get there, and we are confident that it will be possible within a few months. Stay tuned for a more detailed roadmap soon.
|
||||
|
|
15
hack/infrastructure/docker-ci.rst
Normal file
@ -0,0 +1,15 @@
docker-ci github pull request
=============================

The entire docker pull request test workflow is event-driven by github. Its
usage is fully automatic and the results are logged at docker-ci.dotcloud.com

Each time there is a pull request on docker's github project, github connects
to docker-ci using github's REST API, documented at http://developer.github.com/v3/repos/hooks
The command issued to program github's pull request notification event was:
curl -u GITHUB_USER:GITHUB_PASSWORD -d '{"name":"web","active":true,"events":["pull_request"],"config":{"url":"http://docker-ci.dotcloud.com:8011/change_hook/github?project=docker"}}' https://api.github.com/repos/dotcloud/docker/hooks

buildbot (0.8.7p1) was patched using ./testing/buildbot/github.py, so it
can understand the PR data github sends to it. Originally PR #1603 (ee64e099e0)
implemented this capability. We also added a new scheduler to exclusively filter
PRs, and the 'pullrequest' builder to rebase the PR on top of master and test it.
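
For readers unfamiliar with the hook mechanism: github POSTs a JSON
``pull_request`` event to the buildbot change hook on port 8011. Purely as an
illustration of the payload shape (this is not the buildbot hook itself, and
the field selection is an assumption based on github's v3 API), a minimal Go
receiver might look like:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

// pullRequestEvent picks out a few fields of GitHub's pull_request webhook
// payload. This is NOT the buildbot change hook used by docker-ci; it is a
// minimal stand-in showing the shape of the data github POSTs to it.
type pullRequestEvent struct {
	Action      string `json:"action"`
	Number      int    `json:"number"`
	PullRequest struct {
		Head struct {
			Sha string `json:"sha"`
		} `json:"head"`
	} `json:"pull_request"`
}

func main() {
	http.HandleFunc("/change_hook/github", func(w http.ResponseWriter, r *http.Request) {
		var ev pullRequestEvent
		if err := json.NewDecoder(r.Body).Decode(&ev); err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		fmt.Printf("PR #%d %s (head %s)\n", ev.Number, ev.Action, ev.PullRequest.Head.Sha)
	})
	log.Fatal(http.ListenAndServe(":8011", nil))
}
```
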
@ -52,7 +52,7 @@ private PaaS, service-oriented architectures, etc."
|
|||
|
||||
UPSTART_SCRIPT='description "Docker daemon"
|
||||
|
||||
start on filesystem or runlevel [2345]
|
||||
start on filesystem and started lxc-net
|
||||
stop on runlevel [!2345]
|
||||
|
||||
respawn
|
||||
|
|
|
@ -30,6 +30,10 @@ lxc.network.ipv4 = {{.NetworkSettings.IPAddress}}/{{.NetworkSettings.IPPrefixLen
|
|||
{{$ROOTFS := .RootfsPath}}
|
||||
lxc.rootfs = {{$ROOTFS}}
|
||||
|
||||
# enable domain name support
|
||||
lxc.mount.entry = {{.HostnamePath}} {{$ROOTFS}}/etc/hostname none bind,ro 0 0
|
||||
lxc.mount.entry = {{.HostsPath}} {{$ROOTFS}}/etc/hosts none bind,ro 0 0
|
||||
|
||||
# use a dedicated pts for the container (and limit the number of pseudo terminal
|
||||
# available)
|
||||
lxc.pts = 1024
|
||||
|
|
|
@ -642,7 +642,7 @@ func (manager *NetworkManager) Allocate() (*NetworkInterface, error) {

	if err != nil {
		return nil, err
	}
	// avoid duplicate IP
	// avoid duplicate IP
	ipNum := ipToInt(ip)
	firstIP := manager.ipAllocator.network.IP.To4().Mask(manager.ipAllocator.network.Mask)
	firstIPNum := ipToInt(firstIP) + 1

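
`ipToInt`, used here for the duplicate-IP check, converts an IPv4 address into
an integer so addresses can be compared and iterated numerically. The exact
implementation is not shown in this hunk; a minimal assumed version, for
illustration only:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"net"
)

// ipToInt packs a 4-byte IPv4 address into a uint32 so that consecutive
// addresses differ by one, which makes duplicate checks and iteration easy.
// (Assumed shape of the helper referenced above, shown for illustration.)
func ipToInt(ip net.IP) uint32 {
	return binary.BigEndian.Uint32(ip.To4())
}

func main() {
	a := ipToInt(net.ParseIP("10.0.3.1"))
	b := ipToInt(net.ParseIP("10.0.3.2"))
	fmt.Println(b - a) // 1
}
```
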
@ -1,6 +1,6 @@
|
|||
description "Run docker"
|
||||
|
||||
start on filesystem or runlevel [2345]
|
||||
start on filesystem and started lxc-net
|
||||
stop on runlevel [!2345]
|
||||
|
||||
respawn
|
||||
|
|
|
@ -22,6 +22,7 @@ import (
|
|||
var (
|
||||
ErrAlreadyExists = errors.New("Image already exists")
|
||||
ErrInvalidRepositoryName = errors.New("Invalid repository name (ex: \"registry.domain.tld/myrepos\")")
|
||||
ErrLoginRequired = errors.New("Authentication is required.")
|
||||
)
|
||||
|
||||
func pingRegistryEndpoint(endpoint string) error {
|
||||
|
@ -102,17 +103,38 @@ func ResolveRepositoryName(reposName string) (string, string, error) {
|
|||
if err := validateRepositoryName(reposName); err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
endpoint, err := ExpandAndVerifyRegistryUrl(hostname)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
return endpoint, reposName, err
|
||||
}
|
||||
|
||||
// this method expands the registry name as used in the prefix of a repo
|
||||
// to a full url. if it already is a url, there will be no change.
|
||||
// The registry is pinged to test whether it uses http or https
|
||||
func ExpandAndVerifyRegistryUrl(hostname string) (string, error) {
|
||||
if strings.HasPrefix(hostname, "http:") || strings.HasPrefix(hostname, "https:") {
|
||||
// if there is no slash after https:// (8 characters) then we have no path in the url
|
||||
if strings.LastIndex(hostname, "/") < 9 {
|
||||
// there is no path given. Expand with default path
|
||||
hostname = hostname + "/v1/"
|
||||
}
|
||||
if err := pingRegistryEndpoint(hostname); err != nil {
|
||||
return "", errors.New("Invalid Registry endpoint: " + err.Error())
|
||||
}
|
||||
return hostname, nil
|
||||
}
|
||||
endpoint := fmt.Sprintf("https://%s/v1/", hostname)
|
||||
if err := pingRegistryEndpoint(endpoint); err != nil {
|
||||
utils.Debugf("Registry %s does not work (%s), falling back to http", endpoint, err)
|
||||
endpoint = fmt.Sprintf("http://%s/v1/", hostname)
|
||||
if err = pingRegistryEndpoint(endpoint); err != nil {
|
||||
//TODO: triggering highland build can be done there without "failing"
|
||||
return "", "", errors.New("Invalid Registry endpoint: " + err.Error())
|
||||
return "", errors.New("Invalid Registry endpoint: " + err.Error())
|
||||
}
|
||||
}
|
||||
err := validateRepositoryName(reposName)
|
||||
return endpoint, reposName, err
|
||||
return endpoint, nil
|
||||
}
|
||||
|
||||
func doWithCookies(c *http.Client, req *http.Request) (*http.Response, error) {
|
||||
|
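
The new `ExpandAndVerifyRegistryUrl` above probes the registry over https first
and falls back to http. A standalone sketch of that probe-and-fallback idea,
using only the standard library (the real code goes through
`pingRegistryEndpoint` and docker's own error handling):

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// expandRegistry tries https://<host>/v1/ first and falls back to plain
// http, mirroring the fallback logic in ExpandAndVerifyRegistryUrl above.
// (Illustrative sketch; the real code pings a registry-specific endpoint.)
func expandRegistry(hostname string) (string, error) {
	client := &http.Client{Timeout: 5 * time.Second}
	for _, scheme := range []string{"https", "http"} {
		endpoint := fmt.Sprintf("%s://%s/v1/", scheme, hostname)
		resp, err := client.Get(endpoint + "_ping")
		if err != nil {
			continue // try the next scheme
		}
		resp.Body.Close()
		return endpoint, nil
	}
	return "", fmt.Errorf("invalid registry endpoint: %s", hostname)
}

func main() {
	endpoint, err := expandRegistry("index.docker.io")
	fmt.Println(endpoint, err)
}
```
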
@ -139,6 +161,9 @@ func (r *Registry) GetRemoteHistory(imgID, registry string, token []string) ([]s
|
|||
req.Header.Set("Authorization", "Token "+strings.Join(token, ", "))
|
||||
res, err := doWithCookies(r.client, req)
|
||||
if err != nil || res.StatusCode != 200 {
|
||||
if res.StatusCode == 401 {
|
||||
return nil, ErrLoginRequired
|
||||
}
|
||||
if res != nil {
|
||||
return nil, utils.NewHTTPRequestError(fmt.Sprintf("Internal server error: %d trying to fetch remote history for %s", res.StatusCode, imgID), res)
|
||||
}
|
||||
|
@ -282,7 +307,7 @@ func (r *Registry) GetRepositoryData(indexEp, remote string) (*RepositoryData, e
|
|||
}
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode == 401 {
|
||||
return nil, utils.NewHTTPRequestError(fmt.Sprintf("Please login first (HTTP code %d)", res.StatusCode), res)
|
||||
return nil, ErrLoginRequired
|
||||
}
|
||||
// TODO: Right now we're ignoring checksums in the response body.
|
||||
// In the future, we need to use them to check image validity.
|
||||
|
|
174
runtime.go
174
runtime.go
|
@ -12,8 +12,11 @@ import (
|
|||
"path"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var defaultDns = []string{"8.8.8.8", "8.8.4.4"}
|
||||
|
||||
type Capabilities struct {
|
||||
MemoryLimit bool
|
||||
SwapLimit bool
|
||||
|
@ -42,6 +45,7 @@ func init() {
|
|||
sysInitPath = utils.SelfPath()
|
||||
}
|
||||
|
||||
// List returns an array of all containers registered in the runtime.
|
||||
func (runtime *Runtime) List() []*Container {
|
||||
containers := new(History)
|
||||
for e := runtime.containers.Front(); e != nil; e = e.Next() {
|
||||
|
@ -60,6 +64,8 @@ func (runtime *Runtime) getContainerElement(id string) *list.Element {
|
|||
return nil
|
||||
}
|
||||
|
||||
// Get looks for a container by the specified ID or name, and returns it.
|
||||
// If the container is not found, or if an error occurs, nil is returned.
|
||||
func (runtime *Runtime) Get(name string) *Container {
|
||||
id, err := runtime.idIndex.Get(name)
|
||||
if err != nil {
|
||||
|
@ -72,6 +78,8 @@ func (runtime *Runtime) Get(name string) *Container {
|
|||
return e.Value.(*Container)
|
||||
}
|
||||
|
||||
// Exists returns a true if a container of the specified ID or name exists,
|
||||
// false otherwise.
|
||||
func (runtime *Runtime) Exists(id string) bool {
|
||||
return runtime.Get(id) != nil
|
||||
}
|
||||
|
@ -80,6 +88,9 @@ func (runtime *Runtime) containerRoot(id string) string {
|
|||
return path.Join(runtime.repository, id)
|
||||
}
|
||||
|
||||
// Load reads the contents of a container from disk and registers
|
||||
// it with Register.
|
||||
// This is typically done at startup.
|
||||
func (runtime *Runtime) Load(id string) (*Container, error) {
|
||||
container := &Container{root: runtime.containerRoot(id)}
|
||||
if err := container.FromDisk(); err != nil {
|
||||
|
@ -177,6 +188,7 @@ func (runtime *Runtime) LogToDisk(src *utils.WriteBroadcaster, dst, stream strin
|
|||
return nil
|
||||
}
|
||||
|
||||
// Destroy unregisters a container from the runtime and cleanly removes its contents from the filesystem.
|
||||
func (runtime *Runtime) Destroy(container *Container) error {
|
||||
if container == nil {
|
||||
return fmt.Errorf("The given container is <nil>")
|
||||
|
@ -208,7 +220,7 @@ func (runtime *Runtime) Destroy(container *Container) error {
|
|||
|
||||
func (runtime *Runtime) restore() error {
|
||||
wheel := "-\\|/"
|
||||
if os.Getenv("DEBUG") == "" {
|
||||
if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" {
|
||||
fmt.Printf("Loading containers: ")
|
||||
}
|
||||
dir, err := ioutil.ReadDir(runtime.repository)
|
||||
|
@ -218,7 +230,7 @@ func (runtime *Runtime) restore() error {
|
|||
for i, v := range dir {
|
||||
id := v.Name()
|
||||
container, err := runtime.Load(id)
|
||||
if i%21 == 0 && os.Getenv("DEBUG") == "" {
|
||||
if i%21 == 0 && os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" {
|
||||
fmt.Printf("\b%c", wheel[i%4])
|
||||
}
|
||||
if err != nil {
|
||||
|
@ -227,12 +239,13 @@ func (runtime *Runtime) restore() error {
|
|||
}
|
||||
utils.Debugf("Loaded container %v", container.ID)
|
||||
}
|
||||
if os.Getenv("DEBUG") == "" {
|
||||
if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" {
|
||||
fmt.Printf("\bdone.\n")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// FIXME: comment please!
|
||||
func (runtime *Runtime) UpdateCapabilities(quiet bool) {
|
||||
if cgroupMemoryMountpoint, err := utils.FindCgroupMountpoint("memory"); err != nil {
|
||||
if !quiet {
|
||||
|
@ -260,6 +273,159 @@ func (runtime *Runtime) UpdateCapabilities(quiet bool) {
|
|||
}
|
||||
}
|
||||
|
||||
// Create creates a new container from the given configuration.
|
||||
func (runtime *Runtime) Create(config *Config) (*Container, error) {
|
||||
// Lookup image
|
||||
img, err := runtime.repositories.LookupImage(config.Image)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if img.Config != nil {
|
||||
MergeConfig(config, img.Config)
|
||||
}
|
||||
|
||||
if len(config.Entrypoint) != 0 && config.Cmd == nil {
|
||||
config.Cmd = []string{}
|
||||
} else if config.Cmd == nil || len(config.Cmd) == 0 {
|
||||
return nil, fmt.Errorf("No command specified")
|
||||
}
|
||||
|
||||
// Generate id
|
||||
id := GenerateID()
|
||||
// Generate default hostname
|
||||
// FIXME: the lxc template no longer needs to set a default hostname
|
||||
if config.Hostname == "" {
|
||||
config.Hostname = id[:12]
|
||||
}
|
||||
|
||||
var args []string
|
||||
var entrypoint string
|
||||
|
||||
if len(config.Entrypoint) != 0 {
|
||||
entrypoint = config.Entrypoint[0]
|
||||
args = append(config.Entrypoint[1:], config.Cmd...)
|
||||
} else {
|
||||
entrypoint = config.Cmd[0]
|
||||
args = config.Cmd[1:]
|
||||
}
|
||||
|
||||
container := &Container{
|
||||
// FIXME: we should generate the ID here instead of receiving it as an argument
|
||||
ID: id,
|
||||
Created: time.Now(),
|
||||
Path: entrypoint,
|
||||
Args: args, //FIXME: de-duplicate from config
|
||||
Config: config,
|
||||
Image: img.ID, // Always use the resolved image id
|
||||
NetworkSettings: &NetworkSettings{},
|
||||
// FIXME: do we need to store this in the container?
|
||||
SysInitPath: sysInitPath,
|
||||
}
|
||||
container.root = runtime.containerRoot(container.ID)
|
||||
// Step 1: create the container directory.
|
||||
// This doubles as a barrier to avoid race conditions.
|
||||
if err := os.Mkdir(container.root, 0700); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resolvConf, err := utils.GetResolvConf()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(config.Dns) == 0 && len(runtime.Dns) == 0 && utils.CheckLocalDns(resolvConf) {
|
||||
//"WARNING: Docker detected local DNS server on resolv.conf. Using default external servers: %v", defaultDns
|
||||
runtime.Dns = defaultDns
|
||||
}
|
||||
|
||||
// If custom dns exists, then create a resolv.conf for the container
|
||||
if len(config.Dns) > 0 || len(runtime.Dns) > 0 {
|
||||
var dns []string
|
||||
if len(config.Dns) > 0 {
|
||||
dns = config.Dns
|
||||
} else {
|
||||
dns = runtime.Dns
|
||||
}
|
||||
container.ResolvConfPath = path.Join(container.root, "resolv.conf")
|
||||
f, err := os.Create(container.ResolvConfPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
for _, dns := range dns {
|
||||
if _, err := f.Write([]byte("nameserver " + dns + "\n")); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
container.ResolvConfPath = "/etc/resolv.conf"
|
||||
}
|
||||
|
||||
// Step 2: save the container json
|
||||
if err := container.ToDisk(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Step 3: if hostname, build hostname and hosts files
|
||||
container.HostnamePath = path.Join(container.root, "hostname")
|
||||
ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644)
|
||||
|
||||
hostsContent := []byte(`
|
||||
127.0.0.1 localhost
|
||||
::1 localhost ip6-localhost ip6-loopback
|
||||
fe00::0 ip6-localnet
|
||||
ff00::0 ip6-mcastprefix
|
||||
ff02::1 ip6-allnodes
|
||||
ff02::2 ip6-allrouters
|
||||
`)
|
||||
|
||||
container.HostsPath = path.Join(container.root, "hosts")
|
||||
|
||||
if container.Config.Domainname != "" {
|
||||
hostsContent = append([]byte(fmt.Sprintf("::1\t\t%s.%s %s\n", container.Config.Hostname, container.Config.Domainname, container.Config.Hostname)), hostsContent...)
|
||||
hostsContent = append([]byte(fmt.Sprintf("127.0.0.1\t%s.%s %s\n", container.Config.Hostname, container.Config.Domainname, container.Config.Hostname)), hostsContent...)
|
||||
} else {
|
||||
hostsContent = append([]byte(fmt.Sprintf("::1\t\t%s\n", container.Config.Hostname)), hostsContent...)
|
||||
hostsContent = append([]byte(fmt.Sprintf("127.0.0.1\t%s\n", container.Config.Hostname)), hostsContent...)
|
||||
}
|
||||
|
||||
ioutil.WriteFile(container.HostsPath, hostsContent, 0644)
|
||||
|
||||
// Step 4: register the container
|
||||
if err := runtime.Register(container); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return container, nil
|
||||
}
|
||||
|
||||
// Commit creates a new filesystem image from the current state of a container.
|
||||
// The image can optionally be tagged into a repository
|
||||
func (runtime *Runtime) Commit(container *Container, repository, tag, comment, author string, config *Config) (*Image, error) {
|
||||
// FIXME: freeze the container before copying it to avoid data corruption?
|
||||
// FIXME: this shouldn't be in commands.
|
||||
if err := container.EnsureMounted(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rwTar, err := container.ExportRw()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Create a new image from the container's base layers + a new layer from container changes
|
||||
img, err := runtime.graph.Create(rwTar, container, comment, author, config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Register the image if needed
|
||||
if repository != "" {
|
||||
if err := runtime.repositories.Set(repository, tag, img.ID, true); err != nil {
|
||||
return img, err
|
||||
}
|
||||
}
|
||||
return img, nil
|
||||
}
|
||||
|
||||
// FIXME: harmonize with NewGraph()
|
||||
func NewRuntime(flGraphPath string, autoRestart bool, dns []string) (*Runtime, error) {
|
||||
runtime, err := NewRuntimeFromDirectory(flGraphPath, autoRestart)
|
||||
|
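
One detail of `Create` above is how `Entrypoint` and `Cmd` are merged into the
process path and arguments. A self-contained sketch of just that merge logic,
with the same error for a missing command (simplified stand-in, not the real
method):

```go
package main

import "fmt"

// mergeEntrypoint mirrors the Entrypoint/Cmd handling in Runtime.Create
// above: Entrypoint[0] becomes the process path and the remaining
// entrypoint arguments are prepended to Cmd. (Simplified stand-in.)
func mergeEntrypoint(entrypoint, cmd []string) (path string, args []string, err error) {
	if len(entrypoint) == 0 && len(cmd) == 0 {
		return "", nil, fmt.Errorf("No command specified")
	}
	if len(entrypoint) != 0 {
		return entrypoint[0], append(append([]string{}, entrypoint[1:]...), cmd...), nil
	}
	return cmd[0], cmd[1:], nil
}

func main() {
	path, args, _ := mergeEntrypoint([]string{"/bin/echo", "-e"}, []string{"hello"})
	fmt.Println(path, args) // /bin/echo [-e hello]
}
```
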
@ -325,6 +491,8 @@ func NewRuntimeFromDirectory(root string, autoRestart bool) (*Runtime, error) {
|
|||
return runtime, nil
|
||||
}
|
||||
|
||||
// History is a convenience type for storing a list of containers,
|
||||
// ordered by creation date.
|
||||
type History []*Container
|
||||
|
||||
func (history *History) Len() int {
|
||||
|
|
|
@ -50,7 +50,7 @@ func cleanup(runtime *Runtime) error {
|
|||
container.Kill()
|
||||
runtime.Destroy(container)
|
||||
}
|
||||
images, err := runtime.graph.All()
|
||||
images, err := runtime.graph.Map()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -72,6 +72,8 @@ func layerArchive(tarfile string) (io.Reader, error) {
|
|||
}
|
||||
|
||||
func init() {
|
||||
os.Setenv("TEST", "1")
|
||||
|
||||
// Hack to run sys init during unit testing
|
||||
if selfPath := utils.SelfPath(); selfPath == "/sbin/init" || selfPath == "/.dockerinit" {
|
||||
SysInit()
|
||||
|
@ -121,13 +123,13 @@ func init() {
|
|||
// FIXME: test that ImagePull(json=true) send correct json output
|
||||
|
||||
func GetTestImage(runtime *Runtime) *Image {
|
||||
imgs, err := runtime.graph.All()
|
||||
imgs, err := runtime.graph.Map()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
for i := range imgs {
|
||||
if imgs[i].ID == unitTestImageID {
|
||||
return imgs[i]
|
||||
for _, image := range imgs {
|
||||
if image.ID == unitTestImageID {
|
||||
return image
|
||||
}
|
||||
}
|
||||
panic(fmt.Errorf("Test image %v not found", unitTestImageID))
|
||||
|
@ -142,9 +144,7 @@ func TestRuntimeCreate(t *testing.T) {
|
|||
t.Errorf("Expected 0 containers, %v found", len(runtime.List()))
|
||||
}
|
||||
|
||||
builder := NewBuilder(runtime)
|
||||
|
||||
container, err := builder.Create(&Config{
|
||||
container, err := runtime.Create(&Config{
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"ls", "-al"},
|
||||
},
|
||||
|
@ -185,7 +185,7 @@ func TestRuntimeCreate(t *testing.T) {
|
|||
}
|
||||
|
||||
// Make sure create with bad parameters returns an error
|
||||
_, err = builder.Create(
|
||||
_, err = runtime.Create(
|
||||
&Config{
|
||||
Image: GetTestImage(runtime).ID,
|
||||
},
|
||||
|
@ -194,7 +194,7 @@ func TestRuntimeCreate(t *testing.T) {
        t.Fatal("Builder.Create should throw an error when Cmd is missing")
    }

    _, err = builder.Create(
    _, err = runtime.Create(
        &Config{
            Image: GetTestImage(runtime).ID,
            Cmd: []string{},

@ -208,7 +208,7 @@ func TestRuntimeCreate(t *testing.T) {
func TestDestroy(t *testing.T) {
    runtime := mkRuntime(t)
    defer nuke(runtime)
    container, err := NewBuilder(runtime).Create(&Config{
    container, err := runtime.Create(&Config{
        Image: GetTestImage(runtime).ID,
        Cmd: []string{"ls", "-al"},
    },

@ -294,7 +294,7 @@ func startEchoServerContainer(t *testing.T, proto string) (*Runtime, *Container,
        t.Fatal(fmt.Errorf("Unknown protocol %v", proto))
    }
    t.Log("Trying port", strPort)
    container, err = NewBuilder(runtime).Create(&Config{
    container, err = runtime.Create(&Config{
        Image: GetTestImage(runtime).ID,
        Cmd: []string{"sh", "-c", cmd},
        PortSpecs: []string{fmt.Sprintf("%s/%s", strPort, proto)},

server.go (134 changes)
@ -139,8 +139,7 @@ func (srv *Server) ImageInsert(name, url, path string, out io.Writer, sf *utils.
        return "", err
    }

    b := NewBuilder(srv.runtime)
    c, err := b.Create(config)
    c, err := srv.runtime.Create(config)
    if err != nil {
        return "", err
    }

@ -149,7 +148,7 @@ func (srv *Server) ImageInsert(name, url, path string, out io.Writer, sf *utils.
        return "", err
    }
    // FIXME: Handle custom repo, tag comment, author
    img, err = b.Commit(c, "", "", img.Comment, img.Author, nil)
    img, err = srv.runtime.Commit(c, "", "", img.Comment, img.Author, nil)
    if err != nil {
        return "", err
    }

@ -158,7 +157,7 @@ func (srv *Server) ImageInsert(name, url, path string, out io.Writer, sf *utils.
}

func (srv *Server) ImagesViz(out io.Writer) error {
    images, _ := srv.runtime.graph.All()
    images, _ := srv.runtime.graph.Map()
    if images == nil {
        return nil
    }

@ -210,8 +209,10 @@ func (srv *Server) Images(all bool, filter string) ([]APIImages, error) {
    }
    outs := []APIImages{} //produce [] when empty instead of 'null'
    for name, repository := range srv.runtime.repositories.Repositories {
        if filter != "" && name != filter {
            continue
        if filter != "" {
            if match, _ := path.Match(filter, name); !match {
                continue
            }
        }
        for tag, id := range repository {
            var out APIImages

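A note on the filter change above: srv.Images now runs the filter through path.Match instead of an exact string comparison, so shell-style patterns such as utest*/* or *5000*/* match repository names (TestImagesFilter further down exercises this). A minimal, self-contained sketch of the matching behaviour, with made-up repository names:

package main

import (
    "fmt"
    "path"
)

func main() {
    // Hypothetical repository names, matched the same way srv.Images now matches them.
    names := []string{"utest", "utest/docker", "utest:5000/docker"}
    filters := []string{"utest", "utest*", "utest*/*", "*5000*/*"}
    for _, filter := range filters {
        for _, name := range names {
            // path.Match: '*' never crosses a '/' boundary.
            match, _ := path.Match(filter, name)
            fmt.Printf("%-10s %-18s %v\n", filter, name, match)
        }
    }
}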
@ -247,7 +248,7 @@ func (srv *Server) Images(all bool, filter string) ([]APIImages, error) {
}

func (srv *Server) DockerInfo() *APIInfo {
    images, _ := srv.runtime.graph.All()
    images, _ := srv.runtime.graph.Map()
    var imgcount int
    if images == nil {
        imgcount = 0

@ -386,7 +387,7 @@ func (srv *Server) Containers(all, size bool, n int, since, before string) []API
        c.Command = fmt.Sprintf("%s %s", container.Path, strings.Join(container.Args, " "))
        c.Created = container.Created.Unix()
        c.Status = container.State.String()
        c.Ports = container.NetworkSettings.PortMappingHuman()
        c.Ports = container.NetworkSettings.PortMappingAPI()
        if size {
            c.SizeRw, c.SizeRootFs = container.GetSize()
        }

@ -400,7 +401,7 @@ func (srv *Server) ContainerCommit(name, repo, tag, author, comment string, conf
    if container == nil {
        return "", fmt.Errorf("No such container: %s", name)
    }
    img, err := NewBuilder(srv.runtime).Commit(container, repo, tag, comment, author, config)
    img, err := srv.runtime.Commit(container, repo, tag, comment, author, config)
    if err != nil {
        return "", err
    }

@ -655,6 +656,9 @@ func (srv *Server) ImagePull(localName string, tag string, out io.Writer, sf *ut

    out = utils.NewWriteFlusher(out)
    err = srv.pullRepository(r, out, localName, remoteName, tag, endpoint, sf, parallel)
    if err == registry.ErrLoginRequired {
        return err
    }
    if err != nil {
        if err := srv.pullImage(r, out, remoteName, endpoint, nil, sf); err != nil {
            return err
@ -667,29 +671,57 @@ func (srv *Server) ImagePull(localName string, tag string, out io.Writer, sf *ut

// Retrieve all the images to be uploaded in the correct order
// Note: we can't use a map as it is not ordered
func (srv *Server) getImageList(localRepo map[string]string) ([]*registry.ImgData, error) {
    var imgList []*registry.ImgData
func (srv *Server) getImageList(localRepo map[string]string) ([][]*registry.ImgData, error) {
    imgList := map[string]*registry.ImgData{}
    depGraph := utils.NewDependencyGraph()

    imageSet := make(map[string]struct{})
    for tag, id := range localRepo {
        img, err := srv.runtime.graph.Get(id)
        if err != nil {
            return nil, err
        }
        img.WalkHistory(func(img *Image) error {
            if _, exists := imageSet[img.ID]; exists {
        depGraph.NewNode(img.ID)
        img.WalkHistory(func(current *Image) error {
            imgList[current.ID] = &registry.ImgData{
                ID:  current.ID,
                Tag: tag,
            }
            parent, err := current.GetParent()
            if err != nil {
                return err
            }
            if parent == nil {
                return nil
            }
            imageSet[img.ID] = struct{}{}

            imgList = append([]*registry.ImgData{{
                ID:  img.ID,
                Tag: tag,
            }}, imgList...)
            depGraph.NewNode(parent.ID)
            depGraph.AddDependency(current.ID, parent.ID)
            return nil
        })
    }
    return imgList, nil

    traversalMap, err := depGraph.GenerateTraversalMap()
    if err != nil {
        return nil, err
    }

    utils.Debugf("Traversal map: %v", traversalMap)
    result := [][]*registry.ImgData{}
    for _, round := range traversalMap {
        dataRound := []*registry.ImgData{}
        for _, imgID := range round {
            dataRound = append(dataRound, imgList[imgID])
        }
        result = append(result, dataRound)
    }
    return result, nil
}

func flatten(slc [][]*registry.ImgData) []*registry.ImgData {
    result := []*registry.ImgData{}
    for _, x := range slc {
        result = append(result, x...)
    }
    return result
}

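Worth spelling out what the new return shape buys: getImageList now groups images into rounds where every image in a round depends only on images from earlier rounds, and flatten recovers the old flat, parents-first list for the index upload. A small self-contained sketch of that relationship, using made-up image IDs instead of *registry.ImgData:

package main

import "fmt"

// flattenIDs mirrors what flatten does to the [][]*registry.ImgData rounds,
// shown here with plain strings so the ordering property is easy to see.
func flattenIDs(rounds [][]string) []string {
    flat := []string{}
    for _, round := range rounds {
        flat = append(flat, round...)
    }
    return flat
}

func main() {
    // Illustrative layer IDs: each round depends only on earlier rounds.
    rounds := [][]string{
        {"base"},               // no parent
        {"child-a", "child-b"}, // both have "base" as parent
        {"grandchild"},         // parent is "child-a"
    }
    fmt.Println(flattenIDs(rounds)) // parents always come before children
}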
func (srv *Server) pushRepository(r *registry.Registry, out io.Writer, localName, remoteName string, localRepo map[string]string, indexEp string, sf *utils.StreamFormatter) error {

@ -698,39 +730,54 @@ func (srv *Server) pushRepository(r *registry.Registry, out io.Writer, localName
    if err != nil {
        return err
    }
    flattenedImgList := flatten(imgList)
    out.Write(sf.FormatStatus("", "Sending image list"))

    var repoData *registry.RepositoryData
    repoData, err = r.PushImageJSONIndex(indexEp, remoteName, imgList, false, nil)
    repoData, err = r.PushImageJSONIndex(indexEp, remoteName, flattenedImgList, false, nil)
    if err != nil {
        return err
    }

    for _, ep := range repoData.Endpoints {
        out.Write(sf.FormatStatus("", "Pushing repository %s (%d tags)", localName, len(localRepo)))
        // For each image within the repo, push them
        for _, elem := range imgList {
            if _, exists := repoData.ImgList[elem.ID]; exists {
                out.Write(sf.FormatStatus("", "Image %s already pushed, skipping", elem.ID))
                continue
            } else if r.LookupRemoteImage(elem.ID, ep, repoData.Tokens) {
                out.Write(sf.FormatStatus("", "Image %s already pushed, skipping", elem.ID))
                continue
            }
            if checksum, err := srv.pushImage(r, out, remoteName, elem.ID, ep, repoData.Tokens, sf); err != nil {
                // FIXME: Continue on error?
                return err
            } else {
                elem.Checksum = checksum
            }
            out.Write(sf.FormatStatus("", "Pushing tags for rev [%s] on {%s}", elem.ID, ep+"repositories/"+remoteName+"/tags/"+elem.Tag))
            if err := r.PushRegistryTag(remoteName, elem.ID, elem.Tag, ep, repoData.Tokens); err != nil {
                return err
        // This section can not be parallelized (each round depends on the previous one)
        for _, round := range imgList {
            // FIXME: This section can be parallelized
            for _, elem := range round {
                var pushTags func() error
                pushTags = func() error {
                    out.Write(sf.FormatStatus("", "Pushing tags for rev [%s] on {%s}", elem.ID, ep+"repositories/"+remoteName+"/tags/"+elem.Tag))
                    if err := r.PushRegistryTag(remoteName, elem.ID, elem.Tag, ep, repoData.Tokens); err != nil {
                        return err
                    }
                    return nil
                }
                if _, exists := repoData.ImgList[elem.ID]; exists {
                    if err := pushTags(); err != nil {
                        return err
                    }
                    out.Write(sf.FormatStatus("", "Image %s already pushed, skipping", elem.ID))
                    continue
                } else if r.LookupRemoteImage(elem.ID, ep, repoData.Tokens) {
                    if err := pushTags(); err != nil {
                        return err
                    }
                    out.Write(sf.FormatStatus("", "Image %s already pushed, skipping", elem.ID))
                    continue
                }
                if checksum, err := srv.pushImage(r, out, remoteName, elem.ID, ep, repoData.Tokens, sf); err != nil {
                    // FIXME: Continue on error?
                    return err
                } else {
                    elem.Checksum = checksum
                }
                return pushTags()
            }
        }
    }

    if _, err := r.PushImageJSONIndex(indexEp, remoteName, imgList, true, repoData.Endpoints); err != nil {
    if _, err := r.PushImageJSONIndex(indexEp, remoteName, flattenedImgList, true, repoData.Endpoints); err != nil {
        return err
    }

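On the FIXME above ("This section can be parallelized"): one possible shape for that, not what this commit implements, is to keep the rounds sequential but push the images of a single round concurrently, since images inside one round do not depend on each other. A hedged sketch with a placeholder pushOne standing in for srv.pushImage and the tag handling:

package main

import (
    "fmt"
    "sync"
)

// pushOne is a placeholder for the real per-image push, not Docker's API.
func pushOne(id string) error {
    fmt.Println("pushing", id)
    return nil
}

// pushRound pushes every image of one round concurrently; rounds themselves
// must still run in order, since later rounds depend on earlier ones.
func pushRound(round []string) error {
    var (
        wg       sync.WaitGroup
        mu       sync.Mutex
        firstErr error
    )
    for _, id := range round {
        wg.Add(1)
        go func(id string) {
            defer wg.Done()
            if err := pushOne(id); err != nil {
                mu.Lock()
                if firstErr == nil {
                    firstErr = err
                }
                mu.Unlock()
            }
        }(id)
    }
    wg.Wait()
    return firstErr
}

func main() {
    rounds := [][]string{{"base"}, {"child-a", "child-b"}}
    for _, round := range rounds {
        if err := pushRound(round); err != nil {
            fmt.Println("push failed:", err)
            return
        }
    }
}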
@ -872,8 +919,7 @@ func (srv *Server) ContainerCreate(config *Config) (string, error) {
    if config.Memory > 0 && !srv.runtime.capabilities.SwapLimit {
        config.MemorySwap = -1
    }
    b := NewBuilder(srv.runtime)
    container, err := b.Create(config)
    container, err := srv.runtime.Create(config)
    if err != nil {
        if srv.runtime.graph.IsNotExist(err) {

@ -1064,7 +1110,7 @@ func (srv *Server) ImageDelete(name string, autoPrune bool) ([]APIRmi, error) {
func (srv *Server) ImageGetCached(imgID string, config *Config) (*Image, error) {

    // Retrieve all images
    images, err := srv.runtime.graph.All()
    images, err := srv.runtime.graph.Map()
    if err != nil {
        return nil, err
    }
@ -431,3 +431,57 @@ func TestRmi(t *testing.T) {
        }
    }
}

func TestImagesFilter(t *testing.T) {
    runtime := mkRuntime(t)
    defer nuke(runtime)

    srv := &Server{runtime: runtime}

    if err := srv.runtime.repositories.Set("utest", "tag1", unitTestImageName, false); err != nil {
        t.Fatal(err)
    }

    if err := srv.runtime.repositories.Set("utest/docker", "tag2", unitTestImageName, false); err != nil {
        t.Fatal(err)
    }
    if err := srv.runtime.repositories.Set("utest:5000/docker", "tag3", unitTestImageName, false); err != nil {
        t.Fatal(err)
    }

    images, err := srv.Images(false, "utest*/*")
    if err != nil {
        t.Fatal(err)
    }

    if len(images) != 2 {
        t.Fatal("incorrect number of matches returned")
    }

    images, err = srv.Images(false, "utest")
    if err != nil {
        t.Fatal(err)
    }

    if len(images) != 1 {
        t.Fatal("incorrect number of matches returned")
    }

    images, err = srv.Images(false, "utest*")
    if err != nil {
        t.Fatal(err)
    }

    if len(images) != 1 {
        t.Fatal("incorrect number of matches returned")
    }

    images, err = srv.Images(false, "*5000*/*")
    if err != nil {
        t.Fatal(err)
    }

    if len(images) != 1 {
        t.Fatal("incorrect number of matches returned")
    }
}
@ -27,10 +27,9 @@ func setupWorkingDirectory(workdir string) {
    if workdir == "" {
        return
    }
    syscall.Chdir(workdir)
    syscall.Chdir(workdir)
}

// Takes care of dropping privileges to the desired user
func changeUser(u string) {
    if u == "" {

term/term.go (39 changes)
@ -43,17 +43,42 @@ func RestoreTerminal(fd uintptr, state *State) error {
    return err
}

func SaveState(fd uintptr) (*State, error) {
    var oldState State
    if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 {
        return nil, err
    }

    return &oldState, nil
}

func DisableEcho(fd uintptr, state *State) error {
    newState := state.termios
    newState.Lflag &^= syscall.ECHO

    if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 {
        return err
    }
    handleInterrupt(fd, state)
    return nil
}

func SetRawTerminal(fd uintptr) (*State, error) {
    oldState, err := MakeRaw(fd)
    if err != nil {
        return nil, err
    }
    c := make(chan os.Signal, 1)
    signal.Notify(c, os.Interrupt)
    go func() {
        _ = <-c
        RestoreTerminal(fd, oldState)
        os.Exit(0)
    }()
    handleInterrupt(fd, oldState)
    return oldState, err
}

func handleInterrupt(fd uintptr, state *State) {
    sigchan := make(chan os.Signal, 1)
    signal.Notify(sigchan, os.Interrupt)

    go func() {
        _ = <-sigchan
        RestoreTerminal(fd, state)
        os.Exit(0)
    }()
}
testing/Vagrantfile (31 changes, vendored)
@ -2,11 +2,10 @@
# vi: set ft=ruby :

BOX_NAME = "docker-ci"
BOX_URI = "http://files.vagrantup.com/precise64.box"
AWS_AMI = "ami-d0f89fb9"
BOX_URI = "http://cloud-images.ubuntu.com/vagrant/raring/current/raring-server-cloudimg-amd64-vagrant-disk1.box"
AWS_AMI = "ami-10314d79"
DOCKER_PATH = "/data/docker"
CFG_PATH = "#{DOCKER_PATH}/testing/buildbot"
BUILDBOT_IP = "192.168.33.41"
on_vbox = File.file?("#{File.dirname(__FILE__)}/.vagrant/machines/default/virtualbox/id") | \
  Dir.glob("#{File.dirname(__FILE__)}/.vagrant/machines/default/*/id").empty? & \
  (on_vbox=true; ARGV.each do |arg| on_vbox &&= !arg.downcase.start_with?("--provider") end; on_vbox)

@ -16,16 +15,22 @@ Vagrant::Config.run do |config|
  # Setup virtual machine box. This VM configuration code is always executed.
  config.vm.box = BOX_NAME
  config.vm.box_url = BOX_URI
  config.vm.forward_port 8010, 8010
  config.vm.share_folder "v-data", DOCKER_PATH, "#{File.dirname(__FILE__)}/.."
  config.vm.network :hostonly, BUILDBOT_IP


  # Deploy buildbot and its dependencies if it was not done
  if Dir.glob("#{File.dirname(__FILE__)}/.vagrant/machines/default/*/id").empty?
    # Add memory limitation capabilities
    pkg_cmd = 'sed -Ei \'s/^(GRUB_CMDLINE_LINUX_DEFAULT)=.+/\\1="cgroup_enable=memory swapaccount=1 quiet"/\' /etc/default/grub; '
    # Install new kernel
    pkg_cmd << "apt-get update -qq; apt-get install -q -y linux-image-generic-lts-raring; "
    # Adjust kernel
    pkg_cmd << "apt-get update -qq; "
    if on_vbox
      pkg_cmd << "apt-get install -q -y linux-image-extra-`uname -r`; "
    else
      pkg_cmd << "apt-get install -q -y linux-image-generic; "
    end

    # Deploy buildbot CI
    pkg_cmd << "apt-get install -q -y python-dev python-pip supervisor; " \
      "pip install -r #{CFG_PATH}/requirements.txt; " \

@ -35,11 +40,15 @@ Vagrant::Config.run do |config|
      "#{ENV['SMTP_PWD']} #{ENV['EMAIL_RCP']}; " \
      "#{CFG_PATH}/setup_credentials.sh #{USER} " \
      "#{ENV['REGISTRY_USER']} #{ENV['REGISTRY_PWD']}; "
    # Install docker dependencies
    pkg_cmd << "apt-get install -q -y python-software-properties; " \
      "add-apt-repository -y ppa:dotcloud/docker-golang/ubuntu; apt-get update -qq; " \
      "DEBIAN_FRONTEND=noninteractive apt-get install -q -y lxc git mercurial golang-stable aufs-tools make; "
    # Activate new kernel
    # Install docker and testing dependencies
    pkg_cmd << "curl -s https://go.googlecode.com/files/go1.1.2.linux-amd64.tar.gz | " \
      " tar -v -C /usr/local -xz; ln -s /usr/local/go/bin/go /usr/bin/go; " \
      "curl -s https://phantomjs.googlecode.com/files/phantomjs-1.9.1-linux-x86_64.tar.bz2 | " \
      " tar jx -C /usr/bin --strip-components=2 phantomjs-1.9.1-linux-x86_64/bin/phantomjs; " \
      "DEBIAN_FRONTEND=noninteractive apt-get install -q -y lxc git mercurial aufs-tools make libfontconfig; " \
      "export GOPATH=/data/docker-dependencies; go get -d github.com/dotcloud/docker; " \
      "rm -rf ${GOPATH}/src/github.com/dotcloud/docker; "
    # Activate new kernel options
    pkg_cmd << "shutdown -r +1; "
    config.vm.provision :shell, :inline => pkg_cmd
  end

testing/buildbot/github.py (new file, 169 lines)
@ -0,0 +1,169 @@
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members

#!/usr/bin/env python
"""
github_buildbot.py is based on git_buildbot.py

github_buildbot.py will determine the repository information from the JSON
HTTP POST it receives from github.com and build the appropriate repository.
If your github repository is private, you must add an ssh key to the github
repository for the user who initiated the build on the buildslave.

"""

import re
import datetime
from twisted.python import log
import calendar

try:
    import json
    assert json
except ImportError:
    import simplejson as json

# python is silly about how it handles timezones
class fixedOffset(datetime.tzinfo):
    """
    fixed offset timezone
    """
    def __init__(self, minutes, hours, offsetSign = 1):
        self.minutes = int(minutes) * offsetSign
        self.hours = int(hours) * offsetSign
        self.offset = datetime.timedelta(minutes = self.minutes,
                                         hours = self.hours)

    def utcoffset(self, dt):
        return self.offset

    def dst(self, dt):
        return datetime.timedelta(0)

def convertTime(myTestTimestamp):
    #"1970-01-01T00:00:00+00:00"
    # Normalize myTestTimestamp
    if myTestTimestamp[-1] == 'Z':
        myTestTimestamp = myTestTimestamp[:-1] + '-00:00'
    matcher = re.compile(r'(\d\d\d\d)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)([-+])(\d\d):(\d\d)')
    result = matcher.match(myTestTimestamp)
    (year, month, day, hour, minute, second, offsetsign, houroffset, minoffset) = \
        result.groups()
    if offsetsign == '+':
        offsetsign = 1
    else:
        offsetsign = -1

    offsetTimezone = fixedOffset( minoffset, houroffset, offsetsign )
    myDatetime = datetime.datetime( int(year),
                                    int(month),
                                    int(day),
                                    int(hour),
                                    int(minute),
                                    int(second),
                                    0,
                                    offsetTimezone)
    return calendar.timegm( myDatetime.utctimetuple() )

def getChanges(request, options = None):
    """
    Responds only to POST events and starts the build process

    :arguments:
        request
            the http request object
    """
    payload = json.loads(request.args['payload'][0])
    if 'pull_request' in payload:
        user = payload['repository']['owner']['login']
        repo = payload['repository']['name']
        repo_url = payload['repository']['html_url']
    else:
        user = payload['repository']['owner']['name']
        repo = payload['repository']['name']
        repo_url = payload['repository']['url']
    project = request.args.get('project', None)
    if project:
        project = project[0]
    elif project is None:
        project = ''
    # This field is unused:
    #private = payload['repository']['private']
    changes = process_change(payload, user, repo, repo_url, project)
    log.msg("Received %s changes from github" % len(changes))
    return (changes, 'git')

def process_change(payload, user, repo, repo_url, project):
    """
    Consumes the JSON as a python object and actually starts the build.

    :arguments:
        payload
            Python Object that represents the JSON sent by GitHub Service
            Hook.
    """
    changes = []

    newrev = payload['after'] if 'after' in payload else payload['pull_request']['head']['sha']
    refname = payload['ref'] if 'ref' in payload else payload['pull_request']['head']['ref']

    # We only care about regular heads, i.e. branches
    match = re.match(r"^(refs\/heads\/|)([^/]+)$", refname)
    if not match:
        log.msg("Ignoring refname `%s': Not a branch" % refname)
        return []

    branch = match.groups()[1]
    if re.match(r"^0*$", newrev):
        log.msg("Branch `%s' deleted, ignoring" % branch)
        return []
    else:
        if 'pull_request' in payload:
            changes = [{
                'category' : 'github_pullrequest',
                'who' : user,
                'files' : [],
                'comments' : payload['pull_request']['title'],
                'revision' : newrev,
                'when' : convertTime(payload['pull_request']['updated_at']),
                'branch' : branch,
                'revlink' : '{0}/commit/{1}'.format(repo_url,newrev),
                'repository' : repo_url,
                'project' : project }]
            return changes
        for commit in payload['commits']:
            files = []
            if 'added' in commit:
                files.extend(commit['added'])
            if 'modified' in commit:
                files.extend(commit['modified'])
            if 'removed' in commit:
                files.extend(commit['removed'])
            when = convertTime( commit['timestamp'])
            log.msg("New revision: %s" % commit['id'][:8])
            chdict = dict(
                who = commit['author']['name']
                    + " <" + commit['author']['email'] + ">",
                files = files,
                comments = commit['message'],
                revision = commit['id'],
                when = when,
                branch = branch,
                revlink = commit['url'],
                repository = repo_url,
                project = project)
            changes.append(chdict)
        return changes
@ -22,7 +22,7 @@ GITHUB_DOCKER = 'github.com/dotcloud/docker'
BUILDBOT_PATH = '/data/buildbot'
DOCKER_PATH = '/data/docker'
BUILDER_PATH = '/data/buildbot/slave/{0}/build'.format(BUILDER_NAME)
DOCKER_BUILD_PATH = BUILDER_PATH + '/src/github.com/dotcloud/docker'
PULL_REQUEST_PATH = '/data/buildbot/slave/pullrequest/build'

# Credentials set by setup.sh and Vagrantfile
BUILDBOT_PWD = ''

@ -45,24 +45,40 @@ c['slavePortnum'] = PORT_MASTER

# Schedulers
c['schedulers'] = [ForceScheduler(name='trigger', builderNames=[BUILDER_NAME,
    'registry','coverage'])]
    'index','registry','coverage'])]
c['schedulers'] += [SingleBranchScheduler(name="all",
    change_filter=filter.ChangeFilter(branch='master'), treeStableTimer=None,
    builderNames=[BUILDER_NAME])]
c['schedulers'] += [Nightly(name='daily', branch=None, builderNames=['coverage','registry'],
c['schedulers'] += [SingleBranchScheduler(name='pullrequest',
    change_filter=filter.ChangeFilter(category='github_pullrequest'), treeStableTimer=None,
    builderNames=['pullrequest'])]
c['schedulers'] += [Nightly(name='daily', branch=None, builderNames=['coverage'],
    hour=0, minute=30)]

c['schedulers'] += [Nightly(name='every4hrs', branch=None, builderNames=['registry','index'],
    hour=range(0,24,4), minute=15)]

# Builders
# Docker commit test
factory = BuildFactory()
factory.addStep(ShellCommand(description='Docker',logEnviron=False,usePTY=True,
    command=["sh", "-c", Interpolate("cd ..; rm -rf build; export GOPATH={0}; "
    "go get -d {1}; cd {2}; git reset --hard %(src::revision:-unknown)s; "
    "go test -v".format(BUILDER_PATH,GITHUB_DOCKER,DOCKER_BUILD_PATH))]))
    command=["sh", "-c", Interpolate("cd ..; rm -rf build; mkdir build; "
    "cp -r {2}-dependencies/src {0}; export GOPATH={0}; go get {3}; cd {1}; "
    "git reset --hard %(src::revision)s; go test -v".format(
    BUILDER_PATH, BUILDER_PATH+'/src/'+GITHUB_DOCKER, DOCKER_PATH, GITHUB_DOCKER))]))
c['builders'] = [BuilderConfig(name=BUILDER_NAME,slavenames=['buildworker'],
    factory=factory)]

# Docker pull request test
factory = BuildFactory()
factory.addStep(ShellCommand(description='pull_request',logEnviron=False,usePTY=True,
    command=["sh", "-c", Interpolate("cd ..; rm -rf build; mkdir build; "
    "cp -r {2}-dependencies/src {0}; export GOPATH={0}; go get {3}; cd {1}; "
    "git fetch %(src::repository)s %(src::branch)s:PR-%(src::branch)s; "
    "git checkout %(src::revision)s; git rebase master; go test -v".format(
    PULL_REQUEST_PATH, PULL_REQUEST_PATH+'/src/'+GITHUB_DOCKER, DOCKER_PATH, GITHUB_DOCKER))]))
c['builders'] += [BuilderConfig(name='pullrequest',slavenames=['buildworker'],
    factory=factory)]

# Docker coverage test
coverage_cmd = ('GOPATH=`pwd` go get -d github.com/dotcloud/docker\n'
    'GOPATH=`pwd` go get github.com/axw/gocov/gocov\n'

@ -75,16 +91,24 @@ factory.addStep(ShellCommand(description='Coverage',logEnviron=False,usePTY=True
c['builders'] += [BuilderConfig(name='coverage',slavenames=['buildworker'],
    factory=factory)]

# Registry Functionaltest builder
# Registry functional test
factory = BuildFactory()
factory.addStep(ShellCommand(description='registry', logEnviron=False,
    command='. {0}/master/credentials.cfg; '
    '{1}/testing/functionaltests/test_registry.sh'.format(BUILDBOT_PATH,
    DOCKER_PATH), usePTY=True))

c['builders'] += [BuilderConfig(name='registry',slavenames=['buildworker'],
    factory=factory)]

# Index functional test
factory = BuildFactory()
factory.addStep(ShellCommand(description='index', logEnviron=False,
    command='. {0}/master/credentials.cfg; '
    '{1}/testing/functionaltests/test_index.py'.format(BUILDBOT_PATH,
    DOCKER_PATH), usePTY=True))
c['builders'] += [BuilderConfig(name='index',slavenames=['buildworker'],
    factory=factory)]


# Status
authz_cfg = authz.Authz(auth=auth.BasicAuth([(TEST_USER, TEST_PWD)]),

@ -5,3 +5,5 @@ buildbot_slave==0.8.7p1
nose==1.2.1
requests==1.1.0
flask==0.10.1
simplejson==2.3.2
selenium==2.35.0

@ -36,6 +36,9 @@ run "sed -i -E 's#(SMTP_PWD = ).+#\1\"$SMTP_PWD\"#' master/master.cfg"
run "sed -i -E 's#(EMAIL_RCP = ).+#\1\"$EMAIL_RCP\"#' master/master.cfg"
run "buildslave create-slave slave $SLAVE_SOCKET $SLAVE_NAME $BUILDBOT_PWD"

# Patch github webstatus to capture pull requests
cp $CFG_PATH/github.py /usr/local/lib/python2.7/dist-packages/buildbot/status/web/hooks

# Allow buildbot subprocesses (docker tests) to properly run in containers,
# in particular with docker -u
run "sed -i 's/^umask = None/umask = 000/' slave/buildbot.tac"
testing/functionaltests/test_index.py (new executable file, 61 lines)
@ -0,0 +1,61 @@
#!/usr/bin/python

import os
username, password = os.environ['DOCKER_CREDS'].split(':')

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException, NoAlertPresentException
import unittest, time, re

class Docker(unittest.TestCase):
    def setUp(self):
        self.driver = webdriver.PhantomJS()
        self.driver.implicitly_wait(30)
        self.base_url = "http://www.docker.io/"
        self.verificationErrors = []
        self.accept_next_alert = True

    def test_docker(self):
        driver = self.driver
        print "Login into {0} as login user {1} ...".format(self.base_url,username)
        driver.get(self.base_url + "/")
        driver.find_element_by_link_text("INDEX").click()
        driver.find_element_by_link_text("login").click()
        driver.find_element_by_id("id_username").send_keys(username)
        driver.find_element_by_id("id_password").send_keys(password)
        print "Checking login user ..."
        driver.find_element_by_css_selector("input[type=\"submit\"]").click()
        try: self.assertEqual("test", driver.find_element_by_css_selector("h3").text)
        except AssertionError as e: self.verificationErrors.append(str(e))
        print "Login user {0} found".format(username)

    def is_element_present(self, how, what):
        try: self.driver.find_element(by=how, value=what)
        except NoSuchElementException, e: return False
        return True

    def is_alert_present(self):
        try: self.driver.switch_to_alert()
        except NoAlertPresentException, e: return False
        return True

    def close_alert_and_get_its_text(self):
        try:
            alert = self.driver.switch_to_alert()
            alert_text = alert.text
            if self.accept_next_alert:
                alert.accept()
            else:
                alert.dismiss()
            return alert_text
        finally: self.accept_next_alert = True

    def tearDown(self):
        self.driver.quit()
        self.assertEqual([], self.verificationErrors)

if __name__ == "__main__":
    unittest.main()

utils/utils.go (161 changes)
@ -14,7 +14,6 @@ import (
    "net/http"
    "os"
    "os/exec"
    "os/user"
    "path/filepath"
    "runtime"
    "strconv"

@ -293,6 +292,7 @@ func (w *WriteBroadcaster) Write(p []byte) (n int, err error) {
                continue
            }
            lp = append(lp, b...)
            lp = append(lp, '\n')
        }
    }
    if n, err := sw.wc.Write(lp); err != nil || n != len(lp) {

@ -780,21 +780,37 @@ func GetResolvConf() ([]byte, error) {
// CheckLocalDns looks into the /etc/resolv.conf and
// returns true if there is a local nameserver or if there is no nameserver.
func CheckLocalDns(resolvConf []byte) bool {
    if !bytes.Contains(resolvConf, []byte("nameserver")) {
    var parsedResolvConf = StripComments(resolvConf, []byte("#"))
    if !bytes.Contains(parsedResolvConf, []byte("nameserver")) {
        return true
    }

    for _, ip := range [][]byte{
        []byte("127.0.0.1"),
        []byte("127.0.1.1"),
    } {
        if bytes.Contains(resolvConf, ip) {
        if bytes.Contains(parsedResolvConf, ip) {
            return true
        }
    }
    return false
}

// StripComments parses input into lines and strips away comments.
func StripComments(input []byte, commentMarker []byte) []byte {
    lines := bytes.Split(input, []byte("\n"))
    var output []byte
    for _, currentLine := range lines {
        var commentIndex = bytes.Index(currentLine, commentMarker)
        if commentIndex == -1 {
            output = append(output, currentLine...)
        } else {
            output = append(output, currentLine[:commentIndex]...)
        }
        output = append(output, []byte("\n")...)
    }
    return output
}

|
||||
if strings.HasPrefix(addr, "unix://") {
|
||||
return addr
|
||||
|
@ -850,10 +866,18 @@ func ParseRepositoryTag(repos string) (string, string) {
|
|||
return repos, ""
|
||||
}
|
||||
|
||||
type User struct {
|
||||
Uid string // user id
|
||||
Gid string // primary group id
|
||||
Username string
|
||||
Name string
|
||||
HomeDir string
|
||||
}
|
||||
|
||||
// UserLookup check if the given username or uid is present in /etc/passwd
|
||||
// and returns the user struct.
|
||||
// If the username is not found, an error is returned.
|
||||
func UserLookup(uid string) (*user.User, error) {
|
||||
func UserLookup(uid string) (*User, error) {
|
||||
file, err := ioutil.ReadFile("/etc/passwd")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -861,7 +885,7 @@ func UserLookup(uid string) (*user.User, error) {
|
|||
for _, line := range strings.Split(string(file), "\n") {
|
||||
data := strings.Split(line, ":")
|
||||
if len(data) > 5 && (data[0] == uid || data[2] == uid) {
|
||||
return &user.User{
|
||||
return &User{
|
||||
Uid: data[2],
|
||||
Gid: data[3],
|
||||
Username: data[0],
|
||||
|
@ -872,3 +896,128 @@ func UserLookup(uid string) (*user.User, error) {
|
|||
}
|
||||
return nil, fmt.Errorf("User not found in /etc/passwd")
|
||||
}
|
||||
|
||||
type DependencyGraph struct {
|
||||
nodes map[string]*DependencyNode
|
||||
}
|
||||
|
||||
type DependencyNode struct {
|
||||
id string
|
||||
deps map[*DependencyNode]bool
|
||||
}
|
||||
|
||||
func NewDependencyGraph() DependencyGraph {
|
||||
return DependencyGraph{
|
||||
nodes: map[string]*DependencyNode{},
|
||||
}
|
||||
}
|
||||
|
||||
func (graph *DependencyGraph) addNode(node *DependencyNode) string {
|
||||
if graph.nodes[node.id] == nil {
|
||||
graph.nodes[node.id] = node
|
||||
}
|
||||
return node.id
|
||||
}
|
||||
|
||||
func (graph *DependencyGraph) NewNode(id string) string {
|
||||
if graph.nodes[id] != nil {
|
||||
return id
|
||||
}
|
||||
nd := &DependencyNode{
|
||||
id: id,
|
||||
deps: map[*DependencyNode]bool{},
|
||||
}
|
||||
graph.addNode(nd)
|
||||
return id
|
||||
}
|
||||
|
||||
func (graph *DependencyGraph) AddDependency(node, to string) error {
|
||||
if graph.nodes[node] == nil {
|
||||
return fmt.Errorf("Node %s does not belong to this graph", node)
|
||||
}
|
||||
|
||||
if graph.nodes[to] == nil {
|
||||
return fmt.Errorf("Node %s does not belong to this graph", to)
|
||||
}
|
||||
|
||||
if node == to {
|
||||
return fmt.Errorf("Dependency loops are forbidden!")
|
||||
}
|
||||
|
||||
graph.nodes[node].addDependency(graph.nodes[to])
|
||||
return nil
|
||||
}
|
||||
|
||||
func (node *DependencyNode) addDependency(to *DependencyNode) bool {
|
||||
node.deps[to] = true
|
||||
return node.deps[to]
|
||||
}
|
||||
|
||||
func (node *DependencyNode) Degree() int {
|
||||
return len(node.deps)
|
||||
}
|
||||
|
||||
// The magic happens here ::
|
||||
func (graph *DependencyGraph) GenerateTraversalMap() ([][]string, error) {
|
||||
Debugf("Generating traversal map. Nodes: %d", len(graph.nodes))
|
||||
result := [][]string{}
|
||||
processed := map[*DependencyNode]bool{}
|
||||
// As long as we haven't processed all nodes...
|
||||
for len(processed) < len(graph.nodes) {
|
||||
// Use a temporary buffer for processed nodes, otherwise
|
||||
// nodes that depend on each other could end up in the same round.
|
||||
tmp_processed := []*DependencyNode{}
|
||||
for _, node := range graph.nodes {
|
||||
// If the node has more dependencies than what we have cleared,
|
||||
// it won't be valid for this round.
|
||||
if node.Degree() > len(processed) {
|
||||
continue
|
||||
}
|
||||
// If it's already processed, get to the next one
|
||||
if processed[node] {
|
||||
continue
|
||||
}
|
||||
// It's not been processed yet and has 0 deps. Add it!
|
||||
// (this is a shortcut for what we're doing below)
|
||||
if node.Degree() == 0 {
|
||||
tmp_processed = append(tmp_processed, node)
|
||||
continue
|
||||
}
|
||||
// If at least one dep hasn't been processed yet, we can't
|
||||
// add it.
|
||||
ok := true
|
||||
for dep := range node.deps {
|
||||
if !processed[dep] {
|
||||
ok = false
|
||||
break
|
||||
}
|
||||
}
|
||||
// All deps have already been processed. Add it!
|
||||
if ok {
|
||||
tmp_processed = append(tmp_processed, node)
|
||||
}
|
||||
}
|
||||
Debugf("Round %d: found %d available nodes", len(result), len(tmp_processed))
|
||||
// If no progress has been made this round,
|
||||
// that means we have circular dependencies.
|
||||
if len(tmp_processed) == 0 {
|
||||
return nil, fmt.Errorf("Could not find a solution to this dependency graph")
|
||||
}
|
||||
round := []string{}
|
||||
for _, nd := range tmp_processed {
|
||||
round = append(round, nd.id)
|
||||
processed[nd] = true
|
||||
}
|
||||
result = append(result, round)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// An StatusError reports an unsuccessful exit by a command.
|
||||
type StatusError struct {
|
||||
Status int
|
||||
}
|
||||
|
||||
func (e *StatusError) Error() string {
|
||||
return fmt.Sprintf("Status: %d", e.Status)
|
||||
}
|
||||
|
|
|
@ -323,6 +323,16 @@ func TestCheckLocalDns(t *testing.T) {
|
|||
nameserver 10.0.2.3
|
||||
search dotcloud.net`: false,
|
||||
`# Dynamic
|
||||
#nameserver 127.0.0.1
|
||||
nameserver 10.0.2.3
|
||||
search dotcloud.net`: false,
|
||||
`# Dynamic
|
||||
nameserver 10.0.2.3 #not used 127.0.1.1
|
||||
search dotcloud.net`: false,
|
||||
`# Dynamic
|
||||
#nameserver 10.0.2.3
|
||||
#search dotcloud.net`: true,
|
||||
`# Dynamic
|
||||
nameserver 127.0.0.1
|
||||
search dotcloud.net`: true,
|
||||
`# Dynamic
|
||||
|
@ -355,3 +365,60 @@ func TestParseRelease(t *testing.T) {
|
|||
assertParseRelease(t, "3.4.54.longterm-1", &KernelVersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: "1"}, 0)
|
||||
assertParseRelease(t, "3.8.0-19-generic", &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "19-generic"}, 0)
|
||||
}
|
||||
|
||||
|
||||
func TestDependencyGraphCircular(t *testing.T) {
|
||||
g1 := NewDependencyGraph()
|
||||
a := g1.NewNode("a")
|
||||
b := g1.NewNode("b")
|
||||
g1.AddDependency(a, b)
|
||||
g1.AddDependency(b, a)
|
||||
res, err := g1.GenerateTraversalMap()
|
||||
if res != nil {
|
||||
t.Fatalf("Expected nil result")
|
||||
}
|
||||
if err == nil {
|
||||
t.Fatalf("Expected error (circular graph can not be resolved)")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDependencyGraph(t *testing.T) {
|
||||
g1 := NewDependencyGraph()
|
||||
a := g1.NewNode("a")
|
||||
b := g1.NewNode("b")
|
||||
c := g1.NewNode("c")
|
||||
d := g1.NewNode("d")
|
||||
g1.AddDependency(b, a)
|
||||
g1.AddDependency(c, a)
|
||||
g1.AddDependency(d, c)
|
||||
g1.AddDependency(d, b)
|
||||
res, err := g1.GenerateTraversalMap()
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("%s", err)
|
||||
}
|
||||
|
||||
if res == nil {
|
||||
t.Fatalf("Unexpected nil result")
|
||||
}
|
||||
|
||||
if len(res) != 3 {
|
||||
t.Fatalf("Expected map of length 3, found %d instead", len(res))
|
||||
}
|
||||
|
||||
if len(res[0]) != 1 || res[0][0] != "a" {
|
||||
t.Fatalf("Expected [a], found %v instead", res[0])
|
||||
}
|
||||
|
||||
if len(res[1]) != 2 {
|
||||
t.Fatalf("Expected 2 nodes for step 2, found %d", len(res[1]))
|
||||
}
|
||||
|
||||
if (res[1][0] != "b" && res[1][1] != "b") || (res[1][0] != "c" && res[1][1] != "c") {
|
||||
t.Fatalf("Expected [b, c], found %v instead", res[1])
|
||||
}
|
||||
|
||||
if len(res[2]) != 1 || res[2][0] != "d" {
|
||||
t.Fatalf("Expected [d], found %v instead", res[2])
|
||||
}
|
||||
}
|
|
@ -101,7 +101,7 @@ func mkContainer(r *Runtime, args []string, t *testing.T) (*Container, *HostConf
    if config.Image == "_" {
        config.Image = GetTestImage(r).ID
    }
    c, err := NewBuilder(r).Create(config)
    c, err := r.Create(config)
    if err != nil {
        return nil, nil, err
    }