commit 28b162eeb4
108 changed files with 3361 additions and 3089 deletions
CHANGELOG.md (38 changes)
@@ -1,5 +1,43 @@
 # Changelog
 
+## 0.7.2 (2013-12-16)
+
+#### Runtime
+
++ Validate container names on creation with standard regex
+* Increase maximum image depth to 127 from 42
+* Continue to move api endpoints to the job api
++ Add -bip flag to allow specification of dynamic bridge IP via CIDR
+- Allow bridge creation when ipv6 is not enabled on certain systems
+* Set hostname and IP address from within dockerinit
+* Drop capabilities from within dockerinit
+- Fix volumes on host when symlink is present in the image
+- Prevent deletion of image if ANY container is depending on it even if the container is not running
+* Update docker push to use new progress display
+* Use os.Lstat to allow mounting unix sockets when inspecting volumes
+- Adjusted handling of inactive user login
+- Add missing defines in devicemapper for older kernels
+- Allow untag operations with no container validation
+- Add auth config to docker build
+
+#### Documentation
+
+* Add more information about Docker logging
++ Add RHEL documentation
+* Add a direct example for changing the CMD that is run in a container
+* Update Arch installation documentation
++ Add section on Trusted Builds
++ Add Network documentation page
+
+#### Other
+
++ Add new cover bundle for providing code coverage reporting
+* Separate integration tests in bundles
+* Make Tianon the hack maintainer
+* Update mkimage-debootstrap with more tweaks for keeping images small
+* Use https to get the install script
+* Remove vendored dotcloud/tar now that Go 1.2 has been released
+
 ## 0.7.1 (2013-12-05)
 
 #### Documentation
Dockerfile
@@ -65,6 +65,9 @@ RUN git clone https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /u
 RUN cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper
 # see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL
 
+# Grab Go's cover tool for dead-simple code coverage testing
+RUN go get code.google.com/p/go.tools/cmd/cover
+
 VOLUME /var/lib/docker
 WORKDIR /go/src/github.com/dotcloud/docker
Makefile (6 changes)
@@ -1,4 +1,4 @@
-.PHONY: all binary build default doc shell test
+.PHONY: all binary build default docs shell test
 
 DOCKER_RUN_DOCKER := docker run -rm -i -t -privileged -e TESTFLAGS -v $(CURDIR)/bundles:/go/src/github.com/dotcloud/docker/bundles docker
@@ -10,11 +10,11 @@ all: build
 binary: build
 	$(DOCKER_RUN_DOCKER) hack/make.sh binary
 
-doc:
+docs:
 	docker build -t docker-docs docs && docker run -p 8000:8000 docker-docs
 
 test: build
-	$(DOCKER_RUN_DOCKER) hack/make.sh test
+	$(DOCKER_RUN_DOCKER) hack/make.sh test test-integration
 
 shell: build
 	$(DOCKER_RUN_DOCKER) bash
VERSION (2 changes)
@@ -1 +1 @@
-0.7.1
+0.7.2
Vagrantfile (vendored, 7 changes)
@@ -70,7 +70,7 @@ SCRIPT
 # trigger dkms to build the virtualbox guest module install.
 $vbox_script = <<VBOX_SCRIPT + $script
 # Install the VirtualBox guest additions if they aren't already installed.
-if [ ! -d /opt/VBoxGuestAdditions-4.3.2/ ]; then
+if [ ! -d /opt/VBoxGuestAdditions-4.3.4/ ]; then
 # Update remote package metadata. 'apt-get update' is idempotent.
 apt-get update -q
@@ -79,9 +79,10 @@ if [ ! -d /opt/VBoxGuestAdditions-4.3.2/ ]; then
 apt-get install -q -y linux-headers-generic-lts-raring dkms
 
 echo 'Downloading VBox Guest Additions...'
-wget -cq http://dlc.sun.com.edgesuite.net/virtualbox/4.3.2/VBoxGuestAdditions_4.3.2.iso
+wget -cq http://dlc.sun.com.edgesuite.net/virtualbox/4.3.4/VBoxGuestAdditions_4.3.4.iso
+echo "f120793fa35050a8280eacf9c930cf8d9b88795161520f6515c0cc5edda2fe8a VBoxGuestAdditions_4.3.4.iso" | sha256sum --check || exit 1
 
-mount -o loop,ro /home/vagrant/VBoxGuestAdditions_4.3.2.iso /mnt
+mount -o loop,ro /home/vagrant/VBoxGuestAdditions_4.3.4.iso /mnt
 /mnt/VBoxLinuxAdditions.run --nox11
 umount /mnt
 fi
api.go (123 changes)
@@ -140,7 +140,8 @@ func postAuth(srv *Server, version float64, w http.ResponseWriter, r *http.Reque
 }
 
 func getVersion(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	return writeJSON(w, http.StatusOK, srv.DockerVersion())
+	srv.Eng.ServeHTTP(w, r)
+	return nil
 }
 
 func postContainersKill(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@@ -150,19 +151,11 @@ func postContainersKill(srv *Server, version float64, w http.ResponseWriter, r *
 	if err := parseForm(r); err != nil {
 		return err
 	}
-	name := vars["name"]
-
-	signal := 0
-	if r != nil {
-		if s := r.Form.Get("signal"); s != "" {
-			s, err := strconv.Atoi(s)
-			if err != nil {
-				return err
-			}
-			signal = s
-		}
-	}
-	if err := srv.ContainerKill(name, signal); err != nil {
+	job := srv.Eng.Job("kill", vars["name"])
+	if sig := r.Form.Get("signal"); sig != "" {
+		job.Args = append(job.Args, sig)
+	}
+	if err := job.Run(); err != nil {
 		return err
 	}
 	w.WriteHeader(http.StatusNoContent)
@@ -173,10 +166,11 @@ func getContainersExport(srv *Server, version float64, w http.ResponseWriter, r
 	if vars == nil {
 		return fmt.Errorf("Missing parameter")
 	}
-	name := vars["name"]
-
-	if err := srv.ContainerExport(name, w); err != nil {
-		utils.Errorf("%s", err)
+	job := srv.Eng.Job("export", vars["name"])
+	if err := job.Stdout.Add(w); err != nil {
+		return err
+	}
+	if err := job.Run(); err != nil {
 		return err
 	}
 	return nil
@@ -222,7 +216,8 @@ func getImagesViz(srv *Server, version float64, w http.ResponseWriter, r *http.R
 }
 
 func getInfo(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	return writeJSON(w, http.StatusOK, srv.DockerInfo())
+	srv.Eng.ServeHTTP(w, r)
+	return nil
 }
 
 func getEvents(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@@ -362,18 +357,13 @@ func postImagesTag(srv *Server, version float64, w http.ResponseWriter, r *http.
 	if err := parseForm(r); err != nil {
 		return err
 	}
-	repo := r.Form.Get("repo")
-	tag := r.Form.Get("tag")
 	if vars == nil {
 		return fmt.Errorf("Missing parameter")
 	}
-	name := vars["name"]
-	force, err := getBoolParam(r.Form.Get("force"))
-	if err != nil {
-		return err
-	}
-
-	if err := srv.ContainerTag(name, repo, tag, force); err != nil {
+	job := srv.Eng.Job("tag", vars["name"], r.Form.Get("repo"), r.Form.Get("tag"))
+	job.Setenv("force", r.Form.Get("force"))
+	if err := job.Run(); err != nil {
 		return err
 	}
 	w.WriteHeader(http.StatusCreated)
@@ -388,13 +378,17 @@ func postCommit(srv *Server, version float64, w http.ResponseWriter, r *http.Req
 	if err := json.NewDecoder(r.Body).Decode(config); err != nil && err != io.EOF {
 		utils.Errorf("%s", err)
 	}
-	repo := r.Form.Get("repo")
-	tag := r.Form.Get("tag")
-	container := r.Form.Get("container")
-	author := r.Form.Get("author")
-	comment := r.Form.Get("comment")
-	id, err := srv.ContainerCommit(container, repo, tag, author, comment, config)
-	if err != nil {
+
+	job := srv.Eng.Job("commit", r.Form.Get("container"))
+	job.Setenv("repo", r.Form.Get("repo"))
+	job.Setenv("tag", r.Form.Get("tag"))
+	job.Setenv("author", r.Form.Get("author"))
+	job.Setenv("comment", r.Form.Get("comment"))
+	job.SetenvJson("config", config)
+
+	var id string
+	job.Stdout.AddString(&id)
+	if err := job.Run(); err != nil {
 		return err
 	}
@@ -689,17 +683,12 @@ func postContainersStop(srv *Server, version float64, w http.ResponseWriter, r *
 	if err := parseForm(r); err != nil {
 		return err
 	}
-	t, err := strconv.Atoi(r.Form.Get("t"))
-	if err != nil || t < 0 {
-		t = 10
-	}
-
 	if vars == nil {
 		return fmt.Errorf("Missing parameter")
 	}
-	name := vars["name"]
-
-	if err := srv.ContainerStop(name, t); err != nil {
+	job := srv.Eng.Job("stop", vars["name"])
+	job.Setenv("t", r.Form.Get("t"))
+	if err := job.Run(); err != nil {
 		return err
 	}
 	w.WriteHeader(http.StatusNoContent)
@@ -710,33 +699,28 @@ func postContainersWait(srv *Server, version float64, w http.ResponseWriter, r *
 	if vars == nil {
 		return fmt.Errorf("Missing parameter")
 	}
-	name := vars["name"]
-
-	status, err := srv.ContainerWait(name)
+	job := srv.Eng.Job("wait", vars["name"])
+	var statusStr string
+	job.Stdout.AddString(&statusStr)
+	if err := job.Run(); err != nil {
+		return err
+	}
+	// Parse a 16-bit encoded integer to map typical unix exit status.
+	status, err := strconv.ParseInt(statusStr, 10, 16)
 	if err != nil {
 		return err
 	}
-	return writeJSON(w, http.StatusOK, &APIWait{StatusCode: status})
+	return writeJSON(w, http.StatusOK, &APIWait{StatusCode: int(status)})
 }
 
 func postContainersResize(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if err := parseForm(r); err != nil {
 		return err
 	}
-	height, err := strconv.Atoi(r.Form.Get("h"))
-	if err != nil {
-		return err
-	}
-	width, err := strconv.Atoi(r.Form.Get("w"))
-	if err != nil {
-		return err
-	}
 	if vars == nil {
 		return fmt.Errorf("Missing parameter")
 	}
-	name := vars["name"]
-	if err := srv.ContainerResize(name, height, width); err != nil {
+	if err := srv.Eng.Job("resize", vars["name"], r.Form.Get("h"), r.Form.Get("w")).Run(); err != nil {
 		return err
 	}
 	return nil
@@ -905,12 +889,25 @@ func postBuild(srv *Server, version float64, w http.ResponseWriter, r *http.Requ
 	if version < 1.3 {
 		return fmt.Errorf("Multipart upload for build is no longer supported. Please upgrade your docker client.")
 	}
-	remoteURL := r.FormValue("remote")
-	repoName := r.FormValue("t")
-	rawSuppressOutput := r.FormValue("q")
-	rawNoCache := r.FormValue("nocache")
-	rawRm := r.FormValue("rm")
-	repoName, tag := utils.ParseRepositoryTag(repoName)
+	var (
+		remoteURL         = r.FormValue("remote")
+		repoName          = r.FormValue("t")
+		rawSuppressOutput = r.FormValue("q")
+		rawNoCache        = r.FormValue("nocache")
+		rawRm             = r.FormValue("rm")
+		authEncoded       = r.Header.Get("X-Registry-Auth")
+		authConfig        = &auth.AuthConfig{}
+		tag               string
+	)
+	repoName, tag = utils.ParseRepositoryTag(repoName)
+	if authEncoded != "" {
+		authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
+		if err := json.NewDecoder(authJson).Decode(authConfig); err != nil {
+			// for a pull it is not an error if no auth was given
+			// to increase compatibility with the existing api it is defaulting to be empty
+			authConfig = &auth.AuthConfig{}
+		}
+	}
 
 	var context io.Reader
@@ -978,7 +975,7 @@ func postBuild(srv *Server, version float64, w http.ResponseWriter, r *http.Requ
 		Writer:          utils.NewWriteFlusher(w),
 		StreamFormatter: sf,
 	},
-		!suppressOutput, !noCache, rm, utils.NewWriteFlusher(w), sf)
+		!suppressOutput, !noCache, rm, utils.NewWriteFlusher(w), sf, authConfig)
 	id, err := b.Build(context)
 	if err != nil {
 		if sf.Used() {
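For context, the recurring shape in these api.go hunks — build a named job, feed inputs through args or env, attach stdout, then Run — reduced to a self-contained Go sketch. The types below are illustrative stand-ins, not the real engine package or its API:

    package main

    import "fmt"

    // Job is a toy version of the pattern: named work unit with string
    // args, string env, and an optional stdout destination.
    type Job struct {
        Name string
        Args []string
        env  map[string]string
        out  *string
    }

    func NewJob(name string, args ...string) *Job {
        return &Job{Name: name, Args: args, env: map[string]string{}}
    }

    func (j *Job) Setenv(k, v string)        { j.env[k] = v }
    func (j *Job) StdoutAddString(s *string) { j.out = s }

    func (j *Job) Run() error {
        // A real engine dispatches to a handler registered under j.Name;
        // here we just pretend a "wait"-style job wrote an exit status.
        if j.out != nil {
            *j.out = "0"
        }
        fmt.Printf("ran job %s(%v) env=%v\n", j.Name, j.Args, j.env)
        return nil
    }

    func main() {
        job := NewJob("stop", "my-container")
        job.Setenv("t", "10")
        var status string
        job.StdoutAddString(&status)
        if err := job.Run(); err != nil {
            panic(err)
        }
        fmt.Println("status:", status)
    }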
api_params.go
@@ -29,23 +29,6 @@ type (
 		VirtualSize int64
 	}
 
-	APIInfo struct {
-		Debug              bool
-		Containers         int
-		Images             int
-		Driver             string      `json:",omitempty"`
-		DriverStatus       [][2]string `json:",omitempty"`
-		NFd                int         `json:",omitempty"`
-		NGoroutines        int         `json:",omitempty"`
-		MemoryLimit        bool        `json:",omitempty"`
-		SwapLimit          bool        `json:",omitempty"`
-		IPv4Forwarding     bool        `json:",omitempty"`
-		LXCVersion         string      `json:",omitempty"`
-		NEventsListener    int         `json:",omitempty"`
-		KernelVersion      string      `json:",omitempty"`
-		IndexServerAddress string      `json:",omitempty"`
-	}
-
 	APITop struct {
 		Titles    []string
 		Processes [][]string
@@ -95,12 +78,6 @@ type (
 		IP string
 	}
 
-	APIVersion struct {
-		Version   string
-		GitCommit string `json:",omitempty"`
-		GoVersion string `json:",omitempty"`
-	}
-
 	APIWait struct {
 		StatusCode int
 	}
auth/auth.go (17 changes)
@@ -192,13 +192,6 @@ func Login(authConfig *AuthConfig, factory *utils.HTTPRequestFactory) (string, e
 		} else {
 			status = "Account created. Please see the documentation of the registry " + serverAddress + " for instructions how to activate it."
 		}
-	} else if reqStatusCode == 403 {
-		if loginAgainstOfficialIndex {
-			return "", fmt.Errorf("Login: Your account hasn't been activated. " +
-				"Please check your e-mail for a confirmation link.")
-		}
-		return "", fmt.Errorf("Login: Your account hasn't been activated. " +
-			"Please see the documentation of the registry " + serverAddress + " for instructions how to activate it.")
 	} else if reqStatusCode == 400 {
 		if string(reqBody) == "\"Username or email already exists\"" {
 			req, err := factory.NewRequest("GET", serverAddress+"users/", nil)
@@ -216,9 +209,13 @@ func Login(authConfig *AuthConfig, factory *utils.HTTPRequestFactory) (string, e
 				status = "Login Succeeded"
 			} else if resp.StatusCode == 401 {
 				return "", fmt.Errorf("Wrong login/password, please try again")
+			} else if resp.StatusCode == 403 {
+				if loginAgainstOfficialIndex {
+					return "", fmt.Errorf("Login: Account is not Active. Please check your e-mail for a confirmation link.")
+				}
+				return "", fmt.Errorf("Login: Account is not Active. Please see the documentation of the registry %s for instructions how to activate it.", serverAddress)
 			} else {
-				return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body,
-					resp.StatusCode, resp.Header)
+				return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body, resp.StatusCode, resp.Header)
 			}
 		} else {
 			return "", fmt.Errorf("Registration: %s", reqBody)
@@ -236,7 +233,7 @@ func Login(authConfig *AuthConfig, factory *utils.HTTPRequestFactory) (string, e
 			body, err := ioutil.ReadAll(resp.Body)
 			if err != nil {
 				return "", err
 			}
 		}
 			if resp.StatusCode == 200 {
 				status = "Login Succeeded"
 			} else if resp.StatusCode == 401 {
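The 403 branch that moves in this hunk only varies the hint shown to the user, depending on whether the login targets the official index or a private registry. A minimal restatement of that logic as a sketch:

    package main

    import "fmt"

    // activationError mirrors the diff above: same status code, different
    // guidance for the official index versus a private registry.
    func activationError(serverAddress string, loginAgainstOfficialIndex bool) error {
        if loginAgainstOfficialIndex {
            return fmt.Errorf("Login: Account is not Active. Please check your e-mail for a confirmation link.")
        }
        return fmt.Errorf("Login: Account is not Active. Please see the documentation of the registry %s for instructions how to activate it.", serverAddress)
    }

    func main() {
        fmt.Println(activationError("https://registry.example.com/", false))
    }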
buildfile.go
@@ -4,6 +4,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"github.com/dotcloud/docker/archive"
+	"github.com/dotcloud/docker/auth"
 	"github.com/dotcloud/docker/utils"
 	"io"
 	"io/ioutil"
@@ -33,6 +34,8 @@ type buildFile struct {
 	utilizeCache bool
 	rm           bool
 
+	authConfig *auth.AuthConfig
+
 	tmpContainers map[string]struct{}
 	tmpImages     map[string]struct{}
@@ -57,7 +60,7 @@ func (b *buildFile) CmdFrom(name string) error {
 	if err != nil {
 		if b.runtime.graph.IsNotExist(err) {
 			remote, tag := utils.ParseRepositoryTag(name)
-			if err := b.srv.ImagePull(remote, tag, b.outOld, b.sf, nil, nil, true); err != nil {
+			if err := b.srv.ImagePull(remote, tag, b.outOld, b.sf, b.authConfig, nil, true); err != nil {
 				return err
 			}
 			image, err = b.runtime.repositories.LookupImage(name)
@@ -568,7 +571,7 @@ func (b *buildFile) Build(context io.Reader) (string, error) {
 	return "", fmt.Errorf("An error occurred during the build\n")
 }
 
-func NewBuildFile(srv *Server, outStream, errStream io.Writer, verbose, utilizeCache, rm bool, outOld io.Writer, sf *utils.StreamFormatter) BuildFile {
+func NewBuildFile(srv *Server, outStream, errStream io.Writer, verbose, utilizeCache, rm bool, outOld io.Writer, sf *utils.StreamFormatter, auth *auth.AuthConfig) BuildFile {
 	return &buildFile{
 		runtime: srv.runtime,
 		srv:     srv,
@@ -581,6 +584,7 @@ func NewBuildFile(srv *Server, outStream, errStream io.Writer, verbose, utilizeC
 		utilizeCache: utilizeCache,
 		rm:           rm,
 		sf:           sf,
+		authConfig:   auth,
 		outOld:       outOld,
 	}
 }
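The auth plumbing added across api.go, buildfile.go and commands.go is a simple round trip: the client base64-encodes a JSON auth config into the X-Registry-Auth header, and the server decodes it, tolerating absence. A self-contained sketch under that reading — AuthConfig here is a simplified stand-in for auth.AuthConfig, with illustrative fields:

    package main

    import (
        "encoding/base64"
        "encoding/json"
        "fmt"
        "strings"
    )

    // AuthConfig is a simplified stand-in for auth.AuthConfig.
    type AuthConfig struct {
        Username string `json:"username"`
        Password string `json:"password"`
    }

    // Client side (CmdBuild): marshal the config, base64-encode it into a header.
    func encodeAuth(a *AuthConfig) (string, error) {
        buf, err := json.Marshal(a)
        if err != nil {
            return "", err
        }
        return base64.URLEncoding.EncodeToString(buf), nil
    }

    // Server side (postBuild): decode, falling back to an empty config on
    // error, since an anonymous build/pull is still valid.
    func decodeAuth(header string) *AuthConfig {
        authConfig := &AuthConfig{}
        dec := base64.NewDecoder(base64.URLEncoding, strings.NewReader(header))
        if err := json.NewDecoder(dec).Decode(authConfig); err != nil {
            return &AuthConfig{}
        }
        return authConfig
    }

    func main() {
        header, _ := encodeAuth(&AuthConfig{Username: "jane", Password: "secret"})
        fmt.Printf("X-Registry-Auth: %s\n", header)
        fmt.Printf("decoded user: %s\n", decodeAuth(header).Username)
    }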
commands.go (187 changes)
@@ -11,6 +11,7 @@ import (
 	"fmt"
 	"github.com/dotcloud/docker/archive"
 	"github.com/dotcloud/docker/auth"
+	"github.com/dotcloud/docker/engine"
 	"github.com/dotcloud/docker/registry"
 	"github.com/dotcloud/docker/term"
 	"github.com/dotcloud/docker/utils"
@@ -226,6 +227,12 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
 	}
 
+	headers := http.Header(make(map[string][]string))
+	buf, err := json.Marshal(cli.configFile)
+	if err != nil {
+		return err
+	}
+	headers.Add("X-Registry-Auth", base64.URLEncoding.EncodeToString(buf))
+
 	if context != nil {
 		headers.Set("Content-Type", "application/tar")
 	}
@@ -391,26 +398,24 @@ func (cli *DockerCli) CmdVersion(args ...string) error {
 		return err
 	}
 
-	var out APIVersion
-	err = json.Unmarshal(body, &out)
+	out := engine.NewOutput()
+	remoteVersion, err := out.AddEnv()
 	if err != nil {
-		utils.Errorf("Error unmarshal: body: %s, err: %s\n", body, err)
+		utils.Errorf("Error reading remote version: %s\n", err)
 		return err
 	}
-	if out.Version != "" {
-		fmt.Fprintf(cli.out, "Server version: %s\n", out.Version)
-	}
-	if out.GitCommit != "" {
-		fmt.Fprintf(cli.out, "Git commit (server): %s\n", out.GitCommit)
-	}
-	if out.GoVersion != "" {
-		fmt.Fprintf(cli.out, "Go version (server): %s\n", out.GoVersion)
-	}
+	if _, err := out.Write(body); err != nil {
+		utils.Errorf("Error reading remote version: %s\n", err)
+		return err
+	}
+	out.Close()
+	fmt.Fprintf(cli.out, "Server version: %s\n", remoteVersion.Get("Version"))
+	fmt.Fprintf(cli.out, "Git commit (server): %s\n", remoteVersion.Get("GitCommit"))
+	fmt.Fprintf(cli.out, "Go version (server): %s\n", remoteVersion.Get("GoVersion"))
 	release := utils.GetReleaseVersion()
 	if release != "" {
 		fmt.Fprintf(cli.out, "Last stable version: %s", release)
-		if (VERSION != "" || out.Version != "") && (strings.Trim(VERSION, "-dev") != release || strings.Trim(out.Version, "-dev") != release) {
+		if (VERSION != "" || remoteVersion.Exists("Version")) && (strings.Trim(VERSION, "-dev") != release || strings.Trim(remoteVersion.Get("Version"), "-dev") != release) {
 			fmt.Fprintf(cli.out, ", please update docker")
 		}
 		fmt.Fprintf(cli.out, "\n")
@@ -434,42 +439,53 @@ func (cli *DockerCli) CmdInfo(args ...string) error {
 		return err
 	}
 
-	var out APIInfo
-	if err := json.Unmarshal(body, &out); err != nil {
+	out := engine.NewOutput()
+	remoteInfo, err := out.AddEnv()
+	if err != nil {
 		return err
 	}
-
-	fmt.Fprintf(cli.out, "Containers: %d\n", out.Containers)
-	fmt.Fprintf(cli.out, "Images: %d\n", out.Images)
-	fmt.Fprintf(cli.out, "Driver: %s\n", out.Driver)
-	for _, pair := range out.DriverStatus {
+	if _, err := out.Write(body); err != nil {
+		utils.Errorf("Error reading remote info: %s\n", err)
+		return err
+	}
+	out.Close()
+
+	fmt.Fprintf(cli.out, "Containers: %d\n", remoteInfo.GetInt("Containers"))
+	fmt.Fprintf(cli.out, "Images: %d\n", remoteInfo.GetInt("Images"))
+	fmt.Fprintf(cli.out, "Driver: %s\n", remoteInfo.Get("Driver"))
+	var driverStatus [][2]string
+	if err := remoteInfo.GetJson("DriverStatus", &driverStatus); err != nil {
+		return err
+	}
+	for _, pair := range driverStatus {
 		fmt.Fprintf(cli.out, " %s: %s\n", pair[0], pair[1])
 	}
-	if out.Debug || os.Getenv("DEBUG") != "" {
-		fmt.Fprintf(cli.out, "Debug mode (server): %v\n", out.Debug)
+	if remoteInfo.GetBool("Debug") || os.Getenv("DEBUG") != "" {
+		fmt.Fprintf(cli.out, "Debug mode (server): %v\n", remoteInfo.GetBool("Debug"))
 		fmt.Fprintf(cli.out, "Debug mode (client): %v\n", os.Getenv("DEBUG") != "")
-		fmt.Fprintf(cli.out, "Fds: %d\n", out.NFd)
-		fmt.Fprintf(cli.out, "Goroutines: %d\n", out.NGoroutines)
-		fmt.Fprintf(cli.out, "LXC Version: %s\n", out.LXCVersion)
-		fmt.Fprintf(cli.out, "EventsListeners: %d\n", out.NEventsListener)
-		fmt.Fprintf(cli.out, "Kernel Version: %s\n", out.KernelVersion)
+		fmt.Fprintf(cli.out, "Fds: %d\n", remoteInfo.GetInt("NFd"))
+		fmt.Fprintf(cli.out, "Goroutines: %d\n", remoteInfo.GetInt("NGoroutines"))
+		fmt.Fprintf(cli.out, "LXC Version: %s\n", remoteInfo.Get("LXCVersion"))
+		fmt.Fprintf(cli.out, "EventsListeners: %d\n", remoteInfo.GetInt("NEventsListener"))
+		fmt.Fprintf(cli.out, "Kernel Version: %s\n", remoteInfo.Get("KernelVersion"))
 	}
 
-	if len(out.IndexServerAddress) != 0 {
+	if len(remoteInfo.GetList("IndexServerAddress")) != 0 {
 		cli.LoadConfigFile()
-		u := cli.configFile.Configs[out.IndexServerAddress].Username
+		u := cli.configFile.Configs[remoteInfo.Get("IndexServerAddress")].Username
 		if len(u) > 0 {
 			fmt.Fprintf(cli.out, "Username: %v\n", u)
-			fmt.Fprintf(cli.out, "Registry: %v\n", out.IndexServerAddress)
+			fmt.Fprintf(cli.out, "Registry: %v\n", remoteInfo.GetList("IndexServerAddress"))
 		}
 	}
-	if !out.MemoryLimit {
+	if !remoteInfo.GetBool("MemoryLimit") {
 		fmt.Fprintf(cli.err, "WARNING: No memory limit support\n")
 	}
-	if !out.SwapLimit {
+	if !remoteInfo.GetBool("SwapLimit") {
 		fmt.Fprintf(cli.err, "WARNING: No swap limit support\n")
 	}
-	if !out.IPv4Forwarding {
+	if !remoteInfo.GetBool("IPv4Forwarding") {
 		fmt.Fprintf(cli.err, "WARNING: IPv4 forwarding is disabled.\n")
 	}
 	return nil
@@ -1102,33 +1118,9 @@ func (cli *DockerCli) CmdImages(args ...string) error {
 		return nil
 	}
 
-	if *flViz {
-		body, _, err := cli.call("GET", "/images/json?all=1", nil)
-		if err != nil {
-			return err
-		}
-
-		var outs []APIImages
-		err = json.Unmarshal(body, &outs)
-		if err != nil {
-			return err
-		}
-
-		fmt.Fprintf(cli.out, "digraph docker {\n")
-
-		for _, image := range outs {
-			if image.ParentId == "" {
-				fmt.Fprintf(cli.out, " base -> \"%s\" [style=invis]\n", utils.TruncateID(image.ID))
-			} else {
-				fmt.Fprintf(cli.out, " \"%s\" -> \"%s\"\n", utils.TruncateID(image.ParentId), utils.TruncateID(image.ID))
-			}
-			if image.RepoTags[0] != "<none>:<none>" {
-				fmt.Fprintf(cli.out, " \"%s\" [label=\"%s\\n%s\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n", utils.TruncateID(image.ID), utils.TruncateID(image.ID), strings.Join(image.RepoTags, "\\n"))
-			}
-		}
-
-		fmt.Fprintf(cli.out, " base [style=invisible]\n}\n")
-	} else if *flTree {
+	filter := cmd.Arg(0)
+	if *flViz || *flTree {
 		body, _, err := cli.call("GET", "/images/json?all=1", nil)
 		if err != nil {
 			return err
@@ -1140,8 +1132,8 @@ func (cli *DockerCli) CmdImages(args ...string) error {
 		}
 
 		var (
-			startImageArg = cmd.Arg(0)
-			startImage    APIImages
+			printNode  func(cli *DockerCli, noTrunc bool, image APIImages, prefix string)
+			startImage APIImages
 
 			roots    []APIImages
 			byParent = make(map[string][]APIImages)
@@ -1158,28 +1150,38 @@ func (cli *DockerCli) CmdImages(args ...string) error {
 			}
 		}
 
-			if startImageArg != "" {
-				if startImageArg == image.ID || startImageArg == utils.TruncateID(image.ID) {
+			if filter != "" {
+				if filter == image.ID || filter == utils.TruncateID(image.ID) {
 					startImage = image
 				}
 
 				for _, repotag := range image.RepoTags {
-					if repotag == startImageArg {
+					if repotag == filter {
 						startImage = image
 					}
 				}
 			}
 		}
 
-		if startImageArg != "" {
-			WalkTree(cli, noTrunc, []APIImages{startImage}, byParent, "")
+		if *flViz {
+			fmt.Fprintf(cli.out, "digraph docker {\n")
+			printNode = (*DockerCli).printVizNode
 		} else {
-			WalkTree(cli, noTrunc, roots, byParent, "")
+			printNode = (*DockerCli).printTreeNode
 		}
+
+		if startImage.ID != "" {
+			cli.WalkTree(*noTrunc, &[]APIImages{startImage}, byParent, "", printNode)
+		} else if filter == "" {
+			cli.WalkTree(*noTrunc, &roots, byParent, "", printNode)
+		}
+		if *flViz {
+			fmt.Fprintf(cli.out, " base [style=invisible]\n}\n")
+		}
 	} else {
 		v := url.Values{}
 		if cmd.NArg() == 1 {
-			v.Set("filter", cmd.Arg(0))
+			v.Set("filter", filter)
 		}
 		if *all {
 			v.Set("all", "1")
@@ -1225,41 +1227,64 @@ func (cli *DockerCli) CmdImages(args ...string) error {
 	return nil
 }
 
-func WalkTree(cli *DockerCli, noTrunc *bool, images []APIImages, byParent map[string][]APIImages, prefix string) {
-	if len(images) > 1 {
-		length := len(images)
-		for index, image := range images {
+func (cli *DockerCli) WalkTree(noTrunc bool, images *[]APIImages, byParent map[string][]APIImages, prefix string, printNode func(cli *DockerCli, noTrunc bool, image APIImages, prefix string)) {
+	length := len(*images)
+	if length > 1 {
+		for index, image := range *images {
 			if index+1 == length {
-				PrintTreeNode(cli, noTrunc, image, prefix+"└─")
+				printNode(cli, noTrunc, image, prefix+"└─")
 				if subimages, exists := byParent[image.ID]; exists {
-					WalkTree(cli, noTrunc, subimages, byParent, prefix+"  ")
+					cli.WalkTree(noTrunc, &subimages, byParent, prefix+"  ", printNode)
 				}
 			} else {
-				PrintTreeNode(cli, noTrunc, image, prefix+"|─")
+				printNode(cli, noTrunc, image, prefix+"|─")
 				if subimages, exists := byParent[image.ID]; exists {
-					WalkTree(cli, noTrunc, subimages, byParent, prefix+"| ")
+					cli.WalkTree(noTrunc, &subimages, byParent, prefix+"| ", printNode)
 				}
 			}
 		}
 	} else {
-		for _, image := range images {
-			PrintTreeNode(cli, noTrunc, image, prefix+"└─")
+		for _, image := range *images {
+			printNode(cli, noTrunc, image, prefix+"└─")
 			if subimages, exists := byParent[image.ID]; exists {
-				WalkTree(cli, noTrunc, subimages, byParent, prefix+"  ")
+				cli.WalkTree(noTrunc, &subimages, byParent, prefix+"  ", printNode)
 			}
 		}
 	}
 }
 
-func PrintTreeNode(cli *DockerCli, noTrunc *bool, image APIImages, prefix string) {
+func (cli *DockerCli) printVizNode(noTrunc bool, image APIImages, prefix string) {
+	var (
+		imageID  string
+		parentID string
+	)
+	if noTrunc {
+		imageID = image.ID
+		parentID = image.ParentId
+	} else {
+		imageID = utils.TruncateID(image.ID)
+		parentID = utils.TruncateID(image.ParentId)
+	}
+	if image.ParentId == "" {
+		fmt.Fprintf(cli.out, " base -> \"%s\" [style=invis]\n", imageID)
+	} else {
+		fmt.Fprintf(cli.out, " \"%s\" -> \"%s\"\n", parentID, imageID)
+	}
+	if image.RepoTags[0] != "<none>:<none>" {
+		fmt.Fprintf(cli.out, " \"%s\" [label=\"%s\\n%s\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n",
+			imageID, imageID, strings.Join(image.RepoTags, "\\n"))
+	}
+}
+
+func (cli *DockerCli) printTreeNode(noTrunc bool, image APIImages, prefix string) {
 	var imageID string
-	if *noTrunc {
+	if noTrunc {
 		imageID = image.ID
 	} else {
 		imageID = utils.TruncateID(image.ID)
 	}
 
-	fmt.Fprintf(cli.out, "%s%s Size: %s (virtual %s)", prefix, imageID, utils.HumanSize(image.Size), utils.HumanSize(image.VirtualSize))
+	fmt.Fprintf(cli.out, "%s%s Virtual Size: %s", prefix, imageID, utils.HumanSize(image.VirtualSize))
 	if image.RepoTags[0] != "<none>:<none>" {
 		fmt.Fprintf(cli.out, " Tags: %s\n", strings.Join(image.RepoTags, ", "))
 	} else {
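CmdVersion and CmdInfo now read typed values out of an env-style payload instead of unmarshalling fixed structs. A toy version of the typed-getter idea — the real engine.Env is decoded from the response body via NewOutput/AddEnv; this map-based Env is only an illustration:

    package main

    import (
        "fmt"
        "strconv"
    )

    // Env is a toy stand-in for the engine env the CLI now reads.
    type Env map[string]string

    func (e Env) Get(k string) string { return e[k] }

    // GetInt returns -1 when the key is missing or not an integer.
    func (e Env) GetInt(k string) int {
        n, err := strconv.Atoi(e[k])
        if err != nil {
            return -1
        }
        return n
    }

    func (e Env) GetBool(k string) bool {
        v := e[k]
        return v == "1" || v == "true"
    }

    func main() {
        remoteInfo := Env{"Containers": "11", "Images": "16", "Debug": "false"}
        fmt.Printf("Containers: %d\n", remoteInfo.GetInt("Containers"))
        fmt.Printf("Images: %d\n", remoteInfo.GetInt("Images"))
        fmt.Printf("Debug: %v\n", remoteInfo.GetBool("Debug"))
    }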
config.go
@@ -14,6 +14,7 @@ type DaemonConfig struct {
 	Dns                         []string
 	EnableIptables              bool
 	BridgeIface                 string
+	BridgeIp                    string
 	DefaultIp                   net.IP
 	InterContainerCommunication bool
 	GraphDriver                 string
@@ -36,6 +37,7 @@ func ConfigFromJob(job *engine.Job) *DaemonConfig {
 	} else {
 		config.BridgeIface = DefaultNetworkBridge
 	}
+	config.BridgeIp = job.Getenv("BridgeIp")
 	config.DefaultIp = net.ParseIP(job.Getenv("DefaultIp"))
 	config.InterContainerCommunication = job.GetenvBool("InterContainerCommunication")
 	config.GraphDriver = job.Getenv("GraphDriver")
container.go (49 changes)
@@ -574,7 +574,11 @@ func (container *Container) Start() (err error) {
 
 	// Networking
 	if !container.Config.NetworkDisabled {
-		params = append(params, "-g", container.network.Gateway.String())
+		network := container.NetworkSettings
+		params = append(params,
+			"-g", network.Gateway,
+			"-i", fmt.Sprintf("%s/%d", network.IPAddress, network.IPPrefixLen),
+		)
 	}
 
 	// User
@@ -586,7 +590,6 @@ func (container *Container) Start() (err error) {
 	env := []string{
 		"HOME=/",
 		"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
 		"container=lxc",
-		"HOSTNAME=" + container.Config.Hostname,
 	}
@@ -594,6 +597,10 @@ func (container *Container) Start() (err error) {
 		env = append(env, "TERM=xterm")
 	}
 
+	if container.hostConfig.Privileged {
+		params = append(params, "-privileged")
+	}
+
 	// Init any links between the parent and children
 	runtime := container.runtime
@@ -774,14 +781,14 @@ func (container *Container) getBindMap() (map[string]BindMap, error) {
 		binds[path.Clean(dst)] = bindMap
 	}
 	return binds, nil
 }
 
 func (container *Container) createVolumes() error {
 	binds, err := container.getBindMap()
 	if err != nil {
 		return err
 	}
 	volumesDriver := container.runtime.volumes.driver
 	// Create the requested volumes if they don't exist
 	for volPath := range container.Config.Volumes {
@@ -801,15 +808,10 @@ func (container *Container) createVolumes() error {
 			if strings.ToLower(bindMap.Mode) == "rw" {
 				srcRW = true
 			}
-			if file, err := os.Open(bindMap.SrcPath); err != nil {
+			if stat, err := os.Lstat(bindMap.SrcPath); err != nil {
 				return err
 			} else {
-				defer file.Close()
-				if stat, err := file.Stat(); err != nil {
-					return err
-				} else {
-					volIsDir = stat.IsDir()
-				}
+				volIsDir = stat.IsDir()
 			}
 			// Otherwise create a directory in $ROOT/volumes/ and use that
 		} else {
@@ -829,26 +831,25 @@ func (container *Container) createVolumes() error {
 		}
 		container.Volumes[volPath] = srcPath
 		container.VolumesRW[volPath] = srcRW
+
 		// Create the mountpoint
-		rootVolPath := path.Join(container.RootfsPath(), volPath)
-		if volIsDir {
-			if err := os.MkdirAll(rootVolPath, 0755); err != nil {
-				return err
-			}
+		volPath = path.Join(container.RootfsPath(), volPath)
+		rootVolPath, err := utils.FollowSymlinkInScope(volPath, container.RootfsPath())
+		if err != nil {
+			panic(err)
 		}
 
-		volPath = path.Join(container.RootfsPath(), volPath)
-		if _, err := os.Stat(volPath); err != nil {
+		if _, err := os.Stat(rootVolPath); err != nil {
 			if os.IsNotExist(err) {
 				if volIsDir {
-					if err := os.MkdirAll(volPath, 0755); err != nil {
+					if err := os.MkdirAll(rootVolPath, 0755); err != nil {
 						return err
 					}
 				} else {
-					if err := os.MkdirAll(path.Dir(volPath), 0755); err != nil {
+					if err := os.MkdirAll(path.Dir(rootVolPath), 0755); err != nil {
 						return err
 					}
-					if f, err := os.OpenFile(volPath, os.O_CREATE, 0755); err != nil {
+					if f, err := os.OpenFile(rootVolPath, os.O_CREATE, 0755); err != nil {
 						return err
 					} else {
 						f.Close()
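The volume fix above hinges on resolving symlinks without letting the result escape the container rootfs. A simplified sketch of that check — the real utils.FollowSymlinkInScope re-roots link targets inside the scope rather than rejecting them; this version merely rejects escapes:

    package main

    import (
        "fmt"
        "path/filepath"
        "strings"
    )

    // resolveInScope resolves symlinks in p and rejects any result that
    // escapes the scope directory. EvalSymlinks fails if p does not exist.
    func resolveInScope(p, scope string) (string, error) {
        resolved, err := filepath.EvalSymlinks(p)
        if err != nil {
            return "", err
        }
        if resolved != scope && !strings.HasPrefix(resolved, scope+string(filepath.Separator)) {
            return "", fmt.Errorf("path %q escapes scope %q", p, scope)
        }
        return resolved, nil
    }

    func main() {
        // e.g. a volume target that is a symlink inside an image rootfs
        // (illustrative paths; they must exist for EvalSymlinks to succeed)
        p, err := resolveInScope("/var/lib/docker/rootfs/some/link", "/var/lib/docker/rootfs")
        fmt.Println(p, err)
    }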
contrib/mkimage-debootstrap.sh
@@ -142,14 +142,22 @@ if [ -z "$strictDebootstrap" ]; then
 	# this forces dpkg not to call sync() after package extraction and speeds up install
 	# the benefit is huge on spinning disks, and the penalty is nonexistent on SSD or decent server virtualization
 	echo 'force-unsafe-io' | sudo tee etc/dpkg/dpkg.cfg.d/02apt-speedup > /dev/null
-	# we want to effectively run "apt-get clean" after every install to keep images small
-	echo 'DPkg::Post-Invoke {"/bin/rm -f /var/cache/apt/archives/*.deb || true";};' | sudo tee etc/apt/apt.conf.d/no-cache > /dev/null
+	# we want to effectively run "apt-get clean" after every install to keep images small (see output of "apt-get clean -s" for context)
+	{
+		aptGetClean='rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true'
+		echo 'DPkg::Post-Invoke { "'$aptGetClean'"; };'
+		echo 'APT::Update::Post-Invoke { "'$aptGetClean'"; };'
+		echo 'Dir::Cache::pkgcache ""; Dir::Cache::srcpkgcache "";'
+	} | sudo tee etc/apt/apt.conf.d/no-cache > /dev/null
+	# and remove the translations, too
+	echo 'Acquire::Languages "none";' | sudo tee etc/apt/apt.conf.d/no-languages > /dev/null
 	
 	# helpful undo lines for each the above tweaks (for lack of a better home to keep track of them):
 	# rm /usr/sbin/policy-rc.d
 	# rm /sbin/initctl; dpkg-divert --rename --remove /sbin/initctl
 	# rm /etc/dpkg/dpkg.cfg.d/02apt-speedup
 	# rm /etc/apt/apt.conf.d/no-cache
+	# rm /etc/apt/apt.conf.d/no-languages
 	
 	if [ -z "$skipDetection" ]; then
 		# see also rudimentary platform detection in hack/install.sh
@@ -11,7 +11,8 @@ branch named [zfs_driver].
 
 # Status
 
-Pre-alpha
+Alpha: The code is now capable of creating, running and destroying containers
+and images.
 
 The code is under development. Contributions in the form of suggestions,
 code-reviews, and patches are welcome.
docker/docker.go
@@ -30,6 +30,7 @@ func main() {
 		flDebug       = flag.Bool("D", false, "Enable debug mode")
 		flAutoRestart = flag.Bool("r", true, "Restart previously running containers")
 		bridgeName    = flag.String("b", "", "Attach containers to a pre-existing network bridge; use 'none' to disable container networking")
+		bridgeIp      = flag.String("bip", "", "Use this CIDR notation address for the network bridge's IP, not compatible with -b")
 		pidfile       = flag.String("p", "/var/run/docker.pid", "Path to use for daemon PID file")
 		flRoot        = flag.String("g", "/var/lib/docker", "Path to use as the root of the docker runtime")
 		flEnableCors  = flag.Bool("api-enable-cors", false, "Enable CORS headers in the remote API")
@@ -54,6 +55,10 @@ func main() {
 		flHosts.Set(fmt.Sprintf("unix://%s", docker.DEFAULTUNIXSOCKET))
 	}
 
+	if *bridgeName != "" && *bridgeIp != "" {
+		log.Fatal("You specified -b & -bip, mutually exclusive options. Please specify only one.")
+	}
+
 	if *flDebug {
 		os.Setenv("DEBUG", "1")
 	}
@@ -77,6 +82,7 @@ func main() {
 	job.SetenvList("Dns", flDns.GetAll())
 	job.SetenvBool("EnableIptables", *flEnableIptables)
 	job.Setenv("BridgeIface", *bridgeName)
+	job.Setenv("BridgeIp", *bridgeIp)
 	job.Setenv("DefaultIp", *flDefaultIp)
 	job.SetenvBool("InterContainerCommunication", *flInterContainerComm)
 	job.Setenv("GraphDriver", *flGraphDriver)
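The new -bip flag takes a CIDR-notation address, which the standard library can validate directly. A small sketch (the address below is illustrative):

    package main

    import (
        "fmt"
        "net"
    )

    func main() {
        // net.ParseCIDR yields both the bridge address and its network,
        // which is exactly what a CIDR-notation -bip value encodes.
        ip, ipNet, err := net.ParseCIDR("172.31.42.1/16")
        if err != nil {
            fmt.Println("invalid -bip:", err)
            return
        }
        fmt.Printf("bridge IP %s on network %s\n", ip, ipNet)
    }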
@@ -1,4 +1,4 @@
 Andy Rothfusz <andy@dotcloud.com> (@metalivedev)
 Ken Cochrane <ken@dotcloud.com> (@kencochrane)
-James Turnbull <james@lovedthanlost.net> (@jamesturnbull)
+James Turnbull <james@lovedthanlost.net> (@jamtur01)
 Sven Dowideit <SvenDowideit@fosiki.com> (@SvenDowideit)
@@ -51,15 +51,15 @@ directory:
 ###Alternative Installation: Docker Container
 
 If you're running ``docker`` on your development machine then you may
-find it easier and cleaner to use the Dockerfile. This installs Sphinx
+find it easier and cleaner to use the docs Dockerfile. This installs Sphinx
 in a container, adds the local ``docs/`` directory and builds the HTML
 docs inside the container, even starting a simple HTTP server on port
-8000 so that you can connect and see your changes. Just run ``docker
-build .`` and run the resulting image. This is the equivalent to
-``make clean server`` since each container starts clean.
+8000 so that you can connect and see your changes.
 
-In the ``docs/`` directory, run:
-```docker build -t docker:docs . && docker run -p 8000:8000 docker:docs```
+In the ``docker`` source directory, run:
+```make docs```
+
+This is the equivalent to ``make clean server`` since each container starts clean.
 
 Usage
 -----
@@ -128,7 +128,8 @@ Guides on using sphinx
 
 * Code examples
 
-  * Start without $, so it's easy to copy and paste.
+  * Start typed commands with ``$ `` (dollar space) so that they
+    are easily differentiated from program output.
  * Use "sudo" with docker to ensure that your command is runnable
    even if they haven't [used the *docker*
    group](http://docs.docker.io/en/latest/use/basics/#why-sudo).
@@ -55,6 +55,13 @@ What's new
 
 **New!** This endpoint now returns the host config for the container.
 
+.. http:post:: /images/create
+.. http:post:: /images/(name)/insert
+.. http:post:: /images/(name)/push
+
+   **New!** progressDetail object was added in the JSON. It's now possible
+   to get the current value and the total of the progress without having to
+   parse the string.
+
 v1.7
 ****
@@ -696,7 +696,7 @@ Create an image
        Content-Type: application/json
 
        {"status":"Pulling..."}
-       {"status":"Pulling", "progress":"1/? (n/a)"}
+       {"status":"Pulling", "progress":"1 B/ 100 B", "progressDetail":{"current":1, "total":100}}
        {"error":"Invalid..."}
        ...
@@ -736,7 +736,7 @@ Insert a file in an image
        Content-Type: application/json
 
        {"status":"Inserting..."}
-       {"status":"Inserting", "progress":"1/? (n/a)"}
+       {"status":"Inserting", "progress":"1/? (n/a)", "progressDetail":{"current":1}}
        {"error":"Invalid..."}
        ...
@@ -857,7 +857,7 @@ Push an image on the registry
        Content-Type: application/json
 
        {"status":"Pushing..."}
-       {"status":"Pushing", "progress":"1/? (n/a)"}
+       {"status":"Pushing", "progress":"1/? (n/a)", "progressDetail":{"current":1}}
        {"error":"Invalid..."}
        ...
@@ -1026,6 +1026,7 @@ Build an image from Dockerfile via stdin
    :query q: suppress verbose build output
    :query nocache: do not use the cache when building the image
    :reqheader Content-type: should be set to ``"application/tar"``.
+   :reqheader X-Registry-Auth: base64-encoded AuthConfig object
    :statuscode 200: no error
    :statuscode 500: server error
@@ -30,6 +30,7 @@ To list available commands, either run ``docker`` with no parameters or execute
   -H=[unix:///var/run/docker.sock]: Multiple tcp://host:port or unix://path/to/socket to bind in daemon mode, single connection otherwise
   -api-enable-cors=false: Enable CORS headers in the remote API
   -b="": Attach containers to a pre-existing network bridge; use 'none' to disable container networking
+  -bip="": Use the provided CIDR notation address for the dynamically created bridge (docker0); Mutually exclusive of -b
   -d=false: Enable daemon mode
   -dns="": Force docker to use specific DNS servers
   -g="/var/lib/docker": Path to use as the root of the docker runtime
@@ -225,8 +226,10 @@ by using the ``git://`` schema.
   -run="": Configuration to be applied when the image is launched with `docker run`.
            (ex: -run='{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}')
 
-Simple commit of an existing container
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.. _cli_commit_examples:
+
+Commit an existing container
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 .. code-block:: bash
@@ -240,10 +243,33 @@ Commit an existing container
     REPOSITORY                        TAG       ID            CREATED         VIRTUAL SIZE
     SvenDowideit/testimage            version3  f5283438590d  16 seconds ago  335.7 MB
 
+Change the command that a container runs
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Sometimes you have an application container running just a service and you need
+to make a quick change (run bash?) and then change it back.
+
+In this example, we run a container with ``ls`` and then change the image to
+run ``ls /etc``.
+
+.. code-block:: bash
+
+    $ docker run -t -name test ubuntu ls
+    bin boot dev etc home lib lib64 media mnt opt proc root run sbin selinux srv sys tmp usr var
+    $ docker commit -run='{"Cmd": ["ls","/etc"]}' test test2
+    933d16de9e70005304c1717b5c6f2f39d6fd50752834c6f34a155c70790011eb
+    $ docker run -t test2
+    adduser.conf gshadow login.defs rc0.d
+    alternatives gshadow- logrotate.d rc1.d
+    apt host.conf lsb-base rc2.d
+    ...
+
 Full -run example
 .................
 
+The ``-run`` JSON hash changes the ``Config`` section when running ``docker inspect CONTAINERID``
+or ``config`` when running ``docker inspect IMAGEID``.
+
+(multiline is ok within a single quote ``'``)
+
 ::
@@ -769,6 +795,15 @@ Known Issues (kill)
 
     Fetch the logs of a container
 
+``docker logs`` is a convenience which batch-retrieves whatever logs
+are present at the time of execution. This does not guarantee
+execution order when combined with a ``docker run`` (i.e. your run may
+not have generated any logs at the time you execute ``docker logs``).
+
+``docker logs -f`` combines ``docker logs`` and ``docker attach``: it
+will first return all logs from the beginning and then continue
+streaming new output from the container's stdout and stderr.
+
 .. _cli_port:
@@ -900,6 +935,38 @@ containers will not be deleted.
     Usage: docker rmi IMAGE [IMAGE...]
 
     Remove one or more images
 
+Removing tagged images
+~~~~~~~~~~~~~~~~~~~~~~
+
+Images can be removed either by their short or long ID's, or their image names.
+If an image has more than one name, each of them needs to be removed before the
+image is removed.
+
+.. code-block:: bash
+
+    $ sudo docker images
+    REPOSITORY   TAG      IMAGE ID       CREATED          SIZE
+    test1        latest   fd484f19954f   23 seconds ago   7 B (virtual 4.964 MB)
+    test         latest   fd484f19954f   23 seconds ago   7 B (virtual 4.964 MB)
+    test2        latest   fd484f19954f   23 seconds ago   7 B (virtual 4.964 MB)
+
+    $ sudo docker rmi fd484f19954f
+    Error: Conflict, fd484f19954f wasn't deleted
+    2013/12/11 05:47:16 Error: failed to remove one or more images
+
+    $ sudo docker rmi test1
+    Untagged: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8
+    $ sudo docker rmi test2
+    Untagged: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8
+
+    $ sudo docker images
+    REPOSITORY   TAG      IMAGE ID       CREATED          SIZE
+    test1        latest   fd484f19954f   23 seconds ago   7 B (virtual 4.964 MB)
+    $ sudo docker rmi test
+    Untagged: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8
+    Deleted: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8
+
 .. _cli_run:
@@ -937,6 +1004,13 @@ containers will not be deleted.
     -link="": Add link to another container (name:alias)
     -name="": Assign the specified name to the container. If no name is specified docker will generate a random name
     -P=false: Publish all exposed ports to the host interfaces
 
+``'docker run'`` first ``'creates'`` a writeable container layer over
+the specified image, and then ``'starts'`` it using the specified
+command. That is, ``'docker run'`` is equivalent to the API
+``/containers/create`` then ``/containers/(id)/start``.
+
+``docker run`` can be used in combination with ``docker commit`` to :ref:`change the command that a container runs <cli_commit_examples>`.
+
 Known Issues (run -volumes-from)
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -1080,7 +1154,7 @@ in the same mode (rw or ro) as the reference container.
 
 ::
 
-    Usage: docker start [OPTIONS] NAME
+    Usage: docker start [OPTIONS] CONTAINER
 
     Start a stopped container
@@ -44,7 +44,8 @@ This following command will build a development environment using the Dockerfile
 
     sudo make build
 
-If the build is successful, congratulations! You have produced a clean build of docker, neatly encapsulated in a standard build environment.
+If the build is successful, congratulations! You have produced a clean build of
+docker, neatly encapsulated in a standard build environment.
 
 Step 4: Build the Docker Binary
@@ -58,6 +59,19 @@ To create the Docker binary, run this command:
 
 This will create the Docker binary in ``./bundles/<version>-dev/binary/``
 
+Using your built Docker binary
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The binary is available outside the container in the directory
+``./bundles/<version>-dev/binary/``. You can swap your host docker executable
+with this binary for live testing - for example, on ubuntu:
+
+.. code-block:: bash
+
+    sudo service docker stop ; sudo cp $(which docker) $(which docker)_ ; sudo cp ./bundles/<version>-dev/binary/docker-<version>-dev $(which docker); sudo service docker start
+
+.. note:: It's safer to run the tests below before swapping your host's docker binary.
+
 Step 5: Run the Tests
 ---------------------
@@ -121,7 +135,7 @@ You can run an interactive session in the newly built container:
 
     # type 'exit' or Ctrl-D to exit
 
-Extra Step: Build and view the Documenation
+Extra Step: Build and view the Documentation
 -------------------------------------------
 
 If you want to read the documentation from a local website, or are making changes
@@ -129,14 +143,11 @@ to it, you can build the documentation and then serve it by:
 
 .. code-block:: bash
 
-    sudo make doc
+    sudo make docs
     # when it's done, you can point your browser to http://yourdockerhost:8000
    # type Ctrl-C to exit
 
-.. note:: The binary is available outside the container in the directory ``./bundles/<version>-dev/binary/``. You can swap your host docker executable with this binary for live testing - for example, on ubuntu: ``sudo service docker stop ; sudo cp $(which docker) $(which docker)_ ; sudo cp ./bundles/<version>-dev/binary/docker-<version>-dev $(which docker);sudo service docker start``.
-
 **Need More Help?**
 
-If you need more help then hop on to the `#docker-dev IRC channel <irc://chat.freenode.net#docker-dev>`_ or post a message on the `Docker developer mailinglist <https://groups.google.com/d/forum/docker-dev>`_.
+If you need more help then hop on to the `#docker-dev IRC channel <irc://chat.freenode.net#docker-dev>`_ or post a message on the `Docker developer mailing list <https://groups.google.com/d/forum/docker-dev>`_.
@@ -11,41 +11,50 @@ Arch Linux
 
 .. include:: install_unofficial.inc
 
-Installing on Arch Linux is not officially supported but can be handled via
-one of the following AUR packages:
+Installing on Arch Linux can be handled via the package in community:
 
-* `lxc-docker <https://aur.archlinux.org/packages/lxc-docker/>`_
-* `lxc-docker-git <https://aur.archlinux.org/packages/lxc-docker-git/>`_
-* `lxc-docker-nightly <https://aur.archlinux.org/packages/lxc-docker-nightly/>`_
+* `docker <https://www.archlinux.org/packages/community/x86_64/docker/>`_
 
-The lxc-docker package will install the latest tagged version of docker.
-The lxc-docker-git package will build from the current master branch.
-The lxc-docker-nightly package will install the latest build.
+or the following AUR package:
+
+* `docker-git <https://aur.archlinux.org/packages/docker-git/>`_
+
+The docker package will install the latest tagged version of docker.
+The docker-git package will build from the current master branch.
 
 Dependencies
 ------------
 
 Docker depends on several packages which are specified as dependencies in
-the AUR packages. The core dependencies are:
+the packages. The core dependencies are:
 
 * bridge-utils
 * device-mapper
 * iproute2
 * lxc
 * sqlite
 
 Installation
 ------------
 
+For the normal package a simple
+::
+
+    pacman -S docker
+
+is all that is needed.
+
+For the AUR package execute:
+::
+
+    yaourt -S docker-git
+
 The instructions here assume **yaourt** is installed. See
 `Arch User Repository <https://wiki.archlinux.org/index.php/Arch_User_Repository#Installing_packages>`_
 for information on building and installing packages from the AUR if you have not
 done so before.
 
-::
-
-    yaourt -S lxc-docker
-
 Starting Docker
 ---------------
@@ -1,6 +1,6 @@
 :title: Requirements and Installation on Fedora
 :description: Please note this project is currently under heavy development. It should not be used in production.
-:keywords: Docker, Docker documentation, requirements, virtualbox, vagrant, git, ssh, putty, cygwin, linux
+:keywords: Docker, Docker documentation, fedora, requirements, virtualbox, vagrant, git, ssh, putty, cygwin, linux
 
 .. _fedora:
@@ -18,25 +18,25 @@ architecture.
 Installation
 ------------
 
-Firstly, let's make sure our Fedora host is up-to-date.
-
-.. code-block:: bash
-
-   sudo yum -y upgrade
-
-Next let's install the ``docker-io`` package which will install Docker on our host.
+Install the ``docker-io`` package which will install Docker on our host.
 
 .. code-block:: bash
 
    sudo yum -y install docker-io
 
-Now it's installed lets start the Docker daemon.
+To update the ``docker-io`` package:
+
+.. code-block:: bash
+
+   sudo yum -y update docker-io
+
+Now that it's installed, let's start the Docker daemon.
 
 .. code-block:: bash
 
    sudo systemctl start docker
 
-If we want Docker to start at boot we should also:
+If we want Docker to start at boot, we should also:
 
 .. code-block:: bash
@@ -46,7 +46,7 @@ Now let's verify that Docker is working.
 
 .. code-block:: bash
 
-   sudo docker run -i -t ubuntu /bin/bash
+   sudo docker run -i -t mattdm/fedora /bin/bash
 
 **Done!**, now continue with the :ref:`hello_world` example.
@@ -12,7 +12,7 @@
 `Compute Engine <https://developers.google.com/compute>`_ QuickStart for `Debian <https://www.debian.org>`_
 -----------------------------------------------------------------------------------------------------------
 
-1. Go to `Google Cloud Console <https://cloud.google.com/console>`_ and create a new Cloud Project with billing enabled.
+1. Go to `Google Cloud Console <https://cloud.google.com/console>`_ and create a new Cloud Project with `Compute Engine enabled <https://developers.google.com/compute/docs/signup>`_.
 
 2. Download and configure the `Google Cloud SDK <https://developers.google.com/cloud/sdk/>`_ to use your project with the following commands:
@@ -111,3 +111,40 @@ And replace it by the following one::
 
     GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"
 
 Then run ``update-grub``, and reboot.
+
+Details
+-------
+
+Networking:
+
+- CONFIG_BRIDGE
+- CONFIG_NETFILTER_XT_MATCH_ADDRTYPE
+- CONFIG_NF_NAT
+- CONFIG_NF_NAT_IPV4
+- CONFIG_NF_NAT_NEEDED
+
+LVM:
+
+- CONFIG_BLK_DEV_DM
+- CONFIG_DM_THIN_PROVISIONING
+- CONFIG_EXT4_FS
+
+Namespaces:
+
+- CONFIG_NAMESPACES
+- CONFIG_UTS_NS
+- CONFIG_IPC_NS
+- CONFIG_UID_NS
+- CONFIG_PID_NS
+- CONFIG_NET_NS
+
+Cgroups:
+
+- CONFIG_CGROUPS
+
+Cgroup controllers (optional but highly recommended):
+
+- CONFIG_CGROUP_CPUACCT
+- CONFIG_BLK_CGROUP
+- CONFIG_MEMCG
+- CONFIG_MEMCG_SWAP
@ -1,56 +1,62 @@
|
|||
:title: Requirements and Installation on Red Hat Enterprise Linux / CentOS
|
||||
:title: Requirements and Installation on Red Hat Enterprise Linux
|
||||
:description: Please note this project is currently under heavy development. It should not be used in production.
|
||||
:keywords: Docker, Docker documentation, requirements, linux, rhel, centos
|
||||
|
||||
.. _rhel:
|
||||
|
||||
Red Hat Enterprise Linux / CentOS
|
||||
=================================
|
||||
Red Hat Enterprise Linux
|
||||
========================
|
||||
|
||||
.. include:: install_header.inc
|
||||
|
||||
.. include:: install_unofficial.inc
|
||||
|
||||
Docker is available for **RHEL/CentOS 6**.
|
||||
Docker is available for **RHEL** on EPEL. These instructions should work for
|
||||
both RHEL and CentOS. They will likely work for other binary compatible EL6
|
||||
distributions as well, but they haven't been tested.
|
||||
|
||||
Please note that this package is part of a `Extra Packages for Enterprise Linux (EPEL)`_, a community effort to create and maintain additional packages for RHEL distribution.
|
||||
Please note that this package is part of `Extra Packages for Enterprise
|
||||
Linux (EPEL)`_, a community effort to create and maintain additional packages
|
||||
for the RHEL distribution.
|
||||
|
||||
Please note that due to the current Docker limitations Docker is able to run only on the **64 bit** architecture.
|
||||
Also note that due to the current Docker limitations, Docker is able to run
|
||||
only on the **64 bit** architecture.
|
||||
|
||||
Installation
|
||||
------------
|
||||
|
||||
1. Firstly, let's make sure our RHEL host is up-to-date.
|
||||
Firstly, you need to install the EPEL repository. Please follow the `EPEL installation instructions`_.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo yum -y upgrade
|
||||
|
||||
2. Next you need to install the EPEL repository. Please follow the `EPEL installation instructions`_.
|
||||
|
||||
3. Next let's install the ``docker-io`` package which will install Docker on our host.
|
||||
Next, let's install the ``docker-io`` package which will install Docker on our host.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo yum -y install docker-io
|
||||
|
||||
4. Now it's installed lets start the Docker daemon.
|
||||
To update the ``docker-io`` package
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo yum -y update docker-io
|
||||
|
||||
Now that it's installed, let's start the Docker daemon.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo service docker start
|
||||
|
||||
If we want Docker to start at boot we should also:
|
||||
If we want Docker to start at boot, we should also:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo chkconfig docker on
|
||||
|
||||
5. Now let's verify that Docker is working.
|
||||
Now let's verify that Docker is working.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo docker run -i -t ubuntu /bin/bash
|
||||
sudo docker run -i -t mattdm/fedora /bin/bash
|
||||
|
||||
**Done!**, now continue with the :ref:`hello_world` example.
|
||||
|
||||
|
|
@@ -63,7 +63,10 @@ Installation
 These instructions have changed for 0.6. If you are upgrading from
 an earlier version, you will need to follow them again.

-Docker is available as a Debian package, which makes installation easy.
+Docker is available as a Debian package, which makes installation
+easy. **See the :ref:`installmirrors` section below if you are not in
+the United States.** Other sources of the Debian packages may be
+faster for you to install.

 First add the Docker repository key to your local keychain. You can use the
 ``apt-key`` command to check the fingerprint matches: ``36A1 D786 9245 C895 0F96

@@ -74,7 +77,7 @@ First add the Docker repository key to your local keychain. You can use the

    sudo sh -c "wget -qO- https://get.docker.io/gpg | apt-key add -"

 Add the Docker repository to your apt sources list, update and install the
 ``lxc-docker`` package.

 *You may receive a warning that the package isn't trusted. Answer yes to
 continue installation.*

@@ -92,7 +95,7 @@ continue installation.*

 .. code-block:: bash

-   curl -s http://get.docker.io/ubuntu/ | sudo sh
+   curl -s https://get.docker.io/ubuntu/ | sudo sh

 Now verify that the installation has worked by downloading the ``ubuntu`` image
 and launching a container.

@@ -199,3 +202,25 @@ incoming connections on the Docker port (default 4243):

    sudo ufw allow 4243/tcp

+.. _installmirrors:
+
+Mirrors
+^^^^^^^
+
+You should ``ping get.docker.io`` and compare the latency to the
+following mirrors, and pick whichever one is best for you.
+
+Yandex
+------
+
+`Yandex <http://yandex.ru/>`_ in Russia is mirroring the Docker Debian
+packages, updating every 6 hours. Substitute
+``http://mirror.yandex.ru/mirrors/docker/`` for
+``http://get.docker.io/ubuntu`` in the instructions above. For example:
+
+.. code-block:: bash
+
+   sudo sh -c "echo deb http://mirror.yandex.ru/mirrors/docker/ docker main\
+   > /etc/apt/sources.list.d/docker.list"
+   sudo apt-get update
+   sudo apt-get install lxc-docker
@@ -1,11 +1,11 @@
-:title: Ambassador pattern linking
+:title: Link via an Ambassador Container
 :description: Using the Ambassador pattern to abstract (network) services
 :keywords: Examples, Usage, links, docker, documentation, examples, names, name, container naming

 .. _ambassador_pattern_linking:

-Ambassador pattern linking
-==========================
+Link via an Ambassador Container
+================================

 Rather than hardcoding network links between a service consumer and provider, Docker
 encourages service portability.

@@ -27,7 +27,7 @@ you can add ambassadors

   (consumer) --> (redis-ambassador) ---network---> (redis-ambassador) --> (redis)

-When you need to rewire your consumer to talk to a different resdis server, you
+When you need to rewire your consumer to talk to a different redis server, you
 can just restart the ``redis-ambassador`` container that the consumer is connected to.

 This pattern also allows you to transparently move the redis server to a different

@@ -161,11 +161,12 @@ variable using the ``-e`` command line option.
 local ``1234`` port to the remote IP and port - in this case ``192.168.1.52:6379``.

-.. code-block:: Dockerfile
+::

   #
-  # first you need to build the docker-ut image using ./contrib/mkimage-unittest.sh
+  # first you need to build the docker-ut image
+  # using ./contrib/mkimage-unittest.sh
   # then
   # docker build -t SvenDowideit/ambassador .
   # docker tag SvenDowideit/ambassador ambassador
@@ -1,10 +1,10 @@
-:title: Base Image Creation
+:title: Create a Base Image
 :description: How to create base images
 :keywords: Examples, Usage, base image, docker, documentation, examples

 .. _base_image_creation:

-Base Image Creation
+Create a Base Image
 ===================

 So you want to create your own :ref:`base_image_def`? Great!
@@ -1,10 +1,10 @@
-:title: Basic Commands
+:title: Learn Basic Commands
 :description: Common usage and commands
 :keywords: Examples, Usage, basic commands, docker, documentation, examples

-The Basics
-==========
+Learn Basic Commands
+====================

 Starting Docker
 ---------------

@@ -67,7 +67,7 @@ daemon will make the ownership of the Unix socket read/writable by the
 *docker* group when the daemon starts. The ``docker`` daemon must
 always run as root, but if you run the ``docker`` client as a user in
 the *docker* group then you don't need to add ``sudo`` to all the
-client commands.
+client commands. Warning: the *docker* group is root-equivalent.

 **Example:**
@@ -1,12 +1,12 @@
-:title: Dockerfiles for Images
+:title: Build Images (Dockerfile Reference)
 :description: Dockerfiles use a simple DSL which allows you to automate the steps you would normally manually take to create an image.
 :keywords: builder, docker, Dockerfile, automation, image creation

 .. _dockerbuilder:

-======================
-Dockerfiles for Images
-======================
+===================================
+Build Images (Dockerfile Reference)
+===================================

 **Docker can act as a builder** and read instructions from a text
 ``Dockerfile`` to automate the steps you would otherwise take manually
@@ -1,11 +1,11 @@
-:title: Host Integration
+:title: Automatically Start Containers
 :description: How to generate scripts for upstart, systemd, etc.
 :keywords: systemd, upstart, supervisor, docker, documentation, host integration

-Host Integration
-================
+Automatically Start Containers
+==============================

 You can use your Docker containers with process managers like ``upstart``,
 ``systemd`` and ``supervisor``.
@@ -17,8 +17,9 @@ Contents:
    workingwithrepository
    baseimages
    port_redirection
-   puppet
+   networking
    host_integration
    working_with_volumes
    working_with_links_names
    ambassador_pattern_linking
+   puppet
153 docs/sources/use/networking.rst (new file)

@@ -0,0 +1,153 @@
:title: Configure Networking
:description: Docker networking
:keywords: network, networking, bridge, docker, documentation

Configure Networking
====================

Docker uses Linux bridge capabilities to provide network connectivity
to containers. The ``docker0`` bridge interface is managed by Docker
itself for this purpose. Thus, when the Docker daemon starts, it:

- creates the ``docker0`` bridge if not present
- searches for an IP address range which doesn't overlap with an existing route
- picks an IP in the selected range
- assigns this IP to the ``docker0`` bridge


.. code-block:: bash

   # List host bridges
   $ sudo brctl show
   bridge name     bridge id               STP enabled     interfaces
   docker0         8000.000000000000       no

   # Show docker0 IP address
   $ sudo ifconfig docker0
   docker0   Link encap:Ethernet  HWaddr xx:xx:xx:xx:xx:xx
             inet addr:172.17.42.1  Bcast:0.0.0.0  Mask:255.255.0.0


At runtime, a :ref:`specific kind of virtual
interface<vethxxxx-device>` is given to each container, which is then
bonded to the ``docker0`` bridge. Each container also receives a
dedicated IP address from the same range as ``docker0``. The
``docker0`` IP address is then used as the default gateway for the
containers.

.. code-block:: bash

   # Run a container
   $ sudo docker run -t -i -d base /bin/bash
   52f811c5d3d69edddefc75aff5a4525fc8ba8bcfa1818132f9dc7d4f7c7e78b4

   $ sudo brctl show
   bridge name     bridge id               STP enabled     interfaces
   docker0         8000.fef213db5a66       no              vethQCDY1N


Above, ``docker0`` acts as a bridge for the ``vethQCDY1N`` interface
which is dedicated to the 52f811c5d3d6 container.


How to use a specific IP address range
--------------------------------------

Docker will try hard to find an IP range which is not used by the
host. Even if it works for most cases, it's not bullet-proof and
sometimes you need to have more control over the IP addressing scheme.

For this purpose, Docker allows you to manage the ``docker0`` bridge
or your own one using the ``-b=<bridgename>`` parameter.

In this scenario:

- ensure Docker is stopped
- create your own bridge (``bridge0`` for example)
- assign a specific IP to this bridge
- start Docker with the ``-b=bridge0`` parameter


.. code-block:: bash

   # Stop Docker
   $ sudo service docker stop

   # Clean docker0 bridge and
   # add your very own bridge0
   $ sudo ifconfig docker0 down
   $ sudo brctl addbr bridge0
   $ sudo ifconfig bridge0 192.168.227.1 netmask 255.255.255.0

   # Edit your Docker startup file
   $ echo "DOCKER_OPTS=\"-b=bridge0\"" >> /etc/default/docker

   # Start Docker
   $ sudo service docker start

   # Ensure bridge0 IP is not changed by Docker
   $ sudo ifconfig bridge0
   bridge0   Link encap:Ethernet  HWaddr xx:xx:xx:xx:xx:xx
             inet addr:192.168.227.1  Bcast:192.168.227.255  Mask:255.255.255.0

   # Run a container
   $ docker run -i -t base /bin/bash

   # Container IP in the 192.168.227/24 range
   root@261c272cd7d5:/# ifconfig eth0
   eth0      Link encap:Ethernet  HWaddr xx:xx:xx:xx:xx:xx
             inet addr:192.168.227.5  Bcast:192.168.227.255  Mask:255.255.255.0

   # bridge0 IP as the default gateway
   root@261c272cd7d5:/# route -n
   Kernel IP routing table
   Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
   0.0.0.0         192.168.227.1   0.0.0.0         UG    0      0        0 eth0
   192.168.227.0   0.0.0.0         255.255.255.0   U     0      0        0 eth0

   # Hit CTRL+P then CTRL+Q to detach

   # Display bridge info
   $ sudo brctl show
   bridge name     bridge id               STP enabled     interfaces
   bridge0         8000.fe7c2e0faebd       no              vethAQI2QT


Container intercommunication
----------------------------

Containers can communicate with each other according to the ``icc``
parameter value of the Docker daemon.

- The default, ``-icc=true``, allows containers to communicate with each other.
- ``-icc=false`` means containers are isolated from each other.

Under the hood, ``iptables`` is used by Docker to either accept or
drop communication between containers.


.. _vethxxxx-device:

What about the vethXXXX device?
-------------------------------

Well. Things get complicated here.

The ``vethXXXX`` interface is the host side of a point-to-point link
between the host and the corresponding container; the other side of
the link is the container's ``eth0`` interface. This pair (host
``vethXXXX`` and container ``eth0``) is connected like a tube.
Everything that comes in one side will come out the other side.

All the plumbing is delegated to Linux network capabilities (check the
``ip link`` command) and the namespaces infrastructure.


I want more
-----------

Jérôme Petazzoni has created ``pipework`` to connect together
containers in arbitrarily complex scenarios:
https://github.com/jpetazzo/pipework
@@ -1,12 +1,12 @@
-:title: Port redirection
+:title: Redirect Ports
 :description: usage about port redirection
 :keywords: Usage, basic port, docker, documentation, examples

 .. _port_redirection:

-Port redirection
-================
+Redirect Ports
+==============

 Interacting with a service is commonly done through a connection to a
 port. When this service runs inside a container, one can connect to
@@ -1,15 +1,16 @@
-:title: Working with Links and Names
-:description: How to create and use links and names
-:keywords: Examples, Usage, links, docker, documentation, examples, names, name, container naming
+:title: Link Containers
+:description: How to create and use both links and names
+:keywords: Examples, Usage, links, linking, docker, documentation, examples, names, name, container naming

 .. _working_with_links_names:

-Working with Links and Names
-============================
+Link Containers
+===============

-From version 0.6.5 you are now able to ``name`` a container and ``link`` it to another
-container by referring to its name. This will create a parent -> child relationship
-where the parent container can see selected information about its child.
+From version 0.6.5 you are now able to ``name`` a container and
+``link`` it to another container by referring to its name. This will
+create a parent -> child relationship where the parent container can
+see selected information about its child.

 .. _run_name:

@@ -18,8 +19,9 @@ Container Naming

 .. versionadded:: v0.6.5

-You can now name your container by using the ``-name`` flag. If no name is provided, Docker
-will automatically generate a name. You can see this name using the ``docker ps`` command.
+You can now name your container by using the ``-name`` flag. If no
+name is provided, Docker will automatically generate a name. You can
+see this name using the ``docker ps`` command.

 .. code-block:: bash

@@ -38,47 +40,53 @@ Links: service discovery for docker

 .. versionadded:: v0.6.5

-Links allow containers to discover and securely communicate with each other by using the
-flag ``-link name:alias``. Inter-container communication can be disabled with the daemon
-flag ``-icc=false``. With this flag set to false, Container A cannot access Container B
-unless explicitly allowed via a link. This is a huge win for securing your containers.
-When two containers are linked together Docker creates a parent child relationship
-between the containers. The parent container will be able to access information via
-environment variables of the child such as name, exposed ports, IP and other selected
-environment variables.
+Links allow containers to discover and securely communicate with each
+other by using the flag ``-link name:alias``. Inter-container
+communication can be disabled with the daemon flag
+``-icc=false``. With this flag set to ``false``, Container A cannot
+access Container B unless explicitly allowed via a link. This is a
+huge win for securing your containers. When two containers are linked
+together Docker creates a parent child relationship between the
+containers. The parent container will be able to access information
+via environment variables of the child such as name, exposed ports, IP
+and other selected environment variables.

-When linking two containers Docker will use the exposed ports of the container to create
-a secure tunnel for the parent to access. If a database container only exposes port 8080
-then the linked container will only be allowed to access port 8080 and nothing else if
+When linking two containers Docker will use the exposed ports of the
+container to create a secure tunnel for the parent to access. If a
+database container only exposes port 8080 then the linked container
+will only be allowed to access port 8080 and nothing else if
 inter-container communication is set to false.

+For example, there is an image called ``crosbymichael/redis`` that exposes the
+port 6379 and starts the Redis server. Let's name the container as ``redis``
+based on that image and run it as daemon.
+
 .. code-block:: bash

-   # Example: there is an image called crosbymichael/redis that exposes the port 6379 and starts redis-server.
-   # Let's name the container as "redis" based on that image and run it as daemon.
    $ sudo docker run -d -name redis crosbymichael/redis

-We can issue all the commands that you would expect using the name "redis"; start, stop,
-attach, using the name for our container. The name also allows us to link other containers
-into this one.
+We can issue all the commands that you would expect using the name
+``redis``; start, stop, attach, using the name for our container. The
+name also allows us to link other containers into this one.

-Next, we can start a new web application that has a dependency on Redis and apply a link
-to connect both containers. If you noticed when running our Redis server we did not use
-the -p flag to publish the Redis port to the host system. Redis exposed port 6379 and
-this is all we need to establish a link.
+Next, we can start a new web application that has a dependency on
+Redis and apply a link to connect both containers. If you noticed when
+running our Redis server we did not use the ``-p`` flag to publish the
+Redis port to the host system. Redis exposed port 6379 and this is all
+we need to establish a link.

 .. code-block:: bash

    # Linking the redis container as a child
    $ sudo docker run -t -i -link redis:db -name webapp ubuntu bash

-When you specified -link redis:db you are telling docker to link the container named redis
-into this new container with the alias db. Environment variables are prefixed with the alias
-so that the parent container can access network and environment information from the containers
-that are linked into it.
+When you specified ``-link redis:db`` you are telling Docker to link
+the container named ``redis`` into this new container with the alias
+``db``. Environment variables are prefixed with the alias so that the
+parent container can access network and environment information from
+the containers that are linked into it.

-If we inspect the environment variables of the second container, we would see all the information
-about the child container.
+If we inspect the environment variables of the second container, we
+would see all the information about the child container.

 .. code-block:: bash

@@ -100,14 +108,17 @@ about the child container.
    _=/usr/bin/env
    root@4c01db0b339c:/#

-Accessing the network information along with the environment of the child container allows
-us to easily connect to the Redis service on the specific IP and port in the environment.
+Accessing the network information along with the environment of the
+child container allows us to easily connect to the Redis service on
+the specific IP and port in the environment.

-Running ``docker ps`` shows the 2 containers, and the webapp/db alias name for the redis container.
+Running ``docker ps`` shows the 2 containers, and the ``webapp/db``
+alias name for the redis container.

 .. code-block:: bash

    $ docker ps
    CONTAINER ID        IMAGE                        COMMAND                CREATED             STATUS              PORTS               NAMES
    4c01db0b339c        ubuntu:12.04                 bash                   17 seconds ago      Up 16 seconds                           webapp
    d7886598dbe2        crosbymichael/redis:latest   /redis-server --dir    33 minutes ago      Up 33 minutes       6379/tcp            redis,webapp/db
@@ -1,11 +1,11 @@
-:title: Working with Volumes
+:title: Share Directories via Volumes
 :description: How to create and share volumes
 :keywords: Examples, Usage, volume, docker, documentation, examples

 .. _volume_def:

-Data Volume
-===========
+Share Directories via Volumes
+=============================

 .. versionadded:: v0.3.0
    Data volumes have been available since version 1 of the

@@ -46,7 +46,7 @@ volumes to any container created from the image::
 Mount Volumes from an Existing Container:
 -----------------------------------------

-The command below creates a new container which is runnning as daemon
+The command below creates a new container which is running as daemon
 ``-d`` and with one volume ``/var/lib/couchdb``::

    COUCH1=$(sudo docker run -d -v /var/lib/couchdb shykes/couchdb:2013-05-03)
@@ -1,11 +1,11 @@
-:title: Working With Repositories
+:title: Share Images via Repositories
 :description: Repositories allow users to share images.
 :keywords: repo, repositories, usage, pull image, push image, image, documentation

 .. _working_with_the_repository:

-Working with Repositories
-=========================
+Share Images via Repositories
+=============================

 A *repository* is a hosted collection of tagged :ref:`images
 <image_def>` that together create the file system for a container. The

@@ -152,6 +152,41 @@ or tag.

 .. _using_private_repositories:

+Trusted Builds
+--------------
+
+Trusted Builds automate the building and updating of images from GitHub, directly
+on docker.io servers. It works by adding a commit hook to your selected repository,
+triggering a build and update when you push a commit.
+
+To set up a Trusted Build
++++++++++++++++++++++++++
+
+#. Create a `Docker Index account <https://index.docker.io/>`_ and login.
+#. Link your GitHub account through the ``Link Accounts`` menu.
+#. `Configure a Trusted build <https://index.docker.io/builds/>`_.
+#. Pick a GitHub project that has a ``Dockerfile`` that you want to build.
+#. Pick the branch you want to build (the default is the ``master`` branch).
+#. Give the Trusted Build a name.
+#. Assign an optional Docker tag to the Build.
+#. Specify where the ``Dockerfile`` is located. The default is ``/``.
+
+Once the Trusted Build is configured it will automatically trigger a build, and
+in a few minutes, if there are no errors, you will see your new trusted build
+on the Docker Index. It will stay in sync with your GitHub repo until you
+deactivate the Trusted Build.
+
+If you want to see the status of your Trusted Builds you can go to your
+`Trusted Builds page <https://index.docker.io/builds/>`_ on the Docker index,
+and it will show you the status of your builds, and the build history.
+
+Once you've created a Trusted Build you can deactivate or delete it. You cannot,
+however, push to a Trusted Build with the ``docker push`` command. You can only
+manage it by committing code to your GitHub repository.
+
+You can create multiple Trusted Builds per repository and configure them to
+point to specific ``Dockerfile``'s or Git branches.
+
 Private Repositories
 --------------------
20 docs/theme/docker/static/css/main.css (vendored)

@@ -410,3 +410,23 @@ dt:hover > a.headerlink {
 .admonition.seealso {
   border-color: #23cb1f;
 }

+.versionchanged,
+.versionadded,
+.versionmodified,
+.deprecated {
+  font-size: larger;
+  font-weight: bold;
+}
+
+.versionchanged {
+  color: lightseagreen;
+}
+
+.versionadded {
+  color: mediumblue;
+}
+
+.deprecated {
+  color: orangered;
+}
@@ -1 +1 @@
-Solomon Hykes <solomon@dotcloud.com>
+#Solomon Hykes <solomon@dotcloud.com> Temporarily unavailable
@@ -3,6 +3,7 @@ package engine
 import (
     "fmt"
     "github.com/dotcloud/docker/utils"
+    "io"
     "log"
     "os"
     "runtime"

@@ -34,6 +35,9 @@ type Engine struct {
     handlers map[string]Handler
     hack     Hack // data for temporary hackery (see hack.go)
     id       string
+    Stdout   io.Writer
+    Stderr   io.Writer
+    Stdin    io.Reader
 }

 func (eng *Engine) Root() string {

@@ -82,6 +86,9 @@ func New(root string) (*Engine, error) {
         root:     root,
         handlers: make(map[string]Handler),
         id:       utils.RandomString(),
+        Stdout:   os.Stdout,
+        Stderr:   os.Stderr,
+        Stdin:    os.Stdin,
     }
     // Copy existing global handlers
     for k, v := range globalHandlers {

@@ -104,9 +111,9 @@ func (eng *Engine) Job(name string, args ...string) *Job {
         Stdin:  NewInput(),
         Stdout: NewOutput(),
         Stderr: NewOutput(),
+        env:    &Env{},
     }
-    job.Stdout.Add(utils.NopWriteCloser(os.Stdout))
-    job.Stderr.Add(utils.NopWriteCloser(os.Stderr))
+    job.Stderr.Add(utils.NopWriteCloser(eng.Stderr))
     handler, exists := eng.handlers[name]
     if exists {
         job.handler = handler

@@ -116,5 +123,5 @@ func (eng *Engine) Job(name string, args ...string) *Job {

 func (eng *Engine) Logf(format string, args ...interface{}) (n int, err error) {
     prefixedFormat := fmt.Sprintf("[%s] %s\n", eng, strings.TrimRight(format, "\n"))
-    return fmt.Fprintf(os.Stderr, prefixedFormat, args...)
+    return fmt.Fprintf(eng.Stderr, prefixedFormat, args...)
 }
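
The point of the new Stdout/Stderr/Stdin fields is testability: callers can redirect everything an Engine writes instead of having output hardwired to os.Stdout and os.Stderr. A minimal sketch, not part of the commit; the root path is arbitrary and the exact log prefix depends on Engine's String method, which this diff does not show:

    package main

    import (
        "bytes"
        "fmt"

        "github.com/dotcloud/docker/engine"
    )

    func main() {
        // engine.New(root) is shown in the hunk above; the path here is an
        // assumption and New may create it.
        eng, err := engine.New("/var/lib/docker")
        if err != nil {
            panic(err)
        }

        var logs bytes.Buffer
        eng.Stderr = &logs // capture log output instead of writing to the terminal

        eng.Logf("engine started") // Logf now writes to eng.Stderr, per the last hunk
        fmt.Print(logs.String())   // something like "[<engine id>] engine started"
    }

Note that Job() snapshots eng.Stderr when the job is created, so a writer swapped in afterwards only affects jobs created from then on.
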
234 engine/env.go (new file)

@@ -0,0 +1,234 @@
package engine

import (
    "bytes"
    "encoding/json"
    "fmt"
    "io"
    "strconv"
    "strings"
)

type Env []string

func (env *Env) Get(key string) (value string) {
    // FIXME: use Map()
    for _, kv := range *env {
        if strings.Index(kv, "=") == -1 {
            continue
        }
        parts := strings.SplitN(kv, "=", 2)
        if parts[0] != key {
            continue
        }
        if len(parts) < 2 {
            value = ""
        } else {
            value = parts[1]
        }
    }
    return
}

func (env *Env) Exists(key string) bool {
    _, exists := env.Map()[key]
    return exists
}

func (env *Env) GetBool(key string) (value bool) {
    s := strings.ToLower(strings.Trim(env.Get(key), " \t"))
    if s == "" || s == "0" || s == "no" || s == "false" || s == "none" {
        return false
    }
    return true
}

func (env *Env) SetBool(key string, value bool) {
    if value {
        env.Set(key, "1")
    } else {
        env.Set(key, "0")
    }
}

func (env *Env) GetInt(key string) int {
    return int(env.GetInt64(key))
}

func (env *Env) GetInt64(key string) int64 {
    s := strings.Trim(env.Get(key), " \t")
    val, err := strconv.ParseInt(s, 10, 64)
    if err != nil {
        return -1
    }
    return val
}

func (env *Env) SetInt(key string, value int) {
    env.Set(key, fmt.Sprintf("%d", value))
}

func (env *Env) SetInt64(key string, value int64) {
    env.Set(key, fmt.Sprintf("%d", value))
}

// Returns nil if key not found
func (env *Env) GetList(key string) []string {
    sval := env.Get(key)
    if sval == "" {
        return nil
    }
    l := make([]string, 0, 1)
    if err := json.Unmarshal([]byte(sval), &l); err != nil {
        l = append(l, sval)
    }
    return l
}

func (env *Env) GetJson(key string, iface interface{}) error {
    sval := env.Get(key)
    if sval == "" {
        return nil
    }
    return json.Unmarshal([]byte(sval), iface)
}

func (env *Env) SetJson(key string, value interface{}) error {
    sval, err := json.Marshal(value)
    if err != nil {
        return err
    }
    env.Set(key, string(sval))
    return nil
}

func (env *Env) SetList(key string, value []string) error {
    return env.SetJson(key, value)
}

func (env *Env) Set(key, value string) {
    *env = append(*env, key+"="+value)
}
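
Before the JSON codecs below, a usage sketch of the accessors defined so far. This is a hypothetical snippet, not part of the commit; all keys and values are invented. One subtlety worth noting: Get scans the whole slice and keeps the last match, so a later Set shadows an earlier one without removing it.

    package main

    import (
        "fmt"

        "github.com/dotcloud/docker/engine"
    )

    func main() {
        env := &engine.Env{}
        env.Set("name", "redis")                           // invented key/value
        env.SetInt("port", 6379)                           // stored as the string "6379"
        env.SetList("volumes", []string{"/data", "/logs"}) // stored as a JSON array

        fmt.Println(env.Get("name"))        // "redis"
        fmt.Println(env.GetInt("port"))     // 6379
        fmt.Println(env.GetBool("missing")) // false: "", "0", "no", "false", "none" all read as false
        fmt.Println(env.GetInt64("name"))   // -1: unparseable numbers become -1, not an error
        fmt.Println(env.GetList("volumes")) // [/data /logs]
    }
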
func NewDecoder(src io.Reader) *Decoder {
    return &Decoder{
        json.NewDecoder(src),
    }
}

type Decoder struct {
    *json.Decoder
}

func (decoder *Decoder) Decode() (*Env, error) {
    m := make(map[string]interface{})
    if err := decoder.Decoder.Decode(&m); err != nil {
        return nil, err
    }
    env := &Env{}
    for key, value := range m {
        env.SetAuto(key, value)
    }
    return env, nil
}

// Decode decodes `src` as a json dictionary, and adds
// each decoded key-value pair to the environment.
//
// If `src` cannot be decoded as a json dictionary, an error
// is returned.
func (env *Env) Decode(src io.Reader) error {
    m := make(map[string]interface{})
    if err := json.NewDecoder(src).Decode(&m); err != nil {
        return err
    }
    for k, v := range m {
        env.SetAuto(k, v)
    }
    return nil
}

func (env *Env) SetAuto(k string, v interface{}) {
    // FIXME: we fix-convert float values to int, because
    // encoding/json decodes integers to float64, but cannot encode them back.
    // (See http://golang.org/src/pkg/encoding/json/decode.go#L46)
    if fval, ok := v.(float64); ok {
        env.SetInt64(k, int64(fval))
    } else if sval, ok := v.(string); ok {
        env.Set(k, sval)
    } else if val, err := json.Marshal(v); err == nil {
        env.Set(k, string(val))
    } else {
        env.Set(k, fmt.Sprintf("%v", v))
    }
}

func (env *Env) Encode(dst io.Writer) error {
    m := make(map[string]interface{})
    for k, v := range env.Map() {
        var val interface{}
        if err := json.Unmarshal([]byte(v), &val); err == nil {
            // FIXME: we fix-convert float values to int, because
            // encoding/json decodes integers to float64, but cannot encode them back.
            // (See http://golang.org/src/pkg/encoding/json/decode.go#L46)
            if fval, isFloat := val.(float64); isFloat {
                val = int(fval)
            }
            m[k] = val
        } else {
            m[k] = v
        }
    }
    if err := json.NewEncoder(dst).Encode(&m); err != nil {
        return err
    }
    return nil
}

func (env *Env) WriteTo(dst io.Writer) (n int64, err error) {
    // FIXME: return the number of bytes written to respect io.WriterTo
    return 0, env.Encode(dst)
}
func (env *Env) Export(dst interface{}) (err error) {
    defer func() {
        if err != nil {
            err = fmt.Errorf("ExportEnv %s", err)
        }
    }()
    var buf bytes.Buffer
    // step 1: encode/marshal the env to an intermediary json representation
    if err := env.Encode(&buf); err != nil {
        return err
    }
    // step 2: decode/unmarshal the intermediary json into the destination object
    if err := json.NewDecoder(&buf).Decode(dst); err != nil {
        return err
    }
    return nil
}

func (env *Env) Import(src interface{}) (err error) {
    defer func() {
        if err != nil {
            err = fmt.Errorf("ImportEnv: %s", err)
        }
    }()
    var buf bytes.Buffer
    if err := json.NewEncoder(&buf).Encode(src); err != nil {
        return err
    }
    if err := env.Decode(&buf); err != nil {
        return err
    }
    return nil
}

func (env *Env) Map() map[string]string {
    m := make(map[string]string)
    for _, kv := range *env {
        parts := strings.SplitN(kv, "=", 2)
        m[parts[0]] = parts[1]
    }
    return m
}
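
Export and Import round-trip through JSON, so exported struct fields survive the trip while integers get the float64-to-int64 fix-conversion noted in the FIXMEs above. A hypothetical sketch; the struct and its values are invented for illustration:

    package main

    import (
        "fmt"

        "github.com/dotcloud/docker/engine"
    )

    // RedisConfig is a made-up type, used only to show the round trip.
    type RedisConfig struct {
        Name string
        Port int
    }

    func main() {
        env := &engine.Env{}

        // Import: src -> JSON -> env. The Port arrives as a float64 and is
        // fix-converted by SetAuto, so it reads back cleanly as an integer.
        if err := env.Import(RedisConfig{Name: "redis", Port: 6379}); err != nil {
            panic(err)
        }
        fmt.Println(env.Get("Name"), env.GetInt("Port")) // redis 6379

        // Export: the reverse trip, env -> JSON -> dst.
        var out RedisConfig
        if err := env.Export(&out); err != nil {
            panic(err)
        }
        fmt.Printf("%+v\n", out) // {Name:redis Port:6379}
    }
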
40 engine/http.go (new file)

@@ -0,0 +1,40 @@
package engine

import (
    "net/http"
    "path"
)

// ServeHTTP executes a job as specified by the http request `r`, and sends the
// result as an http response.
// This method allows an Engine instance to be passed as a standard http.Handler interface.
//
// Note that the protocol used in this method is a convenience wrapper and is not the canonical
// implementation of remote job execution. This is because HTTP/1 does not handle stream multiplexing,
// and so cannot differentiate stdout from stderr. Additionally, headers cannot be added to a response
// once data has been written to the body, which makes it inconvenient to return metadata such
// as the exit status.
//
func (eng *Engine) ServeHTTP(w http.ResponseWriter, r *http.Request) {
    jobName := path.Base(r.URL.Path)
    jobArgs, exists := r.URL.Query()["a"]
    if !exists {
        jobArgs = []string{}
    }
    w.Header().Set("Job-Name", jobName)
    for _, arg := range jobArgs {
        w.Header().Add("Job-Args", arg)
    }
    job := eng.Job(jobName, jobArgs...)
    job.Stdout.Add(w)
    job.Stderr.Add(w)
    // FIXME: distinguish job status from engine error in Run()
    // The former should be passed as a special header, the latter
    // should cause a 500 status
    w.WriteHeader(http.StatusOK)
    // The exit status cannot be sent reliably with HTTP1, because headers
    // can only be sent before the body.
    // (we could possibly use http footers via chunked encoding, but I couldn't find
    // how to use them in net/http)
    job.Run()
}
engine/job.go
156
engine/job.go
|
@ -1,11 +1,8 @@
|
|||
package engine
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
@ -27,7 +24,7 @@ type Job struct {
|
|||
Eng *Engine
|
||||
Name string
|
||||
Args []string
|
||||
env []string
|
||||
env *Env
|
||||
Stdout *Output
|
||||
Stderr *Output
|
||||
Stdin *Input
|
||||
|
@ -105,80 +102,52 @@ func (job *Job) String() string {
|
|||
}
|
||||
|
||||
func (job *Job) Getenv(key string) (value string) {
|
||||
for _, kv := range job.env {
|
||||
if strings.Index(kv, "=") == -1 {
|
||||
continue
|
||||
}
|
||||
parts := strings.SplitN(kv, "=", 2)
|
||||
if parts[0] != key {
|
||||
continue
|
||||
}
|
||||
if len(parts) < 2 {
|
||||
value = ""
|
||||
} else {
|
||||
value = parts[1]
|
||||
}
|
||||
}
|
||||
return
|
||||
return job.env.Get(key)
|
||||
}
|
||||
|
||||
func (job *Job) GetenvBool(key string) (value bool) {
|
||||
s := strings.ToLower(strings.Trim(job.Getenv(key), " \t"))
|
||||
if s == "" || s == "0" || s == "no" || s == "false" || s == "none" {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
return job.env.GetBool(key)
|
||||
}
|
||||
|
||||
func (job *Job) SetenvBool(key string, value bool) {
|
||||
if value {
|
||||
job.Setenv(key, "1")
|
||||
} else {
|
||||
job.Setenv(key, "0")
|
||||
}
|
||||
job.env.SetBool(key, value)
|
||||
}
|
||||
|
||||
func (job *Job) GetenvInt(key string) int64 {
|
||||
s := strings.Trim(job.Getenv(key), " \t")
|
||||
val, err := strconv.ParseInt(s, 10, 64)
|
||||
if err != nil {
|
||||
return -1
|
||||
}
|
||||
return val
|
||||
func (job *Job) GetenvInt64(key string) int64 {
|
||||
return job.env.GetInt64(key)
|
||||
}
|
||||
|
||||
func (job *Job) SetenvInt(key string, value int64) {
|
||||
job.Setenv(key, fmt.Sprintf("%d", value))
|
||||
func (job *Job) GetenvInt(key string) int {
|
||||
return job.env.GetInt(key)
|
||||
}
|
||||
|
||||
func (job *Job) SetenvInt64(key string, value int64) {
|
||||
job.env.SetInt64(key, value)
|
||||
}
|
||||
|
||||
func (job *Job) SetenvInt(key string, value int) {
|
||||
job.env.SetInt(key, value)
|
||||
}
|
||||
|
||||
// Returns nil if key not found
|
||||
func (job *Job) GetenvList(key string) []string {
|
||||
sval := job.Getenv(key)
|
||||
if sval == "" {
|
||||
return nil
|
||||
}
|
||||
l := make([]string, 0, 1)
|
||||
if err := json.Unmarshal([]byte(sval), &l); err != nil {
|
||||
l = append(l, sval)
|
||||
}
|
||||
return l
|
||||
return job.env.GetList(key)
|
||||
}
|
||||
|
||||
func (job *Job) GetenvJson(key string, iface interface{}) error {
|
||||
return job.env.GetJson(key, iface)
|
||||
}
|
||||
|
||||
func (job *Job) SetenvJson(key string, value interface{}) error {
|
||||
sval, err := json.Marshal(value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
job.Setenv(key, string(sval))
|
||||
return nil
|
||||
return job.env.SetJson(key, value)
|
||||
}
|
||||
|
||||
func (job *Job) SetenvList(key string, value []string) error {
|
||||
return job.SetenvJson(key, value)
|
||||
return job.env.SetJson(key, value)
|
||||
}
|
||||
|
||||
func (job *Job) Setenv(key, value string) {
|
||||
job.env = append(job.env, key+"="+value)
|
||||
job.env.Set(key, value)
|
||||
}
|
||||
|
||||
// DecodeEnv decodes `src` as a json dictionary, and adds
|
||||
|
@ -187,90 +156,23 @@ func (job *Job) Setenv(key, value string) {
|
|||
// If `src` cannot be decoded as a json dictionary, an error
|
||||
// is returned.
|
||||
func (job *Job) DecodeEnv(src io.Reader) error {
|
||||
m := make(map[string]interface{})
|
||||
if err := json.NewDecoder(src).Decode(&m); err != nil {
|
||||
return err
|
||||
}
|
||||
for k, v := range m {
|
||||
// FIXME: we fix-convert float values to int, because
|
||||
// encoding/json decodes integers to float64, but cannot encode them back.
|
||||
// (See http://golang.org/src/pkg/encoding/json/decode.go#L46)
|
||||
if fval, ok := v.(float64); ok {
|
||||
job.SetenvInt(k, int64(fval))
|
||||
} else if sval, ok := v.(string); ok {
|
||||
job.Setenv(k, sval)
|
||||
} else if val, err := json.Marshal(v); err == nil {
|
||||
job.Setenv(k, string(val))
|
||||
} else {
|
||||
job.Setenv(k, fmt.Sprintf("%v", v))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
return job.env.Decode(src)
|
||||
}
|
||||
|
||||
func (job *Job) EncodeEnv(dst io.Writer) error {
|
||||
m := make(map[string]interface{})
|
||||
for k, v := range job.Environ() {
|
||||
var val interface{}
|
||||
if err := json.Unmarshal([]byte(v), &val); err == nil {
|
||||
// FIXME: we fix-convert float values to int, because
|
||||
// encoding/json decodes integers to float64, but cannot encode them back.
|
||||
// (See http://golang.org/src/pkg/encoding/json/decode.go#L46)
|
||||
if fval, isFloat := val.(float64); isFloat {
|
||||
val = int(fval)
|
||||
}
|
||||
m[k] = val
|
||||
} else {
|
||||
m[k] = v
|
||||
}
|
||||
}
|
||||
if err := json.NewEncoder(dst).Encode(&m); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
return job.env.Encode(dst)
|
||||
}
|
||||
|
||||
func (job *Job) ExportEnv(dst interface{}) (err error) {
|
||||
defer func() {
|
||||
if err != nil {
|
||||
err = fmt.Errorf("ExportEnv %s", err)
|
||||
}
|
||||
}()
|
||||
var buf bytes.Buffer
|
||||
// step 1: encode/marshal the env to an intermediary json representation
|
||||
if err := job.EncodeEnv(&buf); err != nil {
|
||||
return err
|
||||
}
|
||||
// step 2: decode/unmarshal the intermediary json into the destination object
|
||||
if err := json.NewDecoder(&buf).Decode(dst); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
return job.env.Export(dst)
|
||||
}
|
||||
|
||||
func (job *Job) ImportEnv(src interface{}) (err error) {
|
||||
defer func() {
|
||||
if err != nil {
|
||||
err = fmt.Errorf("ImportEnv: %s", err)
|
||||
}
|
||||
}()
|
||||
var buf bytes.Buffer
|
||||
if err := json.NewEncoder(&buf).Encode(src); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := job.DecodeEnv(&buf); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
return job.env.Import(src)
|
||||
}
|
||||
|
||||
func (job *Job) Environ() map[string]string {
|
||||
m := make(map[string]string)
|
||||
for _, kv := range job.env {
|
||||
parts := strings.SplitN(kv, "=", 2)
|
||||
m[parts[0]] = parts[1]
|
||||
}
|
||||
return m
|
||||
return job.env.Map()
|
||||
}
|
||||
|
||||
func (job *Job) Logf(format string, args ...interface{}) (n int, err error) {
|
||||
|
|
|
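
With the env field now a *Env, every Job helper above is a one-line delegation, so a job's typed environment behaves exactly like the Env shown earlier. A hypothetical sketch of the resulting API surface; the job name and keys are invented for illustration:

    package main

    import (
        "fmt"

        "github.com/dotcloud/docker/engine"
    )

    func main() {
        eng, err := engine.New("/var/lib/docker") // root path is an assumption
        if err != nil {
            panic(err)
        }

        // "create" and the keys below are illustrative only; the job is
        // configured here but never Run.
        job := eng.Job("create", "my-container")
        job.Setenv("Image", "ubuntu")
        job.SetenvBool("Tty", true)
        job.SetenvInt64("Memory", 1024)

        fmt.Println(job.Getenv("Image"))       // "ubuntu"
        fmt.Println(job.GetenvBool("Tty"))     // true
        fmt.Println(job.GetenvInt64("Memory")) // 1024
    }
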
@@ -164,3 +164,29 @@ func Tail(src io.Reader, n int, dst *[]string) {
         *dst = append(*dst, v.(string))
     })
 }

+// AddEnv starts a new goroutine which will decode all subsequent data
+// as a stream of json-encoded objects, and point `dst` to the last
+// decoded object.
+// The result `env` can be queried using the type-neutral Env interface.
+// It is not safe to query `env` until the Output is closed.
+func (o *Output) AddEnv() (dst *Env, err error) {
+    src, err := o.AddPipe()
+    if err != nil {
+        return nil, err
+    }
+    dst = &Env{}
+    o.tasks.Add(1)
+    go func() {
+        defer o.tasks.Done()
+        decoder := NewDecoder(src)
+        for {
+            env, err := decoder.Decode()
+            if err != nil {
+                return
+            }
+            *dst = *env
+        }
+    }()
+    return dst, nil
+}

@@ -72,6 +72,26 @@ func (w *sentinelWriteCloser) Close() error {
     return nil
 }

+func TestOutputAddEnv(t *testing.T) {
+    input := "{\"foo\": \"bar\", \"answer_to_life_the_universe_and_everything\": 42}"
+    o := NewOutput()
+    result, err := o.AddEnv()
+    if err != nil {
+        t.Fatal(err)
+    }
+    o.Write([]byte(input))
+    o.Close()
+    if v := result.Get("foo"); v != "bar" {
+        t.Errorf("Expected %v, got %v", "bar", v)
+    }
+    if v := result.GetInt("answer_to_life_the_universe_and_everything"); v != 42 {
+        t.Errorf("Expected %v, got %v", 42, v)
+    }
+    if v := result.Get("this-value-doesnt-exist"); v != "" {
+        t.Errorf("Expected %v, got %v", "", v)
+    }
+}
+
 func TestOutputAddClose(t *testing.T) {
     o := NewOutput()
     var s sentinelWriteCloser
2 graph.go

@@ -219,7 +219,7 @@ func (graph *Graph) TempLayerArchive(id string, compression archive.Compression,
     if err != nil {
         return nil, err
     }
-    return archive.NewTempArchive(utils.ProgressReader(ioutil.NopCloser(a), 0, output, sf, true, "", "Buffering to disk"), tmp)
+    return archive.NewTempArchive(utils.ProgressReader(ioutil.NopCloser(a), 0, output, sf, false, utils.TruncateID(id), "Buffering to disk"), tmp)
 }

 // Mktemp creates a temporary sub-directory inside the graph's filesystem.
@@ -26,11 +26,11 @@ import (
     "github.com/dotcloud/docker/archive"
     "github.com/dotcloud/docker/graphdriver"
     "github.com/dotcloud/docker/utils"
-    "log"
     "os"
     "os/exec"
     "path"
+    "strings"
     "syscall"
 )

 func init() {

@@ -313,24 +313,44 @@ func (a *Driver) Cleanup() error {
     return nil
 }

-func (a *Driver) aufsMount(ro []string, rw, target string) error {
-    rwBranch := fmt.Sprintf("%v=rw", rw)
-    roBranches := ""
-    for _, layer := range ro {
-        roBranches += fmt.Sprintf("%v=ro+wh:", layer)
-    }
-    branches := fmt.Sprintf("br:%v:%v,xino=/dev/shm/aufs.xino", rwBranch, roBranches)
-
-    //if error, try to load aufs kernel module
-    if err := mount("none", target, "aufs", 0, branches); err != nil {
-        log.Printf("Kernel does not support AUFS, trying to load the AUFS module with modprobe...")
-        if err := exec.Command("modprobe", "aufs").Run(); err != nil {
-            return fmt.Errorf("Unable to load the AUFS module")
-        }
-        log.Printf("...module loaded.")
-        if err := mount("none", target, "aufs", 0, branches); err != nil {
-            return fmt.Errorf("Unable to mount using aufs %s", err)
-        }
-    }
-    return nil
+func (a *Driver) aufsMount(ro []string, rw, target string) (err error) {
+    defer func() {
+        if err != nil {
+            Unmount(target)
+        }
+    }()
+
+    if err = a.tryMount(ro, rw, target); err != nil {
+        if err = a.mountRw(rw, target); err != nil {
+            return
+        }
+
+        for _, layer := range ro {
+            branch := fmt.Sprintf("append:%s=ro+wh", layer)
+            if err = mount("none", target, "aufs", syscall.MS_REMOUNT, branch); err != nil {
+                return
+            }
+        }
+    }
+    return
 }

+// Try to mount using the aufs fast path, if this fails then
+// append ro layers.
+func (a *Driver) tryMount(ro []string, rw, target string) (err error) {
+    var (
+        rwBranch   = fmt.Sprintf("%s=rw", rw)
+        roBranches = fmt.Sprintf("%s=ro+wh:", strings.Join(ro, "=ro+wh:"))
+    )
+    return mount("none", target, "aufs", 0, fmt.Sprintf("br:%v:%v,xino=/dev/shm/aufs.xino", rwBranch, roBranches))
+}
+
+func (a *Driver) mountRw(rw, target string) error {
+    return mount("none", target, "aufs", 0, fmt.Sprintf("br:%s,xino=/dev/shm/aufs.xino", rw))
+}
+
+func rollbackMount(target string, err error) {
+    if err != nil {
+        Unmount(target)
+    }
+}
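
A plausible reading of this rewrite, not stated in the commit itself: mount(2) accepts only about a page of option data, which is what historically capped aufs images at 42 layers. Mounting the rw branch alone and then remount-appending each ro layer keeps every option string short, which is what lets the test below stack far more than 42 layers. A sketch with invented paths, printing the option strings the two code paths build:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        rw := "/var/lib/docker/aufs/diff/top" // invented paths throughout
        ro := []string{"/var/lib/docker/aufs/diff/a", "/var/lib/docker/aufs/diff/b"}

        // Fast path (tryMount): every branch packed into one mount-data string,
        // which grows with the number of layers.
        rwBranch := fmt.Sprintf("%s=rw", rw)
        roBranches := fmt.Sprintf("%s=ro+wh:", strings.Join(ro, "=ro+wh:"))
        fmt.Printf("br:%v:%v,xino=/dev/shm/aufs.xino\n", rwBranch, roBranches)

        // Fallback (mountRw + remount loop): one short string for the rw
        // branch, then one fixed-size "append:" string per ro layer.
        fmt.Printf("br:%s,xino=/dev/shm/aufs.xino\n", rw)
        for _, layer := range ro {
            fmt.Printf("append:%s=ro+wh\n", layer)
        }
    }
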
@@ -1,7 +1,11 @@
 package aufs

 import (
+    "crypto/sha256"
+    "encoding/hex"
     "fmt"
     "github.com/dotcloud/docker/archive"
+    "io/ioutil"
     "os"
     "path"
     "testing"

@@ -621,3 +625,70 @@ func TestApplyDiff(t *testing.T) {
         t.Fatal(err)
     }
 }

+func hash(c string) string {
+    h := sha256.New()
+    fmt.Fprint(h, c)
+    return hex.EncodeToString(h.Sum(nil))
+}
+
+func TestMountMoreThan42Layers(t *testing.T) {
+    d := newDriver(t)
+    defer os.RemoveAll(tmp)
+    defer d.Cleanup()
+    var last string
+    var expected int
+
+    for i := 1; i < 127; i++ {
+        expected++
+        var (
+            parent  = fmt.Sprintf("%d", i-1)
+            current = fmt.Sprintf("%d", i)
+        )
+
+        if parent == "0" {
+            parent = ""
+        } else {
+            parent = hash(parent)
+        }
+        current = hash(current)
+
+        if err := d.Create(current, parent); err != nil {
+            t.Logf("Current layer %d", i)
+            t.Fatal(err)
+        }
+        point, err := d.Get(current)
+        if err != nil {
+            t.Logf("Current layer %d", i)
+            t.Fatal(err)
+        }
+        f, err := os.Create(path.Join(point, current))
+        if err != nil {
+            t.Logf("Current layer %d", i)
+            t.Fatal(err)
+        }
+        f.Close()
+
+        if i%10 == 0 {
+            if err := os.Remove(path.Join(point, parent)); err != nil {
+                t.Logf("Current layer %d", i)
+                t.Fatal(err)
+            }
+            expected--
+        }
+        last = current
+    }
+
+    // Perform the actual mount for the top most image
+    point, err := d.Get(last)
+    if err != nil {
+        t.Fatal(err)
+    }
+    files, err := ioutil.ReadDir(point)
+    if err != nil {
+        t.Fatal(err)
+    }
+    if len(files) != expected {
+        t.Fatalf("Expected %d got %d", expected, len(files))
+    }
+}
@@ -2,6 +2,6 @@ package aufs

 import "syscall"

-func mount(source string, target string, fstype string, flags uintptr, data string) (err error) {
+func mount(source string, target string, fstype string, flags uintptr, data string) error {
     return syscall.Mount(source, target, fstype, flags, data)
 }
@@ -8,6 +8,14 @@ package devmapper
 #include <linux/loop.h> // FIXME: present only for defines, maybe we can remove it?
 #include <linux/fs.h>   // FIXME: present only for BLKGETSIZE64, maybe we can remove it?

+#ifndef LOOP_CTL_GET_FREE
+#define LOOP_CTL_GET_FREE 0x4C82
+#endif
+
+#ifndef LO_FLAGS_PARTSCAN
+#define LO_FLAGS_PARTSCAN 8
+#endif
+
 // FIXME: Can't we find a way to do the logging in pure Go?
 extern void DevmapperLogCallback(int level, char *file, int line, int dm_errno_or_class, char *str);

@@ -55,7 +63,6 @@ type (
     }
 )

-// FIXME: Make sure the values are defined in C
 // IOCTL consts
 const (
     BlkGetSize64 = C.BLKGETSIZE64
@@ -1,2 +1 @@
-Solomon Hykes <solomon@dotcloud.com> (@shykes)
 Tianon Gravi <admwiggin@gmail.com> (@tianon)
34 hack/make.sh

@@ -35,8 +35,11 @@ grep -q "$RESOLVCONF" /proc/mounts || {
 DEFAULT_BUNDLES=(
     binary
     test
+    test-integration
     dynbinary
     dyntest
+    dyntest-integration
+    cover
     tgz
     ubuntu
 )

@@ -62,6 +65,37 @@ LDFLAGS='-X main.GITCOMMIT "'$GITCOMMIT'" -X main.VERSION "'$VERSION'" -w'
 LDFLAGS_STATIC='-X github.com/dotcloud/docker/utils.IAMSTATIC true -linkmode external -extldflags "-lpthread -static -Wl,--unresolved-symbols=ignore-in-object-files"'
 BUILDFLAGS='-tags netgo'

+HAVE_GO_TEST_COVER=
+if go help testflag | grep -q -- -cover; then
+    HAVE_GO_TEST_COVER=1
+fi
+
+# If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'.
+# You can use this to select certain tests to run, eg.
+#
+#   TESTFLAGS='-run ^TestBuild$' ./hack/make.sh test
+#
+go_test_dir() {
+    dir=$1
+    testcover=()
+    if [ "$HAVE_GO_TEST_COVER" ]; then
+        # if our current go install has -cover, we want to use it :)
+        mkdir -p "$DEST/coverprofiles"
+        coverprofile="docker${dir#.}"
+        coverprofile="$DEST/coverprofiles/${coverprofile//\//-}"
+        testcover=( -cover -coverprofile "$coverprofile" )
+    fi
+    ( # we run "go test -i" outside the "set -x" to provide cleaner output
+        cd "$dir"
+        go test -i -ldflags "$LDFLAGS" $BUILDFLAGS
+    )
+    (
+        set -x
+        cd "$dir"
+        go test ${testcover[@]} -ldflags "$LDFLAGS" $BUILDFLAGS $TESTFLAGS
+    )
+}
+
 bundle() {
     bundlescript=$1
     bundle=$(basename $bundlescript)
21 hack/make/cover (new file)

@@ -0,0 +1,21 @@
#!/bin/bash

DEST="$1"

bundle_cover() {
    coverprofiles=( "$DEST/../"*"/coverprofiles/"* )
    for p in "${coverprofiles[@]}"; do
        echo
        (
            set -x
            go tool cover -func="$p"
        )
    done
}

if [ "$HAVE_GO_TEST_COVER" ]; then
    bundle_cover 2>&1 | tee "$DEST/report.log"
else
    echo >&2 'warning: the current version of go does not support -cover'
    echo >&2 '  skipping test coverage report'
fi
@@ -11,5 +11,7 @@ ln -sf dockerinit-$VERSION $DEST/dockerinit
 export DOCKER_INITSHA1="$(sha1sum $DEST/dockerinit-$VERSION | cut -d' ' -f1)"
 # exported so that "dyntest" can easily access it later without recalculating it

+(
+    export LDFLAGS_STATIC="-X github.com/dotcloud/docker/utils.INITSHA1 \"$DOCKER_INITSHA1\""
+    source "$(dirname "$BASH_SOURCE")/binary"
+)
@@ -10,55 +10,8 @@ if [ ! -x "$INIT" ]; then
     false
 fi

-# Run Docker's test suite, including sub-packages, and store their output as a bundle
-# If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'.
-# You can use this to select certain tests to run, eg.
-#
-#   TESTFLAGS='-run ^TestBuild$' ./hack/make.sh test
-#
-bundle_test() {
-    {
-        date
-
-        TESTS_FAILED=()
-        for test_dir in $(find_test_dirs); do
-            echo
-
-            if ! (
-                set -x
-                cd $test_dir
-
-                # Install packages that are dependencies of the tests.
-                # Note: Does not run the tests.
-                go test -i -ldflags "$LDFLAGS" $BUILDFLAGS
-
-                # Run the tests with the optional $TESTFLAGS.
-                export TEST_DOCKERINIT_PATH=$DEST/../dynbinary/dockerinit-$VERSION
-                go test -ldflags "$LDFLAGS -X github.com/dotcloud/docker/utils.INITSHA1 \"$DOCKER_INITSHA1\"" $BUILDFLAGS $TESTFLAGS
-            ); then
-                TESTS_FAILED+=("$test_dir")
-                sleep 1 # give it a second, so observers watching can take note
-            fi
-        done
-
-        # if some tests fail, we want the bundlescript to fail, but we want to
-        # try running ALL the tests first, hence TESTS_FAILED
-        if [ "${#TESTS_FAILED[@]}" -gt 0 ]; then
-            echo
-            echo "Test failures in: ${TESTS_FAILED[@]}"
-            false
-        fi
-    } 2>&1 | tee $DEST/test.log
-}
-
-
-# This helper function walks the current directory looking for directories
-# holding Go test files, and prints their paths on standard output, one per
-# line.
-find_test_dirs() {
-    find . -name '*_test.go' | grep -v '^./vendor' |
-        { while read f; do dirname $f; done; } |
-        sort -u
-}
-
-bundle_test
+(
+    export TEST_DOCKERINIT_PATH="$INIT"
+    export LDFLAGS_STATIC="-X github.com/dotcloud/docker/utils.INITSHA1 \"$DOCKER_INITSHA1\""
+    source "$(dirname "$BASH_SOURCE")/test"
+)
17 hack/make/dyntest-integration (new file)

@@ -0,0 +1,17 @@
#!/bin/bash

DEST=$1
INIT=$DEST/../dynbinary/dockerinit-$VERSION

set -e

if [ ! -x "$INIT" ]; then
    echo >&2 'error: dynbinary must be run before dyntest-integration'
    false
fi

(
    export TEST_DOCKERINIT_PATH="$INIT"
    export LDFLAGS_STATIC="-X github.com/dotcloud/docker/utils.INITSHA1 \"$DOCKER_INITSHA1\""
    source "$(dirname "$BASH_SOURCE")/test-integration"
)
@@ -12,7 +12,7 @@ GREEN=$'\033[32m'
 # If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'.
 # You can use this to select certain tests to run, eg.
 #
-#   TESTFLAGS='-run ^TestBuild$' ./hack/make.sh test
+#   TESTFLAGS='-run ^TestBuild$' ./hack/make.sh test
 #
 bundle_test() {
 	{
|
@ -22,34 +22,27 @@ bundle_test() {
|
|||
for test_dir in $(find_test_dirs); do
|
||||
echo
|
||||
|
||||
if ! (
|
||||
set -x
|
||||
cd $test_dir
|
||||
|
||||
# Install packages that are dependencies of the tests.
|
||||
# Note: Does not run the tests.
|
||||
go test -i -ldflags "$LDFLAGS $LDFLAGS_STATIC" $BUILDFLAGS
|
||||
|
||||
# Run the tests with the optional $TESTFLAGS.
|
||||
go test -ldflags "$LDFLAGS $LDFLAGS_STATIC" $BUILDFLAGS $TESTFLAGS
|
||||
); then
|
||||
if ! LDFLAGS="$LDFLAGS $LDFLAGS_STATIC" go_test_dir "$test_dir"; then
|
||||
TESTS_FAILED+=("$test_dir")
|
||||
echo
|
||||
echo "${RED}Test Failed: $test_dir${TEXTRESET}"
|
||||
echo
|
||||
echo "${RED}Tests failed: $test_dir${TEXTRESET}"
|
||||
sleep 1 # give it a second, so observers watching can take note
|
||||
fi
|
||||
done
|
||||
|
||||
echo
|
||||
echo
|
||||
echo
|
||||
|
||||
# if some tests fail, we want the bundlescript to fail, but we want to
|
||||
# try running ALL the tests first, hence TESTS_FAILED
|
||||
if [ "${#TESTS_FAILED[@]}" -gt 0 ]; then
|
||||
echo
|
||||
echo "${RED}Test failures in: ${TESTS_FAILED[@]}${TEXTRESET}"
|
||||
echo
|
||||
false
|
||||
else
|
||||
echo
|
||||
echo "${GREEN}Test success${TEXTRESET}"
|
||||
echo
|
||||
true
|
||||
fi
|
||||
} 2>&1 | tee $DEST/test.log
|
||||
|
@@ -60,9 +53,10 @@ bundle_test() {
 # holding Go test files, and prints their paths on standard output, one per
 # line.
 find_test_dirs() {
-	find . -name '*_test.go' | grep -v '^./vendor' |
-		{ while read f; do dirname $f; done; } |
-		sort -u
+	find -not \( \
+		\( -wholename './vendor' -o -wholename './integration' \) \
+		-prune \
+	\) -name '*_test.go' -print0 | xargs -0n1 dirname | sort -u
 }

 bundle_test
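The rewritten `find` prunes the `vendor` and `integration` trees before matching `*_test.go`, so those directories are never descended into. The same walk expressed in Go, purely as an illustration and not part of the commit:

	package main

	import (
		"fmt"
		"os"
		"path/filepath"
		"sort"
		"strings"
	)

	func main() {
		seen := map[string]bool{}
		filepath.Walk(".", func(p string, info os.FileInfo, err error) error {
			if err != nil {
				return err
			}
			// Prune the same directories the shell version prunes.
			if info.IsDir() && (p == "vendor" || p == "integration") {
				return filepath.SkipDir
			}
			if !info.IsDir() && strings.HasSuffix(p, "_test.go") {
				seen[filepath.Dir(p)] = true
			}
			return nil
		})
		dirs := make([]string, 0, len(seen))
		for d := range seen {
			dirs = append(dirs, d)
		}
		sort.Strings(dirs) // the equivalent of `sort -u`
		for _, d := range dirs {
			fmt.Println(d)
		}
	}
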
hack/make/test-integration (new file)
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+DEST=$1
+
+set -e
+
+bundle_test_integration() {
+	LDFLAGS="$LDFLAGS $LDFLAGS_STATIC" go_test_dir ./integration
+}
+
+bundle_test_integration 2>&1 | tee $DEST/test.log
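The bundle simply points `go_test_dir` at `./integration`, so any standard Go test file placed there is picked up. A hypothetical minimal test, for illustration only — the file name and package are assumptions:

	// integration/smoke_test.go — hypothetical example; not part of the commit.
	package docker

	import "testing"

	func TestSmoke(t *testing.T) {
		// A placeholder assertion; real integration tests drive a test engine.
		if got := 1 + 1; got != 2 {
			t.Fatalf("expected 2, got %d", got)
		}
	}
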
hack/stats.sh (new executable file)
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+## Run this script from the root of the docker repository
+## to query project stats useful to the maintainers.
+## You will need to install `pulls` and `issues` from
+## http://github.com/crosbymichael/pulls
+
+set -e
+
+echo -n "Open pulls: "
+PULLS=$(pulls | wc -l); let PULLS=$PULLS-1
+echo $PULLS
+
+echo -n "Pulls alru: "
+pulls alru
+
+echo -n "Open issues: "
+ISSUES=$(issues list | wc -l); let ISSUES=$ISSUES-1
+echo $ISSUES
+
+echo -n "Issues alru: "
+issues alru

@@ -27,6 +27,8 @@ git_clone github.com/gorilla/context/ 708054d61e5
 git_clone github.com/gorilla/mux/ 9b36453141c

+git_clone github.com/syndtr/gocapability 3454319be2
+
 # Docker requires code.google.com/p/go.net/websocket
 PKG=code.google.com/p/go.net REV=84a4013f96e0
 (

@@ -7,6 +7,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"github.com/dotcloud/docker"
+	"github.com/dotcloud/docker/engine"
 	"github.com/dotcloud/docker/utils"
 	"io"
 	"net"

@@ -35,12 +36,18 @@ func TestGetVersion(t *testing.T) {
 	}
 	assertHttpNotError(r, t)

-	v := &docker.APIVersion{}
-	if err = json.Unmarshal(r.Body.Bytes(), v); err != nil {
+	out := engine.NewOutput()
+	v, err := out.AddEnv()
+	if err != nil {
 		t.Fatal(err)
 	}
-	if v.Version != docker.VERSION {
-		t.Errorf("Expected version %s, %s found", docker.VERSION, v.Version)
+	if _, err := io.Copy(out, r.Body); err != nil {
+		t.Fatal(err)
+	}
+	out.Close()
+	expected := docker.VERSION
+	if result := v.Get("Version"); result != expected {
+		t.Errorf("Expected version %s, %s found", expected, result)
 	}
 }
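The test now decodes the job's output through engine.NewOutput/AddEnv instead of unmarshalling a fixed struct. Over HTTP the payload is still plain JSON, so a client needs nothing but the standard library. A minimal sketch — the port is an assumption (4243 was a common daemon port of this era), and only fields seen above are decoded:

	package main

	import (
		"encoding/json"
		"fmt"
		"net/http"
	)

	// Fields appearing in the version payload above; others are omitted.
	type version struct {
		Version   string
		GitCommit string
		GoVersion string
	}

	func main() {
		resp, err := http.Get("http://localhost:4243/version")
		if err != nil {
			panic(err)
		}
		defer resp.Body.Close()

		var v version
		if err := json.NewDecoder(resp.Body).Decode(&v); err != nil {
			panic(err)
		}
		fmt.Printf("docker %s (commit %s, %s)\n", v.Version, v.GitCommit, v.GoVersion)
	}
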
@@ -65,13 +72,17 @@ func TestGetInfo(t *testing.T) {
 	}
 	assertHttpNotError(r, t)

-	infos := &docker.APIInfo{}
-	err = json.Unmarshal(r.Body.Bytes(), infos)
+	out := engine.NewOutput()
+	i, err := out.AddEnv()
 	if err != nil {
 		t.Fatal(err)
 	}
-	if infos.Images != len(initialImages) {
-		t.Errorf("Expected images: %d, %d found", len(initialImages), infos.Images)
+	if _, err := io.Copy(out, r.Body); err != nil {
+		t.Fatal(err)
+	}
+	out.Close()
+	if images := i.GetInt("Images"); images != len(initialImages) {
+		t.Errorf("Expected images: %d, %d found", len(initialImages), images)
 	}
 }

@@ -1113,7 +1124,7 @@ func TestDeleteImages(t *testing.T) {
 		t.Fatal(err)
 	}

-	if err := srv.ContainerTag(unitTestImageName, "test", "test", false); err != nil {
+	if err := eng.Job("tag", unitTestImageName, "test", "test").Run(); err != nil {
 		t.Fatal(err)
 	}
 	images, err := srv.Images(false, "")

@@ -266,7 +266,7 @@ func buildImage(context testContextTemplate, t *testing.T, eng *engine.Engine, u
 	}
 	dockerfile := constructDockerfile(context.dockerfile, ip, port)

-	buildfile := docker.NewBuildFile(srv, ioutil.Discard, ioutil.Discard, false, useCache, false, ioutil.Discard, utils.NewStreamFormatter(false))
+	buildfile := docker.NewBuildFile(srv, ioutil.Discard, ioutil.Discard, false, useCache, false, ioutil.Discard, utils.NewStreamFormatter(false), nil)
 	id, err := buildfile.Build(mkTestContext(dockerfile, context.files, t))
 	if err != nil {
 		return nil, err

@@ -516,7 +516,7 @@ func TestForbiddenContextPath(t *testing.T) {
 	}
 	dockerfile := constructDockerfile(context.dockerfile, ip, port)

-	buildfile := docker.NewBuildFile(srv, ioutil.Discard, ioutil.Discard, false, true, false, ioutil.Discard, utils.NewStreamFormatter(false))
+	buildfile := docker.NewBuildFile(srv, ioutil.Discard, ioutil.Discard, false, true, false, ioutil.Discard, utils.NewStreamFormatter(false), nil)
 	_, err = buildfile.Build(mkTestContext(dockerfile, context.files, t))

 	if err == nil {

@@ -562,7 +562,7 @@ func TestBuildADDFileNotFound(t *testing.T) {
 	}
 	dockerfile := constructDockerfile(context.dockerfile, ip, port)

-	buildfile := docker.NewBuildFile(mkServerFromEngine(eng, t), ioutil.Discard, ioutil.Discard, false, true, false, ioutil.Discard, utils.NewStreamFormatter(false))
+	buildfile := docker.NewBuildFile(mkServerFromEngine(eng, t), ioutil.Discard, ioutil.Discard, false, true, false, ioutil.Discard, utils.NewStreamFormatter(false), nil)
 	_, err = buildfile.Build(mkTestContext(dockerfile, context.files, t))

 	if err == nil {

@@ -867,11 +867,11 @@ func TestImagesTree(t *testing.T) {
 	}
 	cmdOutput := string(cmdOutputBytes)
 	regexpStrings := []string{
-		fmt.Sprintf("└─%s Size: (\\d+.\\d+ MB) \\(virtual \\d+.\\d+ MB\\) Tags: %s:latest", unitTestImageIDShort, unitTestImageName),
+		fmt.Sprintf("└─%s Virtual Size: \\d+.\\d+ MB Tags: %s:latest", unitTestImageIDShort, unitTestImageName),
 		"(?m) └─[0-9a-f]+.*",
 		"(?m) └─[0-9a-f]+.*",
 		"(?m) └─[0-9a-f]+.*",
-		fmt.Sprintf("(?m)^ └─%s Size: \\d+ B \\(virtual \\d+.\\d+ MB\\) Tags: test:latest", utils.TruncateID(image.ID)),
+		fmt.Sprintf("(?m)^ └─%s Virtual Size: \\d+.\\d+ MB Tags: test:latest", utils.TruncateID(image.ID)),
 	}

 	compiledRegexps := []*regexp.Regexp{}

@@ -910,8 +910,7 @@ run    [ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]
 		t.Fatal(err)
 	}

-	err = mkServerFromEngine(eng, t).ContainerTag(image.ID, "test", "latest", false)
-	if err != nil {
+	if err := eng.Job("tag", image.ID, "test").Run(); err != nil {
 		t.Fatal(err)
 	}

@@ -8,7 +8,7 @@ import (
 	"testing"
 )

-func TestContainerTagImageDelete(t *testing.T) {
+func TestImageTagImageDelete(t *testing.T) {
 	eng := NewTestEngine(t)
 	defer mkRuntimeFromEngine(eng, t).Nuke()

@@ -18,14 +18,15 @@ func TestContainerTagImageDelete(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	if err := srv.ContainerTag(unitTestImageName, "utest", "tag1", false); err != nil {
+	if err := eng.Job("tag", unitTestImageName, "utest", "tag1").Run(); err != nil {
 		t.Fatal(err)
 	}

-	if err := srv.ContainerTag(unitTestImageName, "utest/docker", "tag2", false); err != nil {
+	if err := eng.Job("tag", unitTestImageName, "utest/docker", "tag2").Run(); err != nil {
 		t.Fatal(err)
 	}
-	if err := srv.ContainerTag(unitTestImageName, "utest:5000/docker", "tag3", false); err != nil {
+
+	if err := eng.Job("tag", unitTestImageName, "utest:5000/docker", "tag3").Run(); err != nil {
 		t.Fatal(err)
 	}
@@ -128,8 +129,9 @@ func TestCreateRmVolumes(t *testing.T) {
 		t.Fatal(err)
 	}

-	err = srv.ContainerStop(id, 1)
-	if err != nil {
+	job = eng.Job("stop", id)
+	job.SetenvInt("t", 1)
+	if err := job.Run(); err != nil {
 		t.Fatal(err)
 	}

@@ -144,7 +146,6 @@ func TestCreateRmVolumes(t *testing.T) {

 func TestCommit(t *testing.T) {
 	eng := NewTestEngine(t)
-	srv := mkServerFromEngine(eng, t)
 	defer mkRuntimeFromEngine(eng, t).Nuke()

 	config, _, _, err := docker.ParseRun([]string{unitTestImageID, "/bin/cat"}, nil)

@@ -154,7 +155,11 @@ func TestCommit(t *testing.T) {

 	id := createTestContainer(eng, config, t)

-	if _, err := srv.ContainerCommit(id, "testrepo", "testtag", "", "", config); err != nil {
+	job := eng.Job("commit", id)
+	job.Setenv("repo", "testrepo")
+	job.Setenv("tag", "testtag")
+	job.SetenvJson("config", config)
+	if err := job.Run(); err != nil {
 		t.Fatal(err)
 	}
 }

@@ -187,7 +192,9 @@ func TestCreateStartRestartStopStartKillRm(t *testing.T) {
 		t.Fatal(err)
 	}

-	if err := srv.ContainerStop(id, 15); err != nil {
+	job = eng.Job("stop", id)
+	job.SetenvInt("t", 15)
+	if err := job.Run(); err != nil {
 		t.Fatal(err)
 	}

@@ -199,7 +206,7 @@ func TestCreateStartRestartStopStartKillRm(t *testing.T) {
 		t.Fatal(err)
 	}

-	if err := srv.ContainerKill(id, 0); err != nil {
+	if err := eng.Job("kill", id).Run(); err != nil {
 		t.Fatal(err)
 	}
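These conversions all follow one pattern: look a job up by name on the engine, set its inputs as arguments or env entries, and run it. A condensed sketch of the pattern using only calls that appear in the tests above (the container ID is a placeholder):

	package main

	import "github.com/dotcloud/docker/engine"

	// stopContainer mirrors the job pattern used throughout these tests:
	// look the job up by name, set inputs via Setenv*, then Run it.
	func stopContainer(eng *engine.Engine, id string) error {
		job := eng.Job("stop", id)
		job.SetenvInt("t", 10) // grace period in seconds, as in the tests above
		return job.Run()
	}

	func main() {}
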
@@ -256,17 +263,19 @@ func TestRmi(t *testing.T) {
 		t.Fatal(err)
 	}

-	if _, err := srv.ContainerWait(containerID); err != nil {
+	if err := eng.Job("wait", containerID).Run(); err != nil {
 		t.Fatal(err)
 	}

-	imageID, err := srv.ContainerCommit(containerID, "test", "", "", "", nil)
-	if err != nil {
+	job = eng.Job("commit", containerID)
+	job.Setenv("repo", "test")
+	var imageID string
+	job.Stdout.AddString(&imageID)
+	if err := job.Run(); err != nil {
 		t.Fatal(err)
 	}

-	err = srv.ContainerTag(imageID, "test", "0.1", false)
-	if err != nil {
+	if err := eng.Job("tag", imageID, "test", "0.1").Run(); err != nil {
 		t.Fatal(err)
 	}

@@ -281,12 +290,13 @@ func TestRmi(t *testing.T) {
 		t.Fatal(err)
 	}

-	if _, err := srv.ContainerWait(containerID); err != nil {
+	if err := eng.Job("wait", containerID).Run(); err != nil {
 		t.Fatal(err)
 	}

-	_, err = srv.ContainerCommit(containerID, "test", "", "", "", nil)
-	if err != nil {
+	job = eng.Job("commit", containerID)
+	job.Setenv("repo", "test")
+	if err := job.Run(); err != nil {
 		t.Fatal(err)
 	}

@@ -329,14 +339,15 @@ func TestImagesFilter(t *testing.T) {

 	srv := mkServerFromEngine(eng, t)

-	if err := srv.ContainerTag(unitTestImageName, "utest", "tag1", false); err != nil {
+	if err := eng.Job("tag", unitTestImageName, "utest", "tag1").Run(); err != nil {
 		t.Fatal(err)
 	}

-	if err := srv.ContainerTag(unitTestImageName, "utest/docker", "tag2", false); err != nil {
+	if err := eng.Job("tag", unitTestImageName, "utest/docker", "tag2").Run(); err != nil {
 		t.Fatal(err)
 	}
-	if err := srv.ContainerTag(unitTestImageName, "utest:5000/docker", "tag3", false); err != nil {
+
+	if err := eng.Job("tag", unitTestImageName, "utest:5000/docker", "tag3").Run(); err != nil {
 		t.Fatal(err)
 	}
@@ -398,3 +409,50 @@ func TestImageInsert(t *testing.T) {
 		t.Fatalf("expected no error, but got %v", err)
 	}
 }
+
+// Regression test for being able to untag an image with an existing
+// container
+func TestDeleteTagWithExistingContainers(t *testing.T) {
+	eng := NewTestEngine(t)
+	defer nuke(mkRuntimeFromEngine(eng, t))
+
+	srv := mkServerFromEngine(eng, t)
+
+	// Tag the image
+	if err := eng.Job("tag", unitTestImageID, "utest", "tag1").Run(); err != nil {
+		t.Fatal(err)
+	}
+
+	// Create a container from the image
+	config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	id := createNamedTestContainer(eng, config, t, "testingtags")
+	if id == "" {
+		t.Fatal("No id returned")
+	}
+
+	containers := srv.Containers(true, false, -1, "", "")
+
+	if len(containers) != 1 {
+		t.Fatalf("Expected 1 container got %d", len(containers))
+	}
+
+	// Try to remove the tag
+	imgs, err := srv.ImageDelete("utest:tag1", true)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(imgs) != 1 {
+		t.Fatalf("Should only have deleted one untag %d", len(imgs))
+	}
+
+	untag := imgs[0]
+
+	if untag.Untagged != unitTestImageID {
+		t.Fatalf("Expected %s got %s", unitTestImageID, untag.Untagged)
+	}
+}

@@ -105,7 +105,7 @@ func containerWaitTimeout(eng *engine.Engine, id string, t utils.Fataler) error
 }

 func containerKill(eng *engine.Engine, id string, t utils.Fataler) {
-	if err := getContainer(eng, id, t).Kill(); err != nil {
+	if err := eng.Job("kill", id).Run(); err != nil {
 		t.Fatal(err)
 	}
 }

@@ -185,6 +185,8 @@ func NewTestEngine(t utils.Fataler) *engine.Engine {
 	if err != nil {
 		t.Fatal(err)
 	}
+	eng.Stdout = ioutil.Discard
+	eng.Stderr = ioutil.Discard
 	// Load default plugins
 	// (This is manually copied and modified from main() until we have a more generic plugin system)
 	job := eng.Job("initapi")

@@ -6,24 +6,15 @@ import (
 )

 const LxcTemplate = `
-# hostname
-{{if .Config.Hostname}}
-lxc.utsname = {{.Config.Hostname}}
-{{else}}
-lxc.utsname = {{.Id}}
-{{end}}
-
 {{if .Config.NetworkDisabled}}
 # network is disabled (-n=false)
 lxc.network.type = empty
 {{else}}
 # network configuration
 lxc.network.type = veth
 lxc.network.flags = up
 lxc.network.link = {{.NetworkSettings.Bridge}}
 lxc.network.name = eth0
 lxc.network.mtu = 1500
-lxc.network.ipv4 = {{.NetworkSettings.IPAddress}}/{{.NetworkSettings.IPPrefixLen}}
 {{end}}

 # root filesystem

@@ -110,18 +101,11 @@ lxc.mount.entry = {{escapeFstabSpaces $realPath}} {{escapeFstabSpaces $ROOTFS}}/
 {{end}}

 {{if (getHostConfig .).Privileged}}
-# retain all capabilities; no lxc.cap.drop line
 {{if (getCapabilities .).AppArmor}}
 lxc.aa_profile = unconfined
 {{else}}
 #lxc.aa_profile = unconfined
 {{end}}
-{{else}}
-# drop linux capabilities (apply mainly to the user root in the container)
-# (Note: 'lxc.cap.keep' is coming soon and should replace this under the
-#  security principle 'deny all unless explicitly permitted', see
-#  http://sourceforge.net/mailarchive/message.php?msg_id=31054627 )
-lxc.cap.drop = audit_control audit_write mac_admin mac_override mknod setpcap sys_admin sys_module sys_nice sys_pacct sys_rawio sys_resource sys_time sys_tty_config
 {{end}}

 # limits

@@ -29,7 +29,6 @@ func TestLXCConfig(t *testing.T) {
 	container := &Container{
 		root: root,
 		Config: &Config{
-			Hostname:        "foobar",
 			Memory:          int64(mem),
 			CpuShares:       int64(cpu),
 			NetworkDisabled: true,

@@ -41,7 +40,6 @@ func TestLXCConfig(t *testing.T) {
 	if err := container.generateLXCConfig(); err != nil {
 		t.Fatal(err)
 	}
-	grepFile(t, container.lxcConfigPath(), "lxc.utsname = foobar")
 	grepFile(t, container.lxcConfigPath(),
 		fmt.Sprintf("lxc.cgroup.memory.limit_in_bytes = %d", mem))
 	grepFile(t, container.lxcConfigPath(),

network.go
@@ -118,6 +118,7 @@ func CreateBridgeIface(config *DaemonConfig) error {
 		"192.168.44.1/24",
 	}

+
 	nameservers := []string{}
 	resolvConf, _ := utils.GetResolvConf()
 	// we don't check for an error here, because we don't really care

@@ -129,22 +130,30 @@ func CreateBridgeIface(config *DaemonConfig) error {
 	}

 	var ifaceAddr string
-	for _, addr := range addrs {
-		_, dockerNetwork, err := net.ParseCIDR(addr)
+	if len(config.BridgeIp) != 0 {
+		_, _, err := net.ParseCIDR(config.BridgeIp)
 		if err != nil {
 			return err
 		}
-		routes, err := netlink.NetworkGetRoutes()
-		if err != nil {
-			return err
-		}
-		if err := checkRouteOverlaps(routes, dockerNetwork); err == nil {
-			if err := checkNameserverOverlaps(nameservers, dockerNetwork); err == nil {
-				ifaceAddr = addr
-				break
+		ifaceAddr = config.BridgeIp
+	} else {
+		for _, addr := range addrs {
+			_, dockerNetwork, err := net.ParseCIDR(addr)
+			if err != nil {
+				return err
+			}
+			routes, err := netlink.NetworkGetRoutes()
+			if err != nil {
+				return err
+			}
+			if err := checkRouteOverlaps(routes, dockerNetwork); err == nil {
+				if err := checkNameserverOverlaps(nameservers, dockerNetwork); err == nil {
+					ifaceAddr = addr
+					break
+				}
+			} else {
+				utils.Debugf("%s: %s", addr, err)
 			}
-		} else {
-			utils.Debugf("%s: %s", addr, err)
 		}
 	}
 	if ifaceAddr == "" {

@@ -178,7 +187,11 @@ func CreateBridgeIface(config *DaemonConfig) error {
 func createBridgeIface(name string) error {
 	s, err := syscall.Socket(syscall.AF_INET6, syscall.SOCK_STREAM, syscall.IPPROTO_IP)
 	if err != nil {
-		return fmt.Errorf("Error creating bridge creation socket: %s", err)
+		utils.Debugf("Bridge socket creation failed IPv6 probably not enabled: %v", err)
+		s, err = syscall.Socket(syscall.AF_INET, syscall.SOCK_STREAM, syscall.IPPROTO_IP)
+		if err != nil {
+			return fmt.Errorf("Error creating bridge creation socket: %s", err)
+		}
 	}
 	defer syscall.Close(s)
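CreateBridgeIface now accepts a user-supplied bridge address through config.BridgeIp and validates it with net.ParseCIDR before using it, skipping the route/nameserver overlap scan. A standalone sketch of that validation step (the sample address is illustrative):

	package main

	import (
		"fmt"
		"net"
	)

	func main() {
		// Example value a user might supply for the bridge IP; any valid CIDR works.
		bridgeIP := "192.168.44.1/24"
		ip, ipNet, err := net.ParseCIDR(bridgeIP)
		if err != nil {
			fmt.Println("rejected:", err)
			return
		}
		// The daemon keeps the full "addr/prefix" string; ParseCIDR here
		// serves only to reject malformed input.
		fmt.Printf("bridge addr %s on network %s\n", ip, ipNet)
	}
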
runtime.go
@@ -18,16 +18,22 @@ import (
 	"os"
 	"os/exec"
 	"path"
+	"regexp"
 	"sort"
 	"strings"
 	"sync"
 	"time"
 )

-// Set the max depth to the aufs restriction
-const MaxImageDepth = 42
+// Set the max depth to the aufs default that most
+// kernels are compiled with
+// For more information see: http://sourceforge.net/p/aufs/aufs3-standalone/ci/aufs3.12/tree/config.mk
+const MaxImageDepth = 127

-var defaultDns = []string{"8.8.8.8", "8.8.4.4"}
+var (
+	defaultDns         = []string{"8.8.8.8", "8.8.4.4"}
+	validContainerName = regexp.MustCompile(`^/?[a-zA-Z0-9_-]+$`)
+)

 type Capabilities struct {
 	MemoryLimit bool

@@ -418,7 +424,12 @@ func (runtime *Runtime) Create(config *Config, name string) (*Container, []strin
 		if err != nil {
 			name = utils.TruncateID(id)
 		}
+	} else {
+		if !validContainerName.MatchString(name) {
+			return nil, nil, fmt.Errorf("Invalid container name (%s), only [a-zA-Z0-9_-] are allowed", name)
+		}
 	}
+
 	if name[0] != '/' {
 		name = "/" + name
 	}

@@ -857,7 +868,7 @@ func linkLxcStart(root string) error {
 	}
 	targetPath := path.Join(root, "lxc-start-unconfined")

-	if _, err := os.Stat(targetPath); err != nil && !os.IsNotExist(err) {
+	if _, err := os.Lstat(targetPath); err != nil && !os.IsNotExist(err) {
 		return err
 	} else if err == nil {
 		if err := os.Remove(targetPath); err != nil {
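The new validContainerName regexp rejects names outside [a-zA-Z0-9_-], with an optional leading slash. A quick standalone check of what it accepts — the sample names are illustrative:

	package main

	import (
		"fmt"
		"regexp"
	)

	var validContainerName = regexp.MustCompile(`^/?[a-zA-Z0-9_-]+$`)

	func main() {
		for _, name := range []string{"web_1", "/web-1", "web 1", "web/db"} {
			fmt.Printf("%-8q valid=%v\n", name, validContainerName.MatchString(name))
		}
		// "web_1" and "/web-1" match; "web 1" (space) and "web/db" (inner slash) do not.
	}
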
server.go
@@ -22,6 +22,7 @@ import (
 	"path"
 	"path/filepath"
 	"runtime"
+	"strconv"
 	"strings"
 	"sync"
 	"syscall"

@@ -70,18 +71,50 @@ func jobInitApi(job *engine.Job) engine.Status {
 	if srv.runtime.networkManager.bridgeNetwork != nil {
 		job.Eng.Hack_SetGlobalVar("httpapi.bridgeIP", srv.runtime.networkManager.bridgeNetwork.IP)
 	}
+	if err := job.Eng.Register("export", srv.ContainerExport); err != nil {
+		job.Error(err)
+		return engine.StatusErr
+	}
 	if err := job.Eng.Register("create", srv.ContainerCreate); err != nil {
 		job.Error(err)
 		return engine.StatusErr
 	}
+	if err := job.Eng.Register("stop", srv.ContainerStop); err != nil {
+		job.Error(err)
+		return engine.StatusErr
+	}
 	if err := job.Eng.Register("start", srv.ContainerStart); err != nil {
 		job.Error(err)
 		return engine.StatusErr
 	}
+	if err := job.Eng.Register("kill", srv.ContainerKill); err != nil {
+		job.Error(err)
+		return engine.StatusErr
+	}
 	if err := job.Eng.Register("serveapi", srv.ListenAndServe); err != nil {
 		job.Error(err)
 		return engine.StatusErr
 	}
+	if err := job.Eng.Register("wait", srv.ContainerWait); err != nil {
+		job.Error(err)
+		return engine.StatusErr
+	}
+	if err := job.Eng.Register("tag", srv.ImageTag); err != nil {
+		job.Error(err)
+		return engine.StatusErr
+	}
+	if err := job.Eng.Register("resize", srv.ContainerResize); err != nil {
+		job.Error(err)
+		return engine.StatusErr
+	}
+	if err := job.Eng.Register("commit", srv.ContainerCommit); err != nil {
+		job.Error(err)
+		return engine.StatusErr
+	}
+	if err := job.Eng.Register("info", srv.DockerInfo); err != nil {
+		job.Error(err)
+		return engine.StatusErr
+	}
 	return engine.StatusOK
 }

@@ -118,14 +151,6 @@ func (srv *Server) ListenAndServe(job *engine.Job) engine.Status {
 	return engine.StatusOK
 }

-func (srv *Server) DockerVersion() APIVersion {
-	return APIVersion{
-		Version:   VERSION,
-		GitCommit: GITCOMMIT,
-		GoVersion: runtime.Version(),
-	}
-}
-
 // simpleVersionInfo is a simple implementation of
 // the interface VersionInfo, which is used
 // to provide version information for some product,

@@ -144,68 +169,73 @@ func (v *simpleVersionInfo) Version() string {
 	return v.version
 }

-// versionCheckers() returns version informations of:
-// docker, go, git-commit (of the docker) and the host's kernel.
-//
-// Such information will be used on call to NewRegistry().
-func (srv *Server) versionInfos() []utils.VersionInfo {
-	v := srv.DockerVersion()
-	ret := append(make([]utils.VersionInfo, 0, 4), &simpleVersionInfo{"docker", v.Version})
-
-	if len(v.GoVersion) > 0 {
-		ret = append(ret, &simpleVersionInfo{"go", v.GoVersion})
-	}
-	if len(v.GitCommit) > 0 {
-		ret = append(ret, &simpleVersionInfo{"git-commit", v.GitCommit})
-	}
-	if kernelVersion, err := utils.GetKernelVersion(); err == nil {
-		ret = append(ret, &simpleVersionInfo{"kernel", kernelVersion.String()})
-	}
-
-	return ret
-}
-
 // ContainerKill send signal to the container
 // If no signal is given (sig 0), then Kill with SIGKILL and wait
 // for the container to exit.
 // If a signal is given, then just send it to the container and return.
-func (srv *Server) ContainerKill(name string, sig int) error {
+func (srv *Server) ContainerKill(job *engine.Job) engine.Status {
+	if n := len(job.Args); n < 1 || n > 2 {
+		job.Errorf("Usage: %s CONTAINER [SIGNAL]", job.Name)
+		return engine.StatusErr
+	}
+	name := job.Args[0]
+	var sig uint64
+	if len(job.Args) == 2 && job.Args[1] != "" {
+		var err error
+		// The largest legal signal is 31, so let's parse on 5 bits
+		sig, err = strconv.ParseUint(job.Args[1], 10, 5)
+		if err != nil {
+			job.Errorf("Invalid signal: %s", job.Args[1])
+			return engine.StatusErr
+		}
+	}
 	if container := srv.runtime.Get(name); container != nil {
 		// If no signal is passed, perform regular Kill (SIGKILL + wait())
 		if sig == 0 {
 			if err := container.Kill(); err != nil {
-				return fmt.Errorf("Cannot kill container %s: %s", name, err)
+				job.Errorf("Cannot kill container %s: %s", name, err)
+				return engine.StatusErr
 			}
 			srv.LogEvent("kill", container.ID, srv.runtime.repositories.ImageName(container.Image))
 		} else {
 			// Otherwise, just send the requested signal
-			if err := container.kill(sig); err != nil {
-				return fmt.Errorf("Cannot kill container %s: %s", name, err)
+			if err := container.kill(int(sig)); err != nil {
+				job.Errorf("Cannot kill container %s: %s", name, err)
+				return engine.StatusErr
 			}
 			// FIXME: Add event for signals
 		}
 	} else {
-		return fmt.Errorf("No such container: %s", name)
+		job.Errorf("No such container: %s", name)
+		return engine.StatusErr
 	}
-	return nil
+	return engine.StatusOK
 }

-func (srv *Server) ContainerExport(name string, out io.Writer) error {
+func (srv *Server) ContainerExport(job *engine.Job) engine.Status {
+	if len(job.Args) != 1 {
+		job.Errorf("Usage: %s container_id", job.Name)
+		return engine.StatusErr
+	}
+	name := job.Args[0]
 	if container := srv.runtime.Get(name); container != nil {
 		data, err := container.Export()
 		if err != nil {
-			return err
+			job.Errorf("%s: %s", name, err)
+			return engine.StatusErr
 		}

 		// Stream the entire contents of the container (basically a volatile snapshot)
-		if _, err := io.Copy(out, data); err != nil {
-			return err
+		if _, err := io.Copy(job.Stdout, data); err != nil {
+			job.Errorf("%s: %s", name, err)
+			return engine.StatusErr
 		}
 		// FIXME: factor job-specific LogEvent to engine.Job.Run()
 		srv.LogEvent("export", container.ID, srv.runtime.repositories.ImageName(container.Image))
-		return nil
+		return engine.StatusOK
 	}
-	return fmt.Errorf("No such container: %s", name)
+	job.Errorf("No such container: %s", name)
+	return engine.StatusErr
 }

 // ImageExport exports all images with the given tag. All versions
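Note the 5-bit parse in ContainerKill: strconv.ParseUint(s, 10, 5) both converts and range-checks, since any value needing more than 5 bits (i.e. greater than 31) fails. A standalone illustration:

	package main

	import (
		"fmt"
		"strconv"
	)

	func main() {
		for _, s := range []string{"9", "31", "32", "kill"} {
			sig, err := strconv.ParseUint(s, 10, 5)
			if err != nil {
				fmt.Printf("%-5s -> invalid signal (%v)\n", s, err)
				continue
			}
			fmt.Printf("%-5s -> signal %d\n", s, sig)
		}
		// "9" and "31" parse; "32" overflows 5 bits and "kill" is not a number.
	}
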
@@ -584,7 +614,7 @@ func (srv *Server) Images(all bool, filter string) ([]APIImages, error) {
 	return outs, nil
 }

-func (srv *Server) DockerInfo() *APIInfo {
+func (srv *Server) DockerInfo(job *engine.Job) engine.Status {
 	images, _ := srv.runtime.graph.Map()
 	var imgcount int
 	if images == nil {

@@ -604,22 +634,26 @@
 		kernelVersion = kv.String()
 	}

-	return &APIInfo{
-		Containers:         len(srv.runtime.List()),
-		Images:             imgcount,
-		Driver:             srv.runtime.driver.String(),
-		DriverStatus:       srv.runtime.driver.Status(),
-		MemoryLimit:        srv.runtime.capabilities.MemoryLimit,
-		SwapLimit:          srv.runtime.capabilities.SwapLimit,
-		IPv4Forwarding:     !srv.runtime.capabilities.IPv4ForwardingDisabled,
-		Debug:              os.Getenv("DEBUG") != "",
-		NFd:                utils.GetTotalUsedFds(),
-		NGoroutines:        runtime.NumGoroutine(),
-		LXCVersion:         lxcVersion,
-		NEventsListener:    len(srv.events),
-		KernelVersion:      kernelVersion,
-		IndexServerAddress: auth.IndexServerAddress(),
-	}
+	v := &engine.Env{}
+	v.SetInt("Containers", len(srv.runtime.List()))
+	v.SetInt("Images", imgcount)
+	v.Set("Driver", srv.runtime.driver.String())
+	v.SetJson("DriverStatus", srv.runtime.driver.Status())
+	v.SetBool("MemoryLimit", srv.runtime.capabilities.MemoryLimit)
+	v.SetBool("SwapLimit", srv.runtime.capabilities.SwapLimit)
+	v.SetBool("IPv4Forwarding", !srv.runtime.capabilities.IPv4ForwardingDisabled)
+	v.SetBool("Debug", os.Getenv("DEBUG") != "")
+	v.SetInt("NFd", utils.GetTotalUsedFds())
+	v.SetInt("NGoroutines", runtime.NumGoroutine())
+	v.Set("LXCVersion", lxcVersion)
+	v.SetInt("NEventsListener", len(srv.events))
+	v.Set("KernelVersion", kernelVersion)
+	v.Set("IndexServerAddress", auth.IndexServerAddress())
+	if _, err := v.WriteTo(job.Stdout); err != nil {
+		job.Error(err)
+		return engine.StatusErr
+	}
+	return engine.StatusOK
 }

 func (srv *Server) ImageHistory(name string) ([]APIHistory, error) {

@@ -747,24 +781,47 @@ func createAPIContainer(names []string, container *Container, size bool, runtime
 	}
 	return c
 }
-func (srv *Server) ContainerCommit(name, repo, tag, author, comment string, config *Config) (string, error) {
+
+func (srv *Server) ContainerCommit(job *engine.Job) engine.Status {
+	if len(job.Args) != 1 {
+		job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name)
+		return engine.StatusErr
+	}
+	name := job.Args[0]
+
 	container := srv.runtime.Get(name)
 	if container == nil {
-		return "", fmt.Errorf("No such container: %s", name)
+		job.Errorf("No such container: %s", name)
+		return engine.StatusErr
 	}
-	img, err := srv.runtime.Commit(container, repo, tag, comment, author, config)
+	var config Config
+	if err := job.GetenvJson("config", &config); err != nil {
+		job.Error(err)
+		return engine.StatusErr
+	}
+
+	img, err := srv.runtime.Commit(container, job.Getenv("repo"), job.Getenv("tag"), job.Getenv("comment"), job.Getenv("author"), &config)
 	if err != nil {
-		return "", err
+		job.Error(err)
+		return engine.StatusErr
 	}
-	return img.ID, err
+	job.Printf("%s\n", img.ID)
+	return engine.StatusOK
 }

-// FIXME: this should be called ImageTag
-func (srv *Server) ContainerTag(name, repo, tag string, force bool) error {
-	if err := srv.runtime.repositories.Set(repo, tag, name, force); err != nil {
-		return err
+func (srv *Server) ImageTag(job *engine.Job) engine.Status {
+	if len(job.Args) != 2 && len(job.Args) != 3 {
+		job.Errorf("Usage: %s IMAGE REPOSITORY [TAG]\n", job.Name)
+		return engine.StatusErr
 	}
-	return nil
+	var tag string
+	if len(job.Args) == 3 {
+		tag = job.Args[2]
+	}
+	if err := srv.runtime.repositories.Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force")); err != nil {
+		job.Error(err)
+		return engine.StatusErr
+	}
+	return engine.StatusOK
 }

 func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgID, endpoint string, token []string, sf *utils.StreamFormatter) error {
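On the calling side, the commit job's single line of output is the new image ID; the tests earlier in this commit capture it with job.Stdout.AddString. A sketch using only calls that appear in those tests (the container ID is a placeholder):

	package main

	import "github.com/dotcloud/docker/engine"

	// commitContainer mirrors how the tests invoke the "commit" job and
	// capture the image ID printed on the job's stdout.
	func commitContainer(eng *engine.Engine, containerID string) (string, error) {
		job := eng.Job("commit", containerID)
		job.Setenv("repo", "test") // optional repository to tag the result into
		var imageID string
		job.Stdout.AddString(&imageID)
		if err := job.Run(); err != nil {
			return "", err
		}
		return imageID, nil
	}

	func main() {}
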
@@ -1118,7 +1175,7 @@ func (srv *Server) pushRepository(r *registry.Registry, out io.Writer, localName
 			return nil
 		}

-		out.Write(sf.FormatStatus("", "Pushing tags for rev [%s] on {%s}", elem.ID, ep+"repositories/"+remoteName+"/tags/"+elem.Tag))
+		out.Write(sf.FormatStatus("", "Pushing tags for rev [%s] on {%s}", utils.TruncateID(elem.ID), ep+"repositories/"+remoteName+"/tags/"+elem.Tag))
 		if err := r.PushRegistryTag(remoteName, elem.ID, elem.Tag, ep, repoData.Tokens); err != nil {
 			return err
 		}

@@ -1128,13 +1185,13 @@ func (srv *Server) pushRepository(r *registry.Registry, out io.Writer, localName
 			if err := pushTags(); err != nil {
 				return err
 			}
-			out.Write(sf.FormatStatus("", "Image %s already pushed, skipping", elem.ID))
+			out.Write(sf.FormatProgress(utils.TruncateID(elem.ID), "Image already pushed, skipping", nil))
 			continue
 		} else if r.LookupRemoteImage(elem.ID, ep, repoData.Tokens) {
 			if err := pushTags(); err != nil {
 				return err
 			}
-			out.Write(sf.FormatStatus("", "Image %s already pushed, skipping", elem.ID))
+			out.Write(sf.FormatProgress(utils.TruncateID(elem.ID), "Image already pushed, skipping", nil))
 			continue
 		}
 		checksum, err := srv.pushImage(r, out, remoteName, elem.ID, ep, repoData.Tokens, sf)

@@ -1164,7 +1221,7 @@ func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID,
 	if err != nil {
 		return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err)
 	}
-	out.Write(sf.FormatStatus("", "Pushing %s", imgID))
+	out.Write(sf.FormatProgress(utils.TruncateID(imgID), "Pushing", nil))

 	imgData := &registry.ImgData{
 		ID: imgID,

@@ -1173,7 +1230,7 @@ func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID,
 	// Send the json
 	if err := r.PushImageJSONRegistry(imgData, jsonRaw, ep, token); err != nil {
 		if err == registry.ErrAlreadyExists {
-			out.Write(sf.FormatStatus("", "Image %s already pushed, skipping", imgData.ID))
+			out.Write(sf.FormatProgress(utils.TruncateID(imgData.ID), "Image already pushed, skipping", nil))
 			return "", nil
 		}
 		return "", err

@@ -1186,14 +1243,11 @@ func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID,
 	defer os.RemoveAll(layerData.Name())

 	// Send the layer
-	checksum, err = r.PushImageLayerRegistry(imgData.ID, utils.ProgressReader(layerData, int(layerData.Size), out, sf, false, "", "Pushing"), ep, token, jsonRaw)
+	checksum, err = r.PushImageLayerRegistry(imgData.ID, utils.ProgressReader(layerData, int(layerData.Size), out, sf, false, utils.TruncateID(imgData.ID), "Pushing"), ep, token, jsonRaw)
 	if err != nil {
 		return "", err
 	}
 	imgData.Checksum = checksum

-	out.Write(sf.FormatStatus("", ""))
-
 	// Send the checksum
 	if err := r.PushImageChecksumRegistry(imgData, ep, token); err != nil {
 		return "", err
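utils.TruncateID shortens a full image ID to the familiar short form used in progress output. A sketch of the idea — the 12-character length is an assumption based on Docker's usual short IDs, not a quote of the actual implementation:

	package main

	import "fmt"

	// truncateID sketches what utils.TruncateID does: keep a short prefix
	// of the full hex ID. shortLen = 12 is an assumption, not the
	// package's verified constant.
	func truncateID(id string) string {
		const shortLen = 12
		if len(id) > shortLen {
			return id[:shortLen]
		}
		return id
	}

	func main() {
		fmt.Println(truncateID("4e9f6b9e0d1b2c3d4e5f60718293a4b5c6d7e8f90123"))
		// -> 4e9f6b9e0d1b
	}
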
@@ -1496,8 +1550,10 @@ func (srv *Server) deleteImageParents(img *Image, imgs *[]APIRmi) error {
 }

 func (srv *Server) deleteImage(img *Image, repoName, tag string) ([]APIRmi, error) {
-	imgs := []APIRmi{}
-	tags := []string{}
+	var (
+		imgs = []APIRmi{}
+		tags = []string{}
+	)

 	//If delete by id, see if the id belong only to one repository
 	if repoName == "" {

@@ -1517,6 +1573,7 @@ func (srv *Server) deleteImage(img *Image, repoName, tag string) ([]APIRmi, erro
 	} else {
 		tags = append(tags, tag)
 	}
+
 	//Untag the current image
 	for _, tag := range tags {
 		tagDeleted, err := srv.runtime.repositories.Delete(repoName, tag)

@@ -1528,6 +1585,7 @@ func (srv *Server) deleteImage(img *Image, repoName, tag string) ([]APIRmi, erro
 			srv.LogEvent("untag", img.ID, "")
 		}
 	}
+
 	if len(srv.runtime.repositories.ByID()[img.ID]) == 0 {
 		if err := srv.deleteImageAndChildren(img.ID, &imgs, nil); err != nil {
 			if err != ErrImageReferenced {

@@ -1543,10 +1601,16 @@ func (srv *Server) deleteImage(img *Image, repoName, tag string) ([]APIRmi, erro
 }

 func (srv *Server) ImageDelete(name string, autoPrune bool) ([]APIRmi, error) {
+	var (
+		repository, tag string
+		validate        = true
+	)
 	img, err := srv.runtime.repositories.LookupImage(name)
 	if err != nil {
 		return nil, fmt.Errorf("No such image: %s", name)
 	}
+
 	// FIXME: What does autoPrune mean ?
 	if !autoPrune {
 		if err := srv.runtime.graph.Delete(img.ID); err != nil {
 			return nil, fmt.Errorf("Cannot delete image %s: %s", name, err)

@@ -1554,9 +1618,22 @@ func (srv *Server) ImageDelete(name string, autoPrune bool) ([]APIRmi, error) {
 		return nil, nil
 	}

-	// Prevent deletion if image is used by a running container
-	for _, container := range srv.runtime.List() {
-		if container.State.IsRunning() {
+	if !strings.Contains(img.ID, name) {
+		repository, tag = utils.ParseRepositoryTag(name)
+	}
+
+	// If we have a repo and the image is not referenced anywhere else
+	// then just perform an untag and do not validate.
+	//
+	// i.e. only validate if we are performing an actual delete and not
+	// an untag op
+	if repository != "" {
+		validate = len(srv.runtime.repositories.ByID()[img.ID]) == 1
+	}
+
+	if validate {
+		// Prevent deletion if image is used by a container
+		for _, container := range srv.runtime.List() {
 			parent, err := srv.runtime.repositories.LookupImage(container.Image)
 			if err != nil {
 				return nil, err

@@ -1564,7 +1641,7 @@ func (srv *Server) ImageDelete(name string, autoPrune bool) ([]APIRmi, error) {

 			if err := parent.WalkHistory(func(p *Image) error {
 				if img.ID == p.ID {
-					return fmt.Errorf("Conflict, cannot delete %s because the running container %s is using it", name, container.ID)
+					return fmt.Errorf("Conflict, cannot delete %s because the container %s is using it", name, container.ID)
 				}
 				return nil
 			}); err != nil {

@@ -1572,13 +1649,7 @@ func (srv *Server) ImageDelete(name string, autoPrune bool) ([]APIRmi, error) {
 		}
 	}

-	if strings.Contains(img.ID, name) {
-		//delete via ID
-		return srv.deleteImage(img, "", "")
-	}
-	name, tag := utils.ParseRepositoryTag(name)
-	return srv.deleteImage(img, name, tag)
+	return srv.deleteImage(img, repository, tag)
 }

 func (srv *Server) ImageGetCached(imgID string, config *Config) (*Image, error) {

@@ -1706,30 +1777,69 @@ func (srv *Server) ContainerStart(job *engine.Job) engine.Status {
 	return engine.StatusOK
 }

-func (srv *Server) ContainerStop(name string, t int) error {
+func (srv *Server) ContainerStop(job *engine.Job) engine.Status {
+	if len(job.Args) != 1 {
+		job.Errorf("Usage: %s CONTAINER\n", job.Name)
+		return engine.StatusErr
+	}
+	name := job.Args[0]
+	t := job.GetenvInt("t")
+	if t == -1 {
+		t = 10
+	}
 	if container := srv.runtime.Get(name); container != nil {
-		if err := container.Stop(t); err != nil {
-			return fmt.Errorf("Cannot stop container %s: %s", name, err)
+		if err := container.Stop(int(t)); err != nil {
+			job.Errorf("Cannot stop container %s: %s\n", name, err)
+			return engine.StatusErr
 		}
 		srv.LogEvent("stop", container.ID, srv.runtime.repositories.ImageName(container.Image))
 	} else {
-		return fmt.Errorf("No such container: %s", name)
+		job.Errorf("No such container: %s\n", name)
+		return engine.StatusErr
 	}
-	return nil
+	return engine.StatusOK
 }

-func (srv *Server) ContainerWait(name string) (int, error) {
-	if container := srv.runtime.Get(name); container != nil {
-		return container.Wait(), nil
+func (srv *Server) ContainerWait(job *engine.Job) engine.Status {
+	if len(job.Args) != 1 {
+		job.Errorf("Usage: %s", job.Name)
+		return engine.StatusErr
 	}
-	return 0, fmt.Errorf("No such container: %s", name)
+	name := job.Args[0]
+	if container := srv.runtime.Get(name); container != nil {
+		status := container.Wait()
+		job.Printf("%d\n", status)
+		return engine.StatusOK
+	}
+	job.Errorf("%s: no such container: %s", job.Name, name)
+	return engine.StatusErr
 }

-func (srv *Server) ContainerResize(name string, h, w int) error {
-	if container := srv.runtime.Get(name); container != nil {
-		return container.Resize(h, w)
+func (srv *Server) ContainerResize(job *engine.Job) engine.Status {
+	if len(job.Args) != 3 {
+		job.Errorf("Not enough arguments. Usage: %s CONTAINER HEIGHT WIDTH\n", job.Name)
+		return engine.StatusErr
 	}
-	return fmt.Errorf("No such container: %s", name)
+	name := job.Args[0]
+	height, err := strconv.Atoi(job.Args[1])
+	if err != nil {
+		job.Error(err)
+		return engine.StatusErr
+	}
+	width, err := strconv.Atoi(job.Args[2])
+	if err != nil {
+		job.Error(err)
+		return engine.StatusErr
+	}
+	if container := srv.runtime.Get(name); container != nil {
+		if err := container.Resize(height, width); err != nil {
+			job.Error(err)
+			return engine.StatusErr
+		}
+		return engine.StatusOK
+	}
+	job.Errorf("No such container: %s", name)
+	return engine.StatusErr
 }

 func (srv *Server) ContainerAttach(name string, logs, stream, stdin, stdout, stderr bool, inStream io.ReadCloser, outStream, errStream io.Writer) error {

@@ -1874,7 +1984,13 @@ func NewServer(eng *engine.Engine, config *DaemonConfig) (*Server, error) {
 func (srv *Server) HTTPRequestFactory(metaHeaders map[string][]string) *utils.HTTPRequestFactory {
 	srv.Lock()
 	defer srv.Unlock()
-	ud := utils.NewHTTPUserAgentDecorator(srv.versionInfos()...)
+	v := dockerVersion()
+	httpVersion := make([]utils.VersionInfo, 0, 4)
+	httpVersion = append(httpVersion, &simpleVersionInfo{"docker", v.Get("Version")})
+	httpVersion = append(httpVersion, &simpleVersionInfo{"go", v.Get("GoVersion")})
+	httpVersion = append(httpVersion, &simpleVersionInfo{"git-commit", v.Get("GitCommit")})
+	httpVersion = append(httpVersion, &simpleVersionInfo{"kernel", v.Get("KernelVersion")})
+	ud := utils.NewHTTPUserAgentDecorator(httpVersion...)
 	md := &utils.HTTPMetaHeadersDecorator{
 		Headers: metaHeaders,
 	}

@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"github.com/dotcloud/docker/netlink"
 	"github.com/dotcloud/docker/utils"
+	"github.com/syndtr/gocapability/capability"
 	"io/ioutil"
 	"log"
 	"net"

@@ -16,73 +17,146 @@ import (
 	"syscall"
 )

+type DockerInitArgs struct {
+	user       string
+	gateway    string
+	ip         string
+	workDir    string
+	privileged bool
+	env        []string
+	args       []string
+}
+
+func setupHostname(args *DockerInitArgs) error {
+	hostname := getEnv(args, "HOSTNAME")
+	if hostname == "" {
+		return nil
+	}
+	return syscall.Sethostname([]byte(hostname))
+}
+
 // Setup networking
-func setupNetworking(gw string) {
-	if gw == "" {
-		return
+func setupNetworking(args *DockerInitArgs) error {
+	if args.ip != "" {
+		// eth0
+		iface, err := net.InterfaceByName("eth0")
+		if err != nil {
+			return fmt.Errorf("Unable to set up networking: %v", err)
+		}
+		ip, ipNet, err := net.ParseCIDR(args.ip)
+		if err != nil {
+			return fmt.Errorf("Unable to set up networking: %v", err)
+		}
+		if err := netlink.NetworkLinkAddIp(iface, ip, ipNet); err != nil {
+			return fmt.Errorf("Unable to set up networking: %v", err)
+		}
+		if err := netlink.NetworkLinkUp(iface); err != nil {
+			return fmt.Errorf("Unable to set up networking: %v", err)
+		}
+
+		// loopback
+		iface, err = net.InterfaceByName("lo")
+		if err != nil {
+			return fmt.Errorf("Unable to set up networking: %v", err)
+		}
+		if err := netlink.NetworkLinkUp(iface); err != nil {
+			return fmt.Errorf("Unable to set up networking: %v", err)
+		}
 	}
+	if args.gateway != "" {
+		gw := net.ParseIP(args.gateway)
+		if gw == nil {
+			return fmt.Errorf("Unable to set up networking, %s is not a valid gateway IP", args.gateway)
+		}

-	ip := net.ParseIP(gw)
-	if ip == nil {
-		log.Fatalf("Unable to set up networking, %s is not a valid IP", gw)
-		return
-	}
-
-	if err := netlink.AddDefaultGw(ip); err != nil {
-		log.Fatalf("Unable to set up networking: %v", err)
+		if err := netlink.AddDefaultGw(gw); err != nil {
+			return fmt.Errorf("Unable to set up networking: %v", err)
+		}
 	}
+
+	return nil
 }

 // Setup working directory
-func setupWorkingDirectory(workdir string) {
-	if workdir == "" {
-		return
+func setupWorkingDirectory(args *DockerInitArgs) error {
+	if args.workDir == "" {
+		return nil
 	}
-	if err := syscall.Chdir(workdir); err != nil {
-		log.Fatalf("Unable to change dir to %v: %v", workdir, err)
+	if err := syscall.Chdir(args.workDir); err != nil {
+		return fmt.Errorf("Unable to change dir to %v: %v", args.workDir, err)
 	}
+	return nil
 }

 // Takes care of dropping privileges to the desired user
-func changeUser(u string) {
-	if u == "" {
-		return
+func changeUser(args *DockerInitArgs) error {
+	if args.user == "" {
+		return nil
 	}
-	userent, err := utils.UserLookup(u)
+	userent, err := utils.UserLookup(args.user)
 	if err != nil {
-		log.Fatalf("Unable to find user %v: %v", u, err)
+		return fmt.Errorf("Unable to find user %v: %v", args.user, err)
 	}

 	uid, err := strconv.Atoi(userent.Uid)
 	if err != nil {
-		log.Fatalf("Invalid uid: %v", userent.Uid)
+		return fmt.Errorf("Invalid uid: %v", userent.Uid)
 	}
 	gid, err := strconv.Atoi(userent.Gid)
 	if err != nil {
-		log.Fatalf("Invalid gid: %v", userent.Gid)
+		return fmt.Errorf("Invalid gid: %v", userent.Gid)
 	}

 	if err := syscall.Setgid(gid); err != nil {
-		log.Fatalf("setgid failed: %v", err)
+		return fmt.Errorf("setgid failed: %v", err)
 	}
 	if err := syscall.Setuid(uid); err != nil {
-		log.Fatalf("setuid failed: %v", err)
+		return fmt.Errorf("setuid failed: %v", err)
 	}
+
+	return nil
 }

+func setupCapabilities(args *DockerInitArgs) error {
+
+	if args.privileged {
+		return nil
+	}
+
+	drop := []capability.Cap{
+		capability.CAP_SETPCAP,
+		capability.CAP_SYS_MODULE,
+		capability.CAP_SYS_RAWIO,
+		capability.CAP_SYS_PACCT,
+		capability.CAP_SYS_ADMIN,
+		capability.CAP_SYS_NICE,
+		capability.CAP_SYS_RESOURCE,
+		capability.CAP_SYS_TIME,
+		capability.CAP_SYS_TTY_CONFIG,
+		capability.CAP_MKNOD,
+		capability.CAP_AUDIT_WRITE,
+		capability.CAP_AUDIT_CONTROL,
+		capability.CAP_MAC_OVERRIDE,
+		capability.CAP_MAC_ADMIN,
+	}
+
+	c, err := capability.NewPid(os.Getpid())
+	if err != nil {
+		return err
+	}
+
+	c.Unset(capability.CAPS|capability.BOUNDS, drop...)
+
+	if err := c.Apply(capability.CAPS | capability.BOUNDS); err != nil {
+		return err
+	}
+	return nil
+}
+
 // Clear environment pollution introduced by lxc-start
-func cleanupEnv() {
+func setupEnv(args *DockerInitArgs) {
 	os.Clearenv()
-	var lines []string
-	content, err := ioutil.ReadFile("/.dockerenv")
-	if err != nil {
-		log.Fatalf("Unable to load environment variables: %v", err)
-	}
-	err = json.Unmarshal(content, &lines)
-	if err != nil {
-		log.Fatalf("Unable to unmarshal environment variables: %v", err)
-	}
-	for _, kv := range lines {
+	for _, kv := range args.env {
 		parts := strings.SplitN(kv, "=", 2)
 		if len(parts) == 1 {
 			parts = append(parts, "")

@@ -91,16 +165,51 @@
 	}
 }

-func executeProgram(name string, args []string) {
-	path, err := exec.LookPath(name)
+func getEnv(args *DockerInitArgs, key string) string {
+	for _, kv := range args.env {
+		parts := strings.SplitN(kv, "=", 2)
+		if parts[0] == key && len(parts) == 2 {
+			return parts[1]
+		}
+	}
+	return ""
+}
+
+func executeProgram(args *DockerInitArgs) error {
+	setupEnv(args)
+
+	if err := setupHostname(args); err != nil {
+		return err
+	}
+
+	if err := setupNetworking(args); err != nil {
+		return err
+	}
+
+	if err := setupCapabilities(args); err != nil {
+		return err
+	}
+
+	if err := setupWorkingDirectory(args); err != nil {
+		return err
+	}
+
+	if err := changeUser(args); err != nil {
+		return err
+	}
+
+	path, err := exec.LookPath(args.args[0])
 	if err != nil {
-		log.Printf("Unable to locate %v", name)
+		log.Printf("Unable to locate %v", args.args[0])
 		os.Exit(127)
 	}

-	if err := syscall.Exec(path, args, os.Environ()); err != nil {
+	if err := syscall.Exec(path, args.args, os.Environ()); err != nil {
 		panic(err)
 	}
+
+	// Will never reach here
+	return nil
 }

 // Sys Init code

@@ -111,15 +220,39 @@ func SysInit() {
 		fmt.Println("You should not invoke dockerinit manually")
 		os.Exit(1)
 	}
-	var u = flag.String("u", "", "username or uid")
-	var gw = flag.String("g", "", "gateway address")
-	var workdir = flag.String("w", "", "workdir")
+
+	// Get cmdline arguments
+	user := flag.String("u", "", "username or uid")
+	gateway := flag.String("g", "", "gateway address")
+	ip := flag.String("i", "", "ip address")
+	workDir := flag.String("w", "", "workdir")
+	privileged := flag.Bool("privileged", false, "privileged mode")
 	flag.Parse()

-	cleanupEnv()
-	setupNetworking(*gw)
-	setupWorkingDirectory(*workdir)
-	changeUser(*u)
-	executeProgram(flag.Arg(0), flag.Args())
+	// Get env
+	var env []string
+	content, err := ioutil.ReadFile("/.dockerenv")
+	if err != nil {
+		log.Fatalf("Unable to load environment variables: %v", err)
+	}
+	if err := json.Unmarshal(content, &env); err != nil {
+		log.Fatalf("Unable to unmarshal environment variables: %v", err)
+	}
+
+	// Propagate the plugin-specific container env variable
+	env = append(env, "container="+os.Getenv("container"))
+
+	args := &DockerInitArgs{
+		user:       *user,
+		gateway:    *gateway,
+		ip:         *ip,
+		workDir:    *workDir,
+		privileged: *privileged,
+		env:        env,
+		args:       flag.Args(),
+	}
+
+	if err := executeProgram(args); err != nil {
+		log.Fatal(err)
+	}
 }
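dockerinit reads its environment from /.dockerenv, which (as the json.Unmarshal into []string above shows) is simply a JSON array of KEY=value strings. A small sketch of producing and consuming that shape — the file here is a temp file, not the real /.dockerenv:

	package main

	import (
		"encoding/json"
		"fmt"
		"io/ioutil"
		"os"
	)

	func main() {
		// The same shape dockerinit expects: a JSON array of KEY=value pairs.
		env := []string{"PATH=/usr/local/sbin:/usr/sbin:/sbin:/usr/bin:/bin", "HOME=/"}

		f, err := ioutil.TempFile("", "dockerenv")
		if err != nil {
			panic(err)
		}
		path := f.Name()
		f.Close()
		defer os.Remove(path)

		data, _ := json.Marshal(env)
		if err := ioutil.WriteFile(path, data, 0600); err != nil {
			panic(err)
		}

		// Read it back the way SysInit does.
		content, err := ioutil.ReadFile(path)
		if err != nil {
			panic(err)
		}
		var decoded []string
		if err := json.Unmarshal(content, &decoded); err != nil {
			panic(err)
		}
		fmt.Println(decoded)
	}
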
utils/fs.go
@@ -1,8 +1,10 @@
 package utils

 import (
+	"fmt"
 	"os"
+	"path/filepath"
 	"strings"
 	"syscall"
 )

@@ -33,3 +35,58 @@ func TreeSize(dir string) (size int64, err error) {
 	})
 	return
 }
+
+// FollowSymlink will follow an existing link and scope it to the root
+// path provided.
+func FollowSymlinkInScope(link, root string) (string, error) {
+	prev := "/"
+
+	root, err := filepath.Abs(root)
+	if err != nil {
+		return "", err
+	}
+
+	link, err = filepath.Abs(link)
+	if err != nil {
+		return "", err
+	}
+
+	if !strings.HasPrefix(filepath.Dir(link), root) {
+		return "", fmt.Errorf("%s is not within %s", link, root)
+	}
+
+	for _, p := range strings.Split(link, "/") {
+		prev = filepath.Join(prev, p)
+		prev = filepath.Clean(prev)
+
+		for {
+			stat, err := os.Lstat(prev)
+			if err != nil {
+				if os.IsNotExist(err) {
+					break
+				}
+				return "", err
+			}
+			if stat.Mode()&os.ModeSymlink == os.ModeSymlink {
+				dest, err := os.Readlink(prev)
+				if err != nil {
+					return "", err
+				}
+
+				switch dest[0] {
+				case '/':
+					prev = filepath.Join(root, dest)
+				case '.':
+					prev, _ = filepath.Abs(prev)
+
+					if prev = filepath.Clean(filepath.Join(filepath.Dir(prev), dest)); len(prev) < len(root) {
+						prev = filepath.Join(root, filepath.Base(dest))
+					}
+				}
+			} else {
+				break
+			}
+		}
+	}
+	return prev, nil
+}
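The Lstat/ModeSymlink check above is what lets the walk see a link itself rather than its target; a standalone illustration (the paths are created in a temp dir):

	package main

	import (
		"fmt"
		"io/ioutil"
		"os"
		"path/filepath"
	)

	func main() {
		dir, err := ioutil.TempDir("", "links")
		if err != nil {
			panic(err)
		}
		defer os.RemoveAll(dir)

		target := filepath.Join(dir, "target")
		link := filepath.Join(dir, "link")
		if err := ioutil.WriteFile(target, []byte("x"), 0600); err != nil {
			panic(err)
		}
		if err := os.Symlink(target, link); err != nil {
			panic(err)
		}

		// Lstat reports on the link itself; Stat would follow it.
		st, err := os.Lstat(link)
		if err != nil {
			panic(err)
		}
		fmt.Println("is symlink:", st.Mode()&os.ModeSymlink == os.ModeSymlink) // true

		dest, _ := os.Readlink(link)
		fmt.Println("points to:", dest)
	}
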
utils/fs_test.go (new file)
@@ -0,0 +1,83 @@
+package utils
+
+import (
+	"path/filepath"
+	"testing"
+)
+
+func abs(t *testing.T, p string) string {
+	o, err := filepath.Abs(p)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return o
+}
+
+func TestFollowSymLinkNormal(t *testing.T) {
+	link := "testdata/fs/a/d/c/data"
+
+	rewrite, err := FollowSymlinkInScope(link, "testdata")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if expected := abs(t, "testdata/b/c/data"); expected != rewrite {
+		t.Fatalf("Expected %s got %s", expected, rewrite)
+	}
+}
+
+func TestFollowSymLinkRandomString(t *testing.T) {
+	if _, err := FollowSymlinkInScope("toto", "testdata"); err == nil {
+		t.Fatal("Random string should fail but didn't")
+	}
+}
+
+func TestFollowSymLinkLastLink(t *testing.T) {
+	link := "testdata/fs/a/d"
+
+	rewrite, err := FollowSymlinkInScope(link, "testdata")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if expected := abs(t, "testdata/b"); expected != rewrite {
+		t.Fatalf("Expected %s got %s", expected, rewrite)
+	}
+}
+
+func TestFollowSymLinkRelativeLink(t *testing.T) {
+	link := "testdata/fs/a/e/c/data"
+
+	rewrite, err := FollowSymlinkInScope(link, "testdata")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if expected := abs(t, "testdata/fs/b/c/data"); expected != rewrite {
+		t.Fatalf("Expected %s got %s", expected, rewrite)
+	}
+}
+
+func TestFollowSymLinkRelativeLinkScope(t *testing.T) {
+	link := "testdata/fs/a/f"
+
+	rewrite, err := FollowSymlinkInScope(link, "testdata")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if expected := abs(t, "testdata/test"); expected != rewrite {
+		t.Fatalf("Expected %s got %s", expected, rewrite)
+	}
+
+	link = "testdata/fs/b/h"
+
+	rewrite, err = FollowSymlinkInScope(link, "testdata")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if expected := abs(t, "testdata/root"); expected != rewrite {
+		t.Fatalf("Expected %s got %s", expected, rewrite)
+	}
+}

1  utils/testdata/fs/a/d  vendored  Symbolic link
@@ -0,0 +1 @@
/b

1  utils/testdata/fs/a/e  vendored  Symbolic link
@@ -0,0 +1 @@
../b

1  utils/testdata/fs/a/f  vendored  Symbolic link
@@ -0,0 +1 @@
../../../../test

1  utils/testdata/fs/b/h  vendored  Symbolic link
@@ -0,0 +1 @@
../g

1  utils/testdata/fs/g  vendored  Symbolic link
@@ -0,0 +1 @@
../../../../../../../../../../../../root
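For reference, the symlink fixtures above can be recreated with os.Symlink; this sketch is my own illustration, not part of the commit, and the fixture's plain files and remaining directories are elided:

package main

import (
    "log"
    "os"
)

func main() {
    // Recreate the utils/testdata symlink layout used by fs_test.go.
    for _, d := range []string{"testdata/fs/a", "testdata/fs/b/c"} {
        if err := os.MkdirAll(d, 0755); err != nil {
            log.Fatal(err)
        }
    }
    links := map[string]string{ // link name -> target
        "testdata/fs/a/d": "/b",
        "testdata/fs/a/e": "../b",
        "testdata/fs/a/f": "../../../../test",
        "testdata/fs/b/h": "../g",
        "testdata/fs/g":   "../../../../../../../../../../../../root",
    }
    for name, target := range links {
        if err := os.Symlink(target, name); err != nil {
            log.Fatal(err)
        }
    }
}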
3  vendor/src/github.com/dotcloud/tar/README  vendored
@@ -1,3 +0,0 @@
This is a fork of the upstream Go [archive/tar](http://golang.org/pkg/archive/tar/) package to add PAX header support.

You can monitor the upstream pull request [here](https://codereview.appspot.com/12561043/).
299  vendor/src/github.com/dotcloud/tar/common.go  vendored
@@ -1,299 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package tar implements access to tar archives.
// It aims to cover most of the variations, including those produced
// by GNU and BSD tars.
//
// References:
//   http://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5
//   http://www.gnu.org/software/tar/manual/html_node/Standard.html
//   http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html
package tar

import (
    "bytes"
    "errors"
    "fmt"
    "os"
    "path"
    "time"
)

const (
    blockSize = 512

    // Types
    TypeReg           = '0'    // regular file
    TypeRegA          = '\x00' // regular file
    TypeLink          = '1'    // hard link
    TypeSymlink       = '2'    // symbolic link
    TypeChar          = '3'    // character device node
    TypeBlock         = '4'    // block device node
    TypeDir           = '5'    // directory
    TypeFifo          = '6'    // fifo node
    TypeCont          = '7'    // reserved
    TypeXHeader       = 'x'    // extended header
    TypeXGlobalHeader = 'g'    // global extended header
    TypeGNULongName   = 'L'    // Next file has a long name
    TypeGNULongLink   = 'K'    // Next file symlinks to a file w/ a long name
)

// A Header represents a single header in a tar archive.
// Some fields may not be populated.
type Header struct {
    Name       string    // name of header file entry
    Mode       int64     // permission and mode bits
    Uid        int       // user id of owner
    Gid        int       // group id of owner
    Size       int64     // length in bytes
    ModTime    time.Time // modified time
    Typeflag   byte      // type of header entry
    Linkname   string    // target name of link
    Uname      string    // user name of owner
    Gname      string    // group name of owner
    Devmajor   int64     // major number of character or block device
    Devminor   int64     // minor number of character or block device
    AccessTime time.Time // access time
    ChangeTime time.Time // status change time
}

// File name constants from the tar spec.
const (
    fileNameSize       = 100 // Maximum number of bytes in a standard tar name.
    fileNamePrefixSize = 155 // Maximum number of ustar extension bytes.
)

// FileInfo returns an os.FileInfo for the Header.
func (h *Header) FileInfo() os.FileInfo {
    return headerFileInfo{h}
}

// headerFileInfo implements os.FileInfo.
type headerFileInfo struct {
    h *Header
}

func (fi headerFileInfo) Size() int64        { return fi.h.Size }
func (fi headerFileInfo) IsDir() bool        { return fi.Mode().IsDir() }
func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime }
func (fi headerFileInfo) Sys() interface{}   { return fi.h }

// Name returns the base name of the file.
func (fi headerFileInfo) Name() string {
    if fi.IsDir() {
        return path.Clean(fi.h.Name)
    }
    return fi.h.Name
}

// Mode returns the permission and mode bits for the headerFileInfo.
func (fi headerFileInfo) Mode() (mode os.FileMode) {
    // Set file permission bits.
    mode = os.FileMode(fi.h.Mode).Perm()

    // Set setuid, setgid and sticky bits.
    if fi.h.Mode&c_ISUID != 0 {
        // setuid
        mode |= os.ModeSetuid
    }
    if fi.h.Mode&c_ISGID != 0 {
        // setgid
        mode |= os.ModeSetgid
    }
    if fi.h.Mode&c_ISVTX != 0 {
        // sticky
        mode |= os.ModeSticky
    }

    // Set file mode bits.
    // clear perm, setuid, setgid and sticky bits.
    m := os.FileMode(fi.h.Mode) &^ 07777
    if m == c_ISDIR {
        // directory
        mode |= os.ModeDir
    }
    if m == c_ISFIFO {
        // named pipe (FIFO)
        mode |= os.ModeNamedPipe
    }
    if m == c_ISLNK {
        // symbolic link
        mode |= os.ModeSymlink
    }
    if m == c_ISBLK {
        // device file
        mode |= os.ModeDevice
    }
    if m == c_ISCHR {
        // Unix character device
        mode |= os.ModeDevice
        mode |= os.ModeCharDevice
    }
    if m == c_ISSOCK {
        // Unix domain socket
        mode |= os.ModeSocket
    }

    switch fi.h.Typeflag {
    case TypeLink, TypeSymlink:
        // hard link, symbolic link
        mode |= os.ModeSymlink
    case TypeChar:
        // character device node
        mode |= os.ModeDevice
        mode |= os.ModeCharDevice
    case TypeBlock:
        // block device node
        mode |= os.ModeDevice
    case TypeDir:
        // directory
        mode |= os.ModeDir
    case TypeFifo:
        // fifo node
        mode |= os.ModeNamedPipe
    }

    return mode
}

// sysStat, if non-nil, populates h from system-dependent fields of fi.
var sysStat func(fi os.FileInfo, h *Header) error

// Mode constants from the tar spec.
const (
    c_ISUID  = 04000   // Set uid
    c_ISGID  = 02000   // Set gid
    c_ISVTX  = 01000   // Save text (sticky bit)
    c_ISDIR  = 040000  // Directory
    c_ISFIFO = 010000  // FIFO
    c_ISREG  = 0100000 // Regular file
    c_ISLNK  = 0120000 // Symbolic link
    c_ISBLK  = 060000  // Block special file
    c_ISCHR  = 020000  // Character special file
    c_ISSOCK = 0140000 // Socket
)

// Keywords for the PAX Extended Header
const (
    paxAtime    = "atime"
    paxCharset  = "charset"
    paxComment  = "comment"
    paxCtime    = "ctime" // please note that ctime is not a valid pax header.
    paxGid      = "gid"
    paxGname    = "gname"
    paxLinkpath = "linkpath"
    paxMtime    = "mtime"
    paxPath     = "path"
    paxSize     = "size"
    paxUid      = "uid"
    paxUname    = "uname"
    paxNone     = ""
)

// FileInfoHeader creates a partially-populated Header from fi.
// If fi describes a symlink, FileInfoHeader records link as the link target.
// If fi describes a directory, a slash is appended to the name.
func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) {
    if fi == nil {
        return nil, errors.New("tar: FileInfo is nil")
    }
    fm := fi.Mode()
    h := &Header{
        Name:    fi.Name(),
        ModTime: fi.ModTime(),
        Mode:    int64(fm.Perm()), // or'd with c_IS* constants later
    }
    switch {
    case fm.IsRegular():
        h.Mode |= c_ISREG
        h.Typeflag = TypeReg
        h.Size = fi.Size()
    case fi.IsDir():
        h.Typeflag = TypeDir
        h.Mode |= c_ISDIR
        h.Name += "/"
    case fm&os.ModeSymlink != 0:
        h.Typeflag = TypeSymlink
        h.Mode |= c_ISLNK
        h.Linkname = link
    case fm&os.ModeDevice != 0:
        if fm&os.ModeCharDevice != 0 {
            h.Mode |= c_ISCHR
            h.Typeflag = TypeChar
        } else {
            h.Mode |= c_ISBLK
            h.Typeflag = TypeBlock
        }
    case fm&os.ModeNamedPipe != 0:
        h.Typeflag = TypeFifo
        h.Mode |= c_ISFIFO
    case fm&os.ModeSocket != 0:
        h.Mode |= c_ISSOCK
    default:
        return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm)
    }
    if fm&os.ModeSetuid != 0 {
        h.Mode |= c_ISUID
    }
    if fm&os.ModeSetgid != 0 {
        h.Mode |= c_ISGID
    }
    if fm&os.ModeSticky != 0 {
        h.Mode |= c_ISVTX
    }
    if sysStat != nil {
        return h, sysStat(fi, h)
    }
    return h, nil
}

var zeroBlock = make([]byte, blockSize)

// POSIX specifies a sum of the unsigned byte values, but the Sun tar uses signed byte values.
// We compute and return both.
func checksum(header []byte) (unsigned int64, signed int64) {
    for i := 0; i < len(header); i++ {
        if i == 148 {
            // The chksum field (header[148:156]) is special: it should be treated as space bytes.
            unsigned += ' ' * 8
            signed += ' ' * 8
            i += 7
            continue
        }
        unsigned += int64(header[i])
        signed += int64(int8(header[i]))
    }
    return
}

type slicer []byte

func (sp *slicer) next(n int) (b []byte) {
    s := *sp
    b, *sp = s[0:n], s[n:]
    return
}

func isASCII(s string) bool {
    for _, c := range s {
        if c >= 0x80 {
            return false
        }
    }
    return true
}

func toASCII(s string) string {
    if isASCII(s) {
        return s
    }
    var buf bytes.Buffer
    for _, c := range s {
        if c < 0x80 {
            buf.WriteByte(byte(c))
        }
    }
    return buf.String()
}
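Since this commit drops the vendored fork in favor of the Go 1.2 standard library, here is a minimal sketch of the FileInfoHeader flow the file above implements, written against the stdlib archive/tar; the file name is an illustrative placeholder:

package main

import (
    "archive/tar"
    "fmt"
    "log"
    "os"
)

func main() {
    // Stat any local file; "example.txt" is just a placeholder.
    fi, err := os.Stat("example.txt")
    if err != nil {
        log.Fatal(err)
    }
    // FileInfoHeader maps os.FileMode bits onto the tar mode
    // constants and picks the matching Typeflag, as above.
    hdr, err := tar.FileInfoHeader(fi, "")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("name=%s mode=%o typeflag=%c\n", hdr.Name, hdr.Mode, hdr.Typeflag)
}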
@@ -1,79 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package tar_test

import (
    "archive/tar"
    "bytes"
    "fmt"
    "io"
    "log"
    "os"
)

func Example() {
    // Create a buffer to write our archive to.
    buf := new(bytes.Buffer)

    // Create a new tar archive.
    tw := tar.NewWriter(buf)

    // Add some files to the archive.
    var files = []struct {
        Name, Body string
    }{
        {"readme.txt", "This archive contains some text files."},
        {"gopher.txt", "Gopher names:\nGeorge\nGeoffrey\nGonzo"},
        {"todo.txt", "Get animal handling licence."},
    }
    for _, file := range files {
        hdr := &tar.Header{
            Name: file.Name,
            Size: int64(len(file.Body)),
        }
        if err := tw.WriteHeader(hdr); err != nil {
            log.Fatalln(err)
        }
        if _, err := tw.Write([]byte(file.Body)); err != nil {
            log.Fatalln(err)
        }
    }
    // Make sure to check the error on Close.
    if err := tw.Close(); err != nil {
        log.Fatalln(err)
    }

    // Open the tar archive for reading.
    r := bytes.NewReader(buf.Bytes())
    tr := tar.NewReader(r)

    // Iterate through the files in the archive.
    for {
        hdr, err := tr.Next()
        if err == io.EOF {
            // end of tar archive
            break
        }
        if err != nil {
            log.Fatalln(err)
        }
        fmt.Printf("Contents of %s:\n", hdr.Name)
        if _, err := io.Copy(os.Stdout, tr); err != nil {
            log.Fatalln(err)
        }
        fmt.Println()
    }

    // Output:
    // Contents of readme.txt:
    // This archive contains some text files.
    // Contents of gopher.txt:
    // Gopher names:
    // George
    // Geoffrey
    // Gonzo
    // Contents of todo.txt:
    // Get animal handling licence.
}
396  vendor/src/github.com/dotcloud/tar/reader.go  vendored
@@ -1,396 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package tar

// TODO(dsymonds):
//   - pax extensions

import (
    "bytes"
    "errors"
    "io"
    "io/ioutil"
    "os"
    "strconv"
    "strings"
    "time"
)

var (
    ErrHeader = errors.New("archive/tar: invalid tar header")
)

const maxNanoSecondIntSize = 9

// A Reader provides sequential access to the contents of a tar archive.
// A tar archive consists of a sequence of files.
// The Next method advances to the next file in the archive (including the first),
// and then it can be treated as an io.Reader to access the file's data.
type Reader struct {
    r   io.Reader
    err error
    nb  int64 // number of unread bytes for current file entry
    pad int64 // amount of padding (ignored) after current file entry
}

// NewReader creates a new Reader reading from r.
func NewReader(r io.Reader) *Reader { return &Reader{r: r} }

// Next advances to the next entry in the tar archive.
func (tr *Reader) Next() (*Header, error) {
    var hdr *Header
    if tr.err == nil {
        tr.skipUnread()
    }
    if tr.err != nil {
        return hdr, tr.err
    }
    hdr = tr.readHeader()
    if hdr == nil {
        return hdr, tr.err
    }
    // Check for PAX/GNU header.
    switch hdr.Typeflag {
    case TypeXHeader:
        // PAX extended header
        headers, err := parsePAX(tr)
        if err != nil {
            return nil, err
        }
        // We actually read the whole file,
        // but this skips alignment padding
        tr.skipUnread()
        hdr = tr.readHeader()
        mergePAX(hdr, headers)
        return hdr, nil
    case TypeGNULongName:
        // We have a GNU long name header. Its contents are the real file name.
        realname, err := ioutil.ReadAll(tr)
        if err != nil {
            return nil, err
        }
        hdr, err := tr.Next()
        hdr.Name = cString(realname)
        return hdr, err
    case TypeGNULongLink:
        // We have a GNU long link header.
        realname, err := ioutil.ReadAll(tr)
        if err != nil {
            return nil, err
        }
        hdr, err := tr.Next()
        hdr.Linkname = cString(realname)
        return hdr, err
    }
    return hdr, tr.err
}

// mergePAX merges well known headers according to PAX standard.
// In general headers with the same name as those found
// in the header struct overwrite those found in the header
// struct with higher precision or longer values. Esp. useful
// for name and linkname fields.
func mergePAX(hdr *Header, headers map[string]string) error {
    for k, v := range headers {
        switch k {
        case paxPath:
            hdr.Name = v
        case paxLinkpath:
            hdr.Linkname = v
        case paxGname:
            hdr.Gname = v
        case paxUname:
            hdr.Uname = v
        case paxUid:
            uid, err := strconv.ParseInt(v, 10, 0)
            if err != nil {
                return err
            }
            hdr.Uid = int(uid)
        case paxGid:
            gid, err := strconv.ParseInt(v, 10, 0)
            if err != nil {
                return err
            }
            hdr.Gid = int(gid)
        case paxAtime:
            t, err := parsePAXTime(v)
            if err != nil {
                return err
            }
            hdr.AccessTime = t
        case paxMtime:
            t, err := parsePAXTime(v)
            if err != nil {
                return err
            }
            hdr.ModTime = t
        case paxCtime:
            t, err := parsePAXTime(v)
            if err != nil {
                return err
            }
            hdr.ChangeTime = t
        case paxSize:
            size, err := strconv.ParseInt(v, 10, 0)
            if err != nil {
                return err
            }
            hdr.Size = int64(size)
        }
    }
    return nil
}

// parsePAXTime takes a string of the form %d.%d as described in
// the PAX specification.
func parsePAXTime(t string) (time.Time, error) {
    buf := []byte(t)
    pos := bytes.IndexByte(buf, '.')
    var seconds, nanoseconds int64
    var err error
    if pos == -1 {
        seconds, err = strconv.ParseInt(t, 10, 0)
        if err != nil {
            return time.Time{}, err
        }
    } else {
        seconds, err = strconv.ParseInt(string(buf[:pos]), 10, 0)
        if err != nil {
            return time.Time{}, err
        }
        nano_buf := string(buf[pos+1:])
        // Pad as needed before converting to a decimal.
        // For example .030 -> .030000000 -> 30000000 nanoseconds
        if len(nano_buf) < maxNanoSecondIntSize {
            // Right pad
            nano_buf += strings.Repeat("0", maxNanoSecondIntSize-len(nano_buf))
        } else if len(nano_buf) > maxNanoSecondIntSize {
            // Right truncate
            nano_buf = nano_buf[:maxNanoSecondIntSize]
        }
        nanoseconds, err = strconv.ParseInt(string(nano_buf), 10, 0)
        if err != nil {
            return time.Time{}, err
        }
    }
    ts := time.Unix(seconds, nanoseconds)
    return ts, nil
}

// parsePAX parses PAX headers.
// If an extended header (type 'x') is invalid, ErrHeader is returned
func parsePAX(r io.Reader) (map[string]string, error) {
    buf, err := ioutil.ReadAll(r)
    if err != nil {
        return nil, err
    }
    headers := make(map[string]string)
    // Each record is constructed as
    //     "%d %s=%s\n", length, keyword, value
    for len(buf) > 0 {
        // or the header was empty to start with.
        var sp int
        // The size field ends at the first space.
        sp = bytes.IndexByte(buf, ' ')
        if sp == -1 {
            return nil, ErrHeader
        }
        // Parse the first token as a decimal integer.
        n, err := strconv.ParseInt(string(buf[:sp]), 10, 0)
        if err != nil {
            return nil, ErrHeader
        }
        // Extract everything between the decimal and the n: -1 on the
        // beginning to eat the ' ', -1 on the end to skip the newline.
        var record []byte
        record, buf = buf[sp+1:n-1], buf[n:]
        // The first equals is guaranteed to mark the end of the key.
        // Everything else is value.
        eq := bytes.IndexByte(record, '=')
        if eq == -1 {
            return nil, ErrHeader
        }
        key, value := record[:eq], record[eq+1:]
        headers[string(key)] = string(value)
    }
    return headers, nil
}

// cString parses bytes as a NUL-terminated C-style string.
// If a NUL byte is not found then the whole slice is returned as a string.
func cString(b []byte) string {
    n := 0
    for n < len(b) && b[n] != 0 {
        n++
    }
    return string(b[0:n])
}

func (tr *Reader) octal(b []byte) int64 {
    // Check for binary format first.
    if len(b) > 0 && b[0]&0x80 != 0 {
        var x int64
        for i, c := range b {
            if i == 0 {
                c &= 0x7f // ignore signal bit in first byte
            }
            x = x<<8 | int64(c)
        }
        return x
    }

    // Because unused fields are filled with NULs, we need
    // to skip leading NULs. Fields may also be padded with
    // spaces or NULs.
    // So we remove leading and trailing NULs and spaces to
    // be sure.
    b = bytes.Trim(b, " \x00")

    if len(b) == 0 {
        return 0
    }
    x, err := strconv.ParseUint(cString(b), 8, 64)
    if err != nil {
        tr.err = err
    }
    return int64(x)
}

// skipUnread skips any unread bytes in the existing file entry, as well as any alignment padding.
func (tr *Reader) skipUnread() {
    nr := tr.nb + tr.pad // number of bytes to skip
    tr.nb, tr.pad = 0, 0
    if sr, ok := tr.r.(io.Seeker); ok {
        if _, err := sr.Seek(nr, os.SEEK_CUR); err == nil {
            return
        }
    }
    _, tr.err = io.CopyN(ioutil.Discard, tr.r, nr)
}

func (tr *Reader) verifyChecksum(header []byte) bool {
    if tr.err != nil {
        return false
    }

    given := tr.octal(header[148:156])
    unsigned, signed := checksum(header)
    return given == unsigned || given == signed
}

func (tr *Reader) readHeader() *Header {
    header := make([]byte, blockSize)
    if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil {
        return nil
    }

    // Two blocks of zero bytes marks the end of the archive.
    if bytes.Equal(header, zeroBlock[0:blockSize]) {
        if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil {
            return nil
        }
        if bytes.Equal(header, zeroBlock[0:blockSize]) {
            tr.err = io.EOF
        } else {
            tr.err = ErrHeader // zero block and then non-zero block
        }
        return nil
    }

    if !tr.verifyChecksum(header) {
        tr.err = ErrHeader
        return nil
    }

    // Unpack
    hdr := new(Header)
    s := slicer(header)

    hdr.Name = cString(s.next(100))
    hdr.Mode = tr.octal(s.next(8))
    hdr.Uid = int(tr.octal(s.next(8)))
    hdr.Gid = int(tr.octal(s.next(8)))
    hdr.Size = tr.octal(s.next(12))
    hdr.ModTime = time.Unix(tr.octal(s.next(12)), 0)
    s.next(8) // chksum
    hdr.Typeflag = s.next(1)[0]
    hdr.Linkname = cString(s.next(100))

    // The remainder of the header depends on the value of magic.
    // The original (v7) version of tar had no explicit magic field,
    // so its magic bytes, like the rest of the block, are NULs.
    magic := string(s.next(8)) // contains version field as well.
    var format string
    switch magic {
    case "ustar\x0000": // POSIX tar (1003.1-1988)
        if string(header[508:512]) == "tar\x00" {
            format = "star"
        } else {
            format = "posix"
        }
    case "ustar  \x00": // old GNU tar
        format = "gnu"
    }

    switch format {
    case "posix", "gnu", "star":
        hdr.Uname = cString(s.next(32))
        hdr.Gname = cString(s.next(32))
        devmajor := s.next(8)
        devminor := s.next(8)
        if hdr.Typeflag == TypeChar || hdr.Typeflag == TypeBlock {
            hdr.Devmajor = tr.octal(devmajor)
            hdr.Devminor = tr.octal(devminor)
        }
        var prefix string
        switch format {
        case "posix", "gnu":
            prefix = cString(s.next(155))
        case "star":
            prefix = cString(s.next(131))
            hdr.AccessTime = time.Unix(tr.octal(s.next(12)), 0)
            hdr.ChangeTime = time.Unix(tr.octal(s.next(12)), 0)
        }
        if len(prefix) > 0 {
            hdr.Name = prefix + "/" + hdr.Name
        }
    }

    if tr.err != nil {
        tr.err = ErrHeader
        return nil
    }

    // Maximum value of hdr.Size is 64 GB (12 octal digits),
    // so there's no risk of int64 overflowing.
    tr.nb = int64(hdr.Size)
    tr.pad = -tr.nb & (blockSize - 1) // blockSize is a power of two

    return hdr
}

// Read reads from the current entry in the tar archive.
// It returns 0, io.EOF when it reaches the end of that entry,
// until Next is called to advance to the next entry.
func (tr *Reader) Read(b []byte) (n int, err error) {
    if tr.nb == 0 {
        // file consumed
        return 0, io.EOF
    }

    if int64(len(b)) > tr.nb {
        b = b[0:tr.nb]
    }
    n, err = tr.r.Read(b)
    tr.nb -= int64(n)

    if err == io.EOF && tr.nb > 0 {
        err = io.ErrUnexpectedEOF
    }
    tr.err = err
    return
}
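As a worked example of the record shape parsePAX consumes (my own illustration, not from the diff): each record is "%d %s=%s\n", and the leading decimal counts the entire record, including its own digits, the space, and the trailing newline.

package main

import "fmt"

func main() {
    // One PAX record in the "%d %s=%s\n" shape parsePAX expects.
    // "18" = 2 digits + 1 space + len("path=etc/hosts") (14) + 1 newline.
    record := "18 path=etc/hosts\n"
    fmt.Println(len(record)) // 18, matching the length prefix
}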
385  vendor/src/github.com/dotcloud/tar/reader_test.go  vendored
@@ -1,385 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package tar

import (
    "bytes"
    "crypto/md5"
    "fmt"
    "io"
    "os"
    "reflect"
    "strings"
    "testing"
    "time"
)

type untarTest struct {
    file    string
    headers []*Header
    cksums  []string
}

var gnuTarTest = &untarTest{
    file: "testdata/gnu.tar",
    headers: []*Header{
        {
            Name:     "small.txt",
            Mode:     0640,
            Uid:      73025,
            Gid:      5000,
            Size:     5,
            ModTime:  time.Unix(1244428340, 0),
            Typeflag: '0',
            Uname:    "dsymonds",
            Gname:    "eng",
        },
        {
            Name:     "small2.txt",
            Mode:     0640,
            Uid:      73025,
            Gid:      5000,
            Size:     11,
            ModTime:  time.Unix(1244436044, 0),
            Typeflag: '0',
            Uname:    "dsymonds",
            Gname:    "eng",
        },
    },
    cksums: []string{
        "e38b27eaccb4391bdec553a7f3ae6b2f",
        "c65bd2e50a56a2138bf1716f2fd56fe9",
    },
}

var untarTests = []*untarTest{
    gnuTarTest,
    {
        file: "testdata/star.tar",
        headers: []*Header{
            {
                Name:       "small.txt",
                Mode:       0640,
                Uid:        73025,
                Gid:        5000,
                Size:       5,
                ModTime:    time.Unix(1244592783, 0),
                Typeflag:   '0',
                Uname:      "dsymonds",
                Gname:      "eng",
                AccessTime: time.Unix(1244592783, 0),
                ChangeTime: time.Unix(1244592783, 0),
            },
            {
                Name:       "small2.txt",
                Mode:       0640,
                Uid:        73025,
                Gid:        5000,
                Size:       11,
                ModTime:    time.Unix(1244592783, 0),
                Typeflag:   '0',
                Uname:      "dsymonds",
                Gname:      "eng",
                AccessTime: time.Unix(1244592783, 0),
                ChangeTime: time.Unix(1244592783, 0),
            },
        },
    },
    {
        file: "testdata/v7.tar",
        headers: []*Header{
            {
                Name:     "small.txt",
                Mode:     0444,
                Uid:      73025,
                Gid:      5000,
                Size:     5,
                ModTime:  time.Unix(1244593104, 0),
                Typeflag: '\x00',
            },
            {
                Name:     "small2.txt",
                Mode:     0444,
                Uid:      73025,
                Gid:      5000,
                Size:     11,
                ModTime:  time.Unix(1244593104, 0),
                Typeflag: '\x00',
            },
        },
    },
    {
        file: "testdata/pax.tar",
        headers: []*Header{
            {
                Name:       "a/123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100",
                Mode:       0664,
                Uid:        1000,
                Gid:        1000,
                Uname:      "shane",
                Gname:      "shane",
                Size:       7,
                ModTime:    time.Unix(1350244992, 23960108),
                ChangeTime: time.Unix(1350244992, 23960108),
                AccessTime: time.Unix(1350244992, 23960108),
                Typeflag:   TypeReg,
            },
            {
                Name:       "a/b",
                Mode:       0777,
                Uid:        1000,
                Gid:        1000,
                Uname:      "shane",
                Gname:      "shane",
                Size:       0,
                ModTime:    time.Unix(1350266320, 910238425),
                ChangeTime: time.Unix(1350266320, 910238425),
                AccessTime: time.Unix(1350266320, 910238425),
                Typeflag:   TypeSymlink,
                Linkname:   "123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100",
            },
        },
    },
    {
        file: "testdata/nil-uid.tar", // golang.org/issue/5290
        headers: []*Header{
            {
                Name:     "P1050238.JPG.log",
                Mode:     0664,
                Uid:      0,
                Gid:      0,
                Size:     14,
                ModTime:  time.Unix(1365454838, 0),
                Typeflag: TypeReg,
                Linkname: "",
                Uname:    "eyefi",
                Gname:    "eyefi",
                Devmajor: 0,
                Devminor: 0,
            },
        },
    },
}

func TestReader(t *testing.T) {
testLoop:
    for i, test := range untarTests {
        f, err := os.Open(test.file)
        if err != nil {
            t.Errorf("test %d: Unexpected error: %v", i, err)
            continue
        }
        defer f.Close()
        tr := NewReader(f)
        for j, header := range test.headers {
            hdr, err := tr.Next()
            if err != nil || hdr == nil {
                t.Errorf("test %d, entry %d: Didn't get entry: %v", i, j, err)
                f.Close()
                continue testLoop
            }
            if *hdr != *header {
                t.Errorf("test %d, entry %d: Incorrect header:\nhave %+v\nwant %+v",
                    i, j, *hdr, *header)
            }
        }
        hdr, err := tr.Next()
        if err == io.EOF {
            continue testLoop
        }
        if hdr != nil || err != nil {
            t.Errorf("test %d: Unexpected entry or error: hdr=%v err=%v", i, hdr, err)
        }
    }
}

func TestPartialRead(t *testing.T) {
    f, err := os.Open("testdata/gnu.tar")
    if err != nil {
        t.Fatalf("Unexpected error: %v", err)
    }
    defer f.Close()

    tr := NewReader(f)

    // Read the first four bytes; Next() should skip the last byte.
    hdr, err := tr.Next()
    if err != nil || hdr == nil {
        t.Fatalf("Didn't get first file: %v", err)
    }
    buf := make([]byte, 4)
    if _, err := io.ReadFull(tr, buf); err != nil {
        t.Fatalf("Unexpected error: %v", err)
    }
    if expected := []byte("Kilt"); !bytes.Equal(buf, expected) {
        t.Errorf("Contents = %v, want %v", buf, expected)
    }

    // Second file
    hdr, err = tr.Next()
    if err != nil || hdr == nil {
        t.Fatalf("Didn't get second file: %v", err)
    }
    buf = make([]byte, 6)
    if _, err := io.ReadFull(tr, buf); err != nil {
        t.Fatalf("Unexpected error: %v", err)
    }
    if expected := []byte("Google"); !bytes.Equal(buf, expected) {
        t.Errorf("Contents = %v, want %v", buf, expected)
    }
}

func TestIncrementalRead(t *testing.T) {
    test := gnuTarTest
    f, err := os.Open(test.file)
    if err != nil {
        t.Fatalf("Unexpected error: %v", err)
    }
    defer f.Close()

    tr := NewReader(f)

    headers := test.headers
    cksums := test.cksums
    nread := 0

    // loop over all files
    for ; ; nread++ {
        hdr, err := tr.Next()
        if hdr == nil || err == io.EOF {
            break
        }

        // check the header
        if *hdr != *headers[nread] {
            t.Errorf("Incorrect header:\nhave %+v\nwant %+v",
                *hdr, headers[nread])
        }

        // read file contents in little chunks until EOF,
        // checksumming all the way
        h := md5.New()
        rdbuf := make([]uint8, 8)
        for {
            nr, err := tr.Read(rdbuf)
            if err == io.EOF {
                break
            }
            if err != nil {
                t.Errorf("Read: unexpected error %v\n", err)
                break
            }
            h.Write(rdbuf[0:nr])
        }
        // verify checksum
        have := fmt.Sprintf("%x", h.Sum(nil))
        want := cksums[nread]
        if want != have {
            t.Errorf("Bad checksum on file %s:\nhave %+v\nwant %+v", hdr.Name, have, want)
        }
    }
    if nread != len(headers) {
        t.Errorf("Didn't process all files\nexpected: %d\nprocessed %d\n", len(headers), nread)
    }
}

func TestNonSeekable(t *testing.T) {
    test := gnuTarTest
    f, err := os.Open(test.file)
    if err != nil {
        t.Fatalf("Unexpected error: %v", err)
    }
    defer f.Close()

    type readerOnly struct {
        io.Reader
    }
    tr := NewReader(readerOnly{f})
    nread := 0

    for ; ; nread++ {
        _, err := tr.Next()
        if err == io.EOF {
            break
        }
        if err != nil {
            t.Fatalf("Unexpected error: %v", err)
        }
    }

    if nread != len(test.headers) {
        t.Errorf("Didn't process all files\nexpected: %d\nprocessed %d\n", len(test.headers), nread)
    }
}

func TestParsePAXHeader(t *testing.T) {
    paxTests := [][3]string{
        {"a", "a=name", "10 a=name\n"}, // Test case involving multiple acceptable lengths
        {"a", "a=name", "9 a=name\n"},  // Test case involving multiple acceptable lengths
        {"mtime", "mtime=1350244992.023960108", "30 mtime=1350244992.023960108\n"}}
    for _, test := range paxTests {
        key, expected, raw := test[0], test[1], test[2]
        reader := bytes.NewBuffer([]byte(raw))
        headers, err := parsePAX(reader)
        if err != nil {
            t.Errorf("Couldn't parse correctly formatted headers: %v", err)
            continue
        }
        if strings.EqualFold(headers[key], expected) {
            t.Errorf("mtime header incorrectly parsed: got %s, wanted %s", headers[key], expected)
            continue
        }
        trailer := make([]byte, 100)
        n, err := reader.Read(trailer)
        if err != io.EOF || n != 0 {
            t.Error("Buffer wasn't consumed")
        }
    }
    badHeader := bytes.NewBuffer([]byte("3 somelongkey="))
    if _, err := parsePAX(badHeader); err != ErrHeader {
        t.Fatal("Unexpected success when parsing bad header")
    }
}

func TestParsePAXTime(t *testing.T) {
    // Some valid PAX time values
    timestamps := map[string]time.Time{
        "1350244992.023960108":  time.Unix(1350244992, 23960108), // The common case
        "1350244992.02396010":   time.Unix(1350244992, 23960100), // Lower precision value
        "1350244992.0239601089": time.Unix(1350244992, 23960108), // Higher precision value
        "1350244992":            time.Unix(1350244992, 0),        // Low precision value
    }
    for input, expected := range timestamps {
        ts, err := parsePAXTime(input)
        if err != nil {
            t.Fatal(err)
        }
        if !ts.Equal(expected) {
            t.Fatalf("Time parsing failure %s %s", ts, expected)
        }
    }
}

func TestMergePAX(t *testing.T) {
    hdr := new(Header)
    // Test a string, integer, and time based value.
    headers := map[string]string{
        "path":  "a/b/c",
        "uid":   "1000",
        "mtime": "1350244992.023960108",
    }
    err := mergePAX(hdr, headers)
    if err != nil {
        t.Fatal(err)
    }
    want := &Header{
        Name:    "a/b/c",
        Uid:     1000,
        ModTime: time.Unix(1350244992, 23960108),
    }
    if !reflect.DeepEqual(hdr, want) {
        t.Errorf("incorrect merge: got %+v, want %+v", hdr, want)
    }
}
20  vendor/src/github.com/dotcloud/tar/stat_atim.go  vendored
@@ -1,20 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build linux openbsd

package tar

import (
    "syscall"
    "time"
)

func statAtime(st *syscall.Stat_t) time.Time {
    return time.Unix(st.Atim.Unix())
}

func statCtime(st *syscall.Stat_t) time.Time {
    return time.Unix(st.Ctim.Unix())
}
@@ -1,20 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build darwin freebsd netbsd

package tar

import (
    "syscall"
    "time"
)

func statAtime(st *syscall.Stat_t) time.Time {
    return time.Unix(st.Atimespec.Unix())
}

func statCtime(st *syscall.Stat_t) time.Time {
    return time.Unix(st.Ctimespec.Unix())
}
32  vendor/src/github.com/dotcloud/tar/stat_unix.go  vendored
@@ -1,32 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build linux darwin freebsd openbsd netbsd

package tar

import (
    "os"
    "syscall"
)

func init() {
    sysStat = statUnix
}

func statUnix(fi os.FileInfo, h *Header) error {
    sys, ok := fi.Sys().(*syscall.Stat_t)
    if !ok {
        return nil
    }
    h.Uid = int(sys.Uid)
    h.Gid = int(sys.Gid)
    // TODO(bradfitz): populate username & group. os/user
    // doesn't cache LookupId lookups, and lacks group
    // lookup functions.
    h.AccessTime = statAtime(sys)
    h.ChangeTime = statCtime(sys)
    // TODO(bradfitz): major/minor device numbers?
    return nil
}
271  vendor/src/github.com/dotcloud/tar/tar_test.go  vendored
@@ -1,271 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package tar

import (
    "bytes"
    "io/ioutil"
    "os"
    "reflect"
    "testing"
    "time"
)

func TestFileInfoHeader(t *testing.T) {
    fi, err := os.Stat("testdata/small.txt")
    if err != nil {
        t.Fatal(err)
    }
    h, err := FileInfoHeader(fi, "")
    if err != nil {
        t.Fatalf("FileInfoHeader: %v", err)
    }
    if g, e := h.Name, "small.txt"; g != e {
        t.Errorf("Name = %q; want %q", g, e)
    }
    if g, e := h.Mode, int64(fi.Mode().Perm())|c_ISREG; g != e {
        t.Errorf("Mode = %#o; want %#o", g, e)
    }
    if g, e := h.Size, int64(5); g != e {
        t.Errorf("Size = %v; want %v", g, e)
    }
    if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) {
        t.Errorf("ModTime = %v; want %v", g, e)
    }
}

func TestFileInfoHeaderDir(t *testing.T) {
    fi, err := os.Stat("testdata")
    if err != nil {
        t.Fatal(err)
    }
    h, err := FileInfoHeader(fi, "")
    if err != nil {
        t.Fatalf("FileInfoHeader: %v", err)
    }
    if g, e := h.Name, "testdata/"; g != e {
        t.Errorf("Name = %q; want %q", g, e)
    }
    // Ignoring c_ISGID for golang.org/issue/4867
    if g, e := h.Mode&^c_ISGID, int64(fi.Mode().Perm())|c_ISDIR; g != e {
        t.Errorf("Mode = %#o; want %#o", g, e)
    }
    if g, e := h.Size, int64(0); g != e {
        t.Errorf("Size = %v; want %v", g, e)
    }
    if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) {
        t.Errorf("ModTime = %v; want %v", g, e)
    }
}

func TestFileInfoHeaderSymlink(t *testing.T) {
    h, err := FileInfoHeader(symlink{}, "some-target")
    if err != nil {
        t.Fatal(err)
    }
    if g, e := h.Name, "some-symlink"; g != e {
        t.Errorf("Name = %q; want %q", g, e)
    }
    if g, e := h.Linkname, "some-target"; g != e {
        t.Errorf("Linkname = %q; want %q", g, e)
    }
}

type symlink struct{}

func (symlink) Name() string       { return "some-symlink" }
func (symlink) Size() int64        { return 0 }
func (symlink) Mode() os.FileMode  { return os.ModeSymlink }
func (symlink) ModTime() time.Time { return time.Time{} }
func (symlink) IsDir() bool        { return false }
func (symlink) Sys() interface{}   { return nil }

func TestRoundTrip(t *testing.T) {
    data := []byte("some file contents")

    var b bytes.Buffer
    tw := NewWriter(&b)
    hdr := &Header{
        Name:    "file.txt",
        Uid:     1 << 21, // too big for 8 octal digits
        Size:    int64(len(data)),
        ModTime: time.Now(),
    }
    // tar only supports second precision.
    hdr.ModTime = hdr.ModTime.Add(-time.Duration(hdr.ModTime.Nanosecond()) * time.Nanosecond)
    if err := tw.WriteHeader(hdr); err != nil {
        t.Fatalf("tw.WriteHeader: %v", err)
    }
    if _, err := tw.Write(data); err != nil {
        t.Fatalf("tw.Write: %v", err)
    }
    if err := tw.Close(); err != nil {
        t.Fatalf("tw.Close: %v", err)
    }

    // Read it back.
    tr := NewReader(&b)
    rHdr, err := tr.Next()
    if err != nil {
        t.Fatalf("tr.Next: %v", err)
    }
    if !reflect.DeepEqual(rHdr, hdr) {
        t.Errorf("Header mismatch.\n got %+v\nwant %+v", rHdr, hdr)
    }
    rData, err := ioutil.ReadAll(tr)
    if err != nil {
        t.Fatalf("Read: %v", err)
    }
    if !bytes.Equal(rData, data) {
        t.Errorf("Data mismatch.\n got %q\nwant %q", rData, data)
    }
}

type headerRoundTripTest struct {
    h  *Header
    fm os.FileMode
}

func TestHeaderRoundTrip(t *testing.T) {
    golden := []headerRoundTripTest{
        // regular file.
        {
            h: &Header{
                Name:     "test.txt",
                Mode:     0644 | c_ISREG,
                Size:     12,
                ModTime:  time.Unix(1360600916, 0),
                Typeflag: TypeReg,
            },
            fm: 0644,
        },
        // hard link.
        {
            h: &Header{
                Name:     "hard.txt",
                Mode:     0644 | c_ISLNK,
                Size:     0,
                ModTime:  time.Unix(1360600916, 0),
                Typeflag: TypeLink,
            },
            fm: 0644 | os.ModeSymlink,
        },
        // symbolic link.
        {
            h: &Header{
                Name:     "link.txt",
                Mode:     0777 | c_ISLNK,
                Size:     0,
                ModTime:  time.Unix(1360600852, 0),
                Typeflag: TypeSymlink,
            },
            fm: 0777 | os.ModeSymlink,
        },
        // character device node.
        {
            h: &Header{
                Name:     "dev/null",
                Mode:     0666 | c_ISCHR,
                Size:     0,
                ModTime:  time.Unix(1360578951, 0),
                Typeflag: TypeChar,
            },
            fm: 0666 | os.ModeDevice | os.ModeCharDevice,
        },
        // block device node.
        {
            h: &Header{
                Name:     "dev/sda",
                Mode:     0660 | c_ISBLK,
                Size:     0,
                ModTime:  time.Unix(1360578954, 0),
                Typeflag: TypeBlock,
            },
            fm: 0660 | os.ModeDevice,
        },
        // directory.
        {
            h: &Header{
                Name:     "dir/",
                Mode:     0755 | c_ISDIR,
                Size:     0,
                ModTime:  time.Unix(1360601116, 0),
                Typeflag: TypeDir,
            },
            fm: 0755 | os.ModeDir,
        },
        // fifo node.
        {
            h: &Header{
                Name:     "dev/initctl",
                Mode:     0600 | c_ISFIFO,
                Size:     0,
                ModTime:  time.Unix(1360578949, 0),
                Typeflag: TypeFifo,
            },
            fm: 0600 | os.ModeNamedPipe,
        },
        // setuid.
        {
            h: &Header{
                Name:     "bin/su",
                Mode:     0755 | c_ISREG | c_ISUID,
                Size:     23232,
                ModTime:  time.Unix(1355405093, 0),
                Typeflag: TypeReg,
            },
            fm: 0755 | os.ModeSetuid,
        },
        // setgid.
        {
            h: &Header{
                Name:     "group.txt",
                Mode:     0750 | c_ISREG | c_ISGID,
                Size:     0,
                ModTime:  time.Unix(1360602346, 0),
                Typeflag: TypeReg,
            },
            fm: 0750 | os.ModeSetgid,
        },
        // sticky.
        {
            h: &Header{
                Name:     "sticky.txt",
                Mode:     0600 | c_ISREG | c_ISVTX,
                Size:     7,
                ModTime:  time.Unix(1360602540, 0),
                Typeflag: TypeReg,
            },
            fm: 0600 | os.ModeSticky,
        },
    }

    for i, g := range golden {
        fi := g.h.FileInfo()
        h2, err := FileInfoHeader(fi, "")
        if err != nil {
            t.Error(err)
            continue
        }
        if got, want := h2.Name, g.h.Name; got != want {
            t.Errorf("i=%d: Name: got %v, want %v", i, got, want)
        }
        if got, want := h2.Size, g.h.Size; got != want {
            t.Errorf("i=%d: Size: got %v, want %v", i, got, want)
        }
        if got, want := h2.Mode, g.h.Mode; got != want {
            t.Errorf("i=%d: Mode: got %o, want %o", i, got, want)
        }
        if got, want := fi.Mode(), g.fm; got != want {
            t.Errorf("i=%d: fi.Mode: got %o, want %o", i, got, want)
        }
        if got, want := h2.ModTime, g.h.ModTime; got != want {
            t.Errorf("i=%d: ModTime: got %v, want %v", i, got, want)
        }
        if sysh, ok := fi.Sys().(*Header); !ok || sysh != g.h {
            t.Errorf("i=%d: Sys didn't return original *Header", i)
        }
    }
}
BIN  vendor/src/github.com/dotcloud/tar/testdata/gnu.tar  vendored
Binary file not shown.

BIN  vendor/src/github.com/dotcloud/tar/testdata/pax.tar  vendored
Binary file not shown.

@@ -1 +0,0 @@
Kilts

@@ -1 +0,0 @@
Google.com

BIN  vendor/src/github.com/dotcloud/tar/testdata/star.tar  vendored
Binary file not shown.

BIN  vendor/src/github.com/dotcloud/tar/testdata/v7.tar  vendored
Binary file not shown.

Binary file not shown.
377
vendor/src/github.com/dotcloud/tar/writer.go
vendored
377
vendor/src/github.com/dotcloud/tar/writer.go
vendored
|
@ -1,377 +0,0 @@
|
|||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package tar
|
||||
|
||||
// TODO(dsymonds):
|
||||
// - catch more errors (no first header, etc.)
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrWriteTooLong = errors.New("archive/tar: write too long")
|
||||
ErrFieldTooLong = errors.New("archive/tar: header field too long")
|
||||
ErrWriteAfterClose = errors.New("archive/tar: write after close")
|
||||
errNameTooLong = errors.New("archive/tar: name too long")
|
||||
errInvalidHeader = errors.New("archive/tar: header field too long or contains invalid values")
|
||||
)
|
||||
|
||||
// A Writer provides sequential writing of a tar archive in POSIX.1 format.
|
||||
// A tar archive consists of a sequence of files.
|
||||
// Call WriteHeader to begin a new file, and then call Write to supply that file's data,
|
||||
// writing at most hdr.Size bytes in total.
|
||||
type Writer struct {
|
||||
w io.Writer
|
||||
err error
|
||||
nb int64 // number of unwritten bytes for current file entry
|
||||
pad int64 // amount of padding to write after current file entry
|
||||
closed bool
|
||||
usedBinary bool // whether the binary numeric field extension was used
|
||||
preferPax bool // use pax header instead of binary numeric header
|
||||
}
|
||||
|
||||
// NewWriter creates a new Writer writing to w.
|
||||
func NewWriter(w io.Writer) *Writer { return &Writer{w: w} }
|
||||
|
||||
// Flush finishes writing the current file (optional).
|
||||
func (tw *Writer) Flush() error {
|
||||
if tw.nb > 0 {
|
||||
tw.err = fmt.Errorf("archive/tar: missed writing %d bytes", tw.nb)
|
||||
return tw.err
|
||||
}
|
||||
|
||||
n := tw.nb + tw.pad
|
||||
for n > 0 && tw.err == nil {
|
||||
nr := n
|
||||
if nr > blockSize {
|
||||
nr = blockSize
|
||||
}
|
||||
var nw int
|
||||
nw, tw.err = tw.w.Write(zeroBlock[0:nr])
|
||||
n -= int64(nw)
|
||||
}
|
||||
tw.nb = 0
|
||||
tw.pad = 0
|
||||
return tw.err
|
||||
}
|
||||
|
||||
// Write s into b, terminating it with a NUL if there is room.
|
||||
// If the value is too long for the field and allowPax is true add a paxheader record instead
|
||||
func (tw *Writer) cString(b []byte, s string, allowPax bool, paxKeyword string, paxHeaders map[string]string) {
|
||||
needsPaxHeader := allowPax && len(s) > len(b) || !isASCII(s)
|
||||
if needsPaxHeader {
|
||||
paxHeaders[paxKeyword] = s
|
||||
return
|
||||
}
|
||||
if len(s) > len(b) {
|
||||
if tw.err == nil {
|
||||
tw.err = ErrFieldTooLong
|
||||
}
|
||||
return
|
||||
}
|
||||
ascii := toASCII(s)
|
||||
copy(b, ascii)
|
||||
if len(ascii) < len(b) {
|
||||
b[len(ascii)] = 0
|
||||
}
|
||||
}
|
||||
|
||||
// Encode x as an octal ASCII string and write it into b with leading zeros.
|
||||
func (tw *Writer) octal(b []byte, x int64) {
|
||||
s := strconv.FormatInt(x, 8)
|
||||
// leading zeros, but leave room for a NUL.
|
||||
for len(s)+1 < len(b) {
|
||||
s = "0" + s
|
||||
}
|
||||
tw.cString(b, s, false, paxNone, nil)
|
||||
}
|
||||
|
||||
// Write x into b, either as octal or as binary (GNUtar/star extension).
|
||||
// If the value is too long for the field and writingPax is enabled both for the field and the add a paxheader record instead
|
||||
func (tw *Writer) numeric(b []byte, x int64, allowPax bool, paxKeyword string, paxHeaders map[string]string) {
|
||||
// Try octal first.
|
||||
s := strconv.FormatInt(x, 8)
|
||||
if len(s) < len(b) {
|
||||
tw.octal(b, x)
|
||||
return
|
||||
}
|
||||
|
||||
// If it is too long for octal, and pax is preferred, use a pax header
|
||||
if allowPax && tw.preferPax {
|
||||
tw.octal(b, 0)
|
||||
s := strconv.FormatInt(x, 10)
|
||||
paxHeaders[paxKeyword] = s
|
||||
return
|
||||
}
|
||||
|
||||
// Too big: use binary (big-endian).
|
||||
tw.usedBinary = true
|
||||
for i := len(b) - 1; x > 0 && i >= 0; i-- {
|
||||
b[i] = byte(x)
|
||||
x >>= 8
|
||||
}
|
||||
b[0] |= 0x80 // highest bit indicates binary format
|
||||
}
|
||||
|
||||
var (
|
||||
minTime = time.Unix(0, 0)
|
||||
// There is room for 11 octal digits (33 bits) of mtime.
|
||||
maxTime = minTime.Add((1<<33 - 1) * time.Second)
|
||||
)
|
||||
|
||||
// WriteHeader writes hdr and prepares to accept the file's contents.
|
||||
// WriteHeader calls Flush if it is not the first header.
|
||||
// Calling after a Close will return ErrWriteAfterClose.
|
||||
func (tw *Writer) WriteHeader(hdr *Header) error {
|
||||
return tw.writeHeader(hdr, true)
|
||||
}
|
||||
|
||||
// WriteHeader writes hdr and prepares to accept the file's contents.
|
||||
// WriteHeader calls Flush if it is not the first header.
|
||||
// Calling after a Close will return ErrWriteAfterClose.
|
||||
// As this method is called internally by writePax header to allow it to
|
||||
// suppress writing the pax header.
|
||||
func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
|
||||
if tw.closed {
|
||||
return ErrWriteAfterClose
|
||||
}
|
||||
if tw.err == nil {
|
||||
tw.Flush()
|
||||
}
|
||||
if tw.err != nil {
|
||||
return tw.err
|
||||
}
|
||||
|
||||
// a map to hold pax header records, if any are needed
|
||||
paxHeaders := make(map[string]string)
|
||||
|
||||
// TODO(shanemhansen): we might want to use PAX headers for
|
||||
// subsecond time resolution, but for now let's just capture
|
||||
// too long fields or non ascii characters
|
||||
|
||||
header := make([]byte, blockSize)
|
||||
s := slicer(header)
|
||||
|
||||
// keep a reference to the filename to allow to overwrite it later if we detect that we can use ustar longnames instead of pax
|
||||
pathHeaderBytes := s.next(fileNameSize)
|
||||
|
||||
tw.cString(pathHeaderBytes, hdr.Name, true, paxPath, paxHeaders)
|
||||
|
||||
// Handle out of range ModTime carefully.
|
||||
var modTime int64
|
||||
if !hdr.ModTime.Before(minTime) && !hdr.ModTime.After(maxTime) {
|
||||
modTime = hdr.ModTime.Unix()
|
||||
}
|
||||
|
||||
tw.octal(s.next(8), hdr.Mode) // 100:108
|
||||
tw.numeric(s.next(8), int64(hdr.Uid), true, paxUid, paxHeaders) // 108:116
|
||||
tw.numeric(s.next(8), int64(hdr.Gid), true, paxGid, paxHeaders) // 116:124
|
||||
tw.numeric(s.next(12), hdr.Size, true, paxSize, paxHeaders) // 124:136
|
||||
tw.numeric(s.next(12), modTime, false, paxNone, nil) // 136:148 --- consider using pax for finer granularity
|
||||
s.next(8) // chksum (148:156)
|
||||
s.next(1)[0] = hdr.Typeflag // 156:157
|
||||
|
||||
tw.cString(s.next(100), hdr.Linkname, true, paxLinkpath, paxHeaders)
|
||||
|
||||
copy(s.next(8), []byte("ustar\x0000")) // 257:265
|
||||
tw.cString(s.next(32), hdr.Uname, true, paxUname, paxHeaders) // 265:297
|
||||
tw.cString(s.next(32), hdr.Gname, true, paxGname, paxHeaders) // 297:329
|
||||
tw.numeric(s.next(8), hdr.Devmajor, false, paxNone, nil) // 329:337
|
||||
tw.numeric(s.next(8), hdr.Devminor, false, paxNone, nil) // 337:345
|
||||
|
||||
// keep a reference to the prefix to allow to overwrite it later if we detect that we can use ustar longnames instead of pax
|
||||
prefixHeaderBytes := s.next(155)
|
||||
tw.cString(prefixHeaderBytes, "", false, paxNone, nil) // 345:500 prefix
|
||||
|
||||
// Use the GNU magic instead of POSIX magic if we used any GNU extensions.
|
||||
if tw.usedBinary {
|
||||
copy(header[257:265], []byte("ustar \x00"))
|
||||
}
|
||||
|
||||
_, paxPathUsed := paxHeaders[paxPath]
|
||||
// try to use a ustar header when only the name is too long
|
||||
if !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed {
|
||||
suffix := hdr.Name
|
||||
prefix := ""
|
||||
if len(hdr.Name) > fileNameSize && isASCII(hdr.Name) {
|
||||
var err error
|
||||
prefix, suffix, err = tw.splitUSTARLongName(hdr.Name)
|
||||
if err == nil {
|
||||
// ok we can use a ustar long name instead of pax, now correct the fields
|
||||
|
||||
// remove the path field from the pax header. this will suppress the pax header
|
||||
delete(paxHeaders, paxPath)
|
||||
|
||||
// update the path fields
|
||||
tw.cString(pathHeaderBytes, suffix, false, paxNone, nil)
|
||||
tw.cString(prefixHeaderBytes, prefix, false, paxNone, nil)
|
||||
|
||||
// Use the ustar magic if we used ustar long names.
|
||||
if len(prefix) > 0 {
|
||||
copy(header[257:265], []byte("ustar\000"))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// The chksum field is terminated by a NUL and a space.
|
||||
// This is different from the other octal fields.
|
||||
chksum, _ := checksum(header)
|
||||
tw.octal(header[148:155], chksum)
|
||||
header[155] = ' '
|
||||
|
||||
if tw.err != nil {
|
||||
// problem with header; probably integer too big for a field.
|
||||
return tw.err
|
||||
}
|
||||
|
||||
if len(paxHeaders) > 0 {
|
||||
if !allowPax {
|
||||
return errInvalidHeader
|
||||
}
|
||||
if err := tw.writePAXHeader(hdr, paxHeaders); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
tw.nb = int64(hdr.Size)
|
||||
tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize
|
||||
|
||||
_, tw.err = tw.w.Write(header)
|
||||
return tw.err
|
||||
}
|
||||
|
||||
// writeUSTARLongName splits a USTAR long name hdr.Name.
|
||||
// name must be < 256 characters. errNameTooLong is returned
|
||||
// if hdr.Name can't be split. The splitting heuristic
|
||||
// is compatible with gnu tar.
|
||||
func (tw *Writer) splitUSTARLongName(name string) (prefix, suffix string, err error) {
|
||||
length := len(name)
|
||||
if length > fileNamePrefixSize+1 {
|
||||
length = fileNamePrefixSize + 1
|
||||
} else if name[length-1] == '/' {
|
||||
length--
|
||||
}
|
||||
i := strings.LastIndex(name[:length], "/")
|
||||
// nlen contains the resulting length in the name field.
|
||||
// plen contains the resulting length in the prefix field.
|
||||
nlen := len(name) - i - 1
|
||||
plen := i
|
||||
if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize {
|
||||
err = errNameTooLong
|
||||
return
|
||||
}
|
||||
prefix, suffix = name[:i], name[i+1:]
|
||||
return
|
||||
}
|
||||
|
||||
// writePAXHeader writes an extended pax header to the
// archive.
func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) error {
	// Prepare extended header
	ext := new(Header)
	ext.Typeflag = TypeXHeader
	// Setting ModTime is required for reader parsing to
	// succeed, and seems harmless enough.
	ext.ModTime = hdr.ModTime
	// The spec asks that we namespace our pseudo files
	// with the current pid.
	pid := os.Getpid()
	dir, file := path.Split(hdr.Name)
	fullName := path.Join(dir,
		fmt.Sprintf("PaxHeaders.%d", pid), file)

	ascii := toASCII(fullName)
	if len(ascii) > 100 {
		ascii = ascii[:100]
	}
	ext.Name = ascii
	// Construct the body
	var buf bytes.Buffer

	for k, v := range paxHeaders {
		fmt.Fprint(&buf, paxHeader(k+"="+v))
	}

	ext.Size = int64(len(buf.Bytes()))
	if err := tw.writeHeader(ext, false); err != nil {
		return err
	}
	if _, err := tw.Write(buf.Bytes()); err != nil {
		return err
	}
	if err := tw.Flush(); err != nil {
		return err
	}
	return nil
}

// paxHeader formats a single pax record, prefixing it with the appropriate length
func paxHeader(msg string) string {
	const padding = 2 // Extra padding for space and newline
	size := len(msg) + padding
	size += len(strconv.Itoa(size))
	record := fmt.Sprintf("%d %s\n", size, msg)
	if len(record) != size {
		// Final adjustment if adding size increased
		// the number of digits in size
		size = len(record)
		record = fmt.Sprintf("%d %s\n", size, msg)
	}
	return record
}

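// Worked example: for msg = "path=/etc/hosts" (15 bytes), size starts at
// 15+2 = 17, then grows by len("17") = 2 to 19, and the resulting record
// "19 path=/etc/hosts\n" is exactly 19 bytes, so no second pass is needed.
// The adjustment pass handles carries, e.g. "a=names": 7+2+1 = 10, but
// "10 a=names\n" is 11 bytes, so the record is rewritten as "11 a=names\n".
//
//	fmt.Print(paxHeader("path=/etc/hosts")) // prints "19 path=/etc/hosts\n"
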
// Write writes to the current entry in the tar archive.
// Write returns the error ErrWriteTooLong if more than
// hdr.Size bytes are written after WriteHeader.
func (tw *Writer) Write(b []byte) (n int, err error) {
	if tw.closed {
		err = ErrWriteTooLong
		return
	}
	overwrite := false
	if int64(len(b)) > tw.nb {
		b = b[0:tw.nb]
		overwrite = true
	}
	n, err = tw.w.Write(b)
	tw.nb -= int64(n)
	if err == nil && overwrite {
		err = ErrWriteTooLong
		return
	}
	tw.err = err
	return
}

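// Usage sketch of the size contract: the write budget is fixed by hdr.Size
// at WriteHeader time, and overruns are truncated and reported. Assuming the
// archive/tar-style API that this vendored package mirrors:
//
//	var buf bytes.Buffer
//	tw := NewWriter(&buf)
//	hdr := &Header{Name: "a.txt", Mode: 0644, Size: 5}
//	if err := tw.WriteHeader(hdr); err != nil {
//		panic(err)
//	}
//	n, err := tw.Write([]byte("hello, world")) // 12 bytes, budget is 5
//	// n == 5, err == ErrWriteTooLong: only the first 5 bytes were written
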
// Close closes the tar archive, flushing any unwritten
// data to the underlying writer.
func (tw *Writer) Close() error {
	if tw.err != nil || tw.closed {
		return tw.err
	}
	tw.Flush()
	tw.closed = true
	if tw.err != nil {
		return tw.err
	}

	// trailer: two zero blocks
	for i := 0; i < 2; i++ {
		_, tw.err = tw.w.Write(zeroBlock)
		if tw.err != nil {
			break
		}
	}
	return tw.err
}
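// End-to-end sketch of the on-disk layout this writer produces: one 512-byte
// header block, data padded to a block boundary, then the two zero blocks
// written by Close. Assuming the archive/tar-style API that this vendored
// package mirrors:
//
//	var buf bytes.Buffer
//	tw := NewWriter(&buf)
//	if err := tw.WriteHeader(&Header{Name: "hello.txt", Mode: 0644, Size: 5}); err != nil {
//		panic(err)
//	}
//	if _, err := tw.Write([]byte("Kilts")); err != nil {
//		panic(err)
//	}
//	if err := tw.Close(); err != nil {
//		panic(err)
//	}
//	// buf.Len() == 2048: 512 (header) + 512 (5 data bytes padded) + 1024 (trailer)
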
393
vendor/src/github.com/dotcloud/tar/writer_test.go
vendored
@@ -1,393 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package tar

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"strings"
	"testing"
	"testing/iotest"
	"time"
)

type writerTestEntry struct {
	header   *Header
	contents string
}

type writerTest struct {
	file    string // filename of expected output
	entries []*writerTestEntry
}

var writerTests = []*writerTest{
	// The writer test file was produced with this command:
	// tar (GNU tar) 1.26
	//   ln -s small.txt link.txt
	//   tar -b 1 --format=ustar -c -f writer.tar small.txt small2.txt link.txt
	{
		file: "testdata/writer.tar",
		entries: []*writerTestEntry{
			{
				header: &Header{
					Name:     "small.txt",
					Mode:     0640,
					Uid:      73025,
					Gid:      5000,
					Size:     5,
					ModTime:  time.Unix(1246508266, 0),
					Typeflag: '0',
					Uname:    "dsymonds",
					Gname:    "eng",
				},
				contents: "Kilts",
			},
			{
				header: &Header{
					Name:     "small2.txt",
					Mode:     0640,
					Uid:      73025,
					Gid:      5000,
					Size:     11,
					ModTime:  time.Unix(1245217492, 0),
					Typeflag: '0',
					Uname:    "dsymonds",
					Gname:    "eng",
				},
				contents: "Google.com\n",
			},
			{
				header: &Header{
					Name:     "link.txt",
					Mode:     0777,
					Uid:      1000,
					Gid:      1000,
					Size:     0,
					ModTime:  time.Unix(1314603082, 0),
					Typeflag: '2',
					Linkname: "small.txt",
					Uname:    "strings",
					Gname:    "strings",
				},
				// no contents
			},
		},
	},
	// The truncated test file was produced using these commands:
	//   dd if=/dev/zero bs=1048576 count=16384 > /tmp/16gig.txt
	//   tar -b 1 -c -f- /tmp/16gig.txt | dd bs=512 count=8 > writer-big.tar
	{
		file: "testdata/writer-big.tar",
		entries: []*writerTestEntry{
			{
				header: &Header{
					Name:     "tmp/16gig.txt",
					Mode:     0640,
					Uid:      73025,
					Gid:      5000,
					Size:     16 << 30,
					ModTime:  time.Unix(1254699560, 0),
					Typeflag: '0',
					Uname:    "dsymonds",
					Gname:    "eng",
				},
				// fake contents
				contents: strings.Repeat("\x00", 4<<10),
			},
		},
	},
	// This file was produced using gnu tar 1.17
	//   gnutar -b 4 --format=ustar (longname/)*15 + file.txt
	{
		file: "testdata/ustar.tar",
		entries: []*writerTestEntry{
			{
				header: &Header{
					Name:     strings.Repeat("longname/", 15) + "file.txt",
					Mode:     0644,
					Uid:      0765,
					Gid:      024,
					Size:     06,
					ModTime:  time.Unix(1360135598, 0),
					Typeflag: '0',
					Uname:    "shane",
					Gname:    "staff",
				},
				contents: "hello\n",
			},
		},
	},
}

// Render a byte array as two-character hexadecimal values, spaced for easy visual inspection.
func bytestr(offset int, b []byte) string {
	const rowLen = 32
	s := fmt.Sprintf("%04x ", offset)
	for _, ch := range b {
		switch {
		case '0' <= ch && ch <= '9', 'A' <= ch && ch <= 'Z', 'a' <= ch && ch <= 'z':
			s += fmt.Sprintf("  %c", ch)
		default:
			s += fmt.Sprintf(" %02x", ch)
		}
	}
	return s
}

// Render a pseudo-diff between two blocks of bytes.
func bytediff(a []byte, b []byte) string {
	const rowLen = 32
	s := fmt.Sprintf("(%d bytes vs. %d bytes)\n", len(a), len(b))
	for offset := 0; len(a)+len(b) > 0; offset += rowLen {
		na, nb := rowLen, rowLen
		if na > len(a) {
			na = len(a)
		}
		if nb > len(b) {
			nb = len(b)
		}
		sa := bytestr(offset, a[0:na])
		sb := bytestr(offset, b[0:nb])
		if sa != sb {
			s += fmt.Sprintf("-%v\n+%v\n", sa, sb)
		}
		a = a[na:]
		b = b[nb:]
	}
	return s
}

func TestWriter(t *testing.T) {
testLoop:
	for i, test := range writerTests {
		expected, err := ioutil.ReadFile(test.file)
		if err != nil {
			t.Errorf("test %d: Unexpected error: %v", i, err)
			continue
		}

		buf := new(bytes.Buffer)
		tw := NewWriter(iotest.TruncateWriter(buf, 4<<10)) // only catch the first 4 KB
		big := false
		for j, entry := range test.entries {
			big = big || entry.header.Size > 1<<10
			if err := tw.WriteHeader(entry.header); err != nil {
				t.Errorf("test %d, entry %d: Failed writing header: %v", i, j, err)
				continue testLoop
			}
			if _, err := io.WriteString(tw, entry.contents); err != nil {
				t.Errorf("test %d, entry %d: Failed writing contents: %v", i, j, err)
				continue testLoop
			}
		}
		// Only interested in Close failures for the small tests.
		if err := tw.Close(); err != nil && !big {
			t.Errorf("test %d: Failed closing archive: %v", i, err)
			continue testLoop
		}

		actual := buf.Bytes()
		if !bytes.Equal(expected, actual) {
			t.Errorf("test %d: Incorrect result: (-=expected, +=actual)\n%v",
				i, bytediff(expected, actual))
		}
		if testing.Short() { // The second test is expensive.
			break
		}
	}
}

func TestPax(t *testing.T) {
	// Create an archive with a large name
	fileinfo, err := os.Stat("testdata/small.txt")
	if err != nil {
		t.Fatal(err)
	}
	hdr, err := FileInfoHeader(fileinfo, "")
	if err != nil {
		t.Fatalf("FileInfoHeader: %v", err)
	}
	// Force a PAX long name to be written
	longName := strings.Repeat("ab", 100)
	contents := strings.Repeat(" ", int(hdr.Size))
	hdr.Name = longName
	var buf bytes.Buffer
	writer := NewWriter(&buf)
	if err := writer.WriteHeader(hdr); err != nil {
		t.Fatal(err)
	}
	if _, err = writer.Write([]byte(contents)); err != nil {
		t.Fatal(err)
	}
	if err := writer.Close(); err != nil {
		t.Fatal(err)
	}
	// Simple test to make sure PAX extensions are in effect
	if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.")) {
		t.Fatal("Expected at least one PAX header to be written.")
	}
	// Test that we can get a long name back out of the archive.
	reader := NewReader(&buf)
	hdr, err = reader.Next()
	if err != nil {
		t.Fatal(err)
	}
	if hdr.Name != longName {
		t.Fatal("Couldn't recover long file name")
	}
}

func TestPaxSymlink(t *testing.T) {
	// Create an archive with a large linkname
	fileinfo, err := os.Stat("testdata/small.txt")
	if err != nil {
		t.Fatal(err)
	}
	hdr, err := FileInfoHeader(fileinfo, "")
	if err != nil {
		t.Fatalf("FileInfoHeader: %v", err)
	}
	hdr.Typeflag = TypeSymlink
	// Force a PAX long linkname to be written
	longLinkname := strings.Repeat("1234567890/1234567890", 10)
	hdr.Linkname = longLinkname

	hdr.Size = 0
	var buf bytes.Buffer
	writer := NewWriter(&buf)
	if err := writer.WriteHeader(hdr); err != nil {
		t.Fatal(err)
	}
	if err := writer.Close(); err != nil {
		t.Fatal(err)
	}
	// Simple test to make sure PAX extensions are in effect
	if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.")) {
		t.Fatal("Expected at least one PAX header to be written.")
	}
	// Test that we can get a long link name back out of the archive.
	reader := NewReader(&buf)
	hdr, err = reader.Next()
	if err != nil {
		t.Fatal(err)
	}
	if hdr.Linkname != longLinkname {
		t.Fatal("Couldn't recover long link name")
	}
}

func TestPaxNonAscii(t *testing.T) {
	// Create an archive with non-ASCII names. These should trigger a pax header
	// because pax headers have a defined utf-8 encoding.
	fileinfo, err := os.Stat("testdata/small.txt")
	if err != nil {
		t.Fatal(err)
	}

	hdr, err := FileInfoHeader(fileinfo, "")
	if err != nil {
		t.Fatalf("FileInfoHeader: %v", err)
	}

	// some sample data
	chineseFilename := "文件名"
	chineseGroupname := "組"
	chineseUsername := "用戶名"

	hdr.Name = chineseFilename
	hdr.Gname = chineseGroupname
	hdr.Uname = chineseUsername

	contents := strings.Repeat(" ", int(hdr.Size))

	var buf bytes.Buffer
	writer := NewWriter(&buf)
	if err := writer.WriteHeader(hdr); err != nil {
		t.Fatal(err)
	}
	if _, err = writer.Write([]byte(contents)); err != nil {
		t.Fatal(err)
	}
	if err := writer.Close(); err != nil {
		t.Fatal(err)
	}
	// Simple test to make sure PAX extensions are in effect
	if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.")) {
		t.Fatal("Expected at least one PAX header to be written.")
	}
	// Test that we can get the unicode names back out of the archive.
	reader := NewReader(&buf)
	hdr, err = reader.Next()
	if err != nil {
		t.Fatal(err)
	}
	if hdr.Name != chineseFilename {
		t.Fatal("Couldn't recover unicode name")
	}
	if hdr.Gname != chineseGroupname {
		t.Fatal("Couldn't recover unicode group")
	}
	if hdr.Uname != chineseUsername {
		t.Fatal("Couldn't recover unicode user")
	}
}

func TestPAXHeader(t *testing.T) {
	medName := strings.Repeat("CD", 50)
	longName := strings.Repeat("AB", 100)
	paxTests := [][2]string{
		{paxPath + "=/etc/hosts", "19 path=/etc/hosts\n"},
		{"a=b", "6 a=b\n"},          // Single digit length
		{"a=names", "11 a=names\n"}, // Test case involving carries
		{paxPath + "=" + longName, fmt.Sprintf("210 path=%s\n", longName)},
		{paxPath + "=" + medName, fmt.Sprintf("110 path=%s\n", medName)}}

	for _, test := range paxTests {
		key, expected := test[0], test[1]
		if result := paxHeader(key); result != expected {
			t.Fatalf("paxHeader: got %s, expected %s", result, expected)
		}
	}
}

func TestUSTARLongName(t *testing.T) {
	// Create an archive with a path that failed to split with USTAR extension in previous versions.
	fileinfo, err := os.Stat("testdata/small.txt")
	if err != nil {
		t.Fatal(err)
	}
	hdr, err := FileInfoHeader(fileinfo, "")
	if err != nil {
		t.Fatalf("FileInfoHeader: %v", err)
	}
	hdr.Typeflag = TypeDir
	// Force a long name to be written. The name was taken from a practical example
	// that failed, with nearly every character replaced by a digit to anonymize the sample.
	longName := "/0000_0000000/00000-000000000/0000_0000000/00000-0000000000000/0000_0000000/00000-0000000-00000000/0000_0000000/00000000/0000_0000000/000/0000_0000000/00000000v00/0000_0000000/000000/0000_0000000/0000000/0000_0000000/00000y-00/0000/0000/00000000/0x000000/"
	hdr.Name = longName

	hdr.Size = 0
	var buf bytes.Buffer
	writer := NewWriter(&buf)
	if err := writer.WriteHeader(hdr); err != nil {
		t.Fatal(err)
	}
	if err := writer.Close(); err != nil {
		t.Fatal(err)
	}
	// Test that we can get the long name back out of the archive.
	reader := NewReader(&buf)
	hdr, err = reader.Next()
	if err != nil {
		t.Fatal(err)
	}
	if hdr.Name != longName {
		t.Fatal("Couldn't recover long name")
	}
}
Some files were not shown because too many files have changed in this diff.