
Remove engine.Status and replace it with standard go error

Signed-off-by: Antonio Murdaca <me@runcom.ninja>
Antonio Murdaca committed 10 years ago
parent
commit
c79b9bab54
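
The whole commit applies one mechanical pattern to every registered job handler: drop the engine-specific return type in favor of a plain Go error. A minimal sketch of that pattern is below; the Job type is a simplified stand-in for engine.Job, not the real definition, and the old signature is shown only in the comment.

package main

import "fmt"

// Job is a stand-in for engine.Job: just enough fields to show the pattern.
type Job struct {
	Name string
	Args []string
}

// Old shape (comment only): handlers returned an engine-specific status and
// reported failures through job.Errorf / job.Error:
//
//	func handler(job *engine.Job) engine.Status {
//		if len(job.Args) == 0 {
//			return job.Errorf("usage: %s ARG", job.Name)
//		}
//		return engine.StatusOK
//	}

// New shape: a handler is a plain Go function returning error.
func handler(job *Job) error {
	if len(job.Args) == 0 {
		return fmt.Errorf("usage: %s ARG", job.Name)
	}
	return nil
}

func main() {
	if err := handler(&Job{Name: "example"}); err != nil {
		fmt.Println("job failed:", err)
	}
}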

+ 6 - 6
api/server/server.go

@@ -1097,7 +1097,7 @@ func postBuild(eng *engine.Engine, version version.Version, w http.ResponseWrite
 			select {
 			case <-finished:
 			case <-closeNotifier.CloseNotify():
-				log.Infof("Client disconnected, cancelling job: %v", job)
+				log.Infof("Client disconnected, cancelling job: %s", job.Name)
 				job.Cancel()
 			}
 		}()
@@ -1581,9 +1581,9 @@ type Server interface {
 
 // ServeApi loops through all of the protocols sent in to docker and spawns
 // off a go routine to setup a serving http.Server for each.
-func ServeApi(job *engine.Job) engine.Status {
+func ServeApi(job *engine.Job) error {
 	if len(job.Args) == 0 {
-		return job.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name)
+		return fmt.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name)
 	}
 	var (
 		protoAddrs = job.Args
@@ -1594,7 +1594,7 @@ func ServeApi(job *engine.Job) engine.Status {
 	for _, protoAddr := range protoAddrs {
 		protoAddrParts := strings.SplitN(protoAddr, "://", 2)
 		if len(protoAddrParts) != 2 {
-			return job.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name)
+			return fmt.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name)
 		}
 		go func() {
 			log.Infof("Listening for HTTP on %s (%s)", protoAddrParts[0], protoAddrParts[1])
@@ -1618,9 +1618,9 @@ func ServeApi(job *engine.Job) engine.Status {
 	for i := 0; i < len(protoAddrs); i++ {
 		err := <-chErrors
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 	}
 
-	return engine.StatusOK
+	return nil
 }
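
For reference, ServeApi keeps its existing fan-out structure; only the error reporting changes. Below is a self-contained sketch of that structure under the new signature: one goroutine per listen address, errors funneled through a channel, and the first failure returned as an ordinary error. serveOne is a placeholder for the per-protocol http.Server setup in the real code, and the addresses in main are illustrative only.

package main

import (
	"fmt"
	"strings"
)

// serveOne stands in for building and running an http.Server for one protocol.
func serveOne(proto, addr string) error {
	fmt.Printf("listening for HTTP on %s (%s)\n", addr, proto)
	return nil
}

func serveAPI(protoAddrs []string) error {
	chErrors := make(chan error, len(protoAddrs))
	for _, protoAddr := range protoAddrs {
		parts := strings.SplitN(protoAddr, "://", 2)
		if len(parts) != 2 {
			return fmt.Errorf("usage: PROTO://ADDR [PROTO://ADDR ...]")
		}
		go func(proto, addr string) {
			chErrors <- serveOne(proto, addr)
		}(parts[0], parts[1])
	}
	// Wait for every listener and surface the first error as a plain Go error.
	for range protoAddrs {
		if err := <-chErrors; err != nil {
			return err
		}
	}
	return nil
}

func main() {
	if err := serveAPI([]string{"tcp://127.0.0.1:2375", "unix:///var/run/docker.sock"}); err != nil {
		fmt.Println("serve error:", err)
	}
}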

+ 2 - 2
api/server/server_linux.go

@@ -90,7 +90,7 @@ func serveFd(addr string, job *engine.Job) error {
 }
 
 // Called through eng.Job("acceptconnections")
-func AcceptConnections(job *engine.Job) engine.Status {
+func AcceptConnections(job *engine.Job) error {
 	// Tell the init daemon we are accepting requests
 	go systemd.SdNotify("READY=1")
 
@@ -99,5 +99,5 @@ func AcceptConnections(job *engine.Job) engine.Status {
 		close(activationLock)
 	}
 
-	return engine.StatusOK
+	return nil
 }

+ 40 - 40
api/server/server_unit_test.go

@@ -63,7 +63,7 @@ func TesthttpError(t *testing.T) {
 func TestGetVersion(t *testing.T) {
 	eng := engine.New()
 	var called bool
-	eng.Register("version", func(job *engine.Job) engine.Status {
+	eng.Register("version", func(job *engine.Job) error {
 		called = true
 		v := &engine.Env{}
 		v.SetJson("Version", "42.1")
@@ -72,9 +72,9 @@ func TestGetVersion(t *testing.T) {
 		v.Set("Os", "Linux")
 		v.Set("Arch", "x86_64")
 		if _, err := v.WriteTo(job.Stdout); err != nil {
-			return job.Error(err)
+			return err
 		}
-		return engine.StatusOK
+		return nil
 	})
 	r := serveRequest("GET", "/version", nil, eng, t)
 	if !called {
@@ -92,15 +92,15 @@ func TestGetVersion(t *testing.T) {
 func TestGetInfo(t *testing.T) {
 	eng := engine.New()
 	var called bool
-	eng.Register("info", func(job *engine.Job) engine.Status {
+	eng.Register("info", func(job *engine.Job) error {
 		called = true
 		v := &engine.Env{}
 		v.SetInt("Containers", 1)
 		v.SetInt("Images", 42000)
 		if _, err := v.WriteTo(job.Stdout); err != nil {
-			return job.Error(err)
+			return err
 		}
-		return engine.StatusOK
+		return nil
 	})
 	r := serveRequest("GET", "/info", nil, eng, t)
 	if !called {
@@ -119,13 +119,13 @@ func TestGetInfo(t *testing.T) {
 func TestGetImagesJSON(t *testing.T) {
 	eng := engine.New()
 	var called bool
-	eng.Register("images", func(job *engine.Job) engine.Status {
+	eng.Register("images", func(job *engine.Job) error {
 		called = true
 		v := createEnvFromGetImagesJSONStruct(sampleImage)
 		if _, err := v.WriteTo(job.Stdout); err != nil {
-			return job.Error(err)
+			return err
 		}
-		return engine.StatusOK
+		return nil
 	})
 	r := serveRequest("GET", "/images/json", nil, eng, t)
 	if !called {
@@ -145,9 +145,9 @@ func TestGetImagesJSON(t *testing.T) {
 func TestGetImagesJSONFilter(t *testing.T) {
 	eng := engine.New()
 	filter := "nothing"
-	eng.Register("images", func(job *engine.Job) engine.Status {
+	eng.Register("images", func(job *engine.Job) error {
 		filter = job.Getenv("filter")
-		return engine.StatusOK
+		return nil
 	})
 	serveRequest("GET", "/images/json?filter=aaaa", nil, eng, t)
 	if filter != "aaaa" {
@@ -158,9 +158,9 @@ func TestGetImagesJSONFilter(t *testing.T) {
 func TestGetImagesJSONFilters(t *testing.T) {
 	eng := engine.New()
 	filter := "nothing"
-	eng.Register("images", func(job *engine.Job) engine.Status {
+	eng.Register("images", func(job *engine.Job) error {
 		filter = job.Getenv("filters")
-		return engine.StatusOK
+		return nil
 	})
 	serveRequest("GET", "/images/json?filters=nnnn", nil, eng, t)
 	if filter != "nnnn" {
@@ -171,9 +171,9 @@ func TestGetImagesJSONFilters(t *testing.T) {
 func TestGetImagesJSONAll(t *testing.T) {
 	eng := engine.New()
 	allFilter := "-1"
-	eng.Register("images", func(job *engine.Job) engine.Status {
+	eng.Register("images", func(job *engine.Job) error {
 		allFilter = job.Getenv("all")
-		return engine.StatusOK
+		return nil
 	})
 	serveRequest("GET", "/images/json?all=1", nil, eng, t)
 	if allFilter != "1" {
@@ -184,14 +184,14 @@ func TestGetImagesJSONAll(t *testing.T) {
 func TestGetImagesJSONLegacyFormat(t *testing.T) {
 	eng := engine.New()
 	var called bool
-	eng.Register("images", func(job *engine.Job) engine.Status {
+	eng.Register("images", func(job *engine.Job) error {
 		called = true
 		outsLegacy := engine.NewTable("Created", 0)
 		outsLegacy.Add(createEnvFromGetImagesJSONStruct(sampleImage))
 		if _, err := outsLegacy.WriteListTo(job.Stdout); err != nil {
-			return job.Error(err)
+			return err
 		}
-		return engine.StatusOK
+		return nil
 	})
 	r := serveRequestUsingVersion("GET", "/images/json", "1.6", nil, eng, t)
 	if !called {
@@ -219,7 +219,7 @@ func TestGetContainersByName(t *testing.T) {
 	eng := engine.New()
 	name := "container_name"
 	var called bool
-	eng.Register("container_inspect", func(job *engine.Job) engine.Status {
+	eng.Register("container_inspect", func(job *engine.Job) error {
 		called = true
 		if job.Args[0] != name {
 			t.Errorf("name != '%s': %#v", name, job.Args[0])
@@ -232,9 +232,9 @@ func TestGetContainersByName(t *testing.T) {
 		v := &engine.Env{}
 		v.SetBool("dirty", true)
 		if _, err := v.WriteTo(job.Stdout); err != nil {
-			return job.Error(err)
+			return err
 		}
-		return engine.StatusOK
+		return nil
 	})
 	r := serveRequest("GET", "/containers/"+name+"/json", nil, eng, t)
 	if !called {
@@ -253,7 +253,7 @@ func TestGetContainersByName(t *testing.T) {
 func TestGetEvents(t *testing.T) {
 	eng := engine.New()
 	var called bool
-	eng.Register("events", func(job *engine.Job) engine.Status {
+	eng.Register("events", func(job *engine.Job) error {
 		called = true
 		since := job.Getenv("since")
 		if since != "1" {
@@ -267,9 +267,9 @@ func TestGetEvents(t *testing.T) {
 		v.Set("since", since)
 		v.Set("until", until)
 		if _, err := v.WriteTo(job.Stdout); err != nil {
-			return job.Error(err)
+			return err
 		}
-		return engine.StatusOK
+		return nil
 	})
 	r := serveRequest("GET", "/events?since=1&until=0", nil, eng, t)
 	if !called {
@@ -295,7 +295,7 @@ func TestLogs(t *testing.T) {
 	eng := engine.New()
 	var inspect bool
 	var logs bool
-	eng.Register("container_inspect", func(job *engine.Job) engine.Status {
+	eng.Register("container_inspect", func(job *engine.Job) error {
 		inspect = true
 		if len(job.Args) == 0 {
 			t.Fatal("Job arguments is empty")
@@ -303,10 +303,10 @@ func TestLogs(t *testing.T) {
 		if job.Args[0] != "test" {
 			t.Fatalf("Container name %s, must be test", job.Args[0])
 		}
-		return engine.StatusOK
+		return nil
 	})
 	expected := "logs"
-	eng.Register("logs", func(job *engine.Job) engine.Status {
+	eng.Register("logs", func(job *engine.Job) error {
 		logs = true
 		if len(job.Args) == 0 {
 			t.Fatal("Job arguments is empty")
@@ -331,7 +331,7 @@ func TestLogs(t *testing.T) {
 			t.Fatalf("timestamps %s, must be 1", timestamps)
 		}
 		job.Stdout.Write([]byte(expected))
-		return engine.StatusOK
+		return nil
 	})
 	r := serveRequest("GET", "/containers/test/logs?follow=1&stdout=1&timestamps=1", nil, eng, t)
 	if r.Code != http.StatusOK {
@@ -353,7 +353,7 @@ func TestLogsNoStreams(t *testing.T) {
 	eng := engine.New()
 	var inspect bool
 	var logs bool
-	eng.Register("container_inspect", func(job *engine.Job) engine.Status {
+	eng.Register("container_inspect", func(job *engine.Job) error {
 		inspect = true
 		if len(job.Args) == 0 {
 			t.Fatal("Job arguments is empty")
@@ -361,11 +361,11 @@ func TestLogsNoStreams(t *testing.T) {
 		if job.Args[0] != "test" {
 			t.Fatalf("Container name %s, must be test", job.Args[0])
 		}
-		return engine.StatusOK
+		return nil
 	})
-	eng.Register("logs", func(job *engine.Job) engine.Status {
+	eng.Register("logs", func(job *engine.Job) error {
 		logs = true
-		return engine.StatusOK
+		return nil
 	})
 	r := serveRequest("GET", "/containers/test/logs", nil, eng, t)
 	if r.Code != http.StatusBadRequest {
@@ -388,7 +388,7 @@ func TestGetImagesHistory(t *testing.T) {
 	eng := engine.New()
 	imageName := "docker-test-image"
 	var called bool
-	eng.Register("history", func(job *engine.Job) engine.Status {
+	eng.Register("history", func(job *engine.Job) error {
 		called = true
 		if len(job.Args) == 0 {
 			t.Fatal("Job arguments is empty")
@@ -398,9 +398,9 @@ func TestGetImagesHistory(t *testing.T) {
 		}
 		v := &engine.Env{}
 		if _, err := v.WriteTo(job.Stdout); err != nil {
-			return job.Error(err)
+			return err
 		}
-		return engine.StatusOK
+		return nil
 	})
 	r := serveRequest("GET", "/images/"+imageName+"/history", nil, eng, t)
 	if !called {
@@ -418,7 +418,7 @@ func TestGetImagesByName(t *testing.T) {
 	eng := engine.New()
 	name := "image_name"
 	var called bool
-	eng.Register("image_inspect", func(job *engine.Job) engine.Status {
+	eng.Register("image_inspect", func(job *engine.Job) error {
 		called = true
 		if job.Args[0] != name {
 			t.Fatalf("name != '%s': %#v", name, job.Args[0])
@@ -431,9 +431,9 @@ func TestGetImagesByName(t *testing.T) {
 		v := &engine.Env{}
 		v.SetBool("dirty", true)
 		if _, err := v.WriteTo(job.Stdout); err != nil {
-			return job.Error(err)
+			return err
 		}
-		return engine.StatusOK
+		return nil
 	})
 	r := serveRequest("GET", "/images/"+name+"/json", nil, eng, t)
 	if !called {
@@ -455,7 +455,7 @@ func TestDeleteContainers(t *testing.T) {
 	eng := engine.New()
 	name := "foo"
 	var called bool
-	eng.Register("rm", func(job *engine.Job) engine.Status {
+	eng.Register("rm", func(job *engine.Job) error {
 		called = true
 		if len(job.Args) == 0 {
 			t.Fatalf("Job arguments is empty")
@@ -463,7 +463,7 @@ func TestDeleteContainers(t *testing.T) {
 		if job.Args[0] != name {
 			t.Fatalf("name != '%s': %#v", name, job.Args[0])
 		}
-		return engine.StatusOK
+		return nil
 	})
 	r := serveRequest("DELETE", "/containers/"+name, nil, eng, t)
 	if !called {

+ 21 - 20
builder/job.go

@@ -3,6 +3,7 @@ package builder
 import (
 	"bytes"
 	"encoding/json"
+	"fmt"
 	"io"
 	"io/ioutil"
 	"os"
@@ -44,9 +45,9 @@ func (b *BuilderJob) Install() {
 	b.Engine.Register("build_config", b.CmdBuildConfig)
 }
 
-func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
+func (b *BuilderJob) CmdBuild(job *engine.Job) error {
 	if len(job.Args) != 0 {
-		return job.Errorf("Usage: %s\n", job.Name)
+		return fmt.Errorf("Usage: %s\n", job.Name)
 	}
 	var (
 		dockerfileName = job.Getenv("dockerfile")
@@ -73,11 +74,11 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
 	repoName, tag = parsers.ParseRepositoryTag(repoName)
 	if repoName != "" {
 		if err := registry.ValidateRepositoryName(repoName); err != nil {
-			return job.Error(err)
+			return err
 		}
 		if len(tag) > 0 {
 			if err := graph.ValidateTagName(tag); err != nil {
-				return job.Error(err)
+				return err
 			}
 		}
 	}
@@ -90,28 +91,28 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
 		}
 		root, err := ioutil.TempDir("", "docker-build-git")
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 		defer os.RemoveAll(root)
 
 		if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil {
-			return job.Errorf("Error trying to use git: %s (%s)", err, output)
+			return fmt.Errorf("Error trying to use git: %s (%s)", err, output)
 		}
 
 		c, err := archive.Tar(root, archive.Uncompressed)
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 		context = c
 	} else if urlutil.IsURL(remoteURL) {
 		f, err := utils.Download(remoteURL)
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 		defer f.Body.Close()
 		dockerFile, err := ioutil.ReadAll(f.Body)
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 
 		// When we're downloading just a Dockerfile put it in
@@ -120,7 +121,7 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
 
 		c, err := archive.Generate(dockerfileName, string(dockerFile))
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 		context = c
 	}
@@ -158,18 +159,18 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
 
 	id, err := builder.Run(context)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	if repoName != "" {
 		b.Daemon.Repositories().Set(repoName, tag, id, true)
 	}
-	return engine.StatusOK
+	return nil
 }
 
-func (b *BuilderJob) CmdBuildConfig(job *engine.Job) engine.Status {
+func (b *BuilderJob) CmdBuildConfig(job *engine.Job) error {
 	if len(job.Args) != 0 {
-		return job.Errorf("Usage: %s\n", job.Name)
+		return fmt.Errorf("Usage: %s\n", job.Name)
 	}
 
 	var (
@@ -178,18 +179,18 @@ func (b *BuilderJob) CmdBuildConfig(job *engine.Job) engine.Status {
 	)
 
 	if err := job.GetenvJson("config", &newConfig); err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	ast, err := parser.Parse(bytes.NewBufferString(strings.Join(changes, "\n")))
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	// ensure that the commands are valid
 	for _, n := range ast.Children {
 		if !validCommitCommands[n.Value] {
-			return job.Errorf("%s is not a valid change command", n.Value)
+			return fmt.Errorf("%s is not a valid change command", n.Value)
 		}
 	}
 
@@ -204,12 +205,12 @@ func (b *BuilderJob) CmdBuildConfig(job *engine.Job) engine.Status {
 
 	for i, n := range ast.Children {
 		if err := builder.dispatch(i, n); err != nil {
-			return job.Error(err)
+			return err
 		}
 	}
 
 	if err := json.NewEncoder(job.Stdout).Encode(builder.Config); err != nil {
-		return job.Error(err)
+		return err
 	}
-	return engine.StatusOK
+	return nil
 }

+ 3 - 3
builtins/builtins.go

@@ -57,7 +57,7 @@ func daemon(eng *engine.Engine) error {
 }
 
 // builtins jobs independent of any subsystem
-func dockerVersion(job *engine.Job) engine.Status {
+func dockerVersion(job *engine.Job) error {
 	v := &engine.Env{}
 	v.SetJson("Version", dockerversion.VERSION)
 	v.SetJson("ApiVersion", api.APIVERSION)
@@ -69,7 +69,7 @@ func dockerVersion(job *engine.Job) engine.Status {
 		v.Set("KernelVersion", kernelVersion.String())
 	}
 	if _, err := v.WriteTo(job.Stdout); err != nil {
-		return job.Error(err)
+		return err
 	}
-	return engine.StatusOK
+	return nil
 }

+ 5 - 4
daemon/attach.go

@@ -2,6 +2,7 @@ package daemon
 
 import (
 	"encoding/json"
+	"fmt"
 	"io"
 	"os"
 	"sync"
@@ -14,9 +15,9 @@ import (
 	"github.com/docker/docker/utils"
 )
 
-func (daemon *Daemon) ContainerAttach(job *engine.Job) engine.Status {
+func (daemon *Daemon) ContainerAttach(job *engine.Job) error {
 	if len(job.Args) != 1 {
-		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
+		return fmt.Errorf("Usage: %s CONTAINER\n", job.Name)
 	}
 
 	var (
@@ -30,7 +31,7 @@ func (daemon *Daemon) ContainerAttach(job *engine.Job) engine.Status {
 
 	container, err := daemon.Get(name)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	//logs
@@ -108,7 +109,7 @@ func (daemon *Daemon) ContainerAttach(job *engine.Job) engine.Status {
 			container.WaitStop(-1 * time.Second)
 		}
 	}
-	return engine.StatusOK
+	return nil
 }
 
 func (daemon *Daemon) Attach(streamConfig *StreamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error {

+ 11 - 9
daemon/changes.go

@@ -1,37 +1,39 @@
 package daemon
 
 import (
+	"fmt"
+
 	"github.com/docker/docker/engine"
 )
 
-func (daemon *Daemon) ContainerChanges(job *engine.Job) engine.Status {
+func (daemon *Daemon) ContainerChanges(job *engine.Job) error {
 	if n := len(job.Args); n != 1 {
-		return job.Errorf("Usage: %s CONTAINER", job.Name)
+		return fmt.Errorf("Usage: %s CONTAINER", job.Name)
 	}
 	name := job.Args[0]
 
-	container, error := daemon.Get(name)
-	if error != nil {
-		return job.Error(error)
+	container, err := daemon.Get(name)
+	if err != nil {
+		return err
 	}
 
 	outs := engine.NewTable("", 0)
 	changes, err := container.Changes()
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	for _, change := range changes {
 		out := &engine.Env{}
 		if err := out.Import(change); err != nil {
-			return job.Error(err)
+			return err
 		}
 		outs.Add(out)
 	}
 
 	if _, err := outs.WriteListTo(job.Stdout); err != nil {
-		return job.Error(err)
+		return err
 	}
 
-	return engine.StatusOK
+	return nil
 }

+ 9 - 8
daemon/commit.go

@@ -3,21 +3,22 @@ package daemon
 import (
 	"bytes"
 	"encoding/json"
+	"fmt"
 
 	"github.com/docker/docker/engine"
 	"github.com/docker/docker/image"
 	"github.com/docker/docker/runconfig"
 )
 
-func (daemon *Daemon) ContainerCommit(job *engine.Job) engine.Status {
+func (daemon *Daemon) ContainerCommit(job *engine.Job) error {
 	if len(job.Args) != 1 {
-		return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name)
+		return fmt.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name)
 	}
 	name := job.Args[0]
 
 	container, err := daemon.Get(name)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	var (
@@ -33,22 +34,22 @@ func (daemon *Daemon) ContainerCommit(job *engine.Job) engine.Status {
 	buildConfigJob.Setenv("config", job.Getenv("config"))
 
 	if err := buildConfigJob.Run(); err != nil {
-		return job.Error(err)
+		return err
 	}
 	if err := json.NewDecoder(stdoutBuffer).Decode(&newConfig); err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	if err := runconfig.Merge(&newConfig, config); err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	img, err := daemon.Commit(container, job.Getenv("repo"), job.Getenv("tag"), job.Getenv("comment"), job.Getenv("author"), job.GetenvBool("pause"), &newConfig)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	job.Printf("%s\n", img.ID)
-	return engine.StatusOK
+	return nil
 }
 
 // Commit creates a new filesystem image from the current state of a container.

+ 7 - 6
daemon/copy.go

@@ -1,14 +1,15 @@
 package daemon
 
 import (
+	"fmt"
 	"io"
 
 	"github.com/docker/docker/engine"
 )
 
-func (daemon *Daemon) ContainerCopy(job *engine.Job) engine.Status {
+func (daemon *Daemon) ContainerCopy(job *engine.Job) error {
 	if len(job.Args) != 2 {
-		return job.Errorf("Usage: %s CONTAINER RESOURCE\n", job.Name)
+		return fmt.Errorf("Usage: %s CONTAINER RESOURCE\n", job.Name)
 	}
 
 	var (
@@ -18,17 +19,17 @@ func (daemon *Daemon) ContainerCopy(job *engine.Job) engine.Status {
 
 	container, err := daemon.Get(name)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	data, err := container.Copy(resource)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	defer data.Close()
 
 	if _, err := io.Copy(job.Stdout, data); err != nil {
-		return job.Error(err)
+		return err
 	}
-	return engine.StatusOK
+	return nil
 }

+ 14 - 13
daemon/create.go

@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"strings"
 
+	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/engine"
 	"github.com/docker/docker/graph"
 	"github.com/docker/docker/image"
@@ -12,36 +13,36 @@ import (
 	"github.com/docker/libcontainer/label"
 )
 
-func (daemon *Daemon) ContainerCreate(job *engine.Job) engine.Status {
+func (daemon *Daemon) ContainerCreate(job *engine.Job) error {
 	var name string
 	if len(job.Args) == 1 {
 		name = job.Args[0]
 	} else if len(job.Args) > 1 {
-		return job.Errorf("Usage: %s", job.Name)
+		return fmt.Errorf("Usage: %s", job.Name)
 	}
 
 	config := runconfig.ContainerConfigFromJob(job)
 	hostConfig := runconfig.ContainerHostConfigFromJob(job)
 
 	if len(hostConfig.LxcConf) > 0 && !strings.Contains(daemon.ExecutionDriver().Name(), "lxc") {
-		return job.Errorf("Cannot use --lxc-conf with execdriver: %s", daemon.ExecutionDriver().Name())
+		return fmt.Errorf("Cannot use --lxc-conf with execdriver: %s", daemon.ExecutionDriver().Name())
 	}
 	if hostConfig.Memory != 0 && hostConfig.Memory < 4194304 {
-		return job.Errorf("Minimum memory limit allowed is 4MB")
+		return fmt.Errorf("Minimum memory limit allowed is 4MB")
 	}
 	if hostConfig.Memory > 0 && !daemon.SystemConfig().MemoryLimit {
-		job.Errorf("Your kernel does not support memory limit capabilities. Limitation discarded.\n")
+		log.Printf("Your kernel does not support memory limit capabilities. Limitation discarded.\n")
 		hostConfig.Memory = 0
 	}
 	if hostConfig.Memory > 0 && hostConfig.MemorySwap != -1 && !daemon.SystemConfig().SwapLimit {
-		job.Errorf("Your kernel does not support swap limit capabilities. Limitation discarded.\n")
+		log.Printf("Your kernel does not support swap limit capabilities. Limitation discarded.\n")
 		hostConfig.MemorySwap = -1
 	}
 	if hostConfig.Memory > 0 && hostConfig.MemorySwap > 0 && hostConfig.MemorySwap < hostConfig.Memory {
-		return job.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage.\n")
+		return fmt.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage.\n")
 	}
 	if hostConfig.Memory == 0 && hostConfig.MemorySwap > 0 {
-		return job.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage.\n")
+		return fmt.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage.\n")
 	}
 
 	container, buildWarnings, err := daemon.Create(config, hostConfig, name)
@@ -51,22 +52,22 @@ func (daemon *Daemon) ContainerCreate(job *engine.Job) engine.Status {
 			if tag == "" {
 				tag = graph.DEFAULTTAG
 			}
-			return job.Errorf("No such image: %s (tag: %s)", config.Image, tag)
+			return fmt.Errorf("No such image: %s (tag: %s)", config.Image, tag)
 		}
-		return job.Error(err)
+		return err
 	}
 	if !container.Config.NetworkDisabled && daemon.SystemConfig().IPv4ForwardingDisabled {
-		job.Errorf("IPv4 forwarding is disabled.\n")
+		log.Printf("IPv4 forwarding is disabled.\n")
 	}
 	container.LogEvent("create")
 
 	job.Printf("%s\n", container.ID)
 
 	for _, warning := range buildWarnings {
-		job.Errorf("%s\n", warning)
+		log.Printf("%s\n", warning)
 	}
 
-	return engine.StatusOK
+	return nil
 }
 
 // Create creates a new container from the given configuration with a given name.
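
A side effect of the conversion in ContainerCreate is that the split between hard failures and warnings becomes explicit: invalid input now returns an error and aborts the job, while tolerable conditions are only logged (logrus in the diff) and the offending setting is discarded. A runnable sketch of that split, with a simplified HostConfig standing in for the real daemon types and the standard library log package standing in for logrus:

package main

import (
	"fmt"
	"log"
)

type HostConfig struct {
	Memory int64
}

// validateMemory mirrors the pattern: invalid input aborts with an error,
// while an unsupported-but-tolerable condition is logged as a warning and
// the setting is discarded.
func validateMemory(hc *HostConfig, kernelSupportsMemLimit bool) error {
	if hc.Memory != 0 && hc.Memory < 4194304 {
		return fmt.Errorf("Minimum memory limit allowed is 4MB")
	}
	if hc.Memory > 0 && !kernelSupportsMemLimit {
		log.Printf("Your kernel does not support memory limit capabilities. Limitation discarded.")
		hc.Memory = 0
	}
	return nil
}

func main() {
	hc := &HostConfig{Memory: 8 * 1024 * 1024}
	if err := validateMemory(hc, false); err != nil {
		log.Fatal(err)
	}
	fmt.Println("effective memory limit:", hc.Memory) // 0: the warning path discarded the limit
}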

+ 14 - 12
daemon/delete.go

@@ -9,9 +9,9 @@ import (
 	"github.com/docker/docker/engine"
 )
 
-func (daemon *Daemon) ContainerRm(job *engine.Job) engine.Status {
+func (daemon *Daemon) ContainerRm(job *engine.Job) error {
 	if len(job.Args) != 1 {
-		return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name)
+		return fmt.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name)
 	}
 	name := job.Args[0]
 	removeVolume := job.GetenvBool("removeVolume")
@@ -20,21 +20,23 @@ func (daemon *Daemon) ContainerRm(job *engine.Job) engine.Status {
 
 	container, err := daemon.Get(name)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	if removeLink {
 		name, err := GetFullContainerName(name)
 		if err != nil {
-			job.Error(err)
+			return err
+			// TODO: why was just job.Error(err) without return if the function cannot continue w/o container name?
+			//job.Error(err)
 		}
 		parent, n := path.Split(name)
 		if parent == "/" {
-			return job.Errorf("Conflict, cannot remove the default name of the container")
+			return fmt.Errorf("Conflict, cannot remove the default name of the container")
 		}
 		pe := daemon.ContainerGraph().Get(parent)
 		if pe == nil {
-			return job.Errorf("Cannot get parent %s for name %s", parent, name)
+			return fmt.Errorf("Cannot get parent %s for name %s", parent, name)
 		}
 		parentContainer, _ := daemon.Get(pe.ID())
 
@@ -43,9 +45,9 @@ func (daemon *Daemon) ContainerRm(job *engine.Job) engine.Status {
 		}
 
 		if err := daemon.ContainerGraph().Delete(name); err != nil {
-			return job.Error(err)
+			return err
 		}
-		return engine.StatusOK
+		return nil
 	}
 
 	if container != nil {
@@ -55,21 +57,21 @@ func (daemon *Daemon) ContainerRm(job *engine.Job) engine.Status {
 		if container.IsRunning() {
 			if forceRemove {
 				if err := container.Kill(); err != nil {
-					return job.Errorf("Could not kill running container, cannot remove - %v", err)
+					return fmt.Errorf("Could not kill running container, cannot remove - %v", err)
 				}
 			} else {
-				return job.Errorf("Conflict, You cannot remove a running container. Stop the container before attempting removal or use -f")
+				return fmt.Errorf("Conflict, You cannot remove a running container. Stop the container before attempting removal or use -f")
 			}
 		}
 		if err := daemon.Rm(container); err != nil {
-			return job.Errorf("Cannot destroy container %s: %s", name, err)
+			return fmt.Errorf("Cannot destroy container %s: %s", name, err)
 		}
 		container.LogEvent("destroy")
 		if removeVolume {
 			daemon.DeleteVolumes(container.VolumePaths())
 		}
 	}
-	return engine.StatusOK
+	return nil
 }
 
 func (daemon *Daemon) DeleteVolumes(volumeIDs map[string]struct{}) {

+ 13 - 13
daemon/exec.go

@@ -111,25 +111,25 @@ func (d *Daemon) getActiveContainer(name string) (*Container, error) {
 	return container, nil
 }
 
-func (d *Daemon) ContainerExecCreate(job *engine.Job) engine.Status {
+func (d *Daemon) ContainerExecCreate(job *engine.Job) error {
 	if len(job.Args) != 1 {
-		return job.Errorf("Usage: %s [options] container command [args]", job.Name)
+		return fmt.Errorf("Usage: %s [options] container command [args]", job.Name)
 	}
 
 	if strings.HasPrefix(d.execDriver.Name(), lxc.DriverName) {
-		return job.Error(lxc.ErrExec)
+		return lxc.ErrExec
 	}
 
 	var name = job.Args[0]
 
 	container, err := d.getActiveContainer(name)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	config, err := runconfig.ExecConfigFromJob(job)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	entrypoint, args := d.getEntrypointAndArgs(nil, config.Cmd)
@@ -157,12 +157,12 @@ func (d *Daemon) ContainerExecCreate(job *engine.Job) engine.Status {
 
 	job.Printf("%s\n", execConfig.ID)
 
-	return engine.StatusOK
+	return nil
 }
 
-func (d *Daemon) ContainerExecStart(job *engine.Job) engine.Status {
+func (d *Daemon) ContainerExecStart(job *engine.Job) error {
 	if len(job.Args) != 1 {
-		return job.Errorf("Usage: %s [options] exec", job.Name)
+		return fmt.Errorf("Usage: %s [options] exec", job.Name)
 	}
 
 	var (
@@ -173,7 +173,7 @@ func (d *Daemon) ContainerExecStart(job *engine.Job) engine.Status {
 
 	execConfig, err := d.getExecConfig(execName)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	func() {
@@ -185,7 +185,7 @@ func (d *Daemon) ContainerExecStart(job *engine.Job) engine.Status {
 		execConfig.Running = true
 	}()
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	log.Debugf("starting exec command %s in container %s", execConfig.ID, execConfig.Container.ID)
@@ -236,14 +236,14 @@ func (d *Daemon) ContainerExecStart(job *engine.Job) engine.Status {
 	select {
 	case err := <-attachErr:
 		if err != nil {
-			return job.Errorf("attach failed with error: %s", err)
+			return fmt.Errorf("attach failed with error: %s", err)
 		}
 		break
 	case err := <-execErr:
-		return job.Error(err)
+		return err
 	}
 
-	return engine.StatusOK
+	return nil
 }
 
 func (d *Daemon) Exec(c *Container, execConfig *execConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {

+ 7 - 6
daemon/export.go

@@ -1,33 +1,34 @@
 package daemon
 
 import (
+	"fmt"
 	"io"
 
 	"github.com/docker/docker/engine"
 )
 
-func (daemon *Daemon) ContainerExport(job *engine.Job) engine.Status {
+func (daemon *Daemon) ContainerExport(job *engine.Job) error {
 	if len(job.Args) != 1 {
-		return job.Errorf("Usage: %s container_id", job.Name)
+		return fmt.Errorf("Usage: %s container_id", job.Name)
 	}
 	name := job.Args[0]
 
 	container, err := daemon.Get(name)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	data, err := container.Export()
 	if err != nil {
-		return job.Errorf("%s: %s", name, err)
+		return fmt.Errorf("%s: %s", name, err)
 	}
 	defer data.Close()
 
 	// Stream the entire contents of the container (basically a volatile snapshot)
 	if _, err := io.Copy(job.Stdout, data); err != nil {
-		return job.Errorf("%s: %s", name, err)
+		return fmt.Errorf("%s: %s", name, err)
 	}
 	// FIXME: factor job-specific LogEvent to engine.Job.Run()
 	container.LogEvent("export")
-	return engine.StatusOK
+	return nil
 }

+ 6 - 6
daemon/image_delete.go

@@ -12,21 +12,21 @@ import (
 	"github.com/docker/docker/utils"
 )
 
-func (daemon *Daemon) ImageDelete(job *engine.Job) engine.Status {
+func (daemon *Daemon) ImageDelete(job *engine.Job) error {
 	if n := len(job.Args); n != 1 {
-		return job.Errorf("Usage: %s IMAGE", job.Name)
+		return fmt.Errorf("Usage: %s IMAGE", job.Name)
 	}
 	imgs := engine.NewTable("", 0)
 	if err := daemon.DeleteImage(job.Eng, job.Args[0], imgs, true, job.GetenvBool("force"), job.GetenvBool("noprune")); err != nil {
-		return job.Error(err)
+		return err
 	}
 	if len(imgs.Data) == 0 {
-		return job.Errorf("Conflict, %s wasn't deleted", job.Args[0])
+		return fmt.Errorf("Conflict, %s wasn't deleted", job.Args[0])
 	}
 	if _, err := imgs.WriteListTo(job.Stdout); err != nil {
-		return job.Error(err)
+		return err
 	}
-	return engine.StatusOK
+	return nil
 }
 
 // FIXME: make this private and use the job instead

+ 6 - 6
daemon/info.go

@@ -15,7 +15,7 @@ import (
 	"github.com/docker/docker/utils"
 )
 
-func (daemon *Daemon) CmdInfo(job *engine.Job) engine.Status {
+func (daemon *Daemon) CmdInfo(job *engine.Job) error {
 	images, _ := daemon.Graph().Map()
 	var imgcount int
 	if images == nil {
@@ -54,16 +54,16 @@ func (daemon *Daemon) CmdInfo(job *engine.Job) engine.Status {
 	cjob := job.Eng.Job("subscribers_count")
 	env, _ := cjob.Stdout.AddEnv()
 	if err := cjob.Run(); err != nil {
-		return job.Error(err)
+		return err
 	}
 	registryJob := job.Eng.Job("registry_config")
 	registryEnv, _ := registryJob.Stdout.AddEnv()
 	if err := registryJob.Run(); err != nil {
-		return job.Error(err)
+		return err
 	}
 	registryConfig := registry.ServiceConfig{}
 	if err := registryEnv.GetJson("config", &registryConfig); err != nil {
-		return job.Error(err)
+		return err
 	}
 	v := &engine.Env{}
 	v.SetJson("ID", daemon.ID)
@@ -104,7 +104,7 @@ func (daemon *Daemon) CmdInfo(job *engine.Job) engine.Status {
 	}
 	v.SetList("Labels", daemon.Config().Labels)
 	if _, err := v.WriteTo(job.Stdout); err != nil {
-		return job.Error(err)
+		return err
 	}
-	return engine.StatusOK
+	return nil
 }

+ 12 - 12
daemon/inspect.go

@@ -8,14 +8,14 @@ import (
 	"github.com/docker/docker/runconfig"
 )
 
-func (daemon *Daemon) ContainerInspect(job *engine.Job) engine.Status {
+func (daemon *Daemon) ContainerInspect(job *engine.Job) error {
 	if len(job.Args) != 1 {
-		return job.Errorf("usage: %s NAME", job.Name)
+		return fmt.Errorf("usage: %s NAME", job.Name)
 	}
 	name := job.Args[0]
 	container, err := daemon.Get(name)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	container.Lock()
@@ -26,10 +26,10 @@ func (daemon *Daemon) ContainerInspect(job *engine.Job) engine.Status {
 			HostConfig *runconfig.HostConfig
 		}{container, container.hostConfig})
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 		job.Stdout.Write(b)
-		return engine.StatusOK
+		return nil
 	}
 
 	out := &engine.Env{}
@@ -75,25 +75,25 @@ func (daemon *Daemon) ContainerInspect(job *engine.Job) engine.Status {
 
 	container.hostConfig.Links = nil
 	if _, err := out.WriteTo(job.Stdout); err != nil {
-		return job.Error(err)
+		return err
 	}
-	return engine.StatusOK
+	return nil
 }
 
-func (daemon *Daemon) ContainerExecInspect(job *engine.Job) engine.Status {
+func (daemon *Daemon) ContainerExecInspect(job *engine.Job) error {
 	if len(job.Args) != 1 {
-		return job.Errorf("usage: %s ID", job.Name)
+		return fmt.Errorf("usage: %s ID", job.Name)
 	}
 	id := job.Args[0]
 	eConfig, err := daemon.getExecConfig(id)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	b, err := json.Marshal(*eConfig)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	job.Stdout.Write(b)
-	return engine.StatusOK
+	return nil
 }

+ 8 - 7
daemon/kill.go

@@ -1,6 +1,7 @@
 package daemon
 
 import (
+	"fmt"
 	"strconv"
 	"strings"
 	"syscall"
@@ -13,9 +14,9 @@ import (
 // If no signal is given (sig 0), then Kill with SIGKILL and wait
 // for the container to exit.
 // If a signal is given, then just send it to the container and return.
-func (daemon *Daemon) ContainerKill(job *engine.Job) engine.Status {
+func (daemon *Daemon) ContainerKill(job *engine.Job) error {
 	if n := len(job.Args); n < 1 || n > 2 {
-		return job.Errorf("Usage: %s CONTAINER [SIGNAL]", job.Name)
+		return fmt.Errorf("Usage: %s CONTAINER [SIGNAL]", job.Name)
 	}
 	var (
 		name = job.Args[0]
@@ -34,27 +35,27 @@ func (daemon *Daemon) ContainerKill(job *engine.Job) engine.Status {
 		}
 
 		if sig == 0 {
-			return job.Errorf("Invalid signal: %s", job.Args[1])
+			return fmt.Errorf("Invalid signal: %s", job.Args[1])
 		}
 	}
 
 	container, err := daemon.Get(name)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	// If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait())
 	if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL {
 		if err := container.Kill(); err != nil {
-			return job.Errorf("Cannot kill container %s: %s", name, err)
+			return fmt.Errorf("Cannot kill container %s: %s", name, err)
 		}
 		container.LogEvent("kill")
 	} else {
 		// Otherwise, just send the requested signal
 		if err := container.KillSig(int(sig)); err != nil {
-			return job.Errorf("Cannot kill container %s: %s", name, err)
+			return fmt.Errorf("Cannot kill container %s: %s", name, err)
 		}
 		// FIXME: Add event for signals
 	}
-	return engine.StatusOK
+	return nil
 }

+ 8 - 8
daemon/list.go

@@ -20,7 +20,7 @@ func (daemon *Daemon) List() []*Container {
 	return daemon.containers.List()
 }
 
-func (daemon *Daemon) Containers(job *engine.Job) engine.Status {
+func (daemon *Daemon) Containers(job *engine.Job) error {
 	var (
 		foundBefore bool
 		displayed   int
@@ -36,13 +36,13 @@ func (daemon *Daemon) Containers(job *engine.Job) engine.Status {
 
 	psFilters, err := filters.FromParam(job.Getenv("filters"))
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	if i, ok := psFilters["exited"]; ok {
 		for _, value := range i {
 			code, err := strconv.Atoi(value)
 			if err != nil {
-				return job.Error(err)
+				return err
 			}
 			filt_exited = append(filt_exited, code)
 		}
@@ -65,14 +65,14 @@ func (daemon *Daemon) Containers(job *engine.Job) engine.Status {
 	if before != "" {
 		beforeCont, err = daemon.Get(before)
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 	}
 
 	if since != "" {
 		sinceCont, err = daemon.Get(since)
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 	}
 
@@ -170,14 +170,14 @@ func (daemon *Daemon) Containers(job *engine.Job) engine.Status {
 	for _, container := range daemon.List() {
 		if err := writeCont(container); err != nil {
 			if err != errLast {
-				return job.Error(err)
+				return err
 			}
 			break
 		}
 	}
 	outs.ReverseSort()
 	if _, err := outs.WriteListTo(job.Stdout); err != nil {
-		return job.Error(err)
+		return err
 	}
-	return engine.StatusOK
+	return nil
 }

+ 7 - 7
daemon/logs.go

@@ -16,9 +16,9 @@ import (
 	"github.com/docker/docker/pkg/timeutils"
 )
 
-func (daemon *Daemon) ContainerLogs(job *engine.Job) engine.Status {
+func (daemon *Daemon) ContainerLogs(job *engine.Job) error {
 	if len(job.Args) != 1 {
-		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
+		return fmt.Errorf("Usage: %s CONTAINER\n", job.Name)
 	}
 
 	var (
@@ -32,7 +32,7 @@ func (daemon *Daemon) ContainerLogs(job *engine.Job) engine.Status {
 		format string
 	)
 	if !(stdout || stderr) {
-		return job.Errorf("You must choose at least one stream")
+		return fmt.Errorf("You must choose at least one stream")
 	}
 	if times {
 		format = timeutils.RFC3339NanoFixed
@@ -42,10 +42,10 @@ func (daemon *Daemon) ContainerLogs(job *engine.Job) engine.Status {
 	}
 	container, err := daemon.Get(name)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	if container.LogDriverType() != "json-file" {
-		return job.Errorf("\"logs\" endpoint is supported only for \"json-file\" logging driver")
+		return fmt.Errorf("\"logs\" endpoint is supported only for \"json-file\" logging driver")
 	}
 	cLog, err := container.ReadLog("json")
 	if err != nil && os.IsNotExist(err) {
@@ -83,7 +83,7 @@ func (daemon *Daemon) ContainerLogs(job *engine.Job) engine.Status {
 				f := cLog.(*os.File)
 				ls, err := tailfile.TailFile(f, lines)
 				if err != nil {
-					return job.Error(err)
+					return err
 				}
 				tmp := bytes.NewBuffer([]byte{})
 				for _, l := range ls {
@@ -148,5 +148,5 @@ func (daemon *Daemon) ContainerLogs(job *engine.Job) engine.Status {
 		}
 
 	}
-	return engine.StatusOK
+	return nil
 }

+ 46 - 46
daemon/networkdriver/bridge/driver.go

@@ -83,7 +83,7 @@ var (
 	ipAllocator       = ipallocator.New()
 )
 
-func InitDriver(job *engine.Job) engine.Status {
+func InitDriver(job *engine.Job) error {
 	var (
 		networkv4      *net.IPNet
 		networkv6      *net.IPNet
@@ -117,17 +117,17 @@ func InitDriver(job *engine.Job) engine.Status {
 		// No Bridge existent, create one
 		// If we're not using the default bridge, fail without trying to create it
 		if !usingDefaultBridge {
-			return job.Error(err)
+			return err
 		}
 
 		// If the iface is not found, try to create it
 		if err := configureBridge(bridgeIP, bridgeIPv6, enableIPv6); err != nil {
-			return job.Error(err)
+			return err
 		}
 
 		addrv4, addrsv6, err = networkdriver.GetIfaceAddr(bridgeIface)
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 
 		if fixedCIDRv6 != "" {
@@ -144,10 +144,10 @@ func InitDriver(job *engine.Job) engine.Status {
 			networkv4 = addrv4.(*net.IPNet)
 			bip, _, err := net.ParseCIDR(bridgeIP)
 			if err != nil {
-				return job.Error(err)
+				return err
 			}
 			if !networkv4.IP.Equal(bip) {
-				return job.Errorf("Bridge ip (%s) does not match existing bridge configuration %s", networkv4.IP, bip)
+				return fmt.Errorf("Bridge ip (%s) does not match existing bridge configuration %s", networkv4.IP, bip)
 			}
 		}
 
@@ -157,12 +157,12 @@ func InitDriver(job *engine.Job) engine.Status {
 		// the bridge init for IPv6 here, else we will error out below if --ipv6=true
 		if len(addrsv6) == 0 && enableIPv6 {
 			if err := setupIPv6Bridge(bridgeIPv6); err != nil {
-				return job.Error(err)
+				return err
 			}
 			// Recheck addresses now that IPv6 is setup on the bridge
 			addrv4, addrsv6, err = networkdriver.GetIfaceAddr(bridgeIface)
 			if err != nil {
-				return job.Error(err)
+				return err
 			}
 		}
 
@@ -172,7 +172,7 @@ func InitDriver(job *engine.Job) engine.Status {
 	if enableIPv6 {
 		bip6, _, err := net.ParseCIDR(bridgeIPv6)
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 		found := false
 		for _, addrv6 := range addrsv6 {
@@ -183,7 +183,7 @@ func InitDriver(job *engine.Job) engine.Status {
 			}
 		}
 		if !found {
-			return job.Errorf("Bridge IPv6 does not match existing bridge configuration %s", bip6)
+			return fmt.Errorf("Bridge IPv6 does not match existing bridge configuration %s", bip6)
 		}
 	}
 
@@ -191,7 +191,7 @@ func InitDriver(job *engine.Job) engine.Status {
 
 	if enableIPv6 {
 		if len(addrsv6) == 0 {
-			return job.Error(errors.New("IPv6 enabled but no IPv6 detected"))
+			return errors.New("IPv6 enabled but no IPv6 detected")
 		}
 		bridgeIPv6Addr = networkv6.IP
 	}
@@ -199,7 +199,7 @@ func InitDriver(job *engine.Job) engine.Status {
 	// Configure iptables for link support
 	if enableIPTables {
 		if err := setupIPTables(addrv4, icc, ipMasq); err != nil {
-			return job.Error(err)
+			return err
 		}
 
 	}
@@ -207,33 +207,33 @@ func InitDriver(job *engine.Job) engine.Status {
 	if ipForward {
 		// Enable IPv4 forwarding
 		if err := ioutil.WriteFile("/proc/sys/net/ipv4/ip_forward", []byte{'1', '\n'}, 0644); err != nil {
-			job.Logf("WARNING: unable to enable IPv4 forwarding: %s\n", err)
+			log.Warnf("WARNING: unable to enable IPv4 forwarding: %s\n", err)
 		}
 
 		if fixedCIDRv6 != "" {
 			// Enable IPv6 forwarding
 			if err := ioutil.WriteFile("/proc/sys/net/ipv6/conf/default/forwarding", []byte{'1', '\n'}, 0644); err != nil {
-				job.Logf("WARNING: unable to enable IPv6 default forwarding: %s\n", err)
+				log.Warnf("WARNING: unable to enable IPv6 default forwarding: %s\n", err)
 			}
 			if err := ioutil.WriteFile("/proc/sys/net/ipv6/conf/all/forwarding", []byte{'1', '\n'}, 0644); err != nil {
-				job.Logf("WARNING: unable to enable IPv6 all forwarding: %s\n", err)
+				log.Warnf("WARNING: unable to enable IPv6 all forwarding: %s\n", err)
 			}
 		}
 	}
 
 	// We can always try removing the iptables
 	if err := iptables.RemoveExistingChain("DOCKER", iptables.Nat); err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	if enableIPTables {
 		_, err := iptables.NewChain("DOCKER", bridgeIface, iptables.Nat)
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 		chain, err := iptables.NewChain("DOCKER", bridgeIface, iptables.Filter)
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 		portmapper.SetIptablesChain(chain)
 	}
@@ -242,22 +242,22 @@ func InitDriver(job *engine.Job) engine.Status {
 	if fixedCIDR != "" {
 		_, subnet, err := net.ParseCIDR(fixedCIDR)
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 		log.Debugf("Subnet: %v", subnet)
 		if err := ipAllocator.RegisterSubnet(bridgeIPv4Network, subnet); err != nil {
-			return job.Error(err)
+			return err
 		}
 	}
 
 	if fixedCIDRv6 != "" {
 		_, subnet, err := net.ParseCIDR(fixedCIDRv6)
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 		log.Debugf("Subnet: %v", subnet)
 		if err := ipAllocator.RegisterSubnet(subnet, subnet); err != nil {
-			return job.Error(err)
+			return err
 		}
 		globalIPv6Network = subnet
 	}
@@ -275,10 +275,10 @@ func InitDriver(job *engine.Job) engine.Status {
 		"link":               LinkContainers,
 	} {
 		if err := job.Eng.Register(name, f); err != nil {
-			return job.Error(err)
+			return err
 		}
 	}
-	return engine.StatusOK
+	return nil
 }
 
 func setupIPTables(addr net.Addr, icc, ipmasq bool) error {
@@ -499,7 +499,7 @@ func linkLocalIPv6FromMac(mac string) (string, error) {
 }
 
 // Allocate a network interface
-func Allocate(job *engine.Job) engine.Status {
+func Allocate(job *engine.Job) error {
 	var (
 		ip            net.IP
 		mac           net.HardwareAddr
@@ -512,7 +512,7 @@ func Allocate(job *engine.Job) engine.Status {
 
 	ip, err = ipAllocator.RequestIP(bridgeIPv4Network, requestedIP)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	// If no explicit mac address was given, generate a random one.
@@ -534,7 +534,7 @@ func Allocate(job *engine.Job) engine.Status {
 		globalIPv6, err = ipAllocator.RequestIP(globalIPv6Network, requestedIPv6)
 		if err != nil {
 			log.Errorf("Allocator: RequestIP v6: %v", err)
-			return job.Error(err)
+			return err
 		}
 		log.Infof("Allocated IPv6 %s", globalIPv6)
 	}
@@ -552,7 +552,7 @@ func Allocate(job *engine.Job) engine.Status {
 	// If linklocal IPv6
 	localIPv6Net, err := linkLocalIPv6FromMac(mac.String())
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	localIPv6, _, _ := net.ParseCIDR(localIPv6Net)
 	out.Set("LinkLocalIPv6", localIPv6.String())
@@ -572,18 +572,18 @@ func Allocate(job *engine.Job) engine.Status {
 
 	out.WriteTo(job.Stdout)
 
-	return engine.StatusOK
+	return nil
 }
 
 // Release an interface for a select ip
-func Release(job *engine.Job) engine.Status {
+func Release(job *engine.Job) error {
 	var (
 		id                 = job.Args[0]
 		containerInterface = currentInterfaces.Get(id)
 	)
 
 	if containerInterface == nil {
-		return job.Errorf("No network information to release for %s", id)
+		return fmt.Errorf("No network information to release for %s", id)
 	}
 
 	for _, nat := range containerInterface.PortMappings {
@@ -600,11 +600,11 @@ func Release(job *engine.Job) engine.Status {
 			log.Infof("Unable to release IPv6 %s", err)
 		}
 	}
-	return engine.StatusOK
+	return nil
 }
 
 // Allocate an external port and map it to the interface
-func AllocatePort(job *engine.Job) engine.Status {
+func AllocatePort(job *engine.Job) error {
 	var (
 		err error
 
@@ -620,7 +620,7 @@ func AllocatePort(job *engine.Job) engine.Status {
 	if hostIP != "" {
 		ip = net.ParseIP(hostIP)
 		if ip == nil {
-			return job.Errorf("Bad parameter: invalid host ip %s", hostIP)
+			return fmt.Errorf("Bad parameter: invalid host ip %s", hostIP)
 		}
 	}
 
@@ -632,7 +632,7 @@ func AllocatePort(job *engine.Job) engine.Status {
 	case "udp":
 		container = &net.UDPAddr{IP: network.IP, Port: containerPort}
 	default:
-		return job.Errorf("unsupported address type %s", proto)
+		return fmt.Errorf("unsupported address type %s", proto)
 	}
 
 	//
@@ -650,14 +650,14 @@ func AllocatePort(job *engine.Job) engine.Status {
 		// There is no point in immediately retrying to map an explicitly
 		// chosen port.
 		if hostPort != 0 {
-			job.Logf("Failed to allocate and map port %d: %s", hostPort, err)
+			log.Warnf("Failed to allocate and map port %d: %s", hostPort, err)
 			break
 		}
-		job.Logf("Failed to allocate and map port: %s, retry: %d", err, i+1)
+		log.Warnf("Failed to allocate and map port: %s, retry: %d", err, i+1)
 	}
 
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	network.PortMappings = append(network.PortMappings, host)
@@ -672,13 +672,13 @@ func AllocatePort(job *engine.Job) engine.Status {
 		out.SetInt("HostPort", netAddr.Port)
 	}
 	if _, err := out.WriteTo(job.Stdout); err != nil {
-		return job.Error(err)
+		return err
 	}
 
-	return engine.StatusOK
+	return nil
 }
 
-func LinkContainers(job *engine.Job) engine.Status {
+func LinkContainers(job *engine.Job) error {
 	var (
 		action       = job.Args[0]
 		nfAction     iptables.Action
@@ -696,24 +696,24 @@ func LinkContainers(job *engine.Job) engine.Status {
 	case "-D":
 		nfAction = iptables.Delete
 	default:
-		return job.Errorf("Invalid action '%s' specified", action)
+		return fmt.Errorf("Invalid action '%s' specified", action)
 	}
 
 	ip1 := net.ParseIP(parentIP)
 	if ip1 == nil {
-		return job.Errorf("Parent IP '%s' is invalid", parentIP)
+		return fmt.Errorf("Parent IP '%s' is invalid", parentIP)
 	}
 	ip2 := net.ParseIP(childIP)
 	if ip2 == nil {
-		return job.Errorf("Child IP '%s' is invalid", childIP)
+		return fmt.Errorf("Child IP '%s' is invalid", childIP)
 	}
 
 	chain := iptables.Chain{Name: "DOCKER", Bridge: bridgeIface}
 	for _, p := range ports {
 		port := nat.Port(p)
 		if err := chain.Link(nfAction, ip1, ip2, port.Int(), port.Proto()); !ignoreErrors && err != nil {
-			return job.Error(err)
+			return err
 		}
 	}
-	return engine.StatusOK
+	return nil
 }

+ 12 - 12
daemon/networkdriver/bridge/driver_test.go

@@ -60,22 +60,22 @@ func TestAllocatePortDetection(t *testing.T) {
 
 	// Init driver
 	job := eng.Job("initdriver")
-	if res := InitDriver(job); res != engine.StatusOK {
+	if res := InitDriver(job); res != nil {
 		t.Fatal("Failed to initialize network driver")
 	}
 
 	// Allocate interface
 	job = eng.Job("allocate_interface", "container_id")
-	if res := Allocate(job); res != engine.StatusOK {
+	if res := Allocate(job); res != nil {
 		t.Fatal("Failed to allocate network interface")
 	}
 
 	// Allocate same port twice, expect failure on second call
 	job = newPortAllocationJob(eng, freePort)
-	if res := AllocatePort(job); res != engine.StatusOK {
+	if res := AllocatePort(job); res != nil {
 		t.Fatal("Failed to find a free port to allocate")
 	}
-	if res := AllocatePort(job); res == engine.StatusOK {
+	if res := AllocatePort(job); res == nil {
 		t.Fatal("Duplicate port allocation granted by AllocatePort")
 	}
 }
@@ -88,19 +88,19 @@ func TestHostnameFormatChecking(t *testing.T) {
 
 	// Init driver
 	job := eng.Job("initdriver")
-	if res := InitDriver(job); res != engine.StatusOK {
+	if res := InitDriver(job); res != nil {
 		t.Fatal("Failed to initialize network driver")
 	}
 
 	// Allocate interface
 	job = eng.Job("allocate_interface", "container_id")
-	if res := Allocate(job); res != engine.StatusOK {
+	if res := Allocate(job); res != nil {
 		t.Fatal("Failed to allocate network interface")
 	}
 
 	// Allocate port with invalid HostIP, expect failure with Bad Request http status
 	job = newPortAllocationJobWithInvalidHostIP(eng, freePort)
-	if res := AllocatePort(job); res == engine.StatusOK {
+	if res := AllocatePort(job); res == nil {
 		t.Fatal("Failed to check invalid HostIP")
 	}
 }
@@ -129,11 +129,11 @@ func newInterfaceAllocation(t *testing.T, input engine.Env) (output engine.Env)
 	<-done
 
 	if input.Exists("expectFail") && input.GetBool("expectFail") {
-		if res == engine.StatusOK {
+		if res == nil {
 			t.Fatal("Doesn't fail to allocate network interface")
 		}
 	} else {
-		if res != engine.StatusOK {
+		if res != nil {
 			t.Fatal("Failed to allocate network interface")
 		}
 	}
@@ -244,13 +244,13 @@ func TestLinkContainers(t *testing.T) {
 
 	// Init driver
 	job := eng.Job("initdriver")
-	if res := InitDriver(job); res != engine.StatusOK {
+	if res := InitDriver(job); res != nil {
 		t.Fatal("Failed to initialize network driver")
 	}
 
 	// Allocate interface
 	job = eng.Job("allocate_interface", "container_id")
-	if res := Allocate(job); res != engine.StatusOK {
+	if res := Allocate(job); res != nil {
 		t.Fatal("Failed to allocate network interface")
 	}
 
@@ -267,7 +267,7 @@ func TestLinkContainers(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	if res := LinkContainers(job); res != engine.StatusOK {
+	if res := LinkContainers(job); res != nil {
 		t.Fatalf("LinkContainers failed")
 	}
 

+ 12 - 10
daemon/pause.go

@@ -1,37 +1,39 @@
 package daemon
 
 import (
+	"fmt"
+
 	"github.com/docker/docker/engine"
 )
 
-func (daemon *Daemon) ContainerPause(job *engine.Job) engine.Status {
+func (daemon *Daemon) ContainerPause(job *engine.Job) error {
 	if len(job.Args) != 1 {
-		return job.Errorf("Usage: %s CONTAINER", job.Name)
+		return fmt.Errorf("Usage: %s CONTAINER", job.Name)
 	}
 	name := job.Args[0]
 	container, err := daemon.Get(name)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	if err := container.Pause(); err != nil {
-		return job.Errorf("Cannot pause container %s: %s", name, err)
+		return fmt.Errorf("Cannot pause container %s: %s", name, err)
 	}
 	container.LogEvent("pause")
-	return engine.StatusOK
+	return nil
 }
 
-func (daemon *Daemon) ContainerUnpause(job *engine.Job) engine.Status {
+func (daemon *Daemon) ContainerUnpause(job *engine.Job) error {
 	if n := len(job.Args); n < 1 || n > 2 {
-		return job.Errorf("Usage: %s CONTAINER", job.Name)
+		return fmt.Errorf("Usage: %s CONTAINER", job.Name)
 	}
 	name := job.Args[0]
 	container, err := daemon.Get(name)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	if err := container.Unpause(); err != nil {
-		return job.Errorf("Cannot unpause container %s: %s", name, err)
+		return fmt.Errorf("Cannot unpause container %s: %s", name, err)
 	}
 	container.LogEvent("unpause")
-	return engine.StatusOK
+	return nil
 }

+ 12 - 8
daemon/rename.go

@@ -1,17 +1,21 @@
 package daemon
 
-import "github.com/docker/docker/engine"
+import (
+	"fmt"
 
-func (daemon *Daemon) ContainerRename(job *engine.Job) engine.Status {
+	"github.com/docker/docker/engine"
+)
+
+func (daemon *Daemon) ContainerRename(job *engine.Job) error {
 	if len(job.Args) != 2 {
-		return job.Errorf("usage: %s OLD_NAME NEW_NAME", job.Name)
+		return fmt.Errorf("usage: %s OLD_NAME NEW_NAME", job.Name)
 	}
 	oldName := job.Args[0]
 	newName := job.Args[1]
 
 	container, err := daemon.Get(oldName)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	oldName = container.Name
@@ -19,7 +23,7 @@ func (daemon *Daemon) ContainerRename(job *engine.Job) engine.Status {
 	container.Lock()
 	defer container.Unlock()
 	if newName, err = daemon.reserveName(container.ID, newName); err != nil {
-		return job.Errorf("Error when allocating new name: %s", err)
+		return fmt.Errorf("Error when allocating new name: %s", err)
 	}
 
 	container.Name = newName
@@ -32,13 +36,13 @@ func (daemon *Daemon) ContainerRename(job *engine.Job) engine.Status {
 
 	if err := daemon.containerGraph.Delete(oldName); err != nil {
 		undo()
-		return job.Errorf("Failed to delete container %q: %v", oldName, err)
+		return fmt.Errorf("Failed to delete container %q: %v", oldName, err)
 	}
 
 	if err := container.toDisk(); err != nil {
 		undo()
-		return job.Error(err)
+		return err
 	}
 
-	return engine.StatusOK
+	return nil
 }

+ 15 - 14
daemon/resize.go

@@ -1,53 +1,54 @@
 package daemon
 
 import (
+	"fmt"
 	"strconv"
 
 	"github.com/docker/docker/engine"
 )
 
-func (daemon *Daemon) ContainerResize(job *engine.Job) engine.Status {
+func (daemon *Daemon) ContainerResize(job *engine.Job) error {
 	if len(job.Args) != 3 {
-		return job.Errorf("Not enough arguments. Usage: %s CONTAINER HEIGHT WIDTH\n", job.Name)
+		return fmt.Errorf("Not enough arguments. Usage: %s CONTAINER HEIGHT WIDTH\n", job.Name)
 	}
 	name := job.Args[0]
 	height, err := strconv.Atoi(job.Args[1])
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	width, err := strconv.Atoi(job.Args[2])
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	container, err := daemon.Get(name)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	if err := container.Resize(height, width); err != nil {
-		return job.Error(err)
+		return err
 	}
-	return engine.StatusOK
+	return nil
 }
 
-func (daemon *Daemon) ContainerExecResize(job *engine.Job) engine.Status {
+func (daemon *Daemon) ContainerExecResize(job *engine.Job) error {
 	if len(job.Args) != 3 {
-		return job.Errorf("Not enough arguments. Usage: %s EXEC HEIGHT WIDTH\n", job.Name)
+		return fmt.Errorf("Not enough arguments. Usage: %s EXEC HEIGHT WIDTH\n", job.Name)
 	}
 	name := job.Args[0]
 	height, err := strconv.Atoi(job.Args[1])
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	width, err := strconv.Atoi(job.Args[2])
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	execConfig, err := daemon.getExecConfig(name)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	if err := execConfig.Resize(height, width); err != nil {
-		return job.Error(err)
+		return err
 	}
-	return engine.StatusOK
+	return nil
 }

+ 7 - 5
daemon/restart.go

@@ -1,12 +1,14 @@
 package daemon
 
 import (
+	"fmt"
+
 	"github.com/docker/docker/engine"
 )
 
-func (daemon *Daemon) ContainerRestart(job *engine.Job) engine.Status {
+func (daemon *Daemon) ContainerRestart(job *engine.Job) error {
 	if len(job.Args) != 1 {
-		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
+		return fmt.Errorf("Usage: %s CONTAINER\n", job.Name)
 	}
 	var (
 		name = job.Args[0]
@@ -17,11 +19,11 @@ func (daemon *Daemon) ContainerRestart(job *engine.Job) engine.Status {
 	}
 	container, err := daemon.Get(name)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	if err := container.Restart(int(t)); err != nil {
-		return job.Errorf("Cannot restart container %s: %s\n", name, err)
+		return fmt.Errorf("Cannot restart container %s: %s\n", name, err)
 	}
 	container.LogEvent("restart")
-	return engine.StatusOK
+	return nil
 }

+ 10 - 8
daemon/start.go

@@ -1,13 +1,15 @@
 package daemon
 
 import (
+	"fmt"
+
 	"github.com/docker/docker/engine"
 	"github.com/docker/docker/runconfig"
 )
 
-func (daemon *Daemon) ContainerStart(job *engine.Job) engine.Status {
+func (daemon *Daemon) ContainerStart(job *engine.Job) error {
 	if len(job.Args) < 1 {
-		return job.Errorf("Usage: %s container_id", job.Name)
+		return fmt.Errorf("Usage: %s container_id", job.Name)
 	}
 	var (
 		name = job.Args[0]
@@ -15,15 +17,15 @@ func (daemon *Daemon) ContainerStart(job *engine.Job) engine.Status {
 
 	container, err := daemon.Get(name)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	if container.IsPaused() {
-		return job.Errorf("Cannot start a paused container, try unpause instead.")
+		return fmt.Errorf("Cannot start a paused container, try unpause instead.")
 	}
 
 	if container.IsRunning() {
-		return job.Errorf("Container already started")
+		return fmt.Errorf("Container already started")
 	}
 
 	// If no environment was set, then no hostconfig was passed.
@@ -32,15 +34,15 @@ func (daemon *Daemon) ContainerStart(job *engine.Job) engine.Status {
 	if len(job.Environ()) > 0 {
 		hostConfig := runconfig.ContainerHostConfigFromJob(job)
 		if err := daemon.setHostConfig(container, hostConfig); err != nil {
-			return job.Error(err)
+			return err
 		}
 	}
 	if err := container.Start(); err != nil {
 		container.LogEvent("die")
-		return job.Errorf("Cannot start container %s: %s", name, err)
+		return fmt.Errorf("Cannot start container %s: %s", name, err)
 	}
 
-	return engine.StatusOK
+	return nil
 }
 
 func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig.HostConfig) error {

+ 4 - 4
daemon/stats.go

@@ -10,10 +10,10 @@ import (
 	"github.com/docker/libcontainer/cgroups"
 )
 
-func (daemon *Daemon) ContainerStats(job *engine.Job) engine.Status {
+func (daemon *Daemon) ContainerStats(job *engine.Job) error {
 	updates, err := daemon.SubscribeToContainerStats(job.Args[0])
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	enc := json.NewEncoder(job.Stdout)
 	for v := range updates {
@@ -25,10 +25,10 @@ func (daemon *Daemon) ContainerStats(job *engine.Job) engine.Status {
 		if err := enc.Encode(ss); err != nil {
 			// TODO: handle the specific broken pipe
 			daemon.UnsubscribeToContainerStats(job.Args[0], updates)
-			return job.Error(err)
+			return err
 		}
 	}
-	return engine.StatusOK
+	return nil
 }
 
 // convertToAPITypes converts the libcontainer.Stats to the api specific

+ 8 - 6
daemon/stop.go

@@ -1,12 +1,14 @@
 package daemon
 
 import (
+	"fmt"
+
 	"github.com/docker/docker/engine"
 )
 
-func (daemon *Daemon) ContainerStop(job *engine.Job) engine.Status {
+func (daemon *Daemon) ContainerStop(job *engine.Job) error {
 	if len(job.Args) != 1 {
-		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
+		return fmt.Errorf("Usage: %s CONTAINER\n", job.Name)
 	}
 	var (
 		name = job.Args[0]
@@ -17,14 +19,14 @@ func (daemon *Daemon) ContainerStop(job *engine.Job) engine.Status {
 	}
 	container, err := daemon.Get(name)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	if !container.IsRunning() {
-		return job.Errorf("Container already stopped")
+		return fmt.Errorf("Container already stopped")
 	}
 	if err := container.Stop(int(t)); err != nil {
-		return job.Errorf("Cannot stop container %s: %s\n", name, err)
+		return fmt.Errorf("Cannot stop container %s: %s\n", name, err)
 	}
 	container.LogEvent("stop")
-	return engine.StatusOK
+	return nil
 }

+ 10 - 9
daemon/top.go

@@ -1,6 +1,7 @@
 package daemon
 
 import (
+	"fmt"
 	"os/exec"
 	"strconv"
 	"strings"
@@ -8,9 +9,9 @@ import (
 	"github.com/docker/docker/engine"
 )
 
-func (daemon *Daemon) ContainerTop(job *engine.Job) engine.Status {
+func (daemon *Daemon) ContainerTop(job *engine.Job) error {
 	if len(job.Args) != 1 && len(job.Args) != 2 {
-		return job.Errorf("Not enough arguments. Usage: %s CONTAINER [PS_ARGS]\n", job.Name)
+		return fmt.Errorf("Not enough arguments. Usage: %s CONTAINER [PS_ARGS]\n", job.Name)
 	}
 	var (
 		name   = job.Args[0]
@@ -23,18 +24,18 @@ func (daemon *Daemon) ContainerTop(job *engine.Job) engine.Status {
 
 	container, err := daemon.Get(name)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	if !container.IsRunning() {
-		return job.Errorf("Container %s is not running", name)
+		return fmt.Errorf("Container %s is not running", name)
 	}
 	pids, err := daemon.ExecutionDriver().GetPidsForContainer(container.ID)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	output, err := exec.Command("ps", strings.Split(psArgs, " ")...).Output()
 	if err != nil {
-		return job.Errorf("Error running ps: %s", err)
+		return fmt.Errorf("Error running ps: %s", err)
 	}
 
 	lines := strings.Split(string(output), "\n")
@@ -49,7 +50,7 @@ func (daemon *Daemon) ContainerTop(job *engine.Job) engine.Status {
 		}
 	}
 	if pidIndex == -1 {
-		return job.Errorf("Couldn't find PID field in ps output")
+		return fmt.Errorf("Couldn't find PID field in ps output")
 	}
 
 	processes := [][]string{}
@@ -60,7 +61,7 @@ func (daemon *Daemon) ContainerTop(job *engine.Job) engine.Status {
 		fields := strings.Fields(line)
 		p, err := strconv.Atoi(fields[pidIndex])
 		if err != nil {
-			return job.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err)
+			return fmt.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err)
 		}
 
 		for _, pid := range pids {
@@ -75,5 +76,5 @@ func (daemon *Daemon) ContainerTop(job *engine.Job) engine.Status {
 	}
 	out.SetJson("Processes", processes)
 	out.WriteTo(job.Stdout)
-	return engine.StatusOK
+	return nil
 }

+ 5 - 4
daemon/wait.go

@@ -1,21 +1,22 @@
 package daemon
 
 import (
+	"fmt"
 	"time"
 
 	"github.com/docker/docker/engine"
 )
 
-func (daemon *Daemon) ContainerWait(job *engine.Job) engine.Status {
+func (daemon *Daemon) ContainerWait(job *engine.Job) error {
 	if len(job.Args) != 1 {
-		return job.Errorf("Usage: %s", job.Name)
+		return fmt.Errorf("Usage: %s", job.Name)
 	}
 	name := job.Args[0]
 	container, err := daemon.Get(name)
 	if err != nil {
-		return job.Errorf("%s: %v", job.Name, err)
+		return fmt.Errorf("%s: %v", job.Name, err)
 	}
 	status, _ := container.WaitStop(-1 * time.Second)
 	job.Printf("%d\n", status)
-	return engine.StatusOK
+	return nil
 }

+ 3 - 3
engine/engine.go

@@ -21,7 +21,7 @@ type Installer interface {
 	Install(*Engine) error
 }
 
-type Handler func(*Job) Status
+type Handler func(*Job) error
 
 var globalHandlers map[string]Handler
 
@@ -84,11 +84,11 @@ func New() *Engine {
 		Stdin:    os.Stdin,
 		Logging:  true,
 	}
-	eng.Register("commands", func(job *Job) Status {
+	eng.Register("commands", func(job *Job) error {
 		for _, name := range eng.commands() {
 			job.Printf("%s\n", name)
 		}
-		return StatusOK
+		return nil
 	})
 	// Copy existing global handlers
 	for k, v := range globalHandlers {

+ 12 - 12
engine/engine_test.go

@@ -45,9 +45,9 @@ func TestJob(t *testing.T) {
 		t.Fatalf("job1.handler should be empty")
 	}
 
-	h := func(j *Job) Status {
+	h := func(j *Job) error {
 		j.Printf("%s\n", j.Name)
-		return 42
+		return nil
 	}
 
 	eng.Register("dummy2", h)
@@ -58,7 +58,7 @@ func TestJob(t *testing.T) {
 		t.Fatalf("job2.handler shouldn't be nil")
 	}
 
-	if job2.handler(job2) != 42 {
+	if job2.handler(job2) != nil {
 		t.Fatalf("handler dummy2 was not found in job2")
 	}
 }
@@ -76,7 +76,7 @@ func TestEngineShutdown(t *testing.T) {
 
 func TestEngineCommands(t *testing.T) {
 	eng := New()
-	handler := func(job *Job) Status { return StatusOK }
+	handler := func(job *Job) error { return nil }
 	eng.Register("foo", handler)
 	eng.Register("bar", handler)
 	eng.Register("echo", handler)
@@ -105,9 +105,9 @@ func TestParseJob(t *testing.T) {
 	eng := New()
 	// Verify that the resulting job calls to the right place
 	var called bool
-	eng.Register("echo", func(job *Job) Status {
+	eng.Register("echo", func(job *Job) error {
 		called = true
-		return StatusOK
+		return nil
 	})
 	input := "echo DEBUG=1 hello world VERBOSITY=42"
 	job, err := eng.ParseJob(input)
@@ -140,9 +140,9 @@ func TestParseJob(t *testing.T) {
 func TestCatchallEmptyName(t *testing.T) {
 	eng := New()
 	var called bool
-	eng.RegisterCatchall(func(job *Job) Status {
+	eng.RegisterCatchall(func(job *Job) error {
 		called = true
-		return StatusOK
+		return nil
 	})
 	err := eng.Job("").Run()
 	if err == nil {
@@ -164,7 +164,7 @@ func TestNestedJobSharedOutput(t *testing.T) {
 		wrapOutput   bool
 	)
 
-	outerHandler = func(job *Job) Status {
+	outerHandler = func(job *Job) error {
 		job.Stdout.Write([]byte("outer1"))
 
 		innerJob := job.Eng.Job("innerJob")
@@ -184,13 +184,13 @@ func TestNestedJobSharedOutput(t *testing.T) {
 		// closed output.
 		job.Stdout.Write([]byte(" outer2"))
 
-		return StatusOK
+		return nil
 	}
 
-	innerHandler = func(job *Job) Status {
+	innerHandler = func(job *Job) error {
 		job.Stdout.Write([]byte(" inner"))
 
-		return StatusOK
+		return nil
 	}
 
 	eng := New()

+ 7 - 55
engine/job.go

@@ -32,7 +32,7 @@ type Job struct {
 	Stderr  *Output
 	Stdin   *Input
 	handler Handler
-	status  Status
+	err     error
 	end     time.Time
 	closeIO bool
 
@@ -43,17 +43,8 @@ type Job struct {
 	cancelOnce sync.Once
 }
 
-type Status int
-
-const (
-	StatusOK       Status = 0
-	StatusErr      Status = 1
-	StatusNotFound Status = 127
-)
-
 // Run executes the job and blocks until the job completes.
-// If the job returns a failure status, an error is returned
-// which includes the status.
+// If the job fails it returns an error
 func (job *Job) Run() error {
 	if job.Eng.IsShutdown() && !job.GetenvBool("overrideShutdown") {
 		return fmt.Errorf("engine is shutdown")
@@ -78,16 +69,16 @@ func (job *Job) Run() error {
 	if job.Eng.Logging {
 		log.Infof("+job %s", job.CallString())
 		defer func() {
-			log.Infof("-job %s%s", job.CallString(), job.StatusString())
+			// what if err is nil?
+			log.Infof("-job %s%s", job.CallString(), job.err)
 		}()
 	}
 	var errorMessage = bytes.NewBuffer(nil)
 	job.Stderr.Add(errorMessage)
 	if job.handler == nil {
-		job.Errorf("%s: command not found", job.Name)
-		job.status = 127
+		job.err = fmt.Errorf("%s: command not found", job.Name)
 	} else {
-		job.status = job.handler(job)
+		job.err = job.handler(job)
 		job.end = time.Now()
 	}
 	if job.closeIO {
@@ -102,36 +93,14 @@ func (job *Job) Run() error {
 			return err
 		}
 	}
-	if job.status != 0 {
-		return fmt.Errorf("%s", Tail(errorMessage, 1))
-	}
 
-	return nil
+	return job.err
 }
 
 func (job *Job) CallString() string {
 	return fmt.Sprintf("%s(%s)", job.Name, strings.Join(job.Args, ", "))
 }
 
-func (job *Job) StatusString() string {
-	// If the job hasn't completed, status string is empty
-	if job.end.IsZero() {
-		return ""
-	}
-	var okerr string
-	if job.status == StatusOK {
-		okerr = "OK"
-	} else {
-		okerr = "ERR"
-	}
-	return fmt.Sprintf(" = %s (%d)", okerr, job.status)
-}
-
-// String returns a human-readable description of `job`
-func (job *Job) String() string {
-	return fmt.Sprintf("%s.%s%s", job.Eng, job.CallString(), job.StatusString())
-}
-
 func (job *Job) Env() *Env {
 	return job.env
 }
@@ -235,23 +204,6 @@ func (job *Job) Printf(format string, args ...interface{}) (n int, err error) {
 	return fmt.Fprintf(job.Stdout, format, args...)
 }
 
-func (job *Job) Errorf(format string, args ...interface{}) Status {
-	if format[len(format)-1] != '\n' {
-		format = format + "\n"
-	}
-	fmt.Fprintf(job.Stderr, format, args...)
-	return StatusErr
-}
-
-func (job *Job) Error(err error) Status {
-	fmt.Fprintf(job.Stderr, "%s\n", err)
-	return StatusErr
-}
-
-func (job *Job) StatusCode() int {
-	return int(job.status)
-}
-
 func (job *Job) SetCloseIO(val bool) {
 	job.closeIO = val
 }
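
(For orientation, a minimal end-to-end sketch of the contract after this change, under the assumption that only Register, Job, Printf and Run from the engine package are used as shown in the diff above; the package main wrapper and the "greet" handler name are illustrative and not part of this commit:)

package main

import (
	"fmt"

	"github.com/docker/docker/engine"
)

// greet follows the new Handler signature: usage problems are returned
// as ordinary errors built with fmt.Errorf, and success is a plain nil
// instead of the removed engine.StatusOK.
func greet(job *engine.Job) error {
	if len(job.Args) != 1 {
		return fmt.Errorf("Usage: %s NAME", job.Name)
	}
	job.Printf("hello %s\n", job.Args[0])
	return nil
}

func main() {
	eng := engine.New()
	eng.Register("greet", greet)

	// Run() now propagates the handler's error value directly, so the
	// caller no longer tails stderr or inspects a numeric status code.
	if err := eng.Job("greet", "world").Run(); err != nil {
		fmt.Println(err)
	}

	// A failing job surfaces the handler's error straight from Run().
	if err := eng.Job("greet").Run(); err != nil {
		fmt.Println(err) // prints: Usage: greet NAME
	}
}
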

+ 8 - 36
engine/job_test.go

@@ -2,43 +2,35 @@ package engine
 
 import (
 	"bytes"
+	"errors"
 	"fmt"
 	"testing"
 )
 
-func TestJobStatusOK(t *testing.T) {
+func TestJobOK(t *testing.T) {
 	eng := New()
-	eng.Register("return_ok", func(job *Job) Status { return StatusOK })
+	eng.Register("return_ok", func(job *Job) error { return nil })
 	err := eng.Job("return_ok").Run()
 	if err != nil {
 		t.Fatalf("Expected: err=%v\nReceived: err=%v", nil, err)
 	}
 }
 
-func TestJobStatusErr(t *testing.T) {
+func TestJobErr(t *testing.T) {
 	eng := New()
-	eng.Register("return_err", func(job *Job) Status { return StatusErr })
+	eng.Register("return_err", func(job *Job) error { return errors.New("return_err") })
 	err := eng.Job("return_err").Run()
 	if err == nil {
-		t.Fatalf("When a job returns StatusErr, Run() should return an error")
-	}
-}
-
-func TestJobStatusNotFound(t *testing.T) {
-	eng := New()
-	eng.Register("return_not_found", func(job *Job) Status { return StatusNotFound })
-	err := eng.Job("return_not_found").Run()
-	if err == nil {
-		t.Fatalf("When a job returns StatusNotFound, Run() should return an error")
+		t.Fatalf("When a job returns error, Run() should return an error")
 	}
 }
 
 func TestJobStdoutString(t *testing.T) {
 	eng := New()
 	// FIXME: test multiple combinations of output and status
-	eng.Register("say_something_in_stdout", func(job *Job) Status {
+	eng.Register("say_something_in_stdout", func(job *Job) error {
 		job.Printf("Hello world\n")
-		return StatusOK
+		return nil
 	})
 
 	job := eng.Job("say_something_in_stdout")
@@ -53,23 +45,3 @@ func TestJobStdoutString(t *testing.T) {
 		t.Fatalf("Stdout last line:\nExpected: %v\nReceived: %v", expectedOutput, output)
 	}
 }
-
-func TestJobStderrString(t *testing.T) {
-	eng := New()
-	// FIXME: test multiple combinations of output and status
-	eng.Register("say_something_in_stderr", func(job *Job) Status {
-		job.Errorf("Something might happen\nHere it comes!\nOh no...\nSomething happened\n")
-		return StatusOK
-	})
-
-	job := eng.Job("say_something_in_stderr")
-	var outputBuffer = bytes.NewBuffer(nil)
-	job.Stderr.Add(outputBuffer)
-	if err := job.Run(); err != nil {
-		t.Fatal(err)
-	}
-	var output = Tail(outputBuffer, 1)
-	if expectedOutput := "Something happened"; output != expectedOutput {
-		t.Fatalf("Stderr last line:\nExpected: %v\nReceived: %v", expectedOutput, output)
-	}
-}

+ 4 - 4
engine/shutdown_test.go

@@ -19,9 +19,9 @@ func TestShutdownEmpty(t *testing.T) {
 func TestShutdownAfterRun(t *testing.T) {
 	eng := New()
 	var called bool
-	eng.Register("foo", func(job *Job) Status {
+	eng.Register("foo", func(job *Job) error {
 		called = true
-		return StatusOK
+		return nil
 	})
 	if err := eng.Job("foo").Run(); err != nil {
 		t.Fatal(err)
@@ -42,10 +42,10 @@ func TestShutdownDuringRun(t *testing.T) {
 	)
 	eng := New()
 	var completed bool
-	eng.Register("foo", func(job *Job) Status {
+	eng.Register("foo", func(job *Job) error {
 		time.Sleep(jobDelay)
 		completed = true
-		return StatusOK
+		return nil
 	})
 	go eng.Job("foo").Run()
 	time.Sleep(50 * time.Millisecond)

+ 12 - 11
events/events.go

@@ -3,6 +3,7 @@ package events
 import (
 	"bytes"
 	"encoding/json"
+	"fmt"
 	"io"
 	"strings"
 	"sync"
@@ -45,7 +46,7 @@ func (e *Events) Install(eng *engine.Engine) error {
 	return nil
 }
 
-func (e *Events) Get(job *engine.Job) engine.Status {
+func (e *Events) Get(job *engine.Job) error {
 	var (
 		since   = job.GetenvInt64("since")
 		until   = job.GetenvInt64("until")
@@ -54,7 +55,7 @@ func (e *Events) Get(job *engine.Job) engine.Status {
 
 	eventFilters, err := filters.FromParam(job.Getenv("filters"))
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	// If no until, disable timeout
@@ -71,7 +72,7 @@ func (e *Events) Get(job *engine.Job) engine.Status {
 	// Resend every event in the [since, until] time interval.
 	if since != 0 {
 		if err := e.writeCurrent(job, since, until, eventFilters); err != nil {
-			return job.Error(err)
+			return err
 		}
 	}
 
@@ -79,31 +80,31 @@ func (e *Events) Get(job *engine.Job) engine.Status {
 		select {
 		case event, ok := <-listener:
 			if !ok {
-				return engine.StatusOK
+				return nil
 			}
 			if err := writeEvent(job, event, eventFilters); err != nil {
-				return job.Error(err)
+				return err
 			}
 		case <-timeout.C:
-			return engine.StatusOK
+			return nil
 		}
 	}
 }
 
-func (e *Events) Log(job *engine.Job) engine.Status {
+func (e *Events) Log(job *engine.Job) error {
 	if len(job.Args) != 3 {
-		return job.Errorf("usage: %s ACTION ID FROM", job.Name)
+		return fmt.Errorf("usage: %s ACTION ID FROM", job.Name)
 	}
 	// not waiting for receivers
 	go e.log(job.Args[0], job.Args[1], job.Args[2])
-	return engine.StatusOK
+	return nil
 }
 
-func (e *Events) SubscribersCount(job *engine.Job) engine.Status {
+func (e *Events) SubscribersCount(job *engine.Job) error {
 	ret := &engine.Env{}
 	ret.SetInt("count", e.subscribersCount())
 	ret.WriteTo(job.Stdout)
-	return engine.StatusOK
+	return nil
 }
 
 func writeEvent(job *engine.Job, event *utils.JSONMessage, eventFilters filters.Args) error {

+ 12 - 11
graph/export.go

@@ -2,6 +2,7 @@ package graph
 
 import (
 	"encoding/json"
+	"fmt"
 	"io"
 	"io/ioutil"
 	"os"
@@ -19,14 +20,14 @@ import (
 // uncompressed tar ball.
 // name is the set of tags to export.
 // out is the writer where the images are written to.
-func (s *TagStore) CmdImageExport(job *engine.Job) engine.Status {
+func (s *TagStore) CmdImageExport(job *engine.Job) error {
 	if len(job.Args) < 1 {
-		return job.Errorf("Usage: %s IMAGE [IMAGE...]\n", job.Name)
+		return fmt.Errorf("Usage: %s IMAGE [IMAGE...]\n", job.Name)
 	}
 	// get image json
 	tempdir, err := ioutil.TempDir("", "docker-export-")
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	defer os.RemoveAll(tempdir)
 
@@ -48,13 +49,13 @@ func (s *TagStore) CmdImageExport(job *engine.Job) engine.Status {
 			for tag, id := range rootRepo {
 				addKey(name, tag, id)
 				if err := s.exportImage(job.Eng, id, tempdir); err != nil {
-					return job.Error(err)
+					return err
 				}
 			}
 		} else {
 			img, err := s.LookupImage(name)
 			if err != nil {
-				return job.Error(err)
+				return err
 			}
 
 			if img != nil {
@@ -67,13 +68,13 @@ func (s *TagStore) CmdImageExport(job *engine.Job) engine.Status {
 					addKey(repoName, repoTag, img.ID)
 				}
 				if err := s.exportImage(job.Eng, img.ID, tempdir); err != nil {
-					return job.Error(err)
+					return err
 				}
 
 			} else {
 				// this must be an ID that didn't get looked up just right?
 				if err := s.exportImage(job.Eng, name, tempdir); err != nil {
-					return job.Error(err)
+					return err
 				}
 			}
 		}
@@ -83,7 +84,7 @@ func (s *TagStore) CmdImageExport(job *engine.Job) engine.Status {
 	if len(rootRepoMap) > 0 {
 		rootRepoJson, _ := json.Marshal(rootRepoMap)
 		if err := ioutil.WriteFile(path.Join(tempdir, "repositories"), rootRepoJson, os.FileMode(0644)); err != nil {
-			return job.Error(err)
+			return err
 		}
 	} else {
 		log.Debugf("There were no repositories to write")
@@ -91,15 +92,15 @@ func (s *TagStore) CmdImageExport(job *engine.Job) engine.Status {
 
 	fs, err := archive.Tar(tempdir, archive.Uncompressed)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	defer fs.Close()
 
 	if _, err := io.Copy(job.Stdout, fs); err != nil {
-		return job.Error(err)
+		return err
 	}
 	log.Debugf("End export job: %s", job.Name)
-	return engine.StatusOK
+	return nil
 }
 
 // FIXME: this should be a top-level function, not a class method

+ 6 - 5
graph/history.go

@@ -1,6 +1,7 @@
 package graph
 
 import (
+	"fmt"
 	"strings"
 
 	"github.com/docker/docker/engine"
@@ -8,14 +9,14 @@ import (
 	"github.com/docker/docker/utils"
 )
 
-func (s *TagStore) CmdHistory(job *engine.Job) engine.Status {
+func (s *TagStore) CmdHistory(job *engine.Job) error {
 	if n := len(job.Args); n != 1 {
-		return job.Errorf("Usage: %s IMAGE", job.Name)
+		return fmt.Errorf("Usage: %s IMAGE", job.Name)
 	}
 	name := job.Args[0]
 	foundImage, err := s.LookupImage(name)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	lookupMap := make(map[string][]string)
@@ -41,7 +42,7 @@ func (s *TagStore) CmdHistory(job *engine.Job) engine.Status {
 		return nil
 	})
 	if _, err := outs.WriteListTo(job.Stdout); err != nil {
-		return job.Error(err)
+		return err
 	}
-	return engine.StatusOK
+	return nil
 }

+ 10 - 9
graph/import.go

@@ -3,6 +3,7 @@ package graph
 import (
 	"bytes"
 	"encoding/json"
+	"fmt"
 	"net/http"
 	"net/url"
 
@@ -14,9 +15,9 @@ import (
 	"github.com/docker/docker/utils"
 )
 
-func (s *TagStore) CmdImport(job *engine.Job) engine.Status {
+func (s *TagStore) CmdImport(job *engine.Job) error {
 	if n := len(job.Args); n != 2 && n != 3 {
-		return job.Errorf("Usage: %s SRC REPO [TAG]", job.Name)
+		return fmt.Errorf("Usage: %s SRC REPO [TAG]", job.Name)
 	}
 	var (
 		src          = job.Args[0]
@@ -37,7 +38,7 @@ func (s *TagStore) CmdImport(job *engine.Job) engine.Status {
 	} else {
 		u, err := url.Parse(src)
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 		if u.Scheme == "" {
 			u.Scheme = "http"
@@ -47,7 +48,7 @@ func (s *TagStore) CmdImport(job *engine.Job) engine.Status {
 		job.Stdout.Write(sf.FormatStatus("", "Downloading from %s", u))
 		resp, err = utils.Download(u.String())
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 		progressReader := progressreader.New(progressreader.Config{
 			In:        resp.Body,
@@ -69,20 +70,20 @@ func (s *TagStore) CmdImport(job *engine.Job) engine.Status {
 	buildConfigJob.Setenv("config", job.Getenv("config"))
 
 	if err := buildConfigJob.Run(); err != nil {
-		return job.Error(err)
+		return err
 	}
 	if err := json.NewDecoder(stdoutBuffer).Decode(&newConfig); err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	img, err := s.graph.Create(archive, "", "", "Imported from "+src, "", nil, &newConfig)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	// Optionally register the image at REPO/TAG
 	if repo != "" {
 		if err := s.Set(repo, tag, img.ID, true); err != nil {
-			return job.Error(err)
+			return err
 		}
 	}
 	job.Stdout.Write(sf.FormatStatus("", img.ID))
@@ -93,5 +94,5 @@ func (s *TagStore) CmdImport(job *engine.Job) engine.Status {
 	if err = job.Eng.Job("log", "import", logID, "").Run(); err != nil {
 		log.Errorf("Error logging event 'import' for %s: %s", logID, err)
 	}
-	return engine.StatusOK
+	return nil
 }

+ 7 - 6
graph/list.go

@@ -1,6 +1,7 @@
 package graph
 
 import (
+	"fmt"
 	"log"
 	"path"
 	"strings"
@@ -16,7 +17,7 @@ var acceptedImageFilterTags = map[string]struct{}{
 	"label":    {},
 }
 
-func (s *TagStore) CmdImages(job *engine.Job) engine.Status {
+func (s *TagStore) CmdImages(job *engine.Job) error {
 	var (
 		allImages   map[string]*image.Image
 		err         error
@@ -26,11 +27,11 @@ func (s *TagStore) CmdImages(job *engine.Job) engine.Status {
 
 	imageFilters, err := filters.FromParam(job.Getenv("filters"))
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	for name := range imageFilters {
 		if _, ok := acceptedImageFilterTags[name]; !ok {
-			return job.Errorf("Invalid filter '%s'", name)
+			return fmt.Errorf("Invalid filter '%s'", name)
 		}
 	}
 
@@ -50,7 +51,7 @@ func (s *TagStore) CmdImages(job *engine.Job) engine.Status {
 		allImages, err = s.graph.Heads()
 	}
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	lookup := make(map[string]*engine.Env)
 	s.Lock()
@@ -133,7 +134,7 @@ func (s *TagStore) CmdImages(job *engine.Job) engine.Status {
 
 	outs.ReverseSort()
 	if _, err := outs.WriteListTo(job.Stdout); err != nil {
-		return job.Error(err)
+		return err
 	}
-	return engine.StatusOK
+	return nil
 }

+ 11 - 11
graph/load.go

@@ -18,10 +18,10 @@ import (
 
 // Loads a set of images into the repository. This is the complementary of ImageExport.
 // The input stream is an uncompressed tar ball containing images and metadata.
-func (s *TagStore) CmdLoad(job *engine.Job) engine.Status {
+func (s *TagStore) CmdLoad(job *engine.Job) error {
 	tmpImageDir, err := ioutil.TempDir("", "docker-import-")
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	defer os.RemoveAll(tmpImageDir)
 
@@ -30,11 +30,11 @@ func (s *TagStore) CmdLoad(job *engine.Job) engine.Status {
 	)
 
 	if err := os.Mkdir(repoDir, os.ModeDir); err != nil {
-		return job.Error(err)
+		return err
 	}
 	images, err := s.graph.Map()
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	excludes := make([]string, len(images))
 	i := 0
@@ -43,18 +43,18 @@ func (s *TagStore) CmdLoad(job *engine.Job) engine.Status {
 		i++
 	}
 	if err := chrootarchive.Untar(job.Stdin, repoDir, &archive.TarOptions{ExcludePatterns: excludes}); err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	dirs, err := ioutil.ReadDir(repoDir)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	for _, d := range dirs {
 		if d.IsDir() {
 			if err := s.recursiveLoad(job.Eng, d.Name(), tmpImageDir); err != nil {
-				return job.Error(err)
+				return err
 			}
 		}
 	}
@@ -63,21 +63,21 @@ func (s *TagStore) CmdLoad(job *engine.Job) engine.Status {
 	if err == nil {
 		repositories := map[string]Repository{}
 		if err := json.Unmarshal(repositoriesJson, &repositories); err != nil {
-			return job.Error(err)
+			return err
 		}
 
 		for imageName, tagMap := range repositories {
 			for tag, address := range tagMap {
 				if err := s.Set(imageName, tag, address, true); err != nil {
-					return job.Error(err)
+					return err
 				}
 			}
 		}
 	} else if !os.IsNotExist(err) {
-		return job.Error(err)
+		return err
 	}
 
-	return engine.StatusOK
+	return nil
 }
 
 func (s *TagStore) recursiveLoad(eng *engine.Engine, address, tmpImageDir string) error {

+ 4 - 2
graph/load_unsupported.go

@@ -3,9 +3,11 @@
 package graph
 
 import (
+	"fmt"
+
 	"github.com/docker/docker/engine"
 )
 
-func (s *TagStore) CmdLoad(job *engine.Job) engine.Status {
-	return job.Errorf("CmdLoad is not supported on this platform")
+func (s *TagStore) CmdLoad(job *engine.Job) error {
+	return fmt.Errorf("CmdLoad is not supported on this platform")
 }

+ 10 - 10
graph/pull.go

@@ -20,9 +20,9 @@ import (
 	"github.com/docker/docker/utils"
 )
 
-func (s *TagStore) CmdPull(job *engine.Job) engine.Status {
+func (s *TagStore) CmdPull(job *engine.Job) error {
 	if n := len(job.Args); n != 1 && n != 2 {
-		return job.Errorf("Usage: %s IMAGE [TAG|DIGEST]", job.Name)
+		return fmt.Errorf("Usage: %s IMAGE [TAG|DIGEST]", job.Name)
 	}
 
 	var (
@@ -36,7 +36,7 @@ func (s *TagStore) CmdPull(job *engine.Job) engine.Status {
 	// Resolve the Repository name from fqn to RepositoryInfo
 	repoInfo, err := registry.ResolveRepositoryInfo(job, localName)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	if len(job.Args) > 1 {
@@ -52,21 +52,21 @@ func (s *TagStore) CmdPull(job *engine.Job) engine.Status {
 			// Another pull of the same repository is already taking place; just wait for it to finish
 			job.Stdout.Write(sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", repoInfo.LocalName))
 			<-c
-			return engine.StatusOK
+			return nil
 		}
-		return job.Error(err)
+		return err
 	}
 	defer s.poolRemove("pull", utils.ImageReference(repoInfo.LocalName, tag))
 
 	log.Debugf("pulling image from host %q with remote name %q", repoInfo.Index.Name, repoInfo.RemoteName)
 	endpoint, err := repoInfo.GetEndpoint()
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	r, err := registry.NewSession(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, true)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	logName := repoInfo.LocalName
@@ -87,7 +87,7 @@ func (s *TagStore) CmdPull(job *engine.Job) engine.Status {
 			if err = job.Eng.Job("log", "pull", logName, "").Run(); err != nil {
 				log.Errorf("Error logging event 'pull' for %s: %s", logName, err)
 			}
-			return engine.StatusOK
+			return nil
 		} else if err != registry.ErrDoesNotExist && err != ErrV2RegistryUnavailable {
 			log.Errorf("Error from V2 registry: %s", err)
 		}
@@ -97,14 +97,14 @@ func (s *TagStore) CmdPull(job *engine.Job) engine.Status {
 
 	log.Debugf("pulling v1 repository with local name %q", repoInfo.LocalName)
 	if err = s.pullRepository(r, job.Stdout, repoInfo, tag, sf, job.GetenvBool("parallel")); err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	if err = job.Eng.Job("log", "pull", logName, "").Run(); err != nil {
 		log.Errorf("Error logging event 'pull' for %s: %s", logName, err)
 	}
 
-	return engine.StatusOK
+	return nil
 }
 
 func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, repoInfo *registry.RepositoryInfo, askedTag string, sf *utils.StreamFormatter, parallel bool) error {

+ 11 - 11
graph/push.go

@@ -492,9 +492,9 @@ func (s *TagStore) pushV2Image(r *registry.Session, img *image.Image, endpoint *
 }
 
 // FIXME: Allow to interrupt current push when new push of same image is done.
-func (s *TagStore) CmdPush(job *engine.Job) engine.Status {
+func (s *TagStore) CmdPush(job *engine.Job) error {
 	if n := len(job.Args); n != 1 {
-		return job.Errorf("Usage: %s IMAGE", job.Name)
+		return fmt.Errorf("Usage: %s IMAGE", job.Name)
 	}
 	var (
 		localName   = job.Args[0]
@@ -506,7 +506,7 @@ func (s *TagStore) CmdPush(job *engine.Job) engine.Status {
 	// Resolve the Repository name from fqn to RepositoryInfo
 	repoInfo, err := registry.ResolveRepositoryInfo(job, localName)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	tag := job.Getenv("tag")
@@ -514,18 +514,18 @@ func (s *TagStore) CmdPush(job *engine.Job) engine.Status {
 	job.GetenvJson("metaHeaders", &metaHeaders)
 
 	if _, err := s.poolAdd("push", repoInfo.LocalName); err != nil {
-		return job.Error(err)
+		return err
 	}
 	defer s.poolRemove("push", repoInfo.LocalName)
 
 	endpoint, err := repoInfo.GetEndpoint()
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	r, err := registry.NewSession(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, false)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	reposLen := 1
@@ -536,23 +536,23 @@ func (s *TagStore) CmdPush(job *engine.Job) engine.Status {
 	// If it fails, try to get the repository
 	localRepo, exists := s.Repositories[repoInfo.LocalName]
 	if !exists {
-		return job.Errorf("Repository does not exist: %s", repoInfo.LocalName)
+		return fmt.Errorf("Repository does not exist: %s", repoInfo.LocalName)
 	}
 
 	if repoInfo.Index.Official || endpoint.Version == registry.APIVersion2 {
 		err := s.pushV2Repository(r, localRepo, job.Stdout, repoInfo, tag, sf)
 		if err == nil {
-			return engine.StatusOK
+			return nil
 		}
 
 		if err != ErrV2RegistryUnavailable {
-			return job.Errorf("Error pushing to registry: %s", err)
+			return fmt.Errorf("Error pushing to registry: %s", err)
 		}
 	}
 
 	if err := s.pushRepository(r, job.Stdout, repoInfo, localRepo, tag, sf); err != nil {
-		return job.Error(err)
+		return err
 	}
-	return engine.StatusOK
+	return nil
 
 }

+ 23 - 23
graph/service.go

@@ -55,36 +55,36 @@ func (s *TagStore) Install(eng *engine.Engine) error {
 //			That is a requirement of the current registry client implementation,
 //			because a re-encoded json might invalidate the image checksum at
 //			the next upload, even with functionaly identical content.
-func (s *TagStore) CmdSet(job *engine.Job) engine.Status {
+func (s *TagStore) CmdSet(job *engine.Job) error {
 	if len(job.Args) != 1 {
-		return job.Errorf("usage: %s NAME", job.Name)
+		return fmt.Errorf("usage: %s NAME", job.Name)
 	}
 	var (
 		imgJSON = []byte(job.Getenv("json"))
 		layer   = job.Stdin
 	)
 	if len(imgJSON) == 0 {
-		return job.Errorf("mandatory key 'json' is not set")
+		return fmt.Errorf("mandatory key 'json' is not set")
 	}
 	// We have to pass an *image.Image object, even though it will be completely
 	// ignored in favor of the redundant json data.
 	// FIXME: the current prototype of Graph.Register is stupid and redundant.
 	img, err := image.NewImgJSON(imgJSON)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	if err := s.graph.Register(img, layer); err != nil {
-		return job.Error(err)
+		return err
 	}
-	return engine.StatusOK
+	return nil
 }
 
 // CmdGet returns information about an image.
 // If the image doesn't exist, an empty object is returned, to allow
 // checking for an image's existence.
-func (s *TagStore) CmdGet(job *engine.Job) engine.Status {
+func (s *TagStore) CmdGet(job *engine.Job) error {
 	if len(job.Args) != 1 {
-		return job.Errorf("usage: %s NAME", job.Name)
+		return fmt.Errorf("usage: %s NAME", job.Name)
 	}
 	name := job.Args[0]
 	res := &engine.Env{}
@@ -92,7 +92,7 @@ func (s *TagStore) CmdGet(job *engine.Job) engine.Status {
 	// Note: if the image doesn't exist, LookupImage returns
 	// nil, nil.
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	if img != nil {
 		// We don't directly expose all fields of the Image objects,
@@ -116,23 +116,23 @@ func (s *TagStore) CmdGet(job *engine.Job) engine.Status {
 		res.SetJson("Parent", img.Parent)
 	}
 	res.WriteTo(job.Stdout)
-	return engine.StatusOK
+	return nil
 }
 
 // CmdLookup return an image encoded in JSON
-func (s *TagStore) CmdLookup(job *engine.Job) engine.Status {
+func (s *TagStore) CmdLookup(job *engine.Job) error {
 	if len(job.Args) != 1 {
-		return job.Errorf("usage: %s NAME", job.Name)
+		return fmt.Errorf("usage: %s NAME", job.Name)
 	}
 	name := job.Args[0]
 	if image, err := s.LookupImage(name); err == nil && image != nil {
 		if job.GetenvBool("raw") {
 			b, err := image.RawJson()
 			if err != nil {
-				return job.Error(err)
+				return err
 			}
 			job.Stdout.Write(b)
-			return engine.StatusOK
+			return nil
 		}
 
 		out := &engine.Env{}
@@ -150,32 +150,32 @@ func (s *TagStore) CmdLookup(job *engine.Job) engine.Status {
 		out.SetInt64("Size", image.Size)
 		out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size)
 		if _, err = out.WriteTo(job.Stdout); err != nil {
-			return job.Error(err)
+			return err
 		}
-		return engine.StatusOK
+		return nil
 	}
-	return job.Errorf("No such image: %s", name)
+	return fmt.Errorf("No such image: %s", name)
 }
 
 // CmdTarLayer return the tarLayer of the image
-func (s *TagStore) CmdTarLayer(job *engine.Job) engine.Status {
+func (s *TagStore) CmdTarLayer(job *engine.Job) error {
 	if len(job.Args) != 1 {
-		return job.Errorf("usage: %s NAME", job.Name)
+		return fmt.Errorf("usage: %s NAME", job.Name)
 	}
 	name := job.Args[0]
 	if image, err := s.LookupImage(name); err == nil && image != nil {
 		fs, err := image.TarLayer()
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 		defer fs.Close()
 
 		written, err := io.Copy(job.Stdout, fs)
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 		log.Debugf("rendered layer for %s of [%d] size", image.ID, written)
-		return engine.StatusOK
+		return nil
 	}
-	return job.Errorf("No such image: %s", name)
+	return fmt.Errorf("No such image: %s", name)
 }

+ 5 - 6
graph/tag.go

@@ -1,19 +1,18 @@
 package graph
 
 import (
+	"fmt"
+
 	"github.com/docker/docker/engine"
 )
 
-func (s *TagStore) CmdTag(job *engine.Job) engine.Status {
+func (s *TagStore) CmdTag(job *engine.Job) error {
 	if len(job.Args) != 2 && len(job.Args) != 3 {
-		return job.Errorf("Usage: %s IMAGE REPOSITORY [TAG]\n", job.Name)
+		return fmt.Errorf("Usage: %s IMAGE REPOSITORY [TAG]\n", job.Name)
 	}
 	var tag string
 	if len(job.Args) == 3 {
 		tag = job.Args[2]
 	}
-	if err := s.Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force")); err != nil {
-		return job.Error(err)
-	}
-	return engine.StatusOK
+	return s.Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force"))
 }

+ 5 - 4
graph/viz.go

@@ -1,16 +1,17 @@
 package graph
 
 import (
+	"fmt"
 	"strings"
 
 	"github.com/docker/docker/engine"
 	"github.com/docker/docker/image"
 )
 
-func (s *TagStore) CmdViz(job *engine.Job) engine.Status {
+func (s *TagStore) CmdViz(job *engine.Job) error {
 	images, _ := s.graph.Map()
 	if images == nil {
-		return engine.StatusOK
+		return nil
 	}
 	job.Stdout.Write([]byte("digraph docker {\n"))
 
@@ -21,7 +22,7 @@ func (s *TagStore) CmdViz(job *engine.Job) engine.Status {
 	for _, image := range images {
 		parentImage, err = image.GetParent()
 		if err != nil {
-			return job.Errorf("Error while getting parent image: %v", err)
+			return fmt.Errorf("Error while getting parent image: %v", err)
 		}
 		if parentImage != nil {
 			job.Stdout.Write([]byte(" \"" + parentImage.ID + "\" -> \"" + image.ID + "\"\n"))
@@ -34,5 +35,5 @@ func (s *TagStore) CmdViz(job *engine.Job) engine.Status {
 		job.Stdout.Write([]byte(" \"" + id + "\" [label=\"" + id + "\\n" + strings.Join(repos, "\\n") + "\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n"))
 	}
 	job.Stdout.Write([]byte(" base [style=invisible]\n}\n"))
-	return engine.StatusOK
+	return nil
 }

+ 26 - 24
registry/service.go

@@ -1,6 +1,8 @@
 package registry
 
 import (
+	"fmt"
+
 	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/engine"
 )
@@ -38,7 +40,7 @@ func (s *Service) Install(eng *engine.Engine) error {
 // Auth contacts the public registry with the provided credentials,
 // and returns OK if authentication was sucessful.
 // It can be used to verify the validity of a client's credentials.
-func (s *Service) Auth(job *engine.Job) engine.Status {
+func (s *Service) Auth(job *engine.Job) error {
 	var (
 		authConfig = new(AuthConfig)
 		endpoint   *Endpoint
@@ -56,25 +58,25 @@ func (s *Service) Auth(job *engine.Job) engine.Status {
 	}
 
 	if index, err = ResolveIndexInfo(job, addr); err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	if endpoint, err = NewEndpoint(index); err != nil {
 		log.Errorf("unable to get new registry endpoint: %s", err)
-		return job.Error(err)
+		return err
 	}
 
 	authConfig.ServerAddress = endpoint.String()
 
 	if status, err = Login(authConfig, endpoint, HTTPRequestFactory(nil)); err != nil {
 		log.Errorf("unable to login against registry endpoint %s: %s", endpoint, err)
-		return job.Error(err)
+		return err
 	}
 
 	log.Infof("successful registry login for endpoint %s: %s", endpoint, status)
 	job.Printf("%s\n", status)
 
-	return engine.StatusOK
+	return nil
 }
 
 // Search queries the public registry for images matching the specified
@@ -93,9 +95,9 @@ func (s *Service) Auth(job *engine.Job) engine.Status {
 //	Results are sent as a collection of structured messages (using engine.Table).
 //	Each result is sent as a separate message.
 //	Results are ordered by number of stars on the public registry.
-func (s *Service) Search(job *engine.Job) engine.Status {
+func (s *Service) Search(job *engine.Job) error {
 	if n := len(job.Args); n != 1 {
-		return job.Errorf("Usage: %s TERM", job.Name)
+		return fmt.Errorf("Usage: %s TERM", job.Name)
 	}
 	var (
 		term        = job.Args[0]
@@ -107,20 +109,20 @@ func (s *Service) Search(job *engine.Job) engine.Status {
 
 	repoInfo, err := ResolveRepositoryInfo(job, term)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	// *TODO: Search multiple indexes.
 	endpoint, err := repoInfo.GetEndpoint()
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	r, err := NewSession(authConfig, HTTPRequestFactory(metaHeaders), endpoint, true)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	results, err := r.SearchRepositories(repoInfo.GetSearchTerm())
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	outs := engine.NewTable("star_count", 0)
 	for _, result := range results.Results {
@@ -130,31 +132,31 @@ func (s *Service) Search(job *engine.Job) engine.Status {
 	}
 	outs.ReverseSort()
 	if _, err := outs.WriteListTo(job.Stdout); err != nil {
-		return job.Error(err)
+		return err
 	}
-	return engine.StatusOK
+	return nil
 }
 
 // ResolveRepository splits a repository name into its components
 // and configuration of the associated registry.
-func (s *Service) ResolveRepository(job *engine.Job) engine.Status {
+func (s *Service) ResolveRepository(job *engine.Job) error {
 	var (
 		reposName = job.Args[0]
 	)
 
 	repoInfo, err := s.Config.NewRepositoryInfo(reposName)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	out := engine.Env{}
 	err = out.SetJson("repository", repoInfo)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	out.WriteTo(job.Stdout)
 
-	return engine.StatusOK
+	return nil
 }
 
 // Convenience wrapper for calling resolve_repository Job from a running job.
@@ -175,24 +177,24 @@ func ResolveRepositoryInfo(jobContext *engine.Job, reposName string) (*Repositor
 }
 
 // ResolveIndex takes indexName and returns index info
-func (s *Service) ResolveIndex(job *engine.Job) engine.Status {
+func (s *Service) ResolveIndex(job *engine.Job) error {
 	var (
 		indexName = job.Args[0]
 	)
 
 	index, err := s.Config.NewIndexInfo(indexName)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	out := engine.Env{}
 	err = out.SetJson("index", index)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	out.WriteTo(job.Stdout)
 
-	return engine.StatusOK
+	return nil
 }
 
 // Convenience wrapper for calling resolve_index Job from a running job.
@@ -213,13 +215,13 @@ func ResolveIndexInfo(jobContext *engine.Job, indexName string) (*IndexInfo, err
 }
 
 // GetRegistryConfig returns current registry configuration.
-func (s *Service) GetRegistryConfig(job *engine.Job) engine.Status {
+func (s *Service) GetRegistryConfig(job *engine.Job) error {
 	out := engine.Env{}
 	err := out.SetJson("config", s.Config)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	out.WriteTo(job.Stdout)
 
-	return engine.StatusOK
+	return nil
 }

+ 9 - 9
trust/service.go

@@ -21,9 +21,9 @@ func (t *TrustStore) Install(eng *engine.Engine) error {
 	return nil
 }
 
-func (t *TrustStore) CmdCheckKey(job *engine.Job) engine.Status {
+func (t *TrustStore) CmdCheckKey(job *engine.Job) error {
 	if n := len(job.Args); n != 1 {
-		return job.Errorf("Usage: %s NAMESPACE", job.Name)
+		return fmt.Errorf("Usage: %s NAMESPACE", job.Name)
 	}
 	var (
 		namespace = job.Args[0]
@@ -31,11 +31,11 @@ func (t *TrustStore) CmdCheckKey(job *engine.Job) engine.Status {
 	)
 
 	if keyBytes == "" {
-		return job.Errorf("Missing PublicKey")
+		return fmt.Errorf("Missing PublicKey")
 	}
 	pk, err := libtrust.UnmarshalPublicKeyJWK([]byte(keyBytes))
 	if err != nil {
-		return job.Errorf("Error unmarshalling public key: %s", err)
+		return fmt.Errorf("Error unmarshalling public key: %s", err)
 	}
 
 	permission := uint16(job.GetenvInt("Permission"))
@@ -47,13 +47,13 @@ func (t *TrustStore) CmdCheckKey(job *engine.Job) engine.Status {
 	defer t.RUnlock()
 	if t.graph == nil {
 		job.Stdout.Write([]byte("no graph"))
-		return engine.StatusOK
+		return nil
 	}
 
 	// Check if any expired grants
 	verified, err := t.graph.Verify(pk, namespace, permission)
 	if err != nil {
-		return job.Errorf("Error verifying key to namespace: %s", namespace)
+		return fmt.Errorf("Error verifying key to namespace: %s", namespace)
 	}
 	if !verified {
 		log.Debugf("Verification failed for %s using key %s", namespace, pk.KeyID())
@@ -64,11 +64,11 @@ func (t *TrustStore) CmdCheckKey(job *engine.Job) engine.Status {
 		job.Stdout.Write([]byte("verified"))
 	}
 
-	return engine.StatusOK
+	return nil
 }
 
-func (t *TrustStore) CmdUpdateBase(job *engine.Job) engine.Status {
+func (t *TrustStore) CmdUpdateBase(job *engine.Job) error {
 	t.fetch()
 
-	return engine.StatusOK
+	return nil
 }