
Merge pull request #9643 from LK4D4/fix_vet_errors

Fix vet errors
Michael Crosby 10 years ago
parent
commit
17cacf3326
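
Most of the hunks below make the same kind of change: positional struct literals such as execdriver.ExitStatus{0, false} become keyed literals like execdriver.ExitStatus{ExitCode: 0}, which is what go vet's composite-literal check asks for on structs imported from another package. As a rough, self-contained sketch of why the keyed form is preferred (ExitStatus is re-declared locally here with the two fields visible in the diff, so vet itself would not flag this exact file):

package main

import "fmt"

// ExitStatus mirrors the two fields of execdriver.ExitStatus that
// appear in the diff below.
type ExitStatus struct {
	ExitCode  int
	OOMKilled bool
}

func main() {
	// Unkeyed form: in the daemon code, where ExitStatus comes from
	// the execdriver package, go vet reports roughly "composite
	// literal uses unkeyed fields"; it also breaks silently if the
	// struct's fields are ever reordered or extended.
	before := ExitStatus{-1, false}

	// Keyed form: order-independent, and zero values such as
	// OOMKilled=false can simply be omitted.
	after := ExitStatus{ExitCode: -1}

	fmt.Println(before, after)
}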

+ 2 - 2
daemon/daemon.go

@@ -234,7 +234,7 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool) err
 		log.Debugf("killing old running container %s", container.ID)
 		log.Debugf("killing old running container %s", container.ID)
 
 
 		existingPid := container.Pid
 		existingPid := container.Pid
-		container.SetStopped(&execdriver.ExitStatus{0, false})
+		container.SetStopped(&execdriver.ExitStatus{ExitCode: 0})
 
 
 		// We only have to handle this for lxc because the other drivers will ensure that
 		// We only have to handle this for lxc because the other drivers will ensure that
 		// no processes are left when docker dies
 		// no processes are left when docker dies
@@ -266,7 +266,7 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool) err
 
 
 			log.Debugf("Marking as stopped")
 			log.Debugf("Marking as stopped")
 
 
-			container.SetStopped(&execdriver.ExitStatus{-127, false})
+			container.SetStopped(&execdriver.ExitStatus{ExitCode: -127})
 			if err := container.ToDisk(); err != nil {
 			if err := container.ToDisk(); err != nil {
 				return err
 				return err
 			}
 			}

+ 6 - 6
daemon/execdriver/lxc/driver.go

@@ -76,11 +76,11 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 	})

 	if err := d.generateEnvConfig(c); err != nil {
-		return execdriver.ExitStatus{-1, false}, err
+		return execdriver.ExitStatus{ExitCode: -1}, err
 	}
 	configPath, err := d.generateLXCConfig(c)
 	if err != nil {
-		return execdriver.ExitStatus{-1, false}, err
+		return execdriver.ExitStatus{ExitCode: -1}, err
 	}
 	params := []string{
 		"lxc-start",
@@ -154,11 +154,11 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 	c.ProcessConfig.Args = append([]string{name}, arg...)

 	if err := nodes.CreateDeviceNodes(c.Rootfs, c.AutoCreatedDevices); err != nil {
-		return execdriver.ExitStatus{-1, false}, err
+		return execdriver.ExitStatus{ExitCode: -1}, err
 	}

 	if err := c.ProcessConfig.Start(); err != nil {
-		return execdriver.ExitStatus{-1, false}, err
+		return execdriver.ExitStatus{ExitCode: -1}, err
 	}

 	var (
@@ -182,7 +182,7 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 			c.ProcessConfig.Process.Kill()
 			c.ProcessConfig.Wait()
 		}
-		return execdriver.ExitStatus{-1, false}, err
+		return execdriver.ExitStatus{ExitCode: -1}, err
 	}

 	c.ContainerPid = pid
@@ -193,7 +193,7 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba

 	<-waitLock

-	return execdriver.ExitStatus{getExitCode(c), false}, waitErr
+	return execdriver.ExitStatus{ExitCode: getExitCode(c)}, waitErr
 }

 /// Return the exit code of the process

+ 6 - 6
daemon/execdriver/native/driver.go

@@ -74,7 +74,7 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 	// take the Command and populate the libcontainer.Config from it
 	container, err := d.createContainer(c)
 	if err != nil {
-		return execdriver.ExitStatus{-1, false}, err
+		return execdriver.ExitStatus{ExitCode: -1}, err
 	}

 	var term execdriver.Terminal
@@ -85,7 +85,7 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 		term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes)
 	}
 	if err != nil {
-		return execdriver.ExitStatus{-1, false}, err
+		return execdriver.ExitStatus{ExitCode: -1}, err
 	}
 	c.ProcessConfig.Terminal = term

@@ -102,12 +102,12 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 	)

 	if err := d.createContainerRoot(c.ID); err != nil {
-		return execdriver.ExitStatus{-1, false}, err
+		return execdriver.ExitStatus{ExitCode: -1}, err
 	}
 	defer d.cleanContainer(c.ID)

 	if err := d.writeContainerFile(container, c.ID); err != nil {
-		return execdriver.ExitStatus{-1, false}, err
+		return execdriver.ExitStatus{ExitCode: -1}, err
 	}

 	execOutputChan := make(chan execOutput, 1)
@@ -146,7 +146,7 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba

 	select {
 	case execOutput := <-execOutputChan:
-		return execdriver.ExitStatus{execOutput.exitCode, false}, execOutput.err
+		return execdriver.ExitStatus{ExitCode: execOutput.exitCode}, execOutput.err
 	case <-waitForStart:
 		break
 	}
@@ -161,7 +161,7 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 	// wait for the container to exit.
 	execOutput := <-execOutputChan

-	return execdriver.ExitStatus{execOutput.exitCode, oomKill}, execOutput.err
+	return execdriver.ExitStatus{ExitCode: execOutput.exitCode, OOMKilled: oomKill}, execOutput.err
 }

 func (d *driver) Kill(p *execdriver.Command, sig int) error {

+ 4 - 4
daemon/graphdriver/aufs/aufs.go

@@ -134,15 +134,15 @@ func supportsAufs() error {
 	return ErrAufsNotSupported
 }

-func (a Driver) rootPath() string {
+func (a *Driver) rootPath() string {
 	return a.root
 }

-func (Driver) String() string {
+func (*Driver) String() string {
 	return "aufs"
 }

-func (a Driver) Status() [][2]string {
+func (a *Driver) Status() [][2]string {
 	ids, _ := loadIds(path.Join(a.rootPath(), "layers"))
 	return [][2]string{
 		{"Root Dir", a.rootPath()},
@@ -152,7 +152,7 @@ func (a Driver) Status() [][2]string {

 // Exists returns true if the given id is registered with
 // this driver
-func (a Driver) Exists(id string) bool {
+func (a *Driver) Exists(id string) bool {
 	if _, err := os.Lstat(path.Join(a.rootPath(), "layers", id)); err != nil {
 		return false
 	}
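
The aufs change above makes every method of Driver use a pointer receiver. A plausible reason vet cares, assuming (as in Docker's aufs driver of this era) that Driver embeds a sync.Mutex, is the copylocks check: a value receiver copies the whole struct, mutex included. The Driver below is a hypothetical stand-in, not the real one:

package main

import (
	"fmt"
	"sync"
)

// Driver is a stand-in for the aufs graph driver; the embedded
// mutex is an assumption about how the real struct guards its state.
type Driver struct {
	sync.Mutex
	root string
}

// With a value receiver, every call would operate on a copy of
// Driver and therefore on a copy of its mutex; go vet's copylocks
// check reports roughly "rootPath passes lock by value":
//
//	func (a Driver) rootPath() string { return a.root }
//
// The pointer receiver avoids the copy and matches the receiver
// kind used by the driver's other methods.
func (a *Driver) rootPath() string {
	return a.root
}

func main() {
	d := &Driver{root: "/var/lib/docker/aufs"}
	fmt.Println(d.rootPath())
}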

+ 10 - 10
daemon/graphdriver/devmapper/deviceset.go

@@ -45,15 +45,15 @@ type Transaction struct {
 }

 type DevInfo struct {
-	Hash          string     `json:"-"`
-	DeviceId      int        `json:"device_id"`
-	Size          uint64     `json:"size"`
-	TransactionId uint64     `json:"transaction_id"`
-	Initialized   bool       `json:"initialized"`
-	devices       *DeviceSet `json:"-"`
+	Hash          string `json:"-"`
+	DeviceId      int    `json:"device_id"`
+	Size          uint64 `json:"size"`
+	TransactionId uint64 `json:"transaction_id"`
+	Initialized   bool   `json:"initialized"`
+	devices       *DeviceSet

-	mountCount int    `json:"-"`
-	mountPath  string `json:"-"`
+	mountCount int
+	mountPath  string

 	// The global DeviceSet lock guarantees that we serialize all
 	// the calls to libdevmapper (which is not threadsafe), but we
@@ -65,12 +65,12 @@ type DevInfo struct {
 	// the global lock while holding the per-device locks all
 	// device locks must be aquired *before* the device lock, and
 	// multiple device locks should be aquired parent before child.
-	lock sync.Mutex `json:"-"`
+	lock sync.Mutex
 }

 type MetaData struct {
 	Devices     map[string]*DevInfo `json:"Devices"`
-	devicesLock sync.Mutex          `json:"-"` // Protects all read/writes to Devices map
+	devicesLock sync.Mutex          // Protects all read/writes to Devices map
 }

 type DeviceSet struct {
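
The deviceset.go change drops the `json:"-"` tags from unexported fields. encoding/json never marshals unexported fields in the first place, so the tags had no effect, and vet's struct-tag checking (at least in later Go releases) flags json tags on fields that can never be (un)marshalled. A trimmed, assumed version of DevInfo makes the point:

package main

import (
	"encoding/json"
	"fmt"
	"sync"
)

// DevInfo here keeps only a few of the real fields: exported fields
// keep their json tags, unexported ones need none, because
// encoding/json skips unexported fields unconditionally.
type DevInfo struct {
	Hash     string `json:"-"`
	DeviceId int    `json:"device_id"`
	Size     uint64 `json:"size"`

	mountCount int        // never marshalled, so a json:"-" tag is redundant
	lock       sync.Mutex // likewise; the tag was removed in the hunk above
}

func main() {
	info := DevInfo{Hash: "abc", DeviceId: 7, Size: 1024, mountCount: 3}
	out, _ := json.Marshal(&info)
	// Only the exported, tagged fields appear:
	// {"device_id":7,"size":1024}
	fmt.Println(string(out))
}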

+ 1 - 1
daemon/state_test.go

@@ -49,7 +49,7 @@ func TestStateRunStop(t *testing.T) {
 			atomic.StoreInt64(&exit, int64(exitCode))
 			close(stopped)
 		}()
-		s.SetStopped(&execdriver.ExitStatus{i, false})
+		s.SetStopped(&execdriver.ExitStatus{ExitCode: i})
 		if s.IsRunning() {
 			t.Fatal("State is running")
 		}

+ 1 - 1
daemon/utils_test.go

@@ -16,7 +16,7 @@ func TestMergeLxcConfig(t *testing.T) {

 	out, err := mergeLxcConfIntoOptions(hostConfig)
 	if err != nil {
-		t.Fatalf("Failed to merge Lxc Config ", err)
+		t.Fatalf("Failed to merge Lxc Config: %s", err)
 	}

 	cpuset := out[0]
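
This fix and the t.Fatal to t.Fatalf changes in the test files below are all instances of go vet's printf check: t.Fatal formats its arguments like Println and ignores format verbs, while t.Fatalf formats like Printf and needs a verb for every extra argument. A rough illustration using fmt.Println/fmt.Printf as stand-ins for t.Fatal/t.Fatalf (the strings are placeholders, not the real test values):

package main

import "fmt"

func main() {
	out, expected := "some output", "some expected substring"

	// Println-style call with format verbs: the %q are printed
	// literally, which vet reports as roughly "possible formatting
	// directive in Fatal call".
	fmt.Println("attach failed with error %q: expected %q", out, expected)

	// Printf-style call makes the verbs work as intended.
	fmt.Printf("attach failed with error %q: expected %q\n", out, expected)

	// The utils_test.go case is the mirror image: Printf-style with an
	// argument but no verb renders it as a %!(EXTRA ...) blob, hence
	// the added ": %s" in the hunk above.
	err := fmt.Errorf("bad lxc conf")
	fmt.Printf("Failed to merge Lxc Config \n", err)
	fmt.Printf("Failed to merge Lxc Config: %s\n", err)
}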

+ 1 - 1
integration-cli/docker_cli_attach_test.go

@@ -122,7 +122,7 @@ func TestAttachTtyWithoutStdin(t *testing.T) {
 		if out, _, err := runCommandWithOutput(cmd); err == nil {
 			t.Fatal("attach should have failed")
 		} else if !strings.Contains(out, expected) {
-			t.Fatal("attach failed with error %q: expected %q", out, expected)
+			t.Fatalf("attach failed with error %q: expected %q", out, expected)
 		}
 	}()


+ 1 - 1
integration-cli/docker_cli_build_test.go

@@ -3903,7 +3903,7 @@ func TestBuildStderr(t *testing.T) {
 		t.Fatal(err)
 	}
 	if stderr != "" {
-		t.Fatal("Stderr should have been empty, instead its: %q", stderr)
+		t.Fatalf("Stderr should have been empty, instead its: %q", stderr)
 	}
 	logDone("build - testing stderr")
 }

+ 1 - 1
integration-cli/docker_cli_exec_test.go

@@ -339,7 +339,7 @@ func TestExecTtyWithoutStdin(t *testing.T) {
 		if out, _, err := runCommandWithOutput(cmd); err == nil {
 			t.Fatal("exec should have failed")
 		} else if !strings.Contains(out, expected) {
-			t.Fatal("exec failed with error %q: expected %q", out, expected)
+			t.Fatalf("exec failed with error %q: expected %q", out, expected)
 		}
 	}()


+ 1 - 1
integration-cli/docker_cli_run_test.go

@@ -2760,7 +2760,7 @@ func TestRunTtyWithPipe(t *testing.T) {
 		if out, _, err := runCommandWithOutput(cmd); err == nil {
 			t.Fatal("run should have failed")
 		} else if !strings.Contains(out, expected) {
-			t.Fatal("run failed with error %q: expected %q", out, expected)
+			t.Fatalf("run failed with error %q: expected %q", out, expected)
 		}
 	}()


+ 1 - 1
integration/runtime_test.go

@@ -653,7 +653,7 @@ func TestRestore(t *testing.T) {
 	if err := container3.Run(); err != nil {
 		t.Fatal(err)
 	}
-	container2.SetStopped(&execdriver.ExitStatus{0, false})
+	container2.SetStopped(&execdriver.ExitStatus{ExitCode: 0})
 }

 func TestDefaultContainerName(t *testing.T) {

+ 1 - 1
pkg/chrootarchive/archive.go

@@ -16,7 +16,7 @@ import (
 	"github.com/docker/docker/pkg/reexec"
 	"github.com/docker/docker/pkg/reexec"
 )
 )
 
 
-var chrootArchiver = &archive.Archiver{Untar}
+var chrootArchiver = &archive.Archiver{Untar: Untar}
 
 
 func chroot(path string) error {
 func chroot(path string) error {
 	if err := syscall.Chroot(path); err != nil {
 	if err := syscall.Chroot(path); err != nil {

+ 1 - 1
pkg/symlink/fs_test.go

@@ -311,7 +311,7 @@ func TestFollowSymlinkEmpty(t *testing.T) {
 		t.Fatal(err)
 	}
 	if res != wd {
-		t.Fatal("expected %q got %q", wd, res)
+		t.Fatalf("expected %q got %q", wd, res)
 	}
 }