
Add gosimple linter

Update gometalinter

Signed-off-by: Daniel Nephin <dnephin@docker.com>
Daniel Nephin, 7 years ago
commit f7f101d57e
52 changed files with 137 additions and 271 deletions
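
The rewrites below all follow from checks reported by gosimple. A minimal, hypothetical sketch of the main patterns (the helper names are invented; the patterns themselves are taken from the hunks in this commit):

package main

import (
	"fmt"
	"time"
)

// runTask is a hypothetical stand-in; each pattern below mirrors a rewrite
// that gosimple reports and that this commit applies to the files listed below.
func runTask() error { return nil }

func examples(ready bool, deadline time.Time) error {
	// `if ready == true { ... }` simplifies to `if ready { ... }`.
	if ready {
		fmt.Println("ready")
	}

	// `deadline.Sub(time.Now())` simplifies to `time.Until(deadline)`.
	fmt.Println("remaining:", time.Until(deadline))

	// `make([]string, n, n)` simplifies to `make([]string, n)`;
	// the explicit capacity is redundant when it equals the length.
	names := make([]string, 3)
	fmt.Println("len:", len(names))

	// `if err := runTask(); err != nil { return err }; return nil`
	// collapses to a single `return runTask()`.
	return runTask()
}

func main() {
	_ = examples(true, time.Now().Add(time.Second))
}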
  1. api/server/httputils/write_log_stream.go (+2 -9)
  2. api/server/router/swarm/cluster_routes.go (+2 -10)
  3. api/types/filters/parse_test.go (+2 -2)
  4. api/types/time/timestamp.go (+1 -3)
  5. builder/dockerfile/bflag_test.go (+4 -4)
  6. builder/dockerfile/internals.go (+2 -8)
  7. builder/dockerfile/parser/parser.go (+2 -2)
  8. builder/remotecontext/detect_test.go (+1 -1)
  9. builder/remotecontext/remote.go (+1 -1)
  10. client/container_commit.go (+1 -1)
  11. client/hijack.go (+1 -1)
  12. cmd/dockerd/daemon.go (+1 -1)
  13. container/view.go (+1 -4)
  14. contrib/docker-device-tool/device_tool.go (+0 -9)
  15. daemon/cluster/executor/container/attachment.go (+2 -10)
  16. daemon/config/config_unix_test.go (+7 -12)
  17. daemon/exec/exec.go (+1 -1)
  18. daemon/graphdriver/aufs/aufs_test.go (+30 -59)
  19. daemon/graphdriver/devmapper/deviceset.go (+7 -8)
  20. daemon/graphdriver/devmapper/driver.go (+10 -16)
  21. daemon/graphdriver/fsdiff.go (+1 -1)
  22. daemon/graphdriver/graphtest/graphbench_unix.go (+2 -4)
  23. daemon/graphdriver/overlay/overlay.go (+1 -4)
  24. daemon/graphdriver/vfs/driver.go (+1 -4)
  25. daemon/graphdriver/zfs/zfs.go (+1 -1)
  26. daemon/images.go (+2 -4)
  27. daemon/logger/factory.go (+1 -1)
  28. daemon/logger/jsonfilelog/read.go (+1 -2)
  29. daemon/logger/ring.go (+0 -1)
  30. daemon/logger/splunk/splunk_test.go (+9 -12)
  31. daemon/metrics.go (+0 -1)
  32. daemon/network.go (+1 -1)
  33. daemon/oci_linux.go (+1 -0)
  34. daemon/reload_test.go (+3 -6)
  35. daemon/top_unix.go (+1 -0)
  36. distribution/push_v2.go (+1 -6)
  37. hack/dockerfile/binaries-commits (+1 -1)
  38. hack/validate/gometalinter.json (+2 -3)
  39. image/fs_test.go (+3 -8)
  40. image/tarexport/load.go (+1 -2)
  41. libcontainerd/client_linux.go (+1 -4)
  42. pkg/authorization/authz.go (+1 -1)
  43. pkg/devicemapper/devmapper.go (+2 -4)
  44. pkg/discovery/kv/kv_test.go (+0 -2)
  45. pkg/loopback/ioctl.go (+2 -8)
  46. pkg/pools/pools_test.go (+9 -12)
  47. pkg/signal/signal_linux_test.go (+1 -1)
  48. reference/store_test.go (+3 -3)
  49. registry/config.go (+2 -2)
  50. registry/registry_test.go (+3 -6)
  51. runconfig/hostconfig.go (+1 -1)
  52. runconfig/hostconfig_test.go (+1 -3)

+ 2 - 9
api/server/httputils/write_log_stream.go

@@ -17,20 +17,13 @@ import (
 
 // WriteLogStream writes an encoded byte stream of log messages from the
 // messages channel, multiplexing them with a stdcopy.Writer if mux is true
-func WriteLogStream(ctx context.Context, w io.Writer, msgs <-chan *backend.LogMessage, config *types.ContainerLogsOptions, mux bool) {
+func WriteLogStream(_ context.Context, w io.Writer, msgs <-chan *backend.LogMessage, config *types.ContainerLogsOptions, mux bool) {
 	wf := ioutils.NewWriteFlusher(w)
 	defer wf.Close()
 
 	wf.Flush()
 
-	// this might seem like doing below is clear:
-	//   var outStream io.Writer = wf
-	// however, this GREATLY DISPLEASES golint, and if you do that, it will
-	// fail CI. we need outstream to be type writer because if we mux streams,
-	// we will need to reassign all of the streams to be stdwriters, which only
-	// conforms to the io.Writer interface.
-	var outStream io.Writer
-	outStream = wf
+	outStream := io.Writer(wf)
 	errStream := outStream
 	sysErrStream := errStream
 	if mux {

+ 2 - 10
api/server/router/swarm/cluster_routes.go

@@ -427,11 +427,7 @@ func (sr *swarmRouter) updateSecret(ctx context.Context, w http.ResponseWriter,
 	}
 
 	id := vars["id"]
-	if err := sr.backend.UpdateSecret(id, version, secret); err != nil {
-		return err
-	}
-
-	return nil
+	return sr.backend.UpdateSecret(id, version, secret)
 }
 
 func (sr *swarmRouter) getConfigs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@@ -498,9 +494,5 @@ func (sr *swarmRouter) updateConfig(ctx context.Context, w http.ResponseWriter,
 	}
 
 	id := vars["id"]
-	if err := sr.backend.UpdateConfig(id, version, config); err != nil {
-		return err
-	}
-
-	return nil
+	return sr.backend.UpdateConfig(id, version, config)
 }

+ 2 - 2
api/types/filters/parse_test.go

@@ -182,7 +182,7 @@ func TestArgsMatchKVList(t *testing.T) {
 	}
 
 	for args, field := range matches {
-		if args.MatchKVList(field, sources) != true {
+		if !args.MatchKVList(field, sources) {
 			t.Fatalf("Expected true for %v on %v, got false", sources, args)
 		}
 	}
@@ -202,7 +202,7 @@ func TestArgsMatchKVList(t *testing.T) {
 	}
 
 	for args, field := range differs {
-		if args.MatchKVList(field, sources) != false {
+		if args.MatchKVList(field, sources) {
 			t.Fatalf("Expected false for %v on %v, got true", sources, args)
 		}
 	}

+ 1 - 3
api/types/time/timestamp.go

@@ -29,10 +29,8 @@ func GetTimestamp(value string, reference time.Time) (string, error) {
 	}
 
 	var format string
-	var parseInLocation bool
-
 	// if the string has a Z or a + or three dashes use parse otherwise use parseinlocation
-	parseInLocation = !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3)
+	parseInLocation := !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3)
 
 	if strings.Contains(value, ".") {
 		if parseInLocation {

+ 4 - 4
builder/dockerfile/bflag_test.go

@@ -34,10 +34,10 @@ func TestBuilderFlags(t *testing.T) {
 		t.Fatalf("Test3 of %q was supposed to work: %s", bf.Args, err)
 	}
 
-	if flStr1.IsUsed() == true {
+	if flStr1.IsUsed() {
 		t.Fatal("Test3 - str1 was not used!")
 	}
-	if flBool1.IsUsed() == true {
+	if flBool1.IsUsed() {
 		t.Fatal("Test3 - bool1 was not used!")
 	}
 
@@ -58,10 +58,10 @@ func TestBuilderFlags(t *testing.T) {
 	if flBool1.IsTrue() {
 		t.Fatal("Bool1 was supposed to default to: false")
 	}
-	if flStr1.IsUsed() == true {
+	if flStr1.IsUsed() {
 		t.Fatal("Str1 was not used!")
 	}
-	if flBool1.IsUsed() == true {
+	if flBool1.IsUsed() {
 		t.Fatal("Bool1 was not used!")
 	}
 

+ 2 - 8
builder/dockerfile/internals.go

@@ -206,10 +206,7 @@ func lookupUser(userStr, filepath string) (int, error) {
 		return uid, nil
 	}
 	users, err := lcUser.ParsePasswdFileFilter(filepath, func(u lcUser.User) bool {
-		if u.Name == userStr {
-			return true
-		}
-		return false
+		return u.Name == userStr
 	})
 	if err != nil {
 		return 0, err
@@ -228,10 +225,7 @@ func lookupGroup(groupStr, filepath string) (int, error) {
 		return gid, nil
 	}
 	groups, err := lcUser.ParseGroupFileFilter(filepath, func(g lcUser.Group) bool {
-		if g.Name == groupStr {
-			return true
-		}
-		return false
+		return g.Name == groupStr
 	})
 	if err != nil {
 		return 0, err

+ 2 - 2
builder/dockerfile/parser/parser.go

@@ -143,7 +143,7 @@ func (d *Directive) possibleParserDirective(line string) error {
 	if len(tecMatch) != 0 {
 		for i, n := range tokenEscapeCommand.SubexpNames() {
 			if n == "escapechar" {
-				if d.escapeSeen == true {
+				if d.escapeSeen {
 					return errors.New("only one escape parser directive can be used")
 				}
 				d.escapeSeen = true
@@ -159,7 +159,7 @@ func (d *Directive) possibleParserDirective(line string) error {
 		if len(tpcMatch) != 0 {
 			for i, n := range tokenPlatformCommand.SubexpNames() {
 				if n == "platform" {
-					if d.platformSeen == true {
+					if d.platformSeen {
 						return errors.New("only one platform parser directive can be used")
 					}
 					d.platformSeen = true

+ 1 - 1
builder/remotecontext/detect_test.go

@@ -21,7 +21,7 @@ const (
 const shouldStayFilename = "should_stay"
 
 func extractFilenames(files []os.FileInfo) []string {
-	filenames := make([]string, len(files), len(files))
+	filenames := make([]string, len(files))
 
 	for i, file := range files {
 		filenames[i] = file.Name()

+ 1 - 1
builder/remotecontext/remote.go

@@ -116,7 +116,7 @@ func inspectResponse(ct string, r io.Reader, clen int64) (string, io.ReadCloser,
 		plen = maxPreambleLength
 	}
 
-	preamble := make([]byte, plen, plen)
+	preamble := make([]byte, plen)
 	rlen, err := r.Read(preamble)
 	if rlen == 0 {
 		return ct, ioutil.NopCloser(r), errors.New("empty response")

+ 1 - 1
client/container_commit.go

@@ -39,7 +39,7 @@ func (cli *Client) ContainerCommit(ctx context.Context, container string, option
 	for _, change := range options.Changes {
 		query.Add("changes", change)
 	}
-	if options.Pause != true {
+	if !options.Pause {
 		query.Set("pause", "0")
 	}
 

+ 1 - 1
client/hijack.go

@@ -70,7 +70,7 @@ func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Con
 	timeout := dialer.Timeout
 
 	if !dialer.Deadline.IsZero() {
-		deadlineTimeout := dialer.Deadline.Sub(time.Now())
+		deadlineTimeout := time.Until(dialer.Deadline)
 		if timeout == 0 || deadlineTimeout < timeout {
 			timeout = deadlineTimeout
 		}

+ 1 - 1
cmd/dockerd/daemon.go

@@ -467,7 +467,7 @@ func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) {
 		return nil, err
 	}
 
-	if conf.V2Only == false {
+	if !conf.V2Only {
 		logrus.Warnf(`The "disable-legacy-registry" option is deprecated and wil be removed in Docker v17.12. Interacting with legacy (v1) registries will no longer be supported in Docker v17.12"`)
 	}
 

+ 1 - 4
container/view.go

@@ -203,10 +203,7 @@ func (db *memDB) ReserveName(name, containerID string) error {
 // Once released, a name can be reserved again
 func (db *memDB) ReleaseName(name string) error {
 	return db.withTxn(func(txn *memdb.Txn) error {
-		if err := txn.Delete(memdbNamesTable, nameAssociation{name: name}); err != nil {
-			return err
-		}
-		return nil
+		return txn.Delete(memdbNamesTable, nameAssociation{name: name})
 	})
 }
 

+ 0 - 9
contrib/docker-device-tool/device_tool.go

@@ -90,14 +90,12 @@ func main() {
 		fmt.Printf("Sector size: %d\n", status.SectorSize)
 		fmt.Printf("Data use: %d of %d (%.1f %%)\n", status.Data.Used, status.Data.Total, 100.0*float64(status.Data.Used)/float64(status.Data.Total))
 		fmt.Printf("Metadata use: %d of %d (%.1f %%)\n", status.Metadata.Used, status.Metadata.Total, 100.0*float64(status.Metadata.Used)/float64(status.Metadata.Total))
-		break
 	case "list":
 		ids := devices.List()
 		sort.Strings(ids)
 		for _, id := range ids {
 			fmt.Println(id)
 		}
-		break
 	case "device":
 		if flag.NArg() < 2 {
 			usage()
@@ -113,7 +111,6 @@ func main() {
 		fmt.Printf("Size in Sectors: %d\n", status.SizeInSectors)
 		fmt.Printf("Mapped Sectors: %d\n", status.MappedSectors)
 		fmt.Printf("Highest Mapped Sector: %d\n", status.HighestMappedSector)
-		break
 	case "resize":
 		if flag.NArg() < 2 {
 			usage()
@@ -131,7 +128,6 @@ func main() {
 			os.Exit(1)
 		}
 
-		break
 	case "snap":
 		if flag.NArg() < 3 {
 			usage()
@@ -142,7 +138,6 @@ func main() {
 			fmt.Println("Can't create snap device: ", err)
 			os.Exit(1)
 		}
-		break
 	case "remove":
 		if flag.NArg() < 2 {
 			usage()
@@ -153,7 +148,6 @@ func main() {
 			fmt.Println("Can't remove device: ", err)
 			os.Exit(1)
 		}
-		break
 	case "mount":
 		if flag.NArg() < 3 {
 			usage()
@@ -164,13 +158,10 @@ func main() {
 			fmt.Println("Can't mount device: ", err)
 			os.Exit(1)
 		}
-		break
 	default:
 		fmt.Printf("Unknown command %s\n", args[0])
 		usage()
 
 		os.Exit(1)
 	}
-
-	return
 }

+ 2 - 10
daemon/cluster/executor/container/attachment.go

@@ -40,11 +40,7 @@ func (nc *networkAttacherController) Update(ctx context.Context, t *api.Task) er
 
 func (nc *networkAttacherController) Prepare(ctx context.Context) error {
 	// Make sure all the networks that the task needs are created.
-	if err := nc.adapter.createNetworks(ctx); err != nil {
-		return err
-	}
-
-	return nil
+	return nc.adapter.createNetworks(ctx)
 }
 
 func (nc *networkAttacherController) Start(ctx context.Context) error {
@@ -69,11 +65,7 @@ func (nc *networkAttacherController) Terminate(ctx context.Context) error {
 func (nc *networkAttacherController) Remove(ctx context.Context) error {
 	// Try removing the network referenced in this task in case this
 	// task is the last one referencing it
-	if err := nc.adapter.removeNetworks(ctx); err != nil {
-		return err
-	}
-
-	return nil
+	return nc.adapter.removeNetworks(ctx)
 }
 
 func (nc *networkAttacherController) Close() error {

+ 7 - 12
daemon/config/config_unix_test.go

@@ -6,7 +6,7 @@ import (
 	"testing"
 
 	"github.com/docker/docker/opts"
-	"github.com/docker/go-units"
+	units "github.com/docker/go-units"
 	"github.com/gotestyourself/gotestyourself/fs"
 	"github.com/spf13/pflag"
 	"github.com/stretchr/testify/assert"
@@ -14,7 +14,7 @@
 )
 
 func TestGetConflictFreeConfiguration(t *testing.T) {
-	configFileData := string([]byte(`
+	configFileData := `
 		{
 			"debug": true,
 			"default-ulimits": {
@@ -27,7 +27,7 @@ func TestGetConflictFreeConfiguration(t *testing.T) {
 			"log-opts": {
 				"tag": "test_tag"
 			}
-		}`))
+		}`
 
 	file := fs.NewFile(t, "docker-config", fs.WithContent(configFileData))
 	defer file.Remove()
@@ -55,7 +55,7 @@ func TestGetConflictFreeConfiguration(t *testing.T) {
 }
 
 func TestDaemonConfigurationMerge(t *testing.T) {
-	configFileData := string([]byte(`
+	configFileData := `
 		{
 			"debug": true,
 			"default-ulimits": {
@@ -68,7 +68,7 @@ func TestDaemonConfigurationMerge(t *testing.T) {
 			"log-opts": {
 				"tag": "test_tag"
 			}
-		}`))
+		}`
 
 	file := fs.NewFile(t, "docker-config", fs.WithContent(configFileData))
 	defer file.Remove()
@@ -115,10 +115,7 @@ func TestDaemonConfigurationMerge(t *testing.T) {
 }
 
 func TestDaemonConfigurationMergeShmSize(t *testing.T) {
-	data := string([]byte(`
-		{
-			"default-shm-size": "1g"
-		}`))
+	data := `{"default-shm-size": "1g"}`
 
 	file := fs.NewFile(t, "docker-config", fs.WithContent(data))
 	defer file.Remove()
@@ -133,7 +130,5 @@ func TestDaemonConfigurationMergeShmSize(t *testing.T) {
 	require.NoError(t, err)
 
 	expectedValue := 1 * 1024 * 1024 * 1024
-	if cc.ShmSize.Value() != int64(expectedValue) {
-		t.Fatalf("expected default shm size %d, got %d", expectedValue, cc.ShmSize.Value())
-	}
+	assert.Equal(t, int64(expectedValue), cc.ShmSize.Value())

+ 1 - 1
daemon/exec/exec.go

@@ -75,7 +75,7 @@ type Store struct {
 
 // NewStore initializes a new exec store.
 func NewStore() *Store {
-	return &Store{commands: make(map[string]*Config, 0)}
+	return &Store{commands: make(map[string]*Config)}
 }
 
 // Commands returns the exec configurations in the store.

+ 30 - 59
daemon/graphdriver/aufs/aufs_test.go

@@ -18,6 +18,8 @@ import (
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/reexec"
 	"github.com/docker/docker/pkg/stringid"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
 var (
@@ -179,9 +181,8 @@ func TestCleanupWithNoDirs(t *testing.T) {
 	d := newDriver(t)
 	defer os.RemoveAll(tmp)
 
-	if err := d.Cleanup(); err != nil {
-		t.Fatal(err)
-	}
+	err := d.Cleanup()
+	assert.NoError(t, err)
 }
 
 func TestCleanupWithDir(t *testing.T) {
@@ -201,18 +202,12 @@ func TestMountedFalseResponse(t *testing.T) {
 	d := newDriver(t)
 	defer os.RemoveAll(tmp)
 
-	if err := d.Create("1", "", nil); err != nil {
-		t.Fatal(err)
-	}
+	err := d.Create("1", "", nil)
+	require.NoError(t, err)
 
 	response, err := d.mounted(d.getDiffPath("1"))
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if response != false {
-		t.Fatal("Response if dir id 1 is mounted should be false")
-	}
+	require.NoError(t, err)
+	assert.False(t, response)
 }
 
 func TestMountedTrueResponse(t *testing.T) {
@@ -220,26 +215,17 @@ func TestMountedTrueResponse(t *testing.T) {
 	defer os.RemoveAll(tmp)
 	defer d.Cleanup()
 
-	if err := d.Create("1", "", nil); err != nil {
-		t.Fatal(err)
-	}
-	if err := d.Create("2", "1", nil); err != nil {
-		t.Fatal(err)
-	}
+	err := d.Create("1", "", nil)
+	require.NoError(t, err)
+	err = d.Create("2", "1", nil)
+	require.NoError(t, err)
 
-	_, err := d.Get("2", "")
-	if err != nil {
-		t.Fatal(err)
-	}
+	_, err = d.Get("2", "")
+	require.NoError(t, err)
 
 	response, err := d.mounted(d.pathCache["2"])
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if response != true {
-		t.Fatal("Response if dir id 2 is mounted should be true")
-	}
+	require.NoError(t, err)
+	assert.True(t, response)
 }
 
 func TestMountWithParent(t *testing.T) {
@@ -574,9 +560,8 @@ func TestStatus(t *testing.T) {
 	}
 
 	status := d.Status()
-	if status == nil || len(status) == 0 {
-		t.Fatal("Status should not be nil or empty")
-	}
+	assert.Len(t, status, 4)
+
 	rootDir := status[0]
 	dirs := status[2]
 	if rootDir[0] != "Root Dir" {
@@ -677,27 +662,19 @@ func testMountMoreThan42Layers(t *testing.T, mountPath string) {
 		}
 		current = hash(current)
 
-		if err := d.CreateReadWrite(current, parent, nil); err != nil {
-			t.Logf("Current layer %d", i)
-			t.Error(err)
-		}
+		err := d.CreateReadWrite(current, parent, nil)
+		require.NoError(t, err, "current layer %d", i)
+
		point, err := d.Get(current, "")
-		if err != nil {
-			t.Logf("Current layer %d", i)
-			t.Error(err)
-		}
+		require.NoError(t, err, "current layer %d", i)
+
		f, err := os.Create(path.Join(point, current))
-		if err != nil {
-			t.Logf("Current layer %d", i)
-			t.Error(err)
-		}
+		require.NoError(t, err, "current layer %d", i)
 		f.Close()
 
 		if i%10 == 0 {
-			if err := os.Remove(path.Join(point, parent)); err != nil {
-				t.Logf("Current layer %d", i)
-				t.Error(err)
-			}
+			err := os.Remove(path.Join(point, parent))
+			require.NoError(t, err, "current layer %d", i)
 			expected--
 		}
 		last = current
@@ -705,20 +682,14 @@ ...
 
 	// Perform the actual mount for the top most image
 	point, err := d.Get(last, "")
-	if err != nil {
-		t.Error(err)
-	}
+	require.NoError(t, err)
 	files, err := ioutil.ReadDir(point)
-	if err != nil {
-		t.Error(err)
-	}
-	if len(files) != expected {
-		t.Errorf("Expected %d got %d", expected, len(files))
-	}
+	require.NoError(t, err)
+	assert.Len(t, files, expected)
 }
 
 func TestMountMoreThan42Layers(t *testing.T) {
-	os.RemoveAll(tmpOuter)
+	defer os.RemoveAll(tmpOuter)
 	testMountMoreThan42Layers(t, tmp)
 }
 

+ 7 - 8
daemon/graphdriver/devmapper/deviceset.go

@@ -1254,14 +1254,13 @@ func (devices *DeviceSet) setupBaseImage() error {
 }
 
 func setCloseOnExec(name string) {
-	if fileInfos, _ := ioutil.ReadDir("/proc/self/fd"); fileInfos != nil {
-		for _, i := range fileInfos {
-			link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name()))
-			if link == name {
-				fd, err := strconv.Atoi(i.Name())
-				if err == nil {
-					unix.CloseOnExec(fd)
-				}
+	fileInfos, _ := ioutil.ReadDir("/proc/self/fd")
+	for _, i := range fileInfos {
+		link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name()))
+		if link == name {
+			fd, err := strconv.Atoi(i.Name())
+			if err == nil {
+				unix.CloseOnExec(fd)
 			}
 		}
 	}

+ 10 - 16
daemon/graphdriver/devmapper/driver.go

@@ -69,18 +69,18 @@ func (d *Driver) Status() [][2]string {
 
 	status := [][2]string{
 		{"Pool Name", s.PoolName},
-		{"Pool Blocksize", fmt.Sprintf("%s", units.HumanSize(float64(s.SectorSize)))},
-		{"Base Device Size", fmt.Sprintf("%s", units.HumanSize(float64(s.BaseDeviceSize)))},
+		{"Pool Blocksize", units.HumanSize(float64(s.SectorSize))},
+		{"Base Device Size", units.HumanSize(float64(s.BaseDeviceSize))},
 		{"Backing Filesystem", s.BaseDeviceFS},
 		{"Data file", s.DataFile},
 		{"Metadata file", s.MetadataFile},
-		{"Data Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Used)))},
-		{"Data Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Total)))},
-		{"Data Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Available)))},
-		{"Metadata Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Used)))},
-		{"Metadata Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Total)))},
-		{"Metadata Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Available)))},
-		{"Thin Pool Minimum Free Space", fmt.Sprintf("%s", units.HumanSize(float64(s.MinFreeSpace)))},
+		{"Data Space Used", units.HumanSize(float64(s.Data.Used))},
+		{"Data Space Total", units.HumanSize(float64(s.Data.Total))},
+		{"Data Space Available", units.HumanSize(float64(s.Data.Available))},
+		{"Metadata Space Used", units.HumanSize(float64(s.Metadata.Used))},
+		{"Metadata Space Total", units.HumanSize(float64(s.Metadata.Total))},
+		{"Metadata Space Available", units.HumanSize(float64(s.Metadata.Available))},
+		{"Thin Pool Minimum Free Space", units.HumanSize(float64(s.MinFreeSpace))},
 		{"Udev Sync Supported", fmt.Sprintf("%v", s.UdevSyncSupported)},
 		{"Deferred Removal Enabled", fmt.Sprintf("%v", s.DeferredRemoveEnabled)},
 		{"Deferred Deletion Enabled", fmt.Sprintf("%v", s.DeferredDeleteEnabled)},
@@ -159,13 +159,7 @@ func (d *Driver) Remove(id string) error {
 	if err := d.DeviceSet.DeleteDevice(id, false); err != nil {
 		return fmt.Errorf("failed to remove device %s: %v", id, err)
 	}
-
-	mp := path.Join(d.home, "mnt", id)
-	if err := system.EnsureRemoveAll(mp); err != nil {
-		return err
-	}
-
-	return nil
+	return system.EnsureRemoveAll(path.Join(d.home, "mnt", id))
 }
 
 // Get mounts a device with given id into the root filesystem

+ 1 - 1
daemon/graphdriver/fsdiff.go

@@ -94,7 +94,7 @@ func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err err
 		// are extracted from tar's with full second precision on modified time.
 		// We need this hack here to make sure calls within same second receive
 		// correct result.
-		time.Sleep(startTime.Truncate(time.Second).Add(time.Second).Sub(time.Now()))
+		time.Sleep(time.Until(startTime.Truncate(time.Second).Add(time.Second)))
 		return err
 	}), nil
 }

+ 2 - 4
daemon/graphdriver/graphtest/graphbench_unix.go

@@ -3,13 +3,13 @@
 package graphtest
 
 import (
-	"bytes"
 	"io"
 	"io/ioutil"
 	"path/filepath"
 	"testing"
 
 	"github.com/docker/docker/pkg/stringid"
+	"github.com/stretchr/testify/require"
 )
 
 // DriverBenchExists benchmarks calls to exist
@@ -251,9 +251,7 @@ func DriverBenchDeepLayerRead(b *testing.B, layerCount int, drivername string, d
 		}
 
 		b.StopTimer()
-		if bytes.Compare(c, content) != 0 {
-			b.Fatalf("Wrong content in file %v, expected %v", c, content)
-		}
+		require.Equal(b, content, c)
 		b.StartTimer()
 	}
 }

+ 1 - 4
daemon/graphdriver/overlay/overlay.go

@@ -269,10 +269,7 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr
 
 	// Toplevel images are just a "root" dir
 	if parent == "" {
-		if err := idtools.MkdirAs(path.Join(dir, "root"), 0755, rootUID, rootGID); err != nil {
-			return err
-		}
-		return nil
+		return idtools.MkdirAndChown(path.Join(dir, "root"), 0755, idtools.IDPair{rootUID, rootGID})
 	}
 
 	parentDir := d.dir(parent)

+ 1 - 4
daemon/graphdriver/vfs/driver.go

@@ -103,10 +103,7 @@ func (d *Driver) dir(id string) string {
 
 // Remove deletes the content from the directory for a given id.
 func (d *Driver) Remove(id string) error {
-	if err := system.EnsureRemoveAll(d.dir(id)); err != nil {
-		return err
-	}
-	return nil
+	return system.EnsureRemoveAll(d.dir(id))
 }
 
 // Get returns the directory for the given id.

+ 1 - 1
daemon/graphdriver/zfs/zfs.go

@@ -416,5 +416,5 @@ func (d *Driver) Put(id string) error {
 func (d *Driver) Exists(id string) bool {
 	d.Lock()
 	defer d.Unlock()
-	return d.filesystemsCache[d.zfsPath(id)] == true
+	return d.filesystemsCache[d.zfsPath(id)]
 }

+ 2 - 4
daemon/images.go

@@ -301,12 +301,10 @@ func (daemon *Daemon) SquashImage(id, parent string) (string, error) {
 	}
 	defer daemon.stores[img.Platform()].layerStore.Release(newL)
 
-	var newImage image.Image
-	newImage = *img
+	newImage := *img
 	newImage.RootFS = nil
 
-	var rootFS image.RootFS
-	rootFS = *parentImg.RootFS
+	rootFS := *parentImg.RootFS
 	rootFS.DiffIDs = append(rootFS.DiffIDs, newL.DiffID())
 	newImage.RootFS = &rootFS
 

+ 1 - 1
daemon/logger/factory.go

@@ -93,7 +93,7 @@ func (lf *logdriverFactory) getLogOptValidator(name string) LogOptValidator {
 	lf.m.Lock()
 	defer lf.m.Unlock()
 
-	c, _ := lf.optValidator[name]
+	c := lf.optValidator[name]
 	return c
 }
 

+ 1 - 2
daemon/logger/jsonfilelog/read.go

@@ -137,8 +137,7 @@ func newSectionReader(f *os.File) (*io.SectionReader, error) {
 }
 
 func tailFile(f io.ReadSeeker, logWatcher *logger.LogWatcher, tail int, since time.Time) {
-	var rdr io.Reader
-	rdr = f
+	rdr := io.Reader(f)
 	if tail > 0 {
 		ls, err := tailfile.TailFile(f, tail)
 		if err != nil {

+ 0 - 1
daemon/logger/ring.go

@@ -202,7 +202,6 @@ func (r *messageRing) Close() {
 	r.closed = true
 	r.wait.Broadcast()
 	r.mu.Unlock()
-	return
 }
 
 // Drain drains all messages from the queue.

+ 9 - 12
daemon/logger/splunk/splunk_test.go

@@ -8,6 +8,7 @@ import (
 	"time"
 
 	"github.com/docker/docker/daemon/logger"
+	"github.com/stretchr/testify/require"
 )
 
 // Validate options
@@ -125,7 +126,7 @@ func TestDefault(t *testing.T) {
 		splunkLoggerDriver.nullMessage.Source != "" ||
 		splunkLoggerDriver.nullMessage.SourceType != "" ||
 		splunkLoggerDriver.nullMessage.Index != "" ||
-		splunkLoggerDriver.gzipCompression != false ||
+		splunkLoggerDriver.gzipCompression ||
 		splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency ||
 		splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize ||
 		splunkLoggerDriver.bufferMaximum != defaultBufferMaximum ||
@@ -255,7 +256,7 @@ func TestInlineFormatWithNonDefaultOptions(t *testing.T) {
 		splunkLoggerDriver.nullMessage.Source != "mysource" ||
 		splunkLoggerDriver.nullMessage.SourceType != "mysourcetype" ||
 		splunkLoggerDriver.nullMessage.Index != "myindex" ||
-		splunkLoggerDriver.gzipCompression != true ||
+		!splunkLoggerDriver.gzipCompression ||
 		splunkLoggerDriver.gzipCompressionLevel != gzip.DefaultCompression ||
 		splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency ||
 		splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize ||
@@ -355,7 +356,7 @@ func TestJsonFormat(t *testing.T) {
 		splunkLoggerDriver.nullMessage.Source != "" ||
 		splunkLoggerDriver.nullMessage.SourceType != "" ||
 		splunkLoggerDriver.nullMessage.Index != "" ||
-		splunkLoggerDriver.gzipCompression != true ||
+		!splunkLoggerDriver.gzipCompression ||
 		splunkLoggerDriver.gzipCompressionLevel != gzip.BestSpeed ||
 		splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency ||
 		splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize ||
@@ -448,14 +449,10 @@ func TestRawFormat(t *testing.T) {
 	}
 
 	hostname, err := info.Hostname()
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	loggerDriver, err := New(info)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	if !hec.connectionVerified {
 		t.Fatal("By default connection should be verified")
@@ -472,7 +469,7 @@ func TestRawFormat(t *testing.T) {
 		splunkLoggerDriver.nullMessage.Source != "" ||
 		splunkLoggerDriver.nullMessage.SourceType != "" ||
 		splunkLoggerDriver.nullMessage.Index != "" ||
-		splunkLoggerDriver.gzipCompression != false ||
+		splunkLoggerDriver.gzipCompression ||
 		splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency ||
 		splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize ||
 		splunkLoggerDriver.bufferMaximum != defaultBufferMaximum ||
@@ -586,7 +583,7 @@ func TestRawFormatWithLabels(t *testing.T) {
 		splunkLoggerDriver.nullMessage.Source != "" ||
 		splunkLoggerDriver.nullMessage.SourceType != "" ||
 		splunkLoggerDriver.nullMessage.Index != "" ||
-		splunkLoggerDriver.gzipCompression != false ||
+		splunkLoggerDriver.gzipCompression ||
 		splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency ||
 		splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize ||
 		splunkLoggerDriver.bufferMaximum != defaultBufferMaximum ||
@@ -698,7 +695,7 @@ func TestRawFormatWithoutTag(t *testing.T) {
 		splunkLoggerDriver.nullMessage.Source != "" ||
 		splunkLoggerDriver.nullMessage.SourceType != "" ||
 		splunkLoggerDriver.nullMessage.Index != "" ||
-		splunkLoggerDriver.gzipCompression != false ||
+		splunkLoggerDriver.gzipCompression ||
 		splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency ||
 		splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize ||
 		splunkLoggerDriver.bufferMaximum != defaultBufferMaximum ||

+ 0 - 1
daemon/metrics.go

@@ -169,5 +169,4 @@ func pluginStopMetricsCollection(p plugingetter.CompatPlugin) {
 			logrus.WithError(err).WithField("name", p.Name()).WithField("socket", sockPath).Error("error unmounting metrics socket for plugin")
 		}
 	}
-	return
 }

+ 1 - 1
daemon/network.go

@@ -115,6 +115,7 @@ var (
 func (daemon *Daemon) startIngressWorker() {
 	ingressJobsChannel = make(chan *ingressJob, 100)
 	go func() {
+		// nolint: gosimple
 		for {
 			select {
 			case r := <-ingressJobsChannel:
@@ -232,7 +233,6 @@ func (daemon *Daemon) releaseIngress(id string) {
 		logrus.Errorf("Failed to delete ingress network %s: %v", n.ID(), err)
 		return
 	}
-	return
 }
 
 // SetNetworkBootstrapKeys sets the bootstrap keys.

+ 1 - 0
daemon/oci_linux.go

@@ -29,6 +29,7 @@ import (
 	"github.com/sirupsen/logrus"
 )
 
+// nolint: gosimple
 var (
	deviceCgroupRuleRegex = regexp.MustCompile("^([acb]) ([0-9]+|\\*):([0-9]+|\\*) ([rwm]{1,3})$")
 )

+ 3 - 6
daemon/reload_test.go

@@ -12,6 +12,7 @@ import (
 	"github.com/docker/docker/pkg/discovery"
 	_ "github.com/docker/docker/pkg/discovery/memory"
 	"github.com/docker/docker/registry"
+	"github.com/stretchr/testify/assert"
 )
 
 func TestDaemonReloadLabels(t *testing.T) {
@@ -85,15 +86,11 @@ func TestDaemonReloadAllowNondistributableArtifacts(t *testing.T) {
 	for _, value := range serviceConfig.AllowNondistributableArtifactsCIDRs {
 		actual = append(actual, value.String())
 	}
-	for _, value := range serviceConfig.AllowNondistributableArtifactsHostnames {
-		actual = append(actual, value)
-	}
+	actual = append(actual, serviceConfig.AllowNondistributableArtifactsHostnames...)
 
 	sort.Strings(registries)
 	sort.Strings(actual)
-	if !reflect.DeepEqual(registries, actual) {
-		t.Fatalf("expected %v, got %v\n", registries, actual)
-	}
+	assert.Equal(t, registries, actual)
 }
 
 func TestDaemonReloadMirrors(t *testing.T) {

+ 1 - 0
daemon/top_unix.go

@@ -16,6 +16,7 @@ func validatePSArgs(psArgs string) error {
 	// NOTE: \\s does not detect unicode whitespaces.
 	// So we use fieldsASCII instead of strings.Fields in parsePSOutput.
 	// See https://github.com/docker/docker/pull/24358
+	// nolint: gosimple
 	re := regexp.MustCompile("\\s+([^\\s]*)=\\s*(PID[^\\s]*)")
 	for _, group := range re.FindAllStringSubmatch(psArgs, -1) {
 		if len(group) >= 3 {

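Three of the hunks above (daemon/network.go, daemon/oci_linux.go, daemon/top_unix.go) silence the new linter with a `// nolint: gosimple` comment instead of rewriting the code. A minimal sketch of the trade-off for the regexp case, assuming gometalinter's usual nolint handling; the variable names are illustrative and not from the diff:

package main

import (
	"fmt"
	"regexp"
)

// Suppressed form: the doubled backslashes in an interpreted string literal
// are what gosimple appears to flag here (judging by where the nolint
// comments were placed); the comment keeps the original code unchanged.
// nolint: gosimple
var escaped = regexp.MustCompile("^([acb]) ([0-9]+|\\*):([0-9]+|\\*) ([rwm]{1,3})$")

// The rewrite gosimple would suggest instead: a raw string literal needs no
// doubled backslashes and compiles to the same pattern.
var raw = regexp.MustCompile(`^([acb]) ([0-9]+|\*):([0-9]+|\*) ([rwm]{1,3})$`)

func main() {
	fmt.Println(escaped.MatchString("c 1:3 rwm"), raw.MatchString("c 1:3 rwm")) // true true
}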
+ 1 - 6
distribution/push_v2.go

@@ -395,12 +395,7 @@ func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.
 	defer layerUpload.Close()
 
 	// upload the blob
-	desc, err := pd.uploadUsingSession(ctx, progressOutput, diffID, layerUpload)
-	if err != nil {
-		return desc, err
-	}
-
-	return desc, nil
+	return pd.uploadUsingSession(ctx, progressOutput, diffID, layerUpload)
 }
 
 func (pd *v2PushDescriptor) SetRemoteDescriptor(descriptor distribution.Descriptor) {

+ 1 - 1
hack/dockerfile/binaries-commits

@@ -10,4 +10,4 @@ LIBNETWORK_COMMIT=7b2b1feb1de4817d522cc372af149ff48d25028e
 VNDR_COMMIT=9909bb2b8a0b7ea464527b376dc50389c90df587
 
 # Linting
-GOMETALINTER_COMMIT=5507b26af3204e949ffe50ec08ee73e5847938e1
+GOMETALINTER_COMMIT=bfcc1d6942136fd86eb6f1a6fb328de8398fbd80

+ 2 - 3
hack/validate/gometalinter.json

@@ -9,15 +9,14 @@
     "api/types/container/container_.*",
     "integration-cli/"
   ],
-  "Skip": [
-    "integration-cli/"
-  ],
+  "Skip": ["integration-cli/"],
 
   "Enable": [
     "deadcode",
     "gofmt",
     "goimports",
     "golint",
+    "gosimple",
     "ineffassign",
     "interfacer",
     "unconvert",

+ 3 - 8
image/fs_test.go

@@ -1,7 +1,6 @@
 package image
 
 import (
-	"bytes"
 	"crypto/rand"
 	"crypto/sha256"
 	"encoding/hex"
@@ -12,7 +11,7 @@ import (
 	"testing"
 
 	"github.com/docker/docker/internal/testutil"
-	"github.com/opencontainers/go-digest"
+	digest "github.com/opencontainers/go-digest"
 	"github.com/stretchr/testify/assert"
 )
 
@@ -112,9 +111,7 @@ func TestFSMetadataGetSet(t *testing.T) {
 		actual, err := store.GetMetadata(tc.id, tc.key)
 		assert.NoError(t, err)
 
-		if bytes.Compare(actual, tc.value) != 0 {
-			t.Fatalf("Metadata expected %q, got %q", tc.value, actual)
-		}
+		assert.Equal(t, tc.value, actual)
 	}
 
 	_, err = store.GetMetadata(id2, "tkey2")
@@ -183,9 +180,7 @@ func TestFSGetSet(t *testing.T) {
 	for _, tc := range tcases {
 		data, err := store.Get(tc.expected)
 		assert.NoError(t, err)
-		if bytes.Compare(data, tc.input) != 0 {
-			t.Fatalf("expected data %q, got %q", tc.input, data)
-		}
+		assert.Equal(t, tc.input, data)
 	}
 }
 

+ 1 - 2
image/tarexport/load.go

@@ -82,8 +82,7 @@ func (l *tarexporter) Load(inTar io.ReadCloser, outStream io.Writer, quiet bool)
 		if err := checkCompatibleOS(img.OS); err != nil {
 			return err
 		}
-		var rootFS image.RootFS
-		rootFS = *img.RootFS
+		rootFS := *img.RootFS
 		rootFS.DiffIDs = nil
 
 		if expected, actual := len(m.Layers), len(img.RootFS.DiffIDs); expected != actual {

+ 1 - 4
libcontainerd/client_linux.go

@@ -296,10 +296,7 @@ func (clnt *client) UpdateResources(containerID string, resources Resources) err
 		Pid:       InitFriendlyName,
 		Pid:       InitFriendlyName,
 		Resources: (*containerd.UpdateResource)(&resources),
 		Resources: (*containerd.UpdateResource)(&resources),
 	})
 	})
-	if err != nil {
-		return err
-	}
-	return nil
+	return err
 }
 }
 
 
 func (clnt *client) getExitNotifier(containerID string) *exitNotifier {
 func (clnt *client) getExitNotifier(containerID string) *exitNotifier {

+ 1 - 1
pkg/authorization/authz.go

@@ -158,7 +158,7 @@ func sendBody(url string, header http.Header) bool {
 
 // headers returns flatten version of the http headers excluding authorization
 func headers(header http.Header) map[string]string {
-	v := make(map[string]string, 0)
+	v := make(map[string]string)
 	for k, values := range header {
 		// Skip authorization headers
 		if strings.EqualFold(k, "Authorization") || strings.EqualFold(k, "X-Registry-Config") || strings.EqualFold(k, "X-Registry-Auth") {

+ 2 - 4
pkg/devicemapper/devmapper.go

@@ -351,8 +351,7 @@ func RemoveDeviceDeferred(name string) error {
 	// disable udev dm rules and delete the symlink under /dev/mapper by itself,
 	// even if the removal is deferred by the kernel.
 	cookie := new(uint)
-	var flags uint16
-	flags = DmUdevDisableLibraryFallback
+	flags := uint16(DmUdevDisableLibraryFallback)
 	if err := task.setCookie(cookie, flags); err != nil {
 		return fmt.Errorf("devicemapper: Can not set cookie: %s", err)
 	}
@@ -465,8 +464,7 @@ func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize
 	}
 
 	cookie := new(uint)
-	var flags uint16
-	flags = DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag
+	flags := uint16(DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag)
 	if err := task.setCookie(cookie, flags); err != nil {
 		return fmt.Errorf("devicemapper: Can't set cookie %s", err)
 	}

+ 0 - 2
pkg/discovery/kv/kv_test.go

@@ -11,7 +11,6 @@ import (
 	"github.com/docker/docker/pkg/discovery"
 	"github.com/docker/libkv"
 	"github.com/docker/libkv/store"
-
 	"github.com/go-check/check"
 )
 
@@ -130,7 +129,6 @@ func (s *Mock) AtomicDelete(key string, previous *store.KVPair) (bool, error) {
 
 // Close mock
 func (s *Mock) Close() {
-	return
 }
 
 func (ds *DiscoverySuite) TestInitializeWithCerts(c *check.C) {

+ 2 - 8
pkg/loopback/ioctl.go

@@ -17,10 +17,7 @@ func ioctlLoopCtlGetFree(fd uintptr) (int, error) {
 }
 
 func ioctlLoopSetFd(loopFd, sparseFd uintptr) error {
-	if err := unix.IoctlSetInt(int(loopFd), LoopSetFd, int(sparseFd)); err != nil {
-		return err
-	}
-	return nil
+	return unix.IoctlSetInt(int(loopFd), LoopSetFd, int(sparseFd))
 }
 
 func ioctlLoopSetStatus64(loopFd uintptr, loopInfo *loopInfo64) error {
@@ -47,8 +44,5 @@ func ioctlLoopGetStatus64(loopFd uintptr) (*loopInfo64, error) {
 }
 
 func ioctlLoopSetCapacity(loopFd uintptr, value int) error {
-	if err := unix.IoctlSetInt(int(loopFd), LoopSetCapacity, value); err != nil {
-		return err
-	}
-	return nil
+	return unix.IoctlSetInt(int(loopFd), LoopSetCapacity, value)
 }

+ 9 - 12
pkg/pools/pools_test.go

@@ -6,6 +6,9 @@ import (
 	"io"
 	"strings"
 	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
 func TestBufioReaderPoolGetWithNoReaderShouldCreateOne(t *testing.T) {
@@ -92,22 +95,16 @@ func TestBufioWriterPoolPutAndGet(t *testing.T) {
 	buf := new(bytes.Buffer)
 	bw := bufio.NewWriter(buf)
 	writer := BufioWriter32KPool.Get(bw)
-	if writer == nil {
-		t.Fatalf("BufioReaderPool should not return a nil writer.")
-	}
+	require.NotNil(t, writer)
+
 	written, err := writer.Write([]byte("foobar"))
-	if err != nil {
-		t.Fatal(err)
-	}
-	if written != 6 {
-		t.Fatalf("Should have written 6 bytes, but wrote %v bytes", written)
-	}
+	require.NoError(t, err)
+	assert.Equal(t, 6, written)
+
 	// Make sure we Flush all the way ?
 	writer.Flush()
 	bw.Flush()
-	if len(buf.Bytes()) != 6 {
-		t.Fatalf("The buffer should contain 6 bytes ('foobar') but contains %v ('%v')", buf.Bytes(), string(buf.Bytes()))
-	}
+	assert.Len(t, buf.Bytes(), 6)
 	// Reset the buffer
 	buf.Reset()
 	BufioWriter32KPool.Put(writer)

+ 1 - 1
pkg/signal/signal_linux_test.go

@@ -41,7 +41,7 @@ func TestCatchAll(t *testing.T) {
 }
 
 func TestStopCatch(t *testing.T) {
-	signal, _ := SignalMap["HUP"]
+	signal := SignalMap["HUP"]
 	channel := make(chan os.Signal, 1)
 	CatchAll(channel)
 	go func() {

+ 3 - 3
reference/store_test.go

@@ -306,19 +306,19 @@ func TestAddDeleteGet(t *testing.T) {
 	}
 
 	// Delete a few references
-	if deleted, err := store.Delete(ref1); err != nil || deleted != true {
+	if deleted, err := store.Delete(ref1); err != nil || !deleted {
 		t.Fatal("Delete failed")
 	}
 	if _, err := store.Get(ref1); err != ErrDoesNotExist {
 		t.Fatal("Expected ErrDoesNotExist from Get")
 	}
-	if deleted, err := store.Delete(ref5); err != nil || deleted != true {
+	if deleted, err := store.Delete(ref5); err != nil || !deleted {
 		t.Fatal("Delete failed")
 	}
 	if _, err := store.Get(ref5); err != ErrDoesNotExist {
 		t.Fatal("Expected ErrDoesNotExist from Get")
 	}
-	if deleted, err := store.Delete(nameOnly); err != nil || deleted != true {
+	if deleted, err := store.Delete(nameOnly); err != nil || !deleted {
 		t.Fatal("Delete failed")
 	}
 	if _, err := store.Get(nameOnly); err != ErrDoesNotExist {

+ 2 - 2
registry/config.go

@@ -75,7 +75,7 @@ func newServiceConfig(options ServiceOptions) *serviceConfig {
 	config := &serviceConfig{
 		ServiceConfig: registrytypes.ServiceConfig{
 			InsecureRegistryCIDRs: make([]*registrytypes.NetIPNet, 0),
-			IndexConfigs:          make(map[string]*registrytypes.IndexInfo, 0),
+			IndexConfigs:          make(map[string]*registrytypes.IndexInfo),
 			// Hack: Bypass setting the mirrors to IndexConfigs since they are going away
 			// and Mirrors are only for the official registry anyways.
 		},
@@ -171,7 +171,7 @@ func (config *serviceConfig) LoadInsecureRegistries(registries []string) error {
 	originalIndexInfos := config.ServiceConfig.IndexConfigs
 
 	config.ServiceConfig.InsecureRegistryCIDRs = make([]*registrytypes.NetIPNet, 0)
-	config.ServiceConfig.IndexConfigs = make(map[string]*registrytypes.IndexInfo, 0)
+	config.ServiceConfig.IndexConfigs = make(map[string]*registrytypes.IndexInfo)
 
 skip:
 	for _, r := range registries {

+ 3 - 6
registry/registry_test.go

@@ -14,6 +14,7 @@ import (
 	"github.com/docker/distribution/registry/client/transport"
 	"github.com/docker/docker/api/types"
 	registrytypes "github.com/docker/docker/api/types/registry"
+	"github.com/stretchr/testify/assert"
 )
 
 var (
@@ -747,16 +748,12 @@ func TestSearchRepositories(t *testing.T) {
 func TestTrustedLocation(t *testing.T) {
 	for _, url := range []string{"http://example.com", "https://example.com:7777", "http://docker.io", "http://test.docker.com", "https://fakedocker.com"} {
 		req, _ := http.NewRequest("GET", url, nil)
-		if trustedLocation(req) == true {
-			t.Fatalf("'%s' shouldn't be detected as a trusted location", url)
-		}
+		assert.False(t, trustedLocation(req))
 	}
 
 	for _, url := range []string{"https://docker.io", "https://test.docker.com:80"} {
 		req, _ := http.NewRequest("GET", url, nil)
-		if trustedLocation(req) == false {
-			t.Fatalf("'%s' should be detected as a trusted location", url)
-		}
+		assert.True(t, trustedLocation(req))
 	}
 }
 

+ 1 - 1
runconfig/hostconfig.go

@@ -68,7 +68,7 @@ func validateNetContainerMode(c *container.Config, hc *container.HostConfig) err
 		return ErrConflictContainerNetworkAndMac
 	}
 
-	if hc.NetworkMode.IsContainer() && (len(hc.PortBindings) > 0 || hc.PublishAllPorts == true) {
+	if hc.NetworkMode.IsContainer() && (len(hc.PortBindings) > 0 || hc.PublishAllPorts) {
 		return ErrConflictNetworkPublishPorts
 	}
 

+ 1 - 3
runconfig/hostconfig_test.go

@@ -195,9 +195,7 @@ func TestDecodeHostConfig(t *testing.T) {
 			t.Fatal(fmt.Errorf("Error parsing %s: %v", f, err))
 		}
 
-		if c.Privileged != false {
-			t.Fatalf("Expected privileged false, found %v\n", c.Privileged)
-		}
+		assert.False(t, c.Privileged)
 
 		if l := len(c.Binds); l != 1 {
 			t.Fatalf("Expected 1 bind, found %d\n", l)
 			t.Fatalf("Expected 1 bind, found %d\n", l)