Просмотр исходного кода

Merge pull request #44256 from thaJeztah/redundant_sprintfs

replace redundant fmt.Sprintf() with strconv
Sebastiaan van Stijn 2 года назад
Родитель
Commit
542c735926

+ 5 - 5
client/build_prune.go

@@ -3,8 +3,8 @@ package client // import "github.com/docker/docker/client"
 import (
 import (
 	"context"
 	"context"
 	"encoding/json"
 	"encoding/json"
-	"fmt"
 	"net/url"
 	"net/url"
+	"strconv"
 
 
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/filters"
 	"github.com/docker/docker/api/types/filters"
@@ -23,12 +23,12 @@ func (cli *Client) BuildCachePrune(ctx context.Context, opts types.BuildCachePru
 	if opts.All {
 	if opts.All {
 		query.Set("all", "1")
 		query.Set("all", "1")
 	}
 	}
-	query.Set("keep-storage", fmt.Sprintf("%d", opts.KeepStorage))
-	filters, err := filters.ToJSON(opts.Filters)
+	query.Set("keep-storage", strconv.Itoa(int(opts.KeepStorage)))
+	f, err := filters.ToJSON(opts.Filters)
 	if err != nil {
 	if err != nil {
 		return nil, errors.Wrap(err, "prune could not marshal filters option")
 		return nil, errors.Wrap(err, "prune could not marshal filters option")
 	}
 	}
-	query.Set("filters", filters)
+	query.Set("filters", f)
 
 
 	serverResp, err := cli.post(ctx, "/build/prune", query, nil, nil)
 	serverResp, err := cli.post(ctx, "/build/prune", query, nil, nil)
 	defer ensureReaderClosed(serverResp)
 	defer ensureReaderClosed(serverResp)
@@ -38,7 +38,7 @@ func (cli *Client) BuildCachePrune(ctx context.Context, opts types.BuildCachePru
 	}
 	}
 
 
 	if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil {
 	if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil {
-		return nil, fmt.Errorf("Error retrieving disk usage: %v", err)
+		return nil, errors.Wrap(err, "error retrieving disk usage")
 	}
 	}
 
 
 	return &report, nil
 	return &report, nil

+ 12 - 12
daemon/config/config.go

@@ -224,7 +224,7 @@ type CommonConfig struct {
 
 
 	DNSConfig
 	DNSConfig
 	LogConfig
 	LogConfig
-	BridgeConfig // bridgeConfig holds bridge network specific configuration.
+	BridgeConfig // BridgeConfig holds bridge network specific configuration.
 	NetworkConfig
 	NetworkConfig
 	registry.ServiceOptions
 	registry.ServiceOptions
 
 
@@ -317,7 +317,7 @@ func GetConflictFreeLabels(labels []string) ([]string, error) {
 		if len(stringSlice) > 1 {
 		if len(stringSlice) > 1 {
 			// If there is a conflict we will return an error
 			// If there is a conflict we will return an error
 			if v, ok := labelMap[stringSlice[0]]; ok && v != stringSlice[1] {
 			if v, ok := labelMap[stringSlice[0]]; ok && v != stringSlice[1] {
-				return nil, fmt.Errorf("conflict labels for %s=%s and %s=%s", stringSlice[0], stringSlice[1], stringSlice[0], v)
+				return nil, errors.Errorf("conflict labels for %s=%s and %s=%s", stringSlice[0], stringSlice[1], stringSlice[0], v)
 			}
 			}
 			labelMap[stringSlice[0]] = stringSlice[1]
 			labelMap[stringSlice[0]] = stringSlice[1]
 		}
 		}
@@ -325,7 +325,7 @@ func GetConflictFreeLabels(labels []string) ([]string, error) {
 
 
 	newLabels := []string{}
 	newLabels := []string{}
 	for k, v := range labelMap {
 	for k, v := range labelMap {
-		newLabels = append(newLabels, fmt.Sprintf("%s=%s", k, v))
+		newLabels = append(newLabels, k+"="+v)
 	}
 	}
 	return newLabels, nil
 	return newLabels, nil
 }
 }
@@ -521,7 +521,7 @@ func findConfigurationConflicts(config map[string]interface{}, flags *pflag.Flag
 		for key := range unknownKeys {
 		for key := range unknownKeys {
 			unknown = append(unknown, key)
 			unknown = append(unknown, key)
 		}
 		}
-		return fmt.Errorf("the following directives don't match any configuration option: %s", strings.Join(unknown, ", "))
+		return errors.Errorf("the following directives don't match any configuration option: %s", strings.Join(unknown, ", "))
 	}
 	}
 
 
 	var conflicts []string
 	var conflicts []string
@@ -555,7 +555,7 @@ func findConfigurationConflicts(config map[string]interface{}, flags *pflag.Flag
 	flags.Visit(duplicatedConflicts)
 	flags.Visit(duplicatedConflicts)
 
 
 	if len(conflicts) > 0 {
 	if len(conflicts) > 0 {
-		return fmt.Errorf("the following directives are specified both as a flag and in the configuration file: %s", strings.Join(conflicts, ", "))
+		return errors.Errorf("the following directives are specified both as a flag and in the configuration file: %s", strings.Join(conflicts, ", "))
 	}
 	}
 	return nil
 	return nil
 }
 }
@@ -572,7 +572,7 @@ func Validate(config *Config) error {
 	// validate log-level
 	// validate log-level
 	if config.LogLevel != "" {
 	if config.LogLevel != "" {
 		if _, err := logrus.ParseLevel(config.LogLevel); err != nil {
 		if _, err := logrus.ParseLevel(config.LogLevel); err != nil {
-			return fmt.Errorf("invalid logging level: %s", config.LogLevel)
+			return errors.Errorf("invalid logging level: %s", config.LogLevel)
 		}
 		}
 	}
 	}
 
 
@@ -599,22 +599,22 @@ func Validate(config *Config) error {
 
 
 	// TODO(thaJeztah) Validations below should not accept "0" to be valid; see Validate() for a more in-depth description of this problem
 	// TODO(thaJeztah) Validations below should not accept "0" to be valid; see Validate() for a more in-depth description of this problem
 	if config.Mtu < 0 {
 	if config.Mtu < 0 {
-		return fmt.Errorf("invalid default MTU: %d", config.Mtu)
+		return errors.Errorf("invalid default MTU: %d", config.Mtu)
 	}
 	}
 	if config.MaxConcurrentDownloads < 0 {
 	if config.MaxConcurrentDownloads < 0 {
-		return fmt.Errorf("invalid max concurrent downloads: %d", config.MaxConcurrentDownloads)
+		return errors.Errorf("invalid max concurrent downloads: %d", config.MaxConcurrentDownloads)
 	}
 	}
 	if config.MaxConcurrentUploads < 0 {
 	if config.MaxConcurrentUploads < 0 {
-		return fmt.Errorf("invalid max concurrent uploads: %d", config.MaxConcurrentUploads)
+		return errors.Errorf("invalid max concurrent uploads: %d", config.MaxConcurrentUploads)
 	}
 	}
 	if config.MaxDownloadAttempts < 0 {
 	if config.MaxDownloadAttempts < 0 {
-		return fmt.Errorf("invalid max download attempts: %d", config.MaxDownloadAttempts)
+		return errors.Errorf("invalid max download attempts: %d", config.MaxDownloadAttempts)
 	}
 	}
 
 
 	// validate that "default" runtime is not reset
 	// validate that "default" runtime is not reset
 	if runtimes := config.GetAllRuntimes(); len(runtimes) > 0 {
 	if runtimes := config.GetAllRuntimes(); len(runtimes) > 0 {
 		if _, ok := runtimes[StockRuntimeName]; ok {
 		if _, ok := runtimes[StockRuntimeName]; ok {
-			return fmt.Errorf("runtime name '%s' is reserved", StockRuntimeName)
+			return errors.Errorf("runtime name '%s' is reserved", StockRuntimeName)
 		}
 		}
 	}
 	}
 
 
@@ -626,7 +626,7 @@ func Validate(config *Config) error {
 		if !builtinRuntimes[defaultRuntime] {
 		if !builtinRuntimes[defaultRuntime] {
 			runtimes := config.GetAllRuntimes()
 			runtimes := config.GetAllRuntimes()
 			if _, ok := runtimes[defaultRuntime]; !ok && !IsPermissibleC8dRuntimeName(defaultRuntime) {
 			if _, ok := runtimes[defaultRuntime]; !ok && !IsPermissibleC8dRuntimeName(defaultRuntime) {
-				return fmt.Errorf("specified default runtime '%s' does not exist", defaultRuntime)
+				return errors.Errorf("specified default runtime '%s' does not exist", defaultRuntime)
 			}
 			}
 		}
 		}
 	}
 	}

+ 3 - 2
daemon/graphdriver/aufs/aufs.go

@@ -32,6 +32,7 @@ import (
 	"os/exec"
 	"os/exec"
 	"path"
 	"path"
 	"path/filepath"
 	"path/filepath"
+	"strconv"
 	"strings"
 	"strings"
 	"sync"
 	"sync"
 
 
@@ -209,8 +210,8 @@ func (a *Driver) Status() [][2]string {
 	return [][2]string{
 	return [][2]string{
 		{"Root Dir", a.rootPath()},
 		{"Root Dir", a.rootPath()},
 		{"Backing Filesystem", backingFs},
 		{"Backing Filesystem", backingFs},
-		{"Dirs", fmt.Sprintf("%d", len(ids))},
-		{"Dirperm1 Supported", fmt.Sprintf("%v", useDirperm())},
+		{"Dirs", strconv.Itoa(len(ids))},
+		{"Dirperm1 Supported", strconv.FormatBool(useDirperm())},
 	}
 	}
 }
 }
 
 

+ 3 - 2
daemon/graphdriver/aufs/aufs_test.go

@@ -10,6 +10,7 @@ import (
 	"os"
 	"os"
 	"path"
 	"path"
 	"path/filepath"
 	"path/filepath"
+	"strconv"
 	"sync"
 	"sync"
 	"testing"
 	"testing"
 
 
@@ -651,8 +652,8 @@ func testMountMoreThan42Layers(t *testing.T, mountPath string) {
 	for i := 1; i < 127; i++ {
 	for i := 1; i < 127; i++ {
 		expected++
 		expected++
 		var (
 		var (
-			parent  = fmt.Sprintf("%d", i-1)
-			current = fmt.Sprintf("%d", i)
+			parent  = strconv.Itoa(i - 1)
+			current = strconv.Itoa(i)
 		)
 		)
 
 
 		if parent == "0" {
 		if parent == "0" {

+ 1 - 1
daemon/graphdriver/btrfs/btrfs.go

@@ -156,7 +156,7 @@ func (d *Driver) Status() [][2]string {
 		status = append(status, [2]string{"Build Version", bv})
 		status = append(status, [2]string{"Build Version", bv})
 	}
 	}
 	if lv := btrfsLibVersion(); lv != -1 {
 	if lv := btrfsLibVersion(); lv != -1 {
-		status = append(status, [2]string{"Library Version", fmt.Sprintf("%d", lv)})
+		status = append(status, [2]string{"Library Version", strconv.Itoa(lv)})
 	}
 	}
 	return status
 	return status
 }
 }

+ 1 - 1
daemon/graphdriver/zfs/zfs.go

@@ -231,7 +231,7 @@ func (d *Driver) GetMetadata(id string) (map[string]string, error) {
 }
 }
 
 
 func (d *Driver) cloneFilesystem(name, parentName string) error {
 func (d *Driver) cloneFilesystem(name, parentName string) error {
-	snapshotName := fmt.Sprintf("%d", time.Now().Nanosecond())
+	snapshotName := strconv.Itoa(time.Now().Nanosecond())
 	parentDataset := zfs.Dataset{Name: parentName}
 	parentDataset := zfs.Dataset{Name: parentName}
 	snapshot, err := parentDataset.Snapshot(snapshotName /*recursive */, false)
 	snapshot, err := parentDataset.Snapshot(snapshotName /*recursive */, false)
 	if err != nil {
 	if err != nil {

+ 3 - 3
daemon/keys.go

@@ -4,7 +4,6 @@
 package daemon // import "github.com/docker/docker/daemon"
 package daemon // import "github.com/docker/docker/daemon"
 
 
 import (
 import (
-	"fmt"
 	"os"
 	"os"
 	"strconv"
 	"strconv"
 	"strings"
 	"strings"
@@ -38,7 +37,8 @@ func setRootKeyLimit(limit int) error {
 		return err
 		return err
 	}
 	}
 	defer keys.Close()
 	defer keys.Close()
-	if _, err := fmt.Fprintf(keys, "%d", limit); err != nil {
+	_, err = keys.WriteString(strconv.Itoa(limit))
+	if err != nil {
 		return err
 		return err
 	}
 	}
 	bytes, err := os.OpenFile(rootBytesFile, os.O_WRONLY, 0)
 	bytes, err := os.OpenFile(rootBytesFile, os.O_WRONLY, 0)
@@ -46,7 +46,7 @@ func setRootKeyLimit(limit int) error {
 		return err
 		return err
 	}
 	}
 	defer bytes.Close()
 	defer bytes.Close()
-	_, err = fmt.Fprintf(bytes, "%d", limit*rootKeyByteMultiplier)
+	_, err = bytes.WriteString(strconv.Itoa(limit * rootKeyByteMultiplier))
 	return err
 	return err
 }
 }
 
 

+ 4 - 4
daemon/kill.go

@@ -4,6 +4,7 @@ import (
 	"context"
 	"context"
 	"fmt"
 	"fmt"
 	"runtime"
 	"runtime"
+	"strconv"
 	"syscall"
 	"syscall"
 	"time"
 	"time"
 
 
@@ -125,10 +126,9 @@ func (daemon *Daemon) killWithSignal(container *containerpkg.Container, stopSign
 		}
 		}
 	}
 	}
 
 
-	attributes := map[string]string{
-		"signal": fmt.Sprintf("%d", stopSignal),
-	}
-	daemon.LogContainerEventWithAttributes(container, "kill", attributes)
+	daemon.LogContainerEventWithAttributes(container, "kill", map[string]string{
+		"signal": strconv.Itoa(int(stopSignal)),
+	})
 	return nil
 	return nil
 }
 }
 
 

+ 2 - 1
daemon/links/links_test.go

@@ -2,6 +2,7 @@ package links // import "github.com/docker/docker/daemon/links"
 
 
 import (
 import (
 	"fmt"
 	"fmt"
+	"strconv"
 	"strings"
 	"strings"
 	"testing"
 	"testing"
 
 
@@ -200,7 +201,7 @@ func TestLinkPortRangeEnv(t *testing.T) {
 		if env[tcpaddr] != "172.0.17.2" {
 		if env[tcpaddr] != "172.0.17.2" {
 			t.Fatalf("Expected env %s  = 172.0.17.2, got %s", tcpaddr, env[tcpaddr])
 			t.Fatalf("Expected env %s  = 172.0.17.2, got %s", tcpaddr, env[tcpaddr])
 		}
 		}
-		if env[tcpport] != fmt.Sprintf("%d", i) {
+		if env[tcpport] != strconv.Itoa(i) {
 			t.Fatalf("Expected env %s  = %d, got %s", tcpport, i, env[tcpport])
 			t.Fatalf("Expected env %s  = %d, got %s", tcpport, i, env[tcpport])
 		}
 		}
 		if env[tcpproto] != "tcp" {
 		if env[tcpproto] != "tcp" {

+ 1 - 1
daemon/logger/awslogs/cloudwatchlogs_test.go

@@ -1426,7 +1426,7 @@ func TestCollectBatchWithDuplicateTimestamps(t *testing.T) {
 	times := maximumLogEventsPerPut
 	times := maximumLogEventsPerPut
 	timestamp := time.Now()
 	timestamp := time.Now()
 	for i := 0; i < times; i++ {
 	for i := 0; i < times; i++ {
-		line := fmt.Sprintf("%d", i)
+		line := strconv.Itoa(i)
 		if i%2 == 0 {
 		if i%2 == 0 {
 			timestamp.Add(1 * time.Nanosecond)
 			timestamp.Add(1 * time.Nanosecond)
 		}
 		}

+ 1 - 2
daemon/logger/jsonfilelog/jsonfilelog_test.go

@@ -4,7 +4,6 @@ import (
 	"bytes"
 	"bytes"
 	"compress/gzip"
 	"compress/gzip"
 	"encoding/json"
 	"encoding/json"
-	"fmt"
 	"io"
 	"io"
 	"os"
 	"os"
 	"path/filepath"
 	"path/filepath"
@@ -128,7 +127,7 @@ func BenchmarkJSONFileLoggerLog(b *testing.B) {
 		bytes.Repeat([]byte("a long string"), 100),
 		bytes.Repeat([]byte("a long string"), 100),
 		bytes.Repeat([]byte("a really long string"), 10000),
 		bytes.Repeat([]byte("a really long string"), 10000),
 	} {
 	} {
-		b.Run(fmt.Sprintf("%d", len(data)), func(b *testing.B) {
+		b.Run(strconv.Itoa(len(data)), func(b *testing.B) {
 			testMsg := &logger.Message{
 			testMsg := &logger.Message{
 				Line:      data,
 				Line:      data,
 				Source:    "stderr",
 				Source:    "stderr",

+ 2 - 2
daemon/logger/local/local_test.go

@@ -3,10 +3,10 @@ package local
 import (
 import (
 	"bytes"
 	"bytes"
 	"encoding/binary"
 	"encoding/binary"
-	"fmt"
 	"io"
 	"io"
 	"os"
 	"os"
 	"path/filepath"
 	"path/filepath"
+	"strconv"
 	"testing"
 	"testing"
 	"time"
 	"time"
 
 
@@ -111,7 +111,7 @@ func BenchmarkLogWrite(b *testing.B) {
 		bytes.Repeat([]byte("a long string"), 100),
 		bytes.Repeat([]byte("a long string"), 100),
 		bytes.Repeat([]byte("a really long string"), 10000),
 		bytes.Repeat([]byte("a really long string"), 10000),
 	} {
 	} {
-		b.Run(fmt.Sprintf("%d", len(data)), func(b *testing.B) {
+		b.Run(strconv.Itoa(len(data)), func(b *testing.B) {
 			entry := &logdriver.LogEntry{Line: data, Source: "stdout", TimeNano: t.UnixNano()}
 			entry := &logdriver.LogEntry{Line: data, Source: "stdout", TimeNano: t.UnixNano()}
 			b.SetBytes(int64(entry.Size() + encodeBinaryLen + encodeBinaryLen))
 			b.SetBytes(int64(entry.Size() + encodeBinaryLen + encodeBinaryLen))
 			b.ResetTimer()
 			b.ResetTimer()

+ 12 - 11
daemon/logger/splunk/splunk_test.go

@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"fmt"
 	"net/http"
 	"net/http"
 	"runtime"
 	"runtime"
+	"strconv"
 	"testing"
 	"testing"
 	"time"
 	"time"
 
 
@@ -827,7 +828,7 @@ func TestBatching(t *testing.T) {
 	}
 	}
 
 
 	for i := 0; i < defaultStreamChannelSize*4; i++ {
 	for i := 0; i < defaultStreamChannelSize*4; i++ {
-		if err := loggerDriver.Log(&logger.Message{Line: []byte(fmt.Sprintf("%d", i)), Source: "stdout", Timestamp: time.Now()}); err != nil {
+		if err := loggerDriver.Log(&logger.Message{Line: []byte(strconv.Itoa(i)), Source: "stdout", Timestamp: time.Now()}); err != nil {
 			t.Fatal(err)
 			t.Fatal(err)
 		}
 		}
 	}
 	}
@@ -845,7 +846,7 @@ func TestBatching(t *testing.T) {
 		if event, err := message.EventAsMap(); err != nil {
 		if event, err := message.EventAsMap(); err != nil {
 			t.Fatal(err)
 			t.Fatal(err)
 		} else {
 		} else {
-			if event["line"] != fmt.Sprintf("%d", i) {
+			if event["line"] != strconv.Itoa(i) {
 				t.Fatalf("Unexpected event in message %v", event)
 				t.Fatalf("Unexpected event in message %v", event)
 			}
 			}
 		}
 		}
@@ -887,7 +888,7 @@ func TestFrequency(t *testing.T) {
 	}
 	}
 
 
 	for i := 0; i < 10; i++ {
 	for i := 0; i < 10; i++ {
-		if err := loggerDriver.Log(&logger.Message{Line: []byte(fmt.Sprintf("%d", i)), Source: "stdout", Timestamp: time.Now()}); err != nil {
+		if err := loggerDriver.Log(&logger.Message{Line: []byte(strconv.Itoa(i)), Source: "stdout", Timestamp: time.Now()}); err != nil {
 			t.Fatal(err)
 			t.Fatal(err)
 		}
 		}
 		time.Sleep(15 * time.Millisecond)
 		time.Sleep(15 * time.Millisecond)
@@ -906,7 +907,7 @@ func TestFrequency(t *testing.T) {
 		if event, err := message.EventAsMap(); err != nil {
 		if event, err := message.EventAsMap(); err != nil {
 			t.Fatal(err)
 			t.Fatal(err)
 		} else {
 		} else {
-			if event["line"] != fmt.Sprintf("%d", i) {
+			if event["line"] != strconv.Itoa(i) {
 				t.Fatalf("Unexpected event in message %v", event)
 				t.Fatalf("Unexpected event in message %v", event)
 			}
 			}
 		}
 		}
@@ -958,7 +959,7 @@ func TestOneMessagePerRequest(t *testing.T) {
 	}
 	}
 
 
 	for i := 0; i < 10; i++ {
 	for i := 0; i < 10; i++ {
-		if err := loggerDriver.Log(&logger.Message{Line: []byte(fmt.Sprintf("%d", i)), Source: "stdout", Timestamp: time.Now()}); err != nil {
+		if err := loggerDriver.Log(&logger.Message{Line: []byte(strconv.Itoa(i)), Source: "stdout", Timestamp: time.Now()}); err != nil {
 			t.Fatal(err)
 			t.Fatal(err)
 		}
 		}
 	}
 	}
@@ -976,7 +977,7 @@ func TestOneMessagePerRequest(t *testing.T) {
 		if event, err := message.EventAsMap(); err != nil {
 		if event, err := message.EventAsMap(); err != nil {
 			t.Fatal(err)
 			t.Fatal(err)
 		} else {
 		} else {
-			if event["line"] != fmt.Sprintf("%d", i) {
+			if event["line"] != strconv.Itoa(i) {
 				t.Fatalf("Unexpected event in message %v", event)
 				t.Fatalf("Unexpected event in message %v", event)
 			}
 			}
 		}
 		}
@@ -1050,7 +1051,7 @@ func TestSkipVerify(t *testing.T) {
 	}
 	}
 
 
 	for i := 0; i < defaultStreamChannelSize*2; i++ {
 	for i := 0; i < defaultStreamChannelSize*2; i++ {
-		if err := loggerDriver.Log(&logger.Message{Line: []byte(fmt.Sprintf("%d", i)), Source: "stdout", Timestamp: time.Now()}); err != nil {
+		if err := loggerDriver.Log(&logger.Message{Line: []byte(strconv.Itoa(i)), Source: "stdout", Timestamp: time.Now()}); err != nil {
 			t.Fatal(err)
 			t.Fatal(err)
 		}
 		}
 	}
 	}
@@ -1062,7 +1063,7 @@ func TestSkipVerify(t *testing.T) {
 	hec.simulateErr(false)
 	hec.simulateErr(false)
 
 
 	for i := defaultStreamChannelSize * 2; i < defaultStreamChannelSize*4; i++ {
 	for i := defaultStreamChannelSize * 2; i < defaultStreamChannelSize*4; i++ {
-		if err := loggerDriver.Log(&logger.Message{Line: []byte(fmt.Sprintf("%d", i)), Source: "stdout", Timestamp: time.Now()}); err != nil {
+		if err := loggerDriver.Log(&logger.Message{Line: []byte(strconv.Itoa(i)), Source: "stdout", Timestamp: time.Now()}); err != nil {
 			t.Fatal(err)
 			t.Fatal(err)
 		}
 		}
 	}
 	}
@@ -1080,7 +1081,7 @@ func TestSkipVerify(t *testing.T) {
 		if event, err := message.EventAsMap(); err != nil {
 		if event, err := message.EventAsMap(); err != nil {
 			t.Fatal(err)
 			t.Fatal(err)
 		} else {
 		} else {
-			if event["line"] != fmt.Sprintf("%d", i) {
+			if event["line"] != strconv.Itoa(i) {
 				t.Fatalf("Unexpected event in message %v", event)
 				t.Fatalf("Unexpected event in message %v", event)
 			}
 			}
 		}
 		}
@@ -1124,7 +1125,7 @@ func TestBufferMaximum(t *testing.T) {
 	}
 	}
 
 
 	for i := 0; i < 11; i++ {
 	for i := 0; i < 11; i++ {
-		if err := loggerDriver.Log(&logger.Message{Line: []byte(fmt.Sprintf("%d", i)), Source: "stdout", Timestamp: time.Now()}); err != nil {
+		if err := loggerDriver.Log(&logger.Message{Line: []byte(strconv.Itoa(i)), Source: "stdout", Timestamp: time.Now()}); err != nil {
 			t.Fatal(err)
 			t.Fatal(err)
 		}
 		}
 	}
 	}
@@ -1193,7 +1194,7 @@ func TestServerAlwaysDown(t *testing.T) {
 	}
 	}
 
 
 	for i := 0; i < 5; i++ {
 	for i := 0; i < 5; i++ {
-		if err := loggerDriver.Log(&logger.Message{Line: []byte(fmt.Sprintf("%d", i)), Source: "stdout", Timestamp: time.Now()}); err != nil {
+		if err := loggerDriver.Log(&logger.Message{Line: []byte(strconv.Itoa(i)), Source: "stdout", Timestamp: time.Now()}); err != nil {
 			t.Fatal(err)
 			t.Fatal(err)
 		}
 		}
 	}
 	}

+ 3 - 3
daemon/reload_unix.go

@@ -5,7 +5,7 @@ package daemon // import "github.com/docker/docker/daemon"
 
 
 import (
 import (
 	"bytes"
 	"bytes"
-	"fmt"
+	"strconv"
 
 
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/daemon/config"
 	"github.com/docker/docker/daemon/config"
@@ -49,12 +49,12 @@ func (daemon *Daemon) reloadPlatform(conf *config.Config, attributes map[string]
 		if runtimeList.Len() > 0 {
 		if runtimeList.Len() > 0 {
 			runtimeList.WriteRune(' ')
 			runtimeList.WriteRune(' ')
 		}
 		}
-		runtimeList.WriteString(fmt.Sprintf("%s:%s", name, rt.Path))
+		runtimeList.WriteString(name + ":" + rt.Path)
 	}
 	}
 
 
 	attributes["runtimes"] = runtimeList.String()
 	attributes["runtimes"] = runtimeList.String()
 	attributes["default-runtime"] = daemon.configStore.DefaultRuntime
 	attributes["default-runtime"] = daemon.configStore.DefaultRuntime
-	attributes["default-shm-size"] = fmt.Sprintf("%d", daemon.configStore.ShmSize)
+	attributes["default-shm-size"] = strconv.FormatInt(int64(daemon.configStore.ShmSize), 10)
 	attributes["default-ipc-mode"] = daemon.configStore.IpcMode
 	attributes["default-ipc-mode"] = daemon.configStore.IpcMode
 	attributes["default-cgroupns-mode"] = daemon.configStore.CgroupNamespaceMode
 	attributes["default-cgroupns-mode"] = daemon.configStore.CgroupNamespaceMode
 
 

+ 5 - 4
daemon/resize.go

@@ -2,7 +2,8 @@ package daemon // import "github.com/docker/docker/daemon"
 
 
 import (
 import (
 	"context"
 	"context"
-	"fmt"
+	"errors"
+	"strconv"
 	"time"
 	"time"
 )
 )
 
 
@@ -23,8 +24,8 @@ func (daemon *Daemon) ContainerResize(name string, height, width int) error {
 
 
 	if err = tsk.Resize(context.Background(), uint32(width), uint32(height)); err == nil {
 	if err = tsk.Resize(context.Background(), uint32(width), uint32(height)); err == nil {
 		attributes := map[string]string{
 		attributes := map[string]string{
-			"height": fmt.Sprintf("%d", height),
-			"width":  fmt.Sprintf("%d", width),
+			"height": strconv.Itoa(height),
+			"width":  strconv.Itoa(width),
 		}
 		}
 		daemon.LogContainerEventWithAttributes(container, "resize", attributes)
 		daemon.LogContainerEventWithAttributes(container, "resize", attributes)
 	}
 	}
@@ -49,6 +50,6 @@ func (daemon *Daemon) ContainerExecResize(name string, height, width int) error
 	case <-ec.Started:
 	case <-ec.Started:
 		return ec.Process.Resize(context.Background(), uint32(width), uint32(height))
 		return ec.Process.Resize(context.Background(), uint32(width), uint32(height))
 	case <-timeout.C:
 	case <-timeout.C:
-		return fmt.Errorf("timeout waiting for exec session ready")
+		return errors.New("timeout waiting for exec session ready")
 	}
 	}
 }
 }

+ 1 - 2
daemon/runtime_unix.go

@@ -14,7 +14,6 @@ import (
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/daemon/config"
 	"github.com/docker/docker/daemon/config"
 	"github.com/docker/docker/errdefs"
 	"github.com/docker/docker/errdefs"
-	"github.com/docker/docker/pkg/ioutils"
 	"github.com/pkg/errors"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	"github.com/sirupsen/logrus"
 )
 )
@@ -56,7 +55,7 @@ func (daemon *Daemon) initRuntimes(runtimes map[string]types.Runtime) (err error
 	runtimeDir := filepath.Join(daemon.configStore.Root, "runtimes")
 	runtimeDir := filepath.Join(daemon.configStore.Root, "runtimes")
 	// Remove old temp directory if any
 	// Remove old temp directory if any
 	os.RemoveAll(runtimeDir + "-old")
 	os.RemoveAll(runtimeDir + "-old")
-	tmpDir, err := ioutils.TempDir(daemon.configStore.Root, "gen-runtimes")
+	tmpDir, err := os.MkdirTemp(daemon.configStore.Root, "gen-runtimes")
 	if err != nil {
 	if err != nil {
 		return errors.Wrap(err, "failed to get temp dir to generate runtime scripts")
 		return errors.Wrap(err, "failed to get temp dir to generate runtime scripts")
 	}
 	}

+ 1 - 1
integration-cli/docker_cli_inspect_test.go

@@ -135,7 +135,7 @@ func (s *DockerCLIInspectSuite) TestInspectTypeFlagWithInvalidValue(c *testing.T
 
 
 	out, exitCode, err := dockerCmdWithError("inspect", "--type=foobar", "busybox")
 	out, exitCode, err := dockerCmdWithError("inspect", "--type=foobar", "busybox")
 	assert.Assert(c, err != nil, "%d", exitCode)
 	assert.Assert(c, err != nil, "%d", exitCode)
-	assert.Equal(c, exitCode, 1, fmt.Sprintf("%s", err))
+	assert.Equal(c, exitCode, 1, err)
 	assert.Assert(c, strings.Contains(out, "not a valid value for --type"))
 	assert.Assert(c, strings.Contains(out, "not a valid value for --type"))
 }
 }
 
 

+ 2 - 3
integration/container/checkpoint_test.go

@@ -2,7 +2,6 @@ package container // import "github.com/docker/docker/integration/container"
 
 
 import (
 import (
 	"context"
 	"context"
-	"fmt"
 	"os/exec"
 	"os/exec"
 	"regexp"
 	"regexp"
 	"sort"
 	"sort"
@@ -84,9 +83,9 @@ func TestCheckpoint(t *testing.T) {
 	err = client.CheckpointCreate(ctx, cID, cptOpt)
 	err = client.CheckpointCreate(ctx, cID, cptOpt)
 	if err != nil {
 	if err != nil {
 		// An error can contain a path to a dump file
 		// An error can contain a path to a dump file
-		t.Logf("%s", err)
+		t.Log(err)
 		re := regexp.MustCompile("path= (.*): ")
 		re := regexp.MustCompile("path= (.*): ")
-		m := re.FindStringSubmatch(fmt.Sprintf("%s", err))
+		m := re.FindStringSubmatch(err.Error())
 		if len(m) >= 2 {
 		if len(m) >= 2 {
 			dumpLog := m[1]
 			dumpLog := m[1]
 			t.Logf("%s", dumpLog)
 			t.Logf("%s", dumpLog)

+ 15 - 17
layer/filestore.go

@@ -3,7 +3,6 @@ package layer // import "github.com/docker/docker/layer"
 import (
 import (
 	"compress/gzip"
 	"compress/gzip"
 	"encoding/json"
 	"encoding/json"
-	"fmt"
 	"io"
 	"io"
 	"os"
 	"os"
 	"path/filepath"
 	"path/filepath"
@@ -40,7 +39,7 @@ type fileMetadataTransaction struct {
 // which is backed by files on disk using the provided root
 // which is backed by files on disk using the provided root
 // as the root of metadata files.
 // as the root of metadata files.
 func newFSMetadataStore(root string) (*fileMetadataStore, error) {
 func newFSMetadataStore(root string) (*fileMetadataStore, error) {
-	if err := os.MkdirAll(root, 0700); err != nil {
+	if err := os.MkdirAll(root, 0o700); err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
 	return &fileMetadataStore{
 	return &fileMetadataStore{
@@ -67,7 +66,7 @@ func (fms *fileMetadataStore) getMountFilename(mount, filename string) string {
 
 
 func (fms *fileMetadataStore) StartTransaction() (*fileMetadataTransaction, error) {
 func (fms *fileMetadataStore) StartTransaction() (*fileMetadataTransaction, error) {
 	tmpDir := filepath.Join(fms.root, "tmp")
 	tmpDir := filepath.Join(fms.root, "tmp")
-	if err := os.MkdirAll(tmpDir, 0755); err != nil {
+	if err := os.MkdirAll(tmpDir, 0o755); err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
 	ws, err := ioutils.NewAtomicWriteSet(tmpDir)
 	ws, err := ioutils.NewAtomicWriteSet(tmpDir)
@@ -82,20 +81,19 @@ func (fms *fileMetadataStore) StartTransaction() (*fileMetadataTransaction, erro
 }
 }
 
 
 func (fm *fileMetadataTransaction) SetSize(size int64) error {
 func (fm *fileMetadataTransaction) SetSize(size int64) error {
-	content := fmt.Sprintf("%d", size)
-	return fm.ws.WriteFile("size", []byte(content), 0644)
+	return fm.ws.WriteFile("size", []byte(strconv.FormatInt(size, 10)), 0o644)
 }
 }
 
 
 func (fm *fileMetadataTransaction) SetParent(parent ChainID) error {
 func (fm *fileMetadataTransaction) SetParent(parent ChainID) error {
-	return fm.ws.WriteFile("parent", []byte(digest.Digest(parent).String()), 0644)
+	return fm.ws.WriteFile("parent", []byte(digest.Digest(parent).String()), 0o644)
 }
 }
 
 
 func (fm *fileMetadataTransaction) SetDiffID(diff DiffID) error {
 func (fm *fileMetadataTransaction) SetDiffID(diff DiffID) error {
-	return fm.ws.WriteFile("diff", []byte(digest.Digest(diff).String()), 0644)
+	return fm.ws.WriteFile("diff", []byte(digest.Digest(diff).String()), 0o644)
 }
 }
 
 
 func (fm *fileMetadataTransaction) SetCacheID(cacheID string) error {
 func (fm *fileMetadataTransaction) SetCacheID(cacheID string) error {
-	return fm.ws.WriteFile("cache-id", []byte(cacheID), 0644)
+	return fm.ws.WriteFile("cache-id", []byte(cacheID), 0o644)
 }
 }
 
 
 func (fm *fileMetadataTransaction) SetDescriptor(ref distribution.Descriptor) error {
 func (fm *fileMetadataTransaction) SetDescriptor(ref distribution.Descriptor) error {
@@ -103,11 +101,11 @@ func (fm *fileMetadataTransaction) SetDescriptor(ref distribution.Descriptor) er
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
-	return fm.ws.WriteFile("descriptor.json", jsonRef, 0644)
+	return fm.ws.WriteFile("descriptor.json", jsonRef, 0o644)
 }
 }
 
 
 func (fm *fileMetadataTransaction) TarSplitWriter(compressInput bool) (io.WriteCloser, error) {
 func (fm *fileMetadataTransaction) TarSplitWriter(compressInput bool) (io.WriteCloser, error) {
-	f, err := fm.ws.FileWriter("tar-split.json.gz", os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
+	f, err := fm.ws.FileWriter("tar-split.json.gz", os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0o644)
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
@@ -126,7 +124,7 @@ func (fm *fileMetadataTransaction) TarSplitWriter(compressInput bool) (io.WriteC
 
 
 func (fm *fileMetadataTransaction) Commit(layer ChainID) error {
 func (fm *fileMetadataTransaction) Commit(layer ChainID) error {
 	finalDir := fm.store.getLayerDirectory(layer)
 	finalDir := fm.store.getLayerDirectory(layer)
-	if err := os.MkdirAll(filepath.Dir(finalDir), 0755); err != nil {
+	if err := os.MkdirAll(filepath.Dir(finalDir), 0o755); err != nil {
 		return err
 		return err
 	}
 	}
 
 
@@ -236,24 +234,24 @@ func (fms *fileMetadataStore) TarSplitReader(layer ChainID) (io.ReadCloser, erro
 }
 }
 
 
 func (fms *fileMetadataStore) SetMountID(mount string, mountID string) error {
 func (fms *fileMetadataStore) SetMountID(mount string, mountID string) error {
-	if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil {
+	if err := os.MkdirAll(fms.getMountDirectory(mount), 0o755); err != nil {
 		return err
 		return err
 	}
 	}
-	return os.WriteFile(fms.getMountFilename(mount, "mount-id"), []byte(mountID), 0644)
+	return os.WriteFile(fms.getMountFilename(mount, "mount-id"), []byte(mountID), 0o644)
 }
 }
 
 
 func (fms *fileMetadataStore) SetInitID(mount string, init string) error {
 func (fms *fileMetadataStore) SetInitID(mount string, init string) error {
-	if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil {
+	if err := os.MkdirAll(fms.getMountDirectory(mount), 0o755); err != nil {
 		return err
 		return err
 	}
 	}
-	return os.WriteFile(fms.getMountFilename(mount, "init-id"), []byte(init), 0644)
+	return os.WriteFile(fms.getMountFilename(mount, "init-id"), []byte(init), 0o644)
 }
 }
 
 
 func (fms *fileMetadataStore) SetMountParent(mount string, parent ChainID) error {
 func (fms *fileMetadataStore) SetMountParent(mount string, parent ChainID) error {
-	if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil {
+	if err := os.MkdirAll(fms.getMountDirectory(mount), 0o755); err != nil {
 		return err
 		return err
 	}
 	}
-	return os.WriteFile(fms.getMountFilename(mount, "parent"), []byte(digest.Digest(parent).String()), 0644)
+	return os.WriteFile(fms.getMountFilename(mount, "parent"), []byte(digest.Digest(parent).String()), 0o644)
 }
 }
 
 
 func (fms *fileMetadataStore) GetMountID(mount string) (string, error) {
 func (fms *fileMetadataStore) GetMountID(mount string) (string, error) {

+ 4 - 4
layer/filestore_test.go

@@ -51,7 +51,7 @@ func TestCommitFailure(t *testing.T) {
 	fms, td, cleanup := newFileMetadataStore(t)
 	fms, td, cleanup := newFileMetadataStore(t)
 	defer cleanup()
 	defer cleanup()
 
 
-	if err := os.WriteFile(filepath.Join(td, "sha256"), []byte("was here first!"), 0644); err != nil {
+	if err := os.WriteFile(filepath.Join(td, "sha256"), []byte("was here first!"), 0o644); err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 
 
@@ -75,7 +75,7 @@ func TestStartTransactionFailure(t *testing.T) {
 	fms, td, cleanup := newFileMetadataStore(t)
 	fms, td, cleanup := newFileMetadataStore(t)
 	defer cleanup()
 	defer cleanup()
 
 
-	if err := os.WriteFile(filepath.Join(td, "tmp"), []byte("was here first!"), 0644); err != nil {
+	if err := os.WriteFile(filepath.Join(td, "tmp"), []byte("was here first!"), 0o644); err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 
 
@@ -108,7 +108,7 @@ func TestGetOrphan(t *testing.T) {
 	defer cleanup()
 	defer cleanup()
 
 
 	layerRoot := filepath.Join(td, "sha256")
 	layerRoot := filepath.Join(td, "sha256")
-	if err := os.MkdirAll(layerRoot, 0755); err != nil {
+	if err := os.MkdirAll(layerRoot, 0o755); err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 
 
@@ -123,7 +123,7 @@ func TestGetOrphan(t *testing.T) {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 	layerPath := fms.getLayerDirectory(layerid)
 	layerPath := fms.getLayerDirectory(layerid)
-	if err := os.WriteFile(filepath.Join(layerPath, "cache-id"), []byte(stringid.GenerateRandomID()), 0644); err != nil {
+	if err := os.WriteFile(filepath.Join(layerPath, "cache-id"), []byte(stringid.GenerateRandomID()), 0o644); err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 
 

+ 22 - 22
layer/layer_test.go

@@ -139,7 +139,7 @@ func newTestFile(name string, content []byte, perm os.FileMode) FileApplier {
 
 
 func (tf *testFile) ApplyFile(root string) error {
 func (tf *testFile) ApplyFile(root string) error {
 	fullPath := filepath.Join(root, tf.name)
 	fullPath := filepath.Join(root, tf.name)
-	if err := os.MkdirAll(filepath.Dir(fullPath), 0755); err != nil {
+	if err := os.MkdirAll(filepath.Dir(fullPath), 0o755); err != nil {
 		return err
 		return err
 	}
 	}
 	// Check if already exists
 	// Check if already exists
@@ -247,7 +247,7 @@ func TestMountAndRegister(t *testing.T) {
 	ls, _, cleanup := newTestStore(t)
 	ls, _, cleanup := newTestStore(t)
 	defer cleanup()
 	defer cleanup()
 
 
-	li := initWithFiles(newTestFile("testfile.txt", []byte("some test data"), 0644))
+	li := initWithFiles(newTestFile("testfile.txt", []byte("some test data"), 0o644))
 	layer, err := createLayer(ls, "", li)
 	layer, err := createLayer(ls, "", li)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -292,12 +292,12 @@ func TestLayerRelease(t *testing.T) {
 	ls, _, cleanup := newTestStore(t)
 	ls, _, cleanup := newTestStore(t)
 	defer cleanup()
 	defer cleanup()
 
 
-	layer1, err := createLayer(ls, "", initWithFiles(newTestFile("layer1.txt", []byte("layer 1 file"), 0644)))
+	layer1, err := createLayer(ls, "", initWithFiles(newTestFile("layer1.txt", []byte("layer 1 file"), 0o644)))
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 
 
-	layer2, err := createLayer(ls, layer1.ChainID(), initWithFiles(newTestFile("layer2.txt", []byte("layer 2 file"), 0644)))
+	layer2, err := createLayer(ls, layer1.ChainID(), initWithFiles(newTestFile("layer2.txt", []byte("layer 2 file"), 0o644)))
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
@@ -306,12 +306,12 @@ func TestLayerRelease(t *testing.T) {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 
 
-	layer3a, err := createLayer(ls, layer2.ChainID(), initWithFiles(newTestFile("layer3.txt", []byte("layer 3a file"), 0644)))
+	layer3a, err := createLayer(ls, layer2.ChainID(), initWithFiles(newTestFile("layer3.txt", []byte("layer 3a file"), 0o644)))
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 
 
-	layer3b, err := createLayer(ls, layer2.ChainID(), initWithFiles(newTestFile("layer3.txt", []byte("layer 3b file"), 0644)))
+	layer3b, err := createLayer(ls, layer2.ChainID(), initWithFiles(newTestFile("layer3.txt", []byte("layer 3b file"), 0o644)))
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
@@ -341,12 +341,12 @@ func TestStoreRestore(t *testing.T) {
 	ls, _, cleanup := newTestStore(t)
 	ls, _, cleanup := newTestStore(t)
 	defer cleanup()
 	defer cleanup()
 
 
-	layer1, err := createLayer(ls, "", initWithFiles(newTestFile("layer1.txt", []byte("layer 1 file"), 0644)))
+	layer1, err := createLayer(ls, "", initWithFiles(newTestFile("layer1.txt", []byte("layer 1 file"), 0o644)))
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 
 
-	layer2, err := createLayer(ls, layer1.ChainID(), initWithFiles(newTestFile("layer2.txt", []byte("layer 2 file"), 0644)))
+	layer2, err := createLayer(ls, layer1.ChainID(), initWithFiles(newTestFile("layer2.txt", []byte("layer 2 file"), 0o644)))
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
@@ -355,7 +355,7 @@ func TestStoreRestore(t *testing.T) {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 
 
-	layer3, err := createLayer(ls, layer2.ChainID(), initWithFiles(newTestFile("layer3.txt", []byte("layer 3 file"), 0644)))
+	layer3, err := createLayer(ls, layer2.ChainID(), initWithFiles(newTestFile("layer3.txt", []byte("layer 3 file"), 0o644)))
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
@@ -374,7 +374,7 @@ func TestStoreRestore(t *testing.T) {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 
 
-	if err := os.WriteFile(filepath.Join(pathFS, "testfile.txt"), []byte("nothing here"), 0644); err != nil {
+	if err := os.WriteFile(filepath.Join(pathFS, "testfile.txt"), []byte("nothing here"), 0o644); err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 
 
@@ -457,14 +457,14 @@ func TestTarStreamStability(t *testing.T) {
 	defer cleanup()
 	defer cleanup()
 
 
 	files1 := []FileApplier{
 	files1 := []FileApplier{
-		newTestFile("/etc/hosts", []byte("mydomain 10.0.0.1"), 0644),
-		newTestFile("/etc/profile", []byte("PATH=/usr/bin"), 0644),
+		newTestFile("/etc/hosts", []byte("mydomain 10.0.0.1"), 0o644),
+		newTestFile("/etc/profile", []byte("PATH=/usr/bin"), 0o644),
 	}
 	}
-	addedFile := newTestFile("/etc/shadow", []byte("root:::::::"), 0644)
+	addedFile := newTestFile("/etc/shadow", []byte("root:::::::"), 0o644)
 	files2 := []FileApplier{
 	files2 := []FileApplier{
-		newTestFile("/etc/hosts", []byte("mydomain 10.0.0.2"), 0644),
-		newTestFile("/etc/profile", []byte("PATH=/usr/bin"), 0664),
-		newTestFile("/root/.bashrc", []byte("PATH=/usr/sbin:/usr/bin"), 0644),
+		newTestFile("/etc/hosts", []byte("mydomain 10.0.0.2"), 0o644),
+		newTestFile("/etc/profile", []byte("PATH=/usr/bin"), 0o664),
+		newTestFile("/root/.bashrc", []byte("PATH=/usr/sbin:/usr/bin"), 0o644),
 	}
 	}
 
 
 	tar1, err := tarFromFiles(files1...)
 	tar1, err := tarFromFiles(files1...)
@@ -646,11 +646,11 @@ func TestRegisterExistingLayer(t *testing.T) {
 	defer cleanup()
 	defer cleanup()
 
 
 	baseFiles := []FileApplier{
 	baseFiles := []FileApplier{
-		newTestFile("/etc/profile", []byte("# Base configuration"), 0644),
+		newTestFile("/etc/profile", []byte("# Base configuration"), 0o644),
 	}
 	}
 
 
 	layerFiles := []FileApplier{
 	layerFiles := []FileApplier{
-		newTestFile("/root/.bashrc", []byte("# Root configuration"), 0644),
+		newTestFile("/root/.bashrc", []byte("# Root configuration"), 0o644),
 	}
 	}
 
 
 	li := initWithFiles(baseFiles...)
 	li := initWithFiles(baseFiles...)
@@ -686,12 +686,12 @@ func TestTarStreamVerification(t *testing.T) {
 	defer cleanup()
 	defer cleanup()
 
 
 	files1 := []FileApplier{
 	files1 := []FileApplier{
-		newTestFile("/foo", []byte("abc"), 0644),
-		newTestFile("/bar", []byte("def"), 0644),
+		newTestFile("/foo", []byte("abc"), 0o644),
+		newTestFile("/bar", []byte("def"), 0o644),
 	}
 	}
 	files2 := []FileApplier{
 	files2 := []FileApplier{
-		newTestFile("/foo", []byte("abc"), 0644),
-		newTestFile("/bar", []byte("def"), 0600), // different perm
+		newTestFile("/foo", []byte("abc"), 0o644),
+		newTestFile("/bar", []byte("def"), 0o600), // different perm
 	}
 	}
 
 
 	tar1, err := tarFromFiles(files1...)
 	tar1, err := tarFromFiles(files1...)

+ 2 - 2
layer/layer_unix_test.go

@@ -25,12 +25,12 @@ func TestLayerSize(t *testing.T) {
 	content1 := []byte("Base contents")
 	content1 := []byte("Base contents")
 	content2 := []byte("Added contents")
 	content2 := []byte("Added contents")
 
 
-	layer1, err := createLayer(ls, "", initWithFiles(newTestFile("file1", content1, 0644)))
+	layer1, err := createLayer(ls, "", initWithFiles(newTestFile("file1", content1, 0o644)))
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 
 
-	layer2, err := createLayer(ls, layer1.ChainID(), initWithFiles(newTestFile("file2", content2, 0644)))
+	layer2, err := createLayer(ls, layer1.ChainID(), initWithFiles(newTestFile("file2", content2, 0o644)))
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}

+ 7 - 7
layer/migration_test.go

@@ -16,7 +16,7 @@ import (
 )
 )
 
 
 func writeTarSplitFile(name string, tarContent []byte) error {
 func writeTarSplitFile(name string, tarContent []byte) error {
-	f, err := os.OpenFile(name, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
+	f, err := os.OpenFile(name, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0o644)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
@@ -51,12 +51,12 @@ func TestLayerMigration(t *testing.T) {
 	defer os.RemoveAll(td)
 	defer os.RemoveAll(td)
 
 
 	layer1Files := []FileApplier{
 	layer1Files := []FileApplier{
-		newTestFile("/root/.bashrc", []byte("# Boring configuration"), 0644),
-		newTestFile("/etc/profile", []byte("# Base configuration"), 0644),
+		newTestFile("/root/.bashrc", []byte("# Boring configuration"), 0o644),
+		newTestFile("/etc/profile", []byte("# Base configuration"), 0o644),
 	}
 	}
 
 
 	layer2Files := []FileApplier{
 	layer2Files := []FileApplier{
-		newTestFile("/root/.bashrc", []byte("# Updated configuration"), 0644),
+		newTestFile("/root/.bashrc", []byte("# Updated configuration"), 0o644),
 	}
 	}
 
 
 	tar1, err := tarFromFiles(layer1Files...)
 	tar1, err := tarFromFiles(layer1Files...)
@@ -187,12 +187,12 @@ func TestLayerMigrationNoTarsplit(t *testing.T) {
 	defer os.RemoveAll(td)
 	defer os.RemoveAll(td)
 
 
 	layer1Files := []FileApplier{
 	layer1Files := []FileApplier{
-		newTestFile("/root/.bashrc", []byte("# Boring configuration"), 0644),
-		newTestFile("/etc/profile", []byte("# Base configuration"), 0644),
+		newTestFile("/root/.bashrc", []byte("# Boring configuration"), 0o644),
+		newTestFile("/etc/profile", []byte("# Base configuration"), 0o644),
 	}
 	}
 
 
 	layer2Files := []FileApplier{
 	layer2Files := []FileApplier{
-		newTestFile("/root/.bashrc", []byte("# Updated configuration"), 0644),
+		newTestFile("/root/.bashrc", []byte("# Updated configuration"), 0o644),
 	}
 	}
 
 
 	graph, err := newVFSGraphDriver(filepath.Join(td, "graphdriver-"))
 	graph, err := newVFSGraphDriver(filepath.Join(td, "graphdriver-"))

+ 17 - 17
layer/mount_test.go

@@ -20,8 +20,8 @@ func TestMountInit(t *testing.T) {
 	ls, _, cleanup := newTestStore(t)
 	ls, _, cleanup := newTestStore(t)
 	defer cleanup()
 	defer cleanup()
 
 
-	basefile := newTestFile("testfile.txt", []byte("base data!"), 0644)
-	initfile := newTestFile("testfile.txt", []byte("init data!"), 0777)
+	basefile := newTestFile("testfile.txt", []byte("base data!"), 0o644)
+	initfile := newTestFile("testfile.txt", []byte("init data!"), 0o777)
 
 
 	li := initWithFiles(basefile)
 	li := initWithFiles(basefile)
 	layer, err := createLayer(ls, "", li)
 	layer, err := createLayer(ls, "", li)
@@ -66,8 +66,8 @@ func TestMountInit(t *testing.T) {
 		t.Fatalf("Unexpected test file contents %q, expected %q", string(b), expected)
 		t.Fatalf("Unexpected test file contents %q, expected %q", string(b), expected)
 	}
 	}
 
 
-	if fi.Mode().Perm() != 0777 {
-		t.Fatalf("Unexpected filemode %o, expecting %o", fi.Mode().Perm(), 0777)
+	if fi.Mode().Perm() != 0o777 {
+		t.Fatalf("Unexpected filemode %o, expecting %o", fi.Mode().Perm(), 0o777)
 	}
 	}
 }
 }
 
 
@@ -83,14 +83,14 @@ func TestMountSize(t *testing.T) {
 	content2 := []byte("Mutable contents")
 	content2 := []byte("Mutable contents")
 	contentInit := []byte("why am I excluded from the size ☹")
 	contentInit := []byte("why am I excluded from the size ☹")
 
 
-	li := initWithFiles(newTestFile("file1", content1, 0644))
+	li := initWithFiles(newTestFile("file1", content1, 0o644))
 	layer, err := createLayer(ls, "", li)
 	layer, err := createLayer(ls, "", li)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 
 
 	mountInit := func(root string) error {
 	mountInit := func(root string) error {
-		return newTestFile("file-init", contentInit, 0777).ApplyFile(root)
+		return newTestFile("file-init", contentInit, 0o777).ApplyFile(root)
 	}
 	}
 	rwLayerOpts := &CreateRWLayerOpts{
 	rwLayerOpts := &CreateRWLayerOpts{
 		InitFunc: mountInit,
 		InitFunc: mountInit,
@@ -106,7 +106,7 @@ func TestMountSize(t *testing.T) {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 
 
-	if err := os.WriteFile(filepath.Join(pathFS, "file2"), content2, 0755); err != nil {
+	if err := os.WriteFile(filepath.Join(pathFS, "file2"), content2, 0o755); err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 
 
@@ -129,11 +129,11 @@ func TestMountChanges(t *testing.T) {
 	defer cleanup()
 	defer cleanup()
 
 
 	basefiles := []FileApplier{
 	basefiles := []FileApplier{
-		newTestFile("testfile1.txt", []byte("base data!"), 0644),
-		newTestFile("testfile2.txt", []byte("base data!"), 0644),
-		newTestFile("testfile3.txt", []byte("base data!"), 0644),
+		newTestFile("testfile1.txt", []byte("base data!"), 0o644),
+		newTestFile("testfile2.txt", []byte("base data!"), 0o644),
+		newTestFile("testfile3.txt", []byte("base data!"), 0o644),
 	}
 	}
-	initfile := newTestFile("testfile1.txt", []byte("init data!"), 0777)
+	initfile := newTestFile("testfile1.txt", []byte("init data!"), 0o777)
 
 
 	li := initWithFiles(basefiles...)
 	li := initWithFiles(basefiles...)
 	layer, err := createLayer(ls, "", li)
 	layer, err := createLayer(ls, "", li)
@@ -158,11 +158,11 @@ func TestMountChanges(t *testing.T) {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 
 
-	if err := driver.LocalDriver.Lchmod(filepath.Join(pathFS, "testfile1.txt"), 0755); err != nil {
+	if err := driver.LocalDriver.Lchmod(filepath.Join(pathFS, "testfile1.txt"), 0o755); err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 
 
-	if err := os.WriteFile(filepath.Join(pathFS, "testfile1.txt"), []byte("mount data!"), 0755); err != nil {
+	if err := os.WriteFile(filepath.Join(pathFS, "testfile1.txt"), []byte("mount data!"), 0o755); err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 
 
@@ -170,11 +170,11 @@ func TestMountChanges(t *testing.T) {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 
 
-	if err := driver.LocalDriver.Lchmod(filepath.Join(pathFS, "testfile3.txt"), 0755); err != nil {
+	if err := driver.LocalDriver.Lchmod(filepath.Join(pathFS, "testfile3.txt"), 0o755); err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 
 
-	if err := os.WriteFile(filepath.Join(pathFS, "testfile4.txt"), []byte("mount data!"), 0644); err != nil {
+	if err := os.WriteFile(filepath.Join(pathFS, "testfile4.txt"), []byte("mount data!"), 0o644); err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 
 
@@ -215,8 +215,8 @@ func TestMountApply(t *testing.T) {
 	ls, _, cleanup := newTestStore(t)
 	ls, _, cleanup := newTestStore(t)
 	defer cleanup()
 	defer cleanup()
 
 
-	basefile := newTestFile("testfile.txt", []byte("base data!"), 0644)
-	newfile := newTestFile("newfile.txt", []byte("new data!"), 0755)
+	basefile := newTestFile("testfile.txt", []byte("base data!"), 0o644)
+	newfile := newTestFile("newfile.txt", []byte("new data!"), 0o755)
 
 
 	li := initWithFiles(basefile)
 	li := initWithFiles(basefile)
 	layer, err := createLayer(ls, "", li)
 	layer, err := createLayer(ls, "", li)

+ 1 - 0
layer/ro_layer.go

@@ -175,6 +175,7 @@ func (vrc *verifiedReadCloser) Read(p []byte) (n int, err error) {
 	}
 	}
 	return
 	return
 }
 }
+
 func (vrc *verifiedReadCloser) Close() error {
 func (vrc *verifiedReadCloser) Close() error {
 	return vrc.rc.Close()
 	return vrc.rc.Close()
 }
 }

+ 2 - 2
libnetwork/drivers/overlay/ovmanager/ovmanager.go

@@ -117,9 +117,9 @@ func (d *driver) NetworkAllocate(id string, option map[string]string, ipV4Data,
 		n.subnets = append(n.subnets, s)
 		n.subnets = append(n.subnets, s)
 	}
 	}
 
 
-	val := fmt.Sprintf("%d", n.subnets[0].vni)
+	val := strconv.FormatUint(uint64(n.subnets[0].vni), 10)
 	for _, s := range n.subnets[1:] {
 	for _, s := range n.subnets[1:] {
-		val = val + fmt.Sprintf(",%d", s.vni)
+		val = val + "," + strconv.FormatUint(uint64(s.vni), 10)
 	}
 	}
 	opts[netlabel.OverlayVxlanIDList] = val
 	opts[netlabel.OverlayVxlanIDList] = val
 
 

+ 8 - 8
libnetwork/drivers/windows/windows.go

@@ -71,12 +71,12 @@ type hnsEndpoint struct {
 	nid       string
 	nid       string
 	profileID string
 	profileID string
 	Type      string
 	Type      string
-	//Note: Currently, the sandboxID is the same as the containerID since windows does
-	//not expose the sandboxID.
-	//In the future, windows will support a proper sandboxID that is different
-	//than the containerID.
-	//Therefore, we are using sandboxID now, so that we won't have to change this code
-	//when windows properly supports a sandboxID.
+	// Note: Currently, the sandboxID is the same as the containerID since windows does
+	// not expose the sandboxID.
+	// In the future, windows will support a proper sandboxID that is different
+	// than the containerID.
+	// Therefore, we are using sandboxID now, so that we won't have to change this code
+	// when windows properly supports a sandboxID.
 	sandboxID      string
 	sandboxID      string
 	macAddress     net.HardwareAddr
 	macAddress     net.HardwareAddr
 	epOption       *endpointOption       // User specified parameters
 	epOption       *endpointOption       // User specified parameters
@@ -377,8 +377,8 @@ func (d *driver) CreateNetwork(id string, option map[string]interface{}, nInfo d
 		for i, subnet := range hnsresponse.Subnets {
 		for i, subnet := range hnsresponse.Subnets {
 			var gwIP, subnetIP *net.IPNet
 			var gwIP, subnetIP *net.IPNet
 
 
-			//The gateway returned from HNS is an IPAddress.
-			//We need to convert it to an IPNet to use as the Gateway of driverapi.IPAMData struct
+			// The gateway returned from HNS is an IPAddress.
+			// We need to convert it to an IPNet to use as the Gateway of driverapi.IPAMData struct
 			gwCIDR := subnet.GatewayAddress + "/32"
 			gwCIDR := subnet.GatewayAddress + "/32"
 			_, gwIP, err = net.ParseCIDR(gwCIDR)
 			_, gwIP, err = net.ParseCIDR(gwCIDR)
 			if err != nil {
 			if err != nil {

+ 11 - 11
libnetwork/networkdb/networkdbdiagnostic.go

@@ -61,7 +61,7 @@ func dbJoin(ctx interface{}, w http.ResponseWriter, r *http.Request) {
 		diagnostic.HTTPReply(w, diagnostic.CommandSucceed(nil), json)
 		diagnostic.HTTPReply(w, diagnostic.CommandSucceed(nil), json)
 		return
 		return
 	}
 	}
-	diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json)
+	diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf(dbNotAvailable)), json)
 }
 }
 
 
 func dbPeers(ctx interface{}, w http.ResponseWriter, r *http.Request) {
 func dbPeers(ctx interface{}, w http.ResponseWriter, r *http.Request) {
@@ -95,7 +95,7 @@ func dbPeers(ctx interface{}, w http.ResponseWriter, r *http.Request) {
 		diagnostic.HTTPReply(w, diagnostic.CommandSucceed(rsp), json)
 		diagnostic.HTTPReply(w, diagnostic.CommandSucceed(rsp), json)
 		return
 		return
 	}
 	}
-	diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json)
+	diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf(dbNotAvailable)), json)
 }
 }
 
 
 func dbClusterPeers(ctx interface{}, w http.ResponseWriter, r *http.Request) {
 func dbClusterPeers(ctx interface{}, w http.ResponseWriter, r *http.Request) {
@@ -118,7 +118,7 @@ func dbClusterPeers(ctx interface{}, w http.ResponseWriter, r *http.Request) {
 		diagnostic.HTTPReply(w, diagnostic.CommandSucceed(rsp), json)
 		diagnostic.HTTPReply(w, diagnostic.CommandSucceed(rsp), json)
 		return
 		return
 	}
 	}
-	diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json)
+	diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf(dbNotAvailable)), json)
 }
 }
 
 
 func dbCreateEntry(ctx interface{}, w http.ResponseWriter, r *http.Request) {
 func dbCreateEntry(ctx interface{}, w http.ResponseWriter, r *http.Request) {
@@ -167,7 +167,7 @@ func dbCreateEntry(ctx interface{}, w http.ResponseWriter, r *http.Request) {
 		diagnostic.HTTPReply(w, diagnostic.CommandSucceed(nil), json)
 		diagnostic.HTTPReply(w, diagnostic.CommandSucceed(nil), json)
 		return
 		return
 	}
 	}
-	diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json)
+	diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf(dbNotAvailable)), json)
 }
 }
 
 
 func dbUpdateEntry(ctx interface{}, w http.ResponseWriter, r *http.Request) {
 func dbUpdateEntry(ctx interface{}, w http.ResponseWriter, r *http.Request) {
@@ -215,7 +215,7 @@ func dbUpdateEntry(ctx interface{}, w http.ResponseWriter, r *http.Request) {
 		diagnostic.HTTPReply(w, diagnostic.CommandSucceed(nil), json)
 		diagnostic.HTTPReply(w, diagnostic.CommandSucceed(nil), json)
 		return
 		return
 	}
 	}
-	diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json)
+	diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf(dbNotAvailable)), json)
 }
 }
 
 
 func dbDeleteEntry(ctx interface{}, w http.ResponseWriter, r *http.Request) {
 func dbDeleteEntry(ctx interface{}, w http.ResponseWriter, r *http.Request) {
@@ -252,7 +252,7 @@ func dbDeleteEntry(ctx interface{}, w http.ResponseWriter, r *http.Request) {
 		diagnostic.HTTPReply(w, diagnostic.CommandSucceed(nil), json)
 		diagnostic.HTTPReply(w, diagnostic.CommandSucceed(nil), json)
 		return
 		return
 	}
 	}
-	diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json)
+	diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf(dbNotAvailable)), json)
 }
 }
 
 
 func dbGetEntry(ctx interface{}, w http.ResponseWriter, r *http.Request) {
 func dbGetEntry(ctx interface{}, w http.ResponseWriter, r *http.Request) {
@@ -298,7 +298,7 @@ func dbGetEntry(ctx interface{}, w http.ResponseWriter, r *http.Request) {
 		diagnostic.HTTPReply(w, diagnostic.CommandSucceed(rsp), json)
 		diagnostic.HTTPReply(w, diagnostic.CommandSucceed(rsp), json)
 		return
 		return
 	}
 	}
-	diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json)
+	diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf(dbNotAvailable)), json)
 }
 }
 
 
 func dbJoinNetwork(ctx interface{}, w http.ResponseWriter, r *http.Request) {
 func dbJoinNetwork(ctx interface{}, w http.ResponseWriter, r *http.Request) {
@@ -330,7 +330,7 @@ func dbJoinNetwork(ctx interface{}, w http.ResponseWriter, r *http.Request) {
 		diagnostic.HTTPReply(w, diagnostic.CommandSucceed(nil), json)
 		diagnostic.HTTPReply(w, diagnostic.CommandSucceed(nil), json)
 		return
 		return
 	}
 	}
-	diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json)
+	diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf(dbNotAvailable)), json)
 }
 }
 
 
 func dbLeaveNetwork(ctx interface{}, w http.ResponseWriter, r *http.Request) {
 func dbLeaveNetwork(ctx interface{}, w http.ResponseWriter, r *http.Request) {
@@ -362,7 +362,7 @@ func dbLeaveNetwork(ctx interface{}, w http.ResponseWriter, r *http.Request) {
 		diagnostic.HTTPReply(w, diagnostic.CommandSucceed(nil), json)
 		diagnostic.HTTPReply(w, diagnostic.CommandSucceed(nil), json)
 		return
 		return
 	}
 	}
-	diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json)
+	diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf(dbNotAvailable)), json)
 }
 }
 
 
 func dbGetTable(ctx interface{}, w http.ResponseWriter, r *http.Request) {
 func dbGetTable(ctx interface{}, w http.ResponseWriter, r *http.Request) {
@@ -410,7 +410,7 @@ func dbGetTable(ctx interface{}, w http.ResponseWriter, r *http.Request) {
 		diagnostic.HTTPReply(w, diagnostic.CommandSucceed(rsp), json)
 		diagnostic.HTTPReply(w, diagnostic.CommandSucceed(rsp), json)
 		return
 		return
 	}
 	}
-	diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json)
+	diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf(dbNotAvailable)), json)
 }
 }
 
 
 func dbNetworkStats(ctx interface{}, w http.ResponseWriter, r *http.Request) {
 func dbNetworkStats(ctx interface{}, w http.ResponseWriter, r *http.Request) {
@@ -448,5 +448,5 @@ func dbNetworkStats(ctx interface{}, w http.ResponseWriter, r *http.Request) {
 		diagnostic.HTTPReply(w, rsp, json)
 		diagnostic.HTTPReply(w, rsp, json)
 		return
 		return
 	}
 	}
-	diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json)
+	diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf(dbNotAvailable)), json)
 }
 }

+ 1 - 1
libnetwork/service_linux.go

@@ -600,7 +600,7 @@ func invokeFWMarker(path string, vip net.IP, fwMark uint32, ingressPorts []*Port
 
 
 	cmd := &exec.Cmd{
 	cmd := &exec.Cmd{
 		Path:   reexec.Self(),
 		Path:   reexec.Self(),
-		Args:   append([]string{"fwmarker"}, path, vip.String(), fmt.Sprintf("%d", fwMark), addDelOpt, ingressPortsFile, eIP.String(), lbMode),
+		Args:   append([]string{"fwmarker"}, path, vip.String(), strconv.FormatUint(uint64(fwMark), 10), addDelOpt, ingressPortsFile, eIP.String(), lbMode),
 		Stdout: os.Stdout,
 		Stdout: os.Stdout,
 		Stderr: os.Stderr,
 		Stderr: os.Stderr,
 	}
 	}

+ 2 - 1
libnetwork/types/types.go

@@ -5,6 +5,7 @@ import (
 	"bytes"
 	"bytes"
 	"fmt"
 	"fmt"
 	"net"
 	"net"
+	"strconv"
 	"strings"
 	"strings"
 
 
 	"github.com/ishidawataru/sctp"
 	"github.com/ishidawataru/sctp"
@@ -202,7 +203,7 @@ func (p Protocol) String() string {
 	case SCTP:
 	case SCTP:
 		return "sctp"
 		return "sctp"
 	default:
 	default:
-		return fmt.Sprintf("%d", p)
+		return strconv.Itoa(int(p))
 	}
 	}
 }
 }