
Merge pull request #40263 from thaJeztah/normalize_comments

Normalize comment formatting
Brian Goff 5 years ago
parent
commit
b95fad8e51
65 changed files with 180 additions and 189 deletions
  1. api/server/router/container/container_routes.go (+1 -1)
  2. api/server/router/image/image_routes.go (+2 -2)
  3. api/server/router/plugin/plugin_routes.go (+1 -1)
  4. api/types/backend/backend.go (+1 -1)
  5. api/types/client.go (+1 -1)
  6. api/types/container/host_config.go (+1 -1)
  7. api/types/filters/parse.go (+3 -3)
  8. api/types/network/network.go (+1 -1)
  9. builder/remotecontext/remote_test.go (+1 -1)
  10. client/image_import.go (+1 -1)
  11. daemon/cluster/convert/network.go (+3 -3)
  12. daemon/cluster/convert/node.go (+3 -3)
  13. daemon/cluster/listen_addr.go (+1 -1)
  14. daemon/cluster/swarm.go (+0 -1)
  15. daemon/container.go (+1 -1)
  16. daemon/container_linux.go (+1 -1)
  17. daemon/events/events_test.go (+3 -3)
  18. daemon/graphdriver/btrfs/btrfs.go (+1 -1)
  19. daemon/graphdriver/copy/copy.go (+1 -1)
  20. daemon/graphdriver/copy/copy_test.go (+1 -2)
  21. daemon/graphdriver/devmapper/deviceset.go (+3 -3)
  22. daemon/graphdriver/devmapper/devmapper_test.go (+1 -1)
  23. daemon/graphdriver/driver.go (+1 -1)
  24. daemon/graphdriver/overlay2/mount.go (+1 -1)
  25. daemon/images/images.go (+1 -1)
  26. daemon/links/links.go (+1 -1)
  27. daemon/list.go (+1 -1)
  28. daemon/logger/copier_test.go (+1 -1)
  29. daemon/logger/local/local.go (+1 -1)
  30. daemon/network/settings.go (+2 -2)
  31. image/tarexport/tarexport.go (+1 -1)
  32. integration-cli/docker_api_build_test.go (+5 -10)
  33. integration-cli/docker_api_containers_test.go (+1 -1)
  34. integration-cli/docker_api_images_test.go (+1 -1)
  35. integration-cli/docker_api_swarm_service_test.go (+5 -5)
  36. integration-cli/docker_cli_by_digest_test.go (+2 -3)
  37. integration-cli/docker_cli_commit_test.go (+1 -1)
  38. integration-cli/docker_cli_daemon_test.go (+7 -7)
  39. integration-cli/docker_cli_events_test.go (+4 -4)
  40. integration-cli/docker_cli_events_unix_test.go (+1 -1)
  41. integration-cli/docker_cli_images_test.go (+2 -2)
  42. integration-cli/docker_cli_inspect_test.go (+14 -15)
  43. integration-cli/docker_cli_ps_test.go (+1 -1)
  44. integration-cli/docker_cli_run_test.go (+16 -16)
  45. integration-cli/docker_cli_save_load_unix_test.go (+1 -1)
  46. integration-cli/docker_cli_search_test.go (+3 -3)
  47. integration/service/update_test.go (+1 -1)
  48. libcontainerd/local/local_windows.go (+33 -33)
  49. libcontainerd/queue/queue_test.go (+1 -1)
  50. pkg/archive/archive.go (+6 -6)
  51. pkg/archive/archive_unix_test.go (+13 -13)
  52. pkg/archive/archive_windows.go (+1 -1)
  53. pkg/chrootarchive/archive_test.go (+2 -2)
  54. pkg/chrootarchive/archive_unix.go (+2 -2)
  55. pkg/containerfs/archiver.go (+1 -1)
  56. pkg/idtools/utils_unix.go (+2 -2)
  57. pkg/jsonmessage/jsonmessage.go (+1 -1)
  58. pkg/mount/mountinfo_freebsd.go (+1 -1)
  59. pkg/progress/progressreader.go (+1 -1)
  60. pkg/signal/trap.go (+1 -1)
  61. pkg/system/chtimes_unix.go (+3 -3)
  62. pkg/system/chtimes_windows.go (+2 -2)
  63. pkg/system/filesys_windows.go (+1 -1)
  64. pkg/tailfile/tailfile.go (+1 -1)
  65. registry/registry_mock_test.go (+1 -1)

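Every hunk below applies the same mechanical change: a space is added after the "//" marker so comments read "// like this" rather than "//like this", matching the comment style used throughout the Go standard library. A few test files additionally fold a stand-alone explanatory comment into the message argument of the assertion it describes. A minimal, hypothetical sketch of the comment normalization (the example package and functions are invented for illustration, not taken from this PR):

package example

import "fmt"

// oldStyle and newStyle are illustrative only; they do not exist in the Moby tree.

//oldStyle shows the form being removed: no space after the comment marker.
func oldStyle() {
	fmt.Println("hello") //trailing comment, no space
}

// newStyle shows the normalized form: a single space after "//".
func newStyle() {
	fmt.Println("hello") // trailing comment, with space
}
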
+ 1 - 1
api/server/router/container/container_routes.go

@@ -41,7 +41,7 @@ func (s *containerRouter) postCommit(ctx context.Context, w http.ResponseWriter,
 	}
 
 	config, _, _, err := s.decoder.DecodeConfig(r.Body)
-	if err != nil && err != io.EOF { //Do not fail if body is empty.
+	if err != nil && err != io.EOF { // Do not fail if body is empty.
 		return err
 	}
 

+ 2 - 2
api/server/router/image/image_routes.go

@@ -57,7 +57,7 @@ func (s *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWrite
 		}
 	}
 
-	if image != "" { //pull
+	if image != "" { // pull
 		metaHeaders := map[string][]string{}
 		for k, v := range r.Header {
 			if strings.HasPrefix(k, "X-Meta-") {
@@ -76,7 +76,7 @@ func (s *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWrite
 			}
 		}
 		err = s.backend.PullImage(ctx, image, tag, platform, metaHeaders, authConfig, output)
-	} else { //import
+	} else { // import
 		src := r.Form.Get("fromSrc")
 		// 'err' MUST NOT be defined within this block, we need any error
 		// generated from the download to be available to the output

+ 1 - 1
api/server/router/plugin/plugin_routes.go

@@ -211,7 +211,7 @@ func (pr *pluginRouter) createPlugin(ctx context.Context, w http.ResponseWriter,
 	if err := pr.backend.CreateFromContext(ctx, r.Body, options); err != nil {
 		return err
 	}
-	//TODO: send progress bar
+	// TODO: send progress bar
 	w.WriteHeader(http.StatusNoContent)
 	return nil
 }

+ 1 - 1
api/types/backend/backend.go

@@ -30,7 +30,7 @@ type ContainerAttachConfig struct {
 // expectation is for the logger endpoints to assemble the chunks using this
 // metadata.
 type PartialLogMetaData struct {
-	Last    bool   //true if this message is last of a partial
+	Last    bool   // true if this message is last of a partial
 	ID      string // identifies group of messages comprising a single record
 	Ordinal int    // ordering of message in partial group
 }

+ 1 - 1
api/types/client.go

@@ -265,7 +265,7 @@ type ImagePullOptions struct {
 // if the privilege request fails.
 type RequestPrivilegeFunc func() (string, error)
 
-//ImagePushOptions holds information to push images.
+// ImagePushOptions holds information to push images.
 type ImagePushOptions ImagePullOptions
 
 // ImageRemoveOptions holds parameters to remove images.

+ 1 - 1
api/types/container/host_config.go

@@ -145,7 +145,7 @@ func (n NetworkMode) ConnectedContainer() string {
 	return ""
 }
 
-//UserDefined indicates user-created network
+// UserDefined indicates user-created network
 func (n NetworkMode) UserDefined() string {
 	if n.IsUserDefined() {
 		return string(n)

+ 3 - 3
api/types/filters/parse.go

@@ -154,7 +154,7 @@ func (args Args) Len() int {
 func (args Args) MatchKVList(key string, sources map[string]string) bool {
 	fieldValues := args.fields[key]
 
-	//do not filter if there is no filter set or cannot determine filter
+	// do not filter if there is no filter set or cannot determine filter
 	if len(fieldValues) == 0 {
 		return true
 	}
@@ -200,7 +200,7 @@ func (args Args) Match(field, source string) bool {
 // ExactMatch returns true if the source matches exactly one of the values.
 func (args Args) ExactMatch(key, source string) bool {
 	fieldValues, ok := args.fields[key]
-	//do not filter if there is no filter set or cannot determine filter
+	// do not filter if there is no filter set or cannot determine filter
 	if !ok || len(fieldValues) == 0 {
 		return true
 	}
@@ -213,7 +213,7 @@ func (args Args) ExactMatch(key, source string) bool {
 // matches exactly the value.
 func (args Args) UniqueExactMatch(key, source string) bool {
 	fieldValues := args.fields[key]
-	//do not filter if there is no filter set or cannot determine filter
+	// do not filter if there is no filter set or cannot determine filter
 	if len(fieldValues) == 0 {
 		return true
 	}

+ 1 - 1
api/types/network/network.go

@@ -13,7 +13,7 @@ type Address struct {
 // IPAM represents IP Address Management
 type IPAM struct {
 	Driver  string
-	Options map[string]string //Per network IPAM driver options
+	Options map[string]string // Per network IPAM driver options
 	Config  []IPAMConfig
 }
 

+ 1 - 1
builder/remotecontext/remote_test.go

@@ -15,7 +15,7 @@ import (
 	"gotest.tools/fs"
 )
 
-var binaryContext = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00} //xz magic
+var binaryContext = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00} // xz magic
 
 func TestSelectAcceptableMIME(t *testing.T) {
 	validMimeStrings := []string{

+ 1 - 1
client/image_import.go

@@ -14,7 +14,7 @@ import (
 // It returns the JSON content in the response body.
 func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) {
 	if ref != "" {
-		//Check if the given image name can be resolved
+		// Check if the given image name can be resolved
 		if _, err := reference.ParseNormalizedNamed(ref); err != nil {
 			return nil, err
 		}

+ 3 - 3
daemon/cluster/convert/network.go

@@ -47,10 +47,10 @@ func networkFromGRPC(n *swarmapi.Network) types.Network {
 		network.CreatedAt, _ = gogotypes.TimestampFromProto(n.Meta.CreatedAt)
 		network.UpdatedAt, _ = gogotypes.TimestampFromProto(n.Meta.UpdatedAt)
 
-		//Annotations
+		// Annotations
 		network.Spec.Annotations = annotationsFromGRPC(n.Spec.Annotations)
 
-		//DriverConfiguration
+		// DriverConfiguration
 		if n.Spec.DriverConfig != nil {
 			network.Spec.DriverConfiguration = &types.Driver{
 				Name:    n.Spec.DriverConfig.Name,
@@ -58,7 +58,7 @@ func networkFromGRPC(n *swarmapi.Network) types.Network {
 			}
 		}
 
-		//DriverState
+		// DriverState
 		if n.DriverState != nil {
 			network.DriverState = types.Driver{
 				Name:    n.DriverState.Name,

+ 3 - 3
daemon/cluster/convert/node.go

@@ -29,10 +29,10 @@ func NodeFromGRPC(n swarmapi.Node) types.Node {
 	node.CreatedAt, _ = gogotypes.TimestampFromProto(n.Meta.CreatedAt)
 	node.UpdatedAt, _ = gogotypes.TimestampFromProto(n.Meta.UpdatedAt)
 
-	//Annotations
+	// Annotations
 	node.Spec.Annotations = annotationsFromGRPC(n.Spec.Annotations)
 
-	//Description
+	// Description
 	if n.Description != nil {
 		node.Description.Hostname = n.Description.Hostname
 		if n.Description.Platform != nil {
@@ -58,7 +58,7 @@ func NodeFromGRPC(n swarmapi.Node) types.Node {
 		}
 	}
 
-	//Manager
+	// Manager
 	if n.ManagerStatus != nil {
 		node.ManagerStatus = &types.ManagerStatus{
 			Leader:       n.ManagerStatus.Leader,

+ 1 - 1
daemon/cluster/listen_addr.go

@@ -95,7 +95,7 @@ func validateDefaultAddrPool(defaultAddrPool []string, size uint32) error {
 		// defaultAddrPool is not defined
 		return nil
 	}
-	//if size is not set, then we use default value 24
+	// if size is not set, then we use default value 24
 	if size == 0 {
 		size = 24
 	}

+ 0 - 1
daemon/cluster/swarm.go

@@ -93,7 +93,6 @@ func (c *Cluster) Init(req types.InitRequest) (string, error) {
 		}
 	}
 
-	//Validate Default Address Pool input
 	if err := validateDefaultAddrPool(req.DefaultAddrPool, req.SubnetSize); err != nil {
 		return "", err
 	}

+ 1 - 1
daemon/container.go

@@ -154,7 +154,7 @@ func (daemon *Daemon) newContainer(name string, operatingSystem string, config *
 	base.Created = time.Now().UTC()
 	base.Managed = managed
 	base.Path = entrypoint
-	base.Args = args //FIXME: de-duplicate from config
+	base.Args = args // FIXME: de-duplicate from config
 	base.Config = config
 	base.HostConfig = &containertypes.HostConfig{}
 	base.ImageID = imgID

+ 1 - 1
daemon/container_linux.go

@@ -8,7 +8,7 @@ import (
 )
 
 func (daemon *Daemon) saveApparmorConfig(container *container.Container) error {
-	container.AppArmorProfile = "" //we don't care about the previous value.
+	container.AppArmorProfile = "" // we don't care about the previous value.
 
 	if !daemon.apparmorEnabled {
 		return nil // if apparmor is disabled there is nothing to do here.

+ 3 - 3
daemon/events/events_test.go

@@ -163,9 +163,9 @@ func TestLogEvents(t *testing.T) {
 // https://github.com/docker/docker/issues/20999
 // Fixtures:
 //
-//2016-03-07T17:28:03.022433271+02:00 container die 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)
-//2016-03-07T17:28:03.091719377+02:00 network disconnect 19c5ed41acb798f26b751e0035cd7821741ab79e2bbd59a66b5fd8abf954eaa0 (type=bridge, container=0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079, name=bridge)
-//2016-03-07T17:28:03.129014751+02:00 container destroy 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)
+// 2016-03-07T17:28:03.022433271+02:00 container die 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)
+// 2016-03-07T17:28:03.091719377+02:00 network disconnect 19c5ed41acb798f26b751e0035cd7821741ab79e2bbd59a66b5fd8abf954eaa0 (type=bridge, container=0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079, name=bridge)
+// 2016-03-07T17:28:03.129014751+02:00 container destroy 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)
 func TestLoadBufferedEvents(t *testing.T) {
 	now := time.Now()
 	f, err := timetypes.GetTimestamp("2016-03-07T17:28:03.100000000+02:00", now)

+ 1 - 1
daemon/graphdriver/btrfs/btrfs.go

@@ -134,7 +134,7 @@ func parseOptions(opt []string) (btrfsOptions, bool, error) {
 
 // Driver contains information about the filesystem mounted.
 type Driver struct {
-	//root of the file system
+	// root of the file system
 	home         string
 	uidMaps      []idtools.IDMap
 	gidMaps      []idtools.IDMap

+ 1 - 1
daemon/graphdriver/copy/copy.go

@@ -143,7 +143,7 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
 
 		switch mode := f.Mode(); {
 		case mode.IsRegular():
-			//the type is 32bit on mips
+			// the type is 32bit on mips
 			id := fileID{dev: uint64(stat.Dev), ino: stat.Ino} // nolint: unconvert
 			if copyMode == Hardlink {
 				isHardlink = true

+ 1 - 2
daemon/graphdriver/copy/copy_test.go

@@ -67,8 +67,7 @@ func TestCopyDir(t *testing.T) {
 		if srcFileSys.Dev == dstFileSys.Dev {
 			assert.Check(t, srcFileSys.Ino != dstFileSys.Ino)
 		}
-		// Todo: check size, and ctim is not equal
-		/// on filesystems that have granular ctimes
+		// Todo: check size, and ctim is not equal on filesystems that have granular ctimes
 		assert.Check(t, is.DeepEqual(srcFileSys.Mode, dstFileSys.Mode))
 		assert.Check(t, is.DeepEqual(srcFileSys.Uid, dstFileSys.Uid))
 		assert.Check(t, is.DeepEqual(srcFileSys.Gid, dstFileSys.Gid))

+ 3 - 3
daemon/graphdriver/devmapper/deviceset.go

@@ -119,7 +119,7 @@ type DeviceSet struct {
 	deletionWorkerTicker  *time.Ticker
 	uidMaps               []idtools.IDMap
 	gidMaps               []idtools.IDMap
-	minFreeSpacePercent   uint32 //min free space percentage in thinpool
+	minFreeSpacePercent   uint32 // min free space percentage in thinpool
 	xfsNospaceRetries     string // max retries when xfs receives ENOSPC
 	lvmSetupConfig        directLVMConfig
 }
@@ -1692,8 +1692,8 @@ func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) {
 		}
 	}
 
-	//create the root dir of the devmapper driver ownership to match this
-	//daemon's remapped root uid/gid so containers can start properly
+	// create the root dir of the devmapper driver ownership to match this
+	// daemon's remapped root uid/gid so containers can start properly
 	uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps)
 	if err != nil {
 		return err

+ 1 - 1
daemon/graphdriver/devmapper/devmapper_test.go

@@ -110,7 +110,7 @@ func testChangeLoopBackSize(t *testing.T, delta, expectDataSize, expectMetaDataS
 	if err := driver.Cleanup(); err != nil {
 		t.Fatal(err)
 	}
-	//Reload
+	// Reload
 	d, err := Init(driver.home, []string{
 		fmt.Sprintf("dm.loopdatasize=%d", defaultDataLoopbackSize+delta),
 		fmt.Sprintf("dm.loopmetadatasize=%d", defaultMetaDataLoopbackSize+delta),

+ 1 - 1
daemon/graphdriver/driver.go

@@ -29,7 +29,7 @@ var (
 	drivers map[string]InitFunc
 )
 
-//CreateOpts contains optional arguments for Create() and CreateReadWrite()
+// CreateOpts contains optional arguments for Create() and CreateReadWrite()
 // methods.
 type CreateOpts struct {
 	MountLabel string

+ 1 - 1
daemon/graphdriver/overlay2/mount.go

@@ -53,7 +53,7 @@ func mountFrom(dir, device, target, mType string, flags uintptr, label string) e
 		w.Close()
 		return fmt.Errorf("mountfrom error on re-exec cmd: %v", err)
 	}
-	//write the options to the pipe for the untar exec to read
+	// write the options to the pipe for the untar exec to read
 	if err := json.NewEncoder(w).Encode(options); err != nil {
 		w.Close()
 		return fmt.Errorf("mountfrom json encode to pipe failed: %v", err)

+ 1 - 1
daemon/images/images.go

@@ -171,7 +171,7 @@ func (i *ImageService) Images(imageFilters filters.Args, all bool, withExtraAttr
 			if all || len(i.imageStore.Children(id)) == 0 {
 
 				if imageFilters.Contains("dangling") && !danglingOnly {
-					//dangling=false case, so dangling image is not needed
+					// dangling=false case, so dangling image is not needed
 					continue
 				}
 				if imageFilters.Contains("reference") { // skip images with no references if filtering by reference

+ 1 - 1
daemon/links/links.go

@@ -56,7 +56,7 @@ func (l *Link) ToEnv() []string {
 		env = append(env, fmt.Sprintf("%s_PORT=%s://%s:%s", alias, p.Proto(), l.ChildIP, p.Port()))
 	}
 
-	//sort the ports so that we can bulk the continuous ports together
+	// sort the ports so that we can bulk the continuous ports together
 	nat.Sort(l.Ports, func(ip, jp nat.Port) bool {
 		// If the two ports have the same number, tcp takes priority
 		// Sort in desc order

+ 1 - 1
daemon/list.go

@@ -388,7 +388,7 @@ func portOp(key string, filter map[nat.Port]bool) func(value string) error {
 		if strings.Contains(value, ":") {
 			return fmt.Errorf("filter for '%s' should not contain ':': %s", key, value)
 		}
-		//support two formats, original format <portnum>/[<proto>] or <startport-endport>/[<proto>]
+		// support two formats, original format <portnum>/[<proto>] or <startport-endport>/[<proto>]
 		proto, port := nat.SplitProtoPort(value)
 		start, end, err := nat.ParsePortRange(port)
 		if err != nil {

+ 1 - 1
daemon/logger/copier_test.go

@@ -203,7 +203,7 @@ func TestCopierSlow(t *testing.T) {
 	}
 
 	var jsonBuf bytes.Buffer
-	//encoder := &encodeCloser{Encoder: json.NewEncoder(&jsonBuf)}
+	// encoder := &encodeCloser{Encoder: json.NewEncoder(&jsonBuf)}
 	jsonLog := &TestLoggerJSON{Encoder: json.NewEncoder(&jsonBuf), delay: 100 * time.Millisecond}
 
 	c := NewCopier(map[string]io.Reader{"stdout": &stdout}, jsonLog)

+ 1 - 1
daemon/logger/local/local.go

@@ -109,7 +109,7 @@ func makeMarshaller() func(m *logger.Message) ([]byte, error) {
 
 		messageToProto(m, proto, md)
 		protoSize := proto.Size()
-		writeLen := protoSize + (2 * encodeBinaryLen) //+ len(messageDelimiter)
+		writeLen := protoSize + (2 * encodeBinaryLen) // + len(messageDelimiter)
 
 		if writeLen > len(buf) {
 			buf = make([]byte, writeLen)

+ 2 - 2
daemon/network/settings.go

@@ -39,8 +39,8 @@ type EndpointSettings struct {
 // AttachmentStore stores the load balancer IP address for a network id.
 type AttachmentStore struct {
 	sync.Mutex
-	//key: networkd id
-	//value: load balancer ip address
+	// key: networkd id
+	// value: load balancer ip address
 	networkToNodeLBIP map[string]net.IP
 }
 

+ 1 - 1
image/tarexport/tarexport.go

@@ -32,7 +32,7 @@ type tarexporter struct {
 
 // LogImageEvent defines interface for event generation related to image tar(load and save) operations
 type LogImageEvent interface {
-	//LogImageEvent generates an event related to an image operation
+	// LogImageEvent generates an event related to an image operation
 	LogImageEvent(imageID, refName, action string)
 }
 

+ 5 - 10
integration-cli/docker_api_build_test.go

@@ -210,26 +210,21 @@ func (s *DockerSuite) TestBuildAPIUnnormalizedTarPaths(c *testing.T) {
 			Name: "Dockerfile",
 			Size: int64(len(dockerfile)),
 		})
-		//failed to write tar file header
-		assert.NilError(c, err)
+		assert.NilError(c, err, "failed to write tar file header")
 
 		_, err = tw.Write(dockerfile)
-		// failed to write Dockerfile in tar file content
-		assert.NilError(c, err)
+		assert.NilError(c, err, "failed to write Dockerfile in tar file content")
 
 		err = tw.WriteHeader(&tar.Header{
 			Name: "dir/./file",
 			Size: int64(len(fileContents)),
 		})
-		//failed to write tar file header
-		assert.NilError(c, err)
+		assert.NilError(c, err, "failed to write tar file header")
 
 		_, err = tw.Write(fileContents)
-		// failed to write file contents in tar file content
-		assert.NilError(c, err)
+		assert.NilError(c, err, "failed to write file contents in tar file content")
 
-		// failed to close tar archive
-		assert.NilError(c, tw.Close())
+		assert.NilError(c, tw.Close(), "failed to close tar archive")
 
 		res, body, err := request.Post("/build", request.RawContent(ioutil.NopCloser(buffer)), request.ContentType("application/x-tar"))
 		assert.NilError(c, err)
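
The hunk above also shows the second cleanup applied in a handful of test files: a comment that only restated the purpose of a check is moved into the optional message argument of assert.NilError, so the explanation shows up in the failure output instead of sitting next to the call. A short hedged sketch of the pattern using gotest.tools (TestTarHeaderMessage and its tar contents are invented for illustration, not taken from this PR):

package example_test

import (
	"archive/tar"
	"bytes"
	"testing"

	"gotest.tools/assert"
)

func TestTarHeaderMessage(t *testing.T) {
	buf := new(bytes.Buffer)
	tw := tar.NewWriter(buf)

	// Previously: a comment above a bare assertion, e.g.
	//   // failed to write tar file header
	//   assert.NilError(t, err)
	// Now: the explanation becomes the assertion message, printed on failure.
	err := tw.WriteHeader(&tar.Header{Name: "Dockerfile", Size: 0})
	assert.NilError(t, err, "failed to write tar file header")
	assert.NilError(t, tw.Close(), "failed to close tar archive")
}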

+ 1 - 1
integration-cli/docker_api_containers_test.go

@@ -689,7 +689,7 @@ func (s *DockerSuite) TestContainerAPIVerifyHeader(c *testing.T) {
 	body.Close()
 }
 
-//Issue 14230. daemon should return 500 for invalid port syntax
+// Issue 14230. daemon should return 500 for invalid port syntax
 func (s *DockerSuite) TestContainerAPIInvalidPortSyntax(c *testing.T) {
 	config := `{
 				  "Image": "busybox",

+ 1 - 1
integration-cli/docker_api_images_test.go

@@ -44,7 +44,7 @@ func (s *DockerSuite) TestAPIImagesFilter(c *testing.T) {
 		return images
 	}
 
-	//incorrect number of matches returned
+	// incorrect number of matches returned
 	images := getImages("utest*/*")
 	assert.Equal(c, len(images[0].RepoTags), 2)
 

+ 5 - 5
integration-cli/docker_api_swarm_service_test.go

@@ -356,7 +356,7 @@ func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintRole(c *testing.T) {
 		node := daemons[0].GetNode(c, task.NodeID)
 		assert.Equal(c, node.Spec.Role, swarm.NodeRoleWorker)
 	}
-	//remove service
+	// remove service
 	daemons[0].RemoveService(c, id)
 
 	// create service
@@ -370,7 +370,7 @@ func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintRole(c *testing.T) {
 		node := daemons[0].GetNode(c, task.NodeID)
 		assert.Equal(c, node.Spec.Role, swarm.NodeRoleManager)
 	}
-	//remove service
+	// remove service
 	daemons[0].RemoveService(c, id)
 
 	// create service
@@ -423,7 +423,7 @@ func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintLabel(c *testing.T) {
 	for _, task := range tasks {
 		assert.Assert(c, task.NodeID == nodes[0].ID)
 	}
-	//remove service
+	// remove service
 	daemons[0].RemoveService(c, id)
 
 	// create service
@@ -436,7 +436,7 @@ func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintLabel(c *testing.T) {
 	for _, task := range tasks {
 		assert.Assert(c, task.NodeID != nodes[0].ID)
 	}
-	//remove service
+	// remove service
 	daemons[0].RemoveService(c, id)
 
 	constraints = []string{"node.labels.security==medium"}
@@ -450,7 +450,7 @@ func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintLabel(c *testing.T) {
 	for _, task := range tasks {
 		assert.Assert(c, task.NodeID == "")
 	}
-	//remove service
+	// remove service
 	daemons[0].RemoveService(c, id)
 
 	// multiple constraints

+ 2 - 3
integration-cli/docker_cli_by_digest_test.go

@@ -175,7 +175,7 @@ func (s *DockerRegistrySuite) TestRemoveImageByDigest(c *testing.T) {
 
 	// try to inspect again - it should error this time
 	_, err = inspectFieldWithError(imageReference, "Id")
-	//unexpected nil err trying to inspect what should be a non-existent image
+	// unexpected nil err trying to inspect what should be a non-existent image
 	assert.ErrorContains(c, err, "No such object")
 }
 
@@ -255,8 +255,7 @@ func (s *DockerRegistrySuite) TestListImagesWithDigests(c *testing.T) {
 	assert.Assert(c, re1.MatchString(out), "expected %q: %s", re1.String(), out)
 	// setup image2
 	digest2, err := setupImageWithTag(c, "tag2")
-	//error setting up image
-	assert.NilError(c, err)
+	assert.NilError(c, err, "error setting up image")
 	imageReference2 := fmt.Sprintf("%s@%s", repoName, digest2)
 	c.Logf("imageReference2 = %s", imageReference2)
 

+ 1 - 1
integration-cli/docker_cli_commit_test.go

@@ -38,7 +38,7 @@ func (s *DockerSuite) TestCommitWithoutPause(c *testing.T) {
 	dockerCmd(c, "inspect", cleanedImageID)
 }
 
-//test commit a paused container should not unpause it after commit
+// TestCommitPausedContainer tests that a paused container is not unpaused after being committed
 func (s *DockerSuite) TestCommitPausedContainer(c *testing.T) {
 	testRequires(c, DaemonIsLinux)
 	out, _ := dockerCmd(c, "run", "-i", "-d", "busybox")

+ 7 - 7
integration-cli/docker_cli_daemon_test.go

@@ -213,7 +213,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithInvalidBasesize(c *testing.T) {
 	s.d.Start(c)
 
 	oldBasesizeBytes := getBaseDeviceSize(c, s.d)
-	var newBasesizeBytes int64 = 1073741824 //1GB in bytes
+	var newBasesizeBytes int64 = 1073741824 // 1GB in bytes
 
 	if newBasesizeBytes < oldBasesizeBytes {
 		err := s.d.RestartWithError("--storage-opt", fmt.Sprintf("dm.basesize=%d", newBasesizeBytes))
@@ -234,7 +234,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithIncreasedBasesize(c *testing.T)
 
 	oldBasesizeBytes := getBaseDeviceSize(c, s.d)
 
-	var newBasesizeBytes int64 = 53687091200 //50GB in bytes
+	var newBasesizeBytes int64 = 53687091200 // 50GB in bytes
 
 	if newBasesizeBytes < oldBasesizeBytes {
 		c.Skip(fmt.Sprintf("New base device size (%v) must be greater than (%s)", units.HumanSize(float64(newBasesizeBytes)), units.HumanSize(float64(oldBasesizeBytes))))
@@ -572,16 +572,16 @@ func (s *DockerDaemonSuite) TestDaemonKeyGeneration(c *testing.T) {
 // Note that this explicitly tests the conflict of {-b,--bridge} and {--bip} options as the means
 // to get a daemon init failure; no other tests for -b/--bip conflict are therefore required
 func (s *DockerDaemonSuite) TestDaemonExitOnFailure(c *testing.T) {
-	//attempt to start daemon with incorrect flags (we know -b and --bip conflict)
+	// attempt to start daemon with incorrect flags (we know -b and --bip conflict)
 	if err := s.d.StartWithError("--bridge", "nosuchbridge", "--bip", "1.1.1.1"); err != nil {
-		//verify we got the right error
+		// verify we got the right error
 		if !strings.Contains(err.Error(), "daemon exited") {
 			c.Fatalf("Expected daemon not to start, got %v", err)
 		}
 		// look in the log and make sure we got the message that daemon is shutting down
 		icmd.RunCommand("grep", "failed to start daemon", s.d.LogFileName()).Assert(c, icmd.Success)
 	} else {
-		//if we didn't get an error and the daemon is running, this is a failure
+		// if we didn't get an error and the daemon is running, this is a failure
 		c.Fatal("Conflicting options should cause the daemon to error out with a failure")
 	}
 }
@@ -697,7 +697,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithBridgeIPChange(c *testing.T) {
 
 	s.d.Start(c, "--bip", bridgeIP)
 
-	//check if the iptables contains new bridgeIP MASQUERADE rule
+	// check if the iptables contains new bridgeIP MASQUERADE rule
 	ipTablesSearchString := bridgeIPNet.String()
 	icmd.RunCommand("iptables", "-t", "nat", "-nvL").Assert(c, icmd.Expected{
 		Out: ipTablesSearchString,
@@ -1203,7 +1203,7 @@ func (s *DockerDaemonSuite) TestDaemonWithWrongkey(c *testing.T) {
 		c.Fatalf("Error Unmarshal: %s", err)
 	}
 
-	//replace config.Kid with the fake value
+	// replace config.Kid with the fake value
 	config.Kid = "VSAJ:FUYR:X3H2:B2VZ:KZ6U:CJD5:K7BX:ZXHY:UZXT:P4FT:MJWG:HRJ4"
 
 	// NEW Data-Struct to byte[]

+ 4 - 4
integration-cli/docker_cli_events_test.go

@@ -48,7 +48,7 @@ func (s *DockerSuite) TestEventsTimestampFormats(c *testing.T) {
 		events = events[:len(events)-1]
 
 		nEvents := len(events)
-		assert.Assert(c, nEvents >= 5) //Missing expected event
+		assert.Assert(c, nEvents >= 5)
 		containerEvents := eventActionsByIDAndType(c, events, name, "container")
 		assert.Assert(c, is.DeepEqual(containerEvents, []string{"create", "attach", "start", "die", "destroy"}), out)
 	}
@@ -99,7 +99,7 @@ func (s *DockerSuite) TestEventsContainerEventsAttrSort(c *testing.T) {
 	events := strings.Split(out, "\n")
 
 	nEvents := len(events)
-	assert.Assert(c, nEvents >= 3) //Missing expected event
+	assert.Assert(c, nEvents >= 3)
 	matchedEvents := 0
 	for _, event := range events {
 		matches := eventstestutils.ScanMap(event)
@@ -124,7 +124,7 @@ func (s *DockerSuite) TestEventsContainerEventsSinceUnixEpoch(c *testing.T) {
 	events = events[:len(events)-1]
 
 	nEvents := len(events)
-	assert.Assert(c, nEvents >= 5) //Missing expected event
+	assert.Assert(c, nEvents >= 5)
 	containerEvents := eventActionsByIDAndType(c, events, "since-epoch-test", "container")
 	assert.Assert(c, is.DeepEqual(containerEvents, []string{"create", "attach", "start", "die", "destroy"}), out)
 }
@@ -664,7 +664,7 @@ func (s *DockerSuite) TestEventsContainerRestart(c *testing.T) {
 	events := strings.Split(strings.TrimSpace(out), "\n")
 
 	nEvents := len(events)
-	assert.Assert(c, nEvents >= 1) //Missing expected event
+	assert.Assert(c, nEvents >= 1)
 	actions := eventActionsByIDAndType(c, events, "testEvent", "container")
 
 	for _, a := range actions {

+ 1 - 1
integration-cli/docker_cli_events_unix_test.go

@@ -243,7 +243,7 @@ func (s *DockerSuite) TestEventsContainerWithMultiNetwork(c *testing.T) {
 	assert.Assert(c, strings.Contains(netEvents[0], "disconnect"))
 	assert.Assert(c, strings.Contains(netEvents[1], "disconnect"))
 
-	//both networks appeared in the network event output
+	// both networks appeared in the network event output
 	assert.Assert(c, strings.Contains(out, "test-event-network-local-1"))
 	assert.Assert(c, strings.Contains(out, "test-event-network-local-2"))
 }

+ 2 - 2
integration-cli/docker_cli_images_test.go

@@ -242,10 +242,10 @@ func (s *DockerSuite) TestImagesEnsureDanglingImageOnlyListedOnce(c *testing.T)
 	assert.Equal(c, strings.Count(out, imageID), 1)
 
 	out, _ = dockerCmd(c, "images", "-q", "-f", "dangling=false")
-	//dangling=false would not include dangling images
+	// dangling=false would not include dangling images
 	assert.Assert(c, !strings.Contains(out, imageID))
 	out, _ = dockerCmd(c, "images")
-	//docker images still include dangling images
+	// docker images still include dangling images
 	assert.Assert(c, strings.Contains(out, imageID))
 }
 

+ 14 - 15
integration-cli/docker_cli_inspect_test.go

@@ -42,8 +42,8 @@ func (s *DockerSuite) TestInspectInt64(c *testing.T) {
 }
 
 func (s *DockerSuite) TestInspectDefault(c *testing.T) {
-	//Both the container and image are named busybox. docker inspect will fetch the container JSON.
-	//If the container JSON is not available, it will go for the image JSON.
+	// Both the container and image are named busybox. docker inspect will fetch the container JSON.
+	// If the container JSON is not available, it will go for the image JSON.
 
 	out, _ := dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "true")
 	containerID := strings.TrimSpace(out)
@@ -78,8 +78,8 @@ func (s *DockerSuite) TestInspectStatus(c *testing.T) {
 }
 
 func (s *DockerSuite) TestInspectTypeFlagContainer(c *testing.T) {
-	//Both the container and image are named busybox. docker inspect will fetch container
-	//JSON State.Running field. If the field is true, it's a container.
+	// Both the container and image are named busybox. docker inspect will fetch container
+	// JSON State.Running field. If the field is true, it's a container.
 	runSleepingContainer(c, "--name=busybox", "-d")
 
 	formatStr := "--format={{.State.Running}}"
@@ -88,9 +88,9 @@ func (s *DockerSuite) TestInspectTypeFlagContainer(c *testing.T) {
 }
 
 func (s *DockerSuite) TestInspectTypeFlagWithNoContainer(c *testing.T) {
-	//Run this test on an image named busybox. docker inspect will try to fetch container
-	//JSON. Since there is no container named busybox and --type=container, docker inspect will
-	//not try to get the image JSON. It will throw an error.
+	// Run this test on an image named busybox. docker inspect will try to fetch container
+	// JSON. Since there is no container named busybox and --type=container, docker inspect will
+	// not try to get the image JSON. It will throw an error.
 
 	dockerCmd(c, "run", "-d", "busybox", "true")
 
@@ -100,9 +100,9 @@ func (s *DockerSuite) TestInspectTypeFlagWithNoContainer(c *testing.T) {
 }
 
 func (s *DockerSuite) TestInspectTypeFlagWithImage(c *testing.T) {
-	//Both the container and image are named busybox. docker inspect will fetch image
-	//JSON as --type=image. if there is no image with name busybox, docker inspect
-	//will throw an error.
+	// Both the container and image are named busybox. docker inspect will fetch image
+	// JSON as --type=image. if there is no image with name busybox, docker inspect
+	// will throw an error.
 
 	dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "true")
 
@@ -112,8 +112,8 @@ func (s *DockerSuite) TestInspectTypeFlagWithImage(c *testing.T) {
 }
 
 func (s *DockerSuite) TestInspectTypeFlagWithInvalidValue(c *testing.T) {
-	//Both the container and image are named busybox. docker inspect will fail
-	//as --type=foobar is not a valid value for the flag.
+	// Both the container and image are named busybox. docker inspect will fail
+	// as --type=foobar is not a valid value for the flag.
 
 	dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "true")
 
@@ -295,9 +295,8 @@ func (s *DockerSuite) TestInspectLogConfigNoType(c *testing.T) {
 }
 
 func (s *DockerSuite) TestInspectNoSizeFlagContainer(c *testing.T) {
-
-	//Both the container and image are named busybox. docker inspect will fetch container
-	//JSON SizeRw and SizeRootFs field. If there is no flag --size/-s, there are no size fields.
+	// Both the container and image are named busybox. docker inspect will fetch container
+	// JSON SizeRw and SizeRootFs field. If there is no flag --size/-s, there are no size fields.
 
 	runSleepingContainer(c, "--name=busybox", "-d")
 

+ 1 - 1
integration-cli/docker_cli_ps_test.go

@@ -755,7 +755,7 @@ func (s *DockerSuite) TestPsListContainersFilterNetwork(c *testing.T) {
 	// skip header
 	lines = lines[1:]
 
-	//ps output should have both the containers
+	// ps output should have both the containers
 	assert.Equal(c, len(RemoveLinesForExistingElements(lines, existing)), 2)
 
 	// Making sure onbridgenetwork and onnonenetwork is on the output

+ 16 - 16
integration-cli/docker_cli_run_test.go

@@ -175,7 +175,7 @@ func (s *DockerSuite) TestRunWithoutNetworking(c *testing.T) {
 	}
 	}
 }
 }
 
 
-//test --link use container name to link target
+// test --link use container name to link target
 func (s *DockerSuite) TestRunLinksContainerWithContainerName(c *testing.T) {
 func (s *DockerSuite) TestRunLinksContainerWithContainerName(c *testing.T) {
 	// TODO Windows: This test cannot run on a Windows daemon as the networking
 	// TODO Windows: This test cannot run on a Windows daemon as the networking
 	// settings are not populated back yet on inspect.
 	// settings are not populated back yet on inspect.
@@ -190,7 +190,7 @@ func (s *DockerSuite) TestRunLinksContainerWithContainerName(c *testing.T) {
 	}
 	}
 }
 }
 
 
-//test --link use container id to link target
+// test --link use container id to link target
 func (s *DockerSuite) TestRunLinksContainerWithContainerID(c *testing.T) {
 func (s *DockerSuite) TestRunLinksContainerWithContainerID(c *testing.T) {
 	// TODO Windows: This test cannot run on a Windows daemon as the networking
 	// TODO Windows: This test cannot run on a Windows daemon as the networking
 	// settings are not populated back yet on inspect.
 	// settings are not populated back yet on inspect.
@@ -1430,7 +1430,7 @@ func (s *DockerSuite) TestRunResolvconfUpdate(c *testing.T) {
 	tmpResolvConf := []byte("search pommesfrites.fr\nnameserver 12.34.56.78\n")
 	tmpResolvConf := []byte("search pommesfrites.fr\nnameserver 12.34.56.78\n")
 	tmpLocalhostResolvConf := []byte("nameserver 127.0.0.1")
 	tmpLocalhostResolvConf := []byte("nameserver 127.0.0.1")
 
 
-	//take a copy of resolv.conf for restoring after test completes
+	// take a copy of resolv.conf for restoring after test completes
 	resolvConfSystem, err := ioutil.ReadFile("/etc/resolv.conf")
 	resolvConfSystem, err := ioutil.ReadFile("/etc/resolv.conf")
 	if err != nil {
 	if err != nil {
 		c.Fatal(err)
 		c.Fatal(err)
@@ -1447,14 +1447,14 @@ func (s *DockerSuite) TestRunResolvconfUpdate(c *testing.T) {
 		icmd.RunCommand("umount", "/etc/resolv.conf").Assert(c, icmd.Success)
 		icmd.RunCommand("umount", "/etc/resolv.conf").Assert(c, icmd.Success)
 	}
 	}
 
 
-	//cleanup
+	// cleanup
 	defer func() {
 	defer func() {
 		if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil {
 		if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil {
 			c.Fatal(err)
 			c.Fatal(err)
 		}
 		}
 	}()
 	}()
 
 
-	//1. test that a restarting container gets an updated resolv.conf
+	// 1. test that a restarting container gets an updated resolv.conf
 	dockerCmd(c, "run", "--name=first", "busybox", "true")
 	dockerCmd(c, "run", "--name=first", "busybox", "true")
 	containerID1 := getIDByName(c, "first")
 	containerID1 := getIDByName(c, "first")
 
 
@@ -1472,16 +1472,16 @@ func (s *DockerSuite) TestRunResolvconfUpdate(c *testing.T) {
 		c.Fatalf("Restarted container does not have updated resolv.conf; expected %q, got %q", tmpResolvConf, string(containerResolv))
 		c.Fatalf("Restarted container does not have updated resolv.conf; expected %q, got %q", tmpResolvConf, string(containerResolv))
 	}
 	}
 
 
-	/*	//make a change to resolv.conf (in this case replacing our tmp copy with orig copy)
+	/*	// make a change to resolv.conf (in this case replacing our tmp copy with orig copy)
 		if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil {
 		if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil {
 						c.Fatal(err)
 						c.Fatal(err)
 								} */
 								} */
-	//2. test that a restarting container does not receive resolv.conf updates
+	// 2. test that a restarting container does not receive resolv.conf updates
 	//   if it modified the container copy of the starting point resolv.conf
 	//   if it modified the container copy of the starting point resolv.conf
 	dockerCmd(c, "run", "--name=second", "busybox", "sh", "-c", "echo 'search mylittlepony.com' >>/etc/resolv.conf")
 	dockerCmd(c, "run", "--name=second", "busybox", "sh", "-c", "echo 'search mylittlepony.com' >>/etc/resolv.conf")
 	containerID2 := getIDByName(c, "second")
 	containerID2 := getIDByName(c, "second")
 
 
-	//make a change to resolv.conf (in this case replacing our tmp copy with orig copy)
+	// make a change to resolv.conf (in this case replacing our tmp copy with orig copy)
 	if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil {
 	if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil {
 		c.Fatal(err)
 		c.Fatal(err)
 	}
 	}
@@ -1495,7 +1495,7 @@ func (s *DockerSuite) TestRunResolvconfUpdate(c *testing.T) {
 		c.Fatalf("Container's resolv.conf should not have been updated with host resolv.conf: %q", string(containerResolv))
 		c.Fatalf("Container's resolv.conf should not have been updated with host resolv.conf: %q", string(containerResolv))
 	}
 	}
 
 
-	//3. test that a running container's resolv.conf is not modified while running
+	// 3. test that a running container's resolv.conf is not modified while running
 	out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
 	out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
 	runningContainerID := strings.TrimSpace(out)
 	runningContainerID := strings.TrimSpace(out)
 
 
@@ -1510,7 +1510,7 @@ func (s *DockerSuite) TestRunResolvconfUpdate(c *testing.T) {
 		c.Fatalf("Running container should not have updated resolv.conf; expected %q, got %q", string(resolvConfSystem), string(containerResolv))
 		c.Fatalf("Running container should not have updated resolv.conf; expected %q, got %q", string(resolvConfSystem), string(containerResolv))
 	}
 	}
 
 
-	//4. test that a running container's resolv.conf is updated upon restart
+	// 4. test that a running container's resolv.conf is updated upon restart
 	//   (the above container is still running..)
 	//   (the above container is still running..)
 	dockerCmd(c, "restart", runningContainerID)
 	dockerCmd(c, "restart", runningContainerID)
 
 
@@ -1520,7 +1520,7 @@ func (s *DockerSuite) TestRunResolvconfUpdate(c *testing.T) {
 		c.Fatalf("Restarted container should have updated resolv.conf; expected %q, got %q", string(tmpResolvConf), string(containerResolv))
 		c.Fatalf("Restarted container should have updated resolv.conf; expected %q, got %q", string(tmpResolvConf), string(containerResolv))
 	}
 	}
 
 
-	//5. test that additions of a localhost resolver are cleaned from
+	// 5. test that additions of a localhost resolver are cleaned from
 	//   host resolv.conf before updating container's resolv.conf copies
 	//   host resolv.conf before updating container's resolv.conf copies
 
 
 	// replace resolv.conf with a localhost-only nameserver copy
 	// replace resolv.conf with a localhost-only nameserver copy
@@ -1539,7 +1539,7 @@ func (s *DockerSuite) TestRunResolvconfUpdate(c *testing.T) {
 		c.Fatalf("Container does not have cleaned/replaced DNS in resolv.conf; expected %q, got %q", expected, string(containerResolv))
 		c.Fatalf("Container does not have cleaned/replaced DNS in resolv.conf; expected %q, got %q", expected, string(containerResolv))
 	}
 	}
 
 
-	//6. Test that replacing (as opposed to modifying) resolv.conf triggers an update
+	// 6. Test that replacing (as opposed to modifying) resolv.conf triggers an update
 	//   of containers' resolv.conf.
 	//   of containers' resolv.conf.
 
 
 	// Restore the original resolv.conf
 	// Restore the original resolv.conf
@@ -1570,7 +1570,7 @@ func (s *DockerSuite) TestRunResolvconfUpdate(c *testing.T) {
 		c.Fatalf("Stopped container does not have updated resolv.conf; expected\n%q\n got\n%q", tmpResolvConf, string(containerResolv))
 		c.Fatalf("Stopped container does not have updated resolv.conf; expected\n%q\n got\n%q", tmpResolvConf, string(containerResolv))
 	}
 	}
 
 
-	//cleanup, restore original resolv.conf happens in defer func()
+	// cleanup, restore original resolv.conf happens in defer func()
 }
 }
 
 
 func (s *DockerSuite) TestRunAddHost(c *testing.T) {
 func (s *DockerSuite) TestRunAddHost(c *testing.T) {
@@ -1958,7 +1958,7 @@ func (s *DockerSuite) TestRunCidFileCleanupIfEmpty(c *testing.T) {
 }
 }
 
 
 // #2098 - Docker cidFiles only contain short version of the containerId
 // #2098 - Docker cidFiles only contain short version of the containerId
-//sudo docker run --cidfile /tmp/docker_tesc.cid ubuntu echo "test"
+// sudo docker run --cidfile /tmp/docker_tesc.cid ubuntu echo "test"
 // TestRunCidFile tests that run --cidfile returns the longid
 // TestRunCidFile tests that run --cidfile returns the longid
 func (s *DockerSuite) TestRunCidFileCheckIDLength(c *testing.T) {
 func (s *DockerSuite) TestRunCidFileCheckIDLength(c *testing.T) {
 	tmpDir, err := ioutil.TempDir("", "TestRunCidFile")
 	tmpDir, err := ioutil.TempDir("", "TestRunCidFile")
@@ -2016,7 +2016,7 @@ func (s *DockerSuite) TestRunInspectMacAddress(c *testing.T) {
 // test docker run use an invalid mac address
 // test docker run use an invalid mac address
 func (s *DockerSuite) TestRunWithInvalidMacAddress(c *testing.T) {
 func (s *DockerSuite) TestRunWithInvalidMacAddress(c *testing.T) {
 	out, _, err := dockerCmdWithError("run", "--mac-address", "92:d0:c6:0a:29", "busybox")
 	out, _, err := dockerCmdWithError("run", "--mac-address", "92:d0:c6:0a:29", "busybox")
-	//use an invalid mac address should with an error out
+	// use an invalid mac address should with an error out
 	if err == nil || !strings.Contains(out, "is not a valid mac address") {
 		c.Fatalf("run with an invalid --mac-address should with error out")
 	}
@@ -2148,7 +2148,7 @@ func (s *DockerSuite) TestRunReuseBindVolumeThatIsSymlink(c *testing.T) {
 	dockerCmd(c, "run", "-v", fmt.Sprintf("%s:"+prefix+"/tmp/test", linkPath), "busybox", "ls", prefix+"/tmp/test")
 }
 
-//GH#10604: Test an "/etc" volume doesn't overlay special bind mounts in container
+// GH#10604: Test an "/etc" volume doesn't overlay special bind mounts in container
 func (s *DockerSuite) TestRunCreateVolumeEtc(c *testing.T) {
 	// While Windows supports volumes, it does not support --add-host hence
 	// this test is not applicable on Windows.

+ 1 - 1
integration-cli/docker_cli_save_load_unix_test.go

@@ -66,7 +66,7 @@ func (s *DockerSuite) TestSaveAndLoadRepoStdout(c *testing.T) {
 	buf := make([]byte, 1024)
 
 	n, err := pty.Read(buf)
-	assert.NilError(c, err) //could not read tty output
+	assert.NilError(c, err, "could not read tty output")
 	assert.Assert(c, strings.Contains(string(buf[:n]), "cowardly refusing"), "help output is not being yielded")
 }
 

+ 3 - 3
integration-cli/docker_cli_search_test.go

@@ -36,19 +36,19 @@ func (s *DockerSuite) TestSearchCmdOptions(c *testing.T) {
 	outSearchCmd, _ := dockerCmd(c, "search", "busybox")
 	assert.Assert(c, strings.Count(outSearchCmd, "\n") > 3, outSearchCmd)
 
-	outSearchCmdautomated, _ := dockerCmd(c, "search", "--filter", "is-automated=true", "busybox") //The busybox is a busybox base image, not an AUTOMATED image.
+	outSearchCmdautomated, _ := dockerCmd(c, "search", "--filter", "is-automated=true", "busybox") // The busybox is a busybox base image, not an AUTOMATED image.
 	outSearchCmdautomatedSlice := strings.Split(outSearchCmdautomated, "\n")
 	for i := range outSearchCmdautomatedSlice {
 		assert.Assert(c, !strings.HasPrefix(outSearchCmdautomatedSlice[i], "busybox "), "The busybox is not an AUTOMATED image: %s", outSearchCmdautomated)
 	}
 
-	outSearchCmdNotOfficial, _ := dockerCmd(c, "search", "--filter", "is-official=false", "busybox") //The busybox is a busybox base image, official image.
+	outSearchCmdNotOfficial, _ := dockerCmd(c, "search", "--filter", "is-official=false", "busybox") // The busybox is a busybox base image, official image.
 	outSearchCmdNotOfficialSlice := strings.Split(outSearchCmdNotOfficial, "\n")
 	for i := range outSearchCmdNotOfficialSlice {
 		assert.Assert(c, !strings.HasPrefix(outSearchCmdNotOfficialSlice[i], "busybox "), "The busybox is not an OFFICIAL image: %s", outSearchCmdNotOfficial)
 	}
 
-	outSearchCmdOfficial, _ := dockerCmd(c, "search", "--filter", "is-official=true", "busybox") //The busybox is a busybox base image, official image.
+	outSearchCmdOfficial, _ := dockerCmd(c, "search", "--filter", "is-official=true", "busybox") // The busybox is a busybox base image, official image.
 	outSearchCmdOfficialSlice := strings.Split(outSearchCmdOfficial, "\n")
 	assert.Equal(c, len(outSearchCmdOfficialSlice), 3) // 1 header, 1 line, 1 carriage return
 	assert.Assert(c, strings.HasPrefix(outSearchCmdOfficialSlice[1], "busybox "), "The busybox is an OFFICIAL image: %s", outSearchCmdOfficial)

+ 1 - 1
integration/service/update_test.go

@@ -227,7 +227,7 @@ func TestServiceUpdateNetwork(t *testing.T) {
 	assert.NilError(t, err)
 	assert.Assert(t, len(netInfo.Containers) == 2, "Expected 2 endpoints, one for container and one for LB Sandbox")
 
-	//Remove network from service
+	// Remove network from service
 	service.Spec.TaskTemplate.Networks = []swarmtypes.NetworkAttachmentConfig{}
 	_, err = cli.ServiceUpdate(ctx, serviceID, service.Version, service.Spec, types.ServiceUpdateOptions{})
 	assert.NilError(t, err)

+ 33 - 33
libcontainerd/local/local_windows.go

@@ -117,42 +117,42 @@ func (c *client) Version(ctx context.Context) (containerd.Version, error) {
 // Isolation=Process example:
 //
 // {
-//	"SystemType": "Container",
-//	"Name": "5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
-//	"Owner": "docker",
-//	"VolumePath": "\\\\\\\\?\\\\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}",
-//	"IgnoreFlushesDuringBoot": true,
-//	"LayerFolderPath": "C:\\\\control\\\\windowsfilter\\\\5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
-//	"Layers": [{
-//		"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
-//		"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
-//	}],
-//	"HostName": "5e0055c814a6",
-//	"MappedDirectories": [],
-//	"HvPartition": false,
-//	"EndpointList": ["eef2649d-bb17-4d53-9937-295a8efe6f2c"],
-//}
+// 	"SystemType": "Container",
+// 	"Name": "5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
+// 	"Owner": "docker",
+// 	"VolumePath": "\\\\\\\\?\\\\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}",
+// 	"IgnoreFlushesDuringBoot": true,
+// 	"LayerFolderPath": "C:\\\\control\\\\windowsfilter\\\\5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
+// 	"Layers": [{
+// 		"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
+// 		"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
+// 	}],
+// 	"HostName": "5e0055c814a6",
+// 	"MappedDirectories": [],
+// 	"HvPartition": false,
+// 	"EndpointList": ["eef2649d-bb17-4d53-9937-295a8efe6f2c"],
+// }
 //
 // Isolation=Hyper-V example:
 //
-//{
-//	"SystemType": "Container",
-//	"Name": "475c2c58933b72687a88a441e7e0ca4bd72d76413c5f9d5031fee83b98f6045d",
-//	"Owner": "docker",
-//	"IgnoreFlushesDuringBoot": true,
-//	"Layers": [{
-//		"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
-//		"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
-//	}],
-//	"HostName": "475c2c58933b",
-//	"MappedDirectories": [],
-//	"HvPartition": true,
-//	"EndpointList": ["e1bb1e61-d56f-405e-b75d-fd520cefa0cb"],
-//	"DNSSearchList": "a.com,b.com,c.com",
-//	"HvRuntime": {
-//		"ImagePath": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c\\\\UtilityVM"
-//	},
-//}
+// {
+// 	"SystemType": "Container",
+// 	"Name": "475c2c58933b72687a88a441e7e0ca4bd72d76413c5f9d5031fee83b98f6045d",
+// 	"Owner": "docker",
+// 	"IgnoreFlushesDuringBoot": true,
+// 	"Layers": [{
+// 		"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
+// 		"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
+// 	}],
+// 	"HostName": "475c2c58933b",
+// 	"MappedDirectories": [],
+// 	"HvPartition": true,
+// 	"EndpointList": ["e1bb1e61-d56f-405e-b75d-fd520cefa0cb"],
+// 	"DNSSearchList": "a.com,b.com,c.com",
+// 	"HvRuntime": {
+// 		"ImagePath": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c\\\\UtilityVM"
+// 	},
+// }
 func (c *client) Create(_ context.Context, id string, spec *specs.Spec, runtimeOptions interface{}, opts ...containerd.NewContainerOpts) error {
 	if ctr := c.getContainer(id); ctr != nil {
 		return errors.WithStack(errdefs.Conflict(errors.New("id already in use")))

+ 1 - 1
libcontainerd/queue/queue_test.go

@@ -14,7 +14,7 @@ func TestSerialization(t *testing.T) {
 	)
 
 	q.Append("aaa", func() {
-		//simulate a long time task
+		// simulate a long time task
 		time.Sleep(10 * time.Millisecond)
 		assert.Equal(t, serialization, 1)
 		serialization = 2

+ 6 - 6
pkg/archive/archive.go

@@ -442,7 +442,7 @@ func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownO
 }
 
 // canonicalTarName provides a platform-independent and consistent posix-style
-//path for files and directories to be archived regardless of the platform.
+// path for files and directories to be archived regardless of the platform.
 func canonicalTarName(name string, isDir bool) string {
 	name = CanonicalTarNameForPath(name)
 
@@ -495,13 +495,13 @@ func (ta *tarAppender) addTarFile(path, name string) error {
 		}
 	}
 
-	//check whether the file is overlayfs whiteout
-	//if yes, skip re-mapping container ID mappings.
+	// check whether the file is overlayfs whiteout
+	// if yes, skip re-mapping container ID mappings.
 	isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0
 
-	//handle re-mapping container ID mappings back to host ID mappings before
-	//writing tar headers/files. We skip whiteout files because they were written
-	//by the kernel and already have proper ownership relative to the host
+	// handle re-mapping container ID mappings back to host ID mappings before
+	// writing tar headers/files. We skip whiteout files because they were written
+	// by the kernel and already have proper ownership relative to the host
 	if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() {
 		fileIDPair, err := getFileUIDGID(fi.Sys())
 		if err != nil {

+ 13 - 13
pkg/archive/archive_unix_test.go

@@ -282,31 +282,31 @@ func TestCopyInfoDestinationPathSymlink(t *testing.T) {
 	}
 
 	testData := []FileTestData{
-		//Create a directory: /tmp/archive-copy-test*/dir1
-		//Test will "copy" file1 to dir1
+		// Create a directory: /tmp/archive-copy-test*/dir1
+		// Test will "copy" file1 to dir1
 		{resource: FileData{filetype: Dir, path: "dir1", permissions: 0740}, file: "file1", expected: CopyInfo{Path: root + "dir1/file1", Exists: false, IsDir: false}},
 
-		//Create a symlink directory to dir1: /tmp/archive-copy-test*/dirSymlink -> dir1
-		//Test will "copy" file2 to dirSymlink
+		// Create a symlink directory to dir1: /tmp/archive-copy-test*/dirSymlink -> dir1
+		// Test will "copy" file2 to dirSymlink
 		{resource: FileData{filetype: Symlink, path: "dirSymlink", contents: root + "dir1", permissions: 0600}, file: "file2", expected: CopyInfo{Path: root + "dirSymlink/file2", Exists: false, IsDir: false}},
 
-		//Create a file in tmp directory: /tmp/archive-copy-test*/file1
-		//Test to cover when the full file path already exists.
+		// Create a file in tmp directory: /tmp/archive-copy-test*/file1
+		// Test to cover when the full file path already exists.
 		{resource: FileData{filetype: Regular, path: "file1", permissions: 0600}, file: "", expected: CopyInfo{Path: root + "file1", Exists: true}},
 
-		//Create a directory: /tmp/archive-copy*/dir2
-		//Test to cover when the full directory path already exists
+		// Create a directory: /tmp/archive-copy*/dir2
+		// Test to cover when the full directory path already exists
 		{resource: FileData{filetype: Dir, path: "dir2", permissions: 0740}, file: "", expected: CopyInfo{Path: root + "dir2", Exists: true, IsDir: true}},
 
-		//Create a symlink to a non-existent target: /tmp/archive-copy*/symlink1 -> noSuchTarget
-		//Negative test to cover symlinking to a target that does not exit
+		// Create a symlink to a non-existent target: /tmp/archive-copy*/symlink1 -> noSuchTarget
+		// Negative test to cover symlinking to a target that does not exit
 		{resource: FileData{filetype: Symlink, path: "symlink1", contents: "noSuchTarget", permissions: 0600}, file: "", expected: CopyInfo{Path: root + "noSuchTarget", Exists: false}},
 
-		//Create a file in tmp directory for next test: /tmp/existingfile
+		// Create a file in tmp directory for next test: /tmp/existingfile
 		{resource: FileData{filetype: Regular, path: "existingfile", permissions: 0600}, file: "", expected: CopyInfo{Path: root + "existingfile", Exists: true}},
 
-		//Create a symlink to an existing file: /tmp/archive-copy*/symlink2 -> /tmp/existingfile
-		//Test to cover when the parent directory of a new file is a symlink
+		// Create a symlink to an existing file: /tmp/archive-copy*/symlink2 -> /tmp/existingfile
+		// Test to cover when the parent directory of a new file is a symlink
 		{resource: FileData{filetype: Symlink, path: "symlink2", contents: "existingfile", permissions: 0600}, file: "", expected: CopyInfo{Path: root + "existingfile", Exists: true}},
 	}
 

+ 1 - 1
pkg/archive/archive_windows.go

@@ -31,7 +31,7 @@ func CanonicalTarNameForPath(p string) string {
 // chmodTarEntry is used to adjust the file permissions used in tar header based
 // on the platform the archival is done.
 func chmodTarEntry(perm os.FileMode) os.FileMode {
-	//perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.)
+	// perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.)
 	permPart := perm & os.ModePerm
 	noPermPart := perm &^ os.ModePerm
 	// Add the x bit: make everything +x from windows

+ 2 - 2
pkg/chrootarchive/archive_test.go

@@ -96,8 +96,8 @@ func TestChrootUntarWithHugeExcludesList(t *testing.T) {
 		t.Fatal(err)
 	}
 	options := &archive.TarOptions{}
-	//65534 entries of 64-byte strings ~= 4MB of environment space which should overflow
-	//on most systems when passed via environment or command line arguments
+	// 65534 entries of 64-byte strings ~= 4MB of environment space which should overflow
+	// on most systems when passed via environment or command line arguments
 	excludes := make([]string, 65534)
 	for i := 0; i < 65534; i++ {
 		excludes[i] = strings.Repeat(string(i), 64)

+ 2 - 2
pkg/chrootarchive/archive_unix.go

@@ -28,7 +28,7 @@ func untar() {
 
 	var options archive.TarOptions
 
-	//read the options from the pipe "ExtraFiles"
+	// read the options from the pipe "ExtraFiles"
 	if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil {
 		fatal(err)
 	}
@@ -100,7 +100,7 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T
 		return fmt.Errorf("Untar error on re-exec cmd: %v", err)
 	}
 
-	//write the options to the pipe for the untar exec to read
+	// write the options to the pipe for the untar exec to read
 	if err := json.NewEncoder(w).Encode(options); err != nil {
 		w.Close()
 		return fmt.Errorf("Untar json encode to pipe failed: %v", err)

+ 1 - 1
pkg/containerfs/archiver.go

@@ -194,7 +194,7 @@ func remapIDs(idMapping *idtools.IdentityMapping, hdr *tar.Header) error {
 // chmodTarEntry is used to adjust the file permissions used in tar header based
 // on the platform the archival is done.
 func chmodTarEntry(perm os.FileMode) os.FileMode {
-	//perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.)
+	// perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.)
 	permPart := perm & os.ModePerm
 	noPermPart := perm &^ os.ModePerm
 	// Add the x bit: make everything +x from windows

+ 2 - 2
pkg/idtools/utils_unix.go

@@ -18,8 +18,8 @@ func resolveBinary(binname string) (string, error) {
 	if err != nil {
 		return "", err
 	}
-	//only return no error if the final resolved binary basename
-	//matches what was searched for
+	// only return no error if the final resolved binary basename
+	// matches what was searched for
 	if filepath.Base(resolvedPath) == binname {
 		return resolvedPath, nil
 	}

+ 1 - 1
pkg/jsonmessage/jsonmessage.go

@@ -178,7 +178,7 @@ func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error {
 		clearLine(out)
 		endl = "\r"
 		fmt.Fprint(out, endl)
-	} else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal
+	} else if jm.Progress != nil && jm.Progress.String() != "" { // disable progressbar in non-terminal
 		return nil
 	}
 	if jm.TimeNano != 0 {

+ 1 - 1
pkg/mount/mountinfo_freebsd.go

@@ -13,7 +13,7 @@ import (
 	"unsafe"
 )
 
-//parseMountTable returns information about mounted filesystems
+// parseMountTable returns information about mounted filesystems
 func parseMountTable(filter FilterFunc) ([]*Info, error) {
 	var rawEntries *C.struct_statfs
 

+ 1 - 1
pkg/progress/progressreader.go

@@ -34,7 +34,7 @@ func NewProgressReader(in io.ReadCloser, out Output, size int64, id, action stri
 func (p *Reader) Read(buf []byte) (n int, err error) {
 	read, err := p.in.Read(buf)
 	p.current += int64(read)
-	updateEvery := int64(1024 * 512) //512kB
+	updateEvery := int64(1024 * 512) // 512kB
 	if p.size > 0 {
 		// Update progress for every 1% read if 1% < 512kB
 		if increment := int64(0.01 * float64(p.size)); increment < updateEvery {

+ 1 - 1
pkg/signal/trap.go

@@ -61,7 +61,7 @@ func Trap(cleanup func(), logger interface {
 					DumpStacks("")
 					logger.Info("Forcing docker daemon shutdown without cleanup on SIGQUIT")
 				}
-				//for the SIGINT/TERM, and SIGQUIT non-clean shutdown case, exit with 128 + signal #
+				// for the SIGINT/TERM, and SIGQUIT non-clean shutdown case, exit with 128 + signal #
 				os.Exit(128 + int(sig.(syscall.Signal)))
 			}(sig)
 		}

+ 3 - 3
pkg/system/chtimes_unix.go

@@ -6,9 +6,9 @@ import (
 	"time"
 )
 
-//setCTime will set the create time on a file. On Unix, the create
-//time is updated as a side effect of setting the modified time, so
-//no action is required.
+// setCTime will set the create time on a file. On Unix, the create
+// time is updated as a side effect of setting the modified time, so
+// no action is required.
 func setCTime(path string, ctime time.Time) error {
 	return nil
 }

+ 2 - 2
pkg/system/chtimes_windows.go

@@ -6,8 +6,8 @@ import (
 	"golang.org/x/sys/windows"
 )
 
-//setCTime will set the create time on a file. On Windows, this requires
-//calling SetFileTime and explicitly including the create time.
+// setCTime will set the create time on a file. On Windows, this requires
+// calling SetFileTime and explicitly including the create time.
 func setCTime(path string, ctime time.Time) error {
 	ctimespec := windows.NsecToTimespec(ctime.UnixNano())
 	pathp, e := windows.UTF16PtrFromString(path)

+ 1 - 1
pkg/system/filesys_windows.go

@@ -235,7 +235,7 @@ func windowsOpenSequential(path string, mode int, _ uint32) (fd windows.Handle,
 		createmode = windows.OPEN_EXISTING
 	}
 	// Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang.
-	//https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx
+	// https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx
 	const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN
 	h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0)
 	return h, e

+ 1 - 1
pkg/tailfile/tailfile.go

@@ -18,7 +18,7 @@ var eol = []byte("\n")
 // ErrNonPositiveLinesNumber is an error returned if the lines number was negative.
 var ErrNonPositiveLinesNumber = errors.New("The number of lines to extract from the file must be positive")
 
-//TailFile returns last n lines of the passed in file.
+// TailFile returns last n lines of the passed in file.
 func TailFile(f *os.File, n int) ([][]byte, error) {
 	size, err := f.Seek(0, io.SeekEnd)
 	if err != nil {

+ 1 - 1
registry/registry_mock_test.go

@@ -268,7 +268,7 @@ func requiresAuth(w http.ResponseWriter, r *http.Request) bool {
 		value := fmt.Sprintf("FAKE-SESSION-%d", time.Now().UnixNano())
 		cookie := &http.Cookie{Name: "session", Value: value, MaxAge: 3600}
 		http.SetCookie(w, cookie)
-		//FIXME(sam): this should be sent only on Index routes
+		// FIXME(sam): this should be sent only on Index routes
 		value = fmt.Sprintf("FAKE-TOKEN-%d", time.Now().UnixNano())
 		w.Header().Add("X-Docker-Token", value)
 	}