Fix typos found across repository

Signed-off-by: Justas Brazauskas <brazauskasjustas@gmail.com>
Justas Brazauskas 2015-12-13 18:00:39 +02:00
parent f5e6b09783
commit 927b334ebf
117 changed files with 159 additions and 159 deletions

@@ -145,7 +145,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
 // then make sure we send both files over to the daemon
 // because Dockerfile is, obviously, needed no matter what, and
 // .dockerignore is needed to know if either one needs to be
-// removed. The deamon will remove them for us, if needed, after it
+// removed. The daemon will remove them for us, if needed, after it
 // parses the Dockerfile. Ignore errors here, as they will have been
 // caught by ValidateContextDirectory above.
 var includes = []string{"."}

@@ -231,7 +231,7 @@ func (cli *DockerCli) copyToContainer(srcPath, dstContainer, dstPath string, cpP
 // Ignore any error and assume that the parent directory of the destination
 // path exists, in which case the copy may still succeed. If there is any
 // type of conflict (e.g., non-directory overwriting an existing directory
-// or vice versia) the extraction will fail. If the destination simply did
+// or vice versa) the extraction will fail. If the destination simply did
 // not exist, but the parent directory does, the extraction will still
 // succeed.
 if err == nil {
@@ -266,7 +266,7 @@ func (cli *DockerCli) copyToContainer(srcPath, dstContainer, dstPath string, cpP
 // With the stat info about the local source as well as the
 // destination, we have enough information to know whether we need to
 // alter the archive that we upload so that when the server extracts
-// it to the specified directory in the container we get the disired
+// it to the specified directory in the container we get the desired
 // copy behavior.
 // See comments in the implementation of `archive.PrepareArchiveCopy`

@@ -130,7 +130,7 @@ func (cli *DockerCli) CmdNetworkDisconnect(args ...string) error {
 return cli.client.NetworkDisconnect(cmd.Arg(0), cmd.Arg(1))
 }
-// CmdNetworkLs lists all the netorks managed by docker daemon
+// CmdNetworkLs lists all the networks managed by docker daemon
 //
 // Usage: docker network ls [OPTIONS]
 func (cli *DockerCli) CmdNetworkLs(args ...string) error {
@@ -198,8 +198,8 @@ func (cli *DockerCli) CmdNetworkInspect(args ...string) error {
 // Consolidates the ipam configuration as a group from different related configurations
 // user can configure network with multiple non-overlapping subnets and hence it is
-// possible to corelate the various related parameters and consolidate them.
-// consoidateIpam consolidates subnets, ip-ranges, gateways and auxilary addresses into
+// possible to correlate the various related parameters and consolidate them.
+// consoidateIpam consolidates subnets, ip-ranges, gateways and auxiliary addresses into
 // structured ipam data.
 func consolidateIpam(subnets, ranges, gateways []string, auxaddrs map[string]string) ([]network.IPAMConfig, error) {
 if len(subnets) < len(ranges) || len(subnets) < len(gateways) {
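For readers unfamiliar with the function touched above, here is a minimal, self-contained sketch of consolidating subnets, ip-ranges, and gateways into structured IPAM data. The struct fields and the positional pairing are assumptions for illustration; the actual consolidateIpam correlates each range and gateway to its enclosing subnet rather than pairing by index.

```go
package main

import "fmt"

// IPAMConfig mirrors the shape of the daemon's network.IPAMConfig
// (field names assumed for this sketch).
type IPAMConfig struct {
	Subnet     string
	IPRange    string
	Gateway    string
	AuxAddress map[string]string
}

// consolidate pairs each subnet with an optional ip-range and gateway.
// Simplified: pairing is positional, not by subnet membership.
func consolidate(subnets, ranges, gateways []string, auxaddrs map[string]string) ([]IPAMConfig, error) {
	if len(subnets) < len(ranges) || len(subnets) < len(gateways) {
		return nil, fmt.Errorf("every ip-range or gateway must have a corresponding subnet")
	}
	var out []IPAMConfig
	for i, s := range subnets {
		cfg := IPAMConfig{Subnet: s, AuxAddress: auxaddrs}
		if i < len(ranges) {
			cfg.IPRange = ranges[i]
		}
		if i < len(gateways) {
			cfg.Gateway = gateways[i]
		}
		out = append(out, cfg)
	}
	return out, nil
}

func main() {
	cfgs, err := consolidate(
		[]string{"192.168.0.0/16", "192.170.0.0/16"},
		[]string{"192.168.1.0/24"},
		[]string{"192.168.0.100"},
		nil,
	)
	fmt.Println(cfgs, err)
}
```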

@@ -100,7 +100,7 @@ func (cli *DockerCli) CmdStart(args ...string) error {
 return err
 }
-// 4. Wait for attachement to break.
+// 4. Wait for attachment to break.
 if c.Config.Tty && cli.isTerminalOut {
 if err := cli.monitorTtySize(containerID, false); err != nil {
 fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err)

@@ -20,7 +20,7 @@ const (
 // Version of Current REST API
 Version version.Version = "1.22"
-// MinVersion represents Minimun REST API version supported
+// MinVersion represents Minimum REST API version supported
 MinVersion version.Version = "1.12"
 // DefaultDockerfileName is the Default filename with Docker commands, read by docker build

@@ -139,7 +139,7 @@ func versionMiddleware(handler httputils.APIFunc) httputils.APIFunc {
 // handleWithGlobalMiddlwares wraps the handler function for a request with
 // the server's global middlewares. The order of the middlewares is backwards,
-// meaning that the first in the list will be evaludated last.
+// meaning that the first in the list will be evaluated last.
 //
 // Example: handleWithGlobalMiddlewares(s.getContainersName)
 //
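The "backwards" evaluation order called out above is a general property of wrapping handlers in list order, since each successive wrap becomes the new outermost layer. A minimal Go sketch (illustrative only, not the daemon's code):

```go
package main

import "fmt"

type handler func()

// wrap puts a named middleware on the outside of next.
func wrap(name string, next handler) handler {
	return func() {
		fmt.Println("enter", name)
		next()
	}
}

func main() {
	h := handler(func() { fmt.Println("inner handler") })
	// Wrapping in list order leaves the last middleware outermost,
	// so the first in the list runs last among the middlewares.
	for _, name := range []string{"first", "second"} {
		h = wrap(name, h)
	}
	h() // prints: enter second, enter first, inner handler
}
```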

@@ -478,7 +478,7 @@ func (s *router) postBuild(ctx context.Context, w http.ResponseWriter, r *http.R
 func sanitizeRepoAndTags(names []string) ([]reference.Named, error) {
 var (
 repoAndTags []reference.Named
-// This map is used for deduplicating the "-t" paramter.
+// This map is used for deduplicating the "-t" parameter.
 uniqNames = make(map[string]struct{})
 )
 for _, repo := range names {

@@ -35,7 +35,7 @@ func (l localRoute) Path() string {
 return l.path
 }
-// NewRoute initialies a new local route for the reouter
+// NewRoute initializes a new local router for the reouter
 func NewRoute(method, path string, handler httputils.APIFunc) dkrouter.Route {
 return localRoute{method, path, handler}
 }

@@ -63,7 +63,7 @@ type BlkioStatEntry struct {
 // BlkioStats stores All IO service stats for data read and write
 // TODO Windows: This can be factored out
 type BlkioStats struct {
-// number of bytes tranferred to and from the block device
+// number of bytes transferred to and from the block device
 IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"`
 IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"`
 IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"`

@@ -105,7 +105,7 @@ func (fl *Flag) IsTrue() bool {
 // compile time error so it doesn't matter too much when we stop our
 // processing as long as we do stop it, so this allows the code
 // around AddXXX() to be just:
-// defFlag := AddString("desription", "")
+// defFlag := AddString("description", "")
 // w/o needing to add an if-statement around each one.
 func (bf *BFlags) Parse() error {
 // If there was an error while defining the possible flags

@@ -640,7 +640,7 @@ func arg(b *Builder, args []string, attributes map[string]bool, original string)
 // If there is a default value associated with this arg then add it to the
 // b.buildArgs if one is not already passed to the builder. The args passed
-// to builder override the defaut value of 'arg'.
+// to builder override the default value of 'arg'.
 if _, ok := b.BuildArgs[name]; !ok && hasDefault {
 b.BuildArgs[name] = value
 }

@@ -4,7 +4,7 @@
 // parser package for more information) that are yielded from the parser itself.
 // Calling NewBuilder with the BuildOpts struct can be used to customize the
 // experience for execution purposes only. Parsing is controlled in the parser
-// package, and this division of resposibility should be respected.
+// package, and this division of responsibility should be respected.
 //
 // Please see the jump table targets for the actual invocations, most of which
 // will call out to the functions in internals.go to deal with their tasks.

@@ -70,7 +70,7 @@ func TestTestData(t *testing.T) {
 }
 if runtime.GOOS == "windows" {
-// CRLF --> CR to match Unix behaviour
+// CRLF --> CR to match Unix behavior
 content = bytes.Replace(content, []byte{'\x0d', '\x0a'}, []byte{'\x0a'}, -1)
 }

@@ -71,7 +71,7 @@ type ConfigFile struct {
 filename string // Note: not serialized - for internal use only
 }
-// NewConfigFile initilizes an empty configuration file for the given filename 'fn'
+// NewConfigFile initializes an empty configuration file for the given filename 'fn'
 func NewConfigFile(fn string) *ConfigFile {
 return &ConfigFile{
 AuthConfigs: make(map[string]AuthConfig),

@@ -518,7 +518,7 @@ func (container *Container) AddMountPointWithVolume(destination string, vol volu
 }
 }
-// IsDestinationMounted checkes whether a path is mounted on the container or not.
+// IsDestinationMounted checks whether a path is mounted on the container or not.
 func (container *Container) IsDestinationMounted(destination string) bool {
 return container.MountPoints[destination] != nil
 }

@@ -41,7 +41,7 @@ func (container *Container) IpcMounts() []execdriver.Mount {
 return nil
 }
-// UnmountVolumes explicitely unmounts volumes from the container.
+// UnmountVolumes explicitly unmounts volumes from the container.
 func (container *Container) UnmountVolumes(forceSyscall bool) error {
 return nil
 }

@@ -121,7 +121,7 @@ func (m *containerMonitor) ExitOnNext() {
 }
 // Close closes the container's resources such as networking allocations and
-// unmounts the contatiner's root filesystem
+// unmounts the container's root filesystem
 func (m *containerMonitor) Close() error {
 // Cleanup networking and mounts
 m.supervisor.Cleanup(m.container)

@@ -56,7 +56,7 @@ while [ $# -gt 0 ]; do
 layersFs=$(echo "$manifestJson" | jq --raw-output '.fsLayers | .[] | .blobSum')
 IFS=$'\n'
-# bash v4 on Windows CI requires CRLF seperator
+# bash v4 on Windows CI requires CRLF separator
 if [ "$(go env GOHOSTOS)" = 'windows' ]; then
 major=$(echo ${BASH_VERSION%%[^0.9]} | cut -d. -f1)
 if [ "$major" -ge 4 ]; then

@@ -20,7 +20,7 @@ import (
 var ErrExtractPointNotDirectory = errors.New("extraction point is not a directory")
 // ContainerCopy performs a deprecated operation of archiving the resource at
-// the specified path in the conatiner identified by the given name.
+// the specified path in the container identified by the given name.
 func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, error) {
 container, err := daemon.GetContainer(name)
 if err != nil {

@@ -49,9 +49,9 @@ func (daemon *Daemon) createContainerPlatformSpecificSettings(container *contain
 // FIXME Windows: This code block is present in the Linux version and
 // allows the contents to be copied to the container FS prior to it
-// being started. However, the function utilises the FollowSymLinkInScope
+// being started. However, the function utilizes the FollowSymLinkInScope
 // path which does not cope with Windows volume-style file paths. There
-// is a seperate effort to resolve this (@swernli), so this processing
+// is a separate effort to resolve this (@swernli), so this processing
 // is deferred for now. A case where this would be useful is when
 // a dockerfile includes a VOLUME statement, but something is created
 // in that directory during the dockerfile processing. What this means

@@ -13,7 +13,7 @@ import (
 func setupDumpStackTrap() {
 // Windows does not support signals like *nix systems. So instead of
 // trapping on SIGUSR1 to dump stacks, we wait on a Win32 event to be
-// signalled.
+// signaled.
 go func() {
 sa := syscall.SecurityAttributes{
 Length: 0,

@@ -284,7 +284,7 @@ func (d *Driver) setupMounts(container *configs.Config, c *execdriver.Command) e
 userMounts[m.Destination] = struct{}{}
 }
-// Filter out mounts that are overriden by user supplied mounts
+// Filter out mounts that are overridden by user supplied mounts
 var defaultMounts []*configs.Mount
 _, mountDev := userMounts["/dev"]
 for _, m := range container.Mounts {

@@ -3,7 +3,7 @@
 package btrfs
 // TODO(vbatts) remove this work-around once supported linux distros are on
-// btrfs utililties of >= 3.16.1
+// btrfs utilities of >= 3.16.1
 func btrfsBuildVersion() string {
 return "-"

@@ -766,7 +766,7 @@ func (devices *DeviceSet) createRegisterDevice(hash string) (*devInfo, error) {
 if err := devicemapper.CreateDevice(devices.getPoolDevName(), deviceID); err != nil {
 if devicemapper.DeviceIDExists(err) {
 // Device ID already exists. This should not
-// happen. Now we have a mechianism to find
+// happen. Now we have a mechanism to find
 // a free device ID. So something is not right.
 // Give a warning and continue.
 logrus.Errorf("Device ID %d exists in pool but it is supposed to be unused", deviceID)
@@ -818,7 +818,7 @@ func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInf
 if err := devicemapper.CreateSnapDevice(devices.getPoolDevName(), deviceID, baseInfo.Name(), baseInfo.DeviceID); err != nil {
 if devicemapper.DeviceIDExists(err) {
 // Device ID already exists. This should not
-// happen. Now we have a mechianism to find
+// happen. Now we have a mechanism to find
 // a free device ID. So something is not right.
 // Give a warning and continue.
 logrus.Errorf("Device ID %d exists in pool but it is supposed to be unused", deviceID)
@@ -1749,7 +1749,7 @@ func (devices *DeviceSet) markForDeferredDeletion(info *devInfo) error {
 info.Deleted = true
-// save device metadata to refelect deleted state.
+// save device metadata to reflect deleted state.
 if err := devices.saveMetadata(info); err != nil {
 info.Deleted = false
 return err
@@ -1759,7 +1759,7 @@ func (devices *DeviceSet) markForDeferredDeletion(info *devInfo) error {
 return nil
 }
-// Should be caled with devices.Lock() held.
+// Should be called with devices.Lock() held.
 func (devices *DeviceSet) deleteTransaction(info *devInfo, syncDelete bool) error {
 if err := devices.openTransaction(info.Hash, info.DeviceID); err != nil {
 logrus.Debugf("Error opening transaction hash = %s deviceId = %d", "", info.DeviceID)
@@ -1805,7 +1805,7 @@ func (devices *DeviceSet) issueDiscard(info *devInfo) error {
 // This is a workaround for the kernel not discarding block so
 // on the thin pool when we remove a thinp device, so we do it
 // manually.
-// Even if device is deferred deleted, activate it and isue
+// Even if device is deferred deleted, activate it and issue
 // discards.
 if err := devices.activateDeviceIfNeeded(info, true); err != nil {
 return err
@@ -2131,7 +2131,7 @@ func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error {
 defer devices.Unlock()
 // If there are running containers when daemon crashes, during daemon
-// restarting, it will kill running contaienrs and will finally call
+// restarting, it will kill running containers and will finally call
 // Put() without calling Get(). So info.MountCount may become negative.
 // if info.mountCount goes negative, we do the unmount and assign
 // it to 0.

@@ -13,7 +13,7 @@ package devmapper
 // * version number of the interface that they were
 // * compiled with.
 // *
-// * All recognised ioctl commands (ie. those that don't
+// * All recognized ioctl commands (ie. those that don't
 // * return -ENOTTY) fill out this field, even if the
 // * command failed.
 // */

@@ -177,7 +177,7 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
 idFile := path.Join(mp, "id")
 if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) {
-// Create an "id" file with the container/image id in it to help reconscruct this in case
+// Create an "id" file with the container/image id in it to help reconstruct this in case
 // of later problems
 if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil {
 d.DeviceSet.UnmountDevice(id, mp)

@@ -17,14 +17,14 @@ import (
 type FsMagic uint32
 const (
-// FsMagicUnsupported is a predifined contant value other than a valid filesystem id.
+// FsMagicUnsupported is a predefined constant value other than a valid filesystem id.
 FsMagicUnsupported = FsMagic(0x00000000)
 )
 var (
 // DefaultDriver if a storage driver is not specified.
 DefaultDriver string
-// All registred drivers
+// All registered drivers
 drivers map[string]InitFunc
 // ErrNotSupported returned when driver is not supported.
@@ -120,7 +120,7 @@ func GetDriver(name, home string, options []string, uidMaps, gidMaps []idtools.I
 return nil, ErrNotSupported
 }
-// getBuiltinDriver initalizes and returns the registered driver, but does not try to load from plugins
+// getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins
 func getBuiltinDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) {
 if initFunc, exists := drivers[name]; exists {
 return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps)

@@ -30,7 +30,7 @@ var (
 ErrApplyDiffFallback = fmt.Errorf("Fall back to normal ApplyDiff")
 )
-// ApplyDiffProtoDriver wraps the ProtoDriver by extending the inteface with ApplyDiff method.
+// ApplyDiffProtoDriver wraps the ProtoDriver by extending the interface with ApplyDiff method.
 type ApplyDiffProtoDriver interface {
 graphdriver.ProtoDriver
 // ApplyDiff writes the diff to the archive for the given id and parent id.

@@ -31,7 +31,7 @@ func init() {
 graphdriver.Register("zfs", Init)
 }
-// Logger returns a zfs logger implmentation.
+// Logger returns a zfs logger implementation.
 type Logger struct{}
 // Log wraps log message from ZFS driver with a prefix '[zfs]'.

@@ -105,7 +105,7 @@ func (daemon *Daemon) Kill(container *container.Container) error {
 return nil
 }
-// killPossibleDeadProcess is a wrapper aroung killSig() suppressing "no such process" error.
+// killPossibleDeadProcess is a wrapper around killSig() suppressing "no such process" error.
 func (daemon *Daemon) killPossiblyDeadProcess(container *container.Container, sig int) error {
 err := daemon.killWithSignal(container, sig)
 if err == syscall.ESRCH {

@@ -266,7 +266,7 @@ func includeContainerInList(container *container.Container, ctx *listContext) it
 return excludeContainer
 }
-// Stop interation when the container arrives to the filter container
+// Stop iteration when the container arrives to the filter container
 if ctx.sinceFilter != nil {
 if container.ID == ctx.sinceFilter.ID {
 return stopIteration

@@ -54,7 +54,7 @@ func New(ctx logger.Context) (logger.Logger, error) {
 }
 extra := ctx.ExtraAttributes(nil)
 logrus.Debugf("logging driver fluentd configured for container:%s, host:%s, port:%d, tag:%s, extra:%v.", ctx.ContainerID, host, port, tag, extra)
-// logger tries to recoonect 2**32 - 1 times
+// logger tries to reconnect 2**32 - 1 times
 // failed (and panic) after 204 years [ 1.5 ** (2**32 - 1) - 1 seconds]
 log, err := fluent.New(fluent.Config{FluentPort: port, FluentHost: host, RetryWait: 1000, MaxRetry: math.MaxInt32})
 if err != nil {

@@ -146,7 +146,7 @@ func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan int
 // io.ErrUnexpectedEOF is returned from json.Decoder when there is
 // remaining data in the parser's buffer while an io.EOF occurs.
 // If the json logger writes a partial json log entry to the disk
-// while at the same time the decoder tries to decode it, the race codition happens.
+// while at the same time the decoder tries to decode it, the race condition happens.
 if err == io.ErrUnexpectedEOF && retries <= maxJSONDecodeRetry {
 reader := io.MultiReader(dec.Buffered(), f)
 dec = json.NewDecoder(reader)
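The recovery pattern in this hunk works because json.Decoder.Buffered exposes the partial entry still sitting in the decoder's buffer, so it can be stitched in front of the rest of the stream. A standalone sketch of the same pattern, simulating the partial write with string readers (an illustration, not the daemon's followLogs):

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"strings"
)

func main() {
	// A log entry written in two parts, as if the writer and reader raced.
	first := strings.NewReader(`{"log":"hel`)
	rest := strings.NewReader(`lo"}`)

	dec := json.NewDecoder(first)
	var entry map[string]string
	err := dec.Decode(&entry)
	if err == io.ErrUnexpectedEOF {
		// Prepend the decoder's buffered partial entry to the new data and retry.
		dec = json.NewDecoder(io.MultiReader(dec.Buffered(), rest))
		err = dec.Decode(&entry)
	}
	fmt.Println(entry, err) // map[log:hello] <nil>
}
```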

@@ -32,7 +32,7 @@ func NewRotateFileWriter(logPath string, capacity int64, maxFiles int) (*RotateF
 }, nil
 }
-//WriteLog write log messge to File
+//WriteLog write log message to File
 func (w *RotateFileWriter) Write(message []byte) (int, error) {
 w.mu.Lock()
 defer w.mu.Unlock()
@@ -106,7 +106,7 @@ func backup(fromPath, toPath string) error {
 return os.Rename(fromPath, toPath)
 }
-// LogPath returns the location the given wirter logs to.
+// LogPath returns the location the given writer logs to.
 func (w *RotateFileWriter) LogPath() string {
 return w.f.Name()
 }

@@ -91,7 +91,7 @@ func New(ctx logger.Context) (logger.Logger, error) {
 tlsConfig := &tls.Config{}
 // Splunk is using autogenerated certificates by default,
-// allow users to trust them with skiping verification
+// allow users to trust them with skipping verification
 if insecureSkipVerifyStr, ok := ctx.Config[splunkInsecureSkipVerifyKey]; ok {
 insecureSkipVerify, err := strconv.ParseBool(insecureSkipVerifyStr)
 if err != nil {

@@ -18,7 +18,7 @@ const (
 )
 // NetworkControllerEnabled checks if the networking stack is enabled.
-// This feature depends on OS primitives and it's dissabled in systems like Windows.
+// This feature depends on OS primitives and it's disabled in systems like Windows.
 func (daemon *Daemon) NetworkControllerEnabled() bool {
 return daemon.netController != nil
 }

@@ -70,7 +70,7 @@ func (m mounts) parts(i int) int {
 // 1. Select the previously configured mount points for the containers, if any.
 // 2. Select the volumes mounted from another containers. Overrides previously configured mount point destination.
 // 3. Select the bind mounts set by the client. Overrides previously configured mount point destinations.
-// 4. Cleanup old volumes that are about to be reasigned.
+// 4. Cleanup old volumes that are about to be reassigned.
 func (daemon *Daemon) registerMountPoints(container *container.Container, hostConfig *runconfig.HostConfig) error {
 binds := map[string]bool{}
 mountPoints := map[string]*volume.MountPoint{}
@@ -148,7 +148,7 @@ func (daemon *Daemon) registerMountPoints(container *container.Container, hostCo
 container.Lock()
-// 4. Cleanup old volumes that are about to be reasigned.
+// 4. Cleanup old volumes that are about to be reassigned.
 for _, m := range mountPoints {
 if m.BackwardsCompatible() {
 if mp, exists := container.MountPoints[m.Destination]; exists && mp.Volume != nil {
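The numbered steps in the first hunk amount to a last-writer-wins merge keyed on destination path. A toy reduction of that precedence (names and types simplified and hypothetical, not the daemon's code):

```go
package main

import "fmt"

type MountPoint struct{ Source, Destination string }

func main() {
	mountPoints := map[string]*MountPoint{}
	// 1. Previously configured mount points for the container.
	mountPoints["/data"] = &MountPoint{Source: "old-volume", Destination: "/data"}
	// 2. Volumes mounted from other containers override step 1.
	mountPoints["/data"] = &MountPoint{Source: "volumes-from", Destination: "/data"}
	// 3. Client bind mounts override steps 1-2.
	mountPoints["/data"] = &MountPoint{Source: "/host/dir", Destination: "/data"}
	// 4. The volume from step 1 is no longer referenced and can be cleaned up.
	fmt.Println(mountPoints["/data"].Source) // /host/dir
}
```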

@@ -41,7 +41,7 @@ type ImagePushConfig struct {
 // MetadataStore is the storage backend for distribution-specific
 // metadata.
 MetadataStore metadata.Store
-// LayerStore manges layers.
+// LayerStore manages layers.
 LayerStore layer.Store
 // ImageStore manages images.
 ImageStore image.Store

@@ -61,7 +61,7 @@ type transfer struct {
 // running remains open as long as the transfer is in progress.
 running chan struct{}
-// hasWatchers stays open until all watchers release the trasnfer.
+// hasWatchers stays open until all watchers release the transfer.
 hasWatchers chan struct{}
 // broadcastDone is true if the master progress channel has closed.
@@ -240,9 +240,9 @@ func (t *transfer) Cancel() {
 // DoFunc is a function called by the transfer manager to actually perform
 // a transfer. It should be non-blocking. It should wait until the start channel
-// is closed before transfering any data. If the function closes inactive, that
+// is closed before transferring any data. If the function closes inactive, that
 // signals to the transfer manager that the job is no longer actively moving
-// data - for example, it may be waiting for a dependent tranfer to finish.
+// data - for example, it may be waiting for a dependent transfer to finish.
 // This prevents it from taking up a slot.
 type DoFunc func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer
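A sketch of the DoFunc contract documented in this hunk, with the progress type stubbed and the Transfer return value omitted for brevity (assumed shapes, not the actual distribution/xfer code):

```go
package main

import "fmt"

type Progress struct{ Message string }

// doFunc mirrors the documented contract: return immediately, and only
// start moving data once the manager closes the start channel.
func doFunc(progressChan chan<- Progress, start <-chan struct{}, inactive chan<- struct{}) {
	go func() {
		defer close(progressChan)
		<-start // wait for the transfer manager to grant a concurrency slot
		for i := 1; i <= 3; i++ {
			progressChan <- Progress{Message: fmt.Sprintf("chunk %d/3", i)}
		}
	}()
}

func main() {
	progressChan := make(chan Progress)
	start := make(chan struct{})
	doFunc(progressChan, start, nil)
	close(start) // grant the slot
	for p := range progressChan {
		fmt.Println(p.Message)
	}
}
```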

docs/.gitignore
@@ -1,2 +1,2 @@
-# avoid commiting the awsconfig file used for releases
+# avoid committing the awsconfig file used for releases
 awsconfig

@@ -55,7 +55,7 @@ The `boot2docker` command reads its configuration from the `$BOOT2DOCKER_PROFILE
 The configuration shows you where `boot2docker` is looking for the `profile` file. It also output the settings that are in use.
-2. Initialise a default file to customize using `boot2docker config > ~/.boot2docker/profile` command.
+2. Initialize a default file to customize using `boot2docker config > ~/.boot2docker/profile` command.
 3. Add the following lines to `$HOME/.boot2docker/profile`:

@@ -12,7 +12,7 @@ weight = 7
 # Control and configure Docker with systemd
 Many Linux distributions use systemd to start the Docker daemon. This document
-shows a few examples of how to customise Docker's settings.
+shows a few examples of how to customize Docker's settings.
 ## Starting the Docker daemon

@@ -10,7 +10,7 @@ parent = "smn_linux"
 # openSUSE and SUSE Linux Enterprise
-This page provides instructions for installing and configuring the lastest
+This page provides instructions for installing and configuring the latest
 Docker Engine software on openSUSE and SUSE systems.
 >**Note:** You can also find bleeding edge Docker versions inside of the repositories maintained by the [Virtualization:containers project](https://build.opensuse.org/project/show/Virtualization:containers) on the [Open Build Service](https://build.opensuse.org/). This project delivers also other packages that are related with the Docker ecosystem (for example, Docker Compose).

@@ -103,7 +103,7 @@ This section lists each version from latest to oldest. Each listing includes a
 consistent with other date/time values returned by the API.
 * `AuthConfig` now supports a `registrytoken` for token based authentication
 * `POST /containers/create` now has a 4M minimum value limit for `HostConfig.KernelMemory`
-* Pushes initated with `POST /images/(name)/push` and pulls initiated with `POST /images/create`
+* Pushes initiated with `POST /images/(name)/push` and pulls initiated with `POST /images/create`
 will be cancelled if the HTTP connection making the API request is closed before
 the push or pull completes.

@@ -27,11 +27,11 @@ and Docker images will report:
 delete, import, pull, push, tag, untag
-The `--since` and `--until` parameters can be Unix timestamps, date formated
+The `--since` and `--until` parameters can be Unix timestamps, date formatted
 timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed
 relative to the client machines time. If you do not provide the --since option,
 the command returns only new and/or live events. Supported formats for date
-formated time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`,
+formatted time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`,
 `2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local
 timezone on the client will be used if you do not provide either a `Z` or a
 `+-00:00` timezone offset at the end of the timestamp. When providing Unix

@@ -101,7 +101,7 @@ ID 260 gen 11 top level 5 path btrfs/subvolumes/3c9a9d7cc6a235eb2de58ca9ef3551c6
 ID 261 gen 12 top level 5 path btrfs/subvolumes/0a17decee4139b0de68478f149cc16346f5e711c5ae3bb969895f22dd6723751
 ```
-Under the `/var/lib/docker/btrfs/subvolumes` directoy, each of these subvolumes and snapshots are visible as a normal Unix directory:
+Under the `/var/lib/docker/btrfs/subvolumes` directory, each of these subvolumes and snapshots are visible as a normal Unix directory:
 ```bash
 $ ls -l /var/lib/docker/btrfs/subvolumes/

@@ -81,7 +81,7 @@ var (
 HTTPStatusCode: http.StatusInternalServerError,
 })
-// ErrorCodeEmptyID is generated when an ID is the emptry string.
+// ErrorCodeEmptyID is generated when an ID is the empty string.
 ErrorCodeEmptyID = errcode.Register(errGroup, errcode.ErrorDescriptor{
 Value: "EMPTYID",
 Message: "Invalid empty id",

@@ -204,7 +204,7 @@ Respond with a string error if an error occurred.
 {}
 ```
-Perform neccessary tasks to release resources help by the plugin, for example
+Perform necessary tasks to release resources help by the plugin, for example
 unmounting all the layered file systems.
 **Response**:

@@ -43,7 +43,7 @@ values in the following formats:
 If numeric IDs are provided, translation back to valid user or group names
 will occur so that the subordinate uid and gid information can be read, given
 these resources are name-based, not id-based. If the numeric ID information
-provided does not exist as entries in `/etc/passwd` or `/etc/group`, dameon
+provided does not exist as entries in `/etc/passwd` or `/etc/group`, daemon
 startup will fail with an error message.
 *An example: starting with default Docker user management:*
@@ -67,7 +67,7 @@ create the following range, based on an existing user already having the first
 > **Note:** On a fresh Fedora install, we found that we had to `touch` the
 > `/etc/subuid` and `/etc/subgid` files to have ranges assigned when users
-> were created. Once these files existed, range assigment on user creation
+> were created. Once these files existed, range assignment on user creation
 > worked properly.
 If you have a preferred/self-managed user with subordinate ID mappings already

@@ -12,7 +12,7 @@ set -e
 # will be used as Docker binary version and package version.
 # - The hash of the git commit will also be included in the Docker binary,
 # with the suffix -dirty if the repository isn't clean.
-# - The script is intented to be run inside the docker container specified
+# - The script is intended to be run inside the docker container specified
 # in the Dockerfile at the root of the source. In other words:
 # DO NOT CALL THIS SCRIPT DIRECTLY.
 # - The right way to call this script is to invoke "make" from

@@ -225,7 +225,7 @@ release_build() {
 ;;
 arm)
 s3Arch=armel
-# someday, we might potentially support mutliple GOARM values, in which case we might get armhf here too
+# someday, we might potentially support multiple GOARM values, in which case we might get armhf here too
 ;;
 *)
 echo >&2 "error: can't convert $s3Arch to an appropriate value for 'uname -m'"

@@ -268,7 +268,7 @@ func testGetSet(t *testing.T, store StoreBackend) {
 if err != nil {
 t.Fatal(err)
 }
-// skipping use of digest pkg because its used by the imlementation
+// skipping use of digest pkg because its used by the implementation
 h := sha256.New()
 _, err = h.Write(randomInput)
 if err != nil {

@@ -96,7 +96,7 @@ type History struct {
 Author string `json:"author,omitempty"`
 // CreatedBy keeps the Dockerfile command used while building image.
 CreatedBy string `json:"created_by,omitempty"`
-// Comment is custom mesage set by the user when creating the image.
+// Comment is custom message set by the user when creating the image.
 Comment string `json:"comment,omitempty"`
 // EmptyLayer is set to true if this history item did not generate a
 // layer. Otherwise, the history item is associated with the next

@@ -6,7 +6,7 @@ import "github.com/docker/docker/layer"
 // RootFS describes images root filesystem
 // This is currently a placeholder that only supports layers. In the future
-// this can be made into a interface that supports different implementaions.
+// this can be made into a interface that supports different implementations.
 type RootFS struct {
 Type string `json:"type"`
 DiffIDs []layer.DiffID `json:"diff_ids,omitempty"`

@@ -12,7 +12,7 @@ import (
 // RootFS describes images root filesystem
 // This is currently a placeholder that only supports layers. In the future
-// this can be made into a interface that supports different implementaions.
+// this can be made into a interface that supports different implementations.
 type RootFS struct {
 Type string `json:"type"`
 DiffIDs []layer.DiffID `json:"diff_ids,omitempty"`

@@ -137,7 +137,7 @@ func (s *DockerSuite) TestPostContainersAttach(c *check.C) {
 // Since the container only emits stdout, attaching to stderr should return nothing.
 expectTimeout(conn, br, "stdout")
-// Test the simlar functions of the stderr stream.
+// Test the similar functions of the stderr stream.
 cid, _ = dockerCmd(c, "run", "-di", "busybox", "/bin/sh", "-c", "cat >&2")
 cid = strings.TrimSpace(cid)
 conn, br, err = sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stderr=1", nil, "text/plain")

@@ -35,7 +35,7 @@ func (s *DockerSuite) TestAttachClosedOnContainerStop(c *check.C) {
 errChan := make(chan error)
 go func() {
 defer close(errChan)
-// Container is wating for us to signal it to stop
+// Container is waiting for us to signal it to stop
 dockerCmd(c, "stop", id)
 // And wait for the attach command to end
 errChan <- attachCmd.Wait()

@@ -4612,7 +4612,7 @@ func (s *DockerSuite) TestBuildInvalidTag(c *check.C) {
 testRequires(c, DaemonIsLinux)
 name := "abcd:" + stringutils.GenerateRandomAlphaOnlyString(200)
 _, out, err := buildImageWithOut(name, "FROM scratch\nMAINTAINER quux\n", true)
-// if the error doesnt check for illegal tag name, or the image is built
+// if the error doesn't check for illegal tag name, or the image is built
 // then this should fail
 if !strings.Contains(out, "invalid reference format") || strings.Contains(out, "Sending build context to Docker daemon") {
 c.Fatalf("failed to stop before building. Error: %s, Output: %s", err, out)
@@ -4817,7 +4817,7 @@ func (s *DockerSuite) TestBuildVerifySingleQuoteFails(c *check.C) {
 // This testcase is supposed to generate an error because the
 // JSON array we're passing in on the CMD uses single quotes instead
 // of double quotes (per the JSON spec). This means we interpret it
-// as a "string" insead of "JSON array" and pass it on to "sh -c" and
+// as a "string" instead of "JSON array" and pass it on to "sh -c" and
 // it should barf on it.
 name := "testbuildsinglequotefails"

@@ -87,7 +87,7 @@ func (s *DockerSuite) TestCpToErrDstParentNotExists(c *check.C) {
 }
 // Test for error when DST ends in a trailing path separator but exists as a
-// file. Also test that we cannot overwirite an existing directory with a
+// file. Also test that we cannot overwrite an existing directory with a
 // non-directory and cannot overwrite an existing
 func (s *DockerSuite) TestCpToErrDstNotDir(c *check.C) {
 testRequires(c, DaemonIsLinux)

@@ -171,7 +171,7 @@ func (s *DockerSuite) TestImagesEnsureDanglingImageOnlyListedOnce(c *check.C) {
 dockerCmd(c, "tag", "-f", "busybox", "foobox")
 out, _ = dockerCmd(c, "images", "-q", "-f", "dangling=true")
-// Exect one dangling image
+// Expect one dangling image
 c.Assert(strings.Count(out, imageID), checker.Equals, 1)
 }

@@ -170,7 +170,7 @@ func (s *DockerNetworkSuite) SetUpSuite(c *check.C) {
 return
 }
 w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json")
-// make sure libnetwork is now asking to release the expected address fro mthe expected poolid
+// make sure libnetwork is now asking to release the expected address from the expected poolid
 if addressRequest.PoolID != poolID {
 fmt.Fprintf(w, `{"Error":"unknown pool id"}`)
 } else if addressReleaseReq.Address != gw {
@@ -429,7 +429,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkIpamMultipleNetworks(c *check.C) {
 assertNwIsAvailable(c, "test5")
 // test network with multiple subnets
-// bridge network doesnt support multiple subnets. hence, use a dummy driver that supports
+// bridge network doesn't support multiple subnets. hence, use a dummy driver that supports
 dockerCmd(c, "network", "create", "-d", dummyNetworkDriver, "--subnet=192.168.0.0/16", "--subnet=192.170.0.0/16", "test6")
 assertNwIsAvailable(c, "test6")
@@ -491,7 +491,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkIpamInvalidCombinations(c *check.C
 _, _, err = dockerCmdWithError("network", "create", "--subnet=192.168.0.0/16", "--gateway=192.168.0.1", "--gateway=192.168.0.2", "test")
 c.Assert(err, check.NotNil)
-// Multiple overlaping subnets in the same network must fail
+// Multiple overlapping subnets in the same network must fail
 _, _, err = dockerCmdWithError("network", "create", "--subnet=192.168.0.0/16", "--subnet=192.168.1.0/16", "test")
 c.Assert(err, check.NotNil)

@@ -223,7 +223,7 @@ func (s *DockerSuite) TestUnpublishedPortsInPsOutput(c *check.C) {
 // Cannot find expected port binding (expBnd2) in docker ps output
 c.Assert(out, checker.Contains, expBnd2)
-// Remove container now otherwise it will interfeer with next test
+// Remove container now otherwise it will interfere with next test
 stopRemoveContainer(id, c)
 // Run the container with explicit port bindings and no exposed ports
@@ -236,7 +236,7 @@ func (s *DockerSuite) TestUnpublishedPortsInPsOutput(c *check.C) {
 c.Assert(out, checker.Contains, expBnd1)
 // Cannot find expected port binding (expBnd2) in docker ps output
 c.Assert(out, checker.Contains, expBnd2)
-// Remove container now otherwise it will interfeer with next test
+// Remove container now otherwise it will interfere with next test
 stopRemoveContainer(id, c)
 // Run the container with one unpublished exposed port and one explicit port binding

@@ -754,7 +754,7 @@ func (s *DockerSuite) TestRunContainerNetwork(c *check.C) {
 func (s *DockerSuite) TestRunNetHostNotAllowedWithLinks(c *check.C) {
 // TODO Windows: This is Linux specific as --link is not supported and
-// this will be deprecated in favour of container networking model.
+// this will be deprecated in favor of container networking model.
 testRequires(c, DaemonIsLinux, NotUserNamespace)
 dockerCmd(c, "run", "--name", "linked", "busybox", "true")

@@ -109,14 +109,14 @@ func (s *DockerSuite) TestVolumeCliLsFilterDangling(c *check.C) {
 out, _ = dockerCmd(c, "volume", "ls", "--filter", "dangling=false")
-// Same as above, but expicitly disabling dangling
+// Same as above, but explicitly disabling dangling
 c.Assert(out, checker.Contains, "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output"))
 c.Assert(out, checker.Contains, "testisinuse1\n", check.Commentf("expected volume 'testisinuse1' in output"))
 c.Assert(out, checker.Contains, "testisinuse2\n", check.Commentf("expected volume 'testisinuse2' in output"))
 out, _ = dockerCmd(c, "volume", "ls", "--filter", "dangling=true")
-// Filter "dangling" volumes; ony "dangling" (unused) volumes should be in the output
+// Filter "dangling" volumes; only "dangling" (unused) volumes should be in the output
 c.Assert(out, checker.Contains, "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output"))
 c.Assert(out, check.Not(checker.Contains), "testisinuse1\n", check.Commentf("volume 'testisinuse1' in output, but not expected"))
 c.Assert(out, check.Not(checker.Contains), "testisinuse2\n", check.Commentf("volume 'testisinuse2' in output, but not expected"))

@@ -30,7 +30,7 @@ var (
 // daemonPlatform is held globally so that tests can make intelligent
 // decisions on how to configure themselves according to the platform
-// of the daemon. This is initialised in docker_utils by sending
+// of the daemon. This is initialized in docker_utils by sending
 // a version call to the daemon and examining the response header.
 daemonPlatform string

View file

@ -1416,7 +1416,7 @@ func newFakeGit(name string, files map[string]string, enforceLocalServer bool) (
return nil, fmt.Errorf("cannot start fake storage: %v", err) return nil, fmt.Errorf("cannot start fake storage: %v", err)
} }
} else { } else {
// always start a local http server on CLI test machin // always start a local http server on CLI test machine
httpServer := httptest.NewServer(http.FileServer(http.Dir(root))) httpServer := httptest.NewServer(http.FileServer(http.Dir(root)))
server = &localGitServer{httpServer} server = &localGitServer{httpServer}
} }
@ -1430,7 +1430,7 @@ func newFakeGit(name string, files map[string]string, enforceLocalServer bool) (
// Write `content` to the file at path `dst`, creating it if necessary, // Write `content` to the file at path `dst`, creating it if necessary,
// as well as any missing directories. // as well as any missing directories.
// The file is truncated if it already exists. // The file is truncated if it already exists.
// Fail the test when error occures. // Fail the test when error occurs.
func writeFile(dst, content string, c *check.C) { func writeFile(dst, content string, c *check.C) {
// Create subdirectories if necessary // Create subdirectories if necessary
c.Assert(os.MkdirAll(path.Dir(dst), 0700), check.IsNil) c.Assert(os.MkdirAll(path.Dir(dst), 0700), check.IsNil)
@ -1443,7 +1443,7 @@ func writeFile(dst, content string, c *check.C) {
} }
// Return the contents of file at path `src`. // Return the contents of file at path `src`.
// Fail the test when error occures. // Fail the test when error occurs.
func readFile(src string, c *check.C) (content string) { func readFile(src string, c *check.C) (content string) {
data, err := ioutil.ReadFile(src) data, err := ioutil.ReadFile(src)
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)

View file

@ -1,6 +1,6 @@
// Package layer is package for managing read only // Package layer is package for managing read only
// and read-write mounts on the union file system // and read-write mounts on the union file system
// driver. Read-only mounts are refenced using a // driver. Read-only mounts are referenced using a
// content hash and are protected from mutation in // content hash and are protected from mutation in
// the exposed interface. The tar format is used // the exposed interface. The tar format is used
// to create read only layers and export both // to create read only layers and export both
@ -189,7 +189,7 @@ type MetadataStore interface {
GetInitID(string) (string, error) GetInitID(string) (string, error)
GetMountParent(string) (ChainID, error) GetMountParent(string) (ChainID, error)
// List returns the full list of referened // List returns the full list of referenced
// read-only and read-write layers // read-only and read-write layers
List() ([]ChainID, []string, error) List() ([]ChainID, []string, error)

View file

@ -418,7 +418,7 @@ func (ls *layerStore) saveMount(mount *mountedLayer) error {
func (ls *layerStore) initMount(graphID, parent, mountLabel string, initFunc MountInit) (string, error) { func (ls *layerStore) initMount(graphID, parent, mountLabel string, initFunc MountInit) (string, error) {
// Use "<graph-id>-init" to maintain compatibility with graph drivers // Use "<graph-id>-init" to maintain compatibility with graph drivers
// which are expecting this layer with this special name. If all // which are expecting this layer with this special name. If all
// graph drivers can be updated to not rely on knowin about this layer // graph drivers can be updated to not rely on knowing about this layer
// then the initID should be randomly generated. // then the initID should be randomly generated.
initID := fmt.Sprintf("%s-init", graphID) initID := fmt.Sprintf("%s-init", graphID)

View file

@ -37,11 +37,11 @@ and Docker images will report:
**--until**="" **--until**=""
Stream events until this timestamp Stream events until this timestamp
The `--since` and `--until` parameters can be Unix timestamps, date formated The `--since` and `--until` parameters can be Unix timestamps, date formatted
timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed
relative to the client machines time. If you do not provide the --since option, relative to the client machines time. If you do not provide the --since option,
the command returns only new and/or live events. Supported formats for date the command returns only new and/or live events. Supported formats for date
formated time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`, formatted time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`,
`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local `2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local
timezone on the client will be used if you do not provide either a `Z` or a timezone on the client will be used if you do not provide either a `Z` or a
`+-00:00` timezone offset at the end of the timestamp. When providing Unix `+-00:00` timezone offset at the end of the timestamp. When providing Unix

View file

@ -48,7 +48,7 @@ the running containers.
.Ports - Exposed ports. .Ports - Exposed ports.
.Status - Container status. .Status - Container status.
.Size - Container disk size. .Size - Container disk size.
.Labels - All labels asigned to the container. .Labels - All labels assigned to the container.
.Label - Value of a specific label for this container. For example `{{.Label "com.docker.swarm.cpu"}}` .Label - Value of a specific label for this container. For example `{{.Label "com.docker.swarm.cpu"}}`
**--help** **--help**

View file

@ -106,7 +106,7 @@ func TestParseEnvFileBadlyFormattedFile(t *testing.T) {
} }
} }
// Test ParseEnvFile for a file with a line exeeding bufio.MaxScanTokenSize // Test ParseEnvFile for a file with a line exceeding bufio.MaxScanTokenSize
func TestParseEnvFileLineTooLongFile(t *testing.T) { func TestParseEnvFileLineTooLongFile(t *testing.T) {
content := strings.Repeat("a", bufio.MaxScanTokenSize+42) content := strings.Repeat("a", bufio.MaxScanTokenSize+42)
content = fmt.Sprint("foo=", content) content = fmt.Sprint("foo=", content)

View file

@ -22,7 +22,7 @@ func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt {
} }
// Set sets an IPv4 or IPv6 address from a given string. If the given // Set sets an IPv4 or IPv6 address from a given string. If the given
// string is not parsable as an IP address it returns an error. // string is not parseable as an IP address it returns an error.
func (o *IPOpt) Set(val string) error { func (o *IPOpt) Set(val string) error {
ip := net.ParseIP(val) ip := net.ParseIP(val)
if ip == nil { if ip == nil {

View file

@ -31,7 +31,7 @@ type (
Archive io.ReadCloser Archive io.ReadCloser
// Reader is a type of io.Reader. // Reader is a type of io.Reader.
Reader io.Reader Reader io.Reader
// Compression is the state represtents if compressed or not. // Compression is the state represents if compressed or not.
Compression int Compression int
// TarChownOptions wraps the chown options UID and GID. // TarChownOptions wraps the chown options UID and GID.
TarChownOptions struct { TarChownOptions struct {

View file

@ -19,7 +19,7 @@ func fixVolumePathPrefix(srcPath string) string {
} }
// getWalkRoot calculates the root path when performing a TarWithOptions. // getWalkRoot calculates the root path when performing a TarWithOptions.
// We use a seperate function as this is platform specific. On Linux, we // We use a separate function as this is platform specific. On Linux, we
// can't use filepath.Join(srcPath,include) because this will clean away // can't use filepath.Join(srcPath,include) because this will clean away
// a trailing "." or "/" which may be important. // a trailing "." or "/" which may be important.
func getWalkRoot(srcPath string, include string) string { func getWalkRoot(srcPath string, include string) string {

View file

@ -19,7 +19,7 @@ func fixVolumePathPrefix(srcPath string) string {
} }
// getWalkRoot calculates the root path when performing a TarWithOptions. // getWalkRoot calculates the root path when performing a TarWithOptions.
// We use a seperate function as this is platform specific. // We use a separate function as this is platform specific.
func getWalkRoot(srcPath string, include string) string { func getWalkRoot(srcPath string, include string) string {
return filepath.Join(srcPath, include) return filepath.Join(srcPath, include)
} }

View file

@ -150,7 +150,7 @@ func Changes(layers []string, rw string) ([]Change, error) {
// If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
// This block is here to ensure the change is recorded even if the // This block is here to ensure the change is recorded even if the
// modify time, mode and size of the parent directoriy in the rw and ro layers are all equal. // modify time, mode and size of the parent directory in the rw and ro layers are all equal.
// Check https://github.com/docker/docker/pull/13590 for details. // Check https://github.com/docker/docker/pull/13590 for details.
if f.IsDir() { if f.IsDir() {
changedDirs[path] = struct{}{} changedDirs[path] = struct{}{}

View file

@ -9,7 +9,7 @@ package archive
const WhiteoutPrefix = ".wh." const WhiteoutPrefix = ".wh."
// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not // WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not
// for remoing an actaul file. Normally these files are excluded from exported // for removing an actual file. Normally these files are excluded from exported
// archives. // archives.
const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix

View file

@ -87,7 +87,7 @@ func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.Fil
func AttachLoopDevice(sparseName string) (loop *os.File, err error) { func AttachLoopDevice(sparseName string) (loop *os.File, err error) {
// Try to retrieve the next available loopback device via syscall. // Try to retrieve the next available loopback device via syscall.
// If it fails, we discard error and start loopking for a // If it fails, we discard error and start looping for a
// loopback from index 0. // loopback from index 0.
startIndex, err := getNextFreeLoopbackIndex() startIndex, err := getNextFreeLoopbackIndex()
if err != nil { if err != nil {

View file

@ -290,7 +290,7 @@ func (s *FakeStore) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVP
} }
// WatchTree will fail the first time, and return the mockKVchan afterwards. // WatchTree will fail the first time, and return the mockKVchan afterwards.
// This is the behaviour we need for testing.. If we need 'moar', should update this. // This is the behavior we need for testing.. If we need 'moar', should update this.
func (s *FakeStore) WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) { func (s *FakeStore) WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) {
if s.watchTreeCallCount == 0 { if s.watchTreeCallCount == 0 {
s.watchTreeCallCount = 1 s.watchTreeCallCount = 1

View file

@ -1,6 +1,6 @@
// Package filenotify provides a mechanism for watching file(s) for changes. // Package filenotify provides a mechanism for watching file(s) for changes.
// Generally leans on fsnotify, but provides a poll-based notifier which fsnotify does not support. // Generally leans on fsnotify, but provides a poll-based notifier which fsnotify does not support.
// These are wrapped up in a common interface so that either can be used interchangably in your code. // These are wrapped up in a common interface so that either can be used interchangeably in your code.
package filenotify package filenotify
import "gopkg.in/fsnotify.v1" import "gopkg.in/fsnotify.v1"

View file

@ -24,7 +24,7 @@ const watchWaitTime = 200 * time.Millisecond
// filePoller is used to poll files for changes, especially in cases where fsnotify // filePoller is used to poll files for changes, especially in cases where fsnotify
// can't be run (e.g. when inotify handles are exhausted) // can't be run (e.g. when inotify handles are exhausted)
// filePoller satifies the FileWatcher interface // filePoller satisfies the FileWatcher interface
type filePoller struct { type filePoller struct {
// watches is the list of files currently being polled, close the associated channel to stop the watch // watches is the list of files currently being polled, close the associated channel to stop the watch
watches map[string]chan struct{} watches map[string]chan struct{}

View file

@ -78,7 +78,7 @@ func Matches(file string, patterns []string) (bool, error) {
// OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go. // OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go.
// It will assume that the inputs have been preprocessed and therefore the function // It will assume that the inputs have been preprocessed and therefore the function
// doen't need to do as much error checking and clean-up. This was done to avoid // doesn't need to do as much error checking and clean-up. This was done to avoid
// repeating these steps on each file being checked during the archive process. // repeating these steps on each file being checked during the archive process.
// The more generic fileutils.Matches() can't make these assumptions. // The more generic fileutils.Matches() can't make these assumptions.
func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) { func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) {

View file

@ -295,7 +295,7 @@ func ConsumeWithSpeed(reader io.Reader, chunkSize int, interval time.Duration, s
} }
} }
// ParseCgroupPaths arses 'procCgroupData', which is output of '/proc/<pid>/cgroup', and returns // ParseCgroupPaths parses 'procCgroupData', which is output of '/proc/<pid>/cgroup', and returns
// a map which cgroup name as key and path as value. // a map which cgroup name as key and path as value.
func ParseCgroupPaths(procCgroupData string) map[string]string { func ParseCgroupPaths(procCgroupData string) map[string]string {
cgroupPaths := map[string]string{} cgroupPaths := map[string]string{}
@ -337,7 +337,7 @@ func (c *ChannelBuffer) ReadTimeout(p []byte, n time.Duration) (int, error) {
} }
} }
// RunAtDifferentDate runs the specifed function with the given time. // RunAtDifferentDate runs the specified function with the given time.
// It changes the date of the system, which can led to weird behaviors. // It changes the date of the system, which can led to weird behaviors.
func RunAtDifferentDate(date time.Time, block func()) { func RunAtDifferentDate(date time.Time, block func()) {
// Layout for date. MMDDhhmmYYYY // Layout for date. MMDDhhmmYYYY

View file

@ -309,7 +309,7 @@ func TestCompareDirectoryEntries(t *testing.T) {
} }
} }
// FIXME make an "unhappy path" test for ListTar without "panicing" :-) // FIXME make an "unhappy path" test for ListTar without "panicking" :-)
func TestListTar(t *testing.T) { func TestListTar(t *testing.T) {
tmpFolder, err := ioutil.TempDir("", "integration-cli-utils-list-tar") tmpFolder, err := ioutil.TempDir("", "integration-cli-utils-list-tar")
if err != nil { if err != nil {

View file

@ -20,7 +20,7 @@ func NopWriteCloser(w io.Writer) io.WriteCloser {
return &nopWriteCloser{w} return &nopWriteCloser{w}
} }
// NopFlusher represents a type which flush opetatin is nop. // NopFlusher represents a type which flush operation is nop.
type NopFlusher struct{} type NopFlusher struct{}
// Flush is a nop operation. // Flush is a nop operation.

View file

@ -19,8 +19,8 @@ type JSONLog struct {
// Format returns the log formatted according to format // Format returns the log formatted according to format
// If format is nil, returns the log message // If format is nil, returns the log message
// If format is json, returns the log marshalled in json format // If format is json, returns the log marshaled in json format
// By defalut, returns the log with the log time formatted according to format. // By default, returns the log with the log time formatted according to format.
func (jl *JSONLog) Format(format string) (string, error) { func (jl *JSONLog) Format(format string) (string, error) {
if format == "" { if format == "" {
return jl.Log, nil return jl.Log, nil

View file

@ -60,7 +60,7 @@ func (p *JSONProgress) String() string {
percentage = 50 percentage = 50
} }
if width > 110 { if width > 110 {
// this number can't be negetive gh#7136 // this number can't be negative gh#7136
numSpaces := 0 numSpaces := 0
if 50-percentage > 0 { if 50-percentage > 0 {
numSpaces = 50 - percentage numSpaces = 50 - percentage
@ -106,7 +106,7 @@ type JSONMessage struct {
// Display displays the JSONMessage to `out`. `isTerminal` describes if `out` // Display displays the JSONMessage to `out`. `isTerminal` describes if `out`
// is a terminal. If this is the case, it will erase the entire current line // is a terminal. If this is the case, it will erase the entire current line
// when dislaying the progressbar. // when displaying the progressbar.
func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error { func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error {
if jm.Error != nil { if jm.Error != nil {
if jm.Error.Code == 401 { if jm.Error.Code == 401 {

View file

@ -41,7 +41,7 @@ func (l *lockCtr) inc() {
atomic.AddInt32(&l.waiters, 1) atomic.AddInt32(&l.waiters, 1)
} }
// dec decrements the number of waiters wating on the lock // dec decrements the number of waiters waiting on the lock
func (l *lockCtr) dec() { func (l *lockCtr) dec() {
atomic.AddInt32(&l.waiters, -1) atomic.AddInt32(&l.waiters, -1)
} }

View file

@ -1228,7 +1228,7 @@ func (v mergeVal) IsBoolFlag() bool {
// Merge is an helper function that merges n FlagSets into a single dest FlagSet // Merge is an helper function that merges n FlagSets into a single dest FlagSet
// In case of name collision between the flagsets it will apply // In case of name collision between the flagsets it will apply
// the destination FlagSet's errorHandling behaviour. // the destination FlagSet's errorHandling behavior.
func Merge(dest *FlagSet, flagsets ...*FlagSet) error { func Merge(dest *FlagSet, flagsets ...*FlagSet) error {
for _, fset := range flagsets { for _, fset := range flagsets {
for k, f := range fset.formal { for k, f := range fset.formal {

View file

@ -23,7 +23,7 @@ const (
SYNCHRONOUS = syscall.MS_SYNCHRONOUS SYNCHRONOUS = syscall.MS_SYNCHRONOUS
// DIRSYNC will force all directory updates within the file system to be done // DIRSYNC will force all directory updates within the file system to be done
// synchronously. This affects the following system calls: creat, link, // synchronously. This affects the following system calls: create, link,
// unlink, symlink, mkdir, rmdir, mknod and rename. // unlink, symlink, mkdir, rmdir, mknod and rename.
DIRSYNC = syscall.MS_DIRSYNC DIRSYNC = syscall.MS_DIRSYNC

View file

@ -168,7 +168,7 @@ func TestSubtreeShared(t *testing.T) {
} }
}() }()
// NOW, check that the file from the outside directory is avaible in the source directory // NOW, check that the file from the outside directory is available in the source directory
if _, err := os.Stat(sourceCheckPath); err != nil { if _, err := os.Stat(sourceCheckPath); err != nil {
t.Fatal(err) t.Fatal(err)
} }

View file

@ -128,7 +128,7 @@ func (filters Args) Len() int {
return len(filters.fields) return len(filters.fields)
} }
// MatchKVList returns true if the values for the specified field maches the ones // MatchKVList returns true if the values for the specified field matches the ones
// from the sources. // from the sources.
// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}}, // e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}},
// field is 'label' and sources are {'label1': '1', 'label2': '2'} // field is 'label' and sources are {'label1': '1', 'label2': '2'}

View file

@ -10,7 +10,7 @@ import (
"time" "time"
) )
// Rand is a global *rand.Rand instance, which initilized with NewSource() source. // Rand is a global *rand.Rand instance, which initialized with NewSource() source.
var Rand = rand.New(NewSource()) var Rand = rand.New(NewSource())
// Reader is a global, shared instance of a pseudorandom bytes generator. // Reader is a global, shared instance of a pseudorandom bytes generator.

View file

@ -41,7 +41,7 @@ func naiveSelf() string {
if absName, err := filepath.Abs(name); err == nil { if absName, err := filepath.Abs(name); err == nil {
return absName return absName
} }
// if we coudn't get absolute name, return original // if we couldn't get absolute name, return original
// (NOTE: Go only errors on Abs() if os.Getwd fails) // (NOTE: Go only errors on Abs() if os.Getwd fails)
return name return name
} }

View file

@ -54,7 +54,7 @@ func (sf *StreamFormatter) FormatStatus(id, format string, a ...interface{}) []b
return []byte(str + streamNewline) return []byte(str + streamNewline)
} }
// FormatError formats the specifed error. // FormatError formats the specified error.
func (sf *StreamFormatter) FormatError(err error) []byte { func (sf *StreamFormatter) FormatError(err error) []byte {
if sf.json { if sf.json {
jsonError, ok := err.(*jsonmessage.JSONError) jsonError, ok := err.(*jsonmessage.JSONError)

View file

@ -48,7 +48,7 @@ func generateID(crypto bool) string {
} }
id := hex.EncodeToString(b) id := hex.EncodeToString(b)
// if we try to parse the truncated for as an int and we don't have // if we try to parse the truncated for as an int and we don't have
// an error then the value is all numberic and causes issues when // an error then the value is all numeric and causes issues when
// used as a hostname. ref #3869 // used as a hostname. ref #3869
if _, err := strconv.ParseInt(TruncateID(id), 10, 64); err == nil { if _, err := strconv.ParseInt(TruncateID(id), 10, 64); err == nil {
continue continue

View file

@ -5,7 +5,7 @@ import (
"strings" "strings"
) )
// StrSlice representes a string or an array of strings. // StrSlice represents a string or an array of strings.
// We need to override the json decoder to accept both options. // We need to override the json decoder to accept both options.
type StrSlice struct { type StrSlice struct {
parts []string parts []string

View file

@ -91,7 +91,7 @@ func walkSymlinks(path string) (string, error) {
return "", errors.New("EvalSymlinks: too many links in " + originalPath) return "", errors.New("EvalSymlinks: too many links in " + originalPath)
} }
// A path beginnging with `\\?\` represents the root, so automatically // A path beginning with `\\?\` represents the root, so automatically
// skip that part and begin processing the next segment. // skip that part and begin processing the next segment.
if strings.HasPrefix(path, longpath.Prefix) { if strings.HasPrefix(path, longpath.Prefix) {
b.WriteString(longpath.Prefix) b.WriteString(longpath.Prefix)

View file

@ -36,7 +36,7 @@ type cgroupMemInfo struct {
// Whether soft limit is supported or not // Whether soft limit is supported or not
MemoryReservation bool MemoryReservation bool
// Whether OOM killer disalbe is supported or not // Whether OOM killer disable is supported or not
OomKillDisable bool OomKillDisable bool
// Whether memory swappiness is supported or not // Whether memory swappiness is supported or not

View file

@ -5,7 +5,7 @@ import (
"unsafe" "unsafe"
) )
// LUtimesNano is used to change access and modification time of the speficied path. // LUtimesNano is used to change access and modification time of the specified path.
// It's used for symbol link file because syscall.UtimesNano doesn't support a NOFOLLOW flag atm. // It's used for symbol link file because syscall.UtimesNano doesn't support a NOFOLLOW flag atm.
func LUtimesNano(path string, ts []syscall.Timespec) error { func LUtimesNano(path string, ts []syscall.Timespec) error {
// These are not currently available in syscall // These are not currently available in syscall

Some files were not shown because too many files have changed in this diff Show more