commit 927b334ebf
parent f5e6b09783
Author: Justas Brazauskas <brazauskasjustas@gmail.com>
Date:   2015-12-13 18:00:39 +02:00

    Fix typos found across repository

    Signed-off-by: Justas Brazauskas <brazauskasjustas@gmail.com>

117 changed files with 159 additions and 159 deletions

@@ -145,7 +145,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
// then make sure we send both files over to the daemon
// because Dockerfile is, obviously, needed no matter what, and
// .dockerignore is needed to know if either one needs to be
-// removed. The deamon will remove them for us, if needed, after it
+// removed. The daemon will remove them for us, if needed, after it
// parses the Dockerfile. Ignore errors here, as they will have been
// caught by ValidateContextDirectory above.
var includes = []string{"."}

@@ -231,7 +231,7 @@ func (cli *DockerCli) copyToContainer(srcPath, dstContainer, dstPath string, cpP
// Ignore any error and assume that the parent directory of the destination
// path exists, in which case the copy may still succeed. If there is any
// type of conflict (e.g., non-directory overwriting an existing directory
-// or vice versia) the extraction will fail. If the destination simply did
+// or vice versa) the extraction will fail. If the destination simply did
// not exist, but the parent directory does, the extraction will still
// succeed.
if err == nil {
@@ -266,7 +266,7 @@ func (cli *DockerCli) copyToContainer(srcPath, dstContainer, dstPath string, cpP
// With the stat info about the local source as well as the
// destination, we have enough information to know whether we need to
// alter the archive that we upload so that when the server extracts
-// it to the specified directory in the container we get the disired
+// it to the specified directory in the container we get the desired
// copy behavior.
// See comments in the implementation of `archive.PrepareArchiveCopy`

@@ -130,7 +130,7 @@ func (cli *DockerCli) CmdNetworkDisconnect(args ...string) error {
return cli.client.NetworkDisconnect(cmd.Arg(0), cmd.Arg(1))
}
-// CmdNetworkLs lists all the netorks managed by docker daemon
+// CmdNetworkLs lists all the networks managed by docker daemon
//
// Usage: docker network ls [OPTIONS]
func (cli *DockerCli) CmdNetworkLs(args ...string) error {
@@ -198,8 +198,8 @@ func (cli *DockerCli) CmdNetworkInspect(args ...string) error {
// Consolidates the ipam configuration as a group from different related configurations
// user can configure network with multiple non-overlapping subnets and hence it is
-// possible to corelate the various related parameters and consolidate them.
-// consoidateIpam consolidates subnets, ip-ranges, gateways and auxilary addresses into
+// possible to correlate the various related parameters and consolidate them.
+// consoidateIpam consolidates subnets, ip-ranges, gateways and auxiliary addresses into
// structured ipam data.
func consolidateIpam(subnets, ranges, gateways []string, auxaddrs map[string]string) ([]network.IPAMConfig, error) {
if len(subnets) < len(ranges) || len(subnets) < len(gateways) {

@@ -100,7 +100,7 @@ func (cli *DockerCli) CmdStart(args ...string) error {
return err
}
-// 4. Wait for attachement to break.
+// 4. Wait for attachment to break.
if c.Config.Tty && cli.isTerminalOut {
if err := cli.monitorTtySize(containerID, false); err != nil {
fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err)

@@ -20,7 +20,7 @@ const (
// Version of Current REST API
Version version.Version = "1.22"
-// MinVersion represents Minimun REST API version supported
+// MinVersion represents Minimum REST API version supported
MinVersion version.Version = "1.12"
// DefaultDockerfileName is the Default filename with Docker commands, read by docker build

@@ -139,7 +139,7 @@ func versionMiddleware(handler httputils.APIFunc) httputils.APIFunc {
// handleWithGlobalMiddlwares wraps the handler function for a request with
// the server's global middlewares. The order of the middlewares is backwards,
-// meaning that the first in the list will be evaludated last.
+// meaning that the first in the list will be evaluated last.
//
// Example: handleWithGlobalMiddlewares(s.getContainersName)
//

@@ -478,7 +478,7 @@ func (s *router) postBuild(ctx context.Context, w http.ResponseWriter, r *http.R
func sanitizeRepoAndTags(names []string) ([]reference.Named, error) {
var (
repoAndTags []reference.Named
-// This map is used for deduplicating the "-t" paramter.
+// This map is used for deduplicating the "-t" parameter.
uniqNames = make(map[string]struct{})
)
for _, repo := range names {

@@ -35,7 +35,7 @@ func (l localRoute) Path() string {
return l.path
}
-// NewRoute initialies a new local route for the reouter
+// NewRoute initializes a new local router for the reouter
func NewRoute(method, path string, handler httputils.APIFunc) dkrouter.Route {
return localRoute{method, path, handler}
}

@@ -63,7 +63,7 @@ type BlkioStatEntry struct {
// BlkioStats stores All IO service stats for data read and write
// TODO Windows: This can be factored out
type BlkioStats struct {
-// number of bytes tranferred to and from the block device
+// number of bytes transferred to and from the block device
IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"`
IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"`
IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"`

@@ -105,7 +105,7 @@ func (fl *Flag) IsTrue() bool {
// compile time error so it doesn't matter too much when we stop our
// processing as long as we do stop it, so this allows the code
// around AddXXX() to be just:
-// defFlag := AddString("desription", "")
+// defFlag := AddString("description", "")
// w/o needing to add an if-statement around each one.
func (bf *BFlags) Parse() error {
// If there was an error while defining the possible flags

@@ -640,7 +640,7 @@ func arg(b *Builder, args []string, attributes map[string]bool, original string)
// If there is a default value associated with this arg then add it to the
// b.buildArgs if one is not already passed to the builder. The args passed
-// to builder override the defaut value of 'arg'.
+// to builder override the default value of 'arg'.
if _, ok := b.BuildArgs[name]; !ok && hasDefault {
b.BuildArgs[name] = value
}

@@ -4,7 +4,7 @@
// parser package for more information) that are yielded from the parser itself.
// Calling NewBuilder with the BuildOpts struct can be used to customize the
// experience for execution purposes only. Parsing is controlled in the parser
-// package, and this division of resposibility should be respected.
+// package, and this division of responsibility should be respected.
//
// Please see the jump table targets for the actual invocations, most of which
// will call out to the functions in internals.go to deal with their tasks.

@@ -70,7 +70,7 @@ func TestTestData(t *testing.T) {
}
if runtime.GOOS == "windows" {
-// CRLF --> CR to match Unix behaviour
+// CRLF --> CR to match Unix behavior
content = bytes.Replace(content, []byte{'\x0d', '\x0a'}, []byte{'\x0a'}, -1)
}

@@ -71,7 +71,7 @@ type ConfigFile struct {
filename string // Note: not serialized - for internal use only
}
-// NewConfigFile initilizes an empty configuration file for the given filename 'fn'
+// NewConfigFile initializes an empty configuration file for the given filename 'fn'
func NewConfigFile(fn string) *ConfigFile {
return &ConfigFile{
AuthConfigs: make(map[string]AuthConfig),

@@ -518,7 +518,7 @@ func (container *Container) AddMountPointWithVolume(destination string, vol volu
}
}
-// IsDestinationMounted checkes whether a path is mounted on the container or not.
+// IsDestinationMounted checks whether a path is mounted on the container or not.
func (container *Container) IsDestinationMounted(destination string) bool {
return container.MountPoints[destination] != nil
}

@@ -41,7 +41,7 @@ func (container *Container) IpcMounts() []execdriver.Mount {
return nil
}
-// UnmountVolumes explicitely unmounts volumes from the container.
+// UnmountVolumes explicitly unmounts volumes from the container.
func (container *Container) UnmountVolumes(forceSyscall bool) error {
return nil
}

@@ -121,7 +121,7 @@ func (m *containerMonitor) ExitOnNext() {
}
// Close closes the container's resources such as networking allocations and
-// unmounts the contatiner's root filesystem
+// unmounts the container's root filesystem
func (m *containerMonitor) Close() error {
// Cleanup networking and mounts
m.supervisor.Cleanup(m.container)

@@ -56,13 +56,13 @@ while [ $# -gt 0 ]; do
layersFs=$(echo "$manifestJson" | jq --raw-output '.fsLayers | .[] | .blobSum')
IFS=$'\n'
-# bash v4 on Windows CI requires CRLF seperator
+# bash v4 on Windows CI requires CRLF separator
if [ "$(go env GOHOSTOS)" = 'windows' ]; then
major=$(echo ${BASH_VERSION%%[^0.9]} | cut -d. -f1)
if [ "$major" -ge 4 ]; then
IFS=$'\r\n'
fi
fi
fi
layers=( ${layersFs} )
unset IFS

@@ -20,7 +20,7 @@ import (
var ErrExtractPointNotDirectory = errors.New("extraction point is not a directory")
// ContainerCopy performs a deprecated operation of archiving the resource at
-// the specified path in the conatiner identified by the given name.
+// the specified path in the container identified by the given name.
func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, error) {
container, err := daemon.GetContainer(name)
if err != nil {

@@ -49,9 +49,9 @@ func (daemon *Daemon) createContainerPlatformSpecificSettings(container *contain
// FIXME Windows: This code block is present in the Linux version and
// allows the contents to be copied to the container FS prior to it
-// being started. However, the function utilises the FollowSymLinkInScope
+// being started. However, the function utilizes the FollowSymLinkInScope
// path which does not cope with Windows volume-style file paths. There
-// is a seperate effort to resolve this (@swernli), so this processing
+// is a separate effort to resolve this (@swernli), so this processing
// is deferred for now. A case where this would be useful is when
// a dockerfile includes a VOLUME statement, but something is created
// in that directory during the dockerfile processing. What this means

@@ -13,7 +13,7 @@ import (
func setupDumpStackTrap() {
// Windows does not support signals like *nix systems. So instead of
// trapping on SIGUSR1 to dump stacks, we wait on a Win32 event to be
-// signalled.
+// signaled.
go func() {
sa := syscall.SecurityAttributes{
Length: 0,

@@ -284,7 +284,7 @@ func (d *Driver) setupMounts(container *configs.Config, c *execdriver.Command) e
userMounts[m.Destination] = struct{}{}
}
-// Filter out mounts that are overriden by user supplied mounts
+// Filter out mounts that are overridden by user supplied mounts
var defaultMounts []*configs.Mount
_, mountDev := userMounts["/dev"]
for _, m := range container.Mounts {

@@ -3,7 +3,7 @@
package btrfs
// TODO(vbatts) remove this work-around once supported linux distros are on
-// btrfs utililties of >= 3.16.1
+// btrfs utilities of >= 3.16.1
func btrfsBuildVersion() string {
return "-"

@@ -766,7 +766,7 @@ func (devices *DeviceSet) createRegisterDevice(hash string) (*devInfo, error) {
if err := devicemapper.CreateDevice(devices.getPoolDevName(), deviceID); err != nil {
if devicemapper.DeviceIDExists(err) {
// Device ID already exists. This should not
-// happen. Now we have a mechianism to find
+// happen. Now we have a mechanism to find
// a free device ID. So something is not right.
// Give a warning and continue.
logrus.Errorf("Device ID %d exists in pool but it is supposed to be unused", deviceID)
@@ -818,7 +818,7 @@ func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInf
if err := devicemapper.CreateSnapDevice(devices.getPoolDevName(), deviceID, baseInfo.Name(), baseInfo.DeviceID); err != nil {
if devicemapper.DeviceIDExists(err) {
// Device ID already exists. This should not
-// happen. Now we have a mechianism to find
+// happen. Now we have a mechanism to find
// a free device ID. So something is not right.
// Give a warning and continue.
logrus.Errorf("Device ID %d exists in pool but it is supposed to be unused", deviceID)
@@ -1749,7 +1749,7 @@ func (devices *DeviceSet) markForDeferredDeletion(info *devInfo) error {
info.Deleted = true
-// save device metadata to refelect deleted state.
+// save device metadata to reflect deleted state.
if err := devices.saveMetadata(info); err != nil {
info.Deleted = false
return err
@@ -1759,7 +1759,7 @@ func (devices *DeviceSet) markForDeferredDeletion(info *devInfo) error {
return nil
}
-// Should be caled with devices.Lock() held.
+// Should be called with devices.Lock() held.
func (devices *DeviceSet) deleteTransaction(info *devInfo, syncDelete bool) error {
if err := devices.openTransaction(info.Hash, info.DeviceID); err != nil {
logrus.Debugf("Error opening transaction hash = %s deviceId = %d", "", info.DeviceID)
@@ -1805,7 +1805,7 @@ func (devices *DeviceSet) issueDiscard(info *devInfo) error {
// This is a workaround for the kernel not discarding block so
// on the thin pool when we remove a thinp device, so we do it
// manually.
-// Even if device is deferred deleted, activate it and isue
+// Even if device is deferred deleted, activate it and issue
// discards.
if err := devices.activateDeviceIfNeeded(info, true); err != nil {
return err
@@ -2131,7 +2131,7 @@ func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error {
defer devices.Unlock()
// If there are running containers when daemon crashes, during daemon
-// restarting, it will kill running contaienrs and will finally call
+// restarting, it will kill running containers and will finally call
// Put() without calling Get(). So info.MountCount may become negative.
// if info.mountCount goes negative, we do the unmount and assign
// it to 0.

@@ -13,7 +13,7 @@ package devmapper
// * version number of the interface that they were
// * compiled with.
// *
-// * All recognised ioctl commands (ie. those that don't
+// * All recognized ioctl commands (ie. those that don't
// * return -ENOTTY) fill out this field, even if the
// * command failed.
// */

@@ -177,7 +177,7 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
idFile := path.Join(mp, "id")
if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) {
-// Create an "id" file with the container/image id in it to help reconscruct this in case
+// Create an "id" file with the container/image id in it to help reconstruct this in case
// of later problems
if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil {
d.DeviceSet.UnmountDevice(id, mp)

@@ -17,14 +17,14 @@ import (
type FsMagic uint32
const (
-// FsMagicUnsupported is a predifined contant value other than a valid filesystem id.
+// FsMagicUnsupported is a predefined constant value other than a valid filesystem id.
FsMagicUnsupported = FsMagic(0x00000000)
)
var (
// DefaultDriver if a storage driver is not specified.
DefaultDriver string
-// All registred drivers
+// All registered drivers
drivers map[string]InitFunc
// ErrNotSupported returned when driver is not supported.
@@ -120,7 +120,7 @@ func GetDriver(name, home string, options []string, uidMaps, gidMaps []idtools.I
return nil, ErrNotSupported
}
-// getBuiltinDriver initalizes and returns the registered driver, but does not try to load from plugins
+// getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins
func getBuiltinDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) {
if initFunc, exists := drivers[name]; exists {
return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps)

@@ -30,7 +30,7 @@ var (
ErrApplyDiffFallback = fmt.Errorf("Fall back to normal ApplyDiff")
)
-// ApplyDiffProtoDriver wraps the ProtoDriver by extending the inteface with ApplyDiff method.
+// ApplyDiffProtoDriver wraps the ProtoDriver by extending the interface with ApplyDiff method.
type ApplyDiffProtoDriver interface {
graphdriver.ProtoDriver
// ApplyDiff writes the diff to the archive for the given id and parent id.

@@ -31,7 +31,7 @@ func init() {
graphdriver.Register("zfs", Init)
}
-// Logger returns a zfs logger implmentation.
+// Logger returns a zfs logger implementation.
type Logger struct{}
// Log wraps log message from ZFS driver with a prefix '[zfs]'.

@@ -105,7 +105,7 @@ func (daemon *Daemon) Kill(container *container.Container) error {
return nil
}
-// killPossibleDeadProcess is a wrapper aroung killSig() suppressing "no such process" error.
+// killPossibleDeadProcess is a wrapper around killSig() suppressing "no such process" error.
func (daemon *Daemon) killPossiblyDeadProcess(container *container.Container, sig int) error {
err := daemon.killWithSignal(container, sig)
if err == syscall.ESRCH {

@@ -266,7 +266,7 @@ func includeContainerInList(container *container.Container, ctx *listContext) it
return excludeContainer
}
-// Stop interation when the container arrives to the filter container
+// Stop iteration when the container arrives to the filter container
if ctx.sinceFilter != nil {
if container.ID == ctx.sinceFilter.ID {
return stopIteration

@@ -54,7 +54,7 @@ func New(ctx logger.Context) (logger.Logger, error) {
}
extra := ctx.ExtraAttributes(nil)
logrus.Debugf("logging driver fluentd configured for container:%s, host:%s, port:%d, tag:%s, extra:%v.", ctx.ContainerID, host, port, tag, extra)
-// logger tries to recoonect 2**32 - 1 times
+// logger tries to reconnect 2**32 - 1 times
// failed (and panic) after 204 years [ 1.5 ** (2**32 - 1) - 1 seconds]
log, err := fluent.New(fluent.Config{FluentPort: port, FluentHost: host, RetryWait: 1000, MaxRetry: math.MaxInt32})
if err != nil {

@@ -146,7 +146,7 @@ func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan int
// io.ErrUnexpectedEOF is returned from json.Decoder when there is
// remaining data in the parser's buffer while an io.EOF occurs.
// If the json logger writes a partial json log entry to the disk
-// while at the same time the decoder tries to decode it, the race codition happens.
+// while at the same time the decoder tries to decode it, the race condition happens.
if err == io.ErrUnexpectedEOF && retries <= maxJSONDecodeRetry {
reader := io.MultiReader(dec.Buffered(), f)
dec = json.NewDecoder(reader)

@@ -32,7 +32,7 @@ func NewRotateFileWriter(logPath string, capacity int64, maxFiles int) (*RotateF
}, nil
}
-//WriteLog write log messge to File
+//WriteLog write log message to File
func (w *RotateFileWriter) Write(message []byte) (int, error) {
w.mu.Lock()
defer w.mu.Unlock()
@@ -106,7 +106,7 @@ func backup(fromPath, toPath string) error {
return os.Rename(fromPath, toPath)
}
-// LogPath returns the location the given wirter logs to.
+// LogPath returns the location the given writer logs to.
func (w *RotateFileWriter) LogPath() string {
return w.f.Name()
}

@@ -91,7 +91,7 @@ func New(ctx logger.Context) (logger.Logger, error) {
tlsConfig := &tls.Config{}
// Splunk is using autogenerated certificates by default,
-// allow users to trust them with skiping verification
+// allow users to trust them with skipping verification
if insecureSkipVerifyStr, ok := ctx.Config[splunkInsecureSkipVerifyKey]; ok {
insecureSkipVerify, err := strconv.ParseBool(insecureSkipVerifyStr)
if err != nil {

@@ -18,7 +18,7 @@ const (
)
// NetworkControllerEnabled checks if the networking stack is enabled.
-// This feature depends on OS primitives and it's dissabled in systems like Windows.
+// This feature depends on OS primitives and it's disabled in systems like Windows.
func (daemon *Daemon) NetworkControllerEnabled() bool {
return daemon.netController != nil
}

@@ -70,7 +70,7 @@ func (m mounts) parts(i int) int {
// 1. Select the previously configured mount points for the containers, if any.
// 2. Select the volumes mounted from another containers. Overrides previously configured mount point destination.
// 3. Select the bind mounts set by the client. Overrides previously configured mount point destinations.
-// 4. Cleanup old volumes that are about to be reasigned.
+// 4. Cleanup old volumes that are about to be reassigned.
func (daemon *Daemon) registerMountPoints(container *container.Container, hostConfig *runconfig.HostConfig) error {
binds := map[string]bool{}
mountPoints := map[string]*volume.MountPoint{}
@@ -148,7 +148,7 @@ func (daemon *Daemon) registerMountPoints(container *container.Container, hostCo
container.Lock()
-// 4. Cleanup old volumes that are about to be reasigned.
+// 4. Cleanup old volumes that are about to be reassigned.
for _, m := range mountPoints {
if m.BackwardsCompatible() {
if mp, exists := container.MountPoints[m.Destination]; exists && mp.Volume != nil {

@@ -41,7 +41,7 @@ type ImagePushConfig struct {
// MetadataStore is the storage backend for distribution-specific
// metadata.
MetadataStore metadata.Store
-// LayerStore manges layers.
+// LayerStore manages layers.
LayerStore layer.Store
// ImageStore manages images.
ImageStore image.Store

@@ -61,7 +61,7 @@ type transfer struct {
// running remains open as long as the transfer is in progress.
running chan struct{}
-// hasWatchers stays open until all watchers release the trasnfer.
+// hasWatchers stays open until all watchers release the transfer.
hasWatchers chan struct{}
// broadcastDone is true if the master progress channel has closed.
@@ -240,9 +240,9 @@ func (t *transfer) Cancel() {
// DoFunc is a function called by the transfer manager to actually perform
// a transfer. It should be non-blocking. It should wait until the start channel
-// is closed before transfering any data. If the function closes inactive, that
+// is closed before transferring any data. If the function closes inactive, that
// signals to the transfer manager that the job is no longer actively moving
-// data - for example, it may be waiting for a dependent tranfer to finish.
+// data - for example, it may be waiting for a dependent transfer to finish.
// This prevents it from taking up a slot.
type DoFunc func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer

docs/.gitignore

@@ -1,2 +1,2 @@
-# avoid commiting the awsconfig file used for releases
+# avoid committing the awsconfig file used for releases
awsconfig

@@ -55,7 +55,7 @@ The `boot2docker` command reads its configuration from the `$BOOT2DOCKER_PROFILE
The configuration shows you where `boot2docker` is looking for the `profile` file. It also output the settings that are in use.
-2. Initialise a default file to customize using `boot2docker config > ~/.boot2docker/profile` command.
+2. Initialize a default file to customize using `boot2docker config > ~/.boot2docker/profile` command.
3. Add the following lines to `$HOME/.boot2docker/profile`:

@@ -12,7 +12,7 @@ weight = 7
# Control and configure Docker with systemd
Many Linux distributions use systemd to start the Docker daemon. This document
-shows a few examples of how to customise Docker's settings.
+shows a few examples of how to customize Docker's settings.
## Starting the Docker daemon

@@ -10,7 +10,7 @@ parent = "smn_linux"
# openSUSE and SUSE Linux Enterprise
-This page provides instructions for installing and configuring the lastest
+This page provides instructions for installing and configuring the latest
Docker Engine software on openSUSE and SUSE systems.
>**Note:** You can also find bleeding edge Docker versions inside of the repositories maintained by the [Virtualization:containers project](https://build.opensuse.org/project/show/Virtualization:containers) on the [Open Build Service](https://build.opensuse.org/). This project delivers also other packages that are related with the Docker ecosystem (for example, Docker Compose).

@@ -99,11 +99,11 @@ This section lists each version from latest to oldest. Each listing includes a
* `GET /info` Now returns `Architecture` and `OSType` fields, providing information
about the host architecture and operating system type that the daemon runs on.
* `GET /networks/(name)` now returns a `Name` field for each container attached to the network.
-* `GET /version` now returns the `BuildTime` field in RFC3339Nano format to make it
+* `GET /version` now returns the `BuildTime` field in RFC3339Nano format to make it
consistent with other date/time values returned by the API.
* `AuthConfig` now supports a `registrytoken` for token based authentication
* `POST /containers/create` now has a 4M minimum value limit for `HostConfig.KernelMemory`
-* Pushes initated with `POST /images/(name)/push` and pulls initiated with `POST /images/create`
+* Pushes initiated with `POST /images/(name)/push` and pulls initiated with `POST /images/create`
will be cancelled if the HTTP connection making the API request is closed before
the push or pull completes.

@@ -27,11 +27,11 @@ and Docker images will report:
delete, import, pull, push, tag, untag
-The `--since` and `--until` parameters can be Unix timestamps, date formated
+The `--since` and `--until` parameters can be Unix timestamps, date formatted
timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed
relative to the client machines time. If you do not provide the --since option,
the command returns only new and/or live events. Supported formats for date
-formated time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`,
+formatted time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`,
`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local
timezone on the client will be used if you do not provide either a `Z` or a
`+-00:00` timezone offset at the end of the timestamp. When providing Unix

@@ -101,7 +101,7 @@ ID 260 gen 11 top level 5 path btrfs/subvolumes/3c9a9d7cc6a235eb2de58ca9ef3551c6
ID 261 gen 12 top level 5 path btrfs/subvolumes/0a17decee4139b0de68478f149cc16346f5e711c5ae3bb969895f22dd6723751
```
-Under the `/var/lib/docker/btrfs/subvolumes` directoy, each of these subvolumes and snapshots are visible as a normal Unix directory:
+Under the `/var/lib/docker/btrfs/subvolumes` directory, each of these subvolumes and snapshots are visible as a normal Unix directory:
```bash
$ ls -l /var/lib/docker/btrfs/subvolumes/

@@ -81,7 +81,7 @@ var (
HTTPStatusCode: http.StatusInternalServerError,
})
-// ErrorCodeEmptyID is generated when an ID is the emptry string.
+// ErrorCodeEmptyID is generated when an ID is the empty string.
ErrorCodeEmptyID = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "EMPTYID",
Message: "Invalid empty id",

@@ -204,7 +204,7 @@ Respond with a string error if an error occurred.
{}
```
-Perform neccessary tasks to release resources help by the plugin, for example
+Perform necessary tasks to release resources help by the plugin, for example
unmounting all the layered file systems.
**Response**:

@@ -12,13 +12,13 @@ In this experimental phase, the Docker daemon creates a single daemon-wide mappi
for all containers running on the same engine instance. The mappings will
utilize the existing subordinate user and group ID feature available on all modern
Linux distributions.
-The [`/etc/subuid`](http://man7.org/linux/man-pages/man5/subuid.5.html) and
+The [`/etc/subuid`](http://man7.org/linux/man-pages/man5/subuid.5.html) and
[`/etc/subgid`](http://man7.org/linux/man-pages/man5/subgid.5.html) files will be
-read for the user, and optional group, specified to the `--userns-remap`
-parameter. If you do not wish to specify your own user and/or group, you can
+read for the user, and optional group, specified to the `--userns-remap`
+parameter. If you do not wish to specify your own user and/or group, you can
provide `default` as the value to this flag, and a user will be created on your behalf
and provided subordinate uid and gid ranges. This default user will be named
-`dockremap`, and entries will be created for it in `/etc/passwd` and
+`dockremap`, and entries will be created for it in `/etc/passwd` and
`/etc/group` using your distro's standard user and group creation tools.
> **Note**: The single mapping per-daemon restriction exists for this experimental
@@ -43,7 +43,7 @@ values in the following formats:
If numeric IDs are provided, translation back to valid user or group names
will occur so that the subordinate uid and gid information can be read, given
these resources are name-based, not id-based. If the numeric ID information
-provided does not exist as entries in `/etc/passwd` or `/etc/group`, dameon
+provided does not exist as entries in `/etc/passwd` or `/etc/group`, daemon
startup will fail with an error message.
*An example: starting with default Docker user management:*
@@ -67,7 +67,7 @@ create the following range, based on an existing user already having the first
> **Note:** On a fresh Fedora install, we found that we had to `touch` the
> `/etc/subuid` and `/etc/subgid` files to have ranges assigned when users
-> were created. Once these files existed, range assigment on user creation
+> were created. Once these files existed, range assignment on user creation
> worked properly.
If you have a preferred/self-managed user with subordinate ID mappings already
@@ -84,7 +84,7 @@ current experimental user namespace support.
The simplest case exists where only one contiguous range is defined for the
provided user or group. In this case, Docker will use that entire contiguous
-range for the mapping of host uids and gids to the container process. This
+range for the mapping of host uids and gids to the container process. This
means that the first ID in the range will be the remapped root user, and the
IDs above that initial ID will map host ID 1 through the end of the range.

@@ -12,7 +12,7 @@ set -e
# will be used as Docker binary version and package version.
# - The hash of the git commit will also be included in the Docker binary,
# with the suffix -dirty if the repository isn't clean.
-# - The script is intented to be run inside the docker container specified
+# - The script is intended to be run inside the docker container specified
# in the Dockerfile at the root of the source. In other words:
# DO NOT CALL THIS SCRIPT DIRECTLY.
# - The right way to call this script is to invoke "make" from

@@ -225,7 +225,7 @@ release_build() {
;;
arm)
s3Arch=armel
-# someday, we might potentially support mutliple GOARM values, in which case we might get armhf here too
+# someday, we might potentially support multiple GOARM values, in which case we might get armhf here too
;;
*)
echo >&2 "error: can't convert $s3Arch to an appropriate value for 'uname -m'"

@@ -268,7 +268,7 @@ func testGetSet(t *testing.T, store StoreBackend) {
if err != nil {
t.Fatal(err)
}
-// skipping use of digest pkg because its used by the imlementation
+// skipping use of digest pkg because its used by the implementation
h := sha256.New()
_, err = h.Write(randomInput)
if err != nil {

@@ -96,7 +96,7 @@ type History struct {
Author string `json:"author,omitempty"`
// CreatedBy keeps the Dockerfile command used while building image.
CreatedBy string `json:"created_by,omitempty"`
-// Comment is custom mesage set by the user when creating the image.
+// Comment is custom message set by the user when creating the image.
Comment string `json:"comment,omitempty"`
// EmptyLayer is set to true if this history item did not generate a
// layer. Otherwise, the history item is associated with the next

@@ -6,7 +6,7 @@ import "github.com/docker/docker/layer"
// RootFS describes images root filesystem
// This is currently a placeholder that only supports layers. In the future
-// this can be made into a interface that supports different implementaions.
+// this can be made into a interface that supports different implementations.
type RootFS struct {
Type string `json:"type"`
DiffIDs []layer.DiffID `json:"diff_ids,omitempty"`

@@ -12,7 +12,7 @@ import (
// RootFS describes images root filesystem
// This is currently a placeholder that only supports layers. In the future
-// this can be made into a interface that supports different implementaions.
+// this can be made into a interface that supports different implementations.
type RootFS struct {
Type string `json:"type"`
DiffIDs []layer.DiffID `json:"diff_ids,omitempty"`

@@ -137,7 +137,7 @@ func (s *DockerSuite) TestPostContainersAttach(c *check.C) {
// Since the container only emits stdout, attaching to stderr should return nothing.
expectTimeout(conn, br, "stdout")
-// Test the simlar functions of the stderr stream.
+// Test the similar functions of the stderr stream.
cid, _ = dockerCmd(c, "run", "-di", "busybox", "/bin/sh", "-c", "cat >&2")
cid = strings.TrimSpace(cid)
conn, br, err = sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stderr=1", nil, "text/plain")

@@ -35,7 +35,7 @@ func (s *DockerSuite) TestAttachClosedOnContainerStop(c *check.C) {
errChan := make(chan error)
go func() {
defer close(errChan)
-// Container is wating for us to signal it to stop
+// Container is waiting for us to signal it to stop
dockerCmd(c, "stop", id)
// And wait for the attach command to end
errChan <- attachCmd.Wait()

@@ -4612,7 +4612,7 @@ func (s *DockerSuite) TestBuildInvalidTag(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "abcd:" + stringutils.GenerateRandomAlphaOnlyString(200)
_, out, err := buildImageWithOut(name, "FROM scratch\nMAINTAINER quux\n", true)
-// if the error doesnt check for illegal tag name, or the image is built
+// if the error doesn't check for illegal tag name, or the image is built
// then this should fail
if !strings.Contains(out, "invalid reference format") || strings.Contains(out, "Sending build context to Docker daemon") {
c.Fatalf("failed to stop before building. Error: %s, Output: %s", err, out)
@@ -4817,7 +4817,7 @@ func (s *DockerSuite) TestBuildVerifySingleQuoteFails(c *check.C) {
// This testcase is supposed to generate an error because the
// JSON array we're passing in on the CMD uses single quotes instead
// of double quotes (per the JSON spec). This means we interpret it
-// as a "string" insead of "JSON array" and pass it on to "sh -c" and
+// as a "string" instead of "JSON array" and pass it on to "sh -c" and
// it should barf on it.
name := "testbuildsinglequotefails"

@@ -87,7 +87,7 @@ func (s *DockerSuite) TestCpToErrDstParentNotExists(c *check.C) {
}
// Test for error when DST ends in a trailing path separator but exists as a
-// file. Also test that we cannot overwirite an existing directory with a
+// file. Also test that we cannot overwrite an existing directory with a
// non-directory and cannot overwrite an existing
func (s *DockerSuite) TestCpToErrDstNotDir(c *check.C) {
testRequires(c, DaemonIsLinux)

@@ -171,7 +171,7 @@ func (s *DockerSuite) TestImagesEnsureDanglingImageOnlyListedOnce(c *check.C) {
dockerCmd(c, "tag", "-f", "busybox", "foobox")
out, _ = dockerCmd(c, "images", "-q", "-f", "dangling=true")
-// Exect one dangling image
+// Expect one dangling image
c.Assert(strings.Count(out, imageID), checker.Equals, 1)
}

@@ -170,7 +170,7 @@ func (s *DockerNetworkSuite) SetUpSuite(c *check.C) {
return
}
w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json")
-// make sure libnetwork is now asking to release the expected address fro mthe expected poolid
+// make sure libnetwork is now asking to release the expected address from the expected poolid
if addressRequest.PoolID != poolID {
fmt.Fprintf(w, `{"Error":"unknown pool id"}`)
} else if addressReleaseReq.Address != gw {
@@ -429,7 +429,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkIpamMultipleNetworks(c *check.C) {
assertNwIsAvailable(c, "test5")
// test network with multiple subnets
-// bridge network doesnt support multiple subnets. hence, use a dummy driver that supports
+// bridge network doesn't support multiple subnets. hence, use a dummy driver that supports
dockerCmd(c, "network", "create", "-d", dummyNetworkDriver, "--subnet=192.168.0.0/16", "--subnet=192.170.0.0/16", "test6")
assertNwIsAvailable(c, "test6")
@@ -491,7 +491,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkIpamInvalidCombinations(c *check.C
_, _, err = dockerCmdWithError("network", "create", "--subnet=192.168.0.0/16", "--gateway=192.168.0.1", "--gateway=192.168.0.2", "test")
c.Assert(err, check.NotNil)
-// Multiple overlaping subnets in the same network must fail
+// Multiple overlapping subnets in the same network must fail
_, _, err = dockerCmdWithError("network", "create", "--subnet=192.168.0.0/16", "--subnet=192.168.1.0/16", "test")
c.Assert(err, check.NotNil)

@@ -223,7 +223,7 @@ func (s *DockerSuite) TestUnpublishedPortsInPsOutput(c *check.C) {
// Cannot find expected port binding (expBnd2) in docker ps output
c.Assert(out, checker.Contains, expBnd2)
-// Remove container now otherwise it will interfeer with next test
+// Remove container now otherwise it will interfere with next test
stopRemoveContainer(id, c)
// Run the container with explicit port bindings and no exposed ports
@@ -236,7 +236,7 @@ func (s *DockerSuite) TestUnpublishedPortsInPsOutput(c *check.C) {
c.Assert(out, checker.Contains, expBnd1)
// Cannot find expected port binding (expBnd2) in docker ps output
c.Assert(out, checker.Contains, expBnd2)
-// Remove container now otherwise it will interfeer with next test
+// Remove container now otherwise it will interfere with next test
stopRemoveContainer(id, c)
// Run the container with one unpublished exposed port and one explicit port binding

@@ -754,7 +754,7 @@ func (s *DockerSuite) TestRunContainerNetwork(c *check.C) {
func (s *DockerSuite) TestRunNetHostNotAllowedWithLinks(c *check.C) {
// TODO Windows: This is Linux specific as --link is not supported and
-// this will be deprecated in favour of container networking model.
+// this will be deprecated in favor of container networking model.
testRequires(c, DaemonIsLinux, NotUserNamespace)
dockerCmd(c, "run", "--name", "linked", "busybox", "true")

@@ -109,14 +109,14 @@ func (s *DockerSuite) TestVolumeCliLsFilterDangling(c *check.C) {
out, _ = dockerCmd(c, "volume", "ls", "--filter", "dangling=false")
-// Same as above, but expicitly disabling dangling
+// Same as above, but explicitly disabling dangling
c.Assert(out, checker.Contains, "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output"))
c.Assert(out, checker.Contains, "testisinuse1\n", check.Commentf("expected volume 'testisinuse1' in output"))
c.Assert(out, checker.Contains, "testisinuse2\n", check.Commentf("expected volume 'testisinuse2' in output"))
out, _ = dockerCmd(c, "volume", "ls", "--filter", "dangling=true")
-// Filter "dangling" volumes; ony "dangling" (unused) volumes should be in the output
+// Filter "dangling" volumes; only "dangling" (unused) volumes should be in the output
c.Assert(out, checker.Contains, "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output"))
c.Assert(out, check.Not(checker.Contains), "testisinuse1\n", check.Commentf("volume 'testisinuse1' in output, but not expected"))
c.Assert(out, check.Not(checker.Contains), "testisinuse2\n", check.Commentf("volume 'testisinuse2' in output, but not expected"))

@@ -30,7 +30,7 @@ var (
// daemonPlatform is held globally so that tests can make intelligent
// decisions on how to configure themselves according to the platform
-// of the daemon. This is initialised in docker_utils by sending
+// of the daemon. This is initialized in docker_utils by sending
// a version call to the daemon and examining the response header.
daemonPlatform string

@@ -1416,7 +1416,7 @@ func newFakeGit(name string, files map[string]string, enforceLocalServer bool) (
return nil, fmt.Errorf("cannot start fake storage: %v", err)
}
} else {
-// always start a local http server on CLI test machin
+// always start a local http server on CLI test machine
httpServer := httptest.NewServer(http.FileServer(http.Dir(root)))
server = &localGitServer{httpServer}
}
@@ -1430,7 +1430,7 @@ func newFakeGit(name string, files map[string]string, enforceLocalServer bool) (
// Write `content` to the file at path `dst`, creating it if necessary,
// as well as any missing directories.
// The file is truncated if it already exists.
-// Fail the test when error occures.
+// Fail the test when error occurs.
func writeFile(dst, content string, c *check.C) {
// Create subdirectories if necessary
c.Assert(os.MkdirAll(path.Dir(dst), 0700), check.IsNil)
@@ -1443,7 +1443,7 @@ func newFakeGit(name string, files map[string]string, enforceLocalServer bool) (
}
// Return the contents of file at path `src`.
-// Fail the test when error occures.
+// Fail the test when error occurs.
func readFile(src string, c *check.C) (content string) {
data, err := ioutil.ReadFile(src)
c.Assert(err, check.IsNil)

@@ -1,6 +1,6 @@
// Package layer is package for managing read only
// and read-write mounts on the union file system
-// driver. Read-only mounts are refenced using a
+// driver. Read-only mounts are referenced using a
// content hash and are protected from mutation in
// the exposed interface. The tar format is used
// to create read only layers and export both
@@ -189,7 +189,7 @@ type MetadataStore interface {
GetInitID(string) (string, error)
GetMountParent(string) (ChainID, error)
-// List returns the full list of referened
+// List returns the full list of referenced
// read-only and read-write layers
List() ([]ChainID, []string, error)

@@ -418,7 +418,7 @@ func (ls *layerStore) saveMount(mount *mountedLayer) error {
func (ls *layerStore) initMount(graphID, parent, mountLabel string, initFunc MountInit) (string, error) {
// Use "<graph-id>-init" to maintain compatibility with graph drivers
// which are expecting this layer with this special name. If all
-// graph drivers can be updated to not rely on knowin about this layer
+// graph drivers can be updated to not rely on knowing about this layer
// then the initID should be randomly generated.
initID := fmt.Sprintf("%s-init", graphID)

@@ -37,11 +37,11 @@ and Docker images will report:
**--until**=""
Stream events until this timestamp
-The `--since` and `--until` parameters can be Unix timestamps, date formated
+The `--since` and `--until` parameters can be Unix timestamps, date formatted
timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed
relative to the client machines time. If you do not provide the --since option,
the command returns only new and/or live events. Supported formats for date
-formated time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`,
+formatted time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`,
`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local
timezone on the client will be used if you do not provide either a `Z` or a
`+-00:00` timezone offset at the end of the timestamp. When providing Unix
@@ -49,7 +49,7 @@ timestamps enter seconds[.nanoseconds], where seconds is the number of seconds
that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap
seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a
fraction of a second no more than nine digits long.
# EXAMPLES
## Listening for Docker events

@@ -48,7 +48,7 @@ the running containers.
.Ports - Exposed ports.
.Status - Container status.
.Size - Container disk size.
-.Labels - All labels asigned to the container.
+.Labels - All labels assigned to the container.
.Label - Value of a specific label for this container. For example `{{.Label "com.docker.swarm.cpu"}}`
**--help**

@@ -106,7 +106,7 @@ func TestParseEnvFileBadlyFormattedFile(t *testing.T) {
}
}
-// Test ParseEnvFile for a file with a line exeeding bufio.MaxScanTokenSize
+// Test ParseEnvFile for a file with a line exceeding bufio.MaxScanTokenSize
func TestParseEnvFileLineTooLongFile(t *testing.T) {
content := strings.Repeat("a", bufio.MaxScanTokenSize+42)
content = fmt.Sprint("foo=", content)

@@ -22,7 +22,7 @@ func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt {
}
// Set sets an IPv4 or IPv6 address from a given string. If the given
-// string is not parsable as an IP address it returns an error.
+// string is not parseable as an IP address it returns an error.
func (o *IPOpt) Set(val string) error {
ip := net.ParseIP(val)
if ip == nil {

@@ -31,7 +31,7 @@ type (
Archive io.ReadCloser
// Reader is a type of io.Reader.
Reader io.Reader
-// Compression is the state represtents if compressed or not.
+// Compression is the state represents if compressed or not.
Compression int
// TarChownOptions wraps the chown options UID and GID.
TarChownOptions struct {

@@ -19,7 +19,7 @@ func fixVolumePathPrefix(srcPath string) string {
}
// getWalkRoot calculates the root path when performing a TarWithOptions.
-// We use a seperate function as this is platform specific. On Linux, we
+// We use a separate function as this is platform specific. On Linux, we
// can't use filepath.Join(srcPath,include) because this will clean away
// a trailing "." or "/" which may be important.
func getWalkRoot(srcPath string, include string) string {

@@ -19,7 +19,7 @@ func fixVolumePathPrefix(srcPath string) string {
}
// getWalkRoot calculates the root path when performing a TarWithOptions.
-// We use a seperate function as this is platform specific.
+// We use a separate function as this is platform specific.
func getWalkRoot(srcPath string, include string) string {
return filepath.Join(srcPath, include)
}

@@ -150,7 +150,7 @@ func Changes(layers []string, rw string) ([]Change, error) {
// If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
// This block is here to ensure the change is recorded even if the
-// modify time, mode and size of the parent directoriy in the rw and ro layers are all equal.
+// modify time, mode and size of the parent directory in the rw and ro layers are all equal.
// Check https://github.com/docker/docker/pull/13590 for details.
if f.IsDir() {
changedDirs[path] = struct{}{}

@@ -9,7 +9,7 @@ package archive
const WhiteoutPrefix = ".wh."
// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not
-// for remoing an actaul file. Normally these files are excluded from exported
+// for removing an actual file. Normally these files are excluded from exported
// archives.
const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix

@@ -87,7 +87,7 @@ func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.Fil
func AttachLoopDevice(sparseName string) (loop *os.File, err error) {
// Try to retrieve the next available loopback device via syscall.
-// If it fails, we discard error and start loopking for a
+// If it fails, we discard error and start looping for a
// loopback from index 0.
startIndex, err := getNextFreeLoopbackIndex()
if err != nil {

@@ -290,7 +290,7 @@ func (s *FakeStore) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVP
}
// WatchTree will fail the first time, and return the mockKVchan afterwards.
-// This is the behaviour we need for testing.. If we need 'moar', should update this.
+// This is the behavior we need for testing.. If we need 'moar', should update this.
func (s *FakeStore) WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) {
if s.watchTreeCallCount == 0 {
s.watchTreeCallCount = 1

@@ -1,6 +1,6 @@
// Package filenotify provides a mechanism for watching file(s) for changes.
// Generally leans on fsnotify, but provides a poll-based notifier which fsnotify does not support.
-// These are wrapped up in a common interface so that either can be used interchangably in your code.
+// These are wrapped up in a common interface so that either can be used interchangeably in your code.
package filenotify
import "gopkg.in/fsnotify.v1"

@@ -24,7 +24,7 @@ const watchWaitTime = 200 * time.Millisecond
// filePoller is used to poll files for changes, especially in cases where fsnotify
// can't be run (e.g. when inotify handles are exhausted)
-// filePoller satifies the FileWatcher interface
+// filePoller satisfies the FileWatcher interface
type filePoller struct {
// watches is the list of files currently being polled, close the associated channel to stop the watch
watches map[string]chan struct{}

@@ -78,7 +78,7 @@ func Matches(file string, patterns []string) (bool, error) {
// OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go.
// It will assume that the inputs have been preprocessed and therefore the function
-// doen't need to do as much error checking and clean-up. This was done to avoid
+// doesn't need to do as much error checking and clean-up. This was done to avoid
// repeating these steps on each file being checked during the archive process.
// The more generic fileutils.Matches() can't make these assumptions.
func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) {

@@ -295,7 +295,7 @@ func ConsumeWithSpeed(reader io.Reader, chunkSize int, interval time.Duration, s
}
}
-// ParseCgroupPaths arses 'procCgroupData', which is output of '/proc/<pid>/cgroup', and returns
+// ParseCgroupPaths parses 'procCgroupData', which is output of '/proc/<pid>/cgroup', and returns
// a map which cgroup name as key and path as value.
func ParseCgroupPaths(procCgroupData string) map[string]string {
cgroupPaths := map[string]string{}
@@ -337,7 +337,7 @@ func (c *ChannelBuffer) ReadTimeout(p []byte, n time.Duration) (int, error) {
}
}
-// RunAtDifferentDate runs the specifed function with the given time.
+// RunAtDifferentDate runs the specified function with the given time.
// It changes the date of the system, which can led to weird behaviors.
func RunAtDifferentDate(date time.Time, block func()) {
// Layout for date. MMDDhhmmYYYY

@@ -309,7 +309,7 @@ func TestCompareDirectoryEntries(t *testing.T) {
}
}
-// FIXME make an "unhappy path" test for ListTar without "panicing" :-)
+// FIXME make an "unhappy path" test for ListTar without "panicking" :-)
func TestListTar(t *testing.T) {
tmpFolder, err := ioutil.TempDir("", "integration-cli-utils-list-tar")
if err != nil {

@@ -20,7 +20,7 @@ func NopWriteCloser(w io.Writer) io.WriteCloser {
return &nopWriteCloser{w}
}
-// NopFlusher represents a type which flush opetatin is nop.
+// NopFlusher represents a type which flush operation is nop.
type NopFlusher struct{}
// Flush is a nop operation.

@@ -19,8 +19,8 @@ type JSONLog struct {
// Format returns the log formatted according to format
// If format is nil, returns the log message
-// If format is json, returns the log marshalled in json format
-// By defalut, returns the log with the log time formatted according to format.
+// If format is json, returns the log marshaled in json format
+// By default, returns the log with the log time formatted according to format.
func (jl *JSONLog) Format(format string) (string, error) {
if format == "" {
return jl.Log, nil

@@ -60,7 +60,7 @@ func (p *JSONProgress) String() string {
percentage = 50
}
if width > 110 {
-// this number can't be negetive gh#7136
+// this number can't be negative gh#7136
numSpaces := 0
if 50-percentage > 0 {
numSpaces = 50 - percentage
@@ -106,7 +106,7 @@ type JSONMessage struct {
// Display displays the JSONMessage to `out`. `isTerminal` describes if `out`
// is a terminal. If this is the case, it will erase the entire current line
-// when dislaying the progressbar.
+// when displaying the progressbar.
func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error {
if jm.Error != nil {
if jm.Error.Code == 401 {

@@ -41,7 +41,7 @@ func (l *lockCtr) inc() {
atomic.AddInt32(&l.waiters, 1)
}
-// dec decrements the number of waiters wating on the lock
+// dec decrements the number of waiters waiting on the lock
func (l *lockCtr) dec() {
atomic.AddInt32(&l.waiters, -1)
}

@@ -1228,7 +1228,7 @@ func (v mergeVal) IsBoolFlag() bool {
// Merge is an helper function that merges n FlagSets into a single dest FlagSet
// In case of name collision between the flagsets it will apply
-// the destination FlagSet's errorHandling behaviour.
+// the destination FlagSet's errorHandling behavior.
func Merge(dest *FlagSet, flagsets ...*FlagSet) error {
for _, fset := range flagsets {
for k, f := range fset.formal {

@@ -23,7 +23,7 @@ const (
SYNCHRONOUS = syscall.MS_SYNCHRONOUS
// DIRSYNC will force all directory updates within the file system to be done
-// synchronously. This affects the following system calls: creat, link,
+// synchronously. This affects the following system calls: create, link,
// unlink, symlink, mkdir, rmdir, mknod and rename.
DIRSYNC = syscall.MS_DIRSYNC

@@ -168,7 +168,7 @@ func TestSubtreeShared(t *testing.T) {
}
}()
-// NOW, check that the file from the outside directory is avaible in the source directory
+// NOW, check that the file from the outside directory is available in the source directory
if _, err := os.Stat(sourceCheckPath); err != nil {
t.Fatal(err)
}

@@ -128,7 +128,7 @@ func (filters Args) Len() int {
return len(filters.fields)
}
-// MatchKVList returns true if the values for the specified field maches the ones
+// MatchKVList returns true if the values for the specified field matches the ones
// from the sources.
// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}},
// field is 'label' and sources are {'label1': '1', 'label2': '2'}

@@ -10,7 +10,7 @@ import (
"time"
)
-// Rand is a global *rand.Rand instance, which initilized with NewSource() source.
+// Rand is a global *rand.Rand instance, which initialized with NewSource() source.
var Rand = rand.New(NewSource())
// Reader is a global, shared instance of a pseudorandom bytes generator.

@@ -41,7 +41,7 @@ func naiveSelf() string {
if absName, err := filepath.Abs(name); err == nil {
return absName
}
-// if we coudn't get absolute name, return original
+// if we couldn't get absolute name, return original
// (NOTE: Go only errors on Abs() if os.Getwd fails)
return name
}

@@ -54,7 +54,7 @@ func (sf *StreamFormatter) FormatStatus(id, format string, a ...interface{}) []b
return []byte(str + streamNewline)
}
-// FormatError formats the specifed error.
+// FormatError formats the specified error.
func (sf *StreamFormatter) FormatError(err error) []byte {
if sf.json {
jsonError, ok := err.(*jsonmessage.JSONError)

@@ -48,7 +48,7 @@ func generateID(crypto bool) string {
}
id := hex.EncodeToString(b)
// if we try to parse the truncated for as an int and we don't have
-// an error then the value is all numberic and causes issues when
+// an error then the value is all numeric and causes issues when
// used as a hostname. ref #3869
if _, err := strconv.ParseInt(TruncateID(id), 10, 64); err == nil {
continue

@@ -5,7 +5,7 @@ import (
"strings"
)
-// StrSlice representes a string or an array of strings.
+// StrSlice represents a string or an array of strings.
// We need to override the json decoder to accept both options.
type StrSlice struct {
parts []string

@@ -91,7 +91,7 @@ func walkSymlinks(path string) (string, error) {
return "", errors.New("EvalSymlinks: too many links in " + originalPath)
}
-// A path beginnging with `\\?\` represents the root, so automatically
+// A path beginning with `\\?\` represents the root, so automatically
// skip that part and begin processing the next segment.
if strings.HasPrefix(path, longpath.Prefix) {
b.WriteString(longpath.Prefix)

@@ -36,7 +36,7 @@ type cgroupMemInfo struct {
// Whether soft limit is supported or not
MemoryReservation bool
-// Whether OOM killer disalbe is supported or not
+// Whether OOM killer disable is supported or not
OomKillDisable bool
// Whether memory swappiness is supported or not

@@ -5,7 +5,7 @@ import (
"unsafe"
)
-// LUtimesNano is used to change access and modification time of the speficied path.
+// LUtimesNano is used to change access and modification time of the specified path.
// It's used for symbol link file because syscall.UtimesNano doesn't support a NOFOLLOW flag atm.
func LUtimesNano(path string, ts []syscall.Timespec) error {
// These are not currently available in syscall

Some files were not shown because too many files have changed in this diff.