golint fixes for daemon/ package

 - some method names were changed to have a 'Locking' suffix, as the
 downcased versions already existed, and the existing functions simply
 wrapped the downcased versions in locks (see the sketch below)
 - deleted unused functions
 - added a package comment
 - replaced magic numbers with Go constants
 - added comments throughout
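
A minimal sketch (not code from this commit) of the 'Locking' suffix
convention: the downcased method assumes its caller already holds the
lock, while the suffixed variant acquires the lock around the call,
mirroring toDisk/toDiskLocking in daemon/container.go below.

    package main

    import (
        "fmt"
        "sync"
    )

    type store struct {
        mu    sync.Mutex
        items map[string]string
    }

    // get assumes the caller already holds s.mu.
    func (s *store) get(k string) string {
        return s.items[k]
    }

    // getLocking wraps get with the lock, as the 'Locking' suffix signals.
    func (s *store) getLocking(k string) string {
        s.mu.Lock()
        defer s.mu.Unlock()
        return s.get(k)
    }

    func main() {
        s := &store{items: map[string]string{"a": "1"}}
        fmt.Println(s.getLocking("a")) // prints 1
    }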

Signed-off-by: Morgan Bauer <mbauer@us.ibm.com>
Morgan Bauer committed 10 years ago
commit abd72d4008

+ 1 - 1
api/server/server_experimental_unix.go

@@ -3,7 +3,7 @@
 package server
 
 func (s *Server) registerSubRouter() {
-	httpHandler := s.daemon.NetworkApiRouter()
+	httpHandler := s.daemon.NetworkAPIRouter()
 
 	subrouter := s.router.PathPrefix("/v{version:[0-9.]+}/networks").Subrouter()
 	subrouter.Methods("GET", "POST", "PUT", "DELETE").HandlerFunc(httpHandler)
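
The NetworkApiRouter -> NetworkAPIRouter rename above follows golint's
initialism rule: abbreviations such as API, DNS, and ID keep one
consistent case. A small runnable illustration (hypothetical names,
not from this commit), matching the Dns -> DNS renames further down:

    package main

    import "fmt"

    // golint flags mixed-case initialisms, so these fields use the
    // forms it prefers.
    type config struct {
        DNS       []string // was Dns
        DNSSearch []string // was DnsSearch
    }

    func main() {
        c := config{DNS: []string{"8.8.8.8"}}
        fmt.Println(c.DNS)
    }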

+ 8 - 8
daemon/archive.go

@@ -29,7 +29,7 @@ func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, err
 		res = res[1:]
 	}
 
-	return container.Copy(res)
+	return container.copy(res)
 }
 
 // ContainerStatPath stats the filesystem resource at the specified path in the
@@ -142,7 +142,7 @@ func (container *Container) StatPath(path string) (stat *types.ContainerPathStat
 	defer container.Unmount()
 
 	err = container.mountVolumes()
-	defer container.UnmountVolumes(true)
+	defer container.unmountVolumes(true)
 	if err != nil {
 		return nil, err
 	}
@@ -177,7 +177,7 @@ func (container *Container) ArchivePath(path string) (content io.ReadCloser, sta
 	defer func() {
 		if err != nil {
 			// unmount any volumes
-			container.UnmountVolumes(true)
+			container.unmountVolumes(true)
 			// unmount the container's rootfs
 			container.Unmount()
 		}
@@ -212,13 +212,13 @@ func (container *Container) ArchivePath(path string) (content io.ReadCloser, sta
 
 	content = ioutils.NewReadCloserWrapper(data, func() error {
 		err := data.Close()
-		container.UnmountVolumes(true)
+		container.unmountVolumes(true)
 		container.Unmount()
 		container.Unlock()
 		return err
 	})
 
-	container.LogEvent("archive-path")
+	container.logEvent("archive-path")
 
 	return content, stat, nil
 }
@@ -239,7 +239,7 @@ func (container *Container) ExtractToDir(path string, noOverwriteDirNonDir bool,
 	defer container.Unmount()
 
 	err = container.mountVolumes()
-	defer container.UnmountVolumes(true)
+	defer container.unmountVolumes(true)
 	if err != nil {
 		return err
 	}
@@ -288,7 +288,7 @@ func (container *Container) ExtractToDir(path string, noOverwriteDirNonDir bool,
 	}
 
 	if !toVolume && container.hostConfig.ReadonlyRootfs {
-		return ErrContainerRootfsReadonly
+		return ErrRootFSReadOnly
 	}
 
 	options := &archive.TarOptions{
@@ -302,7 +302,7 @@ func (container *Container) ExtractToDir(path string, noOverwriteDirNonDir bool,
 		return err
 	}
 
-	container.LogEvent("extract-to-dir")
+	container.logEvent("extract-to-dir")
 
 	return nil
 }

+ 7 - 2
daemon/attach.go

@@ -6,6 +6,7 @@ import (
 	"github.com/docker/docker/pkg/stdcopy"
 )
 
+// ContainerAttachWithLogsConfig holds the streams to use when connecting to a container to view logs.
 type ContainerAttachWithLogsConfig struct {
 	InStream                       io.ReadCloser
 	OutStream                      io.Writer
@@ -13,6 +14,7 @@ type ContainerAttachWithLogsConfig struct {
 	Logs, Stream                   bool
 }
 
+// ContainerAttachWithLogs attaches to logs according to the config passed in. See ContainerAttachWithLogsConfig.
 func (daemon *Daemon) ContainerAttachWithLogs(container *Container, c *ContainerAttachWithLogsConfig) error {
 	var errStream io.Writer
 
@@ -36,15 +38,18 @@ func (daemon *Daemon) ContainerAttachWithLogs(container *Container, c *Container
 		stderr = errStream
 	}
 
-	return container.AttachWithLogs(stdin, stdout, stderr, c.Logs, c.Stream)
+	return container.attachWithLogs(stdin, stdout, stderr, c.Logs, c.Stream)
 }
 
+// ContainerWsAttachWithLogsConfig is used when attaching over websockets;
+// all stream data is delegated to the websocket to handle.
 type ContainerWsAttachWithLogsConfig struct {
 	InStream             io.ReadCloser
 	OutStream, ErrStream io.Writer
 	Logs, Stream         bool
 }
 
+// ContainerWsAttachWithLogs attaches to a container over a websocket connection.
 func (daemon *Daemon) ContainerWsAttachWithLogs(container *Container, c *ContainerWsAttachWithLogsConfig) error {
-	return container.AttachWithLogs(c.InStream, c.OutStream, c.ErrStream, c.Logs, c.Stream)
+	return container.attachWithLogs(c.InStream, c.OutStream, c.ErrStream, c.Logs, c.Stream)
 }
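
The two attach config structs above illustrate the daemon's pattern of
one method-specific struct per operation. A self-contained sketch of
the pattern (types reduced and hypothetical, not the daemon's real
definitions):

    package main

    import (
        "fmt"
        "io"
        "os"
    )

    // attachConfig stands in for ContainerAttachWithLogsConfig.
    type attachConfig struct {
        InStream             io.Reader
        OutStream, ErrStream io.Writer
        Logs, Stream         bool
    }

    func attach(c *attachConfig) error {
        // A real implementation would wire these streams to the container.
        fmt.Fprintf(c.OutStream, "logs=%v stream=%v\n", c.Logs, c.Stream)
        return nil
    }

    func main() {
        _ = attach(&attachConfig{
            InStream:  os.Stdin,
            OutStream: os.Stdout,
            ErrStream: os.Stderr,
            Logs:      true,
        })
    }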

+ 1 - 1
daemon/changes.go

@@ -9,5 +9,5 @@ func (daemon *Daemon) ContainerChanges(name string) ([]archive.Change, error) {
 		return nil, err
 	}
 
-	return container.Changes()
+	return container.changes()
 }

+ 8 - 6
daemon/commit.go

@@ -5,6 +5,8 @@ import (
 	"github.com/docker/docker/runconfig"
 )
 
+// ContainerCommitConfig contains build configs for the commit operation,
+// and is used when making a commit with the current state of the container.
 type ContainerCommitConfig struct {
 	Pause   bool
 	Repo    string
@@ -15,14 +17,14 @@ type ContainerCommitConfig struct {
 }
 
 // Commit creates a new filesystem image from the current state of a container.
-// The image can optionally be tagged into a repository
+// The image can optionally be tagged into a repository.
 func (daemon *Daemon) Commit(container *Container, c *ContainerCommitConfig) (*image.Image, error) {
-	if c.Pause && !container.IsPaused() {
-		container.Pause()
-		defer container.Unpause()
+	if c.Pause && !container.isPaused() {
+		container.pause()
+		defer container.unpause()
 	}
 
-	rwTar, err := container.ExportRw()
+	rwTar, err := container.exportRw()
 	if err != nil {
 		return nil, err
 	}
@@ -55,6 +57,6 @@ func (daemon *Daemon) Commit(container *Container, c *ContainerCommitConfig) (*i
 			return img, err
 		}
 	}
-	container.LogEvent("commit")
+	container.logEvent("commit")
 	return img, nil
 }

+ 4 - 4
daemon/config.go

@@ -18,8 +18,8 @@ type CommonConfig struct {
 	Bridge         bridgeConfig // Bridge holds bridge network specific configuration.
 	Context        map[string][]string
 	DisableBridge  bool
-	Dns            []string
-	DnsSearch      []string
+	DNS            []string
+	DNSSearch      []string
 	ExecDriver     string
 	ExecOptions    []string
 	ExecRoot       string
@@ -50,8 +50,8 @@ func (config *Config) InstallCommonFlags(cmd *flag.FlagSet, usageFn func(string)
 	cmd.StringVar(&config.ExecDriver, []string{"e", "-exec-driver"}, defaultExec, usageFn("Exec driver to use"))
 	cmd.IntVar(&config.Mtu, []string{"#mtu", "-mtu"}, 0, usageFn("Set the containers network MTU"))
 	// FIXME: why the inconsistency between "hosts" and "sockets"?
-	cmd.Var(opts.NewListOptsRef(&config.Dns, opts.ValidateIPAddress), []string{"#dns", "-dns"}, usageFn("DNS server to use"))
-	cmd.Var(opts.NewListOptsRef(&config.DnsSearch, opts.ValidateDNSSearch), []string{"-dns-search"}, usageFn("DNS search domains to use"))
+	cmd.Var(opts.NewListOptsRef(&config.DNS, opts.ValidateIPAddress), []string{"#dns", "-dns"}, usageFn("DNS server to use"))
+	cmd.Var(opts.NewListOptsRef(&config.DNSSearch, opts.ValidateDNSSearch), []string{"-dns-search"}, usageFn("DNS search domains to use"))
 	cmd.Var(opts.NewListOptsRef(&config.Labels, opts.ValidateLabel), []string{"-label"}, usageFn("Set key=value labels to the daemon"))
 	cmd.StringVar(&config.LogConfig.Type, []string{"-log-driver"}, "json-file", usageFn("Default driver for container logs"))
 	cmd.Var(opts.NewMapOpts(config.LogConfig.Config, nil), []string{"-log-opt"}, usageFn("Set log driver options"))

+ 136 - 123
daemon/container.go

@@ -34,13 +34,12 @@ import (
 )
 
 var (
-	ErrNotATTY                 = errors.New("The PTY is not a file")
-	ErrNoTTY                   = errors.New("No PTY found")
-	ErrContainerStart          = errors.New("The container failed to start. Unknown error")
-	ErrContainerStartTimeout   = errors.New("The container failed to start due to timed out.")
-	ErrContainerRootfsReadonly = errors.New("container rootfs is marked read-only")
+	// ErrRootFSReadOnly is returned when a container
+	// rootfs is marked readonly.
+	ErrRootFSReadOnly = errors.New("container rootfs is marked read-only")
 )
 
+// ErrContainerNotRunning holds the id of the container that is not running.
 type ErrContainerNotRunning struct {
 	id string
 }
@@ -49,48 +48,49 @@ func (e ErrContainerNotRunning) Error() string {
 	return fmt.Sprintf("Container %s is not running", e.id)
 }
 
-type StreamConfig struct {
+type streamConfig struct {
 	stdout    *broadcastwriter.BroadcastWriter
 	stderr    *broadcastwriter.BroadcastWriter
 	stdin     io.ReadCloser
 	stdinPipe io.WriteCloser
 }
 
-// CommonContainer holds the settings for a container which are applicable
-// across all platforms supported by the daemon.
+// CommonContainer holds the fields for a container which are
+// applicable across all platforms supported by the daemon.
 type CommonContainer struct {
-	StreamConfig
-
-	*State `json:"State"` // Needed for remote api version <= 1.11
-	root   string         // Path to the "home" of the container, including metadata.
-	basefs string         // Path to the graphdriver mountpoint
-
-	ID                       string
-	Created                  time.Time
-	Path                     string
-	Args                     []string
-	Config                   *runconfig.Config
-	ImageID                  string `json:"Image"`
-	NetworkSettings          *network.Settings
-	LogPath                  string
-	Name                     string
-	Driver                   string
-	ExecDriver               string
-	MountLabel, ProcessLabel string
-	RestartCount             int
-	HasBeenStartedBefore     bool
-	HasBeenManuallyStopped   bool // used for unless-stopped restart policy
-	hostConfig               *runconfig.HostConfig
-	command                  *execdriver.Command
-	monitor                  *containerMonitor
-	execCommands             *execStore
-	daemon                   *Daemon
+	streamConfig
+	// embed for Container to support states directly.
+	*State          `json:"State"` // Needed for remote api version <= 1.11
+	root            string         // Path to the "home" of the container, including metadata.
+	basefs          string         // Path to the graphdriver mountpoint
+	ID              string
+	Created         time.Time
+	Path            string
+	Args            []string
+	Config          *runconfig.Config
+	ImageID         string `json:"Image"`
+	NetworkSettings *network.Settings
+	LogPath         string
+	Name            string
+	Driver          string
+	ExecDriver      string
+	// MountLabel contains the options for the 'mount' command
+	MountLabel             string
+	ProcessLabel           string
+	RestartCount           int
+	HasBeenStartedBefore   bool
+	HasBeenManuallyStopped bool // used for unless-stopped restart policy
+	hostConfig             *runconfig.HostConfig
+	command                *execdriver.Command
+	monitor                *containerMonitor
+	execCommands           *execStore
+	daemon                 *Daemon
 	// logDriver for closing
 	logDriver logger.Logger
 	logCopier *logger.Copier
 }
 
-func (container *Container) FromDisk() error {
+func (container *Container) fromDisk() error {
 	pth, err := container.jsonPath()
 	if err != nil {
 		return err
@@ -131,10 +131,10 @@ func (container *Container) toDisk() error {
 		return err
 	}
 
-	return container.WriteHostConfig()
+	return container.writeHostConfig()
 }
 
-func (container *Container) ToDisk() error {
+func (container *Container) toDiskLocking() error {
 	container.Lock()
 	err := container.toDisk()
 	container.Unlock()
@@ -165,7 +165,7 @@ func (container *Container) readHostConfig() error {
 	return json.NewDecoder(f).Decode(&container.hostConfig)
 }
 
-func (container *Container) WriteHostConfig() error {
+func (container *Container) writeHostConfig() error {
 	data, err := json.Marshal(container.hostConfig)
 	if err != nil {
 		return err
@@ -179,7 +179,7 @@ func (container *Container) WriteHostConfig() error {
 	return ioutil.WriteFile(pth, data, 0666)
 }
 
-func (container *Container) LogEvent(action string) {
+func (container *Container) logEvent(action string) {
 	d := container.daemon
 	d.EventsService.Log(
 		action,
@@ -188,7 +188,7 @@ func (container *Container) LogEvent(action string) {
 	)
 }
 
-// Evaluates `path` in the scope of the container's basefs, with proper path
+// GetResourcePath evaluates `path` in the scope of the container's basefs, with proper path
 // sanitisation. Symlinks are all scoped to the basefs of the container, as
 // though the container's basefs was `/`.
 //
@@ -221,18 +221,18 @@ func (container *Container) GetResourcePath(path string) (string, error) {
 //       if no component of the returned path changes (such as a component
 //       symlinking to a different path) between using this method and using the
 //       path. See symlink.FollowSymlinkInScope for more details.
-func (container *Container) GetRootResourcePath(path string) (string, error) {
+func (container *Container) getRootResourcePath(path string) (string, error) {
 	// IMPORTANT - These are paths on the OS where the daemon is running, hence
 	// any filepath operations must be done in an OS agnostic way.
 	cleanPath := filepath.Join(string(os.PathSeparator), path)
 	return symlink.FollowSymlinkInScope(filepath.Join(container.root, cleanPath), container.root)
 }
 
-func (container *Container) ExportRw() (archive.Archive, error) {
+func (container *Container) exportContainerRw() (archive.Archive, error) {
 	if container.daemon == nil {
 		return nil, fmt.Errorf("Can't load storage driver for unregistered container %s", container.ID)
 	}
-	archive, err := container.daemon.Diff(container)
+	archive, err := container.daemon.diff(container)
 	if err != nil {
 		return nil, err
 	}
@@ -243,6 +243,10 @@ func (container *Container) ExportRw() (archive.Archive, error) {
 		nil
 }
 
+// Start prepares the container to run by setting up everything the
+// container needs, such as storage and networking, as well as links
+// between containers. The container is left waiting for a signal to
+// begin running.
 func (container *Container) Start() (err error) {
 	container.Lock()
 	defer container.Unlock()
@@ -266,7 +270,7 @@ func (container *Container) Start() (err error) {
 			}
 			container.toDisk()
 			container.cleanup()
-			container.LogEvent("die")
+			container.logEvent("die")
 		}
 	}()
 
@@ -302,7 +306,7 @@ func (container *Container) Start() (err error) {
 	return container.waitForStart()
 }
 
-func (container *Container) Run() error {
+func (container *Container) run() error {
 	if err := container.Start(); err != nil {
 		return err
 	}
@@ -311,7 +315,7 @@ func (container *Container) Run() error {
 	return nil
 }
 
-func (container *Container) Output() (output []byte, err error) {
+func (container *Container) output() (output []byte, err error) {
 	pipe := container.StdoutPipe()
 	defer pipe.Close()
 	if err := container.Start(); err != nil {
@@ -322,7 +326,7 @@ func (container *Container) Output() (output []byte, err error) {
 	return output, err
 }
 
-// StreamConfig.StdinPipe returns a WriteCloser which can be used to feed data
+// streamConfig.StdinPipe returns a WriteCloser which can be used to feed data
 // to the standard input of the container's active process.
 // Container.StdoutPipe and Container.StderrPipe each return a ReadCloser
 // which can be used to retrieve the standard output (and error) generated
@@ -330,17 +334,17 @@ func (container *Container) Output() (output []byte, err error) {
 // copied and delivered to all StdoutPipe and StderrPipe consumers, using
 // a kind of "broadcaster".
 
-func (streamConfig *StreamConfig) StdinPipe() io.WriteCloser {
+func (streamConfig *streamConfig) StdinPipe() io.WriteCloser {
 	return streamConfig.stdinPipe
 }
 
-func (streamConfig *StreamConfig) StdoutPipe() io.ReadCloser {
+func (streamConfig *streamConfig) StdoutPipe() io.ReadCloser {
 	reader, writer := io.Pipe()
 	streamConfig.stdout.AddWriter(writer)
 	return ioutils.NewBufReader(reader)
 }
 
-func (streamConfig *StreamConfig) StderrPipe() io.ReadCloser {
+func (streamConfig *streamConfig) StderrPipe() io.ReadCloser {
 	reader, writer := io.Pipe()
 	streamConfig.stderr.AddWriter(writer)
 	return ioutils.NewBufReader(reader)
@@ -353,7 +357,7 @@ func (container *Container) isNetworkAllocated() bool {
 // cleanup releases any network resources allocated to the container along with any rules
 // around how containers are linked together.  It also unmounts the container's root filesystem.
 func (container *Container) cleanup() {
-	container.ReleaseNetwork()
+	container.releaseNetwork()
 
 	if err := container.Unmount(); err != nil {
 		logrus.Errorf("%v: Failed to umount filesystem: %v", container.ID, err)
@@ -363,10 +367,15 @@ func (container *Container) cleanup() {
 		container.daemon.unregisterExecCommand(eConfig)
 	}
 
-	container.UnmountVolumes(false)
+	container.unmountVolumes(false)
 }
 
-func (container *Container) KillSig(sig int) error {
+// killSig sends the container the given signal. This wrapper for the
+// host specific kill command prepares the container before attempting
+// to send the signal. An error is returned if the container is paused
+// or not running, or if there is a problem returned from the
+// underlying kill command.
+func (container *Container) killSig(sig int) error {
 	logrus.Debugf("Sending %d to %s", sig, container.ID)
 	container.Lock()
 	defer container.Unlock()
@@ -391,24 +400,24 @@ func (container *Container) KillSig(sig int) error {
 		return nil
 	}
 
-	if err := container.daemon.Kill(container, sig); err != nil {
+	if err := container.daemon.kill(container, sig); err != nil {
 		return err
 	}
-	container.LogEvent("kill")
+	container.logEvent("kill")
 	return nil
 }
 
-// Wrapper aroung KillSig() suppressing "no such process" error.
+// Wrapper around killSig() suppressing "no such process" error.
 func (container *Container) killPossiblyDeadProcess(sig int) error {
-	err := container.KillSig(sig)
+	err := container.killSig(sig)
 	if err == syscall.ESRCH {
-		logrus.Debugf("Cannot kill process (pid=%d) with signal %d: no such process.", container.GetPid(), sig)
+		logrus.Debugf("Cannot kill process (pid=%d) with signal %d: no such process.", container.getPID(), sig)
 		return nil
 	}
 	return err
 }
 
-func (container *Container) Pause() error {
+func (container *Container) pause() error {
 	container.Lock()
 	defer container.Unlock()
 
@@ -426,11 +435,11 @@ func (container *Container) Pause() error {
 		return err
 	}
 	container.Paused = true
-	container.LogEvent("pause")
+	container.logEvent("pause")
 	return nil
 }
 
-func (container *Container) Unpause() error {
+func (container *Container) unpause() error {
 	container.Lock()
 	defer container.Unlock()
 
@@ -448,17 +457,18 @@ func (container *Container) Unpause() error {
 		return err
 	}
 	container.Paused = false
-	container.LogEvent("unpause")
+	container.logEvent("unpause")
 	return nil
 }
 
+// Kill forcefully terminates a container.
 func (container *Container) Kill() error {
 	if !container.IsRunning() {
 		return ErrContainerNotRunning{container.ID}
 	}
 
 	// 1. Send SIGKILL
-	if err := container.killPossiblyDeadProcess(9); err != nil {
+	if err := container.killPossiblyDeadProcess(int(syscall.SIGKILL)); err != nil {
 		// While normally we might "return err" here we're not going to
 		// because if we can't stop the container by this point then
 		// its probably because its already stopped. Meaning, between
@@ -487,15 +497,20 @@ func (container *Container) Kill() error {
 	return nil
 }
 
+// Stop halts a container by sending SIGTERM, waiting for the given
+// duration in seconds, and then sending SIGKILL and waiting for the
+// process to exit. If a negative duration is given, Stop waits
+// forever after the SIGTERM. If the container is not running, Stop
+// returns immediately.
 func (container *Container) Stop(seconds int) error {
 	if !container.IsRunning() {
 		return nil
 	}
 
 	// 1. Send a SIGTERM
-	if err := container.killPossiblyDeadProcess(15); err != nil {
+	if err := container.killPossiblyDeadProcess(int(syscall.SIGTERM)); err != nil {
 		logrus.Infof("Failed to send SIGTERM to the process, force killing")
-		if err := container.killPossiblyDeadProcess(9); err != nil {
+		if err := container.killPossiblyDeadProcess(int(syscall.SIGKILL)); err != nil {
 			return err
 		}
 	}
@@ -510,10 +525,14 @@ func (container *Container) Stop(seconds int) error {
 		}
 	}
 
-	container.LogEvent("stop")
+	container.logEvent("stop")
 	return nil
 }
 
+// Restart attempts to gracefully stop and then start the
+// container. When stopping, wait for the given duration in seconds to
+// gracefully stop, before forcefully terminating the container. If
+// given a negative duration, wait forever for a graceful stop.
 func (container *Container) Restart(seconds int) error {
 	// Avoid unnecessarily unmounting and then directly mounting
 	// the container when the container stops and then starts
@@ -530,10 +549,12 @@ func (container *Container) Restart(seconds int) error {
 		return err
 	}
 
-	container.LogEvent("restart")
+	container.logEvent("restart")
 	return nil
 }
 
+// Resize changes the TTY of the process running inside the container
+// to the given height and width. The container must be running.
 func (container *Container) Resize(h, w int) error {
 	if !container.IsRunning() {
 		return ErrContainerNotRunning{container.ID}
@@ -541,11 +562,11 @@ func (container *Container) Resize(h, w int) error {
 	if err := container.command.ProcessConfig.Terminal.Resize(h, w); err != nil {
 		return err
 	}
-	container.LogEvent("resize")
+	container.logEvent("resize")
 	return nil
 }
 
-func (container *Container) Export() (archive.Archive, error) {
+func (container *Container) export() (archive.Archive, error) {
 	if err := container.Mount(); err != nil {
 		return nil, err
 	}
@@ -560,46 +581,45 @@ func (container *Container) Export() (archive.Archive, error) {
 		container.Unmount()
 		return err
 	})
-	container.LogEvent("export")
+	container.logEvent("export")
 	return arch, err
 }
 
+// Mount sets container.basefs
 func (container *Container) Mount() error {
 	return container.daemon.Mount(container)
 }
 
 func (container *Container) changes() ([]archive.Change, error) {
-	return container.daemon.Changes(container)
-}
-
-func (container *Container) Changes() ([]archive.Change, error) {
 	container.Lock()
 	defer container.Unlock()
-	return container.changes()
+	return container.daemon.changes(container)
 }
 
-func (container *Container) GetImage() (*image.Image, error) {
+func (container *Container) getImage() (*image.Image, error) {
 	if container.daemon == nil {
 		return nil, fmt.Errorf("Can't get image of unregistered container")
 	}
 	return container.daemon.graph.Get(container.ImageID)
 }
 
+// Unmount asks the daemon to release the layered filesystems that are
+// mounted by the container.
 func (container *Container) Unmount() error {
-	return container.daemon.Unmount(container)
+	return container.daemon.unmount(container)
 }
 
 func (container *Container) hostConfigPath() (string, error) {
-	return container.GetRootResourcePath("hostconfig.json")
+	return container.getRootResourcePath("hostconfig.json")
 }
 
 func (container *Container) jsonPath() (string, error) {
-	return container.GetRootResourcePath("config.json")
+	return container.getRootResourcePath("config.json")
 }
 
 // This method must be exported to be used from the lxc template
 // This directory is only usable when the container is running
-func (container *Container) RootfsPath() string {
+func (container *Container) rootfsPath() string {
 	return container.basefs
 }
 
@@ -610,7 +630,7 @@ func validateID(id string) error {
 	return nil
 }
 
-func (container *Container) Copy(resource string) (rc io.ReadCloser, err error) {
+func (container *Container) copy(resource string) (rc io.ReadCloser, err error) {
 	container.Lock()
 
 	defer func() {
@@ -629,7 +649,7 @@ func (container *Container) Copy(resource string) (rc io.ReadCloser, err error)
 	defer func() {
 		if err != nil {
 			// unmount any volumes
-			container.UnmountVolumes(true)
+			container.unmountVolumes(true)
 			// unmount the container's rootfs
 			container.Unmount()
 		}
@@ -666,17 +686,17 @@ func (container *Container) Copy(resource string) (rc io.ReadCloser, err error)
 
 	reader := ioutils.NewReadCloserWrapper(archive, func() error {
 		err := archive.Close()
-		container.UnmountVolumes(true)
+		container.unmountVolumes(true)
 		container.Unmount()
 		container.Unlock()
 		return err
 	})
-	container.LogEvent("copy")
+	container.logEvent("copy")
 	return reader, nil
 }
 
 // Returns true if the container exposes a certain port
-func (container *Container) Exposes(p nat.Port) bool {
+func (container *Container) exposes(p nat.Port) bool {
 	_, exists := container.Config.ExposedPorts[p]
 	return exists
 }
@@ -718,7 +738,7 @@ func (container *Container) getLogger() (logger.Logger, error) {
 
 	// Set logging file for "json-logger"
 	if cfg.Type == jsonfilelog.Name {
-		ctx.LogPath, err = container.GetRootResourcePath(fmt.Sprintf("%s-json.log", container.ID))
+		ctx.LogPath, err = container.getRootResourcePath(fmt.Sprintf("%s-json.log", container.ID))
 		if err != nil {
 			return nil, err
 		}
@@ -764,7 +784,7 @@ func (container *Container) waitForStart() error {
 	return nil
 }
 
-func (container *Container) GetProcessLabel() string {
+func (container *Container) getProcessLabel() string {
 	// even if we have a process label return "" if we are running
 	// in privileged mode
 	if container.hostConfig.Privileged {
@@ -773,31 +793,22 @@ func (container *Container) GetProcessLabel() string {
 	return container.ProcessLabel
 }
 
-func (container *Container) GetMountLabel() string {
+func (container *Container) getMountLabel() string {
 	if container.hostConfig.Privileged {
 		return ""
 	}
 	return container.MountLabel
 }
 
-func (container *Container) Stats() (*execdriver.ResourceStats, error) {
-	return container.daemon.Stats(container)
-}
-
-func (c *Container) LogDriverType() string {
-	c.Lock()
-	defer c.Unlock()
-	if c.hostConfig.LogConfig.Type == "" {
-		return c.daemon.defaultLogConfig.Type
-	}
-	return c.hostConfig.LogConfig.Type
+func (container *Container) stats() (*execdriver.ResourceStats, error) {
+	return container.daemon.stats(container)
 }
 
-func (container *Container) GetExecIDs() []string {
+func (container *Container) getExecIDs() []string {
 	return container.execCommands.List()
 }
 
-func (container *Container) Exec(execConfig *execConfig) error {
+func (container *Container) exec(ExecConfig *ExecConfig) error {
 	container.Lock()
 	defer container.Unlock()
 
@@ -810,16 +821,16 @@ func (container *Container) Exec(execConfig *execConfig) error {
 				c.Close()
 			}
 		}
-		close(execConfig.waitStart)
+		close(ExecConfig.waitStart)
 	}
 
 	// We use a callback here instead of a goroutine and an chan for
 	// synchronization purposes
-	cErr := promise.Go(func() error { return container.monitorExec(execConfig, callback) })
+	cErr := promise.Go(func() error { return container.monitorExec(ExecConfig, callback) })
 
 	// Exec should not return until the process is actually running
 	select {
-	case <-execConfig.waitStart:
+	case <-ExecConfig.waitStart:
 	case err := <-cErr:
 		return err
 	}
@@ -827,46 +838,48 @@ func (container *Container) Exec(execConfig *execConfig) error {
 	return nil
 }
 
-func (container *Container) monitorExec(execConfig *execConfig, callback execdriver.StartCallback) error {
+func (container *Container) monitorExec(ExecConfig *ExecConfig, callback execdriver.StartCallback) error {
 	var (
 		err      error
 		exitCode int
 	)
-	pipes := execdriver.NewPipes(execConfig.StreamConfig.stdin, execConfig.StreamConfig.stdout, execConfig.StreamConfig.stderr, execConfig.OpenStdin)
-	exitCode, err = container.daemon.Exec(container, execConfig, pipes, callback)
+	pipes := execdriver.NewPipes(ExecConfig.streamConfig.stdin, ExecConfig.streamConfig.stdout, ExecConfig.streamConfig.stderr, ExecConfig.OpenStdin)
+	exitCode, err = container.daemon.Exec(container, ExecConfig, pipes, callback)
 	if err != nil {
 		logrus.Errorf("Error running command in existing container %s: %s", container.ID, err)
 	}
 	logrus.Debugf("Exec task in container %s exited with code %d", container.ID, exitCode)
-	if execConfig.OpenStdin {
-		if err := execConfig.StreamConfig.stdin.Close(); err != nil {
+	if ExecConfig.OpenStdin {
+		if err := ExecConfig.streamConfig.stdin.Close(); err != nil {
 			logrus.Errorf("Error closing stdin while running in %s: %s", container.ID, err)
 		}
 	}
-	if err := execConfig.StreamConfig.stdout.Clean(); err != nil {
+	if err := ExecConfig.streamConfig.stdout.Clean(); err != nil {
 		logrus.Errorf("Error closing stdout while running in %s: %s", container.ID, err)
 	}
-	if err := execConfig.StreamConfig.stderr.Clean(); err != nil {
+	if err := ExecConfig.streamConfig.stderr.Clean(); err != nil {
 		logrus.Errorf("Error closing stderr while running in %s: %s", container.ID, err)
 	}
-	if execConfig.ProcessConfig.Terminal != nil {
-		if err := execConfig.ProcessConfig.Terminal.Close(); err != nil {
+	if ExecConfig.ProcessConfig.Terminal != nil {
+		if err := ExecConfig.ProcessConfig.Terminal.Close(); err != nil {
 			logrus.Errorf("Error closing terminal while running in container %s: %s", container.ID, err)
 		}
 	}
 	// remove the exec command from the container's store only and not the
 	// daemon's store so that the exec command can be inspected.
-	container.execCommands.Delete(execConfig.ID)
+	container.execCommands.Delete(ExecConfig.ID)
 	return err
 }
 
-func (c *Container) Attach(stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error {
-	return attach(&c.StreamConfig, c.Config.OpenStdin, c.Config.StdinOnce, c.Config.Tty, stdin, stdout, stderr)
+// Attach connects to the container's TTY, delegating to standard
+// streams or websockets depending on the configuration.
+func (container *Container) Attach(stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error {
+	return attach(&container.streamConfig, container.Config.OpenStdin, container.Config.StdinOnce, container.Config.Tty, stdin, stdout, stderr)
 }
 
-func (c *Container) AttachWithLogs(stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool) error {
+func (container *Container) attachWithLogs(stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool) error {
 	if logs {
-		logDriver, err := c.getLogger()
+		logDriver, err := container.getLogger()
 		if err != nil {
 			return err
 		}
@@ -896,7 +909,7 @@ func (c *Container) AttachWithLogs(stdin io.ReadCloser, stdout, stderr io.Writer
 		}
 	}
 
-	c.LogEvent("attach")
+	container.logEvent("attach")
 
 	//stream
 	if stream {
@@ -910,17 +923,17 @@ func (c *Container) AttachWithLogs(stdin io.ReadCloser, stdout, stderr io.Writer
 			}()
 			stdinPipe = r
 		}
-		<-c.Attach(stdinPipe, stdout, stderr)
+		<-container.Attach(stdinPipe, stdout, stderr)
 		// If we are in stdinonce mode, wait for the process to end
 		// otherwise, simply return
-		if c.Config.StdinOnce && !c.Config.Tty {
-			c.WaitStop(-1 * time.Second)
+		if container.Config.StdinOnce && !container.Config.Tty {
+			container.WaitStop(-1 * time.Second)
 		}
 	}
 	return nil
 }
 
-func attach(streamConfig *StreamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error {
+func attach(streamConfig *streamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error {
 	var (
 		cStdout, cStderr io.ReadCloser
 		cStdin           io.WriteCloser
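
The Kill and Stop changes above replace the literals 9 and 15 with
named signal constants. A quick standard-library check (assuming a
unix build) that the constants match the magic numbers they replace:

    package main

    import (
        "fmt"
        "syscall"
    )

    func main() {
        // On Linux, SIGKILL is 9 and SIGTERM is 15 -- the magic
        // numbers this commit removes from Kill() and Stop().
        fmt.Println(int(syscall.SIGKILL), int(syscall.SIGTERM)) // 9 15
    }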

+ 52 - 30
daemon/container_unix.go

@@ -18,7 +18,9 @@ import (
 	"github.com/docker/docker/daemon/execdriver"
 	"github.com/docker/docker/daemon/links"
 	"github.com/docker/docker/daemon/network"
+	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/directory"
+	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/nat"
 	"github.com/docker/docker/pkg/stringid"
 	"github.com/docker/docker/pkg/system"
@@ -35,8 +37,13 @@ import (
 	"github.com/opencontainers/runc/libcontainer/label"
 )
 
+// DefaultPathEnv is a unix-style list of directories to search for
+// executables. Each directory is separated from the next by a colon
+// ':' character.
 const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
 
+// Container holds the fields specific to unixen implementations. See
+// CommonContainer for standard fields common to all containers.
 type Container struct {
 	CommonContainer
 
@@ -47,15 +54,15 @@ type Container struct {
 	HostsPath       string
 	MountPoints     map[string]*mountPoint
 	ResolvConfPath  string
-	UpdateDns       bool
-	Volumes         map[string]string // Deprecated since 1.7, kept for backwards compatibility
-	VolumesRW       map[string]bool   // Deprecated since 1.7, kept for backwards compatibility
+
+	Volumes   map[string]string // Deprecated since 1.7, kept for backwards compatibility
+	VolumesRW map[string]bool   // Deprecated since 1.7, kept for backwards compatibility
 }
 
 func killProcessDirectly(container *Container) error {
 	if _, err := container.WaitStop(10 * time.Second); err != nil {
 		// Ensure that we don't kill ourselves
-		if pid := container.GetPid(); pid != 0 {
+		if pid := container.getPID(); pid != 0 {
 			logrus.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", stringid.TruncateID(container.ID))
 			if err := syscall.Kill(pid, 9); err != nil {
 				if err != syscall.ESRCH {
@@ -73,7 +80,7 @@ func (container *Container) setupLinkedContainers() ([]string, error) {
 		env    []string
 		daemon = container.daemon
 	)
-	children, err := daemon.Children(container.Name)
+	children, err := daemon.children(container.Name)
 	if err != nil {
 		return nil, err
 	}
@@ -231,7 +238,7 @@ func populateCommand(c *Container, env []string) error {
 	for _, ul := range ulimits {
 		ulIdx[ul.Name] = ul
 	}
-	for name, ul := range c.daemon.config.Ulimits {
+	for name, ul := range c.daemon.configStore.Ulimits {
 		if _, exists := ulIdx[name]; !exists {
 			ulimits = append(ulimits, ul)
 		}
@@ -277,7 +284,7 @@ func populateCommand(c *Container, env []string) error {
 
 	c.command = &execdriver.Command{
 		ID:                 c.ID,
-		Rootfs:             c.RootfsPath(),
+		Rootfs:             c.rootfsPath(),
 		ReadonlyRootfs:     c.hostConfig.ReadonlyRootfs,
 		InitPath:           "/.dockerinit",
 		WorkingDir:         c.Config.WorkingDir,
@@ -292,8 +299,8 @@ func populateCommand(c *Container, env []string) error {
 		CapDrop:            c.hostConfig.CapDrop.Slice(),
 		GroupAdd:           c.hostConfig.GroupAdd,
 		ProcessConfig:      processConfig,
-		ProcessLabel:       c.GetProcessLabel(),
-		MountLabel:         c.GetMountLabel(),
+		ProcessLabel:       c.getProcessLabel(),
+		MountLabel:         c.getMountLabel(),
 		LxcConfig:          lxcConfig,
 		AppArmorProfile:    c.AppArmorProfile,
 		CgroupParent:       c.hostConfig.CgroupParent,
@@ -321,8 +328,8 @@ func mergeDevices(defaultDevices, userDevices []*configs.Device) []*configs.Devi
 	return append(devs, userDevices...)
 }
 
-// GetSize, return real size, virtual size
-func (container *Container) GetSize() (int64, int64) {
+// getSize returns the real size & virtual size of the container.
+func (container *Container) getSize() (int64, int64) {
 	var (
 		sizeRw, sizeRootfs int64
 		err                error
@@ -373,7 +380,7 @@ func (container *Container) trySetNetworkMount(destination string, path string)
 }
 
 func (container *Container) buildHostnameFile() error {
-	hostnamePath, err := container.GetRootResourcePath("hostname")
+	hostnamePath, err := container.getRootResourcePath("hostname")
 	if err != nil {
 		return err
 	}
@@ -400,13 +407,13 @@ func (container *Container) buildJoinOptions() ([]libnetwork.EndpointOption, err
 		joinOptions = append(joinOptions, libnetwork.JoinOptionUseDefaultSandbox())
 	}
 
-	container.HostsPath, err = container.GetRootResourcePath("hosts")
+	container.HostsPath, err = container.getRootResourcePath("hosts")
 	if err != nil {
 		return nil, err
 	}
 	joinOptions = append(joinOptions, libnetwork.JoinOptionHostsPath(container.HostsPath))
 
-	container.ResolvConfPath, err = container.GetRootResourcePath("resolv.conf")
+	container.ResolvConfPath, err = container.getRootResourcePath("resolv.conf")
 	if err != nil {
 		return nil, err
 	}
@@ -414,8 +421,8 @@ func (container *Container) buildJoinOptions() ([]libnetwork.EndpointOption, err
 
 	if len(container.hostConfig.DNS) > 0 {
 		dns = container.hostConfig.DNS
-	} else if len(container.daemon.config.Dns) > 0 {
-		dns = container.daemon.config.Dns
+	} else if len(container.daemon.configStore.DNS) > 0 {
+		dns = container.daemon.configStore.DNS
 	}
 
 	for _, d := range dns {
@@ -424,8 +431,8 @@ func (container *Container) buildJoinOptions() ([]libnetwork.EndpointOption, err
 
 	if len(container.hostConfig.DNSSearch) > 0 {
 		dnsSearch = container.hostConfig.DNSSearch
-	} else if len(container.daemon.config.DnsSearch) > 0 {
-		dnsSearch = container.daemon.config.DnsSearch
+	} else if len(container.daemon.configStore.DNSSearch) > 0 {
+		dnsSearch = container.daemon.configStore.DNSSearch
 	}
 
 	for _, ds := range dnsSearch {
@@ -445,7 +452,7 @@ func (container *Container) buildJoinOptions() ([]libnetwork.EndpointOption, err
 
 	var childEndpoints, parentEndpoints []string
 
-	children, err := container.daemon.Children(container.Name)
+	children, err := container.daemon.children(container.Name)
 	if err != nil {
 		return nil, err
 	}
@@ -470,7 +477,7 @@ func (container *Container) buildJoinOptions() ([]libnetwork.EndpointOption, err
 		joinOptions = append(joinOptions, libnetwork.JoinOptionExtraHost(parts[0], parts[1]))
 	}
 
-	refs := container.daemon.ContainerGraph().RefPaths(container.ID)
+	refs := container.daemon.containerGraph().RefPaths(container.ID)
 	for _, ref := range refs {
 		if ref.ParentID == "0" {
 			continue
@@ -481,7 +488,7 @@ func (container *Container) buildJoinOptions() ([]libnetwork.EndpointOption, err
 			logrus.Error(err)
 		}
 
-		if c != nil && !container.daemon.config.DisableBridge && container.hostConfig.NetworkMode.IsPrivate() {
+		if c != nil && !container.daemon.configStore.DisableBridge && container.hostConfig.NetworkMode.IsPrivate() {
 			logrus.Debugf("Update /etc/hosts of %s for alias %s with ip %s", c.ID, ref.Name, container.NetworkSettings.IPAddress)
 			joinOptions = append(joinOptions, libnetwork.JoinOptionParentUpdate(c.NetworkSettings.EndpointID, ref.Name, container.NetworkSettings.IPAddress))
 			if c.NetworkSettings.EndpointID != "" {
@@ -642,7 +649,7 @@ func (container *Container) updateNetworkSettings(n libnetwork.Network, ep libne
 	}
 
 	if container.hostConfig.NetworkMode == runconfig.NetworkMode("bridge") {
-		networkSettings.Bridge = container.daemon.config.Bridge.Iface
+		networkSettings.Bridge = container.daemon.configStore.Bridge.Iface
 	}
 
 	container.NetworkSettings = networkSettings
@@ -651,7 +658,7 @@ func (container *Container) updateNetworkSettings(n libnetwork.Network, ep libne
 
 // UpdateNetwork is used to update the container's network (e.g. when linked containers
 // get removed/unlinked).
-func (container *Container) UpdateNetwork() error {
+func (container *Container) updateNetwork() error {
 	n, err := container.daemon.netController.NetworkByID(container.NetworkSettings.NetworkID)
 	if err != nil {
 		return fmt.Errorf("error locating network id %s: %v", container.NetworkSettings.NetworkID, err)
@@ -803,7 +810,7 @@ func (container *Container) secondaryNetworkRequired(primaryNetworkType string)
 		return false
 	}
 
-	if container.daemon.config.DisableBridge {
+	if container.daemon.configStore.DisableBridge {
 		return false
 	}
 
@@ -816,7 +823,7 @@ func (container *Container) secondaryNetworkRequired(primaryNetworkType string)
 	return false
 }
 
-func (container *Container) AllocateNetwork() error {
+func (container *Container) allocateNetwork() error {
 	mode := container.hostConfig.NetworkMode
 	controller := container.daemon.netController
 	if container.Config.NetworkDisabled || mode.IsContainer() {
@@ -837,7 +844,7 @@ func (container *Container) AllocateNetwork() error {
 		return fmt.Errorf("conflicting options: publishing a service and network mode")
 	}
 
-	if runconfig.NetworkMode(networkDriver).IsBridge() && container.daemon.config.DisableBridge {
+	if runconfig.NetworkMode(networkDriver).IsBridge() && container.daemon.configStore.DisableBridge {
 		container.Config.NetworkDisabled = true
 		return nil
 	}
@@ -861,7 +868,7 @@ func (container *Container) AllocateNetwork() error {
 		return err
 	}
 
-	return container.WriteHostConfig()
+	return container.writeHostConfig()
 }
 
 func (container *Container) configureNetwork(networkName, service, networkDriver string, canCreateNetwork bool) error {
@@ -945,13 +952,28 @@ func (container *Container) initializeNetworking() error {
 
 	}
 
-	if err := container.AllocateNetwork(); err != nil {
+	if err := container.allocateNetwork(); err != nil {
 		return err
 	}
 
 	return container.buildHostnameFile()
 }
 
+func (container *Container) exportRw() (archive.Archive, error) {
+	if container.daemon == nil {
+		return nil, fmt.Errorf("Can't load storage driver for unregistered container %s", container.ID)
+	}
+	archive, err := container.daemon.diff(container)
+	if err != nil {
+		return nil, err
+	}
+	return ioutils.NewReadCloserWrapper(archive, func() error {
+			err := archive.Close()
+			return err
+		}),
+		nil
+}
+
 func (container *Container) getIpcContainer() (*Container, error) {
 	containerID := container.hostConfig.IpcMode.Container()
 	c, err := container.daemon.Get(containerID)
@@ -1013,7 +1035,7 @@ func (container *Container) getNetworkedContainer() (*Container, error) {
 	}
 }
 
-func (container *Container) ReleaseNetwork() {
+func (container *Container) releaseNetwork() {
 	if container.hostConfig.NetworkMode.IsContainer() || container.Config.NetworkDisabled {
 		return
 	}
@@ -1060,7 +1082,7 @@ func (container *Container) ReleaseNetwork() {
 	}
 }
 
-func (container *Container) UnmountVolumes(forceSyscall bool) error {
+func (container *Container) unmountVolumes(forceSyscall bool) error {
 	var volumeMounts []mountPoint
 
 	for _, mntPoint := range container.MountPoints {

+ 22 - 22
daemon/container_windows.go

@@ -7,12 +7,15 @@ import (
 	"strings"
 
 	"github.com/docker/docker/daemon/execdriver"
+	"github.com/docker/docker/pkg/archive"
 )
 
-// This is deliberately empty on Windows as the default path will be set by
+// DefaultPathEnv is deliberately empty on Windows as the default path will be set by
 // the container. Docker has no context of what the default path should be.
 const DefaultPathEnv = ""
 
+// Container holds fields specific to the Windows implementation. See
+// CommonContainer for standard fields common to all containers.
 type Container struct {
 	CommonContainer
 
@@ -23,14 +26,6 @@ func killProcessDirectly(container *Container) error {
 	return nil
 }
 
-func (container *Container) setupContainerDns() error {
-	return nil
-}
-
-func (container *Container) updateParentsHosts() error {
-	return nil
-}
-
 func (container *Container) setupLinkedContainers() ([]string, error) {
 	return nil, nil
 }
@@ -60,7 +55,7 @@ func populateCommand(c *Container, env []string) error {
 		if !c.Config.NetworkDisabled {
 			en.Interface = &execdriver.NetworkInterface{
 				MacAddress:   c.Config.MacAddress,
-				Bridge:       c.daemon.config.Bridge.VirtualSwitchName,
+				Bridge:       c.daemon.configStore.Bridge.VirtualSwitchName,
 				PortBindings: c.hostConfig.PortBindings,
 
 				// TODO Windows. Include IPAddress. There already is a
@@ -118,7 +113,7 @@ func populateCommand(c *Container, env []string) error {
 	// TODO Windows: Factor out remainder of unused fields.
 	c.command = &execdriver.Command{
 		ID:             c.ID,
-		Rootfs:         c.RootfsPath(),
+		Rootfs:         c.rootfsPath(),
 		ReadonlyRootfs: c.hostConfig.ReadonlyRootfs,
 		InitPath:       "/.dockerinit",
 		WorkingDir:     c.Config.WorkingDir,
@@ -128,8 +123,8 @@ func populateCommand(c *Container, env []string) error {
 		CapAdd:         c.hostConfig.CapAdd.Slice(),
 		CapDrop:        c.hostConfig.CapDrop.Slice(),
 		ProcessConfig:  processConfig,
-		ProcessLabel:   c.GetProcessLabel(),
-		MountLabel:     c.GetMountLabel(),
+		ProcessLabel:   c.getProcessLabel(),
+		MountLabel:     c.getMountLabel(),
 		FirstStart:     !c.HasBeenStartedBefore,
 		LayerFolder:    layerFolder,
 		LayerPaths:     layerPaths,
@@ -138,28 +133,33 @@ func populateCommand(c *Container, env []string) error {
 	return nil
 }
 
-// GetSize, return real size, virtual size
-func (container *Container) GetSize() (int64, int64) {
+// getSize returns the real size & virtual size.
+func (container *Container) getSize() (int64, int64) {
 	// TODO Windows
 	return 0, 0
 }
 
-func (container *Container) AllocateNetwork() error {
+// allocateNetwork is a no-op on Windows.
+func (container *Container) allocateNetwork() error {
 	return nil
 }
 
-func (container *Container) UpdateNetwork() error {
-	return nil
+func (container *Container) exportRw() (archive.Archive, error) {
+	if container.IsRunning() {
+		return nil, fmt.Errorf("Cannot export a running container.")
+	}
+	// TODO Windows. Implementation (different to Linux)
+	return nil, nil
 }
 
-func (container *Container) ReleaseNetwork() {
+func (container *Container) updateNetwork() error {
+	return nil
 }
 
-func (container *Container) RestoreNetwork() error {
-	return nil
+func (container *Container) releaseNetwork() {
 }
 
-func (container *Container) UnmountVolumes(forceSyscall bool) error {
+func (container *Container) unmountVolumes(forceSyscall bool) error {
 	return nil
 }
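
The Windows file above stubs out unix-only behavior such as
unmountVolumes. Go picks one implementation per platform from
separate files; a minimal illustration of that convention with
hypothetical files (the daemon relies on the _unix.go/_windows.go
split shown in this commit):

    // file: vol_unix.go (built everywhere except Windows)
    //go:build !windows

    package vol

    func unmountVolumes() error {
        // platform-specific unmount work would go here
        return nil
    }

    // file: vol_windows.go (built only on Windows)
    //go:build windows

    package vol

    func unmountVolumes() error {
        return nil // no-op on Windows, as in the diff above
    }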
 

+ 5 - 4
daemon/create.go

@@ -13,6 +13,7 @@ import (
 	"github.com/opencontainers/runc/libcontainer/label"
 )
 
+// ContainerCreate takes configs and creates a container.
 func (daemon *Daemon) ContainerCreate(name string, config *runconfig.Config, hostConfig *runconfig.HostConfig, adjustCPUShares bool) (*Container, []string, error) {
 	if config == nil {
 		return nil, nil, fmt.Errorf("Config cannot be empty in order to create a container")
@@ -70,7 +71,7 @@ func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.Hos
 		hostConfig = &runconfig.HostConfig{}
 	}
 	if hostConfig.SecurityOpt == nil {
-		hostConfig.SecurityOpt, err = daemon.GenerateSecurityOpt(hostConfig.IpcMode, hostConfig.PidMode)
+		hostConfig.SecurityOpt, err = daemon.generateSecurityOpt(hostConfig.IpcMode, hostConfig.PidMode)
 		if err != nil {
 			return nil, nil, err
 		}
@@ -104,15 +105,15 @@ func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.Hos
 		return nil, nil, err
 	}
 
-	if err := container.ToDisk(); err != nil {
+	if err := container.toDiskLocking(); err != nil {
 		logrus.Errorf("Error saving new container to disk: %v", err)
 		return nil, nil, err
 	}
-	container.LogEvent("create")
+	container.logEvent("create")
 	return container, warnings, nil
 }
 
-func (daemon *Daemon) GenerateSecurityOpt(ipcMode runconfig.IpcMode, pidMode runconfig.PidMode) ([]string, error) {
+func (daemon *Daemon) generateSecurityOpt(ipcMode runconfig.IpcMode, pidMode runconfig.PidMode) ([]string, error) {
 	if ipcMode.IsHost() || pidMode.IsHost() {
 		return label.DisableSecOpt(), nil
 	}

+ 82 - 51
daemon/daemon.go

@@ -1,3 +1,8 @@
+// Package daemon exposes the functions that occur on the host server
+// where the Docker daemon is running.
+//
+// In implementing the various functions of the daemon, there is often
+// a method-specific struct for configuring the runtime behavior.
 package daemon
 
 import (
@@ -19,6 +24,7 @@ import (
 	"github.com/docker/docker/daemon/execdriver"
 	"github.com/docker/docker/daemon/execdriver/execdrivers"
 	"github.com/docker/docker/daemon/graphdriver"
+	// register the vfs graphdriver
 	_ "github.com/docker/docker/daemon/graphdriver/vfs"
 	"github.com/docker/docker/daemon/logger"
 	"github.com/docker/docker/daemon/network"
@@ -47,7 +53,7 @@ var (
 	validContainerNameChars   = `[a-zA-Z0-9][a-zA-Z0-9_.-]`
 	validContainerNamePattern = regexp.MustCompile(`^/?` + validContainerNameChars + `+$`)
 
-	ErrSystemNotSupported = errors.New("The Docker daemon is not supported on this platform.")
+	errSystemNotSupported = errors.New("The Docker daemon is not supported on this platform.")
 )
 
 type contStore struct {
@@ -81,10 +87,11 @@ func (c *contStore) List() []*Container {
 		containers.Add(cont)
 	}
 	c.Unlock()
-	containers.Sort()
+	containers.sort()
 	return *containers
 }
 
+// Daemon holds information about the Docker daemon.
 type Daemon struct {
 	ID               string
 	repository       string
@@ -94,8 +101,8 @@ type Daemon struct {
 	graph            *graph.Graph
 	repositories     *graph.TagStore
 	idIndex          *truncindex.TruncIndex
-	config           *Config
-	containerGraph   *graphdb.Database
+	configStore      *Config
+	containerGraphDB *graphdb.Database
 	driver           graphdriver.Driver
 	execDriver       execdriver.Driver
 	statsCollector   *statsCollector
@@ -127,11 +134,11 @@ func (daemon *Daemon) Get(prefixOrName string) (*Container, error) {
 		return containerByName, nil
 	}
 
-	containerId, indexError := daemon.idIndex.Get(prefixOrName)
+	containerID, indexError := daemon.idIndex.Get(prefixOrName)
 	if indexError != nil {
 		return nil, indexError
 	}
-	return daemon.containers.Get(containerId), nil
+	return daemon.containers.Get(containerID), nil
 }
 
 // Exists returns a true if a container of the specified ID or name exists,
@@ -150,7 +157,7 @@ func (daemon *Daemon) containerRoot(id string) string {
 func (daemon *Daemon) load(id string) (*Container, error) {
 	container := daemon.newBaseContainer(id)
 
-	if err := container.FromDisk(); err != nil {
+	if err := container.fromDisk(); err != nil {
 		return nil, err
 	}
 
@@ -200,8 +207,7 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool) err
 	if container.IsRunning() {
 		logrus.Debugf("killing old running container %s", container.ID)
 		// Set exit code to 128 + SIGKILL (9) to properly represent unsuccessful exit
-		container.SetStopped(&execdriver.ExitStatus{ExitCode: 137})
-
+		container.setStoppedLocking(&execdriver.ExitStatus{ExitCode: 137})
 		// use the current driver and ensure that the container is dead x.x
 		cmd := &execdriver.Command{
 			ID: container.ID,
@@ -211,7 +217,7 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool) err
 		if err := container.Unmount(); err != nil {
 			logrus.Debugf("unmount error %s", err)
 		}
-		if err := container.ToDisk(); err != nil {
+		if err := container.toDiskLocking(); err != nil {
 			logrus.Errorf("Error saving stopped state to disk: %v", err)
 		}
 	}
@@ -235,7 +241,7 @@ func (daemon *Daemon) ensureName(container *Container) error {
 		}
 		container.Name = name
 
-		if err := container.ToDisk(); err != nil {
+		if err := container.toDiskLocking(); err != nil {
 			logrus.Errorf("Error saving container name to disk: %v", err)
 		}
 	}
@@ -283,7 +289,7 @@ func (daemon *Daemon) restore() error {
 		}
 	}
 
-	if entities := daemon.containerGraph.List("/", -1); entities != nil {
+	if entities := daemon.containerGraphDB.List("/", -1); entities != nil {
 		for _, p := range entities.Paths() {
 			if !debug && logrus.GetLevel() == logrus.InfoLevel {
 				fmt.Print(".")
@@ -318,7 +324,7 @@ func (daemon *Daemon) restore() error {
 
 			// check the restart policy on the containers and restart any container with
 			// the restart policy of "always"
-			if daemon.config.AutoRestart && container.shouldRestart() {
+			if daemon.configStore.AutoRestart && container.shouldRestart() {
 				logrus.Debugf("Starting container %s", container.ID)
 
 				if err := container.Start(); err != nil {
@@ -351,7 +357,7 @@ func (daemon *Daemon) mergeAndVerifyConfig(config *runconfig.Config, img *image.
 	return nil
 }
 
-func (daemon *Daemon) generateIdAndName(name string) (string, string, error) {
+func (daemon *Daemon) generateIDAndName(name string) (string, string, error) {
 	var (
 		err error
 		id  = stringid.GenerateNonCryptoID()
@@ -380,7 +386,7 @@ func (daemon *Daemon) reserveName(id, name string) (string, error) {
 		name = "/" + name
 	}
 
-	if _, err := daemon.containerGraph.Set(name, id); err != nil {
+	if _, err := daemon.containerGraphDB.Set(name, id); err != nil {
 		if !graphdb.IsNonUniqueNameError(err) {
 			return "", err
 		}
@@ -392,7 +398,7 @@ func (daemon *Daemon) reserveName(id, name string) (string, error) {
 			}
 
 			// Remove name and continue starting the container
-			if err := daemon.containerGraph.Delete(name); err != nil {
+			if err := daemon.containerGraphDB.Delete(name); err != nil {
 				return "", err
 			}
 		} else {
@@ -413,7 +419,7 @@ func (daemon *Daemon) generateNewName(id string) (string, error) {
 			name = "/" + name
 		}
 
-		if _, err := daemon.containerGraph.Set(name, id); err != nil {
+		if _, err := daemon.containerGraphDB.Set(name, id); err != nil {
 			if !graphdb.IsNonUniqueNameError(err) {
 				return "", err
 			}
@@ -423,7 +429,7 @@ func (daemon *Daemon) generateNewName(id string) (string, error) {
 	}
 
 	name = "/" + stringid.TruncateID(id)
-	if _, err := daemon.containerGraph.Set(name, id); err != nil {
+	if _, err := daemon.containerGraphDB.Set(name, id); err != nil {
 		return "", err
 	}
 	return name, nil
@@ -460,7 +466,7 @@ func (daemon *Daemon) newContainer(name string, config *runconfig.Config, imgID
 		id  string
 		err error
 	)
-	id, name, err = daemon.generateIdAndName(name)
+	id, name, err = daemon.generateIDAndName(name)
 	if err != nil {
 		return nil, err
 	}
@@ -483,6 +489,9 @@ func (daemon *Daemon) newContainer(name string, config *runconfig.Config, imgID
 	return &base, err
 }
 
+// GetFullContainerName returns a constructed container name. I think
+// it has to do with the fact that a container is a file on disk and
+// this is sort of just creating a file name.
 func GetFullContainerName(name string) (string, error) {
 	if name == "" {
 		return "", fmt.Errorf("Container name cannot be empty")
@@ -493,12 +502,13 @@ func GetFullContainerName(name string) (string, error) {
 	return name, nil
 }
 
+// GetByName returns a container given a name.
 func (daemon *Daemon) GetByName(name string) (*Container, error) {
 	fullName, err := GetFullContainerName(name)
 	if err != nil {
 		return nil, err
 	}
-	entity := daemon.containerGraph.Get(fullName)
+	entity := daemon.containerGraphDB.Get(fullName)
 	if entity == nil {
 		return nil, fmt.Errorf("Could not find entity for %s", name)
 	}
@@ -509,14 +519,17 @@ func (daemon *Daemon) GetByName(name string) (*Container, error) {
 	return e, nil
 }
 
-func (daemon *Daemon) Children(name string) (map[string]*Container, error) {
+// children returns all child containers of the container with the
+// given name. The containers are returned as a map from the container
+// name to a pointer to Container.
+func (daemon *Daemon) children(name string) (map[string]*Container, error) {
 	name, err := GetFullContainerName(name)
 	if err != nil {
 		return nil, err
 	}
 	children := make(map[string]*Container)
 
-	err = daemon.containerGraph.Walk(name, func(p string, e *graphdb.Entity) error {
+	err = daemon.containerGraphDB.Walk(name, func(p string, e *graphdb.Entity) error {
 		c, err := daemon.Get(e.ID())
 		if err != nil {
 			return err
@@ -531,24 +544,28 @@ func (daemon *Daemon) Children(name string) (map[string]*Container, error) {
 	return children, nil
 }
 
-func (daemon *Daemon) Parents(name string) ([]string, error) {
+// parents returns the names of the parent containers of the container
+// with the given name.
+func (daemon *Daemon) parents(name string) ([]string, error) {
 	name, err := GetFullContainerName(name)
 	if err != nil {
 		return nil, err
 	}
 
-	return daemon.containerGraph.Parents(name)
+	return daemon.containerGraphDB.Parents(name)
 }
 
-func (daemon *Daemon) RegisterLink(parent, child *Container, alias string) error {
+func (daemon *Daemon) registerLink(parent, child *Container, alias string) error {
 	fullName := filepath.Join(parent.Name, alias)
-	if !daemon.containerGraph.Exists(fullName) {
-		_, err := daemon.containerGraph.Set(fullName, child.ID)
+	if !daemon.containerGraphDB.Exists(fullName) {
+		_, err := daemon.containerGraphDB.Set(fullName, child.ID)
 		return err
 	}
 	return nil
 }
 
+// NewDaemon sets up everything for the daemon to be able to service
+// requests from the webserver.
 func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemon, err error) {
 	setDefaultMtu(config)
 
@@ -562,7 +579,7 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo
 
 	// Verify the platform is supported as a daemon
 	if !platformSupported {
-		return nil, ErrSystemNotSupported
+		return nil, errSystemNotSupported
 	}
 
 	// Validate platform-specific requirements
@@ -705,7 +722,7 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo
 		return nil, err
 	}
 
-	d.containerGraph = graph
+	d.containerGraphDB = graph
 
 	var sysInitPath string
 	if config.ExecDriver == "lxc" {
@@ -735,7 +752,7 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo
 	d.graph = g
 	d.repositories = repositories
 	d.idIndex = truncindex.NewTruncIndex([]string{})
-	d.config = config
+	d.configStore = config
 	d.sysInitPath = sysInitPath
 	d.execDriver = ed
 	d.statsCollector = newStatsCollector(1 * time.Second)
@@ -753,6 +770,7 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo
 	return d, nil
 }
 
+// Shutdown stops the daemon.
 func (daemon *Daemon) Shutdown() error {
 	daemon.shutdown = true
 	if daemon.containers != nil {
@@ -767,7 +785,7 @@ func (daemon *Daemon) Shutdown() error {
 				go func() {
 					defer group.Done()
 					// TODO(windows): Handle docker restart with paused containers
-					if c.IsPaused() {
+					if c.isPaused() {
 						// To terminate a process in freezer cgroup, we should send
 						// SIGTERM to this process then unfreeze it, and the process will
 						// force to terminate immediately.
@@ -777,11 +795,11 @@ func (daemon *Daemon) Shutdown() error {
 							logrus.Warnf("System does not support SIGTERM")
 							return
 						}
-						if err := daemon.Kill(c, int(sig)); err != nil {
+						if err := daemon.kill(c, int(sig)); err != nil {
 							logrus.Debugf("sending SIGTERM to container %s with error: %v", c.ID, err)
 							return
 						}
-						if err := c.Unpause(); err != nil {
+						if err := c.unpause(); err != nil {
 							logrus.Debugf("Failed to unpause container %s with error: %v", c.ID, err)
 							return
 						}
@@ -792,7 +810,7 @@ func (daemon *Daemon) Shutdown() error {
 								logrus.Warnf("System does not support SIGKILL")
 								return
 							}
-							daemon.Kill(c, int(sig))
+							daemon.kill(c, int(sig))
 						}
 					} else {
 						// If container failed to exit in 10 seconds of SIGTERM, then using the force
@@ -813,8 +831,8 @@ func (daemon *Daemon) Shutdown() error {
 		}
 	}
 
-	if daemon.containerGraph != nil {
-		if err := daemon.containerGraph.Close(); err != nil {
+	if daemon.containerGraphDB != nil {
+		if err := daemon.containerGraphDB.Close(); err != nil {
 			logrus.Errorf("Error during container graph.Close(): %v", err)
 		}
 	}
@@ -828,8 +846,10 @@ func (daemon *Daemon) Shutdown() error {
 	return nil
 }
 
+// Mount mounts the container's filesystem via the graph driver and
+// records the returned mount point in container.basefs.
 func (daemon *Daemon) Mount(container *Container) error {
-	dir, err := daemon.driver.Get(container.ID, container.GetMountLabel())
+	dir, err := daemon.driver.Get(container.ID, container.getMountLabel())
 	if err != nil {
 		return fmt.Errorf("Error getting container %s from driver %s: %s", container.ID, daemon.driver, err)
 	}
@@ -848,24 +868,24 @@ func (daemon *Daemon) Mount(container *Container) error {
 	return nil
 }
 
-func (daemon *Daemon) Unmount(container *Container) error {
+func (daemon *Daemon) unmount(container *Container) error {
 	daemon.driver.Put(container.ID)
 	return nil
 }
 
-func (daemon *Daemon) Run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) {
+func (daemon *Daemon) run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) {
 	return daemon.execDriver.Run(c.command, pipes, startCallback)
 }
 
-func (daemon *Daemon) Kill(c *Container, sig int) error {
+func (daemon *Daemon) kill(c *Container, sig int) error {
 	return daemon.execDriver.Kill(c.command, sig)
 }
 
-func (daemon *Daemon) Stats(c *Container) (*execdriver.ResourceStats, error) {
+func (daemon *Daemon) stats(c *Container) (*execdriver.ResourceStats, error) {
 	return daemon.execDriver.Stats(c.ID)
 }
 
-func (daemon *Daemon) SubscribeToContainerStats(name string) (chan interface{}, error) {
+func (daemon *Daemon) subscribeToContainerStats(name string) (chan interface{}, error) {
 	c, err := daemon.Get(name)
 	if err != nil {
 		return nil, err
@@ -874,7 +894,7 @@ func (daemon *Daemon) SubscribeToContainerStats(name string) (chan interface{},
 	return ch, nil
 }
 
-func (daemon *Daemon) UnsubscribeToContainerStats(name string, ch chan interface{}) error {
+func (daemon *Daemon) unsubscribeToContainerStats(name string, ch chan interface{}) error {
 	c, err := daemon.Get(name)
 	if err != nil {
 		return err
@@ -883,12 +903,12 @@ func (daemon *Daemon) UnsubscribeToContainerStats(name string, ch chan interface
 	return nil
 }
 
-func (daemon *Daemon) Changes(container *Container) ([]archive.Change, error) {
+func (daemon *Daemon) changes(container *Container) ([]archive.Change, error) {
 	initID := fmt.Sprintf("%s-init", container.ID)
 	return daemon.driver.Changes(container.ID, initID)
 }
 
-func (daemon *Daemon) Diff(container *Container) (archive.Archive, error) {
+func (daemon *Daemon) diff(container *Container) (archive.Archive, error) {
 	initID := fmt.Sprintf("%s-init", container.ID)
 	return daemon.driver.Diff(container.ID, initID)
 }
@@ -923,6 +943,8 @@ func (daemon *Daemon) createRootfs(container *Container) error {
 	return nil
 }
 
+// Graph returns the daemon's image graph. It needs to be removed.
+//
 // FIXME: this is a convenience function for integration tests
 // which need direct access to daemon.graph.
 // Once the tests switch to using engine and jobs, this method
@@ -931,30 +953,39 @@ func (daemon *Daemon) Graph() *graph.Graph {
 	return daemon.graph
 }
 
+// Repositories returns all repositories.
 func (daemon *Daemon) Repositories() *graph.TagStore {
 	return daemon.repositories
 }
 
-func (daemon *Daemon) Config() *Config {
-	return daemon.config
+func (daemon *Daemon) config() *Config {
+	return daemon.configStore
 }
 
-func (daemon *Daemon) SystemInitPath() string {
+func (daemon *Daemon) systemInitPath() string {
 	return daemon.sysInitPath
 }
 
+// GraphDriver returns the currently used driver for processing
+// container layers.
 func (daemon *Daemon) GraphDriver() graphdriver.Driver {
 	return daemon.driver
 }
 
+// ExecutionDriver returns the currently used driver for creating and
+// starting execs in a container.
 func (daemon *Daemon) ExecutionDriver() execdriver.Driver {
 	return daemon.execDriver
 }
 
-func (daemon *Daemon) ContainerGraph() *graphdb.Database {
-	return daemon.containerGraph
+func (daemon *Daemon) containerGraph() *graphdb.Database {
+	return daemon.containerGraphDB
 }
 
+// ImageGetCached returns the earliest created image that is a child
+// of the image with imgID, that had the same config when it was
+// created. nil is returned if a child cannot be found. An error is
+// returned if the parent image cannot be found.
 func (daemon *Daemon) ImageGetCached(imgID string, config *runconfig.Config) (*image.Image, error) {
 	// Retrieve all images
 	images := daemon.Graph().Map()
@@ -1010,7 +1041,7 @@ func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig.
 	container.Lock()
 	defer container.Unlock()
 	// Register any links from the host config before starting the container
-	if err := daemon.RegisterLinks(container, hostConfig); err != nil {
+	if err := daemon.registerLinks(container, hostConfig); err != nil {
 		return err
 	}
 

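A note on ImageGetCached above: the lookup is small enough to sketch in isolation. The following is a minimal, self-contained illustration of the described behavior — a hypothetical image type and a plain string standing in for the daemon's image.Image and runconfig.Config — not the actual implementation:

    package main

    import "fmt"

    // image is a hypothetical stand-in for the daemon's image type.
    type image struct {
        ID      string
        Parent  string
        Created int64
        Config  string // stands in for a comparable runconfig.Config
    }

    // getCached returns the earliest-created child of parentID whose
    // config matches, or nil if no child does.
    func getCached(images []image, parentID, config string) *image {
        var match *image
        for i := range images {
            img := &images[i]
            if img.Parent != parentID || img.Config != config {
                continue
            }
            if match == nil || img.Created < match.Created {
                match = img
            }
        }
        return match
    }

    func main() {
        images := []image{
            {ID: "b", Parent: "a", Created: 20, Config: "RUN apt-get update"},
            {ID: "c", Parent: "a", Created: 10, Config: "RUN apt-get update"},
        }
        fmt.Println(getCached(images, "a", "RUN apt-get update").ID) // prints "c"
    }
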
+ 1 - 0
daemon/daemon_btrfs.go

@@ -3,5 +3,6 @@
 package daemon
 
 import (
+	// register the btrfs graphdriver
 	_ "github.com/docker/docker/daemon/graphdriver/btrfs"
 )

+ 1 - 0
daemon/daemon_devicemapper.go

@@ -3,5 +3,6 @@
 package daemon
 
 import (
+	// register the devmapper graphdriver
 	_ "github.com/docker/docker/daemon/graphdriver/devmapper"
 )

+ 1 - 0
daemon/daemon_overlay.go

@@ -3,5 +3,6 @@
 package daemon
 
 import (
+	// register the overlay graphdriver
 	_ "github.com/docker/docker/daemon/graphdriver/overlay"
 )

+ 27 - 27
daemon/daemon_test.go

@@ -86,9 +86,9 @@ func TestGet(t *testing.T) {
 	graph.Set(c5.Name, c5.ID)
 
 	daemon := &Daemon{
-		containers:     store,
-		idIndex:        index,
-		containerGraph: graph,
+		containers:       store,
+		idIndex:          index,
+		containerGraphDB: graph,
 	}
 
 	if container, _ := daemon.Get("3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de"); container != c2 {
@@ -130,15 +130,15 @@ func TestLoadWithVolume(t *testing.T) {
 	}
 	defer os.RemoveAll(tmp)
 
-	containerId := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e"
-	containerPath := filepath.Join(tmp, containerId)
+	containerID := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e"
+	containerPath := filepath.Join(tmp, containerID)
 	if err := os.MkdirAll(containerPath, 0755); err != nil {
 		t.Fatal(err)
 	}
 
-	hostVolumeId := stringid.GenerateNonCryptoID()
-	vfsPath := filepath.Join(tmp, "vfs", "dir", hostVolumeId)
-	volumePath := filepath.Join(tmp, "volumes", hostVolumeId)
+	hostVolumeID := stringid.GenerateNonCryptoID()
+	vfsPath := filepath.Join(tmp, "vfs", "dir", hostVolumeID)
+	volumePath := filepath.Join(tmp, "volumes", hostVolumeID)
 
 	if err := os.MkdirAll(vfsPath, 0755); err != nil {
 		t.Fatal(err)
@@ -187,7 +187,7 @@ func TestLoadWithVolume(t *testing.T) {
 	}
 	defer volumedrivers.Unregister(volume.DefaultDriverName)
 
-	c, err := daemon.load(containerId)
+	c, err := daemon.load(containerID)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -202,8 +202,8 @@ func TestLoadWithVolume(t *testing.T) {
 	}
 
 	m := c.MountPoints["/vol1"]
-	if m.Name != hostVolumeId {
-		t.Fatalf("Expected mount name to be %s, was %s\n", hostVolumeId, m.Name)
+	if m.Name != hostVolumeID {
+		t.Fatalf("Expected mount name to be %s, was %s\n", hostVolumeID, m.Name)
 	}
 
 	if m.Destination != "/vol1" {
@@ -235,8 +235,8 @@ func TestLoadWithBindMount(t *testing.T) {
 	}
 	defer os.RemoveAll(tmp)
 
-	containerId := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e"
-	containerPath := filepath.Join(tmp, containerId)
+	containerID := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e"
+	containerPath := filepath.Join(tmp, containerID)
 	if err = os.MkdirAll(containerPath, 0755); err != nil {
 		t.Fatal(err)
 	}
@@ -275,7 +275,7 @@ func TestLoadWithBindMount(t *testing.T) {
 	}
 	defer volumedrivers.Unregister(volume.DefaultDriverName)
 
-	c, err := daemon.load(containerId)
+	c, err := daemon.load(containerID)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -314,14 +314,14 @@ func TestLoadWithVolume17RC(t *testing.T) {
 	}
 	defer os.RemoveAll(tmp)
 
-	containerId := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e"
-	containerPath := filepath.Join(tmp, containerId)
+	containerID := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e"
+	containerPath := filepath.Join(tmp, containerID)
 	if err := os.MkdirAll(containerPath, 0755); err != nil {
 		t.Fatal(err)
 	}
 
-	hostVolumeId := "6a3c03fc4a4e588561a543cc3bdd50089e27bd11bbb0e551e19bf735e2514101"
-	volumePath := filepath.Join(tmp, "volumes", hostVolumeId)
+	hostVolumeID := "6a3c03fc4a4e588561a543cc3bdd50089e27bd11bbb0e551e19bf735e2514101"
+	volumePath := filepath.Join(tmp, "volumes", hostVolumeID)
 
 	if err := os.MkdirAll(volumePath, 0755); err != nil {
 		t.Fatal(err)
@@ -366,7 +366,7 @@ func TestLoadWithVolume17RC(t *testing.T) {
 	}
 	defer volumedrivers.Unregister(volume.DefaultDriverName)
 
-	c, err := daemon.load(containerId)
+	c, err := daemon.load(containerID)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -381,8 +381,8 @@ func TestLoadWithVolume17RC(t *testing.T) {
 	}
 
 	m := c.MountPoints["/vol1"]
-	if m.Name != hostVolumeId {
-		t.Fatalf("Expected mount name to be %s, was %s\n", hostVolumeId, m.Name)
+	if m.Name != hostVolumeID {
+		t.Fatalf("Expected mount name to be %s, was %s\n", hostVolumeID, m.Name)
 	}
 
 	if m.Destination != "/vol1" {
@@ -414,15 +414,15 @@ func TestRemoveLocalVolumesFollowingSymlinks(t *testing.T) {
 	}
 	defer os.RemoveAll(tmp)
 
-	containerId := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e"
-	containerPath := filepath.Join(tmp, containerId)
+	containerID := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e"
+	containerPath := filepath.Join(tmp, containerID)
 	if err := os.MkdirAll(containerPath, 0755); err != nil {
 		t.Fatal(err)
 	}
 
-	hostVolumeId := stringid.GenerateNonCryptoID()
-	vfsPath := filepath.Join(tmp, "vfs", "dir", hostVolumeId)
-	volumePath := filepath.Join(tmp, "volumes", hostVolumeId)
+	hostVolumeID := stringid.GenerateNonCryptoID()
+	vfsPath := filepath.Join(tmp, "vfs", "dir", hostVolumeID)
+	volumePath := filepath.Join(tmp, "volumes", hostVolumeID)
 
 	if err := os.MkdirAll(vfsPath, 0755); err != nil {
 		t.Fatal(err)
@@ -471,7 +471,7 @@ func TestRemoveLocalVolumesFollowingSymlinks(t *testing.T) {
 	}
 	defer volumedrivers.Unregister(volume.DefaultDriverName)
 
-	c, err := daemon.load(containerId)
+	c, err := daemon.load(containerID)
 	if err != nil {
 		t.Fatal(err)
 	}

+ 10 - 8
daemon/daemon_unix.go

@@ -63,7 +63,7 @@ func parseSecurityOpt(container *Container, config *runconfig.HostConfig) error
 	return err
 }
 
-func CheckKernelVersion(k, major, minor int) bool {
+func checkKernelVersion(k, major, minor int) bool {
 	if v, err := kernel.GetKernelVersion(); err != nil {
 		logrus.Warnf("%s", err)
 	} else {
@@ -82,7 +82,7 @@ func checkKernel() error {
 	// without actually causing a kernel panic, so we need this workaround until
 	// the circumstances of pre-3.10 crashes are clearer.
 	// For details see https://github.com/docker/docker/issues/407
-	if !CheckKernelVersion(3, 10, 0) {
+	if !checkKernelVersion(3, 10, 0) {
 		v, _ := kernel.GetKernelVersion()
 		if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" {
 			logrus.Warnf("Your Linux kernel version %s can be unstable running docker. Please upgrade your kernel to 3.10.0.", v.String())
@@ -161,7 +161,7 @@ func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *runconfig.HostC
 		logrus.Warnf("Your kernel does not support kernel memory limit capabilities. Limitation discarded.")
 		hostConfig.KernelMemory = 0
 	}
-	if hostConfig.KernelMemory > 0 && !CheckKernelVersion(4, 0, 0) {
+	if hostConfig.KernelMemory > 0 && !checkKernelVersion(4, 0, 0) {
 		warnings = append(warnings, "You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.")
 		logrus.Warnf("You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.")
 	}
@@ -194,7 +194,6 @@ func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *runconfig.HostC
 	if hostConfig.BlkioWeight > 0 && (hostConfig.BlkioWeight < 10 || hostConfig.BlkioWeight > 1000) {
 		return warnings, fmt.Errorf("Range of blkio weight is from 10 to 1000.")
 	}
-
 	if hostConfig.OomKillDisable && !sysInfo.OomKillDisable {
 		hostConfig.OomKillDisable = false
 		return warnings, fmt.Errorf("Your kernel does not support oom kill disable.")
@@ -494,11 +493,14 @@ func setupInitLayer(initLayer string) error {
 	return nil
 }
 
-func (daemon *Daemon) NetworkApiRouter() func(w http.ResponseWriter, req *http.Request) {
+// NetworkAPIRouter implements a feature for server-experimental,
+// directly calling into libnetwork.
+func (daemon *Daemon) NetworkAPIRouter() func(w http.ResponseWriter, req *http.Request) {
 	return nwapi.NewHTTPHandler(daemon.netController)
 }
 
-func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig.HostConfig) error {
+// registerLinks registers the container's links in the graph and
+// persists the updated host config to disk.
+func (daemon *Daemon) registerLinks(container *Container, hostConfig *runconfig.HostConfig) error {
 	if hostConfig == nil || hostConfig.Links == nil {
 		return nil
 	}
@@ -523,7 +525,7 @@ func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig.
 		if child.hostConfig.NetworkMode.IsHost() {
 			return runconfig.ErrConflictHostNetworkAndLinks
 		}
-		if err := daemon.RegisterLink(container, child, alias); err != nil {
+		if err := daemon.registerLink(container, child, alias); err != nil {
 			return err
 		}
 	}
@@ -531,7 +533,7 @@ func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig.
 	// After we load all the links into the daemon
 	// set them to nil on the hostconfig
 	hostConfig.Links = nil
-	if err := container.WriteHostConfig(); err != nil {
+	if err := container.writeHostConfig(); err != nil {
 		return err
 	}
 

+ 8 - 5
daemon/daemon_windows.go

@@ -6,6 +6,7 @@ import (
 	"syscall"
 
 	"github.com/docker/docker/daemon/graphdriver"
+	// register the windows graph driver
 	_ "github.com/docker/docker/daemon/graphdriver/windows"
 	"github.com/docker/docker/pkg/parsers"
 	"github.com/docker/docker/runconfig"
@@ -13,7 +14,7 @@ import (
 )
 
 const (
-	DefaultVirtualSwitch = "Virtual Switch"
+	defaultVirtualSwitch = "Virtual Switch"
 	platformSupported    = true
 )
 
@@ -91,12 +92,14 @@ func isBridgeNetworkDisabled(config *Config) bool {
 func initNetworkController(config *Config) (libnetwork.NetworkController, error) {
 	// Set the name of the virtual switch if not specified by -b on daemon start
 	if config.Bridge.VirtualSwitchName == "" {
-		config.Bridge.VirtualSwitchName = DefaultVirtualSwitch
+		config.Bridge.VirtualSwitchName = defaultVirtualSwitch
 	}
 	return nil, nil
 }
 
-func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig.HostConfig) error {
+// registerLinks sets up links between containers and writes the
+// configuration out for persistence.
+func (daemon *Daemon) registerLinks(container *Container, hostConfig *runconfig.HostConfig) error {
 	// TODO Windows. Factored out for network modes. There may be more
 	// refactoring required here.
 
@@ -114,7 +117,7 @@ func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig.
 			//An error from daemon.Get() means this name could not be found
 			return fmt.Errorf("Could not get container for %s", name)
 		}
-		if err := daemon.RegisterLink(container, child, alias); err != nil {
+		if err := daemon.registerLink(container, child, alias); err != nil {
 			return err
 		}
 	}
@@ -122,7 +125,7 @@ func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig.
 	// After we load all the links into the daemon
 	// set them to nil on the hostconfig
 	hostConfig.Links = nil
-	if err := container.WriteHostConfig(); err != nil {
+	if err := container.writeHostConfig(); err != nil {
 		return err
 	}
 	return nil

+ 1 - 0
daemon/daemon_zfs.go

@@ -3,5 +3,6 @@
 package daemon
 
 import (
+	// register the zfs driver
 	_ "github.com/docker/docker/daemon/graphdriver/zfs"
 )

+ 15 - 10
daemon/delete.go

@@ -8,10 +8,15 @@ import (
 	"github.com/Sirupsen/logrus"
 )
 
+// ContainerRmConfig is a holder for passing in runtime config.
 type ContainerRmConfig struct {
 	ForceRemove, RemoveVolume, RemoveLink bool
 }
 
+// ContainerRm removes the container with the given id from the filesystem. An error
+// is returned if the container is not found, or if the remove
+// fails. If the remove succeeds, the container name is released, and
+// network links are removed.
 func (daemon *Daemon) ContainerRm(name string, config *ContainerRmConfig) error {
 	container, err := daemon.Get(name)
 	if err != nil {
@@ -27,18 +32,18 @@ func (daemon *Daemon) ContainerRm(name string, config *ContainerRmConfig) error
 		if parent == "/" {
 			return fmt.Errorf("Conflict, cannot remove the default name of the container")
 		}
-		pe := daemon.ContainerGraph().Get(parent)
+		pe := daemon.containerGraph().Get(parent)
 		if pe == nil {
 			return fmt.Errorf("Cannot get parent %s for name %s", parent, name)
 		}
 
-		if err := daemon.ContainerGraph().Delete(name); err != nil {
+		if err := daemon.containerGraph().Delete(name); err != nil {
 			return err
 		}
 
 		parentContainer, _ := daemon.Get(pe.ID())
 		if parentContainer != nil {
-			if err := parentContainer.UpdateNetwork(); err != nil {
+			if err := parentContainer.updateNetwork(); err != nil {
 				logrus.Debugf("Could not update network to remove link %s: %v", n, err)
 			}
 		}
@@ -75,23 +80,23 @@ func (daemon *Daemon) rm(container *Container, forceRemove bool) (err error) {
 	}
 
 	// Container state RemovalInProgress should be used to avoid races.
-	if err = container.SetRemovalInProgress(); err != nil {
+	if err = container.setRemovalInProgress(); err != nil {
 		return fmt.Errorf("Failed to set container state to RemovalInProgress: %s", err)
 	}
 
-	defer container.ResetRemovalInProgress()
+	defer container.resetRemovalInProgress()
 
 	if err = container.Stop(3); err != nil {
 		return err
 	}
 
 	// Mark container dead. We don't want anybody to be restarting it.
-	container.SetDead()
+	container.setDead()
 
 	// Save container state to disk. So that if error happens before
 	// container meta file got removed from disk, then a restart of
 	// docker should not make a dead container alive.
-	if err := container.ToDisk(); err != nil {
+	if err := container.toDiskLocking(); err != nil {
 		logrus.Errorf("Error saving dying container to disk: %v", err)
 	}
 
@@ -102,11 +107,11 @@ func (daemon *Daemon) rm(container *Container, forceRemove bool) (err error) {
 			daemon.idIndex.Delete(container.ID)
 			daemon.containers.Delete(container.ID)
 			os.RemoveAll(container.root)
-			container.LogEvent("destroy")
+			container.logEvent("destroy")
 		}
 	}()
 
-	if _, err := daemon.containerGraph.Purge(container.ID); err != nil {
+	if _, err := daemon.containerGraphDB.Purge(container.ID); err != nil {
 		logrus.Debugf("Unable to remove container from link graph: %s", err)
 	}
 
@@ -131,7 +136,7 @@ func (daemon *Daemon) rm(container *Container, forceRemove bool) (err error) {
 	daemon.idIndex.Delete(container.ID)
 	daemon.containers.Delete(container.ID)
 
-	container.LogEvent("destroy")
+	container.logEvent("destroy")
 	return nil
 }
 

+ 59 - 53
daemon/exec.go

@@ -17,13 +17,16 @@ import (
 	"github.com/docker/docker/runconfig"
 )
 
-type execConfig struct {
+// ExecConfig holds the configurations for execs. The Daemon keeps
+// track of both running and finished execs so that they can be
+// examined both during and after completion.
+type ExecConfig struct {
 	sync.Mutex
 	ID            string
 	Running       bool
 	ExitCode      int
 	ProcessConfig *execdriver.ProcessConfig
-	StreamConfig
+	streamConfig
 	OpenStdin  bool
 	OpenStderr bool
 	OpenStdout bool
@@ -35,21 +38,21 @@ type execConfig struct {
 }
 
 type execStore struct {
-	s map[string]*execConfig
+	s map[string]*ExecConfig
 	sync.RWMutex
 }
 
 func newExecStore() *execStore {
-	return &execStore{s: make(map[string]*execConfig, 0)}
+	return &execStore{s: make(map[string]*ExecConfig, 0)}
 }
 
-func (e *execStore) Add(id string, execConfig *execConfig) {
+func (e *execStore) Add(id string, ExecConfig *ExecConfig) {
 	e.Lock()
-	e.s[id] = execConfig
+	e.s[id] = ExecConfig
 	e.Unlock()
 }
 
-func (e *execStore) Get(id string) *execConfig {
+func (e *execStore) Get(id string) *ExecConfig {
 	e.RLock()
 	res := e.s[id]
 	e.RUnlock()
@@ -72,24 +75,24 @@ func (e *execStore) List() []string {
 	return IDs
 }
 
-func (execConfig *execConfig) Resize(h, w int) error {
+func (ExecConfig *ExecConfig) resize(h, w int) error {
 	select {
-	case <-execConfig.waitStart:
+	case <-ExecConfig.waitStart:
 	case <-time.After(time.Second):
-		return fmt.Errorf("Exec %s is not running, so it can not be resized.", execConfig.ID)
+		return fmt.Errorf("Exec %s is not running, so it can not be resized.", ExecConfig.ID)
 	}
-	return execConfig.ProcessConfig.Terminal.Resize(h, w)
+	return ExecConfig.ProcessConfig.Terminal.Resize(h, w)
 }
 
-func (d *Daemon) registerExecCommand(execConfig *execConfig) {
+func (d *Daemon) registerExecCommand(ExecConfig *ExecConfig) {
 	// Storing execs in container in order to kill them gracefully whenever the container is stopped or removed.
-	execConfig.Container.execCommands.Add(execConfig.ID, execConfig)
+	ExecConfig.Container.execCommands.Add(ExecConfig.ID, ExecConfig)
 	// Storing execs in daemon for easy access via remote API.
-	d.execCommands.Add(execConfig.ID, execConfig)
+	d.execCommands.Add(ExecConfig.ID, ExecConfig)
 }
 
-func (d *Daemon) getExecConfig(name string) (*execConfig, error) {
-	execConfig := d.execCommands.Get(name)
+func (d *Daemon) getExecConfig(name string) (*ExecConfig, error) {
+	ExecConfig := d.execCommands.Get(name)
 
 	// If the exec is found but its container is not in the daemon's list of
 	// containers then it must have been deleted, in which case instead of
@@ -97,20 +100,20 @@ func (d *Daemon) getExecConfig(name string) (*execConfig, error) {
 	// the user sees the same error now that they will after the
 	// 5 minute clean-up loop is run which erases old/dead execs.
 
-	if execConfig != nil && d.containers.Get(execConfig.Container.ID) != nil {
+	if ExecConfig != nil && d.containers.Get(ExecConfig.Container.ID) != nil {
 
-		if !execConfig.Container.IsRunning() {
-			return nil, fmt.Errorf("Container %s is not running", execConfig.Container.ID)
+		if !ExecConfig.Container.IsRunning() {
+			return nil, fmt.Errorf("Container %s is not running", ExecConfig.Container.ID)
 		}
-		return execConfig, nil
+		return ExecConfig, nil
 	}
 
 	return nil, fmt.Errorf("No such exec instance '%s' found in daemon", name)
 }
 
-func (d *Daemon) unregisterExecCommand(execConfig *execConfig) {
-	execConfig.Container.execCommands.Delete(execConfig.ID)
-	d.execCommands.Delete(execConfig.ID)
+func (d *Daemon) unregisterExecCommand(ExecConfig *ExecConfig) {
+	ExecConfig.Container.execCommands.Delete(ExecConfig.ID)
+	d.execCommands.Delete(ExecConfig.ID)
 }
 
 func (d *Daemon) getActiveContainer(name string) (*Container, error) {
@@ -122,12 +125,13 @@ func (d *Daemon) getActiveContainer(name string) (*Container, error) {
 	if !container.IsRunning() {
 		return nil, fmt.Errorf("Container %s is not running", name)
 	}
-	if container.IsPaused() {
+	if container.isPaused() {
 		return nil, fmt.Errorf("Container %s is paused, unpause the container before exec", name)
 	}
 	return container, nil
 }
 
+// ContainerExecCreate sets up an exec in a running container.
 func (d *Daemon) ContainerExecCreate(config *runconfig.ExecConfig) (string, error) {
 	// Not all drivers support Exec (LXC for example)
 	if err := checkExecSupport(d.execDriver.Name()); err != nil {
@@ -155,55 +159,56 @@ func (d *Daemon) ContainerExecCreate(config *runconfig.ExecConfig) (string, erro
 		Privileged: config.Privileged,
 	}
 
-	execConfig := &execConfig{
+	ExecConfig := &ExecConfig{
 		ID:            stringid.GenerateNonCryptoID(),
 		OpenStdin:     config.AttachStdin,
 		OpenStdout:    config.AttachStdout,
 		OpenStderr:    config.AttachStderr,
-		StreamConfig:  StreamConfig{},
+		streamConfig:  streamConfig{},
 		ProcessConfig: processConfig,
 		Container:     container,
 		Running:       false,
 		waitStart:     make(chan struct{}),
 	}
 
-	d.registerExecCommand(execConfig)
+	d.registerExecCommand(ExecConfig)
 
-	container.LogEvent("exec_create: " + execConfig.ProcessConfig.Entrypoint + " " + strings.Join(execConfig.ProcessConfig.Arguments, " "))
-
-	return execConfig.ID, nil
+	container.logEvent("exec_create: " + ExecConfig.ProcessConfig.Entrypoint + " " + strings.Join(ExecConfig.ProcessConfig.Arguments, " "))
 
+	return ExecConfig.ID, nil
 }
 
+// ContainerExecStart starts a previously set up exec instance and
+// attaches the given standard streams to it.
 func (d *Daemon) ContainerExecStart(execName string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) error {
 	var (
 		cStdin           io.ReadCloser
 		cStdout, cStderr io.Writer
 	)
 
-	execConfig, err := d.getExecConfig(execName)
+	ExecConfig, err := d.getExecConfig(execName)
 	if err != nil {
 		return err
 	}
 
 	func() {
-		execConfig.Lock()
-		defer execConfig.Unlock()
-		if execConfig.Running {
+		ExecConfig.Lock()
+		defer ExecConfig.Unlock()
+		if ExecConfig.Running {
 			err = fmt.Errorf("Error: Exec command %s is already running", execName)
 		}
-		execConfig.Running = true
+		ExecConfig.Running = true
 	}()
 	if err != nil {
 		return err
 	}
 
-	logrus.Debugf("starting exec command %s in container %s", execConfig.ID, execConfig.Container.ID)
-	container := execConfig.Container
+	logrus.Debugf("starting exec command %s in container %s", ExecConfig.ID, ExecConfig.Container.ID)
+	container := ExecConfig.Container
 
-	container.LogEvent("exec_start: " + execConfig.ProcessConfig.Entrypoint + " " + strings.Join(execConfig.ProcessConfig.Arguments, " "))
+	container.logEvent("exec_start: " + ExecConfig.ProcessConfig.Entrypoint + " " + strings.Join(ExecConfig.ProcessConfig.Arguments, " "))
 
-	if execConfig.OpenStdin {
+	if ExecConfig.OpenStdin {
 		r, w := io.Pipe()
 		go func() {
 			defer w.Close()
@@ -212,32 +217,32 @@ func (d *Daemon) ContainerExecStart(execName string, stdin io.ReadCloser, stdout
 		}()
 		cStdin = r
 	}
-	if execConfig.OpenStdout {
+	if ExecConfig.OpenStdout {
 		cStdout = stdout
 	}
-	if execConfig.OpenStderr {
+	if ExecConfig.OpenStderr {
 		cStderr = stderr
 	}
 
-	execConfig.StreamConfig.stderr = broadcastwriter.New()
-	execConfig.StreamConfig.stdout = broadcastwriter.New()
+	ExecConfig.streamConfig.stderr = broadcastwriter.New()
+	ExecConfig.streamConfig.stdout = broadcastwriter.New()
 	// Attach to stdin
-	if execConfig.OpenStdin {
-		execConfig.StreamConfig.stdin, execConfig.StreamConfig.stdinPipe = io.Pipe()
+	if ExecConfig.OpenStdin {
+		ExecConfig.streamConfig.stdin, ExecConfig.streamConfig.stdinPipe = io.Pipe()
 	} else {
-		execConfig.StreamConfig.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
+		ExecConfig.streamConfig.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
 	}
 
-	attachErr := attach(&execConfig.StreamConfig, execConfig.OpenStdin, true, execConfig.ProcessConfig.Tty, cStdin, cStdout, cStderr)
+	attachErr := attach(&ExecConfig.streamConfig, ExecConfig.OpenStdin, true, ExecConfig.ProcessConfig.Tty, cStdin, cStdout, cStderr)
 
 	execErr := make(chan error)
 
-	// Note, the execConfig data will be removed when the container
+	// Note, the ExecConfig data will be removed when the container
 	// itself is deleted.  This allows us to query it (for things like
 	// the exitStatus) even after the cmd is done running.
 
 	go func() {
-		if err := container.Exec(execConfig); err != nil {
+		if err := container.exec(ExecConfig); err != nil {
 			execErr <- fmt.Errorf("Cannot run exec command %s in container %s: %s", execName, container.ID, err)
 		}
 	}()
@@ -260,16 +265,17 @@ func (d *Daemon) ContainerExecStart(execName string, stdin io.ReadCloser, stdout
 	}
 }
 
-func (d *Daemon) Exec(c *Container, execConfig *execConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
-	exitStatus, err := d.execDriver.Exec(c.command, execConfig.ProcessConfig, pipes, startCallback)
+// Exec calls the underlying exec driver to run the process, recording
+// the exit status in the ExecConfig.
+func (d *Daemon) Exec(c *Container, ExecConfig *ExecConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
+	exitStatus, err := d.execDriver.Exec(c.command, ExecConfig.ProcessConfig, pipes, startCallback)
 
 	// On err, make sure we don't leave ExitCode at zero
 	if err != nil && exitStatus == 0 {
 		exitStatus = 128
 	}
 
-	execConfig.ExitCode = exitStatus
-	execConfig.Running = false
+	ExecConfig.ExitCode = exitStatus
+	ExecConfig.Running = false
 
 	return exitStatus, err
 }

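One pattern in ContainerExecStart above deserves a note: the caller's stdin is not handed to the exec directly; it is copied into an io.Pipe so the exec side owns a ReadCloser whose EOF is controlled by the copy goroutine. A standalone sketch of the same pattern:

    package main

    import (
        "fmt"
        "io"
        "io/ioutil"
        "strings"
    )

    func main() {
        // stdin stands in for the caller-supplied stream.
        stdin := ioutil.NopCloser(strings.NewReader("hello exec\n"))

        r, w := io.Pipe()
        go func() {
            defer w.Close() // signals EOF to the reader once the copy ends
            io.Copy(w, stdin)
        }()

        // r is what would be handed to the exec'd process as its stdin.
        out, _ := ioutil.ReadAll(r)
        fmt.Print(string(out))
    }
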
+ 3 - 1
daemon/export.go

@@ -5,13 +5,15 @@ import (
 	"io"
 )
 
+// ContainerExport writes the contents of the container to the given
+// writer. An error is returned if the container cannot be found.
 func (daemon *Daemon) ContainerExport(name string, out io.Writer) error {
 	container, err := daemon.Get(name)
 	if err != nil {
 		return err
 	}
 
-	data, err := container.Export()
+	data, err := container.export()
 	if err != nil {
 		return fmt.Errorf("%s: %s", name, err)
 	}

+ 2 - 1
daemon/history.go

@@ -22,10 +22,11 @@ func (history *History) Swap(i, j int) {
 	containers[i], containers[j] = containers[j], containers[i]
 }
 
+// Add the given container to history.
 func (history *History) Add(container *Container) {
 	*history = append(*history, container)
 }
 
-func (history *History) Sort() {
+func (history *History) sort() {
 	sort.Sort(history)
 }

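History sorts itself through the standard sort.Interface; the Swap shown above is one of the three required methods. A minimal standalone example of the pattern, assuming a newest-first ordering like the real Less:

    package main

    import (
        "fmt"
        "sort"
    )

    // byCreated mimics History: a slice type implementing sort.Interface.
    type byCreated []int64

    func (h byCreated) Len() int           { return len(h) }
    func (h byCreated) Less(i, j int) bool { return h[i] > h[j] } // newest first
    func (h byCreated) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }

    func main() {
        h := byCreated{3, 1, 2}
        sort.Sort(h)
        fmt.Println(h) // [3 2 1]
    }
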
+ 1 - 0
daemon/image_delete.go

@@ -13,6 +13,7 @@ import (
 	"github.com/docker/docker/utils"
 )
 
+// ImageDelete removes the image from the filesystem.
 // FIXME: remove ImageDelete's dependency on Daemon, then move to graph/
 func (daemon *Daemon) ImageDelete(name string, force, noprune bool) ([]types.ImageDelete, error) {
 	list := []types.ImageDelete{}

+ 8 - 4
daemon/info.go

@@ -17,6 +17,7 @@ import (
 	"github.com/docker/docker/utils"
 )
 
+// SystemInfo returns information about the host server the daemon is running on.
 func (daemon *Daemon) SystemInfo() (*types.Info, error) {
 	images := daemon.Graph().Map()
 	var imgcount int
@@ -50,11 +51,14 @@ func (daemon *Daemon) SystemInfo() (*types.Info, error) {
 		logrus.Errorf("Could not read system memory info: %v", err)
 	}
 
-	// if we still have the original dockerinit binary from before we copied it locally, let's return the path to that, since that's more intuitive (the copied path is trivial to derive by hand given VERSION)
+	// if we still have the original dockerinit binary from before
+	// we copied it locally, let's return the path to that, since
+	// that's more intuitive (the copied path is trivial to derive
+	// by hand given VERSION)
 	initPath := utils.DockerInitPath("")
 	if initPath == "" {
 		// if that fails, we'll just return the path from the daemon
-		initPath = daemon.SystemInitPath()
+		initPath = daemon.systemInitPath()
 	}
 
 	sysInfo := sysinfo.New(false)
@@ -83,8 +87,8 @@ func (daemon *Daemon) SystemInfo() (*types.Info, error) {
 		InitPath:           initPath,
 		NCPU:               runtime.NumCPU(),
 		MemTotal:           meminfo.MemTotal,
-		DockerRootDir:      daemon.Config().Root,
-		Labels:             daemon.Config().Labels,
+		DockerRootDir:      daemon.config().Root,
+		Labels:             daemon.config().Labels,
 		ExperimentalBuild:  utils.ExperimentalBuild(),
 	}
 

+ 10 - 3
daemon/inspect.go

@@ -7,6 +7,9 @@ import (
 	"github.com/docker/docker/api/types"
 )
 
+// ContainerInspect returns low-level information about a
+// container. Returns an error if the container cannot be found, or if
+// there is an error getting the data.
 func (daemon *Daemon) ContainerInspect(name string) (*types.ContainerJSON, error) {
 	container, err := daemon.Get(name)
 	if err != nil {
@@ -30,7 +33,7 @@ func (daemon *Daemon) getInspectData(container *Container) (*types.ContainerJSON
 	// make a copy to play with
 	hostConfig := *container.hostConfig
 
-	if children, err := daemon.Children(container.Name); err == nil {
+	if children, err := daemon.children(container.Name); err == nil {
 		for linkAlias, child := range children {
 			hostConfig.Links = append(hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias))
 		}
@@ -73,7 +76,7 @@ func (daemon *Daemon) getInspectData(container *Container) (*types.ContainerJSON
 		ExecDriver:      container.ExecDriver,
 		MountLabel:      container.MountLabel,
 		ProcessLabel:    container.ProcessLabel,
-		ExecIDs:         container.GetExecIDs(),
+		ExecIDs:         container.getExecIDs(),
 		HostConfig:      &hostConfig,
 	}
 
@@ -90,7 +93,9 @@ func (daemon *Daemon) getInspectData(container *Container) (*types.ContainerJSON
 	return contJSONBase, nil
 }
 
-func (daemon *Daemon) ContainerExecInspect(id string) (*execConfig, error) {
+// ContainerExecInspect returns low-level information about the exec
+// command. An error is returned if the exec cannot be found.
+func (daemon *Daemon) ContainerExecInspect(id string) (*ExecConfig, error) {
 	eConfig, err := daemon.getExecConfig(id)
 	if err != nil {
 		return nil, err
@@ -98,6 +103,8 @@ func (daemon *Daemon) ContainerExecInspect(id string) (*execConfig, error) {
 	return eConfig, nil
 }
 
+// VolumeInspect looks up a volume by name. An error is returned if
+// the volume cannot be found.
 func (daemon *Daemon) VolumeInspect(name string) (*types.Volume, error) {
 	v, err := daemon.volumes.Get(name)
 	if err != nil {

+ 1 - 0
daemon/inspect_unix.go

@@ -14,6 +14,7 @@ func setPlatformSpecificContainerFields(container *Container, contJSONBase *type
 	return contJSONBase
 }
 
+// ContainerInspectPre120 is for backwards compatibility with pre v1.20 clients.
 func (daemon *Daemon) ContainerInspectPre120(name string) (*types.ContainerJSONPre120, error) {
 	container, err := daemon.Get(name)
 	if err != nil {

+ 1 - 1
daemon/kill.go

@@ -19,7 +19,7 @@ func (daemon *Daemon) ContainerKill(name string, sig uint64) error {
 		}
 	} else {
 		// Otherwise, just send the requested signal
-		if err := container.KillSig(int(sig)); err != nil {
+		if err := container.killSig(int(sig)); err != nil {
 			return err
 		}
 	}

+ 18 - 7
daemon/list.go

@@ -17,15 +17,24 @@ func (daemon *Daemon) List() []*Container {
 	return daemon.containers.List()
 }
 
+// ContainersConfig is a struct for configuring the command to list
+// containers.
 type ContainersConfig struct {
-	All     bool
-	Since   string
-	Before  string
-	Limit   int
-	Size    bool
+	// if true show all containers, otherwise only running containers.
+	All bool
+	// show all containers created after this container id
+	Since string
+	// show all containers created before this container id
+	Before string
+	// number of containers to return at most
+	Limit int
+	// if true include the sizes of the containers
+	Size bool
+	// return only containers that match filters
 	Filters string
 }
 
+// Containers returns the list of containers matching the given config.
 func (daemon *Daemon) Containers(config *ContainersConfig) ([]*types.Container, error) {
 	var (
 		foundBefore bool
@@ -62,7 +71,7 @@ func (daemon *Daemon) Containers(config *ContainersConfig) ([]*types.Container,
 		}
 	}
 	names := map[string][]string{}
-	daemon.ContainerGraph().Walk("/", func(p string, e *graphdb.Entity) error {
+	daemon.containerGraph().Walk("/", func(p string, e *graphdb.Entity) error {
 		names[e.ID()] = append(names[e.ID()], p)
 		return nil
 	}, 1)
@@ -195,7 +204,7 @@ func (daemon *Daemon) Containers(config *ContainersConfig) ([]*types.Container,
 		}
 
 		if config.Size {
-			sizeRw, sizeRootFs := container.GetSize()
+			sizeRw, sizeRootFs := container.getSize()
 			newC.SizeRw = sizeRw
 			newC.SizeRootFs = sizeRootFs
 		}
@@ -215,6 +224,8 @@ func (daemon *Daemon) Containers(config *ContainersConfig) ([]*types.Container,
 	return containers, nil
 }
 
+// Volumes lists known volumes, using the filter to restrict the range
+// of volumes returned.
 func (daemon *Daemon) Volumes(filter string) ([]*types.Volume, error) {
 	var volumesOut []*types.Volume
 	volFilters, err := filters.FromParam(filter)

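For illustration, the ContainersConfig fields above map directly onto docker ps flags; a hypothetical caller reproducing `docker ps -a -n 5 -s` would fill it like this (a sketch that assumes the daemon package is importable by the caller):

    package main

    import "github.com/docker/docker/daemon"

    func main() {
        // Roughly `docker ps -a -n 5 -s`: all containers, at most
        // five, with sizes included.
        config := &daemon.ContainersConfig{
            All:   true,
            Limit: 5,
            Size:  true,
        }
        _ = config // would be passed to (*Daemon).Containers
    }
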
+ 2 - 2
daemon/logdrivers_linux.go

@@ -1,8 +1,8 @@
 package daemon
 
-// Importing packages here only to make sure their init gets called and
-// therefore they register themselves to the logdriver factory.
 import (
+	// Importing packages here only to make sure their init gets called and
+	// therefore they register themselves to the logdriver factory.
 	_ "github.com/docker/docker/daemon/logger/fluentd"
 	_ "github.com/docker/docker/daemon/logger/gelf"
 	_ "github.com/docker/docker/daemon/logger/journald"

+ 2 - 2
daemon/logdrivers_windows.go

@@ -1,7 +1,7 @@
 package daemon
 
-// Importing packages here only to make sure their init gets called and
-// therefore they register themselves to the logdriver factory.
 import (
+	// Importing packages here only to make sure their init gets called and
+	// therefore they register themselves to the logdriver factory.
 	_ "github.com/docker/docker/daemon/logger/jsonfilelog"
 )

+ 13 - 3
daemon/logs.go

@@ -11,15 +11,25 @@ import (
 	"github.com/docker/docker/pkg/stdcopy"
 )
 
+// ContainerLogsConfig holds configs for logging operations. Exists
+// for users of the daemon to pass it a logging configuration.
 type ContainerLogsConfig struct {
-	Follow, Timestamps   bool
-	Tail                 string
-	Since                time.Time
+	// if true stream log output
+	Follow bool
+	// if true include timestamps for each line of log output
+	Timestamps bool
+	// return that many lines of log output from the end
+	Tail string
+	// filter logs by returning only those entries after this time
+	Since time.Time
+	// whether or not to show stdout and stderr as well as log entries.
 	UseStdout, UseStderr bool
 	OutStream            io.Writer
 	Stop                 <-chan bool
 }
 
+// ContainerLogs hooks up a container's stdout and stderr streams
+// configured with the given struct.
 func (daemon *Daemon) ContainerLogs(container *Container, config *ContainerLogsConfig) error {
 	if !(config.UseStdout || config.UseStderr) {
 		return fmt.Errorf("You must choose at least one stream")

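Similarly, ContainerLogsConfig mirrors the docker logs flags. A sketch of a caller following a timestamped stream limited to the last ten lines (again assuming the daemon package is importable):

    package main

    import (
        "os"

        "github.com/docker/docker/daemon"
    )

    func main() {
        // Roughly `docker logs -f -t --tail 10`: follow the stream,
        // timestamp each line, start from the last ten entries.
        config := &daemon.ContainerLogsConfig{
            Follow:     true,
            Timestamps: true,
            Tail:       "10",
            UseStdout:  true,
            UseStderr:  true,
            OutStream:  os.Stdout,
        }
        _ = config // would be passed to (*Daemon).ContainerLogs
    }
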
+ 8 - 8
daemon/monitor.go

@@ -138,11 +138,11 @@ func (m *containerMonitor) Start() error {
 
 		pipes := execdriver.NewPipes(m.container.stdin, m.container.stdout, m.container.stderr, m.container.Config.OpenStdin)
 
-		m.container.LogEvent("start")
+		m.container.logEvent("start")
 
 		m.lastStartTime = time.Now()
 
-		if exitStatus, err = m.container.daemon.Run(m.container, pipes, m.callback); err != nil {
+		if exitStatus, err = m.container.daemon.run(m.container, pipes, m.callback); err != nil {
 			// if we receive an internal error from the initial start of a container then lets
 			// return it instead of entering the restart loop
 			if m.container.RestartCount == 0 {
@@ -161,11 +161,11 @@ func (m *containerMonitor) Start() error {
 		m.resetMonitor(err == nil && exitStatus.ExitCode == 0)
 
 		if m.shouldRestart(exitStatus.ExitCode) {
-			m.container.SetRestarting(&exitStatus)
+			m.container.setRestarting(&exitStatus)
 			if exitStatus.OOMKilled {
-				m.container.LogEvent("oom")
+				m.container.logEvent("oom")
 			}
-			m.container.LogEvent("die")
+			m.container.logEvent("die")
 			m.resetContainer(true)
 
 			// sleep with a small time increment between each restart to help avoid issues cased by quickly
@@ -180,9 +180,9 @@ func (m *containerMonitor) Start() error {
 			continue
 		}
 		if exitStatus.OOMKilled {
-			m.container.LogEvent("oom")
+			m.container.logEvent("oom")
 		}
-		m.container.LogEvent("die")
+		m.container.logEvent("die")
 		m.resetContainer(true)
 		return err
 	}
@@ -270,7 +270,7 @@ func (m *containerMonitor) callback(processConfig *execdriver.ProcessConfig, pid
 		close(m.startSignal)
 	}
 
-	if err := m.container.ToDisk(); err != nil {
+	if err := m.container.toDiskLocking(); err != nil {
 		logrus.Errorf("Error saving container to disk: %v", err)
 	}
 }

+ 1 - 1
daemon/pause.go

@@ -9,7 +9,7 @@ func (daemon *Daemon) ContainerPause(name string) error {
 		return err
 	}
 
-	if err := container.Pause(); err != nil {
+	if err := container.pause(); err != nil {
 		return fmt.Errorf("Cannot pause container %s: %s", name, err)
 	}
 

+ 6 - 3
daemon/rename.go

@@ -4,6 +4,9 @@ import (
 	"fmt"
 )
 
+// ContainerRename changes the name of a container, using the oldName
+// to find the container. An error is returned if newName is already
+// reserved.
 func (daemon *Daemon) ContainerRename(oldName, newName string) error {
 	if oldName == "" || newName == "" {
 		return fmt.Errorf("usage: docker rename OLD_NAME NEW_NAME")
@@ -27,10 +30,10 @@ func (daemon *Daemon) ContainerRename(oldName, newName string) error {
 	undo := func() {
 		container.Name = oldName
 		daemon.reserveName(container.ID, oldName)
-		daemon.containerGraph.Delete(newName)
+		daemon.containerGraphDB.Delete(newName)
 	}
 
-	if err := daemon.containerGraph.Delete(oldName); err != nil {
+	if err := daemon.containerGraphDB.Delete(oldName); err != nil {
 		undo()
 		return fmt.Errorf("Failed to delete container %q: %v", oldName, err)
 	}
@@ -40,6 +43,6 @@ func (daemon *Daemon) ContainerRename(oldName, newName string) error {
 		return err
 	}
 
-	container.LogEvent("rename")
+	container.logEvent("rename")
 	return nil
 }

+ 7 - 2
daemon/resize.go

@@ -1,5 +1,7 @@
 package daemon
 
+// ContainerResize changes the size of the TTY of the process running
+// in the container with the given name to the given height and width.
 func (daemon *Daemon) ContainerResize(name string, height, width int) error {
 	container, err := daemon.Get(name)
 	if err != nil {
@@ -9,11 +11,14 @@ func (daemon *Daemon) ContainerResize(name string, height, width int) error {
 	return container.Resize(height, width)
 }
 
+// ContainerExecResize changes the size of the TTY of the process
+// running in the exec with the given name to the given height and
+// width.
 func (daemon *Daemon) ContainerExecResize(name string, height, width int) error {
-	execConfig, err := daemon.getExecConfig(name)
+	ExecConfig, err := daemon.getExecConfig(name)
 	if err != nil {
 		return err
 	}
 
-	return execConfig.Resize(height, width)
+	return ExecConfig.resize(height, width)
 }

+ 6 - 0
daemon/restart.go

@@ -2,6 +2,12 @@ package daemon
 
 import "fmt"
 
+// ContainerRestart stops and starts a container. It attempts to
+// gracefully stop the container within the given timeout, forcefully
+// stopping it if the timeout is exceeded. If given a negative
+// timeout, ContainerRestart waits indefinitely for a graceful
+// stop. Returns an error if the container cannot be found, or if
+// there is an underlying error at any stage of the restart.
 func (daemon *Daemon) ContainerRestart(name string, seconds int) error {
 	container, err := daemon.Get(name)
 	if err != nil {

+ 2 - 1
daemon/start.go

@@ -7,13 +7,14 @@ import (
 	"github.com/docker/docker/runconfig"
 )
 
+// ContainerStart starts a container.
 func (daemon *Daemon) ContainerStart(name string, hostConfig *runconfig.HostConfig) error {
 	container, err := daemon.Get(name)
 	if err != nil {
 		return err
 	}
 
-	if container.IsPaused() {
+	if container.isPaused() {
 		return fmt.Errorf("Cannot start a paused container, try unpause instead.")
 	}
 

+ 26 - 36
daemon/state.go

@@ -9,8 +9,13 @@ import (
 	"github.com/docker/docker/pkg/units"
 )
 
+// State holds the current container state, and has methods to get and
+// set the state. Container embeds State, which allows all of the
+// functions defined on State to be called on Container.
 type State struct {
 	sync.Mutex
+	// FIXME: Why do we have both paused and running if a
+	// container cannot be paused and running at the same time?
 	Running           bool
 	Paused            bool
 	Restarting        bool
@@ -25,6 +30,7 @@ type State struct {
 	waitChan          chan struct{}
 }
 
+// NewState creates a default state object with a fresh channel for state changes.
 func NewState() *State {
 	return &State{
 		waitChan: make(chan struct{}),
@@ -111,10 +117,11 @@ func wait(waitChan <-chan struct{}, timeout time.Duration) error {
 	}
 }
 
-// WaitRunning waits until state is running. If state already running it returns
-// immediately. If you want wait forever you must supply negative timeout.
-// Returns pid, that was passed to SetRunning
-func (s *State) WaitRunning(timeout time.Duration) (int, error) {
+// waitRunning waits until state is running. If state is already
+// running it returns immediately. To wait forever, supply a negative
+// timeout. Returns the pid that was passed to setRunningLocking.
+func (s *State) waitRunning(timeout time.Duration) (int, error) {
 	s.Lock()
 	if s.Running {
 		pid := s.Pid
@@ -126,12 +133,12 @@ func (s *State) WaitRunning(timeout time.Duration) (int, error) {
 	if err := wait(waitChan, timeout); err != nil {
 		return -1, err
 	}
-	return s.GetPid(), nil
+	return s.getPID(), nil
 }
 
 // WaitStop waits until state is stopped. If the state is already stopped it
 // returns immediately. To wait forever, supply a negative timeout.
-// Returns exit code, that was passed to SetStopped
+// Returns the exit code that was passed to setStoppedLocking
 func (s *State) WaitStop(timeout time.Duration) (int, error) {
 	s.Lock()
 	if !s.Running {
@@ -144,9 +151,10 @@ func (s *State) WaitStop(timeout time.Duration) (int, error) {
 	if err := wait(waitChan, timeout); err != nil {
 		return -1, err
 	}
-	return s.GetExitCode(), nil
+	return s.getExitCode(), nil
 }
 
+// IsRunning returns whether the running flag is set. Used by Container to check whether a container is running.
 func (s *State) IsRunning() bool {
 	s.Lock()
 	res := s.Running
@@ -154,21 +162,22 @@ func (s *State) IsRunning() bool {
 	return res
 }
 
-func (s *State) GetPid() int {
+// getPID returns the process id of a container.
+func (s *State) getPID() int {
 	s.Lock()
 	res := s.Pid
 	s.Unlock()
 	return res
 }
 
-func (s *State) GetExitCode() int {
+func (s *State) getExitCode() int {
 	s.Lock()
 	res := s.ExitCode
 	s.Unlock()
 	return res
 }
 
-func (s *State) SetRunning(pid int) {
+func (s *State) setRunningLocking(pid int) {
 	s.Lock()
 	s.setRunning(pid)
 	s.Unlock()
@@ -186,7 +195,7 @@ func (s *State) setRunning(pid int) {
 	s.waitChan = make(chan struct{})
 }
 
-func (s *State) SetStopped(exitStatus *execdriver.ExitStatus) {
+func (s *State) setStoppedLocking(exitStatus *execdriver.ExitStatus) {
 	s.Lock()
 	s.setStopped(exitStatus)
 	s.Unlock()
@@ -203,9 +212,9 @@ func (s *State) setStopped(exitStatus *execdriver.ExitStatus) {
 	s.waitChan = make(chan struct{})
 }
 
-// SetRestarting is when docker handles the auto restart of containers when they are
+// setRestartingLocking is called when docker handles the auto restart of containers when they are
 // in the middle of a stop and being restarted again
-func (s *State) SetRestarting(exitStatus *execdriver.ExitStatus) {
+func (s *State) setRestartingLocking(exitStatus *execdriver.ExitStatus) {
 	s.Lock()
 	s.setRestarting(exitStatus)
 	s.Unlock()
@@ -231,33 +240,14 @@ func (s *State) setError(err error) {
 	s.Error = err.Error()
 }
 
-func (s *State) IsRestarting() bool {
-	s.Lock()
-	res := s.Restarting
-	s.Unlock()
-	return res
-}
-
-func (s *State) SetPaused() {
-	s.Lock()
-	s.Paused = true
-	s.Unlock()
-}
-
-func (s *State) SetUnpaused() {
-	s.Lock()
-	s.Paused = false
-	s.Unlock()
-}
-
-func (s *State) IsPaused() bool {
+func (s *State) isPaused() bool {
 	s.Lock()
 	res := s.Paused
 	s.Unlock()
 	return res
 }
 
-func (s *State) SetRemovalInProgress() error {
+func (s *State) setRemovalInProgress() error {
 	s.Lock()
 	defer s.Unlock()
 	if s.removalInProgress {
@@ -267,13 +257,13 @@ func (s *State) SetRemovalInProgress() error {
 	return nil
 }
 
-func (s *State) ResetRemovalInProgress() {
+func (s *State) resetRemovalInProgress() {
 	s.Lock()
 	s.removalInProgress = false
 	s.Unlock()
 }
 
-func (s *State) SetDead() {
+func (s *State) setDead() {
 	s.Lock()
 	s.Dead = true
 	s.Unlock()

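The waitChan that NewState hands out is the heart of waitRunning and WaitStop: each state transition closes the current channel, waking every waiter at once, then installs a fresh one for the next transition. A self-contained sketch of that close-to-broadcast pattern:

    package main

    import (
        "fmt"
        "sync"
        "time"
    )

    type state struct {
        sync.Mutex
        running  bool
        waitChan chan struct{}
    }

    func (s *state) setRunning() {
        s.Lock()
        s.running = true
        close(s.waitChan)                // wakes every waiter on the old channel
        s.waitChan = make(chan struct{}) // fresh channel for the next transition
        s.Unlock()
    }

    func (s *state) waitRunning(timeout time.Duration) error {
        s.Lock()
        if s.running {
            s.Unlock()
            return nil
        }
        waitChan := s.waitChan // grab the current channel under the lock
        s.Unlock()

        select {
        case <-waitChan:
            return nil
        case <-time.After(timeout):
            return fmt.Errorf("timed out waiting for running")
        }
    }

    func main() {
        s := &state{waitChan: make(chan struct{})}
        go func() {
            time.Sleep(10 * time.Millisecond)
            s.setRunning()
        }()
        fmt.Println(s.waitRunning(time.Second)) // <nil>
    }
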
+ 11 - 8
daemon/state_test.go

@@ -14,11 +14,12 @@ func TestStateRunStop(t *testing.T) {
 		started := make(chan struct{})
 		var pid int64
 		go func() {
-			runPid, _ := s.WaitRunning(-1 * time.Second)
+			runPid, _ := s.waitRunning(-1 * time.Second)
 			atomic.StoreInt64(&pid, int64(runPid))
 			close(started)
 		}()
-		s.SetRunning(i + 100)
+		s.setRunningLocking(i + 100)
+
 		if !s.IsRunning() {
 			t.Fatal("State not running")
 		}
@@ -38,8 +39,8 @@ func TestStateRunStop(t *testing.T) {
 		if runPid != i+100 {
 			t.Fatalf("Pid %v, expected %v", runPid, i+100)
 		}
-		if pid, err := s.WaitRunning(-1 * time.Second); err != nil || pid != i+100 {
-			t.Fatalf("WaitRunning returned pid: %v, err: %v, expected pid: %v, err: %v", pid, err, i+100, nil)
+		if pid, err := s.waitRunning(-1 * time.Second); err != nil || pid != i+100 {
+			t.Fatalf("waitRunning returned pid: %v, err: %v, expected pid: %v, err: %v", pid, err, i+100, nil)
 		}
 
 		stopped := make(chan struct{})
@@ -49,7 +50,7 @@ func TestStateRunStop(t *testing.T) {
 			atomic.StoreInt64(&exit, int64(exitCode))
 			close(stopped)
 		}()
-		s.SetStopped(&execdriver.ExitStatus{ExitCode: i})
+		s.setStoppedLocking(&execdriver.ExitStatus{ExitCode: i})
 		if s.IsRunning() {
 			t.Fatal("State is running")
 		}
@@ -79,7 +80,7 @@ func TestStateTimeoutWait(t *testing.T) {
 	s := NewState()
 	started := make(chan struct{})
 	go func() {
-		s.WaitRunning(100 * time.Millisecond)
+		s.waitRunning(100 * time.Millisecond)
 		close(started)
 	}()
 	select {
@@ -88,10 +89,12 @@ func TestStateTimeoutWait(t *testing.T) {
 	case <-started:
 		t.Log("Start callback fired")
 	}
-	s.SetRunning(42)
+
+	s.setRunningLocking(42)
+
 	stopped := make(chan struct{})
 	go func() {
-		s.WaitRunning(100 * time.Millisecond)
+		s.waitRunning(100 * time.Millisecond)
 		close(stopped)
 	}()
 	select {

+ 9 - 5
daemon/stats.go

@@ -10,14 +10,18 @@ import (
 	"github.com/opencontainers/runc/libcontainer"
 )
 
+// ContainerStatsConfig holds information for configuring the runtime
+// behavior of a daemon.ContainerStats() call.
 type ContainerStatsConfig struct {
 	Stream    bool
 	OutStream io.Writer
 	Stop      <-chan bool
 }
 
+// ContainerStats writes information about the container to the stream
+// given in the config object.
 func (daemon *Daemon) ContainerStats(name string, config *ContainerStatsConfig) error {
-	updates, err := daemon.SubscribeToContainerStats(name)
+	updates, err := daemon.subscribeToContainerStats(name)
 	if err != nil {
 		return err
 	}
@@ -26,7 +30,7 @@ func (daemon *Daemon) ContainerStats(name string, config *ContainerStatsConfig)
 		config.OutStream.Write(nil)
 	}
 
-	var preCpuStats types.CPUStats
+	var preCPUStats types.CPUStats
 	getStat := func(v interface{}) *types.Stats {
 		update := v.(*execdriver.ResourceStats)
 		// Retrieve the nw statistics from libnetwork and inject them in the Stats
@@ -34,17 +38,17 @@ func (daemon *Daemon) ContainerStats(name string, config *ContainerStatsConfig)
 			update.Stats.Interfaces = nwStats
 		}
 		ss := convertStatsToAPITypes(update.Stats)
-		ss.PreCPUStats = preCpuStats
+		ss.PreCPUStats = preCPUStats
 		ss.MemoryStats.Limit = uint64(update.MemoryLimit)
 		ss.Read = update.Read
 		ss.CPUStats.SystemUsage = update.SystemUsage
-		preCpuStats = ss.CPUStats
+		preCPUStats = ss.CPUStats
 		return ss
 	}
 
 	enc := json.NewEncoder(config.OutStream)
 
-	defer daemon.UnsubscribeToContainerStats(name, updates)
+	defer daemon.unsubscribeToContainerStats(name, updates)
 
 	noStreamFirstFrame := true
 	for {

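PreCPUStats exists so a consumer of the stream can derive a CPU percentage from two consecutive frames. A sketch of the conventional client-side calculation (the formula is the usual one, not something this diff defines):

    package main

    import "fmt"

    // cpuPercent derives a CPU percentage from two consecutive frames:
    // the container's share of the change in total system CPU time,
    // scaled by the number of CPUs.
    func cpuPercent(prevTotal, prevSystem, curTotal, curSystem uint64, ncpus int) float64 {
        cpuDelta := float64(curTotal) - float64(prevTotal)      // container delta, ns
        systemDelta := float64(curSystem) - float64(prevSystem) // system-wide delta, ns
        if systemDelta <= 0 || cpuDelta <= 0 {
            return 0
        }
        return (cpuDelta / systemDelta) * float64(ncpus) * 100
    }

    func main() {
        // Over one interval the container used 0.5s of CPU while the
        // system advanced 4s of CPU time across 4 CPUs: 50% of one CPU.
        fmt.Println(cpuPercent(0, 0, 500e6, 4e9, 4)) // 50
    }
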
+ 25 - 18
daemon/stats_collector_unix.go

@@ -23,10 +23,10 @@ import (
 // and will start processing stats when they are started.
 func newStatsCollector(interval time.Duration) *statsCollector {
 	s := &statsCollector{
-		interval:   interval,
-		publishers: make(map[*Container]*pubsub.Publisher),
-		clockTicks: uint64(system.GetClockTicks()),
-		bufReader:  bufio.NewReaderSize(nil, 128),
+		interval:            interval,
+		publishers:          make(map[*Container]*pubsub.Publisher),
+		clockTicksPerSecond: uint64(system.GetClockTicks()),
+		bufReader:           bufio.NewReaderSize(nil, 128),
 	}
 	go s.run()
 	return s
@@ -34,11 +34,11 @@ func newStatsCollector(interval time.Duration) *statsCollector {
 
 // statsCollector manages and provides container resource stats
 type statsCollector struct {
-	m          sync.Mutex
-	interval   time.Duration
-	clockTicks uint64
-	publishers map[*Container]*pubsub.Publisher
-	bufReader  *bufio.Reader
+	m                   sync.Mutex
+	interval            time.Duration
+	clockTicksPerSecond uint64
+	publishers          map[*Container]*pubsub.Publisher
+	bufReader           *bufio.Reader
 }
 
 // collect registers the container with the collector and adds it to
@@ -89,7 +89,7 @@ func (s *statsCollector) run() {
 	var pairs []publishersPair
 
 	for range time.Tick(s.interval) {
-		systemUsage, err := s.getSystemCpuUsage()
+		systemUsage, err := s.getSystemCPUUsage()
 		if err != nil {
 			logrus.Errorf("collecting system cpu usage: %v", err)
 			continue
@@ -107,7 +107,7 @@ func (s *statsCollector) run() {
 		s.m.Unlock()
 
 		for _, pair := range pairs {
-			stats, err := pair.container.Stats()
+			stats, err := pair.container.stats()
 			if err != nil {
 				if err != execdriver.ErrNotRunning {
 					logrus.Errorf("collecting stats for %s: %v", pair.container.ID, err)
@@ -120,11 +120,17 @@ func (s *statsCollector) run() {
 	}
 }
 
-const nanoSeconds = 1e9
+const nanoSecondsPerSecond = 1e9
 
-// getSystemCpuUSage returns the host system's cpu usage in nanoseconds
-// for the system to match the cgroup readings are returned in the same format.
-func (s *statsCollector) getSystemCpuUsage() (uint64, error) {
+// getSystemCPUUsage returns the host system's cpu usage in
+// nanoseconds. An error is returned if the underlying file does not
+// match the expected format.
+//
+// Uses /proc/stat defined by POSIX. Looks for the cpu
+// statistics line and then sums up the first seven fields
+// provided. See `man 5 proc` for details on specific field
+// information.
+func (s *statsCollector) getSystemCPUUsage() (uint64, error) {
 	var line string
 	f, err := os.Open("/proc/stat")
 	if err != nil {
@@ -147,15 +153,16 @@ func (s *statsCollector) getSystemCpuUsage() (uint64, error) {
 			if len(parts) < 8 {
 				return 0, fmt.Errorf("invalid number of cpu fields")
 			}
-			var sum uint64
+			var totalClockTicks uint64
 			for _, i := range parts[1:8] {
 				v, err := strconv.ParseUint(i, 10, 64)
 				if err != nil {
 					return 0, fmt.Errorf("Unable to convert value %s to int: %s", i, err)
 				}
-				sum += v
+				totalClockTicks += v
 			}
-			return (sum * nanoSeconds) / s.clockTicks, nil
+			return (totalClockTicks * nanoSecondsPerSecond) /
+				s.clockTicksPerSecond, nil
 		}
 	}
 	return 0, fmt.Errorf("invalid stat format")

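The final conversion deserves a worked example: /proc/stat reports cumulative CPU time in clock ticks (USER_HZ), so multiplying the tick total by nanoSecondsPerSecond and dividing by clockTicksPerSecond yields nanoseconds, the same unit the cgroup readings use. At the usual 100 ticks per second, 12345 ticks become 12345 * 1e9 / 100 nanoseconds, i.e. 123.45 seconds. The sketch below reproduces the parsing logic as a standalone Linux program; note that it hardcodes 100 ticks per second for simplicity, whereas the daemon queries the real value through system.GetClockTicks().

package main

import (
	"bufio"
	"fmt"
	"os"
	"strconv"
	"strings"
)

const (
	nanoSecondsPerSecond = 1e9
	clockTicksPerSecond  = 100 // assumption: USER_HZ is almost always 100 on Linux
)

func systemCPUNanos() (uint64, error) {
	f, err := os.Open("/proc/stat")
	if err != nil {
		return 0, err
	}
	defer f.Close()

	sc := bufio.NewScanner(f)
	for sc.Scan() {
		fields := strings.Fields(sc.Text())
		// The aggregate line is "cpu user nice system idle iowait irq softirq ...";
		// per-CPU lines are named cpu0, cpu1, ... and are skipped here.
		if len(fields) >= 8 && fields[0] == "cpu" {
			var totalClockTicks uint64
			for _, raw := range fields[1:8] { // sum the first seven time fields
				v, err := strconv.ParseUint(raw, 10, 64)
				if err != nil {
					return 0, err
				}
				totalClockTicks += v
			}
			return totalClockTicks * nanoSecondsPerSecond / clockTicksPerSecond, nil
		}
	}
	return 0, fmt.Errorf("cpu line not found in /proc/stat")
}

func main() {
	n, err := systemCPUNanos()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("system cpu usage: %d ns\n", n)
}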
+ 6 - 0
daemon/stop.go

@@ -2,6 +2,12 @@ package daemon
 
 import "fmt"
 
+// ContainerStop looks for the given container and terminates it,
+// waiting the given number of seconds before forcefully killing it.
+// If a negative number of seconds is given, ContainerStop waits for
+// a graceful termination. An error is returned if the container is
+// not found, is already stopped, or if there is a problem stopping
+// it.
 func (daemon *Daemon) ContainerStop(name string, seconds int) error {
 	container, err := daemon.Get(name)
 	if err != nil {

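A short usage sketch of the semantics the new comment spells out (the daemon handle d and the container name are placeholders, not code from this patch):

// Give the container 10 seconds to exit after the stop signal
// before it is forcefully killed.
if err := d.ContainerStop("my-container", 10); err != nil {
	logrus.Errorf("stop failed: %v", err)
}

// A negative timeout instead waits for a graceful termination:
if err := d.ContainerStop("my-container", -1); err != nil {
	logrus.Errorf("stop failed: %v", err)
}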
+ 7 - 1
daemon/top_unix.go

@@ -11,6 +11,11 @@ import (
 	"github.com/docker/docker/api/types"
 )
 
+// ContainerTop lists the processes running inside the given
+// container by calling ps with the given args, or with the flags
+// "-ef" if no args are given. An error is returned if the container
+// is not found or not running, or if there is a problem running ps
+// or parsing its output.
 func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) {
 	if psArgs == "" {
 		psArgs = "-ef"
@@ -50,6 +55,7 @@ func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.Container
 		return nil, fmt.Errorf("Couldn't find PID field in ps output")
 	}
 
+	// loop through the output and extract the PID from each line
 	for _, line := range lines[1:] {
 		if len(line) == 0 {
 			continue
@@ -70,6 +76,6 @@ func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.Container
 			}
 		}
 	}
-	container.LogEvent("top")
+	container.logEvent("top")
 	return procList, nil
 }

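The header-scan-then-extract approach is simple to demonstrate in isolation. The standalone sketch below is an illustration rather than the patch's code (ContainerTop additionally restricts the listing to the PIDs that belong to the container): it runs ps -ef, locates the PID column by name since the field order depends on the ps flags, and prints each PID.

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	out, err := exec.Command("ps", "-ef").Output()
	if err != nil {
		panic(err)
	}

	lines := strings.Split(strings.TrimSpace(string(out)), "\n")
	header := strings.Fields(lines[0])

	// Find the PID column in the header row.
	pidIndex := -1
	for i, name := range header {
		if name == "PID" {
			pidIndex = i
			break
		}
	}
	if pidIndex == -1 {
		panic("couldn't find PID field in ps output")
	}

	// Loop through the output and extract the PID from each line.
	for _, line := range lines[1:] {
		fields := strings.Fields(line)
		if pidIndex < len(fields) {
			fmt.Println(fields[pidIndex])
		}
	}
}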
+ 1 - 0
daemon/top_windows.go

@@ -6,6 +6,7 @@ import (
 	"github.com/docker/docker/api/types"
 )
 
+// ContainerTop is not supported on Windows and returns an error.
 func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) {
 	return nil, fmt.Errorf("Top is not supported on Windows")
 }

+ 1 - 1
daemon/unpause.go

@@ -9,7 +9,7 @@ func (daemon *Daemon) ContainerUnpause(name string) error {
 		return err
 	}
 
-	if err := container.Unpause(); err != nil {
+	if err := container.unpause(); err != nil {
 		return fmt.Errorf("Cannot unpause container %s: %s", name, err)
 	}
 

+ 3 - 3
daemon/volumes_unit_test.go

@@ -5,7 +5,7 @@ import "testing"
 func TestParseVolumeFrom(t *testing.T) {
 	cases := []struct {
 		spec    string
-		expId   string
+		expID   string
 		expMode string
 		fail    bool
 	}{
@@ -25,8 +25,8 @@ func TestParseVolumeFrom(t *testing.T) {
 			continue
 		}
 
-		if id != c.expId {
-			t.Fatalf("Expected id %s, was %s, for spec %s\n", c.expId, id, c.spec)
+		if id != c.expID {
+			t.Fatalf("Expected id %s, was %s, for spec %s\n", c.expID, id, c.spec)
 		}
 		if mode != c.expMode {
 			t.Fatalf("Expected mode %s, was %s for spec %s\n", c.expMode, mode, c.spec)

+ 1 - 1
daemon/volumes_unix.go

@@ -249,7 +249,7 @@ func (daemon *Daemon) verifyVolumesInfo(container *Container) error {
 			}
 		}
 
-		return container.ToDisk()
+		return container.toDiskLocking()
 	}
 
 	return nil

+ 5 - 0
daemon/wait.go

@@ -2,6 +2,11 @@ package daemon
 
 import "time"
 
+// ContainerWait blocks until the given container stops and then
+// returns its exit code. An error is returned if the container is
+// not found or if the timeout expires before the container stops.
+// To wait forever, supply a negative duration for the timeout.
 func (daemon *Daemon) ContainerWait(name string, timeout time.Duration) (int, error) {
 	container, err := daemon.Get(name)
 	if err != nil {

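A caller-side sketch of that contract (the daemon value d and the container name are placeholders; assumes the "log" and "time" imports):

// Wait up to one minute for the container to exit.
code, err := d.ContainerWait("my-container", time.Minute)
if err != nil {
	// Either the container was not found or the timeout expired first.
	log.Fatal(err)
}
log.Printf("container exited with status %d", code)

// Supplying a negative duration instead waits forever:
// code, err := d.ContainerWait("my-container", -1)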
+ 1 - 0
hack/make/validate-lint

@@ -18,6 +18,7 @@ packages=(
 	builder/parser
 	builder/parser/dumper
 	cliconfig
+	daemon
 	daemon/events
 	daemon/execdriver
 	daemon/execdriver/execdrivers