
Merge pull request #26108 from mlaventure/data-mngt

New Data Management commands
Arnaud Porterie, 8 years ago
parent commit 86de7c000f
49 changed files with 1962 additions and 13 deletions
  1. api/server/router/container/backend.go (+6, -0)
  2. api/server/router/container/container.go (+1, -0)
  3. api/server/router/container/container_routes.go (+21, -0)
  4. api/server/router/image/backend.go (+2, -1)
  5. api/server/router/image/image.go (+1, -0)
  6. api/server/router/image/image_routes.go (+22, -1)
  7. api/server/router/system/backend.go (+1, -0)
  8. api/server/router/system/system.go (+1, -0)
  9. api/server/router/system/system_routes.go (+9, -0)
  10. api/server/router/volume/backend.go (+1, -0)
  11. api/server/router/volume/volume.go (+1, -0)
  12. api/server/router/volume/volume_routes.go (+21, -0)
  13. api/types/types.go (+50, -0)
  14. cli/command/container/cmd.go (+1, -0)
  15. cli/command/container/prune.go (+74, -0)
  16. cli/command/container/stats.go (+1, -2)
  17. cli/command/events_utils.go (+1, -1)
  18. cli/command/formatter/container.go (+14, -0)
  19. cli/command/formatter/disk_usage.go (+331, -0)
  20. cli/command/formatter/image.go (+32, -1)
  21. cli/command/formatter/image_test.go (+1, -1)
  22. cli/command/formatter/volume.go (+18, -0)
  23. cli/command/image/cmd.go (+2, -0)
  24. cli/command/image/prune.go (+90, -0)
  25. cli/command/prune/prune.go (+39, -0)
  26. cli/command/system/cmd.go (+2, -0)
  27. cli/command/system/df.go (+55, -0)
  28. cli/command/system/prune.go (+90, -0)
  29. cli/command/utils.go (+22, -0)
  30. cli/command/volume/cmd.go (+1, -0)
  31. cli/command/volume/prune.go (+74, -0)
  32. client/container_prune.go (+26, -0)
  33. client/disk_usage.go (+26, -0)
  34. client/image_prune.go (+26, -0)
  35. client/interface.go (+4, -0)
  36. client/volume_prune.go (+26, -0)
  37. daemon/disk_usage.go (+100, -0)
  38. daemon/images.go (+68, -4)
  39. daemon/prune.go (+152, -0)
  40. daemon/volumes.go (+32, -2)
  41. distribution/xfer/download_test.go (+10, -0)
  42. docs/reference/api/docker_remote_api_v1.25.md (+201, -0)
  43. docs/reference/commandline/container_prune.md (+41, -0)
  44. docs/reference/commandline/image_prune.md (+65, -0)
  45. docs/reference/commandline/system_df.md (+68, -0)
  46. docs/reference/commandline/system_prune.md (+70, -0)
  47. docs/reference/commandline/volume_prune.md (+48, -0)
  48. layer/layer.go (+1, -0)
  49. layer/layer_store.go (+13, -0)

+ 6 - 0
api/server/router/container/backend.go

@@ -62,6 +62,11 @@ type attachBackend interface {
 	ContainerAttach(name string, c *backend.ContainerAttachConfig) error
 }
 
+// systemBackend includes functions to implement to provide system wide containers functionality
+type systemBackend interface {
+	ContainersPrune(config *types.ContainersPruneConfig) (*types.ContainersPruneReport, error)
+}
+
 // Backend is all the methods that need to be implemented to provide container specific functionality.
 type Backend interface {
 	execBackend
@@ -69,4 +74,5 @@ type Backend interface {
 	stateBackend
 	monitorBackend
 	attachBackend
+	systemBackend
 }

+ 1 - 0
api/server/router/container/container.go

@@ -68,6 +68,7 @@ func (r *containerRouter) initRoutes() {
 		router.NewPostRoute("/exec/{name:.*}/resize", r.postContainerExecResize),
 		router.NewPostRoute("/exec/{name:.*}/resize", r.postContainerExecResize),
 		router.NewPostRoute("/containers/{name:.*}/rename", r.postContainerRename),
 		router.NewPostRoute("/containers/{name:.*}/rename", r.postContainerRename),
 		router.NewPostRoute("/containers/{name:.*}/update", r.postContainerUpdate),
 		router.NewPostRoute("/containers/{name:.*}/update", r.postContainerUpdate),
+		router.NewPostRoute("/containers/prune", r.postContainersPrune),
 		// PUT
 		// PUT
 		router.NewPutRoute("/containers/{name:.*}/archive", r.putContainersArchive),
 		router.NewPutRoute("/containers/{name:.*}/archive", r.putContainersArchive),
 		// DELETE
 		// DELETE

+ 21 - 0
api/server/router/container/container_routes.go

@@ -524,3 +524,24 @@ func (s *containerRouter) wsContainersAttach(ctx context.Context, w http.Respons
 	}
 	return err
 }
+
+func (s *containerRouter) postContainersPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if err := httputils.ParseForm(r); err != nil {
+		return err
+	}
+
+	if err := httputils.CheckForJSON(r); err != nil {
+		return err
+	}
+
+	var cfg types.ContainersPruneConfig
+	if err := json.NewDecoder(r.Body).Decode(&cfg); err != nil {
+		return err
+	}
+
+	pruneReport, err := s.backend.ContainersPrune(&cfg)
+	if err != nil {
+		return err
+	}
+	return httputils.WriteJSON(w, http.StatusOK, pruneReport)
+}
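
Editor's note: a minimal sketch (not part of the commit) of exercising the new POST /containers/prune endpoint directly over the daemon's Unix socket. The endpoint path, the JSON body, and the report fields come from the handler above and api/types/types.go; the socket path /var/run/docker.sock and the unversioned URL are assumptions.

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
	"strings"
)

func main() {
	// Route plain HTTP over the daemon's Unix socket (assumed default path).
	tr := &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return net.Dial("unix", "/var/run/docker.sock")
		},
	}
	httpc := &http.Client{Transport: tr}

	// An empty JSON object is a valid ContainersPruneConfig for this handler.
	resp, err := httpc.Post("http://docker/containers/prune", "application/json", strings.NewReader("{}"))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Decode the ContainersPruneReport written by httputils.WriteJSON above.
	var report struct {
		ContainersDeleted []string
		SpaceReclaimed    uint64
	}
	if err := json.NewDecoder(resp.Body).Decode(&report); err != nil {
		panic(err)
	}
	fmt.Printf("deleted %d containers, reclaimed %d bytes\n", len(report.ContainersDeleted), report.SpaceReclaimed)
}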

+ 2 - 1
api/server/router/image/backend.go

@@ -25,9 +25,10 @@ type containerBackend interface {
 type imageBackend interface {
 	ImageDelete(imageRef string, force, prune bool) ([]types.ImageDelete, error)
 	ImageHistory(imageName string) ([]*types.ImageHistory, error)
-	Images(filterArgs string, filter string, all bool) ([]*types.Image, error)
+	Images(filterArgs string, filter string, all bool, withExtraAttrs bool) ([]*types.Image, error)
 	LookupImage(name string) (*types.ImageInspect, error)
 	TagImage(imageName, repository, tag string) error
+	ImagesPrune(config *types.ImagesPruneConfig) (*types.ImagesPruneReport, error)
 }
 
 type importExportBackend interface {

+ 1 - 0
api/server/router/image/image.go

@@ -43,6 +43,7 @@ func (r *imageRouter) initRoutes() {
 		router.Cancellable(router.NewPostRoute("/images/create", r.postImagesCreate)),
 		router.Cancellable(router.NewPostRoute("/images/create", r.postImagesCreate)),
 		router.Cancellable(router.NewPostRoute("/images/{name:.*}/push", r.postImagesPush)),
 		router.Cancellable(router.NewPostRoute("/images/{name:.*}/push", r.postImagesPush)),
 		router.NewPostRoute("/images/{name:.*}/tag", r.postImagesTag),
 		router.NewPostRoute("/images/{name:.*}/tag", r.postImagesTag),
+		router.NewPostRoute("/images/prune", r.postImagesPrune),
 		// DELETE
 		// DELETE
 		router.NewDeleteRoute("/images/{name:.*}", r.deleteImages),
 		router.NewDeleteRoute("/images/{name:.*}", r.deleteImages),
 	}
 	}

+ 22 - 1
api/server/router/image/image_routes.go

@@ -248,7 +248,7 @@ func (s *imageRouter) getImagesJSON(ctx context.Context, w http.ResponseWriter,
 	}
 
 	// FIXME: The filter parameter could just be a match filter
-	images, err := s.backend.Images(r.Form.Get("filters"), r.Form.Get("filter"), httputils.BoolValue(r, "all"))
+	images, err := s.backend.Images(r.Form.Get("filters"), r.Form.Get("filter"), httputils.BoolValue(r, "all"), false)
 	if err != nil {
 		return err
 	}
@@ -314,3 +314,24 @@ func (s *imageRouter) getImagesSearch(ctx context.Context, w http.ResponseWriter
 	}
 	return httputils.WriteJSON(w, http.StatusOK, query.Results)
 }
+
+func (s *imageRouter) postImagesPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if err := httputils.ParseForm(r); err != nil {
+		return err
+	}
+
+	if err := httputils.CheckForJSON(r); err != nil {
+		return err
+	}
+
+	var cfg types.ImagesPruneConfig
+	if err := json.NewDecoder(r.Body).Decode(&cfg); err != nil {
+		return err
+	}
+
+	pruneReport, err := s.backend.ImagesPrune(&cfg)
+	if err != nil {
+		return err
+	}
+	return httputils.WriteJSON(w, http.StatusOK, pruneReport)
+}

+ 1 - 0
api/server/router/system/backend.go

@@ -14,6 +14,7 @@ import (
 type Backend interface {
 	SystemInfo() (*types.Info, error)
 	SystemVersion() types.Version
+	SystemDiskUsage() (*types.DiskUsage, error)
 	SubscribeToEvents(since, until time.Time, ef filters.Args) ([]events.Message, chan interface{})
 	UnsubscribeFromEvents(chan interface{})
 	AuthenticateToRegistry(ctx context.Context, authConfig *types.AuthConfig) (string, string, error)

+ 1 - 0
api/server/router/system/system.go

@@ -26,6 +26,7 @@ func NewRouter(b Backend, c *cluster.Cluster) router.Router {
 		router.Cancellable(router.NewGetRoute("/events", r.getEvents)),
 		router.Cancellable(router.NewGetRoute("/events", r.getEvents)),
 		router.NewGetRoute("/info", r.getInfo),
 		router.NewGetRoute("/info", r.getInfo),
 		router.NewGetRoute("/version", r.getVersion),
 		router.NewGetRoute("/version", r.getVersion),
+		router.NewGetRoute("/system/df", r.getDiskUsage),
 		router.NewPostRoute("/auth", r.postAuth),
 		router.NewPostRoute("/auth", r.postAuth),
 	}
 	}
 
 

+ 9 - 0
api/server/router/system/system_routes.go

@@ -56,6 +56,15 @@ func (s *systemRouter) getVersion(ctx context.Context, w http.ResponseWriter, r
 	return httputils.WriteJSON(w, http.StatusOK, info)
 }
 
+func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	du, err := s.backend.SystemDiskUsage()
+	if err != nil {
+		return err
+	}
+
+	return httputils.WriteJSON(w, http.StatusOK, du)
+}
+
 func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if err := httputils.ParseForm(r); err != nil {
 		return err

+ 1 - 0
api/server/router/volume/backend.go

@@ -12,4 +12,5 @@ type Backend interface {
 	VolumeInspect(name string) (*types.Volume, error)
 	VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error)
 	VolumeRm(name string, force bool) error
+	VolumesPrune(config *types.VolumesPruneConfig) (*types.VolumesPruneReport, error)
 }

+ 1 - 0
api/server/router/volume/volume.go

@@ -29,6 +29,7 @@ func (r *volumeRouter) initRoutes() {
 		router.NewGetRoute("/volumes/{name:.*}", r.getVolumeByName),
 		router.NewGetRoute("/volumes/{name:.*}", r.getVolumeByName),
 		// POST
 		// POST
 		router.NewPostRoute("/volumes/create", r.postVolumesCreate),
 		router.NewPostRoute("/volumes/create", r.postVolumesCreate),
+		router.NewPostRoute("/volumes/prune", r.postVolumesPrune),
 		// DELETE
 		// DELETE
 		router.NewDeleteRoute("/volumes/{name:.*}", r.deleteVolumes),
 		router.NewDeleteRoute("/volumes/{name:.*}", r.deleteVolumes),
 	}
 	}

+ 21 - 0
api/server/router/volume/volume_routes.go

@@ -65,3 +65,24 @@ func (v *volumeRouter) deleteVolumes(ctx context.Context, w http.ResponseWriter,
 	w.WriteHeader(http.StatusNoContent)
 	return nil
 }
+
+func (v *volumeRouter) postVolumesPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if err := httputils.ParseForm(r); err != nil {
+		return err
+	}
+
+	if err := httputils.CheckForJSON(r); err != nil {
+		return err
+	}
+
+	var cfg types.VolumesPruneConfig
+	if err := json.NewDecoder(r.Body).Decode(&cfg); err != nil {
+		return err
+	}
+
+	pruneReport, err := v.backend.VolumesPrune(&cfg)
+	if err != nil {
+		return err
+	}
+	return httputils.WriteJSON(w, http.StatusOK, pruneReport)
+}

+ 50 - 0
api/types/types.go

@@ -95,8 +95,10 @@ type Image struct {
 	RepoDigests []string
 	Created     int64
 	Size        int64
+	SharedSize  int64
 	VirtualSize int64
 	Labels      map[string]string
+	Containers  int64
 }
 
 // GraphDriverData returns Image's graph driver config info
@@ -438,6 +440,8 @@ type Volume struct {
 	Status     map[string]interface{} `json:",omitempty"` // Status provides low-level status information about the volume
 	Labels     map[string]string      // Labels is metadata specific to the volume
 	Scope      string                 // Scope describes the level at which the volume exists (e.g. `global` for cluster-wide or `local` for machine level)
+	Size       int64                  // Size holds how much disk space is used by the volume (local driver only). Set to -1 if not provided.
+	RefCount   int                    // RefCount holds the number of containers having this volume attached to them. Set to -1 if not provided.
 }
 
 // VolumesListResponse contains the response for the remote API:
@@ -526,3 +530,49 @@ type Runtime struct {
 	Path string   `json:"path"`
 	Args []string `json:"runtimeArgs,omitempty"`
 }
+
+// DiskUsage contains response of Remote API:
+// GET "/system/df"
+type DiskUsage struct {
+	LayersSize int64
+	Images     []*Image
+	Containers []*Container
+	Volumes    []*Volume
+}
+
+// ImagesPruneConfig contains the configuration for Remote API:
+// POST "/images/prune"
+type ImagesPruneConfig struct {
+	DanglingOnly bool
+}
+
+// ContainersPruneConfig contains the configuration for Remote API:
+// POST "/containers/prune"
+type ContainersPruneConfig struct {
+}
+
+// VolumesPruneConfig contains the configuration for Remote API:
+// POST "/volumes/prune"
+type VolumesPruneConfig struct {
+}
+
+// ContainersPruneReport contains the response for Remote API:
+// POST "/containers/prune"
+type ContainersPruneReport struct {
+	ContainersDeleted []string
+	SpaceReclaimed    uint64
+}
+
+// VolumesPruneReport contains the response for Remote API:
+// POST "/volumes/prune"
+type VolumesPruneReport struct {
+	VolumesDeleted []string
+	SpaceReclaimed uint64
+}
+
+// ImagesPruneReport contains the response for Remote API:
+// POST "/images/prune"
+type ImagesPruneReport struct {
+	ImagesDeleted  []ImageDelete
+	SpaceReclaimed uint64
+}
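
Editor's note: a purely illustrative sketch (not part of the commit) showing the wire shape implied by the new report types, by encoding a hand-filled value; the container ID and byte count below are made up.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types"
)

func main() {
	report := types.ContainersPruneReport{
		ContainersDeleted: []string{"4a7f7eebae0f"}, // hypothetical container ID
		SpaceReclaimed:    212992,                   // hypothetical byte count
	}
	out, _ := json.MarshalIndent(report, "", "  ")
	fmt.Println(string(out))
	// {
	//   "ContainersDeleted": [
	//     "4a7f7eebae0f"
	//   ],
	//   "SpaceReclaimed": 212992
	// }
}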

+ 1 - 0
cli/command/container/cmd.go

@@ -44,6 +44,7 @@ func NewContainerCommand(dockerCli *command.DockerCli) *cobra.Command {
 		NewWaitCommand(dockerCli),
 		newListCommand(dockerCli),
 		newInspectCommand(dockerCli),
+		NewPruneCommand(dockerCli),
 	)
 	return cmd
 }

+ 74 - 0
cli/command/container/prune.go

@@ -0,0 +1,74 @@
+package container
+
+import (
+	"fmt"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/cli"
+	"github.com/docker/docker/cli/command"
+	units "github.com/docker/go-units"
+	"github.com/spf13/cobra"
+)
+
+type pruneOptions struct {
+	force bool
+}
+
+// NewPruneCommand returns a new cobra prune command for containers
+func NewPruneCommand(dockerCli *command.DockerCli) *cobra.Command {
+	var opts pruneOptions
+
+	cmd := &cobra.Command{
+		Use:   "prune",
+		Short: "Remove all stopped containers",
+		Args:  cli.NoArgs,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			spaceReclaimed, output, err := runPrune(dockerCli, opts)
+			if err != nil {
+				return err
+			}
+			if output != "" {
+				fmt.Fprintln(dockerCli.Out(), output)
+			}
+			fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed)))
+			return nil
+		},
+	}
+
+	flags := cmd.Flags()
+	flags.BoolVarP(&opts.force, "force", "f", false, "Do not prompt for confirmation")
+
+	return cmd
+}
+
+const warning = `WARNING! This will remove all stopped containers.
+Are you sure you want to continue?`
+
+func runPrune(dockerCli *command.DockerCli, opts pruneOptions) (spaceReclaimed uint64, output string, err error) {
+	if !opts.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) {
+		return
+	}
+
+	report, err := dockerCli.Client().ContainersPrune(context.Background(), types.ContainersPruneConfig{})
+	if err != nil {
+		return
+	}
+
+	if len(report.ContainersDeleted) > 0 {
+		output = "Deleted Containers:\n"
+		for _, id := range report.ContainersDeleted {
+			output += id + "\n"
+		}
+		spaceReclaimed = report.SpaceReclaimed
+	}
+
+	return
+}
+
+// RunPrune calls the Container Prune API
+// This returns the amount of space reclaimed and a detailed output string
+func RunPrune(dockerCli *command.DockerCli) (uint64, string, error) {
+	return runPrune(dockerCli, pruneOptions{force: true})
+}

+ 1 - 2
cli/command/container/stats.go

@@ -15,7 +15,6 @@ import (
 	"github.com/docker/docker/cli"
 	"github.com/docker/docker/cli"
 	"github.com/docker/docker/cli/command"
 	"github.com/docker/docker/cli/command"
 	"github.com/docker/docker/cli/command/formatter"
 	"github.com/docker/docker/cli/command/formatter"
-	"github.com/docker/docker/cli/command/system"
 	"github.com/spf13/cobra"
 	"github.com/spf13/cobra"
 )
 )
 
 
@@ -110,7 +109,7 @@ func runStats(dockerCli *command.DockerCli, opts *statsOptions) error {
 		// retrieving the list of running containers to avoid a race where we
 		// would "miss" a creation.
 		started := make(chan struct{})
-		eh := system.InitEventHandler()
+		eh := command.InitEventHandler()
 		eh.Handle("create", func(e events.Message) {
 			if opts.all {
 				s := formatter.NewContainerStats(e.ID[:12], daemonOSType)

+ 1 - 1
cli/command/system/events_utils.go → cli/command/events_utils.go

@@ -1,4 +1,4 @@
-package system
+package command
 
 import (
 	"sync"

+ 14 - 0
cli/command/formatter/container.go

@@ -23,6 +23,7 @@ const (
 	statusHeader      = "STATUS"
 	portsHeader       = "PORTS"
 	mountsHeader      = "MOUNTS"
+	localVolumes      = "LOCAL VOLUMES"
 )
 
 // NewContainerFormat returns a Format for rendering using a Context
@@ -199,3 +200,16 @@ func (c *containerContext) Mounts() string {
 	}
 	return strings.Join(mounts, ",")
 }
+
+func (c *containerContext) LocalVolumes() string {
+	c.AddHeader(localVolumes)
+
+	count := 0
+	for _, m := range c.c.Mounts {
+		if m.Driver == "local" {
+			count++
+		}
+	}
+
+	return fmt.Sprintf("%d", count)
+}

+ 331 - 0
cli/command/formatter/disk_usage.go

@@ -0,0 +1,331 @@
+package formatter
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+	"text/template"
+
+	"github.com/docker/distribution/reference"
+	"github.com/docker/docker/api/types"
+	units "github.com/docker/go-units"
+)
+
+const (
+	defaultDiskUsageImageTableFormat     = "table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedSince}} ago\t{{.VirtualSize}}\t{{.SharedSize}}\t{{.UniqueSize}}\t{{.Containers}}"
+	defaultDiskUsageContainerTableFormat = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.LocalVolumes}}\t{{.Size}}\t{{.RunningFor}} ago\t{{.Status}}\t{{.Names}}"
+	defaultDiskUsageVolumeTableFormat    = "table {{.Name}}\t{{.Links}}\t{{.Size}}"
+	defaultDiskUsageTableFormat          = "table {{.Type}}\t{{.TotalCount}}\t{{.Active}}\t{{.Size}}\t{{.Reclaimable}}"
+
+	typeHeader        = "TYPE"
+	totalHeader       = "TOTAL"
+	activeHeader      = "ACTIVE"
+	reclaimableHeader = "RECLAIMABLE"
+	containersHeader  = "CONTAINERS"
+	sharedSizeHeader  = "SHARED SIZE"
+	uniqueSizeHeader  = "UNIQUE SIZE"
+)
+
+// DiskUsageContext contains disk-usage-specific information required by the formatter, and embeds a Context struct.
+type DiskUsageContext struct {
+	Context
+	Verbose    bool
+	LayersSize int64
+	Images     []*types.Image
+	Containers []*types.Container
+	Volumes    []*types.Volume
+}
+
+func (ctx *DiskUsageContext) startSubsection(format string) (*template.Template, error) {
+	ctx.buffer = bytes.NewBufferString("")
+	ctx.header = ""
+	ctx.Format = Format(format)
+	ctx.preFormat()
+
+	return ctx.parseFormat()
+}
+
+func (ctx *DiskUsageContext) Write() {
+	if ctx.Verbose == false {
+		ctx.buffer = bytes.NewBufferString("")
+		ctx.Format = defaultDiskUsageTableFormat
+		ctx.preFormat()
+
+		tmpl, err := ctx.parseFormat()
+		if err != nil {
+			return
+		}
+
+		err = ctx.contextFormat(tmpl, &diskUsageImagesContext{
+			totalSize: ctx.LayersSize,
+			images:    ctx.Images,
+		})
+		if err != nil {
+			return
+		}
+		err = ctx.contextFormat(tmpl, &diskUsageContainersContext{
+			containers: ctx.Containers,
+		})
+		if err != nil {
+			return
+		}
+
+		err = ctx.contextFormat(tmpl, &diskUsageVolumesContext{
+			volumes: ctx.Volumes,
+		})
+		if err != nil {
+			return
+		}
+
+		ctx.postFormat(tmpl, &diskUsageContainersContext{containers: []*types.Container{}})
+
+		return
+	}
+
+	// First images
+	tmpl, err := ctx.startSubsection(defaultDiskUsageImageTableFormat)
+	if err != nil {
+		return
+	}
+
+	ctx.Output.Write([]byte("Images space usage:\n\n"))
+	for _, i := range ctx.Images {
+		repo := "<none>"
+		tag := "<none>"
+		if len(i.RepoTags) > 0 && !isDangling(*i) {
+			// Only show the first tag
+			ref, err := reference.ParseNamed(i.RepoTags[0])
+			if err != nil {
+				continue
+			}
+			if nt, ok := ref.(reference.NamedTagged); ok {
+				repo = ref.Name()
+				tag = nt.Tag()
+			}
+		}
+
+		err = ctx.contextFormat(tmpl, &imageContext{
+			repo:  repo,
+			tag:   tag,
+			trunc: true,
+			i:     *i,
+		})
+		if err != nil {
+			return
+		}
+	}
+	ctx.postFormat(tmpl, &imageContext{})
+
+	// Now containers
+	ctx.Output.Write([]byte("\nContainers space usage:\n\n"))
+	tmpl, err = ctx.startSubsection(defaultDiskUsageContainerTableFormat)
+	if err != nil {
+		return
+	}
+	for _, c := range ctx.Containers {
+		// Don't display the virtual size
+		c.SizeRootFs = 0
+		err = ctx.contextFormat(tmpl, &containerContext{
+			trunc: true,
+			c:     *c,
+		})
+		if err != nil {
+			return
+		}
+	}
+	ctx.postFormat(tmpl, &containerContext{})
+
+	// And volumes
+	ctx.Output.Write([]byte("\nLocal Volumes space usage:\n\n"))
+	tmpl, err = ctx.startSubsection(defaultDiskUsageVolumeTableFormat)
+	if err != nil {
+		return
+	}
+	for _, v := range ctx.Volumes {
+		err = ctx.contextFormat(tmpl, &volumeContext{
+			v: *v,
+		})
+		if err != nil {
+			return
+		}
+	}
+	ctx.postFormat(tmpl, &volumeContext{v: types.Volume{}})
+}
+
+type diskUsageImagesContext struct {
+	HeaderContext
+	totalSize int64
+	images    []*types.Image
+}
+
+func (c *diskUsageImagesContext) Type() string {
+	c.AddHeader(typeHeader)
+	return "Images"
+}
+
+func (c *diskUsageImagesContext) TotalCount() string {
+	c.AddHeader(totalHeader)
+	return fmt.Sprintf("%d", len(c.images))
+}
+
+func (c *diskUsageImagesContext) Active() string {
+	c.AddHeader(activeHeader)
+	used := 0
+	for _, i := range c.images {
+		if i.Containers > 0 {
+			used++
+		}
+	}
+
+	return fmt.Sprintf("%d", used)
+}
+
+func (c *diskUsageImagesContext) Size() string {
+	c.AddHeader(sizeHeader)
+	return units.HumanSize(float64(c.totalSize))
+
+}
+
+func (c *diskUsageImagesContext) Reclaimable() string {
+	var used int64
+
+	c.AddHeader(reclaimableHeader)
+	for _, i := range c.images {
+		if i.Containers != 0 {
+			used += i.Size
+		}
+	}
+
+	reclaimable := c.totalSize - used
+	if c.totalSize > 0 {
+		return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(reclaimable)), (reclaimable*100)/c.totalSize)
+	}
+	return fmt.Sprintf("%s", units.HumanSize(float64(reclaimable)))
+}
+
+type diskUsageContainersContext struct {
+	HeaderContext
+	verbose    bool
+	containers []*types.Container
+}
+
+func (c *diskUsageContainersContext) Type() string {
+	c.AddHeader(typeHeader)
+	return "Containers"
+}
+
+func (c *diskUsageContainersContext) TotalCount() string {
+	c.AddHeader(totalHeader)
+	return fmt.Sprintf("%d", len(c.containers))
+}
+
+func (c *diskUsageContainersContext) isActive(container types.Container) bool {
+	return strings.Contains(container.State, "running") ||
+		strings.Contains(container.State, "paused") ||
+		strings.Contains(container.State, "restarting")
+}
+
+func (c *diskUsageContainersContext) Active() string {
+	c.AddHeader(activeHeader)
+	used := 0
+	for _, container := range c.containers {
+		if c.isActive(*container) {
+			used++
+		}
+	}
+
+	return fmt.Sprintf("%d", used)
+}
+
+func (c *diskUsageContainersContext) Size() string {
+	var size int64
+
+	c.AddHeader(sizeHeader)
+	for _, container := range c.containers {
+		size += container.SizeRw
+	}
+
+	return units.HumanSize(float64(size))
+}
+
+func (c *diskUsageContainersContext) Reclaimable() string {
+	var reclaimable int64
+	var totalSize int64
+
+	c.AddHeader(reclaimableHeader)
+	for _, container := range c.containers {
+		if !c.isActive(*container) {
+			reclaimable += container.SizeRw
+		}
+		totalSize += container.SizeRw
+	}
+
+	if totalSize > 0 {
+		return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(reclaimable)), (reclaimable*100)/totalSize)
+	}
+
+	return fmt.Sprintf("%s", units.HumanSize(float64(reclaimable)))
+}
+
+type diskUsageVolumesContext struct {
+	HeaderContext
+	verbose bool
+	volumes []*types.Volume
+}
+
+func (c *diskUsageVolumesContext) Type() string {
+	c.AddHeader(typeHeader)
+	return "Local Volumes"
+}
+
+func (c *diskUsageVolumesContext) TotalCount() string {
+	c.AddHeader(totalHeader)
+	return fmt.Sprintf("%d", len(c.volumes))
+}
+
+func (c *diskUsageVolumesContext) Active() string {
+	c.AddHeader(activeHeader)
+
+	used := 0
+	for _, v := range c.volumes {
+		if v.RefCount > 0 {
+			used++
+		}
+	}
+
+	return fmt.Sprintf("%d", used)
+}
+
+func (c *diskUsageVolumesContext) Size() string {
+	var size int64
+
+	c.AddHeader(sizeHeader)
+	for _, v := range c.volumes {
+		if v.Size != -1 {
+			size += v.Size
+		}
+	}
+
+	return units.HumanSize(float64(size))
+}
+
+func (c *diskUsageVolumesContext) Reclaimable() string {
+	var reclaimable int64
+	var totalSize int64
+
+	c.AddHeader(reclaimableHeader)
+	for _, v := range c.volumes {
+		if v.Size != -1 {
+			if v.RefCount == 0 {
+				reclaimable += v.Size
+			}
+			totalSize += v.Size
+		}
+	}
+
+	if totalSize > 0 {
+		return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(reclaimable)), (reclaimable*100)/totalSize)
+	}
+
+	return fmt.Sprintf("%s", units.HumanSize(float64(reclaimable)))
+}

+ 32 - 1
cli/command/formatter/image.go

@@ -1,6 +1,7 @@
 package formatter
 
 import (
+	"fmt"
 	"time"
 
 	"github.com/docker/docker/api/types"
@@ -225,5 +226,35 @@ func (c *imageContext) CreatedAt() string
 
 func (c *imageContext) Size() string {
 	c.AddHeader(sizeHeader)
-	return units.HumanSizeWithPrecision(float64(c.i.Size), 3)
+	//NOTE: For backward compatibility we need to return VirtualSize
+	return units.HumanSizeWithPrecision(float64(c.i.VirtualSize), 3)
+}
+
+func (c *imageContext) Containers() string {
+	c.AddHeader(containersHeader)
+	if c.i.Containers == -1 {
+		return "N/A"
+	}
+	return fmt.Sprintf("%d", c.i.Containers)
+}
+
+func (c *imageContext) VirtualSize() string {
+	c.AddHeader(sizeHeader)
+	return units.HumanSize(float64(c.i.VirtualSize))
+}
+
+func (c *imageContext) SharedSize() string {
+	c.AddHeader(sharedSizeHeader)
+	if c.i.SharedSize == -1 {
+		return "N/A"
+	}
+	return units.HumanSize(float64(c.i.SharedSize))
+}
+
+func (c *imageContext) UniqueSize() string {
+	c.AddHeader(uniqueSizeHeader)
+	if c.i.Size == -1 {
+		return "N/A"
+	}
+	return units.HumanSize(float64(c.i.Size))
 }

+ 1 - 1
cli/command/formatter/image_test.go

@@ -32,7 +32,7 @@ func TestImageContext(t *testing.T) {
 			trunc: false,
 		}, imageID, imageIDHeader, ctx.ID},
 		{imageContext{
-			i:     types.Image{Size: 10},
+			i:     types.Image{Size: 10, VirtualSize: 10},
 			trunc: true,
 		}, "10 B", sizeHeader, ctx.Size},
 		{imageContext{

+ 18 - 0
cli/command/formatter/volume.go

@@ -5,6 +5,7 @@ import (
 	"strings"
 	"strings"
 
 
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types"
+	units "github.com/docker/go-units"
 )
 )
 
 
 const (
 const (
@@ -12,6 +13,7 @@ const (
 	defaultVolumeTableFormat = "table {{.Driver}}\t{{.Name}}"
 
 	mountpointHeader = "MOUNTPOINT"
+	linksHeader      = "LINKS"
 	// Status header ?
 )
 
@@ -96,3 +98,19 @@ func (c *volumeContext) Label(name string) string {
 	}
 	return c.v.Labels[name]
 }
+
+func (c *volumeContext) Links() string {
+	c.AddHeader(linksHeader)
+	if c.v.Size == -1 {
+		return "N/A"
+	}
+	return fmt.Sprintf("%d", c.v.RefCount)
+}
+
+func (c *volumeContext) Size() string {
+	c.AddHeader(sizeHeader)
+	if c.v.Size == -1 {
+		return "N/A"
+	}
+	return units.HumanSize(float64(c.v.Size))
+}

+ 2 - 0
cli/command/image/cmd.go

@@ -31,6 +31,8 @@ func NewImageCommand(dockerCli *command.DockerCli) *cobra.Command {
 		newListCommand(dockerCli),
 		newRemoveCommand(dockerCli),
 		newInspectCommand(dockerCli),
+		NewPruneCommand(dockerCli),
 	)
+
 	return cmd
 }

+ 90 - 0
cli/command/image/prune.go

@@ -0,0 +1,90 @@
+package image
+
+import (
+	"fmt"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/cli"
+	"github.com/docker/docker/cli/command"
+	units "github.com/docker/go-units"
+	"github.com/spf13/cobra"
+)
+
+type pruneOptions struct {
+	force bool
+	all   bool
+}
+
+// NewPruneCommand returns a new cobra prune command for images
+func NewPruneCommand(dockerCli *command.DockerCli) *cobra.Command {
+	var opts pruneOptions
+
+	cmd := &cobra.Command{
+		Use:   "prune",
+		Short: "Remove unused images",
+		Args:  cli.NoArgs,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			spaceReclaimed, output, err := runPrune(dockerCli, opts)
+			if err != nil {
+				return err
+			}
+			if output != "" {
+				fmt.Fprintln(dockerCli.Out(), output)
+			}
+			fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed)))
+			return nil
+		},
+	}
+
+	flags := cmd.Flags()
+	flags.BoolVarP(&opts.force, "force", "f", false, "Do not prompt for confirmation")
+	flags.BoolVarP(&opts.all, "all", "a", false, "Remove all unused images, not just dangling ones")
+
+	return cmd
+}
+
+const (
+	allImageWarning = `WARNING! This will remove all images without at least one container associated to them.
+Are you sure you want to continue?`
+	danglingWarning = `WARNING! This will remove all dangling images.
+Are you sure you want to continue?`
+)
+
+func runPrune(dockerCli *command.DockerCli, opts pruneOptions) (spaceReclaimed uint64, output string, err error) {
+	warning := danglingWarning
+	if opts.all {
+		warning = allImageWarning
+	}
+	if !opts.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) {
+		return
+	}
+
+	report, err := dockerCli.Client().ImagesPrune(context.Background(), types.ImagesPruneConfig{
+		DanglingOnly: !opts.all,
+	})
+	if err != nil {
+		return
+	}
+
+	if len(report.ImagesDeleted) > 0 {
+		output = "Deleted Images:\n"
+		for _, st := range report.ImagesDeleted {
+			if st.Untagged != "" {
+				output += fmt.Sprintln("untagged:", st.Untagged)
+			} else {
+				output += fmt.Sprintln("deleted:", st.Deleted)
+			}
+		}
+		spaceReclaimed = report.SpaceReclaimed
+	}
+
+	return
+}
+
+// RunPrune calls the Image Prune API
+// This returns the amount of space reclaimed and a detailed output string
+func RunPrune(dockerCli *command.DockerCli, all bool) (uint64, string, error) {
+	return runPrune(dockerCli, pruneOptions{force: true, all: all})
+}

+ 39 - 0
cli/command/prune/prune.go

@@ -0,0 +1,39 @@
+package prune
+
+import (
+	"github.com/docker/docker/cli/command"
+	"github.com/docker/docker/cli/command/container"
+	"github.com/docker/docker/cli/command/image"
+	"github.com/docker/docker/cli/command/volume"
+	"github.com/spf13/cobra"
+)
+
+// NewContainerPruneCommand returns a cobra prune command for containers
+func NewContainerPruneCommand(dockerCli *command.DockerCli) *cobra.Command {
+	return container.NewPruneCommand(dockerCli)
+}
+
+// NewVolumePruneCommand returns a cobra prune command for volumes
+func NewVolumePruneCommand(dockerCli *command.DockerCli) *cobra.Command {
+	return volume.NewPruneCommand(dockerCli)
+}
+
+// NewImagePruneCommand returns a cobra prune command for images
+func NewImagePruneCommand(dockerCli *command.DockerCli) *cobra.Command {
+	return image.NewPruneCommand(dockerCli)
+}
+
+// RunContainerPrune executes a prune command for containers
+func RunContainerPrune(dockerCli *command.DockerCli) (uint64, string, error) {
+	return container.RunPrune(dockerCli)
+}
+
+// RunVolumePrune executes a prune command for volumes
+func RunVolumePrune(dockerCli *command.DockerCli) (uint64, string, error) {
+	return volume.RunPrune(dockerCli)
+}
+
+// RunImagePrune executes a prune command for images
+func RunImagePrune(dockerCli *command.DockerCli, all bool) (uint64, string, error) {
+	return image.RunPrune(dockerCli, all)
+}

+ 2 - 0
cli/command/system/cmd.go

@@ -22,6 +22,8 @@ func NewSystemCommand(dockerCli *command.DockerCli) *cobra.Command {
 	cmd.AddCommand(
 		NewEventsCommand(dockerCli),
 		NewInfoCommand(dockerCli),
+		NewDiskUsageCommand(dockerCli),
+		NewPruneCommand(dockerCli),
 	)
 	return cmd
 }

+ 55 - 0
cli/command/system/df.go

@@ -0,0 +1,55 @@
+package system
+
+import (
+	"github.com/docker/docker/cli"
+	"github.com/docker/docker/cli/command"
+	"github.com/docker/docker/cli/command/formatter"
+	"github.com/spf13/cobra"
+	"golang.org/x/net/context"
+)
+
+type diskUsageOptions struct {
+	verbose bool
+}
+
+// NewDiskUsageCommand creates a new cobra.Command for `docker system df`
+func NewDiskUsageCommand(dockerCli *command.DockerCli) *cobra.Command {
+	var opts diskUsageOptions
+
+	cmd := &cobra.Command{
+		Use:   "df [OPTIONS]",
+		Short: "Show docker disk usage",
+		Args:  cli.RequiresMaxArgs(1),
+		RunE: func(cmd *cobra.Command, args []string) error {
+			return runDiskUsage(dockerCli, opts)
+		},
+	}
+
+	flags := cmd.Flags()
+
+	flags.BoolVarP(&opts.verbose, "verbose", "v", false, "Show detailed information on space usage")
+
+	return cmd
+}
+
+func runDiskUsage(dockerCli *command.DockerCli, opts diskUsageOptions) error {
+	du, err := dockerCli.Client().DiskUsage(context.Background())
+	if err != nil {
+		return err
+	}
+
+	duCtx := formatter.DiskUsageContext{
+		Context: formatter.Context{
+			Output: dockerCli.Out(),
+		},
+		LayersSize: du.LayersSize,
+		Images:     du.Images,
+		Containers: du.Containers,
+		Volumes:    du.Volumes,
+		Verbose:    opts.verbose,
+	}
+
+	duCtx.Write()
+
+	return nil
+}

+ 90 - 0
cli/command/system/prune.go

@@ -0,0 +1,90 @@
+package system
+
+import (
+	"fmt"
+
+	"github.com/docker/docker/cli"
+	"github.com/docker/docker/cli/command"
+	"github.com/docker/docker/cli/command/prune"
+	units "github.com/docker/go-units"
+	"github.com/spf13/cobra"
+)
+
+type pruneOptions struct {
+	force bool
+	all   bool
+}
+
+// NewPruneCommand creates a new cobra.Command for `docker system prune`
+func NewPruneCommand(dockerCli *command.DockerCli) *cobra.Command {
+	var opts pruneOptions
+
+	cmd := &cobra.Command{
+		Use:   "prune [COMMAND]",
+		Short: "Remove unused data.",
+		Args:  cli.NoArgs,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			return runPrune(dockerCli, opts)
+		},
+	}
+
+	flags := cmd.Flags()
+	flags.BoolVarP(&opts.force, "force", "f", false, "Do not prompt for confirmation")
+	flags.BoolVarP(&opts.all, "all", "a", false, "Remove all unused images, not just dangling ones")
+
+	return cmd
+}
+
+const (
+	warning = `WARNING! This will remove:
+	- all stopped containers
+	- all volumes not used by at least one container
+	%s
+Are you sure you want to continue?`
+
+	danglingImageDesc = "- all dangling images"
+	allImageDesc      = `- all images without at least one container associated to them`
+)
+
+func runPrune(dockerCli *command.DockerCli, opts pruneOptions) error {
+	var message string
+
+	if opts.all {
+		message = fmt.Sprintf(warning, allImageDesc)
+	} else {
+		message = fmt.Sprintf(warning, danglingImageDesc)
+	}
+
+	if !opts.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), message) {
+		return nil
+	}
+
+	var spaceReclaimed uint64
+
+	for _, pruneFn := range []func(dockerCli *command.DockerCli) (uint64, string, error){
+		prune.RunContainerPrune,
+		prune.RunVolumePrune,
+	} {
+		spc, output, err := pruneFn(dockerCli)
+		if err != nil {
+			return err
+		}
+		if spc > 0 {
+			spaceReclaimed += spc
+			fmt.Fprintln(dockerCli.Out(), output)
+		}
+	}
+
+	spc, output, err := prune.RunImagePrune(dockerCli, opts.all)
+	if err != nil {
+		return err
+	}
+	if spc > 0 {
+		spaceReclaimed += spc
+		fmt.Fprintln(dockerCli.Out(), output)
+	}
+
+	fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed)))
+
+	return nil
+}

+ 22 - 0
cli/command/utils.go

@@ -57,3 +57,25 @@ func PrettyPrint(i interface{}) string {
 		return capitalizeFirst(fmt.Sprintf("%s", t))
 	}
 }
+
+// PromptForConfirmation requests and checks confirmation from the user.
+// It displays the provided message followed by ' [y/N] '. If the user
+// inputs 'y' or 'Y' it returns true, otherwise false. If no message is
+// provided, "Are you sure you want to proceed? [y/N] " is used instead.
+func PromptForConfirmation(ins *InStream, outs *OutStream, message string) bool {
+	if message == "" {
+		message = "Are you sure you want to proceed?"
+	}
+	}
+	message += " [y/N] "
+
+	fmt.Fprintf(outs, message)
+
+	answer := ""
+	n, _ := fmt.Fscan(ins, &answer)
+	if n != 1 || (answer != "y" && answer != "Y") {
+		return false
+	}
+
+	return true
+}

+ 1 - 0
cli/command/volume/cmd.go

@@ -25,6 +25,7 @@ func NewVolumeCommand(dockerCli *command.DockerCli) *cobra.Command {
 		newInspectCommand(dockerCli),
 		newListCommand(dockerCli),
 		newRemoveCommand(dockerCli),
+		NewPruneCommand(dockerCli),
 	)
 	return cmd
 }

+ 74 - 0
cli/command/volume/prune.go

@@ -0,0 +1,74 @@
+package volume
+
+import (
+	"fmt"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/cli"
+	"github.com/docker/docker/cli/command"
+	units "github.com/docker/go-units"
+	"github.com/spf13/cobra"
+)
+
+type pruneOptions struct {
+	force bool
+}
+
+// NewPruneCommand returns a new cobra prune command for volumes
+func NewPruneCommand(dockerCli *command.DockerCli) *cobra.Command {
+	var opts pruneOptions
+
+	cmd := &cobra.Command{
+		Use:   "prune",
+		Short: "Remove all unused volumes",
+		Args:  cli.NoArgs,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			spaceReclaimed, output, err := runPrune(dockerCli, opts)
+			if err != nil {
+				return err
+			}
+			if output != "" {
+				fmt.Fprintln(dockerCli.Out(), output)
+			}
+			fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed)))
+			return nil
+		},
+	}
+
+	flags := cmd.Flags()
+	flags.BoolVarP(&opts.force, "force", "f", false, "Do not prompt for confirmation")
+
+	return cmd
+}
+
+const warning = `WARNING! This will remove all volumes not used by at least one container.
+Are you sure you want to continue?`
+
+func runPrune(dockerCli *command.DockerCli, opts pruneOptions) (spaceReclaimed uint64, output string, err error) {
+	if !opts.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) {
+		return
+	}
+
+	report, err := dockerCli.Client().VolumesPrune(context.Background(), types.VolumesPruneConfig{})
+	if err != nil {
+		return
+	}
+
+	if len(report.VolumesDeleted) > 0 {
+		output = "Deleted Volumes:\n"
+		for _, id := range report.VolumesDeleted {
+			output += id + "\n"
+		}
+		spaceReclaimed = report.SpaceReclaimed
+	}
+
+	return
+}
+
+// RunPrune calls the Volume Prune API
+// This returns the amount of space reclaimed and a detailed output string
+func RunPrune(dockerCli *command.DockerCli) (uint64, string, error) {
+	return runPrune(dockerCli, pruneOptions{force: true})
+}

+ 26 - 0
client/container_prune.go

@@ -0,0 +1,26 @@
+package client
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/docker/docker/api/types"
+	"golang.org/x/net/context"
+)
+
+// ContainersPrune requests the daemon to delete unused data
+func (cli *Client) ContainersPrune(ctx context.Context, cfg types.ContainersPruneConfig) (types.ContainersPruneReport, error) {
+	var report types.ContainersPruneReport
+
+	serverResp, err := cli.post(ctx, "/containers/prune", nil, cfg, nil)
+	if err != nil {
+		return report, err
+	}
+	defer ensureReaderClosed(serverResp)
+
+	if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil {
+		return report, fmt.Errorf("Error retrieving containers prune report: %v", err)
+	}
+
+	return report, nil
+}

+ 26 - 0
client/disk_usage.go

@@ -0,0 +1,26 @@
+package client
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/docker/docker/api/types"
+	"golang.org/x/net/context"
+)
+
+// DiskUsage requests the current data usage from the daemon
+func (cli *Client) DiskUsage(ctx context.Context) (types.DiskUsage, error) {
+	var du types.DiskUsage
+
+	serverResp, err := cli.get(ctx, "/system/df", nil, nil)
+	if err != nil {
+		return du, err
+	}
+	defer ensureReaderClosed(serverResp)
+
+	if err := json.NewDecoder(serverResp.body).Decode(&du); err != nil {
+		return du, fmt.Errorf("Error retrieving disk usage: %v", err)
+	}
+
+	return du, nil
+}

+ 26 - 0
client/image_prune.go

@@ -0,0 +1,26 @@
+package client
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/docker/docker/api/types"
+	"golang.org/x/net/context"
+)
+
+// ImagesPrune requests the daemon to delete unused data
+func (cli *Client) ImagesPrune(ctx context.Context, cfg types.ImagesPruneConfig) (types.ImagesPruneReport, error) {
+	var report types.ImagesPruneReport
+
+	serverResp, err := cli.post(ctx, "/images/prune", nil, cfg, nil)
+	if err != nil {
+		return report, err
+	}
+	defer ensureReaderClosed(serverResp)
+
+	if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil {
+		return report, fmt.Errorf("Error retrieving images prune report: %v", err)
+	}
+
+	return report, nil
+}

+ 4 - 0
client/interface.go

@@ -61,6 +61,7 @@ type ContainerAPIClient interface {
 	ContainerWait(ctx context.Context, container string) (int, error)
 	CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error)
 	CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error
+	ContainersPrune(ctx context.Context, cfg types.ContainersPruneConfig) (types.ContainersPruneReport, error)
 }
 
 // ImageAPIClient defines API client methods for the images
@@ -78,6 +79,7 @@ type ImageAPIClient interface {
 	ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error)
 	ImageSave(ctx context.Context, images []string) (io.ReadCloser, error)
 	ImageTag(ctx context.Context, image, ref string) error
+	ImagesPrune(ctx context.Context, cfg types.ImagesPruneConfig) (types.ImagesPruneReport, error)
 }
 
 // NetworkAPIClient defines API client methods for the networks
@@ -124,6 +126,7 @@ type SystemAPIClient interface {
 	Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error)
 	Info(ctx context.Context) (types.Info, error)
 	RegistryLogin(ctx context.Context, auth types.AuthConfig) (types.AuthResponse, error)
+	DiskUsage(ctx context.Context) (types.DiskUsage, error)
 }
 
 // VolumeAPIClient defines API client methods for the volumes
@@ -133,4 +136,5 @@ type VolumeAPIClient interface {
 	VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error)
 	VolumeList(ctx context.Context, filter filters.Args) (types.VolumesListResponse, error)
 	VolumeRemove(ctx context.Context, volumeID string, force bool) error
+	VolumesPrune(ctx context.Context, cfg types.VolumesPruneConfig) (types.VolumesPruneReport, error)
 }

+ 26 - 0
client/volume_prune.go

@@ -0,0 +1,26 @@
+package client
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/docker/docker/api/types"
+	"golang.org/x/net/context"
+)
+
+// VolumesPrune requests the daemon to delete unused data
+func (cli *Client) VolumesPrune(ctx context.Context, cfg types.VolumesPruneConfig) (types.VolumesPruneReport, error) {
+	var report types.VolumesPruneReport
+
+	serverResp, err := cli.post(ctx, "/volumes/prune", nil, cfg, nil)
+	if err != nil {
+		return report, err
+	}
+	defer ensureReaderClosed(serverResp)
+
+	if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil {
+		return report, fmt.Errorf("Error retrieving volumes prune report: %v", err)
+	}
+
+	return report, nil
+}
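
Editor's note: a hedged end-to-end sketch (not part of the commit) driving the new client methods. The method and config signatures come from the client files above; client.NewEnvClient and the surrounding plumbing are assumptions about the rest of the client package at this point in history.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient() // assumed constructor from the existing client package
	if err != nil {
		panic(err)
	}
	ctx := context.Background()

	// Reclaim space from stopped containers, dangling images and unused volumes.
	cRep, _ := cli.ContainersPrune(ctx, types.ContainersPruneConfig{})
	iRep, _ := cli.ImagesPrune(ctx, types.ImagesPruneConfig{DanglingOnly: true})
	vRep, _ := cli.VolumesPrune(ctx, types.VolumesPruneConfig{})
	fmt.Println("reclaimed bytes:", cRep.SpaceReclaimed+iRep.SpaceReclaimed+vRep.SpaceReclaimed)

	// Then inspect what remains, mirroring `docker system df`.
	du, err := cli.DiskUsage(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d images, %d containers, %d local volumes, layers total %d bytes\n",
		len(du.Images), len(du.Containers), len(du.Volumes), du.LayersSize)
}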

+ 100 - 0
daemon/disk_usage.go

@@ -0,0 +1,100 @@
+package daemon
+
+import (
+	"fmt"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/layer"
+	"github.com/docker/docker/pkg/directory"
+	"github.com/docker/docker/volume"
+)
+
+func (daemon *Daemon) getLayerRefs() map[layer.ChainID]int {
+	tmpImages := daemon.imageStore.Map()
+	layerRefs := map[layer.ChainID]int{}
+	for id, img := range tmpImages {
+		dgst := digest.Digest(id)
+		if len(daemon.referenceStore.References(dgst)) == 0 && len(daemon.imageStore.Children(id)) != 0 {
+			continue
+		}
+
+		rootFS := *img.RootFS
+		rootFS.DiffIDs = nil
+		for _, id := range img.RootFS.DiffIDs {
+			rootFS.Append(id)
+			chid := rootFS.ChainID()
+			layerRefs[chid]++
+		}
+	}
+
+	return layerRefs
+}
+
+// SystemDiskUsage returns information about the daemon data disk usage
+func (daemon *Daemon) SystemDiskUsage() (*types.DiskUsage, error) {
+	// Retrieve container list
+	allContainers, err := daemon.Containers(&types.ContainerListOptions{
+		Size: true,
+		All:  true,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("failed to retrieve container list: %v", err)
+	}
+
+	// Get all top images with extra attributes
+	allImages, err := daemon.Images("", "", false, true)
+	if err != nil {
+		return nil, fmt.Errorf("failed to retrieve image list: %v", err)
+	}
+
+	// Get all local volumes
+	allVolumes := []*types.Volume{}
+	getLocalVols := func(v volume.Volume) error {
+		name := v.Name()
+		refs := daemon.volumes.Refs(v)
+
+		tv := volumeToAPIType(v)
+		tv.RefCount = len(refs)
+		sz, err := directory.Size(v.Path())
+		if err != nil {
+			logrus.Warnf("failed to determine size of volume %v", name)
+			sz = -1
+		}
+		tv.Size = sz
+		allVolumes = append(allVolumes, tv)
+
+		return nil
+	}
+
+	err = daemon.traverseLocalVolumes(getLocalVols)
+	if err != nil {
+		return nil, err
+	}
+
+	// Get total layers size on disk
+	layerRefs := daemon.getLayerRefs()
+	allLayers := daemon.layerStore.Map()
+	var allLayersSize int64
+	for _, l := range allLayers {
+		size, err := l.DiffSize()
+		if err == nil {
+			if _, ok := layerRefs[l.ChainID()]; ok {
+				allLayersSize += size
+			} else {
+				logrus.Warnf("found leaked image layer %v", l.ChainID())
+			}
+		} else {
+			logrus.Warnf("failed to get diff size for layer %v", l.ChainID())
+		}
+
+	}
+
+	return &types.DiskUsage{
+		LayersSize: allLayersSize,
+		Containers: allContainers,
+		Volumes:    allVolumes,
+		Images:     allImages,
+	}, nil
+}

+ 68 - 4
daemon/images.go

@@ -7,6 +7,7 @@ import (
 
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/filters"
+	"github.com/docker/docker/container"
 	"github.com/docker/docker/image"
 	"github.com/docker/docker/layer"
 	"github.com/docker/docker/reference"
@@ -37,7 +38,7 @@ func (daemon *Daemon) Map() map[image.ID]*image.Image {
 // filter is a shell glob string applied to repository names. The argument
 // named all controls whether all images in the graph are filtered, or just
 // the heads.
-func (daemon *Daemon) Images(filterArgs, filter string, all bool) ([]*types.Image, error) {
+func (daemon *Daemon) Images(filterArgs, filter string, all bool, withExtraAttrs bool) ([]*types.Image, error) {
 	var (
 		allImages    map[image.ID]*image.Image
 		err          error
@@ -83,6 +84,10 @@ func (daemon *Daemon) Images(filterArgs, filter string, all bool) ([]*types.Imag
 	}
 
 	images := []*types.Image{}
+	var imagesMap map[*image.Image]*types.Image
+	var layerRefs map[layer.ChainID]int
+	var allLayers map[layer.ChainID]layer.Layer
+	var allContainers []*container.Container
 
 	var filterTagged bool
 	if filter != "" {
@@ -171,21 +176,80 @@ func (daemon *Daemon) Images(filterArgs, filter string, all bool) ([]*types.Imag
 			continue
 		}
 
+		if withExtraAttrs {
+			// lazily init variables
+			if imagesMap == nil {
+				allContainers = daemon.List()
+				allLayers = daemon.layerStore.Map()
+				imagesMap = make(map[*image.Image]*types.Image)
+				layerRefs = make(map[layer.ChainID]int)
+			}
+
+			// Get container count
+			newImage.Containers = 0
+			for _, c := range allContainers {
+				if c.ImageID == id {
+					newImage.Containers++
+				}
+			}
+
+			// count layer references
+			rootFS := *img.RootFS
+			rootFS.DiffIDs = nil
+			for _, id := range img.RootFS.DiffIDs {
+				rootFS.Append(id)
+				chid := rootFS.ChainID()
+				layerRefs[chid]++
+				if _, ok := allLayers[chid]; !ok {
+					return nil, fmt.Errorf("layer %v was not found (corruption?)", chid)
+				}
+			}
+			imagesMap[img] = newImage
+		}
+
 		images = append(images, newImage)
 	}
 
+	if withExtraAttrs {
+		// Get Shared and Unique sizes
+		for img, newImage := range imagesMap {
+			rootFS := *img.RootFS
+			rootFS.DiffIDs = nil
+
+			newImage.Size = 0
+			newImage.SharedSize = 0
+			for _, id := range img.RootFS.DiffIDs {
+				rootFS.Append(id)
+				chid := rootFS.ChainID()
+
+				diffSize, err := allLayers[chid].DiffSize()
+				if err != nil {
+					return nil, err
+				}
+
+				if layerRefs[chid] > 1 {
+					newImage.SharedSize += diffSize
+				} else {
+					newImage.Size += diffSize
+				}
+			}
+		}
+	}
+
 	sort.Sort(sort.Reverse(byCreated(images)))
 
 	return images, nil
 }
 
-func newImage(image *image.Image, size int64) *types.Image {
+func newImage(image *image.Image, virtualSize int64) *types.Image {
 	newImage := new(types.Image)
 	newImage.ParentID = image.Parent.String()
 	newImage.ID = image.ID().String()
 	newImage.Created = image.Created.Unix()
-	newImage.Size = size
-	newImage.VirtualSize = size
+	newImage.Size = -1
+	newImage.VirtualSize = virtualSize
+	newImage.SharedSize = -1
+	newImage.Containers = -1
 	if image.Config != nil {
 		newImage.Labels = image.Config.Labels
 	}

+ 152 - 0
daemon/prune.go

@@ -0,0 +1,152 @@
+package daemon
+
+import (
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/layer"
+	"github.com/docker/docker/pkg/directory"
+	"github.com/docker/docker/reference"
+	"github.com/docker/docker/volume"
+)
+
+// ContainersPrune removes unused containers
+func (daemon *Daemon) ContainersPrune(config *types.ContainersPruneConfig) (*types.ContainersPruneReport, error) {
+	rep := &types.ContainersPruneReport{}
+
+	allContainers := daemon.List()
+	for _, c := range allContainers {
+		if !c.IsRunning() {
+			cSize, _ := daemon.getSize(c)
+			// TODO: sets RmLink to true?
+			err := daemon.ContainerRm(c.ID, &types.ContainerRmConfig{})
+			if err != nil {
+				logrus.Warnf("failed to prune container %s: %v", c.ID, err)
+				continue
+			}
+			if cSize > 0 {
+				rep.SpaceReclaimed += uint64(cSize)
+			}
+			rep.ContainersDeleted = append(rep.ContainersDeleted, c.ID)
+		}
+	}
+
+	return rep, nil
+}
+
+// VolumesPrune removes unused local volumes
+func (daemon *Daemon) VolumesPrune(config *types.VolumesPruneConfig) (*types.VolumesPruneReport, error) {
+	rep := &types.VolumesPruneReport{}
+
+	pruneVols := func(v volume.Volume) error {
+		name := v.Name()
+		refs := daemon.volumes.Refs(v)
+
+		if len(refs) == 0 {
+			vSize, err := directory.Size(v.Path())
+			if err != nil {
+				logrus.Warnf("could not determine size of volume %s: %v", name, err)
+			}
+			err = daemon.volumes.Remove(v)
+			if err != nil {
+				logrus.Warnf("could not remove volume %s: %v", name, err)
+				return nil
+			}
+			rep.SpaceReclaimed += uint64(vSize)
+			rep.VolumesDeleted = append(rep.VolumesDeleted, name)
+		}
+
+		return nil
+	}
+
+	err := daemon.traverseLocalVolumes(pruneVols)
+
+	return rep, err
+}
+
+// ImagesPrune removes unused images
+func (daemon *Daemon) ImagesPrune(config *types.ImagesPruneConfig) (*types.ImagesPruneReport, error) {
+	rep := &types.ImagesPruneReport{}
+
+	var allImages map[image.ID]*image.Image
+	if config.DanglingOnly {
+		allImages = daemon.imageStore.Heads()
+	} else {
+		allImages = daemon.imageStore.Map()
+	}
+	allContainers := daemon.List()
+	imageRefs := map[string]bool{}
+	for _, c := range allContainers {
+		imageRefs[c.ID] = true
+	}
+
+	// Filter out intermediary images; allLayers is kept around to compute reclaimed space later
+	allLayers := daemon.layerStore.Map()
+	topImages := map[image.ID]*image.Image{}
+	for id, img := range allImages {
+		dgst := digest.Digest(id)
+		if len(daemon.referenceStore.References(dgst)) == 0 && len(daemon.imageStore.Children(id)) != 0 {
+			continue
+		}
+		topImages[id] = img
+	}
+
+	for id := range topImages {
+		dgst := digest.Digest(id)
+		hex := dgst.Hex()
+		if _, ok := imageRefs[hex]; ok {
+			continue
+		}
+
+		deletedImages := []types.ImageDelete{}
+		refs := daemon.referenceStore.References(dgst)
+		if len(refs) > 0 {
+			if config.DanglingOnly {
+				// Not a dangling image
+				continue
+			}
+
+			nrRefs := len(refs)
+			for _, ref := range refs {
+				// If nrRefs == 1, we have an image marked as myreponame:<none>
+				// i.e. the tag content was changed
+				if _, ok := ref.(reference.Canonical); ok && nrRefs > 1 {
+					continue
+				}
+				imgDel, err := daemon.ImageDelete(ref.String(), false, true)
+				if err != nil {
+					logrus.Warnf("could not delete reference %s: %v", ref.String(), err)
+					continue
+				}
+				deletedImages = append(deletedImages, imgDel...)
+			}
+		} else {
+			imgDel, err := daemon.ImageDelete(hex, false, true)
+			if err != nil {
+				logrus.Warnf("could not delete image %s: %v", hex, err)
+				continue
+			}
+			deletedImages = append(deletedImages, imgDel...)
+		}
+
+		rep.ImagesDeleted = append(rep.ImagesDeleted, deletedImages...)
+	}
+
+	// Compute how much space was freed
+	for _, d := range rep.ImagesDeleted {
+		if d.Deleted != "" {
+			chid := layer.ChainID(d.Deleted)
+			if l, ok := allLayers[chid]; ok {
+				diffSize, err := l.DiffSize()
+				if err != nil {
+					logrus.Warnf("failed to get layer %s size: %v", chid, err)
+					continue
+				}
+				rep.SpaceReclaimed += uint64(diffSize)
+			}
+		}
+	}
+
+	return rep, nil
+}
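
These three daemon methods back the new `POST /containers/prune`, `/images/prune` and `/volumes/prune` endpoints. A hedged client-side sketch of driving them programmatically, assuming the `Client` methods added in `client/*_prune.go` keep these names and accept the corresponding `*PruneConfig` structs from `api/types` (the exact signatures are an assumption here; `client/interface.go` has the authoritative ones):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

func main() {
	// NewEnvClient reads DOCKER_HOST and friends from the environment
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	ctx := context.Background()

	// remove stopped containers (assumed signature and config struct)
	cRep, err := cli.ContainersPrune(ctx, types.ContainersPruneConfig{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("removed %d containers, reclaimed %d bytes\n",
		len(cRep.ContainersDeleted), cRep.SpaceReclaimed)

	// remove dangling images only (assumed signature and config struct)
	iRep, err := cli.ImagesPrune(ctx, types.ImagesPruneConfig{DanglingOnly: true})
	if err != nil {
		panic(err)
	}
	fmt.Printf("deleted %d image entries, reclaimed %d bytes\n",
		len(iRep.ImagesDeleted), iRep.SpaceReclaimed)
}
```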

+ 32 - 2
daemon/volumes.go

@@ -7,12 +7,14 @@ import (
 	"path/filepath"
 	"path/filepath"
 	"strings"
 	"strings"
 
 
+	"github.com/Sirupsen/logrus"
 	dockererrors "github.com/docker/docker/api/errors"
 	dockererrors "github.com/docker/docker/api/errors"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types"
 	containertypes "github.com/docker/docker/api/types/container"
 	containertypes "github.com/docker/docker/api/types/container"
 	mounttypes "github.com/docker/docker/api/types/mount"
 	mounttypes "github.com/docker/docker/api/types/mount"
 	"github.com/docker/docker/container"
 	"github.com/docker/docker/container"
 	"github.com/docker/docker/volume"
 	"github.com/docker/docker/volume"
+	"github.com/docker/docker/volume/drivers"
 	"github.com/opencontainers/runc/libcontainer/label"
 	"github.com/opencontainers/runc/libcontainer/label"
 )
 )
 
 
@@ -27,8 +29,10 @@ type mounts []container.Mount
 // volumeToAPIType converts a volume.Volume to the type used by the remote API
 func volumeToAPIType(v volume.Volume) *types.Volume {
 	tv := &types.Volume{
-		Name:   v.Name(),
-		Driver: v.DriverName(),
+		Name:     v.Name(),
+		Driver:   v.DriverName(),
+		Size:     -1,
+		RefCount: -1,
 	}
 	if v, ok := v.(volume.LabeledVolume); ok {
 		tv.Labels = v.Labels()
@@ -274,3 +278,29 @@ func backportMountSpec(container *container.Container) error {
 	}
 	return container.ToDiskLocking()
 }
+
+func (daemon *Daemon) traverseLocalVolumes(fn func(volume.Volume) error) error {
+	localVolumeDriver, err := volumedrivers.GetDriver(volume.DefaultDriverName)
+	if err != nil {
+		return fmt.Errorf("can't retrieve local volume driver: %v", err)
+	}
+	vols, err := localVolumeDriver.List()
+	if err != nil {
+		return fmt.Errorf("can't retrieve local volumes: %v", err)
+	}
+
+	for _, v := range vols {
+		name := v.Name()
+		_, err := daemon.volumes.Get(name)
+		if err != nil {
+			logrus.Warnf("failed to retrieve volume %s from store: %v", name, err)
+		}
+
+		err = fn(v)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}

+ 10 - 0
distribution/xfer/download_test.go

@@ -71,6 +71,16 @@ func createChainIDFromParent(parent layer.ChainID, dgsts ...layer.DiffID) layer.
 	return createChainIDFromParent(layer.ChainID(dgst), dgsts[1:]...)
 }
 
+func (ls *mockLayerStore) Map() map[layer.ChainID]layer.Layer {
+	layers := map[layer.ChainID]layer.Layer{}
+
+	for k, v := range ls.layers {
+		layers[k] = v
+	}
+
+	return layers
+}
+
 func (ls *mockLayerStore) Register(reader io.Reader, parentID layer.ChainID) (layer.Layer, error) {
 	return ls.RegisterWithDescriptor(reader, parentID, distribution.Descriptor{})
 }

+ 201 - 0
docs/reference/api/docker_remote_api_v1.25.md

@@ -1567,6 +1567,38 @@ Upload a tar archive to be extracted to a path in the filesystem of container
     - no such file or directory (**path** resource does not exist)
 - **500** – server error
 
+
+### Prune stopped containers
+
+`POST /containers/prune`
+
+Delete stopped containers
+
+**Example request**:
+
+    POST /containers/prune HTTP/1.1
+    Content-Type: application/json
+
+    {
+    }
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+        "ContainersDeleted": [
+            "e575172ed11dc01bfce087fb27bee502db149e1a0fad7c296ad300bbff178148"
+        ],
+        "SpaceReclaimed": 109
+    }
+
+**Status codes**:
+
+-   **200** – no error
+-   **500** – server error
+
 ## 3.2 Images
 
 ### List Images
@@ -2194,6 +2226,54 @@ Search for an image on [Docker Hub](https://hub.docker.com).
 -   **200** – no error
 -   **500** – server error
 
+### Prune unused images
+
+`POST /images/prune`
+
+Delete unused images
+
+**Example request**:
+
+    POST /images/prune HTTP/1.1
+    Content-Type: application/json
+
+    {
+        "DanglingOnly": false
+    }
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+        "ImagesDeleted": [
+            {
+                "Untagged": "busybox:latest"
+            },
+            {
+                "Untagged": "busybox@sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6"
+            },
+            {
+                "Deleted": "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749"
+            },
+            {
+                "Deleted": "sha256:8ac8bfaff55af948c796026ee867448c5b5b5d9dd3549f4006d9759b25d4a893"
+            }
+        ],
+        "SpaceReclaimed": 1092588
+    }
+
+**JSON parameters**:
+
+- **DanglingOnly**: if `true`, only delete unused *and* untagged images. Defaults to `false` if omitted
+
+**Status codes**:
+
+-   **200** – no error
+-   **500** – server error
+
+
 ## 3.3 Misc
 
 ### Check auth configuration
@@ -2425,6 +2505,95 @@ Display system-wide information
 -   **200** – no error
 -   **500** – server error
 
+### Show docker data usage information
+
+`GET /system/df`
+
+Return docker data usage information
+
+**Example request**:
+
+    GET /system/df HTTP/1.1
+
+**Example response**:
+
+    {
+        "LayersSize": 1092588,
+        "Images": [
+            {
+                "Id": "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749",
+                "ParentId": "",
+                "RepoTags": [
+                    "busybox:latest"
+                ],
+                "RepoDigests": [
+                    "busybox@sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6"
+                ],
+                "Created": 1466724217,
+                "Size": 1092588,
+                "SharedSize": 0,
+                "VirtualSize": 1092588,
+                "Labels": {},
+                "Containers": 1
+            }
+        ],
+        "Containers": [
+            {
+                "Id": "e575172ed11dc01bfce087fb27bee502db149e1a0fad7c296ad300bbff178148",
+                "Names": [
+                    "/top"
+                ],
+                "Image": "busybox",
+                "ImageID": "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749",
+                "Command": "top",
+                "Created": 1472592424,
+                "Ports": [],
+                "SizeRootFs": 1092588,
+                "Labels": {},
+                "State": "exited",
+                "Status": "Exited (0) 56 minutes ago",
+                "HostConfig": {
+                    "NetworkMode": "default"
+                },
+                "NetworkSettings": {
+                    "Networks": {
+                        "bridge": {
+                            "IPAMConfig": null,
+                            "Links": null,
+                            "Aliases": null,
+                            "NetworkID": "d687bc59335f0e5c9ee8193e5612e8aee000c8c62ea170cfb99c098f95899d92",
+                            "EndpointID": "8ed5115aeaad9abb174f68dcf135b49f11daf597678315231a32ca28441dec6a",
+                            "Gateway": "172.18.0.1",
+                            "IPAddress": "172.18.0.2",
+                            "IPPrefixLen": 16,
+                            "IPv6Gateway": "",
+                            "GlobalIPv6Address": "",
+                            "GlobalIPv6PrefixLen": 0,
+                            "MacAddress": "02:42:ac:12:00:02"
+                        }
+                    }
+                },
+                "Mounts": []
+            }
+        ],
+        "Volumes": [
+                {
+                    "Name": "my-volume",
+                    "Driver": "local",
+                    "Mountpoint": "",
+                    "Labels": null,
+                    "Scope": "",
+                    "Size": 0,
+                    "RefCount": 0
+                }
+        ]
+    }
+
+**Status codes**:
+
+-   **200** – no error
+-   **500** – server error
+
 ### Show the docker version information
 
 `GET /version`
@@ -3227,6 +3396,38 @@ Instruct the driver to remove the volume (`name`).
 -   **409** - volume is in use and cannot be removed
 -   **500** - server error
 
+### Prune unused volumes
+
+`POST /volumes/prune`
+
+Delete unused volumes
+
+**Example request**:
+
+    POST /volumes/prune HTTP/1.1
+    Content-Type: application/json
+
+    {
+    }
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+        "VolumesDeleted": [
+            "my-volume"
+        ],
+        "SpaceReclaimed": 42
+    }
+
+**Status codes**:
+
+-   **200** – no error
+-   **500** – server error
+
+
 ## 3.5 Networks
 
 ### List networks
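
Since the new endpoints are plain JSON over HTTP, they can also be exercised without the Go client. A minimal sketch using only the standard library against the daemon's default unix socket (the socket path and the `/v1.25` path prefix are assumptions for a stock daemon speaking this API version):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
	"strings"
)

func main() {
	// route all requests through the docker unix socket instead of TCP
	tr := &http.Transport{
		Dial: func(network, addr string) (net.Conn, error) {
			return net.Dial("unix", "/var/run/docker.sock")
		},
	}
	httpc := &http.Client{Transport: tr}

	// POST /containers/prune with an empty JSON body, as documented above
	resp, err := httpc.Post("http://docker/v1.25/containers/prune",
		"application/json", strings.NewReader("{}"))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	// expected shape: {"ContainersDeleted":[...],"SpaceReclaimed":N}
	fmt.Println(resp.Status)
	fmt.Println(string(body))
}
```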

+ 41 - 0
docs/reference/commandline/container_prune.md

@@ -0,0 +1,41 @@
+<!--[metadata]>
++++
+title = "container prune"
+description = "Remove all stopped containers"
+keywords = ["container, prune, delete, remove"]
+[menu.main]
+parent = "smn_cli"
++++
+<![end-metadata]-->
+
+# container prune
+
+```markdown
+Usage:	docker container prune
+
+Remove all stopped containers
+
+Options:
+  -f, --force   Do not prompt for confirmation
+      --help    Print usage
+```
+
+Example output:
+
+```bash
+$ docker container prune
+WARNING! This will remove all stopped containers.
+Are you sure you want to continue? [y/N] y
+Deleted Containers:
+4a7f7eebae0f63178aff7eb0aa39cd3f0627a203ab2df258c1a00b456cf20063
+f98f9c2aa1eaf727e4ec9c0283bc7d4aa4762fbdba7f26191f26c97f64090360
+
+Total reclaimed space: 212 B
+```
+
+## Related information
+
+* [system df](system_df.md)
+* [volume prune](volume_prune.md)
+* [image prune](image_prune.md)
+* [system prune](system_prune.md)

+ 65 - 0
docs/reference/commandline/image_prune.md

@@ -0,0 +1,65 @@
+<!--[metadata]>
++++
+title = "image prune"
+description = "Remove all stopped images"
+keywords = [image, prune, delete, remove]
+[menu.main]
+parent = "smn_cli"
++++
+<![end-metadata]-->
+
+# image prune
+
+```markdown
+Usage:	docker image prune
+
+Remove unused images
+
+Options:
+  -a, --all     Remove all unused images, not just dangling ones
+  -f, --force   Do not prompt for confirmation
+      --help    Print usage
+```
+
+Remove all dangling images. If `-a` is specified, all images not referenced by any container are also removed.
+
+Example output:
+
+```bash
+$ docker image prune -a
+WARNING! This will remove all images without at least one container associated to them.
+Are you sure you want to continue? [y/N] y
+Deleted Images:
+untagged: alpine:latest
+untagged: alpine@sha256:3dcdb92d7432d56604d4545cbd324b14e647b313626d99b889d0626de158f73a
+deleted: sha256:4e38e38c8ce0b8d9041a9c4fefe786631d1416225e13b0bfe8cfa2321aec4bba
+deleted: sha256:4fe15f8d0ae69e169824f25f1d4da3015a48feeeeebb265cd2e328e15c6a869f
+untagged: alpine:3.3
+untagged: alpine@sha256:4fa633f4feff6a8f02acfc7424efd5cb3e76686ed3218abf4ca0fa4a2a358423
+untagged: my-jq:latest
+deleted: sha256:ae67841be6d008a374eff7c2a974cde3934ffe9536a7dc7ce589585eddd83aff
+deleted: sha256:34f6f1261650bc341eb122313372adc4512b4fceddc2a7ecbb84f0958ce5ad65
+deleted: sha256:cf4194e8d8db1cb2d117df33f2c75c0369c3a26d96725efb978cc69e046b87e7
+untagged: my-curl:latest
+deleted: sha256:b2789dd875bf427de7f9f6ae001940073b3201409b14aba7e5db71f408b8569e
+deleted: sha256:96daac0cb203226438989926fc34dd024f365a9a8616b93e168d303cfe4cb5e9
+deleted: sha256:5cbd97a14241c9cd83250d6b6fc0649833c4a3e84099b968dd4ba403e609945e
+deleted: sha256:a0971c4015c1e898c60bf95781c6730a05b5d8a2ae6827f53837e6c9d38efdec
+deleted: sha256:d8359ca3b681cc5396a4e790088441673ed3ce90ebc04de388bfcd31a0716b06
+deleted: sha256:83fc9ba8fb70e1da31dfcc3c88d093831dbd4be38b34af998df37e8ac538260c
+deleted: sha256:ae7041a4cc625a9c8e6955452f7afe602b401f662671cea3613f08f3d9343b35
+deleted: sha256:35e0f43a37755b832f0bbea91a2360b025ee351d7309dae0d9737bc96b6d0809
+deleted: sha256:0af941dd29f00e4510195dd00b19671bc591e29d1495630e7e0f7c44c1e6a8c0
+deleted: sha256:9fc896fc2013da84f84e45b3096053eb084417b42e6b35ea0cce5a3529705eac
+deleted: sha256:47cf20d8c26c46fff71be614d9f54997edacfe8d46d51769706e5aba94b16f2b
+deleted: sha256:2c675ee9ed53425e31a13e3390bf3f539bf8637000e4bcfbb85ee03ef4d910a1
+
+Total reclaimed space: 16.43 MB
+```
+
+## Related information
+
+* [system df](system_df.md)
+* [container prune](container_prune.md)
+* [volume prune](volume_prune.md)
+* [system prune](system_prune.md)

+ 68 - 0
docs/reference/commandline/system_df.md

@@ -0,0 +1,68 @@
+<!--[metadata]>
++++
+title = "system df"
+description = "The system df command description and usage"
+keywords = ["system, data, usage, disk"]
+[menu.main]
+parent = "smn_cli"
++++
+<![end-metadata]-->
+
+# system df
+
+```markdown
+Usage:	docker system df [OPTIONS]
+
+Show docker filesystem usage
+
+Options:
+      --help      Print usage
+  -v, --verbose   Show detailed information on space usage
+```
+
+The `docker system df` command displays information regarding the
+amount of disk space used by the docker daemon.
+
+By default, the command shows just a summary of the data used:
+```bash
+$ docker system df
+TYPE                TOTAL               ACTIVE              SIZE                RECLAIMABLE
+Images              5                   2                   16.43 MB            11.63 MB (70%)
+Containers          2                   0                   212 B               212 B (100%)
+Local Volumes       2                   1                   36 B                0 B (0%)
+```
+
+A more detailed view can be requested using the `-v, --verbose` flag:
+```bash
+$ docker system df -v
+Images space usage:
+
+REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE                SHARED SIZE         UNIQUE SIZE         CONTAINERS
+my-curl             latest              b2789dd875bf        6 minutes ago       11 MB               11 MB               5 B                 0
+my-jq               latest              ae67841be6d0        6 minutes ago       9.623 MB            8.991 MB            632.1 kB            0
+<none>              <none>              a0971c4015c1        6 minutes ago       11 MB               11 MB               0 B                 0
+alpine              latest              4e38e38c8ce0        9 weeks ago         4.799 MB            0 B                 4.799 MB            1
+alpine              3.3                 47cf20d8c26c        9 weeks ago         4.797 MB            4.797 MB            0 B                 1
+
+Containers space usage:
+
+CONTAINER ID        IMAGE               COMMAND             LOCAL VOLUMES       SIZE                CREATED             STATUS                      NAMES
+4a7f7eebae0f        alpine:latest       "sh"                1                   0 B                 16 minutes ago      Exited (0) 5 minutes ago    hopeful_yalow
+f98f9c2aa1ea        alpine:3.3          "sh"                1                   212 B               16 minutes ago      Exited (0) 48 seconds ago   anon-vol
+
+Local Volumes space usage:
+
+NAME                                                               LINKS               SIZE
+07c7bdf3e34ab76d921894c2b834f073721fccfbbcba792aa7648e3a7a664c2e   2                   36 B
+my-named-vol                                                       0                   0 B
+```
+
+* `SHARED SIZE` is the amount of space that an image shares with another one (i.e. their common data)
+* `UNIQUE SIZE` is the amount of space that is only used by a given image
+* `SIZE` is the virtual size of the image; it is the sum of `SHARED SIZE` and `UNIQUE SIZE` (for `my-jq` above: 8.991 MB shared + 632.1 kB unique ≈ 9.623 MB)
+
+## Related information
+* [system prune](system_prune.md)
+* [container prune](container_prune.md)
+* [volume prune](volume_prune.md)
+* [image prune](image_prune.md)

+ 70 - 0
docs/reference/commandline/system_prune.md

@@ -0,0 +1,70 @@
+<!--[metadata]>
++++
+title = "system prune"
+description = "Remove unused data"
+keywords = ["system, prune, delete, remove"]
+[menu.main]
+parent = "smn_cli"
++++
+<![end-metadata]-->
+
+# system prune
+
+```markdown
+Usage:	docker system prune [OPTIONS]
+
+Delete unused data
+
+Options:
+  -a, --all     Remove all unused images, not just dangling ones
+  -f, --force   Do not prompt for confirmation
+      --help    Print usage
+```
+
+Remove all unused containers, volumes and images (both dangling and unreferenced).
+
+Example output:
+
+```bash
+$ docker system prune -a
+WARNING! This will remove:
+	- all stopped containers
+	- all volumes not used by at least one container
+	- all images without at least one container associated to them
+Are you sure you want to continue? [y/N] y
+Deleted Containers:
+0998aa37185a1a7036b0e12cf1ac1b6442dcfa30a5c9650a42ed5010046f195b
+73958bfb884fa81fa4cc6baf61055667e940ea2357b4036acbbe25a60f442a4d
+
+Deleted Volumes:
+named-vol
+
+Deleted Images:
+untagged: my-curl:latest
+deleted: sha256:7d88582121f2a29031d92017754d62a0d1a215c97e8f0106c586546e7404447d
+deleted: sha256:dd14a93d83593d4024152f85d7c63f76aaa4e73e228377ba1d130ef5149f4d8b
+untagged: alpine:3.3
+deleted: sha256:695f3d04125db3266d4ab7bbb3c6b23aa4293923e762aa2562c54f49a28f009f
+untagged: alpine:latest
+deleted: sha256:ee4603260daafe1a8c2f3b78fd760922918ab2441cbb2853ed5c439e59c52f96
+deleted: sha256:9007f5987db353ec398a223bc5a135c5a9601798ba20a1abba537ea2f8ac765f
+deleted: sha256:71fa90c8f04769c9721459d5aa0936db640b92c8c91c9b589b54abd412d120ab
+deleted: sha256:bb1c3357b3c30ece26e6604aea7d2ec0ace4166ff34c3616701279c22444c0f3
+untagged: my-jq:latest
+deleted: sha256:6e66d724542af9bc4c4abf4a909791d7260b6d0110d8e220708b09e4ee1322e1
+deleted: sha256:07b3fa89d4b17009eb3988dfc592c7d30ab3ba52d2007832dffcf6d40e3eda7f
+deleted: sha256:3a88a5c81eb5c283e72db2dbc6d65cbfd8e80b6c89bb6e714cfaaa0eed99c548
+
+Total reclaimed space: 13.5 MB
+```
+
+## Related information
+
+* [volume create](volume_create.md)
+* [volume ls](volume_ls.md)
+* [volume inspect](volume_inspect.md)
+* [volume rm](volume_rm.md)
+* [Understand Data Volumes](../../tutorials/dockervolumes.md)
+* [system df](system_df.md)
+* [container prune](container_prune.md)
+* [image prune](image_prune.md)
+* [volume prune](volume_prune.md)

+ 48 - 0
docs/reference/commandline/volume_prune.md

@@ -0,0 +1,48 @@
+<!--[metadata]>
++++
+title = "volume prune"
+description = "Remove unused volumes"
+keywords = ["volume, prune, delete"]
+[menu.main]
+parent = "smn_cli"
++++
+<![end-metadata]-->
+
+# volume prune
+
+```markdown
+Usage:	docker volume prune
+
+Remove all unused volumes
+
+Options:
+  -f, --force   Do not prompt for confirmation
+      --help    Print usage
+```
+
+Remove all unused volumes. Unused volumes are those not referenced by any container.
+
+Example output:
+
+```bash
+$ docker volume prune
+WARNING! This will remove all volumes not used by at least one container.
+Are you sure you want to continue? [y/N] y
+Deleted Volumes:
+07c7bdf3e34ab76d921894c2b834f073721fccfbbcba792aa7648e3a7a664c2e
+my-named-vol
+
+Total reclaimed space: 36 B
+```
+
+## Related information
+
+* [volume create](volume_create.md)
+* [volume ls](volume_ls.md)
+* [volume inspect](volume_inspect.md)
+* [volume rm](volume_rm.md)
+* [Understand Data Volumes](../../tutorials/dockervolumes.md)
+* [system df](system_df.md)
+* [container prune](container_prune.md)
+* [image prune](image_prune.md)
+* [system prune](system_prune.md)

+ 1 - 0
layer/layer.go

@@ -170,6 +170,7 @@ type MountInit func(root string) error
 type Store interface {
 	Register(io.Reader, ChainID) (Layer, error)
 	Get(ChainID) (Layer, error)
+	Map() map[ChainID]Layer
 	Release(Layer) ([]Metadata, error)
 
 	CreateRWLayer(id string, parent ChainID, mountLabel string, initFunc MountInit, storageOpt map[string]string) (RWLayer, error)

+ 13 - 0
layer/layer_store.go

@@ -360,6 +360,19 @@ func (ls *layerStore) Get(l ChainID) (Layer, error) {
 	return layer.getReference(), nil
 }
 
+func (ls *layerStore) Map() map[ChainID]Layer {
+	ls.layerL.Lock()
+	defer ls.layerL.Unlock()
+
+	layers := map[ChainID]Layer{}
+
+	for k, v := range ls.layerMap {
+		layers[k] = v
+	}
+
+	return layers
+}
+
 func (ls *layerStore) deleteLayer(layer *roLayer, metadata *Metadata) error {
 	err := ls.driver.Remove(layer.cacheID)
 	if err != nil {