
Move all daemon image methods into imageService

imageService provides the backend for the image API and handles the
imageStore and referenceStore.

Signed-off-by: Daniel Nephin <dnephin@docker.com>
Daniel Nephin 7 years ago
parent
commit
0dab53ff3c
42 changed files with 544 additions and 389 deletions
  1. cmd/dockerd/daemon.go  + 5 - 4
  2. daemon/cache.go  + 4 - 4
  3. daemon/cluster/cluster.go  + 1 - 0
  4. daemon/cluster/executor/backend.go  + 6 - 3
  5. daemon/cluster/executor/container/adapter.go  + 5 - 3
  6. daemon/cluster/executor/container/attachment.go  + 2 - 2
  7. daemon/cluster/executor/container/controller.go  + 2 - 2
  8. daemon/cluster/executor/container/executor.go  + 5 - 3
  9. daemon/cluster/executor/container/health_test.go  + 1 - 1
  10. daemon/cluster/executor/container/validate_test.go  + 1 - 1
  11. daemon/cluster/noderunner.go  + 9 - 6
  12. daemon/cluster/services.go  + 1 - 1
  13. daemon/commit.go  + 2 - 2
  14. daemon/container.go  + 1 - 1
  15. daemon/create.go  + 5 - 31
  16. daemon/daemon.go  + 80 - 72
  17. daemon/delete.go  + 5 - 7
  18. daemon/disk_usage.go  + 4 - 46
  19. daemon/export.go  + 3 - 3
  20. daemon/image.go  + 202 - 9
  21. daemon/image_builder.go  + 15 - 20
  22. daemon/image_commit.go  + 13 - 10
  23. daemon/image_delete.go  + 37 - 44
  24. daemon/image_exporter.go  + 4 - 4
  25. daemon/image_getsize_unix.go  + 4 - 4
  26. daemon/image_history.go  + 6 - 6
  27. daemon/image_import.go  + 6 - 6
  28. daemon/image_inspect.go  + 7 - 7
  29. daemon/image_prune.go  + 10 - 10
  30. daemon/image_pull.go  + 12 - 12
  31. daemon/image_push.go  + 9 - 9
  32. daemon/image_search.go  + 5 - 2
  33. daemon/image_tag.go  + 7 - 7
  34. daemon/images.go  + 21 - 21
  35. daemon/info.go  + 3 - 2
  36. daemon/inspect.go  + 1 - 1
  37. daemon/list.go  + 4 - 4
  38. daemon/oci_windows.go  + 1 - 1
  39. daemon/prune.go  + 1 - 1
  40. daemon/reload.go  + 4 - 4
  41. daemon/reload_test.go  + 29 - 12
  42. daemon/start.go  + 1 - 1
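
Before the per-file diffs, a minimal, simplified sketch of the delegation pattern the commit message describes: the Daemon keeps a private *imageService, and API routers reach image operations through daemon.ImageService() rather than methods on Daemon itself. The types below are placeholder stand-ins, not code from this commit.

// Sketch only: stand-in types instead of the real imageStore/referenceStore.
package main

import "fmt"

type img struct{ ID string }

// imageService owns image-related state that previously lived on Daemon.
type imageService struct {
	images map[string]*img
}

func (i *imageService) GetImage(refOrID string) (*img, error) {
	if found, ok := i.images[refOrID]; ok {
		return found, nil
	}
	return nil, fmt.Errorf("no such image: %s", refOrID)
}

// Daemon only holds the service and hands it to the API routers.
type Daemon struct {
	imageService *imageService
}

func (daemon *Daemon) ImageService() *imageService { return daemon.imageService }

func main() {
	d := &Daemon{imageService: &imageService{images: map[string]*img{
		"busybox:latest": {ID: "sha256:abc"},
	}}}
	fmt.Println(d.ImageService().GetImage("busybox:latest"))
}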

+ 5 - 4
cmd/dockerd/daemon.go

@@ -253,6 +253,7 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
 		Root:                   cli.Config.Root,
 		Name:                   name,
 		Backend:                d,
+		ImageBackend:           d.ImageService(),
 		PluginBackend:          d.PluginManager(),
 		NetworkSubnetsProvider: d,
 		DefaultAdvertiseAddr:   cli.Config.SwarmDefaultAdvertiseAddr,
@@ -345,12 +346,12 @@ func newRouterOptions(config *config.Config, daemon *daemon.Daemon) (routerOptio
 		return opts, errors.Wrap(err, "failed to create fscache")
 	}
 
-	manager, err := dockerfile.NewBuildManager(daemon, sm, buildCache, daemon.IDMappings())
+	manager, err := dockerfile.NewBuildManager(daemon.BuilderBackend(), sm, buildCache, daemon.IDMappings())
 	if err != nil {
 		return opts, err
 	}
 
-	bb, err := buildbackend.NewBackend(daemon, manager, buildCache)
+	bb, err := buildbackend.NewBackend(daemon.ImageService(), manager, buildCache)
 	if err != nil {
 		return opts, errors.Wrap(err, "failed to create buildmanager")
 	}
@@ -507,14 +508,14 @@ func initRouter(opts routerOptions) {
 		// we need to add the checkpoint router before the container router or the DELETE gets masked
 		checkpointrouter.NewRouter(opts.daemon, decoder),
 		container.NewRouter(opts.daemon, decoder),
-		image.NewRouter(opts.daemon),
+		image.NewRouter(opts.daemon.ImageService()),
 		systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildCache),
 		volume.NewRouter(opts.daemon),
 		build.NewRouter(opts.buildBackend, opts.daemon),
 		sessionrouter.NewRouter(opts.sessionManager),
 		swarmrouter.NewRouter(opts.cluster),
 		pluginrouter.NewRouter(opts.daemon.PluginManager()),
-		distributionrouter.NewRouter(opts.daemon),
+		distributionrouter.NewRouter(opts.daemon.ImageService()),
 	}
 
 	if opts.daemon.NetworkControllerEnabled() {

+ 4 - 4
daemon/cache.go

@@ -7,15 +7,15 @@ import (
 )
 
 // MakeImageCache creates a stateful image cache.
-func (daemon *Daemon) MakeImageCache(sourceRefs []string) builder.ImageCache {
+func (i *imageService) MakeImageCache(sourceRefs []string) builder.ImageCache {
 	if len(sourceRefs) == 0 {
-		return cache.NewLocal(daemon.imageStore)
+		return cache.NewLocal(i.imageStore)
 	}
 
-	cache := cache.New(daemon.imageStore)
+	cache := cache.New(i.imageStore)
 
 	for _, ref := range sourceRefs {
-		img, err := daemon.GetImage(ref)
+		img, err := i.GetImage(ref)
 		if err != nil {
 			logrus.Warnf("Could not look up %s for cache resolution, skipping: %+v", ref, err)
 			continue

+ 1 - 0
daemon/cluster/cluster.go

@@ -83,6 +83,7 @@ type Config struct {
 	Root                   string
 	Name                   string
 	Backend                executorpkg.Backend
+	ImageBackend           executorpkg.ImageBackend
 	PluginBackend          plugin.Backend
 	NetworkSubnetsProvider NetworkSubnetsProvider
 

+ 6 - 3
daemon/cluster/executor/backend.go

@@ -31,7 +31,6 @@ type Backend interface {
 	FindNetwork(idName string) (libnetwork.Network, error)
 	SetupIngress(clustertypes.NetworkCreateRequest, string) (<-chan struct{}, error)
 	ReleaseIngress() (<-chan struct{}, error)
-	PullImage(ctx context.Context, image, tag, platform string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error
 	CreateManagedContainer(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error)
 	ContainerStart(name string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error
 	ContainerStop(name string, seconds *int) error
@@ -58,9 +57,13 @@ type Backend interface {
 	UnsubscribeFromEvents(listener chan interface{})
 	UpdateAttachment(string, string, string, *network.NetworkingConfig) error
 	WaitForDetachment(context.Context, string, string, string, string) error
-	GetRepository(context.Context, reference.Named, *types.AuthConfig) (distribution.Repository, bool, error)
-	LookupImage(name string) (*types.ImageInspect, error)
 	PluginManager() *plugin.Manager
 	PluginGetter() *plugin.Store
 	GetAttachmentStore() *networkSettings.AttachmentStore
 }
+
+type ImageBackend interface {
+	PullImage(ctx context.Context, image, tag, platform string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error
+	GetRepository(context.Context, reference.Named, *types.AuthConfig) (distribution.Repository, bool, error)
+	LookupImage(name string) (*types.ImageInspect, error)
+}
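
The extracted ImageBackend interface narrows what the swarm executor may ask of the daemon: the executor depends only on the image operations it actually uses, and the daemon wires d.ImageService() in as that dependency (see cmd/dockerd/daemon.go above). A minimal, self-contained sketch of the same consumer-side interface split, with illustrative names that are not from this commit:

// Sketch only: illustrative consumer-side interface, not the real ImageBackend.
package main

import "fmt"

// The daemon-side service implements many image methods...
type service struct{}

func (service) PullImage(name string) error   { fmt.Println("pulling", name); return nil }
func (service) DeleteImage(name string) error { return nil }

// ...but the executor declares only the subset it needs, so any image puller
// (including a test fake) can be wired in.
type imagePuller interface {
	PullImage(name string) error
}

type executor struct {
	images imagePuller
}

func (e executor) prepare(name string) error { return e.images.PullImage(name) }

func main() {
	e := executor{images: service{}}
	fmt.Println(e.prepare("busybox:latest"))
}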

+ 5 - 3
daemon/cluster/executor/container/adapter.go

@@ -36,11 +36,12 @@ import (
 // containerConfig.
 type containerAdapter struct {
 	backend      executorpkg.Backend
+	imageBackend executorpkg.ImageBackend
 	container    *containerConfig
 	dependencies exec.DependencyGetter
 }
 
-func newContainerAdapter(b executorpkg.Backend, task *api.Task, node *api.NodeDescription, dependencies exec.DependencyGetter) (*containerAdapter, error) {
+func newContainerAdapter(b executorpkg.Backend, i executorpkg.ImageBackend, task *api.Task, node *api.NodeDescription, dependencies exec.DependencyGetter) (*containerAdapter, error) {
 	ctnr, err := newContainerConfig(task, node)
 	if err != nil {
 		return nil, err
@@ -49,6 +50,7 @@ func newContainerAdapter(b executorpkg.Backend, task *api.Task, node *api.NodeDe
 	return &containerAdapter{
 		container:    ctnr,
 		backend:      b,
+		imageBackend: i,
 		dependencies: dependencies,
 	}, nil
 }
@@ -66,7 +68,7 @@ func (c *containerAdapter) pullImage(ctx context.Context) error {
 	named, err := reference.ParseNormalizedNamed(spec.Image)
 	if err == nil {
 		if _, ok := named.(reference.Canonical); ok {
-			_, err := c.backend.LookupImage(spec.Image)
+			_, err := c.imageBackend.LookupImage(spec.Image)
 			if err == nil {
 				return nil
 			}
@@ -92,7 +94,7 @@ func (c *containerAdapter) pullImage(ctx context.Context) error {
 		// TODO @jhowardmsft LCOW Support: This will need revisiting as
 		// the stack is built up to include LCOW support for swarm.
 		platform := runtime.GOOS
-		err := c.backend.PullImage(ctx, c.container.image(), "", platform, metaHeaders, authConfig, pw)
+		err := c.imageBackend.PullImage(ctx, c.container.image(), "", platform, metaHeaders, authConfig, pw)
 		pw.CloseWithError(err)
 	}()
 

+ 2 - 2
daemon/cluster/executor/container/attachment.go

@@ -20,8 +20,8 @@ type networkAttacherController struct {
 	closed  chan struct{}
 }
 
-func newNetworkAttacherController(b executorpkg.Backend, task *api.Task, node *api.NodeDescription, dependencies exec.DependencyGetter) (*networkAttacherController, error) {
-	adapter, err := newContainerAdapter(b, task, node, dependencies)
+func newNetworkAttacherController(b executorpkg.Backend, i executorpkg.ImageBackend, task *api.Task, node *api.NodeDescription, dependencies exec.DependencyGetter) (*networkAttacherController, error) {
+	adapter, err := newContainerAdapter(b, i, task, node, dependencies)
 	if err != nil {
 		return nil, err
 	}

+ 2 - 2
daemon/cluster/executor/container/controller.go

@@ -40,8 +40,8 @@ type controller struct {
 var _ exec.Controller = &controller{}
 
 // NewController returns a docker exec runner for the provided task.
-func newController(b executorpkg.Backend, task *api.Task, node *api.NodeDescription, dependencies exec.DependencyGetter) (*controller, error) {
-	adapter, err := newContainerAdapter(b, task, node, dependencies)
+func newController(b executorpkg.Backend, i executorpkg.ImageBackend, task *api.Task, node *api.NodeDescription, dependencies exec.DependencyGetter) (*controller, error) {
+	adapter, err := newContainerAdapter(b, i, task, node, dependencies)
 	if err != nil {
 		return nil, err
 	}

+ 5 - 3
daemon/cluster/executor/container/executor.go

@@ -26,6 +26,7 @@ import (
 
 type executor struct {
 	backend       executorpkg.Backend
+	imageBackend  executorpkg.ImageBackend
 	pluginBackend plugin.Backend
 	dependencies  exec.DependencyManager
 	mutex         sync.Mutex // This mutex protects the following node field
@@ -33,10 +34,11 @@ type executor struct {
 }
 
 // NewExecutor returns an executor from the docker client.
-func NewExecutor(b executorpkg.Backend, p plugin.Backend) exec.Executor {
+func NewExecutor(b executorpkg.Backend, p plugin.Backend, i executorpkg.ImageBackend) exec.Executor {
 	return &executor{
 		backend:       b,
 		pluginBackend: p,
+		imageBackend:  i,
 		dependencies:  agent.NewDependencyManager(),
 	}
 }
@@ -200,7 +202,7 @@ func (e *executor) Controller(t *api.Task) (exec.Controller, error) {
 	e.mutex.Unlock()
 
 	if t.Spec.GetAttachment() != nil {
-		return newNetworkAttacherController(e.backend, t, nodeDescription, dependencyGetter)
+		return newNetworkAttacherController(e.backend, e.imageBackend, t, nodeDescription, dependencyGetter)
 	}
 
 	var ctlr exec.Controller
@@ -229,7 +231,7 @@ func (e *executor) Controller(t *api.Task) (exec.Controller, error) {
 			return ctlr, fmt.Errorf("unsupported runtime type: %q", runtimeKind)
 		}
 	case *api.TaskSpec_Container:
-		c, err := newController(e.backend, t, nodeDescription, dependencyGetter)
+		c, err := newController(e.backend, e.imageBackend, t, nodeDescription, dependencyGetter)
 		if err != nil {
 			return ctlr, err
 		}

+ 1 - 1
daemon/cluster/executor/container/health_test.go

@@ -52,7 +52,7 @@ func TestHealthStates(t *testing.T) {
 		EventsService: e,
 	}
 
-	controller, err := newController(daemon, task, nil, nil)
+	controller, err := newController(daemon, nil, task, nil, nil)
 	if err != nil {
 		t.Fatalf("create controller fail %v", err)
 	}

+ 1 - 1
daemon/cluster/executor/container/validate_test.go

@@ -12,7 +12,7 @@ import (
 )
 
 func newTestControllerWithMount(m api.Mount) (*controller, error) {
-	return newController(&daemon.Daemon{}, &api.Task{
+	return newController(&daemon.Daemon{}, nil, &api.Task{
 		ID:        stringid.GenerateRandomID(),
 		ServiceID: stringid.GenerateRandomID(),
 		Spec: api.TaskSpec{

+ 9 - 6
daemon/cluster/noderunner.go

@@ -120,12 +120,15 @@ func (n *nodeRunner) start(conf nodeStartConfig) error {
 		JoinAddr:           joinAddr,
 		StateDir:           n.cluster.root,
 		JoinToken:          conf.joinToken,
-		Executor:           container.NewExecutor(n.cluster.config.Backend, n.cluster.config.PluginBackend),
-		HeartbeatTick:      1,
-		ElectionTick:       3,
-		UnlockKey:          conf.lockKey,
-		AutoLockManagers:   conf.autolock,
-		PluginGetter:       n.cluster.config.Backend.PluginGetter(),
+		Executor: container.NewExecutor(
+			n.cluster.config.Backend,
+			n.cluster.config.PluginBackend,
+			n.cluster.config.ImageBackend),
+		HeartbeatTick:    1,
+		ElectionTick:     3,
+		UnlockKey:        conf.lockKey,
+		AutoLockManagers: conf.autolock,
+		PluginGetter:     n.cluster.config.Backend.PluginGetter(),
 	}
 	if conf.availability != "" {
 		avail, ok := swarmapi.NodeSpec_Availability_value[strings.ToUpper(string(conf.availability))]

+ 1 - 1
daemon/cluster/services.go

@@ -570,7 +570,7 @@ func (c *Cluster) imageWithDigestString(ctx context.Context, image string, authC
 			return "", errors.Errorf("image reference not tagged: %s", image)
 		}
 
-		repo, _, err := c.config.Backend.GetRepository(ctx, taggedRef, authConfig)
+		repo, _, err := c.config.ImageBackend.GetRepository(ctx, taggedRef, authConfig)
 		if err != nil {
 			return "", err
 		}

+ 2 - 2
daemon/commit.go

@@ -155,7 +155,7 @@ func (daemon *Daemon) CreateImageFromContainer(name string, c *backend.CreateIma
 		return "", err
 	}
 
-	id, err := daemon.commitImage(backend.CommitConfig{
+	id, err := daemon.imageService.CommitImage(backend.CommitConfig{
 		Author:              c.Author,
 		Comment:             c.Comment,
 		Config:              newConfig,
@@ -171,7 +171,7 @@ func (daemon *Daemon) CreateImageFromContainer(name string, c *backend.CreateIma
 
 	var imageRef string
 	if c.Repo != "" {
-		imageRef, err = daemon.TagImage(string(id), c.Repo, c.Tag)
+		imageRef, err = daemon.imageService.TagImage(string(id), c.Repo, c.Tag)
 		if err != nil {
 			return "", err
 		}

+ 1 - 1
daemon/container.go

@@ -158,7 +158,7 @@ func (daemon *Daemon) newContainer(name string, operatingSystem string, config *
 	base.ImageID = imgID
 	base.NetworkSettings = &network.Settings{IsAnonymousEndpoint: noExplicitName}
 	base.Name = name
-	base.Driver = daemon.GraphDriverName(operatingSystem)
+	base.Driver = daemon.imageService.GraphDriverForOS(operatingSystem)
 	base.OS = operatingSystem
 	return base, err
 }

+ 5 - 31
daemon/create.go

@@ -15,7 +15,6 @@ import (
 	"github.com/docker/docker/container"
 	"github.com/docker/docker/errdefs"
 	"github.com/docker/docker/image"
-	"github.com/docker/docker/layer"
 	"github.com/docker/docker/pkg/idtools"
 	"github.com/docker/docker/pkg/stringid"
 	"github.com/docker/docker/pkg/system"
@@ -42,7 +41,7 @@ func (daemon *Daemon) containerCreate(params types.ContainerCreateConfig, manage
 
 	os := runtime.GOOS
 	if params.Config.Image != "" {
-		img, err := daemon.GetImage(params.Config.Image)
+		img, err := daemon.imageService.GetImage(params.Config.Image)
 		if err == nil {
 			os = img.OS
 		}
@@ -92,7 +91,7 @@ func (daemon *Daemon) create(params types.ContainerCreateConfig, managed bool) (
 
 	os := runtime.GOOS
 	if params.Config.Image != "" {
-		img, err = daemon.GetImage(params.Config.Image)
+		img, err = daemon.imageService.GetImage(params.Config.Image)
 		if err != nil {
 			return nil, err
 		}
@@ -158,9 +157,11 @@ func (daemon *Daemon) create(params types.ContainerCreateConfig, managed bool) (
 	}
 
 	// Set RWLayer for container after mount labels have been set
-	if err := daemon.setRWLayer(container); err != nil {
+	rwLayer, err := daemon.imageService.GetRWLayer(container, setupInitLayer(daemon.idMappings))
+	if err != nil {
 		return nil, errdefs.System(err)
 	}
+	container.RWLayer = rwLayer
 
 	rootIDs := daemon.idMappings.RootPair()
 	if err := idtools.MkdirAndChown(container.Root, 0700, rootIDs); err != nil {
@@ -254,33 +255,6 @@ func (daemon *Daemon) generateSecurityOpt(hostConfig *containertypes.HostConfig)
 	return nil, nil
 }
 
-func (daemon *Daemon) setRWLayer(container *container.Container) error {
-	var layerID layer.ChainID
-	if container.ImageID != "" {
-		img, err := daemon.imageStore.Get(container.ImageID)
-		if err != nil {
-			return err
-		}
-		layerID = img.RootFS.ChainID()
-	}
-
-	rwLayerOpts := &layer.CreateRWLayerOpts{
-		MountLabel: container.MountLabel,
-		InitFunc:   setupInitLayer(daemon.idMappings),
-		StorageOpt: container.HostConfig.StorageOpt,
-	}
-
-	// Indexing by OS is safe here as validation of OS has already been performed in create() (the only
-	// caller), and guaranteed non-nil
-	rwLayer, err := daemon.layerStores[container.OS].CreateRWLayer(container.ID, layerID, rwLayerOpts)
-	if err != nil {
-		return err
-	}
-	container.RWLayer = rwLayer
-
-	return nil
-}
-
 // VolumeCreate creates a volume with the specified name, driver, and opts
 // This is called directly from the Engine API
 func (daemon *Daemon) VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error) {

+ 80 - 72
daemon/daemon.go

@@ -31,6 +31,7 @@ import (
 	"github.com/docker/docker/errdefs"
 	"github.com/sirupsen/logrus"
 	// register graph drivers
+	"github.com/docker/docker/builder"
 	_ "github.com/docker/docker/daemon/graphdriver/register"
 	"github.com/docker/docker/daemon/stats"
 	dmetadata "github.com/docker/docker/distribution/metadata"
@@ -57,7 +58,6 @@ import (
 	"github.com/docker/libnetwork"
 	"github.com/docker/libnetwork/cluster"
 	nwconfig "github.com/docker/libnetwork/config"
-	"github.com/docker/libtrust"
 	"github.com/pkg/errors"
 )
 
@@ -70,44 +70,40 @@ var (
 
 // Daemon holds information about the Docker daemon.
 type Daemon struct {
-	ID                        string
-	repository                string
-	containers                container.Store
-	containersReplica         container.ViewDB
-	execCommands              *exec.Store
-	downloadManager           *xfer.LayerDownloadManager
-	uploadManager             *xfer.LayerUploadManager
-	trustKey                  libtrust.PrivateKey
-	idIndex                   *truncindex.TruncIndex
-	configStore               *config.Config
-	statsCollector            *stats.Collector
-	defaultLogConfig          containertypes.LogConfig
-	RegistryService           registry.Service
-	EventsService             *events.Events
-	netController             libnetwork.NetworkController
-	volumes                   *store.VolumeStore
-	discoveryWatcher          discovery.Reloader
-	root                      string
-	seccompEnabled            bool
-	apparmorEnabled           bool
-	shutdown                  bool
-	idMappings                *idtools.IDMappings
-	graphDrivers              map[string]string // By operating system
-	referenceStore            refstore.Store
-	imageStore                image.Store
-	imageRoot                 string
-	layerStores               map[string]layer.Store // By operating system
-	distributionMetadataStore dmetadata.Store
-	PluginStore               *plugin.Store // todo: remove
-	pluginManager             *plugin.Manager
-	linkIndex                 *linkIndex
-	containerd                libcontainerd.Client
-	containerdRemote          libcontainerd.Remote
-	defaultIsolation          containertypes.Isolation // Default isolation mode on Windows
-	clusterProvider           cluster.Provider
-	cluster                   Cluster
-	genericResources          []swarm.GenericResource
-	metricsPluginListener     net.Listener
+	ID                string
+	repository        string
+	containers        container.Store
+	containersReplica container.ViewDB
+	execCommands      *exec.Store
+
+	imageService *imageService
+
+	idIndex          *truncindex.TruncIndex
+	configStore      *config.Config
+	statsCollector   *stats.Collector
+	defaultLogConfig containertypes.LogConfig
+	RegistryService  registry.Service
+	EventsService    *events.Events
+	netController    libnetwork.NetworkController
+	volumes          *store.VolumeStore
+	discoveryWatcher discovery.Reloader
+	root             string
+	seccompEnabled   bool
+	apparmorEnabled  bool
+	shutdown         bool
+	idMappings       *idtools.IDMappings
+	// TODO: move graphDrivers field to an InfoService
+	graphDrivers map[string]string // By operating system
+
+	PluginStore           *plugin.Store // todo: remove
+	pluginManager         *plugin.Manager
+	linkIndex             *linkIndex
+	containerd            libcontainerd.Client
+	defaultIsolation      containertypes.Isolation // Default isolation mode on Windows
+	clusterProvider       cluster.Provider
+	cluster               Cluster
+	genericResources      []swarm.GenericResource
+	metricsPluginListener net.Listener
 
 	machineMemory uint64
 
@@ -162,7 +158,7 @@ func (daemon *Daemon) restore() error {
 		// Ignore the container if it does not support the current driver being used by the graph
 		currentDriverForContainerOS := daemon.graphDrivers[container.OS]
 		if (container.Driver == "" && currentDriverForContainerOS == "aufs") || container.Driver == currentDriverForContainerOS {
-			rwlayer, err := daemon.layerStores[container.OS].GetRWLayer(container.ID)
+			rwlayer, err := daemon.imageService.GetRWLayerByID(container.ID, container.OS)
 			if err != nil {
 				logrus.Errorf("Failed to load container mount %v: %v", id, err)
 				continue
@@ -705,7 +701,7 @@ func NewDaemon(config *config.Config, registryService registry.Service, containe
 	// be set through an environment variable, a daemon start parameter, or chosen through
 	// initialization of the layerstore through driver priority order for example.
 	d.graphDrivers = make(map[string]string)
-	d.layerStores = make(map[string]layer.Store)
+	layerStores := make(map[string]layer.Store)
 	if runtime.GOOS == "windows" {
 		d.graphDrivers[runtime.GOOS] = "windowsfilter"
 		if system.LCOWSupported() {
@@ -754,7 +750,7 @@ func NewDaemon(config *config.Config, registryService registry.Service, containe
 	}
 
 	for operatingSystem, gd := range d.graphDrivers {
-		d.layerStores[operatingSystem], err = layer.NewStoreFromOptions(layer.StoreOptions{
+		layerStores[operatingSystem], err = layer.NewStoreFromOptions(layer.StoreOptions{
 			Root: config.Root,
 			MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"),
 			GraphDriver:               gd,
@@ -771,7 +767,7 @@ func NewDaemon(config *config.Config, registryService registry.Service, containe
 
 	// As layerstore initialization may set the driver
 	for os := range d.graphDrivers {
-		d.graphDrivers[os] = d.layerStores[os].DriverName()
+		d.graphDrivers[os] = layerStores[os].DriverName()
 	}
 
 	// Configure and validate the kernels security support. Note this is a Linux/FreeBSD
@@ -780,22 +776,17 @@ func NewDaemon(config *config.Config, registryService registry.Service, containe
 		return nil, err
 	}
 
-	logrus.Debugf("Max Concurrent Downloads: %d", *config.MaxConcurrentDownloads)
-	d.downloadManager = xfer.NewLayerDownloadManager(d.layerStores, *config.MaxConcurrentDownloads)
-	logrus.Debugf("Max Concurrent Uploads: %d", *config.MaxConcurrentUploads)
-	d.uploadManager = xfer.NewLayerUploadManager(*config.MaxConcurrentUploads)
-
-	d.imageRoot = filepath.Join(config.Root, "image", d.graphDrivers[runtime.GOOS])
-	ifs, err := image.NewFSStoreBackend(filepath.Join(d.imageRoot, "imagedb"))
+	imageRoot := filepath.Join(config.Root, "image", d.graphDrivers[runtime.GOOS])
+	ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb"))
 	if err != nil {
 		return nil, err
 	}
 
 	lgrMap := make(map[string]image.LayerGetReleaser)
-	for os, ls := range d.layerStores {
+	for os, ls := range layerStores {
 		lgrMap[os] = ls
 	}
-	d.imageStore, err = image.NewImageStore(ifs, lgrMap)
+	imageStore, err := image.NewImageStore(ifs, lgrMap)
 	if err != nil {
 		return nil, err
 	}
@@ -829,14 +820,13 @@ func NewDaemon(config *config.Config, registryService registry.Service, containe
 	// operating systems, the list of graphdrivers available isn't user configurable.
 	// For backwards compatibility, we just put it under the windowsfilter
 	// directory regardless.
-	refStoreLocation := filepath.Join(d.imageRoot, `repositories.json`)
+	refStoreLocation := filepath.Join(imageRoot, `repositories.json`)
 	rs, err := refstore.NewReferenceStore(refStoreLocation)
 	if err != nil {
 		return nil, fmt.Errorf("Couldn't create reference store repository: %s", err)
 	}
-	d.referenceStore = rs
 
-	d.distributionMetadataStore, err = dmetadata.NewFSMetadataStore(filepath.Join(d.imageRoot, "distribution"))
+	distributionMetadataStore, err := dmetadata.NewFSMetadataStore(filepath.Join(imageRoot, "distribution"))
 	if err != nil {
 		return nil, err
 	}
@@ -844,7 +834,7 @@ func NewDaemon(config *config.Config, registryService registry.Service, containe
 	// No content-addressability migration on Windows as it never supported pre-CA
 	if runtime.GOOS != "windows" {
 		migrationStart := time.Now()
-		if err := v1.Migrate(config.Root, d.graphDrivers[runtime.GOOS], d.layerStores[runtime.GOOS], d.imageStore, rs, d.distributionMetadataStore); err != nil {
+		if err := v1.Migrate(config.Root, d.graphDrivers[runtime.GOOS], layerStores[runtime.GOOS], imageStore, rs, distributionMetadataStore); err != nil {
 			logrus.Errorf("Graph migration failed: %q. Your old graph data was found to be too inconsistent for upgrading to content-addressable storage. Some of the old data was probably not upgraded. We recommend starting over with a clean storage directory if possible.", err)
 		}
 		logrus.Infof("Graph migration to content-addressability took %.2f seconds", time.Since(migrationStart).Seconds())
@@ -870,7 +860,6 @@ func NewDaemon(config *config.Config, registryService registry.Service, containe
 		return nil, err
 	}
 	d.execCommands = exec.NewStore()
-	d.trustKey = trustKey
 	d.idIndex = truncindex.NewTruncIndex([]string{})
 	d.statsCollector = d.newStatsCollector(1 * time.Second)
 
@@ -880,10 +869,23 @@ func NewDaemon(config *config.Config, registryService registry.Service, containe
 	d.idMappings = idMappings
 	d.seccompEnabled = sysInfo.Seccomp
 	d.apparmorEnabled = sysInfo.AppArmor
-	d.containerdRemote = containerdRemote
 
 	d.linkIndex = newLinkIndex()
 
+	logrus.Debugf("Max Concurrent Downloads: %d", *config.MaxConcurrentDownloads)
+	logrus.Debugf("Max Concurrent Uploads: %d", *config.MaxConcurrentUploads)
+	d.imageService = &imageService{
+		trustKey:                  trustKey,
+		uploadManager:             xfer.NewLayerUploadManager(*config.MaxConcurrentUploads),
+		downloadManager:           xfer.NewLayerDownloadManager(layerStores, *config.MaxConcurrentDownloads),
+		registryService:           registryService,
+		referenceStore:            rs,
+		distributionMetadataStore: distributionMetadataStore,
+		imageStore:                imageStore,
+		eventsService:             eventsService,
+		containers:                d.containers,
+	}
+
 	go d.execCommandGC()
 
 	d.containerd, err = containerdRemote.NewClient(ContainersNamespace, d)
@@ -1005,7 +1007,7 @@ func (daemon *Daemon) Shutdown() error {
 				logrus.Errorf("Stop container error: %v", err)
 				return
 			}
-			if mountid, err := daemon.layerStores[c.OS].GetMountID(c.ID); err == nil {
+			if mountid, err := daemon.imageService.GetContainerMountID(c.ID, c.OS); err == nil {
 				daemon.cleanupMountsByID(mountid)
 			}
 			logrus.Debugf("container stopped %s", c.ID)
@@ -1018,13 +1020,7 @@ func (daemon *Daemon) Shutdown() error {
 		}
 	}
 
-	for os, ls := range daemon.layerStores {
-		if ls != nil {
-			if err := ls.Cleanup(); err != nil {
-				logrus.Errorf("Error during layer Store.Cleanup(): %v %s", err, os)
-			}
-		}
-	}
+	daemon.imageService.Cleanup()
 
 	// If we are part of a cluster, clean up cluster's stuff
 	if daemon.clusterProvider != nil {
@@ -1064,7 +1060,7 @@ func (daemon *Daemon) Mount(container *container.Container) error {
 		if runtime.GOOS != "windows" {
 			daemon.Unmount(container)
 			return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')",
-				daemon.GraphDriverName(container.OS), container.ID, container.BaseFS, dir)
+				daemon.imageService.GraphDriverForOS(container.OS), container.ID, container.BaseFS, dir)
 		}
 	}
 	container.BaseFS = dir // TODO: combine these fields
@@ -1108,11 +1104,6 @@ func (daemon *Daemon) Subnets() ([]net.IPNet, []net.IPNet) {
 	return v4Subnets, v6Subnets
 }
 
-// GraphDriverName returns the name of the graph driver used by the layer.Store
-func (daemon *Daemon) GraphDriverName(os string) string {
-	return daemon.layerStores[os].DriverName()
-}
-
 // prepareTempDir prepares and returns the default directory to use
 // for temporary files.
 // If it doesn't exist, it is created. If it exists, its content is removed.
@@ -1323,3 +1314,20 @@ func fixMemorySwappiness(resources *containertypes.Resources) {
 func (daemon *Daemon) GetAttachmentStore() *network.AttachmentStore {
 	return &daemon.attachmentStore
 }
+
+// IDMappings returns uid/gid mappings for the builder
+func (daemon *Daemon) IDMappings() *idtools.IDMappings {
+	return daemon.idMappings
+}
+
+func (daemon *Daemon) ImageService() *imageService {
+	return daemon.imageService
+}
+
+// TODO: tmp hack to merge interfaces
+func (daemon *Daemon) BuilderBackend() builder.Backend {
+	return struct {
+		*Daemon
+		*imageService
+	}{daemon, daemon.imageService}
+}
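
The BuilderBackend helper above merges the method sets of *Daemon and *imageService by embedding both in an anonymous struct. A minimal sketch of that Go embedding pattern in isolation, with illustrative names that are not from this commit:

// Sketch only: demonstrates method promotion via embedded fields.
package main

import "fmt"

type containerOps struct{}

func (containerOps) ContainerCount() int { return 3 }

type imageOps struct{}

func (imageOps) ImageCount() int { return 7 }

// builderBackend is the combined interface a consumer (e.g. the builder) expects.
type builderBackend interface {
	ContainerCount() int
	ImageCount() int
}

// Embedding both types in an anonymous struct promotes their methods, so the
// composite satisfies the combined interface with no hand-written forwarding.
func newBuilderBackend(c containerOps, i imageOps) builderBackend {
	return struct {
		containerOps
		imageOps
	}{c, i}
}

func main() {
	b := newBuilderBackend(containerOps{}, imageOps{})
	fmt.Println(b.ContainerCount(), b.ImageCount())
}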

+ 5 - 7
daemon/delete.go

@@ -10,7 +10,6 @@ import (
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/container"
 	"github.com/docker/docker/errdefs"
-	"github.com/docker/docker/layer"
 	"github.com/docker/docker/pkg/system"
 	"github.com/docker/docker/volume"
 	volumestore "github.com/docker/docker/volume/store"
@@ -121,12 +120,11 @@ func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemo
 	// When container creation fails and `RWLayer` has not been created yet, we
 	// do not call `ReleaseRWLayer`
 	if container.RWLayer != nil {
-		metadata, err := daemon.layerStores[container.OS].ReleaseRWLayer(container.RWLayer)
-		layer.LogReleaseMetadata(metadata)
-		if err != nil && err != layer.ErrMountDoesNotExist && !os.IsNotExist(errors.Cause(err)) {
-			e := errors.Wrapf(err, "driver %q failed to remove root filesystem for %s", daemon.GraphDriverName(container.OS), container.ID)
-			container.SetRemovalError(e)
-			return e
+		err := daemon.imageService.ReleaseContainerLayer(container.RWLayer, container.OS)
+		if err != nil {
+			err = errors.Wrapf(err, "container %s", container.ID)
+			container.SetRemovalError(err)
+			return err
 		}
 		container.RWLayer = nil
 	}

+ 4 - 46
daemon/disk_usage.go

@@ -8,34 +8,11 @@ import (
 
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/filters"
-	"github.com/docker/docker/layer"
 	"github.com/docker/docker/pkg/directory"
 	"github.com/docker/docker/volume"
-	"github.com/opencontainers/go-digest"
 	"github.com/sirupsen/logrus"
 )
 
-func (daemon *Daemon) getLayerRefs() map[layer.ChainID]int {
-	tmpImages := daemon.imageStore.Map()
-	layerRefs := map[layer.ChainID]int{}
-	for id, img := range tmpImages {
-		dgst := digest.Digest(id)
-		if len(daemon.referenceStore.References(dgst)) == 0 && len(daemon.imageStore.Children(id)) != 0 {
-			continue
-		}
-
-		rootFS := *img.RootFS
-		rootFS.DiffIDs = nil
-		for _, id := range img.RootFS.DiffIDs {
-			rootFS.Append(id)
-			chid := rootFS.ChainID()
-			layerRefs[chid]++
-		}
-	}
-
-	return layerRefs
-}
-
 // SystemDiskUsage returns information about the daemon data disk usage
 func (daemon *Daemon) SystemDiskUsage(ctx context.Context) (*types.DiskUsage, error) {
 	if !atomic.CompareAndSwapInt32(&daemon.diskUsageRunning, 0, 1) {
@@ -53,7 +30,7 @@ func (daemon *Daemon) SystemDiskUsage(ctx context.Context) (*types.DiskUsage, er
 	}
 
 	// Get all top images with extra attributes
-	allImages, err := daemon.Images(filters.NewArgs(), false, true)
+	allImages, err := daemon.imageService.Images(filters.NewArgs(), false, true)
 	if err != nil {
 		return nil, fmt.Errorf("failed to retrieve image list: %v", err)
 	}
@@ -93,28 +70,9 @@ func (daemon *Daemon) SystemDiskUsage(ctx context.Context) (*types.DiskUsage, er
 		return nil, err
 	}
 
-	// Get total layers size on disk
-	var allLayersSize int64
-	layerRefs := daemon.getLayerRefs()
-	for _, ls := range daemon.layerStores {
-		allLayers := ls.Map()
-		for _, l := range allLayers {
-			select {
-			case <-ctx.Done():
-				return nil, ctx.Err()
-			default:
-				size, err := l.DiffSize()
-				if err == nil {
-					if _, ok := layerRefs[l.ChainID()]; ok {
-						allLayersSize += size
-					} else {
-						logrus.Warnf("found leaked image layer %v", l.ChainID())
-					}
-				} else {
-					logrus.Warnf("failed to get diff size for layer %v", l.ChainID())
-				}
-			}
-		}
+	allLayersSize, err := daemon.imageService.LayerDiskUsage(ctx)
+	if err != nil {
+		return nil, err
 	}
 
 	return &types.DiskUsage{

+ 3 - 3
daemon/export.go

@@ -51,13 +51,13 @@ func (daemon *Daemon) containerExport(container *container.Container) (arch io.R
 	if !system.IsOSSupported(container.OS) {
 		return nil, fmt.Errorf("cannot export %s: %s ", container.ID, system.ErrNotSupportedOperatingSystem)
 	}
-	rwlayer, err := daemon.layerStores[container.OS].GetRWLayer(container.ID)
+	rwlayer, err := daemon.imageService.GetRWLayerByID(container.ID, container.OS)
 	if err != nil {
 		return nil, err
 	}
 	defer func() {
 		if err != nil {
-			daemon.layerStores[container.OS].ReleaseRWLayer(rwlayer)
+			daemon.imageService.ReleaseContainerLayer(rwlayer, container.OS)
 		}
 	}()
 
@@ -78,7 +78,7 @@ func (daemon *Daemon) containerExport(container *container.Container) (arch io.R
 	arch = ioutils.NewReadCloserWrapper(archive, func() error {
 		err := archive.Close()
 		rwlayer.Unmount()
-		daemon.layerStores[container.OS].ReleaseRWLayer(rwlayer)
+		daemon.imageService.ReleaseContainerLayer(rwlayer, container.OS)
 		return err
 	})
 	daemon.LogContainerEvent(container, "export")

+ 202 - 9
daemon/image.go

@@ -1,11 +1,25 @@
 package daemon // import "github.com/docker/docker/daemon"
 
 import (
+	"context"
 	"fmt"
+	"os"
 
 	"github.com/docker/distribution/reference"
+	"github.com/docker/docker/api/types/events"
+	"github.com/docker/docker/container"
+	daemonevents "github.com/docker/docker/daemon/events"
+	"github.com/docker/docker/distribution/metadata"
+	"github.com/docker/docker/distribution/xfer"
 	"github.com/docker/docker/errdefs"
 	"github.com/docker/docker/image"
+	"github.com/docker/docker/layer"
+	dockerreference "github.com/docker/docker/reference"
+	"github.com/docker/docker/registry"
+	"github.com/docker/libtrust"
+	"github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
 )
 
 // errImageDoesNotExist is error returned when no image can be found for a reference.
@@ -25,7 +39,8 @@ func (e errImageDoesNotExist) NotFound() {}
 
 // GetImageIDAndOS returns an image ID and operating system corresponding to the image referred to by
 // refOrID.
-func (daemon *Daemon) GetImageIDAndOS(refOrID string) (image.ID, string, error) {
+// called from list.go foldFilter()
+func (i imageService) GetImageIDAndOS(refOrID string) (image.ID, string, error) {
 	ref, err := reference.ParseAnyReference(refOrID)
 	if err != nil {
 		return "", "", errdefs.InvalidParameter(err)
@@ -37,23 +52,23 @@ func (daemon *Daemon) GetImageIDAndOS(refOrID string) (image.ID, string, error)
 			return "", "", errImageDoesNotExist{ref}
 		}
 		id := image.IDFromDigest(digested.Digest())
-		if img, err := daemon.imageStore.Get(id); err == nil {
+		if img, err := i.imageStore.Get(id); err == nil {
 			return id, img.OperatingSystem(), nil
 		}
 		return "", "", errImageDoesNotExist{ref}
 	}
 
-	if digest, err := daemon.referenceStore.Get(namedRef); err == nil {
+	if digest, err := i.referenceStore.Get(namedRef); err == nil {
 		// Search the image stores to get the operating system, defaulting to host OS.
 		id := image.IDFromDigest(digest)
-		if img, err := daemon.imageStore.Get(id); err == nil {
+		if img, err := i.imageStore.Get(id); err == nil {
 			return id, img.OperatingSystem(), nil
 		}
 	}
 
 	// Search based on ID
-	if id, err := daemon.imageStore.Search(refOrID); err == nil {
-		img, err := daemon.imageStore.Get(id)
+	if id, err := i.imageStore.Search(refOrID); err == nil {
+		img, err := i.imageStore.Get(id)
 		if err != nil {
 			return "", "", errImageDoesNotExist{ref}
 		}
@@ -64,10 +79,188 @@ func (daemon *Daemon) GetImageIDAndOS(refOrID string) (image.ID, string, error)
 }
 
 // GetImage returns an image corresponding to the image referred to by refOrID.
-func (daemon *Daemon) GetImage(refOrID string) (*image.Image, error) {
-	imgID, _, err := daemon.GetImageIDAndOS(refOrID)
+func (i *imageService) GetImage(refOrID string) (*image.Image, error) {
+	imgID, _, err := i.GetImageIDAndOS(refOrID)
 	if err != nil {
 		return nil, err
 	}
-	return daemon.imageStore.Get(imgID)
+	return i.imageStore.Get(imgID)
+}
+
+type containerStore interface {
+	// used by image delete
+	First(container.StoreFilter) *container.Container
+	// used by image prune, and image list
+	List() []*container.Container
+	// TODO: remove, only used for CommitBuildStep
+	Get(string) *container.Container
+}
+
+type imageService struct {
+	eventsService   *daemonevents.Events
+	containers      containerStore
+	downloadManager *xfer.LayerDownloadManager
+	uploadManager   *xfer.LayerUploadManager
+
+	// TODO: should accept a trust service instead of a key
+	trustKey libtrust.PrivateKey
+
+	registryService           registry.Service
+	referenceStore            dockerreference.Store
+	distributionMetadataStore metadata.Store
+	imageStore                image.Store
+	layerStores               map[string]layer.Store // By operating system
+
+	pruneRunning int32
+}
+
+// called from info.go
+func (i *imageService) CountImages() int {
+	return len(i.imageStore.Map())
+}
+
+// called from list.go to filter containers
+func (i *imageService) Children(id image.ID) []image.ID {
+	return i.imageStore.Children(id)
+}
+
+// TODO: accept an opt struct instead of container?
+// called from create.go
+func (i *imageService) GetRWLayer(container *container.Container, initFunc layer.MountInit) (layer.RWLayer, error) {
+	var layerID layer.ChainID
+	if container.ImageID != "" {
+		img, err := i.imageStore.Get(container.ImageID)
+		if err != nil {
+			return nil, err
+		}
+		layerID = img.RootFS.ChainID()
+	}
+
+	rwLayerOpts := &layer.CreateRWLayerOpts{
+		MountLabel: container.MountLabel,
+		InitFunc:   initFunc,
+		StorageOpt: container.HostConfig.StorageOpt,
+	}
+
+	// Indexing by OS is safe here as validation of OS has already been performed in create() (the only
+	// caller), and guaranteed non-nil
+	return i.layerStores[container.OS].CreateRWLayer(container.ID, layerID, rwLayerOpts)
+}
+
+// called from daemon.go Daemon.restore(), and Daemon.containerExport()
+func (i *imageService) GetRWLayerByID(cid string, os string) (layer.RWLayer, error) {
+	return i.layerStores[os].GetRWLayer(cid)
+}
+
+// called from info.go
+func (i *imageService) GraphDriverStatuses() map[string][][2]string {
+	result := make(map[string][][2]string)
+	for os, store := range i.layerStores {
+		result[os] = store.DriverStatus()
+	}
+	return result
+}
+
+// called from daemon.go Daemon.Shutdown(), and Daemon.Cleanup() (cleanup is actually continerCleanup)
+func (i *imageService) GetContainerMountID(cid string, os string) (string, error) {
+	return i.layerStores[os].GetMountID(cid)
+}
+
+// called from daemon.go Daemon.Shutdown()
+func (i *imageService) Cleanup() {
+	for os, ls := range i.layerStores {
+		if ls != nil {
+			if err := ls.Cleanup(); err != nil {
+				logrus.Errorf("Error during layer Store.Cleanup(): %v %s", err, os)
+			}
+		}
+	}
+}
+
+// moved from Daemon.GraphDriverName, multiple calls
+func (i *imageService) GraphDriverForOS(os string) string {
+	return i.layerStores[os].DriverName()
+}
+
+// called from delete.go Daemon.cleanupContainer(), and Daemon.containerExport()
+func (i *imageService) ReleaseContainerLayer(rwlayer layer.RWLayer, containerOS string) error {
+	metadata, err := i.layerStores[containerOS].ReleaseRWLayer(rwlayer)
+	layer.LogReleaseMetadata(metadata)
+	if err != nil && err != layer.ErrMountDoesNotExist && !os.IsNotExist(errors.Cause(err)) {
+		return errors.Wrapf(err, "driver %q failed to remove root filesystem",
+			i.layerStores[containerOS].DriverName())
+	}
+	return nil
+}
+
+// called from disk_usage.go
+func (i *imageService) LayerDiskUsage(ctx context.Context) (int64, error) {
+	var allLayersSize int64
+	layerRefs := i.getLayerRefs()
+	for _, ls := range i.layerStores {
+		allLayers := ls.Map()
+		for _, l := range allLayers {
+			select {
+			case <-ctx.Done():
+				return allLayersSize, ctx.Err()
+			default:
+				size, err := l.DiffSize()
+				if err == nil {
+					if _, ok := layerRefs[l.ChainID()]; ok {
+						allLayersSize += size
+					} else {
+						logrus.Warnf("found leaked image layer %v", l.ChainID())
+					}
+				} else {
+					logrus.Warnf("failed to get diff size for layer %v", l.ChainID())
+				}
+			}
+		}
+	}
+	return allLayersSize, nil
+}
+
+func (i *imageService) getLayerRefs() map[layer.ChainID]int {
+	tmpImages := i.imageStore.Map()
+	layerRefs := map[layer.ChainID]int{}
+	for id, img := range tmpImages {
+		dgst := digest.Digest(id)
+		if len(i.referenceStore.References(dgst)) == 0 && len(i.imageStore.Children(id)) != 0 {
+			continue
+		}
+
+		rootFS := *img.RootFS
+		rootFS.DiffIDs = nil
+		for _, id := range img.RootFS.DiffIDs {
+			rootFS.Append(id)
+			chid := rootFS.ChainID()
+			layerRefs[chid]++
+		}
+	}
+
+	return layerRefs
+}
+
+// LogImageEvent generates an event related to an image with only the default attributes.
+func (i *imageService) LogImageEvent(imageID, refName, action string) {
+	i.LogImageEventWithAttributes(imageID, refName, action, map[string]string{})
+}
+
+// LogImageEventWithAttributes generates an event related to an image with specific given attributes.
+func (i *imageService) LogImageEventWithAttributes(imageID, refName, action string, attributes map[string]string) {
+	img, err := i.GetImage(imageID)
+	if err == nil && img.Config != nil {
+		// image has not been removed yet.
+		// it could be missing if the event is `delete`.
+		copyAttributes(attributes, img.Config.Labels)
+	}
+	if refName != "" {
+		attributes["name"] = refName
+	}
+	actor := events.Actor{
+		ID:         imageID,
+		Attributes: attributes,
+	}
+
+	i.eventsService.Log(action, events.ImageEventType, actor)
 }

+ 15 - 20
daemon/build.go → daemon/image_builder.go

@@ -8,9 +8,9 @@ import (
 	"github.com/docker/docker/api/types/backend"
 	"github.com/docker/docker/builder"
 	"github.com/docker/docker/image"
+	"github.com/docker/docker/image/cache"
 	"github.com/docker/docker/layer"
 	"github.com/docker/docker/pkg/containerfs"
-	"github.com/docker/docker/pkg/idtools"
 	"github.com/docker/docker/pkg/stringid"
 	"github.com/docker/docker/pkg/system"
 	"github.com/docker/docker/registry"
@@ -138,7 +138,7 @@ func newROLayerForImage(img *image.Image, layerStore layer.Store) (builder.ROLay
 }
 
 // TODO: could this use the regular daemon PullImage ?
-func (daemon *Daemon) pullForBuilder(ctx context.Context, name string, authConfigs map[string]types.AuthConfig, output io.Writer, os string) (*image.Image, error) {
+func (i *imageService) pullForBuilder(ctx context.Context, name string, authConfigs map[string]types.AuthConfig, output io.Writer, os string) (*image.Image, error) {
 	ref, err := reference.ParseNormalizedNamed(name)
 	if err != nil {
 		return nil, err
@@ -148,7 +148,7 @@ func (daemon *Daemon) pullForBuilder(ctx context.Context, name string, authConfi
 	pullRegistryAuth := &types.AuthConfig{}
 	if len(authConfigs) > 0 {
 		// The request came with a full auth config, use it
-		repoInfo, err := daemon.RegistryService.ResolveRepository(ref)
+		repoInfo, err := i.registryService.ResolveRepository(ref)
 		if err != nil {
 			return nil, err
 		}
@@ -157,26 +157,26 @@ func (daemon *Daemon) pullForBuilder(ctx context.Context, name string, authConfi
 		pullRegistryAuth = &resolvedConfig
 	}
 
-	if err := daemon.pullImageWithReference(ctx, ref, os, nil, pullRegistryAuth, output); err != nil {
+	if err := i.pullImageWithReference(ctx, ref, os, nil, pullRegistryAuth, output); err != nil {
 		return nil, err
 	}
-	return daemon.GetImage(name)
+	return i.GetImage(name)
 }
 
 // GetImageAndReleasableLayer returns an image and releaseable layer for a reference or ID.
 // Every call to GetImageAndReleasableLayer MUST call releasableLayer.Release() to prevent
 // leaking of layers.
-func (daemon *Daemon) GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (builder.Image, builder.ROLayer, error) {
+func (i *imageService) GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (builder.Image, builder.ROLayer, error) {
 	if refOrID == "" {
 		if !system.IsOSSupported(opts.OS) {
 			return nil, nil, system.ErrNotSupportedOperatingSystem
 		}
-		layer, err := newROLayerForImage(nil, daemon.layerStores[opts.OS])
+		layer, err := newROLayerForImage(nil, i.layerStores[opts.OS])
 		return nil, layer, err
 	}
 
 	if opts.PullOption != backend.PullOptionForcePull {
-		image, err := daemon.GetImage(refOrID)
+		image, err := i.GetImage(refOrID)
 		if err != nil && opts.PullOption == backend.PullOptionNoPull {
 			return nil, nil, err
 		}
@@ -185,41 +185,36 @@ func (daemon *Daemon) GetImageAndReleasableLayer(ctx context.Context, refOrID st
 			if !system.IsOSSupported(image.OperatingSystem()) {
 				return nil, nil, system.ErrNotSupportedOperatingSystem
 			}
-			layer, err := newROLayerForImage(image, daemon.layerStores[image.OperatingSystem()])
+			layer, err := newROLayerForImage(image, i.layerStores[image.OperatingSystem()])
 			return image, layer, err
 		}
 	}
 
-	image, err := daemon.pullForBuilder(ctx, refOrID, opts.AuthConfig, opts.Output, opts.OS)
+	image, err := i.pullForBuilder(ctx, refOrID, opts.AuthConfig, opts.Output, opts.OS)
 	if err != nil {
 		return nil, nil, err
 	}
 	if !system.IsOSSupported(image.OperatingSystem()) {
 		return nil, nil, system.ErrNotSupportedOperatingSystem
 	}
-	layer, err := newROLayerForImage(image, daemon.layerStores[image.OperatingSystem()])
+	layer, err := newROLayerForImage(image, i.layerStores[image.OperatingSystem()])
 	return image, layer, err
 }
 
 // CreateImage creates a new image by adding a config and ID to the image store.
 // This is similar to LoadImage() except that it receives JSON encoded bytes of
 // an image instead of a tar archive.
-func (daemon *Daemon) CreateImage(config []byte, parent string) (builder.Image, error) {
-	id, err := daemon.imageStore.Create(config)
+func (i *imageService) CreateImage(config []byte, parent string) (builder.Image, error) {
+	id, err := i.imageStore.Create(config)
 	if err != nil {
 		return nil, errors.Wrapf(err, "failed to create image")
 	}
 
 	if parent != "" {
-		if err := daemon.imageStore.SetParent(id, image.ID(parent)); err != nil {
+		if err := i.imageStore.SetParent(id, image.ID(parent)); err != nil {
 			return nil, errors.Wrapf(err, "failed to set parent %s", parent)
 		}
 	}
 
-	return daemon.imageStore.Get(id)
-}
-
-// IDMappings returns uid/gid mappings for the builder
-func (daemon *Daemon) IDMappings() *idtools.IDMappings {
-	return daemon.idMappings
+	return i.imageStore.Get(id)
 }

+ 13 - 10
daemon/image_commit.go

@@ -9,10 +9,12 @@ import (
 	"github.com/docker/docker/layer"
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/system"
+	"github.com/pkg/errors"
 )
 
-func (daemon *Daemon) commitImage(c backend.CommitConfig) (image.ID, error) {
-	layerStore, ok := daemon.layerStores[c.ContainerOS]
+// CommitImage creates a new image from a commit config
+func (i *imageService) CommitImage(c backend.CommitConfig) (image.ID, error) {
+	layerStore, ok := i.layerStores[c.ContainerOS]
 	if !ok {
 		return "", system.ErrNotSupportedOperatingSystem
 	}
@@ -31,7 +33,7 @@ func (daemon *Daemon) commitImage(c backend.CommitConfig) (image.ID, error) {
 		parent = new(image.Image)
 		parent.RootFS = image.NewRootFS()
 	} else {
-		parent, err = daemon.imageStore.Get(image.ID(c.ParentImageID))
+		parent, err = i.imageStore.Get(image.ID(c.ParentImageID))
 		if err != nil {
 			return "", err
 		}
@@ -56,13 +58,13 @@ func (daemon *Daemon) commitImage(c backend.CommitConfig) (image.ID, error) {
 		return "", err
 	}
 
-	id, err := daemon.imageStore.Create(config)
+	id, err := i.imageStore.Create(config)
 	if err != nil {
 		return "", err
 	}
 
 	if c.ParentImageID != "" {
-		if err := daemon.imageStore.SetParent(id, image.ID(c.ParentImageID)); err != nil {
+		if err := i.imageStore.SetParent(id, image.ID(c.ParentImageID)); err != nil {
 			return "", err
 		}
 	}
@@ -112,13 +114,14 @@ func exportContainerRw(layerStore layer.Store, id, mountLabel string) (arch io.R
 //   * it doesn't log a container commit event
 //
 // This is a temporary shim. Should be removed when builder stops using commit.
-func (daemon *Daemon) CommitBuildStep(c backend.CommitConfig) (image.ID, error) {
-	container, err := daemon.GetContainer(c.ContainerID)
-	if err != nil {
-		return "", err
+func (i *imageService) CommitBuildStep(c backend.CommitConfig) (image.ID, error) {
+	container := i.containers.Get(c.ContainerID)
+	if container == nil {
+		// TODO: use typed error
+		return "", errors.Errorf("container not found: %s", c.ContainerID)
 	}
 	c.ContainerMountLabel = container.MountLabel
 	c.ContainerOS = container.OS
 	c.ParentImageID = string(container.ImageID)
-	return daemon.commitImage(c)
+	return i.CommitImage(c)
 }

+ 37 - 44
daemon/image_delete.go

@@ -60,14 +60,11 @@ const (
 // meaning any delete conflicts will cause the image to not be deleted and the
 // conflict will not be reported.
 //
-// FIXME: remove ImageDelete's dependency on Daemon, then move to the graph
-// package. This would require that we no longer need the daemon to determine
-// whether images are being used by a stopped or running container.
-func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.ImageDeleteResponseItem, error) {
+func (i *imageService) ImageDelete(imageRef string, force, prune bool) ([]types.ImageDeleteResponseItem, error) {
 	start := time.Now()
 	records := []types.ImageDeleteResponseItem{}
 
-	imgID, operatingSystem, err := daemon.GetImageIDAndOS(imageRef)
+	imgID, operatingSystem, err := i.GetImageIDAndOS(imageRef)
 	if err != nil {
 		return nil, err
 	}
@@ -75,7 +72,11 @@ func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.I
 		return nil, errors.Errorf("unable to delete image: %q", system.ErrNotSupportedOperatingSystem)
 	}
 
-	repoRefs := daemon.referenceStore.References(imgID.Digest())
+	repoRefs := i.referenceStore.References(imgID.Digest())
+
+	using := func(c *container.Container) bool {
+		return c.ImageID == imgID
+	}
 
 	var removedRepositoryRef bool
 	if !isImageIDPrefix(imgID.String(), imageRef) {
@@ -84,7 +85,7 @@ func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.I
 		// true, there are multiple repository references to this
 		// image, or there are no containers using the given reference.
 		if !force && isSingleReference(repoRefs) {
-			if container := daemon.getContainerUsingImage(imgID); container != nil {
+			if container := i.containers.First(using); container != nil {
 				// If we removed the repository reference then
 				// this image would remain "dangling" and since
 				// we really want to avoid that the client must
@@ -99,17 +100,17 @@ func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.I
 			return nil, err
 		}
 
-		parsedRef, err = daemon.removeImageRef(parsedRef)
+		parsedRef, err = i.removeImageRef(parsedRef)
 		if err != nil {
 			return nil, err
 		}
 
 		untaggedRecord := types.ImageDeleteResponseItem{Untagged: reference.FamiliarString(parsedRef)}
 
-		daemon.LogImageEvent(imgID.String(), imgID.String(), "untag")
+		i.LogImageEvent(imgID.String(), imgID.String(), "untag")
 		records = append(records, untaggedRecord)
 
-		repoRefs = daemon.referenceStore.References(imgID.Digest())
+		repoRefs = i.referenceStore.References(imgID.Digest())
 
 		// If a tag reference was removed and the only remaining
 		// references to the same repository are digest references,
@@ -127,7 +128,7 @@ func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.I
 				remainingRefs := []reference.Named{}
 				for _, repoRef := range repoRefs {
 					if _, repoRefIsCanonical := repoRef.(reference.Canonical); repoRefIsCanonical && parsedRef.Name() == repoRef.Name() {
-						if _, err := daemon.removeImageRef(repoRef); err != nil {
+						if _, err := i.removeImageRef(repoRef); err != nil {
 							return records, err
 						}
 
@@ -157,25 +158,25 @@ func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.I
 			if !force {
 				c |= conflictSoft &^ conflictActiveReference
 			}
-			if conflict := daemon.checkImageDeleteConflict(imgID, c); conflict != nil {
+			if conflict := i.checkImageDeleteConflict(imgID, c); conflict != nil {
 				return nil, conflict
 			}
 
 			for _, repoRef := range repoRefs {
-				parsedRef, err := daemon.removeImageRef(repoRef)
+				parsedRef, err := i.removeImageRef(repoRef)
 				if err != nil {
 					return nil, err
 				}
 
 				untaggedRecord := types.ImageDeleteResponseItem{Untagged: reference.FamiliarString(parsedRef)}
 
-				daemon.LogImageEvent(imgID.String(), imgID.String(), "untag")
+				i.LogImageEvent(imgID.String(), imgID.String(), "untag")
 				records = append(records, untaggedRecord)
 			}
 		}
 	}
 
-	if err := daemon.imageDeleteHelper(imgID, &records, force, prune, removedRepositoryRef); err != nil {
+	if err := i.imageDeleteHelper(imgID, &records, force, prune, removedRepositoryRef); err != nil {
 		return nil, err
 	}
 
@@ -223,26 +224,18 @@ func isImageIDPrefix(imageID, possiblePrefix string) bool {
 	return false
 }
 
-// getContainerUsingImage returns a container that was created using the given
-// imageID. Returns nil if there is no such container.
-func (daemon *Daemon) getContainerUsingImage(imageID image.ID) *container.Container {
-	return daemon.containers.First(func(c *container.Container) bool {
-		return c.ImageID == imageID
-	})
-}
-
 // removeImageRef attempts to parse and remove the given image reference from
 // this daemon's store of repository tag/digest references. The given
 // repositoryRef must not be an image ID but a repository name followed by an
 // optional tag or digest reference. If tag or digest is omitted, the default
 // tag is used. Returns the resolved image reference and an error.
-func (daemon *Daemon) removeImageRef(ref reference.Named) (reference.Named, error) {
+func (i *imageService) removeImageRef(ref reference.Named) (reference.Named, error) {
 	ref = reference.TagNameOnly(ref)
 
 	// Ignore the boolean value returned, as far as we're concerned, this
 	// is an idempotent operation and it's okay if the reference didn't
 	// exist in the first place.
-	_, err := daemon.referenceStore.Delete(ref)
+	_, err := i.referenceStore.Delete(ref)
 
 	return ref, err
 }
@@ -252,18 +245,18 @@ func (daemon *Daemon) removeImageRef(ref reference.Named) (reference.Named, erro
 // on the first encountered error. Removed references are logged to this
 // daemon's event service. An "Untagged" types.ImageDeleteResponseItem is added to the
 // given list of records.
-func (daemon *Daemon) removeAllReferencesToImageID(imgID image.ID, records *[]types.ImageDeleteResponseItem) error {
-	imageRefs := daemon.referenceStore.References(imgID.Digest())
+func (i *imageService) removeAllReferencesToImageID(imgID image.ID, records *[]types.ImageDeleteResponseItem) error {
+	imageRefs := i.referenceStore.References(imgID.Digest())
 
 	for _, imageRef := range imageRefs {
-		parsedRef, err := daemon.removeImageRef(imageRef)
+		parsedRef, err := i.removeImageRef(imageRef)
 		if err != nil {
 			return err
 		}
 
 		untaggedRecord := types.ImageDeleteResponseItem{Untagged: reference.FamiliarString(parsedRef)}
 
-		daemon.LogImageEvent(imgID.String(), imgID.String(), "untag")
+		i.LogImageEvent(imgID.String(), imgID.String(), "untag")
 		*records = append(*records, untaggedRecord)
 	}
 
@@ -303,15 +296,15 @@ func (idc *imageDeleteConflict) Conflict() {}
 // conflict is encountered, it will be returned immediately without deleting
 // the image. If quiet is true, any encountered conflicts will be ignored and
 // the function will return nil immediately without deleting the image.
-func (daemon *Daemon) imageDeleteHelper(imgID image.ID, records *[]types.ImageDeleteResponseItem, force, prune, quiet bool) error {
+func (i *imageService) imageDeleteHelper(imgID image.ID, records *[]types.ImageDeleteResponseItem, force, prune, quiet bool) error {
 	// First, determine if this image has any conflicts. Ignore soft conflicts
 	// if force is true.
 	c := conflictHard
 	if !force {
 		c |= conflictSoft
 	}
-	if conflict := daemon.checkImageDeleteConflict(imgID, c); conflict != nil {
-		if quiet && (!daemon.imageIsDangling(imgID) || conflict.used) {
+	if conflict := i.checkImageDeleteConflict(imgID, c); conflict != nil {
+		if quiet && (!i.imageIsDangling(imgID) || conflict.used) {
 			// Ignore conflicts UNLESS the image is "dangling" or not being used in
 			// which case we want the user to know.
 			return nil
@@ -322,23 +315,23 @@ func (daemon *Daemon) imageDeleteHelper(imgID image.ID, records *[]types.ImageDe
 		return conflict
 	}
 
-	parent, err := daemon.imageStore.GetParent(imgID)
+	parent, err := i.imageStore.GetParent(imgID)
 	if err != nil {
 		// There may be no parent
 		parent = ""
 	}
 
 	// Delete all repository tag/digest references to this image.
-	if err := daemon.removeAllReferencesToImageID(imgID, records); err != nil {
+	if err := i.removeAllReferencesToImageID(imgID, records); err != nil {
 		return err
 	}
 
-	removedLayers, err := daemon.imageStore.Delete(imgID)
+	removedLayers, err := i.imageStore.Delete(imgID)
 	if err != nil {
 		return err
 	}
 
-	daemon.LogImageEvent(imgID.String(), imgID.String(), "delete")
+	i.LogImageEvent(imgID.String(), imgID.String(), "delete")
 	*records = append(*records, types.ImageDeleteResponseItem{Deleted: imgID.String()})
 	for _, removedLayer := range removedLayers {
 		*records = append(*records, types.ImageDeleteResponseItem{Deleted: removedLayer.ChainID.String()})
@@ -353,7 +346,7 @@ func (daemon *Daemon) imageDeleteHelper(imgID image.ID, records *[]types.ImageDe
 	// either running or stopped).
 	// Do not force prunings, but do so quietly (stopping on any encountered
 	// conflicts).
-	return daemon.imageDeleteHelper(parent, records, false, true, true)
+	return i.imageDeleteHelper(parent, records, false, true, true)
 }
 
 // checkImageDeleteConflict determines whether there are any conflicts
@@ -362,9 +355,9 @@ func (daemon *Daemon) imageDeleteHelper(imgID image.ID, records *[]types.ImageDe
 // using the image. A soft conflict is any tags/digest referencing the given
 // image or any stopped container using the image. If ignoreSoftConflicts is
 // true, this function will not check for soft conflict conditions.
-func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, mask conflictType) *imageDeleteConflict {
+func (i *imageService) checkImageDeleteConflict(imgID image.ID, mask conflictType) *imageDeleteConflict {
 	// Check if the image has any descendant images.
-	if mask&conflictDependentChild != 0 && len(daemon.imageStore.Children(imgID)) > 0 {
+	if mask&conflictDependentChild != 0 && len(i.imageStore.Children(imgID)) > 0 {
 		return &imageDeleteConflict{
 			hard:    true,
 			imgID:   imgID,
@@ -377,7 +370,7 @@ func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, mask conflictType
 		running := func(c *container.Container) bool {
 			return c.IsRunning() && c.ImageID == imgID
 		}
-		if container := daemon.containers.First(running); container != nil {
+		if container := i.containers.First(running); container != nil {
 			return &imageDeleteConflict{
 				imgID:   imgID,
 				hard:    true,
@@ -388,7 +381,7 @@ func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, mask conflictType
 	}
 
 	// Check if any repository tags/digest reference this image.
-	if mask&conflictActiveReference != 0 && len(daemon.referenceStore.References(imgID.Digest())) > 0 {
+	if mask&conflictActiveReference != 0 && len(i.referenceStore.References(imgID.Digest())) > 0 {
 		return &imageDeleteConflict{
 			imgID:   imgID,
 			message: "image is referenced in multiple repositories",
@@ -400,7 +393,7 @@ func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, mask conflictType
 		stopped := func(c *container.Container) bool {
 			return !c.IsRunning() && c.ImageID == imgID
 		}
-		if container := daemon.containers.First(stopped); container != nil {
+		if container := i.containers.First(stopped); container != nil {
 			return &imageDeleteConflict{
 				imgID:   imgID,
 				used:    true,
@@ -415,6 +408,6 @@ func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, mask conflictType
 // imageIsDangling returns whether the given image is "dangling" which means
 // that there are no repository references to the given image and it has no
 // child images.
-func (daemon *Daemon) imageIsDangling(imgID image.ID) bool {
-	return !(len(daemon.referenceStore.References(imgID.Digest())) > 0 || len(daemon.imageStore.Children(imgID)) > 0)
+func (i *imageService) imageIsDangling(imgID image.ID) bool {
+	return !(len(i.referenceStore.References(imgID.Digest())) > 0 || len(i.imageStore.Children(imgID)) > 0)
 }

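The delete path above rests on two ideas: a bitmask of conflict types (conflictHard is always checked, conflictSoft only when the caller did not pass force), and a recursive walk that quietly prunes the parent chain once the requested image is gone. The sketch below is a minimal, standalone illustration of that control flow; the store type and its maps are hypothetical stand-ins, not the daemon's real imageStore, referenceStore, or container store.

package main

import "fmt"

type conflictType int

const (
	conflictDependentChild conflictType = 1 << iota
	conflictRunningContainer
	conflictActiveReference
	conflictStoppedContainer

	conflictHard = conflictDependentChild | conflictRunningContainer
	conflictSoft = conflictActiveReference | conflictStoppedContainer
)

// store is a hypothetical stand-in for the image and reference stores.
type store struct {
	parents  map[string]string
	children map[string][]string
	refs     map[string][]string
}

func (s *store) checkConflict(id string, mask conflictType) error {
	if mask&conflictDependentChild != 0 && len(s.children[id]) > 0 {
		return fmt.Errorf("%s has dependent child images", id)
	}
	if mask&conflictActiveReference != 0 && len(s.refs[id]) > 0 {
		return fmt.Errorf("%s is referenced in multiple repositories", id)
	}
	return nil
}

// deleteHelper mirrors the shape of imageDeleteHelper: check conflicts,
// delete the image, then prune the parent chain without forcing.
func (s *store) deleteHelper(id string, force bool) error {
	mask := conflictHard
	if !force {
		mask |= conflictSoft
	}
	if err := s.checkConflict(id, mask); err != nil {
		return err
	}
	fmt.Println("deleted", id)
	if parent := s.parents[id]; parent != "" {
		return s.deleteHelper(parent, false)
	}
	return nil
}

func main() {
	s := &store{
		parents:  map[string]string{"leaf": "base"},
		children: map[string][]string{},
		refs:     map[string][]string{},
	}
	fmt.Println(s.deleteHelper("leaf", false))
}
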
+ 4 - 4
daemon/image_exporter.go

@@ -11,15 +11,15 @@ import (
 // stream. All images with the given tag and all versions containing
 // the same tag are exported. names is the set of tags to export, and
 // outStream is the writer which the images are written to.
-func (daemon *Daemon) ExportImage(names []string, outStream io.Writer) error {
-	imageExporter := tarexport.NewTarExporter(daemon.imageStore, daemon.layerStores, daemon.referenceStore, daemon)
+func (i *imageService) ExportImage(names []string, outStream io.Writer) error {
+	imageExporter := tarexport.NewTarExporter(i.imageStore, i.layerStores, i.referenceStore, i)
 	return imageExporter.Save(names, outStream)
 }
 
 // LoadImage uploads a set of images into the repository. This is the
 // complement of ImageExport.  The input stream is an uncompressed tar
 // ball containing images and metadata.
-func (daemon *Daemon) LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error {
-	imageExporter := tarexport.NewTarExporter(daemon.imageStore, daemon.layerStores, daemon.referenceStore, daemon)
+func (i *imageService) LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error {
+	imageExporter := tarexport.NewTarExporter(i.imageStore, i.layerStores, i.referenceStore, i)
 	return imageExporter.Load(inTar, outStream, quiet)
 }

+ 4 - 4
daemon/getsize_unix.go → daemon/image_getsize_unix.go

@@ -9,7 +9,7 @@ import (
 )
 
 // getSize returns the real size & virtual size of the container.
-func (daemon *Daemon) getSize(containerID string) (int64, int64) {
+func (i *imageService) GetContainerLayerSize(containerID string) (int64, int64) {
 	var (
 		sizeRw, sizeRootfs int64
 		err                error
@@ -17,17 +17,17 @@ func (daemon *Daemon) getSize(containerID string) (int64, int64) {
 
 	// Safe to index by runtime.GOOS as Unix hosts don't support multiple
 	// container operating systems.
-	rwlayer, err := daemon.layerStores[runtime.GOOS].GetRWLayer(containerID)
+	rwlayer, err := i.layerStores[runtime.GOOS].GetRWLayer(containerID)
 	if err != nil {
 		logrus.Errorf("Failed to compute size of container rootfs %v: %v", containerID, err)
 		return sizeRw, sizeRootfs
 	}
-	defer daemon.layerStores[runtime.GOOS].ReleaseRWLayer(rwlayer)
+	defer i.layerStores[runtime.GOOS].ReleaseRWLayer(rwlayer)
 
 	sizeRw, err = rwlayer.Size()
 	if err != nil {
 		logrus.Errorf("Driver %s couldn't return diff size of container %s: %s",
-			daemon.GraphDriverName(runtime.GOOS), containerID, err)
+			i.layerStores[runtime.GOOS].DriverName(), containerID, err)
 		// FIXME: GetSize should return an error. Not changing it now in case
 		// there is a side-effect.
 		sizeRw = -1

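GetContainerLayerSize adds up two numbers: the size of the container's writable layer (SizeRw) and, on top of the image size, the total rootfs size. The hunk above cuts off before that final arithmetic, so the sketch below is only a guess at the overall shape; the -1 fallback is taken from the diff, while rwLayer and the rootfs combination are assumptions made for illustration.

package main

import "fmt"

// rwLayer is a hypothetical stand-in for the store's RW layer handle;
// only Size matters for this sketch.
type rwLayer interface {
	Size() (int64, error)
}

type memLayer struct{ diff int64 }

func (l memLayer) Size() (int64, error) { return l.diff, nil }

// containerLayerSize returns (SizeRw, SizeRootFs) given the writable
// layer and the size of the underlying image.
func containerLayerSize(rw rwLayer, imageSize int64) (int64, int64) {
	sizeRw, err := rw.Size()
	if err != nil {
		// Same fallback as the daemon: report -1 rather than failing.
		sizeRw = -1
	}
	sizeRootfs := imageSize
	if sizeRw != -1 {
		sizeRootfs += sizeRw
	}
	return sizeRw, sizeRootfs
}

func main() {
	fmt.Println(containerLayerSize(memLayer{diff: 2048}, 10240)) // 2048 12288
}
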
+ 6 - 6
daemon/image_history.go

@@ -11,9 +11,9 @@ import (
 
 // ImageHistory returns a slice of ImageHistory structures for the specified image
 // name by walking the image lineage.
-func (daemon *Daemon) ImageHistory(name string) ([]*image.HistoryResponseItem, error) {
+func (i *imageService) ImageHistory(name string) ([]*image.HistoryResponseItem, error) {
 	start := time.Now()
-	img, err := daemon.GetImage(name)
+	img, err := i.GetImage(name)
 	if err != nil {
 		return nil, err
 	}
@@ -33,12 +33,12 @@ func (daemon *Daemon) ImageHistory(name string) ([]*image.HistoryResponseItem, e
 			}
 
 			rootFS.Append(img.RootFS.DiffIDs[layerCounter])
-			l, err := daemon.layerStores[img.OperatingSystem()].Get(rootFS.ChainID())
+			l, err := i.layerStores[img.OperatingSystem()].Get(rootFS.ChainID())
 			if err != nil {
 				return nil, err
 			}
 			layerSize, err = l.DiffSize()
-			layer.ReleaseAndLog(daemon.layerStores[img.OperatingSystem()], l)
+			layer.ReleaseAndLog(i.layerStores[img.OperatingSystem()], l)
 			if err != nil {
 				return nil, err
 			}
@@ -62,7 +62,7 @@ func (daemon *Daemon) ImageHistory(name string) ([]*image.HistoryResponseItem, e
 		h.ID = id.String()
 
 		var tags []string
-		for _, r := range daemon.referenceStore.References(id.Digest()) {
+		for _, r := range i.referenceStore.References(id.Digest()) {
 			if _, ok := r.(reference.NamedTagged); ok {
 				tags = append(tags, reference.FamiliarString(r))
 			}
@@ -74,7 +74,7 @@ func (daemon *Daemon) ImageHistory(name string) ([]*image.HistoryResponseItem, e
 		if id == "" {
 			break
 		}
-		histImg, err = daemon.GetImage(id.String())
+		histImg, err = i.GetImage(id.String())
 		if err != nil {
 			break
 		}

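ImageHistory builds its response by walking the image lineage: resolve the image, emit an entry, then follow the parent reference until it is empty. A toy version of that loop, using a hypothetical in-memory image map instead of the real image store:

package main

import "fmt"

type img struct {
	parent  string
	comment string
}

// lineage follows parent pointers the way ImageHistory does, stopping
// when an image has no parent or cannot be resolved.
func lineage(images map[string]img, id string) []string {
	var chain []string
	for id != "" {
		cur, ok := images[id]
		if !ok {
			break
		}
		chain = append(chain, fmt.Sprintf("%s (%s)", id, cur.comment))
		id = cur.parent
	}
	return chain
}

func main() {
	images := map[string]img{
		"sha256:ccc": {parent: "sha256:bbb", comment: "CMD"},
		"sha256:bbb": {parent: "sha256:aaa", comment: "RUN apt-get update"},
		"sha256:aaa": {comment: "base"},
	}
	for _, entry := range lineage(images, "sha256:ccc") {
		fmt.Println(entry)
	}
}
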
+ 6 - 6
daemon/import.go → daemon/image_import.go

@@ -27,7 +27,7 @@ import (
 // inConfig (if src is "-"), or from a URI specified in src. Progress output is
 // written to outStream. Repository and tag names can optionally be given in
 // the repo and tag arguments, respectively.
-func (daemon *Daemon) ImportImage(src string, repository, os string, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error {
+func (i *imageService) ImportImage(src string, repository, os string, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error {
 	var (
 		rc     io.ReadCloser
 		resp   *http.Response
@@ -91,11 +91,11 @@ func (daemon *Daemon) ImportImage(src string, repository, os string, tag string,
 	if err != nil {
 		return err
 	}
-	l, err := daemon.layerStores[os].Register(inflatedLayerData, "")
+	l, err := i.layerStores[os].Register(inflatedLayerData, "")
 	if err != nil {
 		return err
 	}
-	defer layer.ReleaseAndLog(daemon.layerStores[os], l)
+	defer layer.ReleaseAndLog(i.layerStores[os], l)
 
 	created := time.Now().UTC()
 	imgConfig, err := json.Marshal(&image.Image{
@@ -120,19 +120,19 @@ func (daemon *Daemon) ImportImage(src string, repository, os string, tag string,
 		return err
 	}
 
-	id, err := daemon.imageStore.Create(imgConfig)
+	id, err := i.imageStore.Create(imgConfig)
 	if err != nil {
 		return err
 	}
 
 	// FIXME: connect with commit code and call refstore directly
 	if newRef != nil {
-		if err := daemon.TagImageWithReference(id, newRef); err != nil {
+		if err := i.TagImageWithReference(id, newRef); err != nil {
 			return err
 		}
 	}
 
-	daemon.LogImageEvent(id.String(), id.String(), "import")
+	i.LogImageEvent(id.String(), id.String(), "import")
 	outStream.Write(streamformatter.FormatStatus("", id.String()))
 	return nil
 }

+ 7 - 7
daemon/image_inspect.go

@@ -13,15 +13,15 @@ import (
 
 // LookupImage looks up an image by name and returns it as an ImageInspect
 // structure.
-func (daemon *Daemon) LookupImage(name string) (*types.ImageInspect, error) {
-	img, err := daemon.GetImage(name)
+func (i *imageService) LookupImage(name string) (*types.ImageInspect, error) {
+	img, err := i.GetImage(name)
 	if err != nil {
 		return nil, errors.Wrapf(err, "no such image: %s", name)
 	}
 	if !system.IsOSSupported(img.OperatingSystem()) {
 		return nil, system.ErrNotSupportedOperatingSystem
 	}
-	refs := daemon.referenceStore.References(img.ID().Digest())
+	refs := i.referenceStore.References(img.ID().Digest())
 	repoTags := []string{}
 	repoDigests := []string{}
 	for _, ref := range refs {
@@ -37,11 +37,11 @@ func (daemon *Daemon) LookupImage(name string) (*types.ImageInspect, error) {
 	var layerMetadata map[string]string
 	layerID := img.RootFS.ChainID()
 	if layerID != "" {
-		l, err := daemon.layerStores[img.OperatingSystem()].Get(layerID)
+		l, err := i.layerStores[img.OperatingSystem()].Get(layerID)
 		if err != nil {
 			return nil, err
 		}
-		defer layer.ReleaseAndLog(daemon.layerStores[img.OperatingSystem()], l)
+		defer layer.ReleaseAndLog(i.layerStores[img.OperatingSystem()], l)
 		size, err = l.Size()
 		if err != nil {
 			return nil, err
@@ -58,7 +58,7 @@ func (daemon *Daemon) LookupImage(name string) (*types.ImageInspect, error) {
 		comment = img.History[len(img.History)-1].Comment
 	}
 
-	lastUpdated, err := daemon.imageStore.GetLastUpdated(img.ID())
+	lastUpdated, err := i.imageStore.GetLastUpdated(img.ID())
 	if err != nil {
 		return nil, err
 	}
@@ -86,7 +86,7 @@ func (daemon *Daemon) LookupImage(name string) (*types.ImageInspect, error) {
 		},
 	}
 
-	imageInspect.GraphDriver.Name = daemon.GraphDriverName(img.OperatingSystem())
+	imageInspect.GraphDriver.Name = i.GraphDriverForOS(img.OperatingSystem())
 	imageInspect.GraphDriver.Data = layerMetadata
 
 	return imageInspect, nil

+ 10 - 10
daemon/image_prune.go

@@ -22,11 +22,11 @@ var imagesAcceptedFilters = map[string]bool{
 }
 
 // ImagesPrune removes unused images
-func (daemon *Daemon) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*types.ImagesPruneReport, error) {
-	if !atomic.CompareAndSwapInt32(&daemon.pruneRunning, 0, 1) {
+func (i *imageService) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*types.ImagesPruneReport, error) {
+	if !atomic.CompareAndSwapInt32(&i.pruneRunning, 0, 1) {
 		return nil, errPruneRunning
 	}
-	defer atomic.StoreInt32(&daemon.pruneRunning, 0)
+	defer atomic.StoreInt32(&i.pruneRunning, 0)
 
 	// make sure that only accepted filters have been received
 	err := pruneFilters.Validate(imagesAcceptedFilters)
@@ -52,14 +52,14 @@ func (daemon *Daemon) ImagesPrune(ctx context.Context, pruneFilters filters.Args
 
 	var allImages map[image.ID]*image.Image
 	if danglingOnly {
-		allImages = daemon.imageStore.Heads()
+		allImages = i.imageStore.Heads()
 	} else {
-		allImages = daemon.imageStore.Map()
+		allImages = i.imageStore.Map()
 	}
 
 	// Filter intermediary images and get their unique size
 	allLayers := make(map[layer.ChainID]layer.Layer)
-	for _, ls := range daemon.layerStores {
+	for _, ls := range i.layerStores {
 		for k, v := range ls.Map() {
 			allLayers[k] = v
 		}
@@ -71,7 +71,7 @@ func (daemon *Daemon) ImagesPrune(ctx context.Context, pruneFilters filters.Args
 			return nil, ctx.Err()
 		default:
 			dgst := digest.Digest(id)
-			if len(daemon.referenceStore.References(dgst)) == 0 && len(daemon.imageStore.Children(id)) != 0 {
+			if len(i.referenceStore.References(dgst)) == 0 && len(i.imageStore.Children(id)) != 0 {
 				continue
 			}
 			if !until.IsZero() && img.Created.After(until) {
@@ -96,7 +96,7 @@ deleteImagesLoop:
 		}
 
 		deletedImages := []types.ImageDeleteResponseItem{}
-		refs := daemon.referenceStore.References(id.Digest())
+		refs := i.referenceStore.References(id.Digest())
 		if len(refs) > 0 {
 			shouldDelete := !danglingOnly
 			if !shouldDelete {
@@ -114,7 +114,7 @@ deleteImagesLoop:
 
 			if shouldDelete {
 				for _, ref := range refs {
-					imgDel, err := daemon.ImageDelete(ref.String(), false, true)
+					imgDel, err := i.ImageDelete(ref.String(), false, true)
 					if imageDeleteFailed(ref.String(), err) {
 						continue
 					}
@@ -123,7 +123,7 @@ deleteImagesLoop:
 			}
 		} else {
 			hex := id.Digest().Hex()
-			imgDel, err := daemon.ImageDelete(hex, false, true)
+			imgDel, err := i.ImageDelete(hex, false, true)
 			if imageDeleteFailed(hex, err) {
 				continue
 			}

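The CompareAndSwapInt32 guard at the top of ImagesPrune makes prune a single-flight operation: a second caller gets errPruneRunning instead of racing the first. The field name pruneRunning and the error come from the diff; everything else below is an invented, self-contained illustration of the pattern.

package main

import (
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
)

var errPruneRunning = errors.New("a prune operation is already running")

type pruner struct {
	pruneRunning int32
}

// prune refuses to start while another prune is in flight.
func (p *pruner) prune() error {
	if !atomic.CompareAndSwapInt32(&p.pruneRunning, 0, 1) {
		return errPruneRunning
	}
	defer atomic.StoreInt32(&p.pruneRunning, 0)

	// ... walk the image store and delete unused images ...
	return nil
}

func main() {
	p := &pruner{}
	var wg sync.WaitGroup
	for n := 0; n < 3; n++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			fmt.Printf("prune %d: %v\n", n, p.prune())
		}(n)
	}
	wg.Wait()
}
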
+ 12 - 12
daemon/image_pull.go

@@ -19,7 +19,7 @@ import (
 
 // PullImage initiates a pull operation. image is the repository name to pull, and
 // tag may be either empty, or indicate a specific tag to pull.
-func (daemon *Daemon) PullImage(ctx context.Context, image, tag, os string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error {
+func (i *imageService) PullImage(ctx context.Context, image, tag, os string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error {
 	// Special case: "pull -a" may send an image name with a
 	// trailing :. This is ugly, but let's not break API
 	// compatibility.
@@ -44,10 +44,10 @@ func (daemon *Daemon) PullImage(ctx context.Context, image, tag, os string, meta
 		}
 	}
 
-	return daemon.pullImageWithReference(ctx, ref, os, metaHeaders, authConfig, outStream)
+	return i.pullImageWithReference(ctx, ref, os, metaHeaders, authConfig, outStream)
 }
 
-func (daemon *Daemon) pullImageWithReference(ctx context.Context, ref reference.Named, os string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error {
+func (i *imageService) pullImageWithReference(ctx context.Context, ref reference.Named, os string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error {
 	// Include a buffer so that slow client connections don't affect
 	// transfer performance.
 	progressChan := make(chan progress.Progress, 100)
@@ -71,13 +71,13 @@ func (daemon *Daemon) pullImageWithReference(ctx context.Context, ref reference.
 			MetaHeaders:      metaHeaders,
 			AuthConfig:       authConfig,
 			ProgressOutput:   progress.ChanOutput(progressChan),
-			RegistryService:  daemon.RegistryService,
-			ImageEventLogger: daemon.LogImageEvent,
-			MetadataStore:    daemon.distributionMetadataStore,
-			ImageStore:       distribution.NewImageConfigStoreFromStore(daemon.imageStore),
-			ReferenceStore:   daemon.referenceStore,
+			RegistryService:  i.registryService,
+			ImageEventLogger: i.LogImageEvent,
+			MetadataStore:    i.distributionMetadataStore,
+			ImageStore:       distribution.NewImageConfigStoreFromStore(i.imageStore),
+			ReferenceStore:   i.referenceStore,
 		},
-		DownloadManager: daemon.downloadManager,
+		DownloadManager: i.downloadManager,
 		Schema2Types:    distribution.ImageTypes,
 		OS:              os,
 	}
@@ -89,9 +89,9 @@ func (daemon *Daemon) pullImageWithReference(ctx context.Context, ref reference.
 }
 
 // GetRepository returns a repository from the registry.
-func (daemon *Daemon) GetRepository(ctx context.Context, ref reference.Named, authConfig *types.AuthConfig) (dist.Repository, bool, error) {
+func (i *imageService) GetRepository(ctx context.Context, ref reference.Named, authConfig *types.AuthConfig) (dist.Repository, bool, error) {
 	// get repository info
-	repoInfo, err := daemon.RegistryService.ResolveRepository(ref)
+	repoInfo, err := i.registryService.ResolveRepository(ref)
 	if err != nil {
 		return nil, false, err
 	}
@@ -101,7 +101,7 @@ func (daemon *Daemon) GetRepository(ctx context.Context, ref reference.Named, au
 	}
 
 	// get endpoints
-	endpoints, err := daemon.RegistryService.LookupPullEndpoints(reference.Domain(repoInfo.Name))
+	endpoints, err := i.registryService.LookupPullEndpoints(reference.Domain(repoInfo.Name))
 	if err != nil {
 		return nil, false, err
 	}

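pullImageWithReference deliberately buffers its progress channel so that a slow client connection drains progress updates without stalling the transfer itself. Stripped of the distribution machinery, the producer/consumer split looks roughly like this; all names here are invented for the example.

package main

import (
	"fmt"
	"time"
)

type progressMsg struct {
	ID, Action string
}

// transfer simulates the pull: it reports progress as it goes and rarely
// blocks, because the channel has room to spare.
func transfer(progressChan chan<- progressMsg) {
	defer close(progressChan)
	for n := 0; n < 5; n++ {
		progressChan <- progressMsg{ID: "layer1", Action: fmt.Sprintf("chunk %d", n)}
	}
}

func main() {
	// Include a buffer so that a slow consumer doesn't affect transfer
	// performance - the same reasoning as the comment in the pull path.
	progressChan := make(chan progressMsg, 100)
	writesDone := make(chan struct{})

	go func() {
		for msg := range progressChan {
			time.Sleep(10 * time.Millisecond) // the "slow client connection"
			fmt.Println(msg.ID, msg.Action)
		}
		close(writesDone)
	}()

	transfer(progressChan)
	<-writesDone
}
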
+ 9 - 9
daemon/image_push.go

@@ -13,7 +13,7 @@ import (
 )
 
 // PushImage initiates a push operation on the repository named localName.
-func (daemon *Daemon) PushImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error {
+func (i *imageService) PushImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error {
 	ref, err := reference.ParseNormalizedNamed(image)
 	if err != nil {
 		return err
@@ -44,16 +44,16 @@ func (daemon *Daemon) PushImage(ctx context.Context, image, tag string, metaHead
 			MetaHeaders:      metaHeaders,
 			AuthConfig:       authConfig,
 			ProgressOutput:   progress.ChanOutput(progressChan),
-			RegistryService:  daemon.RegistryService,
-			ImageEventLogger: daemon.LogImageEvent,
-			MetadataStore:    daemon.distributionMetadataStore,
-			ImageStore:       distribution.NewImageConfigStoreFromStore(daemon.imageStore),
-			ReferenceStore:   daemon.referenceStore,
+			RegistryService:  i.registryService,
+			ImageEventLogger: i.LogImageEvent,
+			MetadataStore:    i.distributionMetadataStore,
+			ImageStore:       distribution.NewImageConfigStoreFromStore(i.imageStore),
+			ReferenceStore:   i.referenceStore,
 		},
 		ConfigMediaType: schema2.MediaTypeImageConfig,
-		LayerStores:     distribution.NewLayerProvidersFromStores(daemon.layerStores),
-		TrustKey:        daemon.trustKey,
-		UploadManager:   daemon.uploadManager,
+		LayerStores:     distribution.NewLayerProvidersFromStores(i.layerStores),
+		TrustKey:        i.trustKey,
+		UploadManager:   i.uploadManager,
 	}
 
 	err = distribution.Push(ctx, ref, imagePushConfig)

+ 5 - 2
daemon/search.go → daemon/image_search.go

@@ -19,7 +19,10 @@ var acceptedSearchFilterTags = map[string]bool{
 
 // SearchRegistryForImages queries the registry for images matching
 // term. authConfig is used to login.
-func (daemon *Daemon) SearchRegistryForImages(ctx context.Context, filtersArgs string, term string, limit int,
+//
+// TODO: this could be implemented in a registry service instead of the image
+// service.
+func (i *imageService) SearchRegistryForImages(ctx context.Context, filtersArgs string, term string, limit int,
 	authConfig *types.AuthConfig,
 	headers map[string][]string) (*registrytypes.SearchResults, error) {
 
@@ -60,7 +63,7 @@ func (daemon *Daemon) SearchRegistryForImages(ctx context.Context, filtersArgs s
 		}
 	}
 
-	unfilteredResult, err := daemon.RegistryService.Search(ctx, term, limit, authConfig, dockerversion.DockerUserAgent(ctx), headers)
+	unfilteredResult, err := i.registryService.Search(ctx, term, limit, authConfig, dockerversion.DockerUserAgent(ctx), headers)
 	if err != nil {
 		return nil, err
 	}

+ 7 - 7
daemon/image_tag.go

@@ -7,8 +7,8 @@ import (
 
 // TagImage creates the tag specified by newTag, pointing to the image named
 // imageName (alternatively, imageName can also be an image ID).
-func (daemon *Daemon) TagImage(imageName, repository, tag string) (string, error) {
-	imageID, _, err := daemon.GetImageIDAndOS(imageName)
+func (i *imageService) TagImage(imageName, repository, tag string) (string, error) {
+	imageID, _, err := i.GetImageIDAndOS(imageName)
 	if err != nil {
 		return "", err
 	}
@@ -23,19 +23,19 @@ func (daemon *Daemon) TagImage(imageName, repository, tag string) (string, error
 		}
 	}
 
-	err = daemon.TagImageWithReference(imageID, newTag)
+	err = i.TagImageWithReference(imageID, newTag)
 	return reference.FamiliarString(newTag), err
 }
 
 // TagImageWithReference adds the given reference to the image ID provided.
-func (daemon *Daemon) TagImageWithReference(imageID image.ID, newTag reference.Named) error {
-	if err := daemon.referenceStore.AddTag(newTag, imageID.Digest(), true); err != nil {
+func (i *imageService) TagImageWithReference(imageID image.ID, newTag reference.Named) error {
+	if err := i.referenceStore.AddTag(newTag, imageID.Digest(), true); err != nil {
 		return err
 	}
 
-	if err := daemon.imageStore.SetLastUpdated(imageID); err != nil {
+	if err := i.imageStore.SetLastUpdated(imageID); err != nil {
 		return err
 	}
-	daemon.LogImageEvent(imageID.String(), reference.FamiliarString(newTag), "tag")
+	i.LogImageEvent(imageID.String(), reference.FamiliarString(newTag), "tag")
 	return nil
 }

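TagImage builds a normalized, tagged reference before handing it to the reference store. For readers unfamiliar with the reference package the daemon uses, this is roughly what that normalization does on its own; the snippet assumes github.com/docker/distribution/reference is available and is not taken from the commit itself.

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// Parse a repository name, then attach a tag to it.
	ref, err := reference.ParseNormalizedNamed("myrepo/myimage")
	if err != nil {
		panic(err)
	}
	tagged, err := reference.WithTag(ref, "v1")
	if err != nil {
		panic(err)
	}
	fmt.Println(reference.FamiliarString(tagged)) // myrepo/myimage:v1
	fmt.Println(tagged.String())                  // docker.io/myrepo/myimage:v1
}
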
+ 21 - 21
daemon/images.go

@@ -34,8 +34,8 @@ func (r byCreated) Swap(i, j int)      { r[i], r[j] = r[j], r[i] }
 func (r byCreated) Less(i, j int) bool { return r[i].Created < r[j].Created }
 
 // Map returns a map of all images in the ImageStore
-func (daemon *Daemon) Map() map[image.ID]*image.Image {
-	return daemon.imageStore.Map()
+func (i *imageService) Map() map[image.ID]*image.Image {
+	return i.imageStore.Map()
 }
 
 // Images returns a filtered list of images. filterArgs is a JSON-encoded set
@@ -43,7 +43,7 @@ func (daemon *Daemon) Map() map[image.ID]*image.Image {
 // filter is a shell glob string applied to repository names. The argument
 // named all controls whether all images in the graph are filtered, or just
 // the heads.
-func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error) {
+func (i *imageService) Images(imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error) {
 	var (
 		allImages    map[image.ID]*image.Image
 		err          error
@@ -62,14 +62,14 @@ func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs
 		}
 	}
 	if danglingOnly {
-		allImages = daemon.imageStore.Heads()
+		allImages = i.imageStore.Heads()
 	} else {
-		allImages = daemon.imageStore.Map()
+		allImages = i.imageStore.Map()
 	}
 
 	var beforeFilter, sinceFilter *image.Image
 	err = imageFilters.WalkValues("before", func(value string) error {
-		beforeFilter, err = daemon.GetImage(value)
+		beforeFilter, err = i.GetImage(value)
 		return err
 	})
 	if err != nil {
@@ -77,7 +77,7 @@ func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs
 	}
 
 	err = imageFilters.WalkValues("since", func(value string) error {
-		sinceFilter, err = daemon.GetImage(value)
+		sinceFilter, err = i.GetImage(value)
 		return err
 	})
 	if err != nil {
@@ -124,7 +124,7 @@ func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs
 		layerID := img.RootFS.ChainID()
 		var size int64
 		if layerID != "" {
-			l, err := daemon.layerStores[img.OperatingSystem()].Get(layerID)
+			l, err := i.layerStores[img.OperatingSystem()].Get(layerID)
 			if err != nil {
 				// The layer may have been deleted between the call to `Map()` or
 				// `Heads()` and the call to `Get()`, so we just ignore this error
@@ -135,7 +135,7 @@ func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs
 			}
 
 			size, err = l.Size()
-			layer.ReleaseAndLog(daemon.layerStores[img.OperatingSystem()], l)
+			layer.ReleaseAndLog(i.layerStores[img.OperatingSystem()], l)
 			if err != nil {
 				return nil, err
 			}
@@ -143,7 +143,7 @@ func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs
 
 		newImage := newImage(img, size)
 
-		for _, ref := range daemon.referenceStore.References(id.Digest()) {
+		for _, ref := range i.referenceStore.References(id.Digest()) {
 			if imageFilters.Contains("reference") {
 				var found bool
 				var matchErr error
@@ -165,7 +165,7 @@ func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs
 			}
 		}
 		if newImage.RepoDigests == nil && newImage.RepoTags == nil {
-			if all || len(daemon.imageStore.Children(id)) == 0 {
+			if all || len(i.imageStore.Children(id)) == 0 {
 
 				if imageFilters.Contains("dangling") && !danglingOnly {
 					//dangling=false case, so dangling image is not needed
@@ -186,8 +186,8 @@ func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs
 		if withExtraAttrs {
 			// lazily init variables
 			if imagesMap == nil {
-				allContainers = daemon.List()
-				allLayers = daemon.layerStores[img.OperatingSystem()].Map()
+				allContainers = i.containers.List()
+				allLayers = i.layerStores[img.OperatingSystem()].Map()
 				imagesMap = make(map[*image.Image]*types.ImageSummary)
 				layerRefs = make(map[layer.ChainID]int)
 			}
@@ -249,20 +249,20 @@ func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs
 // This new image contains only the layers from its parent + 1 extra layer which contains the diff of all the layers in between.
 // The existing image(s) are not destroyed.
 // If no parent is specified, a new image is created from the diff of all the specified image's layers merged into a new layer that has no parents.
-func (daemon *Daemon) SquashImage(id, parent string) (string, error) {
+func (i *imageService) SquashImage(id, parent string) (string, error) {
 
 	var (
 		img *image.Image
 		err error
 	)
-	if img, err = daemon.imageStore.Get(image.ID(id)); err != nil {
+	if img, err = i.imageStore.Get(image.ID(id)); err != nil {
 		return "", err
 	}
 
 	var parentImg *image.Image
 	var parentChainID layer.ChainID
 	if len(parent) != 0 {
-		parentImg, err = daemon.imageStore.Get(image.ID(parent))
+		parentImg, err = i.imageStore.Get(image.ID(parent))
 		if err != nil {
 			return "", errors.Wrap(err, "error getting specified parent layer")
 		}
@@ -272,11 +272,11 @@ func (daemon *Daemon) SquashImage(id, parent string) (string, error) {
 		parentImg = &image.Image{RootFS: rootFS}
 	}
 
-	l, err := daemon.layerStores[img.OperatingSystem()].Get(img.RootFS.ChainID())
+	l, err := i.layerStores[img.OperatingSystem()].Get(img.RootFS.ChainID())
 	if err != nil {
 		return "", errors.Wrap(err, "error getting image layer")
 	}
-	defer daemon.layerStores[img.OperatingSystem()].Release(l)
+	defer i.layerStores[img.OperatingSystem()].Release(l)
 
 	ts, err := l.TarStreamFrom(parentChainID)
 	if err != nil {
@@ -284,11 +284,11 @@ func (daemon *Daemon) SquashImage(id, parent string) (string, error) {
 	}
 	defer ts.Close()
 
-	newL, err := daemon.layerStores[img.OperatingSystem()].Register(ts, parentChainID)
+	newL, err := i.layerStores[img.OperatingSystem()].Register(ts, parentChainID)
 	if err != nil {
 		return "", errors.Wrap(err, "error registering layer")
 	}
-	defer daemon.layerStores[img.OperatingSystem()].Release(newL)
+	defer i.layerStores[img.OperatingSystem()].Release(newL)
 
 	newImage := *img
 	newImage.RootFS = nil
@@ -323,7 +323,7 @@ func (daemon *Daemon) SquashImage(id, parent string) (string, error) {
 		return "", errors.Wrap(err, "error marshalling image config")
 	}
 
-	newImgID, err := daemon.imageStore.Create(b)
+	newImgID, err := i.imageStore.Create(b)
 	if err != nil {
 		return "", errors.Wrap(err, "error creating new image after squash")
 	}

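Images and ImagesPrune both take their filters as a filters.Args value and use Contains and WalkValues to interpret it. A small example of how a caller builds the filters this code understands, assuming the github.com/docker/docker/api/types/filters package; the filter names ("dangling", "reference") come from the diff, the values are made up.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	args := filters.NewArgs()
	args.Add("dangling", "true")
	args.Add("reference", "myrepo/*")

	fmt.Println(args.Contains("dangling")) // true

	_ = args.WalkValues("reference", func(value string) error {
		fmt.Println("reference filter:", value)
		return nil
	})
}
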
+ 3 - 2
daemon/info.go

@@ -80,8 +80,9 @@ func (daemon *Daemon) SystemInfo() (*types.Info, error) {
 
 	var ds [][2]string
 	drivers := ""
+	statuses := daemon.imageService.GraphDriverStatuses()
 	for os, gd := range daemon.graphDrivers {
-		ds = append(ds, daemon.layerStores[os].DriverStatus()...)
+		ds = append(ds, statuses[os]...)
 		drivers += gd
 		if len(daemon.graphDrivers) > 1 {
 			drivers += fmt.Sprintf(" (%s) ", os)
@@ -95,7 +96,7 @@ func (daemon *Daemon) SystemInfo() (*types.Info, error) {
 		ContainersRunning:  cRunning,
 		ContainersPaused:   cPaused,
 		ContainersStopped:  cStopped,
-		Images:             len(daemon.imageStore.Map()),
+		Images:             daemon.imageService.CountImages(),
 		Driver:             drivers,
 		DriverStatus:       ds,
 		Plugins:            daemon.showPluginsInfo(),

+ 1 - 1
daemon/inspect.go

@@ -79,7 +79,7 @@ func (daemon *Daemon) ContainerInspectCurrent(name string, size bool) (*types.Co
 	container.Unlock()
 
 	if size {
-		sizeRw, sizeRootFs := daemon.getSize(base.ID)
+		sizeRw, sizeRootFs := daemon.imageService.GetContainerLayerSize(base.ID)
 		base.SizeRw = &sizeRw
 		base.SizeRootFs = &sizeRootFs
 	}

+ 4 - 4
daemon/list.go

@@ -239,7 +239,7 @@ func (daemon *Daemon) reducePsContainer(container *container.Snapshot, ctx *list
 
 	// release lock because size calculation is slow
 	if ctx.Size {
-		sizeRw, sizeRootFs := daemon.getSize(newC.ID)
+		sizeRw, sizeRootFs := daemon.imageService.GetContainerLayerSize(newC.ID)
 		newC.SizeRw = sizeRw
 		newC.SizeRootFs = sizeRootFs
 	}
@@ -323,7 +323,7 @@ func (daemon *Daemon) foldFilter(view container.View, config *types.ContainerLis
 	if psFilters.Contains("ancestor") {
 		ancestorFilter = true
 		psFilters.WalkValues("ancestor", func(ancestor string) error {
-			id, _, err := daemon.GetImageIDAndOS(ancestor)
+			id, _, err := daemon.imageService.GetImageIDAndOS(ancestor)
 			if err != nil {
 				logrus.Warnf("Error while looking up for image %v", ancestor)
 				return nil
@@ -333,7 +333,7 @@ func (daemon *Daemon) foldFilter(view container.View, config *types.ContainerLis
 				return nil
 			}
 			// Then walk down the graph and put the imageIds in imagesFilter
-			populateImageFilterByParents(imagesFilter, id, daemon.imageStore.Children)
+			populateImageFilterByParents(imagesFilter, id, daemon.imageService.Children)
 			return nil
 		})
 	}
@@ -591,7 +591,7 @@ func (daemon *Daemon) refreshImage(s *container.Snapshot, ctx *listContext) (*ty
 	c := s.Container
 	image := s.Image // keep the original ref if still valid (hasn't changed)
 	if image != s.ImageID {
-		id, _, err := daemon.GetImageIDAndOS(image)
+		id, _, err := daemon.imageService.GetImageIDAndOS(image)
 		if _, isDNE := err.(errImageDoesNotExist); err != nil && !isDNE {
 			return nil, err
 		}

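The ancestor filter above seeds a set with every descendant of the requested image by handing imageService.Children to populateImageFilterByParents. That helper is not shown in this diff, but based on its call site it is presumably a small recursive walk along these lines; treat this as a guess, not the daemon's actual implementation.

package main

import "fmt"

// populate adds id and every transitive child to the filter set.
func populate(filter map[string]bool, id string, children func(string) []string) {
	if filter[id] {
		return // already visited
	}
	filter[id] = true
	for _, child := range children(id) {
		populate(filter, child, children)
	}
}

func main() {
	tree := map[string][]string{
		"base": {"app", "tools"},
		"app":  {"app-debug"},
	}
	children := func(id string) []string { return tree[id] }

	filter := map[string]bool{}
	populate(filter, "base", children)
	fmt.Println(filter) // map[app:true app-debug:true base:true tools:true]
}
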
+ 1 - 1
daemon/oci_windows.go

@@ -24,7 +24,7 @@ const (
 )
 
 func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) {
-	img, err := daemon.GetImage(string(c.ImageID))
+	img, err := daemon.imageService.GetImage(string(c.ImageID))
 	if err != nil {
 		return nil, err
 	}

+ 1 - 1
daemon/prune.go

@@ -75,7 +75,7 @@ func (daemon *Daemon) ContainersPrune(ctx context.Context, pruneFilters filters.
 			if !matchLabels(pruneFilters, c.Config.Labels) {
 				continue
 			}
-			cSize, _ := daemon.getSize(c.ID)
+			cSize, _ := daemon.imageService.GetContainerLayerSize(c.ID)
 			// TODO: sets RmLink to true?
 			err := daemon.ContainerRm(c.ID, &types.ContainerRmConfig{})
 			if err != nil {

+ 4 - 4
daemon/reload.go

@@ -90,8 +90,8 @@ func (daemon *Daemon) reloadMaxConcurrentDownloadsAndUploads(conf *config.Config
 		daemon.configStore.MaxConcurrentDownloads = &maxConcurrentDownloads
 	}
 	logrus.Debugf("Reset Max Concurrent Downloads: %d", *daemon.configStore.MaxConcurrentDownloads)
-	if daemon.downloadManager != nil {
-		daemon.downloadManager.SetConcurrency(*daemon.configStore.MaxConcurrentDownloads)
+	if daemon.imageService.downloadManager != nil {
+		daemon.imageService.downloadManager.SetConcurrency(*daemon.configStore.MaxConcurrentDownloads)
 	}
 
 	// prepare reload event attributes with updatable configurations
@@ -106,8 +106,8 @@ func (daemon *Daemon) reloadMaxConcurrentDownloadsAndUploads(conf *config.Config
 		daemon.configStore.MaxConcurrentUploads = &maxConcurrentUploads
 	}
 	logrus.Debugf("Reset Max Concurrent Uploads: %d", *daemon.configStore.MaxConcurrentUploads)
-	if daemon.uploadManager != nil {
-		daemon.uploadManager.SetConcurrency(*daemon.configStore.MaxConcurrentUploads)
+	if daemon.imageService.uploadManager != nil {
+		daemon.imageService.uploadManager.SetConcurrency(*daemon.configStore.MaxConcurrentUploads)
 	}
 
 	// prepare reload event attributes with updatable configurations

+ 29 - 12
daemon/reload_test.go

@@ -15,11 +15,13 @@ import (
 )
 
 func TestDaemonReloadLabels(t *testing.T) {
-	daemon := &Daemon{}
-	daemon.configStore = &config.Config{
-		CommonConfig: config.CommonConfig{
-			Labels: []string{"foo:bar"},
+	daemon := &Daemon{
+		configStore: &config.Config{
+			CommonConfig: config.CommonConfig{
+				Labels: []string{"foo:bar"},
+			},
 		},
+		imageService: &imageService{},
 	}
 
 	valuesSets := make(map[string]interface{})
@@ -43,7 +45,8 @@ func TestDaemonReloadLabels(t *testing.T) {
 
 func TestDaemonReloadAllowNondistributableArtifacts(t *testing.T) {
 	daemon := &Daemon{
-		configStore: &config.Config{},
+		configStore:  &config.Config{},
+		imageService: &imageService{},
 	}
 
 	var err error
@@ -97,7 +100,9 @@ func TestDaemonReloadAllowNondistributableArtifacts(t *testing.T) {
 }
 
 func TestDaemonReloadMirrors(t *testing.T) {
-	daemon := &Daemon{}
+	daemon := &Daemon{
+		imageService: &imageService{},
+	}
 	var err error
 	daemon.RegistryService, err = registry.NewService(registry.ServiceOptions{
 		InsecureRegistries: []string{},
@@ -194,7 +199,9 @@ func TestDaemonReloadMirrors(t *testing.T) {
 }
 
 func TestDaemonReloadInsecureRegistries(t *testing.T) {
-	daemon := &Daemon{}
+	daemon := &Daemon{
+		imageService: &imageService{},
+	}
 	var err error
 	// initialize daemon with existing insecure registries: "127.0.0.0/8", "10.10.1.11:5000", "10.10.1.22:5000"
 	daemon.RegistryService, err = registry.NewService(registry.ServiceOptions{
@@ -284,7 +291,9 @@ func TestDaemonReloadInsecureRegistries(t *testing.T) {
 }
 
 func TestDaemonReloadNotAffectOthers(t *testing.T) {
-	daemon := &Daemon{}
+	daemon := &Daemon{
+		imageService: &imageService{},
+	}
 	daemon.configStore = &config.Config{
 		CommonConfig: config.CommonConfig{
 			Labels: []string{"foo:bar"},
@@ -316,7 +325,9 @@ func TestDaemonReloadNotAffectOthers(t *testing.T) {
 }
 
 func TestDaemonDiscoveryReload(t *testing.T) {
-	daemon := &Daemon{}
+	daemon := &Daemon{
+		imageService: &imageService{},
+	}
 	daemon.configStore = &config.Config{
 		CommonConfig: config.CommonConfig{
 			ClusterStore:     "memory://127.0.0.1",
@@ -393,7 +404,9 @@ func TestDaemonDiscoveryReload(t *testing.T) {
 }
 
 func TestDaemonDiscoveryReloadFromEmptyDiscovery(t *testing.T) {
-	daemon := &Daemon{}
+	daemon := &Daemon{
+		imageService: &imageService{},
+	}
 	daemon.configStore = &config.Config{}
 
 	valuesSet := make(map[string]interface{})
@@ -438,7 +451,9 @@ func TestDaemonDiscoveryReloadFromEmptyDiscovery(t *testing.T) {
 }
 
 func TestDaemonDiscoveryReloadOnlyClusterAdvertise(t *testing.T) {
-	daemon := &Daemon{}
+	daemon := &Daemon{
+		imageService: &imageService{},
+	}
 	daemon.configStore = &config.Config{
 		CommonConfig: config.CommonConfig{
 			ClusterStore: "memory://127.0.0.1",
@@ -482,7 +497,9 @@ func TestDaemonDiscoveryReloadOnlyClusterAdvertise(t *testing.T) {
 }
 
 func TestDaemonReloadNetworkDiagnosticPort(t *testing.T) {
-	daemon := &Daemon{}
+	daemon := &Daemon{
+		imageService: &imageService{},
+	}
 	daemon.configStore = &config.Config{}
 
 	valuesSet := make(map[string]interface{})

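The test changes above all come down to one thing: Daemon now reaches the download and upload managers through daemon.imageService, so a fixture with a nil imageService would panic before the nil checks on the managers ever run. A tiny sketch of why the zero-value &imageService{} is enough; the types here are invented for the example and are not the daemon's real ones.

package main

import "fmt"

type manager interface {
	SetConcurrency(int)
}

type imageSvc struct {
	downloadManager manager // nil in the zero value, and that's fine
}

type daemon struct {
	imageService *imageSvc
}

func (d *daemon) reload(n int) {
	// Mirrors the reload code: the nil check is on the manager,
	// so imageService itself must be non-nil.
	if d.imageService.downloadManager != nil {
		d.imageService.downloadManager.SetConcurrency(n)
	}
}

func main() {
	ok := &daemon{imageService: &imageSvc{}}
	ok.reload(3) // no-op: the manager is nil, nothing to configure

	defer func() { fmt.Println("recovered:", recover()) }()
	broken := &daemon{} // imageService left nil, like the old fixtures
	broken.reload(3)    // panics with a nil pointer dereference
}
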
+ 1 - 1
daemon/start.go

@@ -223,7 +223,7 @@ func (daemon *Daemon) Cleanup(container *container.Container) {
 	if err := daemon.conditionalUnmountOnCleanup(container); err != nil {
 		// FIXME: remove once reference counting for graphdrivers has been refactored
 		// Ensure that all the mounts are gone
-		if mountid, err := daemon.layerStores[container.OS].GetMountID(container.ID); err == nil {
+		if mountid, err := daemon.imageService.GetContainerMountID(container.ID, container.OS); err == nil {
 			daemon.cleanupMountsByID(mountid)
 		}
 	}