Browse Source

Extract container store from the daemon.

- Generalize in an interface.
- Stop abusing List for everything.

Signed-off-by: David Calavera <david.calavera@gmail.com>
David Calavera 9 years ago
parent
commit
c62c9636c5

+ 10 - 9
daemon/history.go → container/history.go

@@ -1,34 +1,35 @@
-package daemon
+package container
 
-import (
-	"sort"
-
-	"github.com/docker/docker/container"
-)
+import "sort"
 
 // History is a convenience type for storing a list of containers,
-// ordered by creation date.
-type History []*container.Container
+// sorted by creation date in descending order.
+type History []*Container
 
+// Len returns the number of containers in the history.
 func (history *History) Len() int {
 	return len(*history)
 }
 
+// Less compares two containers and returns true if the second one
+// was created before the first one.
 func (history *History) Less(i, j int) bool {
 	containers := *history
 	return containers[j].Created.Before(containers[i].Created)
 }
 
+// Swap switches containers i and j positions in the history.
 func (history *History) Swap(i, j int) {
 	containers := *history
 	containers[i], containers[j] = containers[j], containers[i]
 }
 
 // Add the given container to history.
-func (history *History) Add(container *container.Container) {
+func (history *History) Add(container *Container) {
 	*history = append(*history, container)
 }
 
+// sort orders the history by creation date in descending order.
 func (history *History) sort() {
 	sort.Sort(history)
 }

+ 91 - 0
container/memory_store.go

@@ -0,0 +1,91 @@
+package container
+
+import "sync"
+
+// memoryStore implements a Store in memory.
+type memoryStore struct {
+	s map[string]*Container
+	sync.Mutex
+}
+
+// NewMemoryStore initializes a new memory store.
+func NewMemoryStore() Store {
+	return &memoryStore{
+		s: make(map[string]*Container),
+	}
+}
+
+// Add appends a new container to the memory store.
+// It overwrites the existing container if the id was already present.
+func (c *memoryStore) Add(id string, cont *Container) {
+	c.Lock()
+	c.s[id] = cont
+	c.Unlock()
+}
+
+// Get returns a container from the store by id.
+func (c *memoryStore) Get(id string) *Container {
+	c.Lock()
+	res := c.s[id]
+	c.Unlock()
+	return res
+}
+
+// Delete removes a container from the store by id.
+func (c *memoryStore) Delete(id string) {
+	c.Lock()
+	delete(c.s, id)
+	c.Unlock()
+}
+
+// List returns a sorted list of containers from the store.
+// The containers are ordered by creation date.
+func (c *memoryStore) List() []*Container {
+	containers := new(History)
+	c.Lock()
+	for _, cont := range c.s {
+		containers.Add(cont)
+	}
+	c.Unlock()
+	containers.sort()
+	return *containers
+}
+
+// Size returns the number of containers in the store.
+func (c *memoryStore) Size() int {
+	c.Lock()
+	defer c.Unlock()
+	return len(c.s)
+}
+
+// First returns the first container found in the store by a given filter.
+func (c *memoryStore) First(filter StoreFilter) *Container {
+	c.Lock()
+	defer c.Unlock()
+	for _, cont := range c.s {
+		if filter(cont) {
+			return cont
+		}
+	}
+	return nil
+}
+
+// ApplyAll calls the reducer function with every container in the store.
+// The memory store applies the reducer concurrently (one goroutine per
+// container) and blocks until all of them have completed.
+func (c *memoryStore) ApplyAll(apply StoreReducer) {
+	c.Lock()
+	defer c.Unlock()
+
+	wg := new(sync.WaitGroup)
+	for _, cont := range c.s {
+		wg.Add(1)
+		go func(container *Container) {
+			apply(container)
+			wg.Done()
+		}(cont)
+	}
+
+	wg.Wait()
+}
+
+var _ Store = &memoryStore{}

+ 106 - 0
container/memory_store_test.go

@@ -0,0 +1,106 @@
+package container
+
+import (
+	"testing"
+	"time"
+)
+
+func TestNewMemoryStore(t *testing.T) {
+	s := NewMemoryStore()
+	m, ok := s.(*memoryStore)
+	if !ok {
+		t.Fatalf("store is not a memory store %v", s)
+	}
+	if m.s == nil {
+		t.Fatal("expected store map to not be nil")
+	}
+}
+
+func TestAddContainers(t *testing.T) {
+	s := NewMemoryStore()
+	s.Add("id", NewBaseContainer("id", "root"))
+	if s.Size() != 1 {
+		t.Fatalf("expected store size 1, got %v", s.Size())
+	}
+}
+
+func TestGetContainer(t *testing.T) {
+	s := NewMemoryStore()
+	s.Add("id", NewBaseContainer("id", "root"))
+	c := s.Get("id")
+	if c == nil {
+		t.Fatal("expected container to not be nil")
+	}
+}
+
+func TestDeleteContainer(t *testing.T) {
+	s := NewMemoryStore()
+	s.Add("id", NewBaseContainer("id", "root"))
+	s.Delete("id")
+	if c := s.Get("id"); c != nil {
+		t.Fatalf("expected container to be nil after removal, got %v", c)
+	}
+
+	if s.Size() != 0 {
+		t.Fatalf("expected store size to be 0, got %v", s.Size())
+	}
+}
+
+func TestListContainers(t *testing.T) {
+	s := NewMemoryStore()
+
+	cont := NewBaseContainer("id", "root")
+	cont.Created = time.Now()
+	cont2 := NewBaseContainer("id2", "root")
+	cont2.Created = time.Now().Add(24 * time.Hour)
+
+	s.Add("id", cont)
+	s.Add("id2", cont2)
+
+	list := s.List()
+	if len(list) != 2 {
+		t.Fatalf("expected list size 2, got %v", len(list))
+	}
+	if list[0].ID != "id2" {
+		t.Fatalf("expected newer container to be first, got %v", list[0].ID)
+	}
+}
+
+func TestFirstContainer(t *testing.T) {
+	s := NewMemoryStore()
+
+	s.Add("id", NewBaseContainer("id", "root"))
+	s.Add("id2", NewBaseContainer("id2", "root"))
+
+	first := s.First(func(cont *Container) bool {
+		return cont.ID == "id2"
+	})
+
+	if first == nil {
+		t.Fatal("expected container to not be nil")
+	}
+	if first.ID != "id2" {
+		t.Fatalf("expected id2, got %v", first)
+	}
+}
+
+func TestApplyAllContainer(t *testing.T) {
+	s := NewMemoryStore()
+
+	s.Add("id", NewBaseContainer("id", "root"))
+	s.Add("id2", NewBaseContainer("id2", "root"))
+
+	s.ApplyAll(func(cont *Container) {
+		if cont.ID == "id2" {
+			cont.ID = "newID"
+		}
+	})
+
+	cont := s.Get("id2")
+	if cont == nil {
+		t.Fatal("expected container to not be nil")
+	}
+	if cont.ID != "newID" {
+		t.Fatalf("expected newID, got %v", cont)
+	}
+}

+ 28 - 0
container/store.go

@@ -0,0 +1,28 @@
+package container
+
+// StoreFilter defines a function to filter
+// container in the store.
+type StoreFilter func(*Container) bool
+
+// StoreReducer defines a function to
+// manipulate containers in the store
+type StoreReducer func(*Container)
+
+// Store defines an interface that
+// any container store must implement.
+type Store interface {
+	// Add appends a new container to the store.
+	Add(string, *Container)
+	// Get returns a container from the store by the identifier it was stored with.
+	Get(string) *Container
+	// Delete removes a container from the store by the identifier it was stored with.
+	Delete(string)
+	// List returns a list of containers from the store.
+	List() []*Container
+	// Size returns the number of containers in the store.
+	Size() int
+	// First returns the first container found in the store by a given filter.
+	First(StoreFilter) *Container
+	// ApplyAll calls the reducer function with every container in the store.
+	ApplyAll(StoreReducer)
+}

+ 12 - 53
daemon/daemon.go

@@ -99,46 +99,11 @@ func (e ErrImageDoesNotExist) Error() string {
 	return fmt.Sprintf("no such id: %s", e.RefOrID)
 }
 
-type contStore struct {
-	s map[string]*container.Container
-	sync.Mutex
-}
-
-func (c *contStore) Add(id string, cont *container.Container) {
-	c.Lock()
-	c.s[id] = cont
-	c.Unlock()
-}
-
-func (c *contStore) Get(id string) *container.Container {
-	c.Lock()
-	res := c.s[id]
-	c.Unlock()
-	return res
-}
-
-func (c *contStore) Delete(id string) {
-	c.Lock()
-	delete(c.s, id)
-	c.Unlock()
-}
-
-func (c *contStore) List() []*container.Container {
-	containers := new(History)
-	c.Lock()
-	for _, cont := range c.s {
-		containers.Add(cont)
-	}
-	c.Unlock()
-	containers.sort()
-	return *containers
-}
-
 // Daemon holds information about the Docker daemon.
 type Daemon struct {
 	ID                        string
 	repository                string
-	containers                *contStore
+	containers                container.Store
 	execCommands              *exec.Store
 	referenceStore            reference.Store
 	downloadManager           *xfer.LayerDownloadManager
@@ -811,7 +776,7 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo
 
 	d.ID = trustKey.PublicKey().KeyID()
 	d.repository = daemonRepo
-	d.containers = &contStore{s: make(map[string]*container.Container)}
+	d.containers = container.NewMemoryStore()
 	d.execCommands = exec.NewStore()
 	d.referenceStore = referenceStore
 	d.distributionMetadataStore = distributionMetadataStore
@@ -890,24 +855,18 @@ func (daemon *Daemon) shutdownContainer(c *container.Container) error {
 func (daemon *Daemon) Shutdown() error {
 	daemon.shutdown = true
 	if daemon.containers != nil {
-		group := sync.WaitGroup{}
 		logrus.Debug("starting clean shutdown of all containers...")
-		for _, cont := range daemon.List() {
-			if !cont.IsRunning() {
-				continue
+		daemon.containers.ApplyAll(func(c *container.Container) {
+			if !c.IsRunning() {
+				return
 			}
-			logrus.Debugf("stopping %s", cont.ID)
-			group.Add(1)
-			go func(c *container.Container) {
-				defer group.Done()
-				if err := daemon.shutdownContainer(c); err != nil {
-					logrus.Errorf("Stop container error: %v", err)
-					return
-				}
-				logrus.Debugf("container stopped %s", c.ID)
-			}(cont)
-		}
-		group.Wait()
+			logrus.Debugf("stopping %s", c.ID)
+			if err := daemon.shutdownContainer(c); err != nil {
+				logrus.Errorf("Stop container error: %v", err)
+				return
+			}
+			logrus.Debugf("container stopped %s", c.ID)
+		})
 	}
 
 	// trigger libnetwork Stop only if it's initialized

+ 6 - 9
daemon/daemon_test.go

@@ -61,15 +61,12 @@ func TestGetContainer(t *testing.T) {
 		},
 	}
 
-	store := &contStore{
-		s: map[string]*container.Container{
-			c1.ID: c1,
-			c2.ID: c2,
-			c3.ID: c3,
-			c4.ID: c4,
-			c5.ID: c5,
-		},
-	}
+	store := container.NewMemoryStore()
+	store.Add(c1.ID, c1)
+	store.Add(c2.ID, c2)
+	store.Add(c3.ID, c3)
+	store.Add(c4.ID, c4)
+	store.Add(c5.ID, c5)
 
 	index := truncindex.NewTruncIndex([]string{})
 	index.Add(c1.ID)

+ 1 - 1
daemon/delete_test.go

@@ -20,7 +20,7 @@ func TestContainerDoubleDelete(t *testing.T) {
 		repository: tmp,
 		root:       tmp,
 	}
-	daemon.containers = &contStore{s: make(map[string]*container.Container)}
+	daemon.containers = container.NewMemoryStore()
 
 	container := &container.Container{
 		CommonContainer: container.CommonContainer{

+ 20 - 32
daemon/image_delete.go

@@ -179,13 +179,9 @@ func isImageIDPrefix(imageID, possiblePrefix string) bool {
 // getContainerUsingImage returns a container that was created using the given
 // imageID. Returns nil if there is no such container.
 func (daemon *Daemon) getContainerUsingImage(imageID image.ID) *container.Container {
-	for _, container := range daemon.List() {
-		if container.ImageID == imageID {
-			return container
-		}
-	}
-
-	return nil
+	return daemon.containers.First(func(c *container.Container) bool {
+		return c.ImageID == imageID
+	})
 }
 
 // removeImageRef attempts to parse and remove the given image reference from
@@ -328,19 +324,15 @@ func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, mask conflictType
 
 	if mask&conflictRunningContainer != 0 {
 		// Check if any running container is using the image.
-		for _, container := range daemon.List() {
-			if !container.IsRunning() {
-				// Skip this until we check for soft conflicts later.
-				continue
-			}
-
-			if container.ImageID == imgID {
-				return &imageDeleteConflict{
-					imgID:   imgID,
-					hard:    true,
-					used:    true,
-					message: fmt.Sprintf("image is being used by running container %s", stringid.TruncateID(container.ID)),
-				}
+		running := func(c *container.Container) bool {
+			return c.IsRunning() && c.ImageID == imgID
+		}
+		if container := daemon.containers.First(running); container != nil {
+			return &imageDeleteConflict{
+				imgID:   imgID,
+				hard:    true,
+				used:    true,
+				message: fmt.Sprintf("image is being used by running container %s", stringid.TruncateID(container.ID)),
 			}
 		}
 	}
@@ -355,18 +347,14 @@ func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, mask conflictType
 
 	if mask&conflictStoppedContainer != 0 {
 		// Check if any stopped containers reference this image.
-		for _, container := range daemon.List() {
-			if container.IsRunning() {
-				// Skip this as it was checked above in hard conflict conditions.
-				continue
-			}
-
-			if container.ImageID == imgID {
-				return &imageDeleteConflict{
-					imgID:   imgID,
-					used:    true,
-					message: fmt.Sprintf("image is being used by stopped container %s", stringid.TruncateID(container.ID)),
-				}
+		stopped := func(c *container.Container) bool {
+			return !c.IsRunning() && c.ImageID == imgID
+		}
+		if container := daemon.containers.First(stopped); container != nil {
+			return &imageDeleteConflict{
+				imgID:   imgID,
+				used:    true,
+				message: fmt.Sprintf("image is being used by stopped container %s", stringid.TruncateID(container.ID)),
 			}
 		}
 	}

+ 12 - 10
daemon/info.go

@@ -4,9 +4,11 @@ import (
 	"os"
 	"runtime"
 	"strings"
+	"sync/atomic"
 	"time"
 
 	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/container"
 	"github.com/docker/docker/dockerversion"
 	"github.com/docker/docker/pkg/fileutils"
 	"github.com/docker/docker/pkg/parsers/kernel"
@@ -54,24 +56,24 @@ func (daemon *Daemon) SystemInfo() (*types.Info, error) {
 	initPath := utils.DockerInitPath("")
 	sysInfo := sysinfo.New(true)
 
-	var cRunning, cPaused, cStopped int
-	for _, c := range daemon.List() {
+	var cRunning, cPaused, cStopped int32
+	daemon.containers.ApplyAll(func(c *container.Container) {
 		switch c.StateString() {
 		case "paused":
-			cPaused++
+			atomic.AddInt32(&cPaused, 1)
 		case "running":
-			cRunning++
+			atomic.AddInt32(&cRunning, 1)
 		default:
-			cStopped++
+			atomic.AddInt32(&cStopped, 1)
 		}
-	}
+	})
 
 	v := &types.Info{
 		ID:                 daemon.ID,
-		Containers:         len(daemon.List()),
-		ContainersRunning:  cRunning,
-		ContainersPaused:   cPaused,
-		ContainersStopped:  cStopped,
+		Containers:         int(cRunning + cPaused + cStopped),
+		ContainersRunning:  int(cRunning),
+		ContainersPaused:   int(cPaused),
+		ContainersStopped:  int(cStopped),
 		Images:             len(daemon.imageStore.Map()),
 		Driver:             daemon.GraphDriverName(),
 		DriverStatus:       daemon.layerStore.DriverStatus(),

+ 3 - 6
daemon/links_test.go

@@ -39,12 +39,9 @@ func TestMigrateLegacySqliteLinks(t *testing.T) {
 		},
 	}
 
-	store := &contStore{
-		s: map[string]*container.Container{
-			c1.ID: c1,
-			c2.ID: c2,
-		},
-	}
+	store := container.NewMemoryStore()
+	store.Add(c1.ID, c1)
+	store.Add(c2.ID, c2)
 
 	d := &Daemon{root: tmpDir, containers: store}
 	db, err := graphdb.NewSqliteConn(filepath.Join(d.root, "linkgraph.db"))