commit 6558158dc3
Instead of implementing refcounts in each graphdriver, implement this in
the layer package, which is what the engine actually interacts with now.
This means that interacting directly with the graphdriver is no longer
explicitly safe with regard to Get/Put calls being refcounted.
In addition, with containerd, layers may still be mounted after a daemon
restart, since we no longer explicitly kill containers when the engine
shuts down or starts up. Because of this, the ref counts need to be
repopulated on startup.
Signed-off-by: Brian Goff <cpuguy83@gmail.com>
(cherry picked from commit 65d79e3e5e)
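
For context, a minimal sketch of what refcounting Get/Put in the layer
package might look like. The refCounter and mountRef types, the Driver
interface, and the Repopulate helper are illustrative assumptions for
this note, not the actual layer store API:

    package layer

    import "sync"

    // Driver is a stand-in for the graphdriver surface the layer store
    // calls into; only the mount-related methods matter here.
    type Driver interface {
            Get(id, mountLabel string) (string, error)
            Put(id string) error
    }

    // mountRef tracks how many callers currently hold a mount
    // (hypothetical type).
    type mountRef struct {
            count int
            path  string
    }

    // refCounter serializes Get/Put so the graphdriver only sees the
    // first Get and the last Put for each layer (hypothetical type).
    type refCounter struct {
            mu     sync.Mutex
            driver Driver
            refs   map[string]*mountRef
    }

    func newRefCounter(d Driver) *refCounter {
            return &refCounter{driver: d, refs: make(map[string]*mountRef)}
    }

    // Get mounts the layer on the first reference and bumps the count
    // on every subsequent one.
    func (c *refCounter) Get(id, mountLabel string) (string, error) {
            c.mu.Lock()
            defer c.mu.Unlock()
            if m, ok := c.refs[id]; ok {
                    m.count++
                    return m.path, nil
            }
            path, err := c.driver.Get(id, mountLabel)
            if err != nil {
                    return "", err
            }
            c.refs[id] = &mountRef{count: 1, path: path}
            return path, nil
    }

    // Put unmounts via the graphdriver only when the last reference is
    // released; intermediate Puts just decrement the count.
    func (c *refCounter) Put(id string) error {
            c.mu.Lock()
            defer c.mu.Unlock()
            m, ok := c.refs[id]
            if !ok {
                    return nil
            }
            m.count--
            if m.count > 0 {
                    return nil
            }
            delete(c.refs, id)
            return c.driver.Put(id)
    }

    // Repopulate rebuilds the counts after a daemon restart, for layers
    // that containerd kept mounted; ids would come from the containers
    // found running at startup (hypothetical helper).
    func (c *refCounter) Repopulate(ids []string, mountLabel string) {
            for _, id := range ids {
                    c.Get(id, mountLabel) // errors ignored for brevity in this sketch
            }
    }

Since this counting lives above the graphdriver, callers that bypass the
layer package and call the driver's Get/Put directly are no longer
covered by it, which is the safety caveat the message calls out.
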
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"runtime"
	"strings"
	"sync"

	"github.com/docker/docker/pkg/integration/checker"
	"github.com/go-check/check"
)

// BenchmarkConcurrentContainerActions exercises the daemon with
// concurrent container lifecycle operations (run, cp, kill, start, rm)
// in parallel with a stream of `docker ps` calls, stressing the
// refcounted layer Get/Put paths.
func (s *DockerSuite) BenchmarkConcurrentContainerActions(c *check.C) {
	maxConcurrency := runtime.GOMAXPROCS(0)
	numIterations := c.N
	outerGroup := &sync.WaitGroup{}
	outerGroup.Add(maxConcurrency)
	chErr := make(chan error, numIterations*2*maxConcurrency)

	for i := 0; i < maxConcurrency; i++ {
		go func() {
			defer outerGroup.Done()
			innerGroup := &sync.WaitGroup{}
			innerGroup.Add(2)

			// Lifecycle worker: run a container, copy from it, kill it,
			// restart it, kill it again, then remove it.
			go func() {
				defer innerGroup.Done()
				for i := 0; i < numIterations; i++ {
					args := []string{"run", "-d", defaultSleepImage}
					args = append(args, defaultSleepCommand...)
					out, _, err := dockerCmdWithError(args...)
					if err != nil {
						chErr <- fmt.Errorf(out)
						return
					}

					id := strings.TrimSpace(out)
					tmpDir, err := ioutil.TempDir("", "docker-concurrent-test-"+id)
					if err != nil {
						chErr <- err
						return
					}
					defer os.RemoveAll(tmpDir)
					out, _, err = dockerCmdWithError("cp", id+":/tmp", tmpDir)
					if err != nil {
						chErr <- fmt.Errorf(out)
						return
					}

					out, _, err = dockerCmdWithError("kill", id)
					if err != nil {
						chErr <- fmt.Errorf(out)
					}

					out, _, err = dockerCmdWithError("start", id)
					if err != nil {
						chErr <- fmt.Errorf(out)
					}

					out, _, err = dockerCmdWithError("kill", id)
					if err != nil {
						chErr <- fmt.Errorf(out)
					}

					// don't do an rm -f here since it can potentially ignore errors from the graphdriver
					out, _, err = dockerCmdWithError("rm", id)
					if err != nil {
						chErr <- fmt.Errorf(out)
					}
				}
			}()

			// List worker: hammer `docker ps` concurrently with the
			// lifecycle operations above.
			go func() {
				defer innerGroup.Done()
				for i := 0; i < numIterations; i++ {
					out, _, err := dockerCmdWithError("ps")
					if err != nil {
						chErr <- fmt.Errorf(out)
					}
				}
			}()

			innerGroup.Wait()
		}()
	}

	outerGroup.Wait()
	close(chErr)

	for err := range chErr {
		c.Assert(err, checker.IsNil)
	}
}
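
Since the suite is built on the go-check framework, the iteration count
comes from c.N and the benchmark is run through `go test` with
go-check's -check.b benchmark flag (plus -check.f to select this test)
rather than the standard -bench flag; the integration-cli harness also
expects a running daemon to test against, so the exact invocation
depends on the test environment.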