Merge pull request #29289 from vdemeester/integration-cli-startwithbusybox

[integration-cli] Use *check.C in Start/Stop on Daemon struct

Commit 8f57ac390f: 23 changed files with 435 additions and 601 deletions
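The change is mechanical but touches every daemon-based test: the test Daemon helpers Start, Stop, Restart, and StartWithBusybox now take a *check.C and fail the test themselves, while new *WithError variants keep the old error-returning behaviour for callers that need it. A minimal caller-side sketch, assuming the integration-cli suite types used throughout this diff (the test names here are hypothetical):

	// Before this change: every caller had to assert on the returned error.
	func (s *DockerDaemonSuite) TestExampleBefore(c *check.C) {
		err := s.d.StartWithBusybox()
		c.Assert(err, checker.IsNil)
		defer s.d.Stop()
	}

	// After this change: the helper receives *check.C and fails the test on error.
	func (s *DockerDaemonSuite) TestExampleAfter(c *check.C) {
		s.d.StartWithBusybox(c)
		defer s.d.Stop(c)
	}
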
@@ -84,7 +84,7 @@ func (s *DockerRegistrySuite) TearDownTest(c *check.C) {
 		s.reg.Close()
 	}
 	if s.d != nil {
-		s.d.Stop()
+		s.d.Stop(c)
 	}
 	s.ds.TearDownTest(c)
 }
@@ -118,7 +118,7 @@ func (s *DockerSchema1RegistrySuite) TearDownTest(c *check.C) {
 		s.reg.Close()
 	}
 	if s.d != nil {
-		s.d.Stop()
+		s.d.Stop(c)
 	}
 	s.ds.TearDownTest(c)
 }
@@ -154,7 +154,7 @@ func (s *DockerRegistryAuthHtpasswdSuite) TearDownTest(c *check.C) {
 		s.reg.Close()
 	}
 	if s.d != nil {
-		s.d.Stop()
+		s.d.Stop(c)
 	}
 	s.ds.TearDownTest(c)
 }
@@ -189,7 +189,7 @@ func (s *DockerRegistryAuthTokenSuite) TearDownTest(c *check.C) {
 		s.reg.Close()
 	}
 	if s.d != nil {
-		s.d.Stop()
+		s.d.Stop(c)
 	}
 	s.ds.TearDownTest(c)
 }
@@ -226,7 +226,7 @@ func (s *DockerDaemonSuite) SetUpTest(c *check.C) {
 func (s *DockerDaemonSuite) TearDownTest(c *check.C) {
 	testRequires(c, DaemonIsLinux)
 	if s.d != nil {
-		s.d.Stop()
+		s.d.Stop(c)
 	}
 	s.ds.TearDownTest(c)
 }
@@ -283,8 +283,7 @@ func (s *DockerSwarmSuite) AddDaemon(c *check.C, joinSwarm, manager bool) *daemo
 	}
 	d.ListenAddr = fmt.Sprintf("0.0.0.0:%d", d.Port)
 	args := []string{"--iptables=false", "--swarm-default-advertise-addr=lo"} // avoid networking conflicts
-	err := d.StartWithBusybox(args...)
-	c.Assert(err, check.IsNil)
+	d.StartWithBusybox(c, args...)

 	if joinSwarm == true {
 		if len(s.daemons) > 0 {
@@ -315,7 +314,7 @@ func (s *DockerSwarmSuite) TearDownTest(c *check.C) {
 	s.daemonsLock.Lock()
 	for _, d := range s.daemons {
 		if d != nil {
-			d.Stop()
+			d.Stop(c)
 			// FIXME(vdemeester) should be handled by SwarmDaemon ?
 			// raft state file is quite big (64MB) so remove it after every test
 			walDir := filepath.Join(d.Root, "swarm/raft/wal")

@@ -34,6 +34,8 @@ import (
 // SockRoot holds the path of the default docker integration daemon socket
 var SockRoot = filepath.Join(os.TempDir(), "docker-integration")

+var errDaemonNotStarted = errors.New("daemon not started")
+
 // Daemon represents a Docker daemon for the testing framework.
 type Daemon struct {
 	GlobalFlags []string
@@ -171,9 +173,14 @@ func (d *Daemon) getClientConfig() (*clientConfig, error) {
 	}, nil
 }

-// Start will start the daemon and return once it is ready to receive requests.
-// You can specify additional daemon flags.
-func (d *Daemon) Start(args ...string) error {
+// Start starts the daemon and return once it is ready to receive requests.
+func (d *Daemon) Start(c *check.C, args ...string) {
+	c.Assert(d.StartWithError(args...), checker.IsNil, check.Commentf("Error starting daemon with arguments: %v", args))
+}
+
+// StartWithError starts the daemon and return once it is ready to receive requests.
+// It returns an error in case it couldn't start.
+func (d *Daemon) StartWithError(args ...string) error {
 	logFile, err := os.OpenFile(filepath.Join(d.Folder, "docker.log"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600)
 	if err != nil {
 		return errors.Wrapf(err, "[%s] Could not create %s/docker.log", d.id, d.Folder)
@@ -295,17 +302,15 @@ func (d *Daemon) StartWithLogFile(out *os.File, providedArgs ...string) error {

 // StartWithBusybox will first start the daemon with Daemon.Start()
 // then save the busybox image from the main daemon and load it into this Daemon instance.
-func (d *Daemon) StartWithBusybox(arg ...string) error {
-	if err := d.Start(arg...); err != nil {
-		return err
-	}
-	return d.LoadBusybox()
+func (d *Daemon) StartWithBusybox(c *check.C, arg ...string) {
+	d.Start(c, arg...)
+	c.Assert(d.LoadBusybox(), checker.IsNil, check.Commentf("Error loading busybox image to current daeom: %s", d.id))
 }

 // Kill will send a SIGKILL to the daemon
 func (d *Daemon) Kill() error {
 	if d.cmd == nil || d.Wait == nil {
-		return errors.New("daemon not started")
+		return errDaemonNotStarted
 	}

 	defer func() {
@@ -337,7 +342,7 @@ func (d *Daemon) Interrupt() error {
 // Signal sends the specified signal to the daemon if running
 func (d *Daemon) Signal(signal os.Signal) error {
 	if d.cmd == nil || d.Wait == nil {
-		return errors.New("daemon not started")
+		return errDaemonNotStarted
 	}
 	return d.cmd.Process.Signal(signal)
 }
@@ -353,12 +358,28 @@ func (d *Daemon) DumpStackAndQuit() {
 }

 // Stop will send a SIGINT every second and wait for the daemon to stop.
+// If it times out, a SIGKILL is sent.
+// Stop will not delete the daemon directory. If a purged daemon is needed,
+// instantiate a new one with NewDaemon.
+// If an error occurs while starting the daemon, the test will fail.
+func (d *Daemon) Stop(c *check.C) {
+	err := d.StopWithError()
+	if err != nil {
+		if err != errDaemonNotStarted {
+			c.Fatalf("Error while stopping the daemon %s : %v", d.id, err)
+		} else {
+			c.Logf("Daemon %s is not started", d.id)
+		}
+	}
+}
+
+// StopWithError will send a SIGINT every second and wait for the daemon to stop.
 // If it timeouts, a SIGKILL is sent.
 // Stop will not delete the daemon directory. If a purged daemon is needed,
 // instantiate a new one with NewDaemon.
-func (d *Daemon) Stop() error {
+func (d *Daemon) StopWithError() error {
 	if d.cmd == nil || d.Wait == nil {
-		return errors.New("daemon not started")
+		return errDaemonNotStarted
 	}

 	defer func() {
@@ -370,6 +391,9 @@ func (d *Daemon) Stop() error {
 	tick := time.Tick(time.Second)

 	if err := d.cmd.Process.Signal(os.Interrupt); err != nil {
+		if strings.Contains(err.Error(), "os: process already finished") {
+			return errDaemonNotStarted
+		}
 		return errors.Errorf("could not send signal: %v", err)
 	}
 out1:
@@ -414,9 +438,24 @@ out2:
 	return nil
 }

-// Restart will restart the daemon by first stopping it and then starting it.
-func (d *Daemon) Restart(arg ...string) error {
-	d.Stop()
+// Restart will restart the daemon by first stopping it and the starting it.
+// If an error occurs while starting the daemon, the test will fail.
+func (d *Daemon) Restart(c *check.C, args ...string) {
+	d.Stop(c)
+	d.handleUserns()
+	d.Start(c, args...)
+}
+
+// RestartWithError will restart the daemon by first stopping it and then starting it.
+func (d *Daemon) RestartWithError(arg ...string) error {
+	if err := d.StopWithError(); err != nil {
+		return err
+	}
+	d.handleUserns()
+	return d.StartWithError(arg...)
+}
+
+func (d *Daemon) handleUserns() {
 	// in the case of tests running a user namespace-enabled daemon, we have resolved
 	// d.Root to be the actual final path of the graph dir after the "uid.gid" of
 	// remapped root is added--we need to subtract it from the path before calling
@@ -425,7 +464,6 @@ func (d *Daemon) Restart(arg ...string) error {
 	if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" {
 		d.Root = filepath.Dir(d.Root)
 	}
-	return d.Start(arg...)
 }

 // LoadBusybox will load the stored busybox into a newly started daemon

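For tests that expect daemon startup to fail, the error-returning variants introduced above remain available; a short sketch of the split, under the same assumptions as the sketch at the top of this page (the flag values are taken from hunks later in this diff):

	// Happy path: Start(c, ...) asserts internally and fails the test on error.
	d.Start(c, "--debug")

	// Expected failure: use StartWithError and assert on the error explicitly.
	err := d.StartWithError("--cluster-advertise=invalid")
	c.Assert(err, checker.NotNil)
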
@@ -51,15 +51,11 @@ func (s *DockerSwarmSuite) TestAPISwarmInit(c *check.C) {
 	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)

 	// Current state restoring after restarts
-	err = d1.Stop()
-	c.Assert(err, checker.IsNil)
-	err = d2.Stop()
-	c.Assert(err, checker.IsNil)
+	d1.Stop(c)
+	d2.Stop(c)

-	err = d1.Start()
-	c.Assert(err, checker.IsNil)
-	err = d2.Start()
-	c.Assert(err, checker.IsNil)
+	d1.Start(c)
+	d2.Start(c)

 	info, err = d1.SwarmInfo()
 	c.Assert(err, checker.IsNil)
@@ -240,7 +236,7 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesMultipleAgents(c *check.C) {
 	waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances)

 	// reconciliation on d2 node down
-	c.Assert(d2.Stop(), checker.IsNil)
+	d2.Stop(c)

 	waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances)

@@ -629,7 +625,7 @@ func (s *DockerSwarmSuite) TestAPISwarmLeaderElection(c *check.C) {
 	c.Assert(d1.GetNode(c, d2.NodeID).ManagerStatus.Leader, checker.False)
 	c.Assert(d1.GetNode(c, d3.NodeID).ManagerStatus.Leader, checker.False)

-	d1.Stop() // stop the leader
+	d1.Stop(c)

 	var (
 		leader    *daemon.Swarm // keep track of leader
@@ -666,7 +662,7 @@ func (s *DockerSwarmSuite) TestAPISwarmLeaderElection(c *check.C) {
 	stableleader := leader

 	// add the d1, the initial leader, back
-	d1.Start()
+	d1.Start(c)

 	// TODO(stevvooe): may need to wait for rejoin here

@@ -688,7 +684,7 @@ func (s *DockerSwarmSuite) TestAPISwarmRaftQuorum(c *check.C) {

 	d1.CreateService(c, simpleTestService)

-	c.Assert(d2.Stop(), checker.IsNil)
+	d2.Stop(c)

 	// make sure there is a leader
 	waitAndAssert(c, defaultReconciliationTimeout, d1.CheckLeader, checker.IsNil)
@@ -697,7 +693,7 @@ func (s *DockerSwarmSuite) TestAPISwarmRaftQuorum(c *check.C) {
 		s.Spec.Name = "top1"
 	})

-	c.Assert(d3.Stop(), checker.IsNil)
+	d3.Stop(c)

 	// make sure there is a leader
 	waitAndAssert(c, defaultReconciliationTimeout, d1.CheckLeader, checker.IsNil)
@@ -709,7 +705,7 @@ func (s *DockerSwarmSuite) TestAPISwarmRaftQuorum(c *check.C) {
 	c.Assert(err, checker.IsNil)
 	c.Assert(status, checker.Equals, http.StatusInternalServerError, check.Commentf("deadline exceeded", string(out)))

-	c.Assert(d2.Start(), checker.IsNil)
+	d2.Start(c)

 	// make sure there is a leader
 	waitAndAssert(c, defaultReconciliationTimeout, d1.CheckLeader, checker.IsNil)
@@ -771,8 +767,7 @@ func (s *DockerSwarmSuite) TestAPISwarmNodeRemove(c *check.C) {
 	c.Assert(len(nodes), checker.Equals, 2, check.Commentf("nodes: %#v", nodes))

 	// Restart the node that was removed
-	err = d2.Restart()
-	c.Assert(err, checker.IsNil)
+	d2.Restart(c)

 	// Give some time for the node to rejoin
 	time.Sleep(1 * time.Second)
@@ -899,8 +894,8 @@ func (s *DockerSwarmSuite) TestAPISwarmRestoreOnPendingJoin(c *check.C) {

 	waitAndAssert(c, defaultReconciliationTimeout, d.CheckLocalNodeState, checker.Equals, swarm.LocalNodeStatePending)

-	c.Assert(d.Stop(), checker.IsNil)
-	c.Assert(d.Start(), checker.IsNil)
+	d.Stop(c)
+	d.Start(c)

 	info, err := d.SwarmInfo()
 	c.Assert(err, checker.IsNil)
@@ -914,25 +909,25 @@ func (s *DockerSwarmSuite) TestAPISwarmManagerRestore(c *check.C) {
 	id := d1.CreateService(c, simpleTestService, setInstances(instances))

 	d1.GetService(c, id)
-	d1.Stop()
-	d1.Start()
+	d1.Stop(c)
+	d1.Start(c)
 	d1.GetService(c, id)

 	d2 := s.AddDaemon(c, true, true)
 	d2.GetService(c, id)
-	d2.Stop()
-	d2.Start()
+	d2.Stop(c)
+	d2.Start(c)
 	d2.GetService(c, id)

 	d3 := s.AddDaemon(c, true, true)
 	d3.GetService(c, id)
-	d3.Stop()
-	d3.Start()
+	d3.Stop(c)
+	d3.Start(c)
 	d3.GetService(c, id)

 	d3.Kill()
 	time.Sleep(1 * time.Second) // time to handle signal
-	d3.Start()
+	d3.Start(c)
 	d3.GetService(c, id)
 }

@@ -993,7 +988,7 @@ func (s *DockerSwarmSuite) TestAPISwarmForceNewCluster(c *check.C) {
 	waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, instances)
 	waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, 0)

-	c.Assert(d2.Stop(), checker.IsNil)
+	d2.Stop(c)

 	c.Assert(d1.Init(swarm.InitRequest{
 		ForceNewCluster: true,
@@ -1210,7 +1205,7 @@ func (s *DockerSwarmSuite) TestAPISwarmRestartCluster(c *check.C) {
 	for _, d := range nodes {
 		go func(daemon *daemon.Swarm) {
 			defer wg.Done()
-			if err := daemon.Stop(); err != nil {
+			if err := daemon.StopWithError(); err != nil {
 				errs <- err
 			}
 			// FIXME(vdemeester) This is duplicated…
@@ -1235,7 +1230,7 @@ func (s *DockerSwarmSuite) TestAPISwarmRestartCluster(c *check.C) {
 	for _, d := range nodes {
 		go func(daemon *daemon.Swarm) {
 			defer wg.Done()
-			if err := daemon.Start("--iptables=false"); err != nil {
+			if err := daemon.StartWithError("--iptables=false"); err != nil {
 				errs <- err
 			}
 		}(d)

@@ -35,12 +35,12 @@ func (s *DockerAuthzV2Suite) SetUpTest(c *check.C) {
 	s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{
 		Experimental: experimentalDaemon,
 	})
-	c.Assert(s.d.Start(), check.IsNil)
+	s.d.Start(c)
 }

 func (s *DockerAuthzV2Suite) TearDownTest(c *check.C) {
 	if s.d != nil {
-		s.d.Stop()
+		s.d.Stop(c)
 		s.ds.TearDownTest(c)
 	}
 }
@@ -52,12 +52,12 @@ func (s *DockerAuthzV2Suite) TestAuthZPluginAllowNonVolumeRequest(c *check.C) {
 	c.Assert(err, checker.IsNil)
 	// start the daemon with the plugin and load busybox, --net=none build fails otherwise
 	// because it needs to pull busybox
-	c.Assert(s.d.Restart("--authorization-plugin="+authzPluginNameWithTag), check.IsNil)
+	s.d.Restart(c, "--authorization-plugin="+authzPluginNameWithTag)
 	c.Assert(s.d.LoadBusybox(), check.IsNil)

 	// defer disabling the plugin
 	defer func() {
-		c.Assert(s.d.Restart(), check.IsNil)
+		s.d.Restart(c)
 		_, err = s.d.Cmd("plugin", "disable", authzPluginNameWithTag)
 		c.Assert(err, checker.IsNil)
 		_, err = s.d.Cmd("plugin", "rm", authzPluginNameWithTag)
@@ -82,11 +82,11 @@ func (s *DockerAuthzV2Suite) TestAuthZPluginRejectVolumeRequests(c *check.C) {
 	c.Assert(err, checker.IsNil)

 	// restart the daemon with the plugin
-	c.Assert(s.d.Restart("--authorization-plugin="+authzPluginNameWithTag), check.IsNil)
+	s.d.Restart(c, "--authorization-plugin="+authzPluginNameWithTag)

 	// defer disabling the plugin
 	defer func() {
-		c.Assert(s.d.Restart(), check.IsNil)
+		s.d.Restart(c)
 		_, err = s.d.Cmd("plugin", "disable", authzPluginNameWithTag)
 		c.Assert(err, checker.IsNil)
 		_, err = s.d.Cmd("plugin", "rm", authzPluginNameWithTag)
@@ -121,16 +121,16 @@ func (s *DockerAuthzV2Suite) TestAuthZPluginBadManifestFailsDaemonStart(c *check
 	c.Assert(err, checker.IsNil)

 	// start the daemon with the plugin, it will error
-	c.Assert(s.d.Restart("--authorization-plugin="+authzPluginBadManifestName), check.NotNil)
+	c.Assert(s.d.RestartWithError("--authorization-plugin="+authzPluginBadManifestName), check.NotNil)

 	// restarting the daemon without requiring the plugin will succeed
-	c.Assert(s.d.Restart(), check.IsNil)
+	s.d.Restart(c)
 }

 func (s *DockerAuthzV2Suite) TestNonexistentAuthZPluginFailsDaemonStart(c *check.C) {
 	// start the daemon with a non-existent authz plugin, it will error
-	c.Assert(s.d.Restart("--authorization-plugin="+nonexistentAuthzPluginName), check.NotNil)
+	c.Assert(s.d.RestartWithError("--authorization-plugin="+nonexistentAuthzPluginName), check.NotNil)

 	// restarting the daemon without requiring the plugin will succeed
-	c.Assert(s.d.Restart(), check.IsNil)
+	s.d.Start(c)
 }

|
|||
|
||||
func (s *DockerAuthzSuite) TearDownTest(c *check.C) {
|
||||
if s.d != nil {
|
||||
s.d.Stop()
|
||||
s.d.Stop(c)
|
||||
s.ds.TearDownTest(c)
|
||||
s.ctrl = nil
|
||||
}
|
||||
|
@ -206,7 +206,7 @@ func (s *DockerAuthzSuite) TearDownSuite(c *check.C) {
|
|||
func (s *DockerAuthzSuite) TestAuthZPluginAllowRequest(c *check.C) {
|
||||
// start the daemon and load busybox, --net=none build fails otherwise
|
||||
// cause it needs to pull busybox
|
||||
c.Assert(s.d.Start("--authorization-plugin="+testAuthZPlugin), check.IsNil)
|
||||
s.d.Start(c, "--authorization-plugin="+testAuthZPlugin)
|
||||
s.ctrl.reqRes.Allow = true
|
||||
s.ctrl.resRes.Allow = true
|
||||
c.Assert(s.d.LoadBusybox(), check.IsNil)
|
||||
|
@ -231,7 +231,7 @@ func (s *DockerAuthzSuite) TestAuthZPluginTls(c *check.C) {
|
|||
const testDaemonHTTPSAddr = "tcp://localhost:4271"
|
||||
// start the daemon and load busybox, --net=none build fails otherwise
|
||||
// cause it needs to pull busybox
|
||||
if err := s.d.Start(
|
||||
s.d.Start(c,
|
||||
"--authorization-plugin="+testAuthZPlugin,
|
||||
"--tlsverify",
|
||||
"--tlscacert",
|
||||
|
@ -240,9 +240,7 @@ func (s *DockerAuthzSuite) TestAuthZPluginTls(c *check.C) {
|
|||
"fixtures/https/server-cert.pem",
|
||||
"--tlskey",
|
||||
"fixtures/https/server-key.pem",
|
||||
"-H", testDaemonHTTPSAddr); err != nil {
|
||||
c.Fatalf("Could not start daemon with busybox: %v", err)
|
||||
}
|
||||
"-H", testDaemonHTTPSAddr)
|
||||
|
||||
s.ctrl.reqRes.Allow = true
|
||||
s.ctrl.resRes.Allow = true
|
||||
|
@ -266,8 +264,7 @@ func (s *DockerAuthzSuite) TestAuthZPluginTls(c *check.C) {
|
|||
}
|
||||
|
||||
func (s *DockerAuthzSuite) TestAuthZPluginDenyRequest(c *check.C) {
|
||||
err := s.d.Start("--authorization-plugin=" + testAuthZPlugin)
|
||||
c.Assert(err, check.IsNil)
|
||||
s.d.Start(c, "--authorization-plugin="+testAuthZPlugin)
|
||||
s.ctrl.reqRes.Allow = false
|
||||
s.ctrl.reqRes.Msg = unauthorizedMessage
|
||||
|
||||
|
@ -283,8 +280,7 @@ func (s *DockerAuthzSuite) TestAuthZPluginDenyRequest(c *check.C) {
|
|||
|
||||
// TestAuthZPluginAPIDenyResponse validates that when authorization plugin deny the request, the status code is forbidden
|
||||
func (s *DockerAuthzSuite) TestAuthZPluginAPIDenyResponse(c *check.C) {
|
||||
err := s.d.Start("--authorization-plugin=" + testAuthZPlugin)
|
||||
c.Assert(err, check.IsNil)
|
||||
s.d.Start(c, "--authorization-plugin="+testAuthZPlugin)
|
||||
s.ctrl.reqRes.Allow = false
|
||||
s.ctrl.resRes.Msg = unauthorizedMessage
|
||||
|
||||
|
@ -303,8 +299,7 @@ func (s *DockerAuthzSuite) TestAuthZPluginAPIDenyResponse(c *check.C) {
|
|||
}
|
||||
|
||||
func (s *DockerAuthzSuite) TestAuthZPluginDenyResponse(c *check.C) {
|
||||
err := s.d.Start("--authorization-plugin=" + testAuthZPlugin)
|
||||
c.Assert(err, check.IsNil)
|
||||
s.d.Start(c, "--authorization-plugin="+testAuthZPlugin)
|
||||
s.ctrl.reqRes.Allow = true
|
||||
s.ctrl.resRes.Allow = false
|
||||
s.ctrl.resRes.Msg = unauthorizedMessage
|
||||
|
@ -324,7 +319,7 @@ func (s *DockerAuthzSuite) TestAuthZPluginAllowEventStream(c *check.C) {
|
|||
testRequires(c, DaemonIsLinux)
|
||||
|
||||
// start the daemon and load busybox to avoid pulling busybox from Docker Hub
|
||||
c.Assert(s.d.Start("--authorization-plugin="+testAuthZPlugin), check.IsNil)
|
||||
s.d.Start(c, "--authorization-plugin="+testAuthZPlugin)
|
||||
s.ctrl.reqRes.Allow = true
|
||||
s.ctrl.resRes.Allow = true
|
||||
c.Assert(s.d.LoadBusybox(), check.IsNil)
|
||||
|
@ -383,8 +378,7 @@ func (s *DockerAuthzSuite) TestAuthZPluginAllowEventStream(c *check.C) {
|
|||
}
|
||||
|
||||
func (s *DockerAuthzSuite) TestAuthZPluginErrorResponse(c *check.C) {
|
||||
err := s.d.Start("--authorization-plugin=" + testAuthZPlugin)
|
||||
c.Assert(err, check.IsNil)
|
||||
s.d.Start(c, "--authorization-plugin="+testAuthZPlugin)
|
||||
s.ctrl.reqRes.Allow = true
|
||||
s.ctrl.resRes.Err = errorMessage
|
||||
|
||||
|
@ -396,8 +390,7 @@ func (s *DockerAuthzSuite) TestAuthZPluginErrorResponse(c *check.C) {
|
|||
}
|
||||
|
||||
func (s *DockerAuthzSuite) TestAuthZPluginErrorRequest(c *check.C) {
|
||||
err := s.d.Start("--authorization-plugin=" + testAuthZPlugin)
|
||||
c.Assert(err, check.IsNil)
|
||||
s.d.Start(c, "--authorization-plugin="+testAuthZPlugin)
|
||||
s.ctrl.reqRes.Err = errorMessage
|
||||
|
||||
// Ensure command is blocked
|
||||
|
@ -408,7 +401,7 @@ func (s *DockerAuthzSuite) TestAuthZPluginErrorRequest(c *check.C) {
|
|||
}
|
||||
|
||||
func (s *DockerAuthzSuite) TestAuthZPluginEnsureNoDuplicatePluginRegistration(c *check.C) {
|
||||
c.Assert(s.d.Start("--authorization-plugin="+testAuthZPlugin, "--authorization-plugin="+testAuthZPlugin), check.IsNil)
|
||||
s.d.Start(c, "--authorization-plugin="+testAuthZPlugin, "--authorization-plugin="+testAuthZPlugin)
|
||||
|
||||
s.ctrl.reqRes.Allow = true
|
||||
s.ctrl.resRes.Allow = true
|
||||
|
@ -422,7 +415,7 @@ func (s *DockerAuthzSuite) TestAuthZPluginEnsureNoDuplicatePluginRegistration(c
|
|||
}
|
||||
|
||||
func (s *DockerAuthzSuite) TestAuthZPluginEnsureLoadImportWorking(c *check.C) {
|
||||
c.Assert(s.d.Start("--authorization-plugin="+testAuthZPlugin, "--authorization-plugin="+testAuthZPlugin), check.IsNil)
|
||||
s.d.Start(c, "--authorization-plugin="+testAuthZPlugin, "--authorization-plugin="+testAuthZPlugin)
|
||||
s.ctrl.reqRes.Allow = true
|
||||
s.ctrl.resRes.Allow = true
|
||||
c.Assert(s.d.LoadBusybox(), check.IsNil)
|
||||
|
@ -449,7 +442,7 @@ func (s *DockerAuthzSuite) TestAuthZPluginEnsureLoadImportWorking(c *check.C) {
|
|||
}
|
||||
|
||||
func (s *DockerAuthzSuite) TestAuthZPluginHeader(c *check.C) {
|
||||
c.Assert(s.d.Start("--debug", "--authorization-plugin="+testAuthZPlugin), check.IsNil)
|
||||
s.d.Start(c, "--debug", "--authorization-plugin="+testAuthZPlugin)
|
||||
s.ctrl.reqRes.Allow = true
|
||||
s.ctrl.resRes.Allow = true
|
||||
c.Assert(s.d.LoadBusybox(), check.IsNil)
|
||||
|
|
|
@ -18,9 +18,7 @@ import (
|
|||
func (s *DockerDaemonSuite) TestDaemonRestartWithPluginEnabled(c *check.C) {
|
||||
testRequires(c, Network)
|
||||
|
||||
if err := s.d.Start(); err != nil {
|
||||
c.Fatalf("Could not start daemon: %v", err)
|
||||
}
|
||||
s.d.Start(c)
|
||||
|
||||
if out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pName); err != nil {
|
||||
c.Fatalf("Could not install plugin: %v %s", err, out)
|
||||
|
@ -35,9 +33,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithPluginEnabled(c *check.C) {
|
|||
}
|
||||
}()
|
||||
|
||||
if err := s.d.Restart(); err != nil {
|
||||
c.Fatalf("Could not restart daemon: %v", err)
|
||||
}
|
||||
s.d.Restart(c)
|
||||
|
||||
out, err := s.d.Cmd("plugin", "ls")
|
||||
if err != nil {
|
||||
|
@ -51,9 +47,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithPluginEnabled(c *check.C) {
|
|||
func (s *DockerDaemonSuite) TestDaemonRestartWithPluginDisabled(c *check.C) {
|
||||
testRequires(c, Network)
|
||||
|
||||
if err := s.d.Start(); err != nil {
|
||||
c.Fatalf("Could not start daemon: %v", err)
|
||||
}
|
||||
s.d.Start(c)
|
||||
|
||||
if out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pName, "--disable"); err != nil {
|
||||
c.Fatalf("Could not install plugin: %v %s", err, out)
|
||||
|
@ -65,9 +59,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithPluginDisabled(c *check.C) {
|
|||
}
|
||||
}()
|
||||
|
||||
if err := s.d.Restart(); err != nil {
|
||||
c.Fatalf("Could not restart daemon: %v", err)
|
||||
}
|
||||
s.d.Restart(c)
|
||||
|
||||
out, err := s.d.Cmd("plugin", "ls")
|
||||
if err != nil {
|
||||
|
@ -82,16 +74,12 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithPluginDisabled(c *check.C) {
|
|||
func (s *DockerDaemonSuite) TestDaemonKillLiveRestoreWithPlugins(c *check.C) {
|
||||
testRequires(c, Network, IsAmd64)
|
||||
|
||||
if err := s.d.Start("--live-restore"); err != nil {
|
||||
c.Fatalf("Could not start daemon: %v", err)
|
||||
}
|
||||
s.d.Start(c, "--live-restore")
|
||||
if out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pName); err != nil {
|
||||
c.Fatalf("Could not install plugin: %v %s", err, out)
|
||||
}
|
||||
defer func() {
|
||||
if err := s.d.Restart("--live-restore"); err != nil {
|
||||
c.Fatalf("Could not restart daemon: %v", err)
|
||||
}
|
||||
s.d.Restart(c, "--live-restore")
|
||||
if out, err := s.d.Cmd("plugin", "disable", pName); err != nil {
|
||||
c.Fatalf("Could not disable plugin: %v %s", err, out)
|
||||
}
|
||||
|
@ -115,16 +103,12 @@ func (s *DockerDaemonSuite) TestDaemonKillLiveRestoreWithPlugins(c *check.C) {
|
|||
func (s *DockerDaemonSuite) TestDaemonShutdownLiveRestoreWithPlugins(c *check.C) {
|
||||
testRequires(c, Network, IsAmd64)
|
||||
|
||||
if err := s.d.Start("--live-restore"); err != nil {
|
||||
c.Fatalf("Could not start daemon: %v", err)
|
||||
}
|
||||
s.d.Start(c, "--live-restore")
|
||||
if out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pName); err != nil {
|
||||
c.Fatalf("Could not install plugin: %v %s", err, out)
|
||||
}
|
||||
defer func() {
|
||||
if err := s.d.Restart("--live-restore"); err != nil {
|
||||
c.Fatalf("Could not restart daemon: %v", err)
|
||||
}
|
||||
s.d.Restart(c, "--live-restore")
|
||||
if out, err := s.d.Cmd("plugin", "disable", pName); err != nil {
|
||||
c.Fatalf("Could not disable plugin: %v %s", err, out)
|
||||
}
|
||||
|
@ -147,17 +131,13 @@ func (s *DockerDaemonSuite) TestDaemonShutdownLiveRestoreWithPlugins(c *check.C)
|
|||
func (s *DockerDaemonSuite) TestDaemonShutdownWithPlugins(c *check.C) {
|
||||
testRequires(c, Network)
|
||||
|
||||
if err := s.d.Start(); err != nil {
|
||||
c.Fatalf("Could not start daemon: %v", err)
|
||||
}
|
||||
s.d.Start(c)
|
||||
if out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pName); err != nil {
|
||||
c.Fatalf("Could not install plugin: %v %s", err, out)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err := s.d.Restart(); err != nil {
|
||||
c.Fatalf("Could not restart daemon: %v", err)
|
||||
}
|
||||
s.d.Restart(c)
|
||||
if out, err := s.d.Cmd("plugin", "disable", pName); err != nil {
|
||||
c.Fatalf("Could not disable plugin: %v %s", err, out)
|
||||
}
|
||||
|
@ -190,9 +170,7 @@ func (s *DockerDaemonSuite) TestVolumePlugin(c *check.C) {
|
|||
destDir := "/tmp/data/"
|
||||
destFile := "foo"
|
||||
|
||||
if err := s.d.Start(); err != nil {
|
||||
c.Fatalf("Could not start daemon: %v", err)
|
||||
}
|
||||
s.d.Start(c)
|
||||
out, err := s.d.Cmd("plugin", "install", pName, "--grant-all-permissions")
|
||||
if err != nil {
|
||||
c.Fatalf("Could not install plugin: %v %s", err, out)
|
||||
|
|
File diff suppressed because it is too large
Load diff
|
@@ -400,7 +400,7 @@ func (s *DockerDaemonSuite) TestDaemonEvents(c *check.C) {
 	daemonConfig := `{"labels":["foo=bar"]}`
 	fmt.Fprintf(configFile, "%s", daemonConfig)
 	configFile.Close()
-	c.Assert(s.d.Start(fmt.Sprintf("--config-file=%s", configFilePath)), check.IsNil)
+	s.d.Start(c, fmt.Sprintf("--config-file=%s", configFilePath))

 	// Get daemon ID
 	out, err := s.d.Cmd("info")
@@ -444,7 +444,7 @@ func (s *DockerDaemonSuite) TestDaemonEventsWithFilters(c *check.C) {
 	daemonConfig := `{"labels":["foo=bar"]}`
 	fmt.Fprintf(configFile, "%s", daemonConfig)
 	configFile.Close()
-	c.Assert(s.d.Start(fmt.Sprintf("--config-file=%s", configFilePath)), check.IsNil)
+	s.d.Start(c, fmt.Sprintf("--config-file=%s", configFilePath))

 	// Get daemon ID
 	out, err := s.d.Cmd("info")

@@ -81,17 +81,13 @@ func (s *DockerSuite) TestExecAfterContainerRestart(c *check.C) {

 func (s *DockerDaemonSuite) TestExecAfterDaemonRestart(c *check.C) {
 	// TODO Windows CI: Requires a little work to get this ported.
-	testRequires(c, DaemonIsLinux)
-	testRequires(c, SameHostDaemon)
-
-	err := s.d.StartWithBusybox()
-	c.Assert(err, checker.IsNil)
+	testRequires(c, DaemonIsLinux, SameHostDaemon)
+	s.d.StartWithBusybox(c)

 	out, err := s.d.Cmd("run", "-d", "--name", "top", "-p", "80", "busybox:latest", "top")
 	c.Assert(err, checker.IsNil, check.Commentf("Could not run top: %s", out))

-	err = s.d.Restart()
-	c.Assert(err, checker.IsNil, check.Commentf("Could not restart daemon"))
+	s.d.Restart(c)

 	out, err = s.d.Cmd("start", "top")
 	c.Assert(err, checker.IsNil, check.Commentf("Could not start top after daemon restart: %s", out))

@@ -59,7 +59,7 @@ func (s *DockerExternalGraphdriverSuite) SetUpTest(c *check.C) {

 func (s *DockerExternalGraphdriverSuite) TearDownTest(c *check.C) {
 	if s.d != nil {
-		s.d.Stop()
+		s.d.Stop(c)
 		s.ds.TearDownTest(c)
 	}
 }
@@ -349,15 +349,12 @@ func (s *DockerExternalGraphdriverSuite) TestExternalGraphDriver(c *check.C) {
 }

 func (s *DockerExternalGraphdriverSuite) testExternalGraphDriver(name string, ext string, c *check.C) {
-	if err := s.d.StartWithBusybox("-s", name); err != nil {
-		b, _ := ioutil.ReadFile(s.d.LogFileName())
-		c.Assert(err, check.IsNil, check.Commentf("\n%s", string(b)))
-	}
+	s.d.StartWithBusybox(c, "-s", name)

 	out, err := s.d.Cmd("run", "--name=graphtest", "busybox", "sh", "-c", "echo hello > /hello")
 	c.Assert(err, check.IsNil, check.Commentf(out))

-	err = s.d.Restart("-s", name)
+	s.d.Restart(c, "-s", name)

 	out, err = s.d.Cmd("inspect", "--format={{.GraphDriver.Name}}", "graphtest")
 	c.Assert(err, check.IsNil, check.Commentf(out))
@@ -373,8 +370,7 @@ func (s *DockerExternalGraphdriverSuite) testExternalGraphDriver(name string, ex
 	out, err = s.d.Cmd("info")
 	c.Assert(err, check.IsNil, check.Commentf(out))

-	err = s.d.Stop()
-	c.Assert(err, check.IsNil)
+	s.d.Stop(c)

 	// Don't check s.ec.exists, because the daemon no longer calls the
 	// Exists function.
@@ -396,7 +392,7 @@ func (s *DockerExternalGraphdriverSuite) testExternalGraphDriver(name string, ex
 func (s *DockerExternalGraphdriverSuite) TestExternalGraphDriverPull(c *check.C) {
 	testRequires(c, Network, ExperimentalDaemon)

-	c.Assert(s.d.Start(), check.IsNil)
+	s.d.Start(c)

 	out, err := s.d.Cmd("pull", "busybox:latest")
 	c.Assert(err, check.IsNil, check.Commentf(out))

@@ -58,7 +58,7 @@ func (s *DockerExternalVolumeSuite) SetUpTest(c *check.C) {

 func (s *DockerExternalVolumeSuite) TearDownTest(c *check.C) {
 	if s.d != nil {
-		s.d.Stop()
+		s.d.Stop(c)
 		s.ds.TearDownTest(c)
 	}
 }
@@ -285,8 +285,7 @@ func (s *DockerExternalVolumeSuite) TearDownSuite(c *check.C) {
 }

 func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverNamed(c *check.C) {
-	err := s.d.StartWithBusybox()
-	c.Assert(err, checker.IsNil)
+	s.d.StartWithBusybox(c)

 	out, err := s.d.Cmd("run", "--rm", "--name", "test-data", "-v", "external-volume-test:/tmp/external-volume-test", "--volume-driver", volumePluginName, "busybox:latest", "cat", "/tmp/external-volume-test/test")
 	c.Assert(err, checker.IsNil, check.Commentf(out))
@@ -308,8 +307,7 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverNamed(c *check.C) {
 }

 func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverUnnamed(c *check.C) {
-	err := s.d.StartWithBusybox()
-	c.Assert(err, checker.IsNil)
+	s.d.StartWithBusybox(c)

 	out, err := s.d.Cmd("run", "--rm", "--name", "test-data", "-v", "/tmp/external-volume-test", "--volume-driver", volumePluginName, "busybox:latest", "cat", "/tmp/external-volume-test/test")
 	c.Assert(err, checker.IsNil, check.Commentf(out))
@@ -323,8 +321,7 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverUnnamed(c *check.C)
 }

 func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverVolumesFrom(c *check.C) {
-	err := s.d.StartWithBusybox()
-	c.Assert(err, checker.IsNil)
+	s.d.StartWithBusybox(c)

 	out, err := s.d.Cmd("run", "--name", "vol-test1", "-v", "/foo", "--volume-driver", volumePluginName, "busybox:latest")
 	c.Assert(err, checker.IsNil, check.Commentf(out))
@@ -343,8 +340,7 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverVolumesFrom(c *check
 }

 func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverDeleteContainer(c *check.C) {
-	err := s.d.StartWithBusybox()
-	c.Assert(err, checker.IsNil)
+	s.d.StartWithBusybox(c)

 	out, err := s.d.Cmd("run", "--name", "vol-test1", "-v", "/foo", "--volume-driver", volumePluginName, "busybox:latest")
 	c.Assert(err, checker.IsNil, check.Commentf(out))
@@ -401,8 +397,7 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverLookupNotBlocked(c *
 }

 func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverRetryNotImmediatelyExists(c *check.C) {
-	err := s.d.StartWithBusybox()
-	c.Assert(err, checker.IsNil)
+	s.d.StartWithBusybox(c)

 	specPath := "/etc/docker/plugins/test-external-volume-driver-retry.spec"
 	os.RemoveAll(specPath)
@@ -429,7 +424,7 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverRetryNotImmediatelyE
 		c.Fatal("volume creates fail when plugin not immediately available")
 	}

-	_, err = s.d.Cmd("volume", "rm", "external-volume-test")
+	_, err := s.d.Cmd("volume", "rm", "external-volume-test")
 	c.Assert(err, checker.IsNil)

 	c.Assert(s.ec.activations, checker.Equals, 1)
@@ -490,8 +485,7 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverGet(c *check.C) {

 func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverWithDaemonRestart(c *check.C) {
 	dockerCmd(c, "volume", "create", "-d", volumePluginName, "abc1")
-	err := s.d.Restart()
-	c.Assert(err, checker.IsNil)
+	s.d.Restart(c)

 	dockerCmd(c, "run", "--name=test", "-v", "abc1:/foo", "busybox", "true")
 	var mounts []types.MountPoint
@@ -503,7 +497,7 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverWithDaemonRestart(c
 // Ensures that the daemon handles when the plugin responds to a `Get` request with a null volume and a null error.
 // Prior the daemon would panic in this scenario.
 func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverGetEmptyResponse(c *check.C) {
-	c.Assert(s.d.Start(), checker.IsNil)
+	s.d.Start(c)

 	out, err := s.d.Cmd("volume", "create", "-d", volumePluginName, "abc2", "--opt", "ninja=1")
 	c.Assert(err, checker.IsNil, check.Commentf(out))
@@ -515,7 +509,7 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverGetEmptyResponse(c *

 // Ensure only cached paths are used in volume list to prevent N+1 calls to `VolumeDriver.Path`
 func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverPathCalls(c *check.C) {
-	c.Assert(s.d.Start(), checker.IsNil)
+	s.d.Start(c)
 	c.Assert(s.ec.paths, checker.Equals, 0)

 	out, err := s.d.Cmd("volume", "create", "test", "--driver=test-external-volume-driver")
@@ -533,8 +527,7 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverPathCalls(c *check.C
 }

 func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverMountID(c *check.C) {
-	err := s.d.StartWithBusybox()
-	c.Assert(err, checker.IsNil)
+	s.d.StartWithBusybox(c)

 	out, err := s.d.Cmd("run", "--rm", "-v", "external-volume-test:/tmp/external-volume-test", "--volume-driver", volumePluginName, "busybox:latest", "cat", "/tmp/external-volume-test/test")
 	c.Assert(err, checker.IsNil, check.Commentf(out))
@@ -543,7 +536,7 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverMountID(c *check.C)

 // Check that VolumeDriver.Capabilities gets called, and only called once
 func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverCapabilities(c *check.C) {
-	c.Assert(s.d.Start(), checker.IsNil)
+	s.d.Start(c)
 	c.Assert(s.ec.caps, checker.Equals, 0)

 	for i := 0; i < 3; i++ {
@@ -561,7 +554,7 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverOutOfBandDelete(c *c
 	p := newVolumePlugin(c, driverName)
 	defer p.Close()

-	c.Assert(s.d.StartWithBusybox(), checker.IsNil)
+	s.d.StartWithBusybox(c)

 	out, err := s.d.Cmd("volume", "create", "-d", driverName, "--name", "test")
 	c.Assert(err, checker.IsNil, check.Commentf(out))
@@ -606,7 +599,7 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverOutOfBandDelete(c *c
 }

 func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverUnmountOnMountFail(c *check.C) {
-	c.Assert(s.d.StartWithBusybox(), checker.IsNil)
+	s.d.StartWithBusybox(c)
 	s.d.Cmd("volume", "create", "-d", "test-external-volume-driver", "--opt=invalidOption=1", "--name=testumount")

 	out, _ := s.d.Cmd("run", "-v", "testumount:/foo", "busybox", "true")

|
|||
})
|
||||
discoveryBackend := "consul://consuladdr:consulport/some/path"
|
||||
discoveryAdvertise := "1.1.1.1:2375"
|
||||
err := d.Start(fmt.Sprintf("--cluster-store=%s", discoveryBackend), fmt.Sprintf("--cluster-advertise=%s", discoveryAdvertise))
|
||||
c.Assert(err, checker.IsNil)
|
||||
defer d.Stop()
|
||||
d.Start(c, fmt.Sprintf("--cluster-store=%s", discoveryBackend), fmt.Sprintf("--cluster-advertise=%s", discoveryAdvertise))
|
||||
defer d.Stop(c)
|
||||
|
||||
out, err := d.Cmd("info")
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
@ -97,12 +96,12 @@ func (s *DockerSuite) TestInfoDiscoveryInvalidAdvertise(c *check.C) {
|
|||
discoveryBackend := "consul://consuladdr:consulport/some/path"
|
||||
|
||||
// --cluster-advertise with an invalid string is an error
|
||||
err := d.Start(fmt.Sprintf("--cluster-store=%s", discoveryBackend), "--cluster-advertise=invalid")
|
||||
c.Assert(err, checker.Not(checker.IsNil))
|
||||
err := d.StartWithError(fmt.Sprintf("--cluster-store=%s", discoveryBackend), "--cluster-advertise=invalid")
|
||||
c.Assert(err, checker.NotNil)
|
||||
|
||||
// --cluster-advertise without --cluster-store is also an error
|
||||
err = d.Start("--cluster-advertise=1.1.1.1:2375")
|
||||
c.Assert(err, checker.Not(checker.IsNil))
|
||||
err = d.StartWithError("--cluster-advertise=1.1.1.1:2375")
|
||||
c.Assert(err, checker.NotNil)
|
||||
}
|
||||
|
||||
// TestInfoDiscoveryAdvertiseInterfaceName verifies that a daemon run with `--cluster-advertise`
|
||||
|
@ -116,9 +115,8 @@ func (s *DockerSuite) TestInfoDiscoveryAdvertiseInterfaceName(c *check.C) {
|
|||
discoveryBackend := "consul://consuladdr:consulport/some/path"
|
||||
discoveryAdvertise := "eth0"
|
||||
|
||||
err := d.Start(fmt.Sprintf("--cluster-store=%s", discoveryBackend), fmt.Sprintf("--cluster-advertise=%s:2375", discoveryAdvertise))
|
||||
c.Assert(err, checker.IsNil)
|
||||
defer d.Stop()
|
||||
d.Start(c, fmt.Sprintf("--cluster-store=%s", discoveryBackend), fmt.Sprintf("--cluster-advertise=%s:2375", discoveryAdvertise))
|
||||
defer d.Stop(c)
|
||||
|
||||
iface, err := net.InterfaceByName(discoveryAdvertise)
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
@ -181,9 +179,8 @@ func (s *DockerSuite) TestInfoDebug(c *check.C) {
|
|||
d := daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{
|
||||
Experimental: experimentalDaemon,
|
||||
})
|
||||
err := d.Start("--debug")
|
||||
c.Assert(err, checker.IsNil)
|
||||
defer d.Stop()
|
||||
d.Start(c, "--debug")
|
||||
defer d.Stop(c)
|
||||
|
||||
out, err := d.Cmd("--debug", "info")
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
@ -205,9 +202,8 @@ func (s *DockerSuite) TestInsecureRegistries(c *check.C) {
|
|||
d := daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{
|
||||
Experimental: experimentalDaemon,
|
||||
})
|
||||
err := d.Start("--insecure-registry="+registryCIDR, "--insecure-registry="+registryHost)
|
||||
c.Assert(err, checker.IsNil)
|
||||
defer d.Stop()
|
||||
d.Start(c, "--insecure-registry="+registryCIDR, "--insecure-registry="+registryHost)
|
||||
defer d.Stop(c)
|
||||
|
||||
out, err := d.Cmd("info")
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
@ -222,8 +218,7 @@ func (s *DockerDaemonSuite) TestRegistryMirrors(c *check.C) {
|
|||
registryMirror1 := "https://192.168.1.2"
|
||||
registryMirror2 := "http://registry.mirror.com:5000"
|
||||
|
||||
err := s.d.Start("--registry-mirror="+registryMirror1, "--registry-mirror="+registryMirror2)
|
||||
c.Assert(err, checker.IsNil)
|
||||
s.d.Start(c, "--registry-mirror="+registryMirror1, "--registry-mirror="+registryMirror2)
|
||||
|
||||
out, err := s.d.Cmd("info")
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
@ -236,8 +231,7 @@ func (s *DockerDaemonSuite) TestRegistryMirrors(c *check.C) {
|
|||
func (s *DockerDaemonSuite) TestInfoLabels(c *check.C) {
|
||||
testRequires(c, SameHostDaemon, DaemonIsLinux)
|
||||
|
||||
err := s.d.Start("--label", `test.empty=`, "--label", `test.empty=`, "--label", `test.label="1"`, "--label", `test.label="2"`)
|
||||
c.Assert(err, checker.IsNil)
|
||||
s.d.Start(c, "--label", `test.empty=`, "--label", `test.empty=`, "--label", `test.label="1"`, "--label", `test.label="2"`)
|
||||
|
||||
out, err := s.d.Cmd("info")
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
|
|
@@ -55,7 +55,7 @@ func (s *DockerNetworkSuite) SetUpTest(c *check.C) {

 func (s *DockerNetworkSuite) TearDownTest(c *check.C) {
 	if s.d != nil {
-		s.d.Stop()
+		s.d.Stop(c)
 		s.ds.TearDownTest(c)
 	}
 }
@@ -807,9 +807,8 @@ func (s *DockerDaemonSuite) TestDockerNetworkNoDiscoveryDefaultBridgeNetwork(c *
 	c.Assert(err, check.IsNil, check.Commentf(out))
 	defer deleteInterface(c, bridgeName)

-	err = s.d.StartWithBusybox("--bridge", bridgeName)
-	c.Assert(err, check.IsNil)
-	defer s.d.Restart()
+	s.d.StartWithBusybox(c, "--bridge", bridgeName)
+	defer s.d.Restart(c)

 	// run two containers and store first container's etc/hosts content
 	out, err = s.d.Cmd("run", "-d", "busybox", "top")
@@ -989,7 +988,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkDriverUngracefulRestart(c *check.C
 	server := httptest.NewServer(mux)
 	setupRemoteNetworkDrivers(c, mux, server.URL, dnd, did)

-	s.d.StartWithBusybox()
+	s.d.StartWithBusybox(c)
 	_, err := s.d.Cmd("network", "create", "-d", dnd, "--subnet", "1.1.1.0/24", "net1")
 	c.Assert(err, checker.IsNil)

@@ -1002,9 +1001,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkDriverUngracefulRestart(c *check.C
 	server.Close()

 	startTime := time.Now().Unix()
-	if err = s.d.Restart(); err != nil {
-		c.Fatal(err)
-	}
+	s.d.Restart(c)
 	lapse := time.Now().Unix() - startTime
 	if lapse > 60 {
 		// In normal scenarios, daemon restart takes ~1 second.
@@ -1092,13 +1089,13 @@ func (s *DockerNetworkSuite) TestDockerNetworkMultipleNetworksGracefulDaemonRest
 	cName := "bb"
 	nwList := []string{"nw1", "nw2", "nw3"}

-	s.d.StartWithBusybox()
+	s.d.StartWithBusybox(c)

 	connectContainerToNetworks(c, s.d, cName, nwList)
 	verifyContainerIsConnectedToNetworks(c, s.d, cName, nwList)

 	// Reload daemon
-	s.d.Restart()
+	s.d.Restart(c)

 	_, err := s.d.Cmd("start", cName)
 	c.Assert(err, checker.IsNil)
@@ -1110,14 +1107,14 @@ func (s *DockerNetworkSuite) TestDockerNetworkMultipleNetworksUngracefulDaemonRe
 	cName := "cc"
 	nwList := []string{"nw1", "nw2", "nw3"}

-	s.d.StartWithBusybox()
+	s.d.StartWithBusybox(c)

 	connectContainerToNetworks(c, s.d, cName, nwList)
 	verifyContainerIsConnectedToNetworks(c, s.d, cName, nwList)

 	// Kill daemon and restart
 	c.Assert(s.d.Kill(), checker.IsNil)
-	c.Assert(s.d.Restart(), checker.IsNil)
+	s.d.Restart(c)

 	// Restart container
 	_, err := s.d.Cmd("start", cName)
@@ -1134,7 +1131,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkRunNetByID(c *check.C) {

 func (s *DockerNetworkSuite) TestDockerNetworkHostModeUngracefulDaemonRestart(c *check.C) {
 	testRequires(c, DaemonIsLinux, NotUserNamespace)
-	s.d.StartWithBusybox()
+	s.d.StartWithBusybox(c)

 	// Run a few containers on host network
 	for i := 0; i < 10; i++ {
@@ -1149,7 +1146,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkHostModeUngracefulDaemonRestart(c

 	// Kill daemon ungracefully and restart
 	c.Assert(s.d.Kill(), checker.IsNil)
-	c.Assert(s.d.Restart(), checker.IsNil)
+	s.d.Restart(c)

 	// make sure all the containers are up and running
 	for i := 0; i < 10; i++ {
@@ -1266,7 +1263,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkConnectDisconnectToStoppedContaine
 	c.Assert(networks, checker.Contains, "test", check.Commentf("Should contain 'test' network"))

 	// Restart docker daemon to test the config has persisted to disk
-	s.d.Restart()
+	s.d.Restart(c)
 	networks = inspectField(c, "foo", "NetworkSettings.Networks")
 	c.Assert(networks, checker.Contains, "test", check.Commentf("Should contain 'test' network"))

@@ -1285,7 +1282,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkConnectDisconnectToStoppedContaine
 	c.Assert(networks, checker.Not(checker.Contains), "test", check.Commentf("Should not contain 'test' network"))

 	// Restart docker daemon to test the config has persisted to disk
-	s.d.Restart()
+	s.d.Restart(c)
 	networks = inspectField(c, "foo", "NetworkSettings.Networks")
 	c.Assert(networks, checker.Not(checker.Contains), "test", check.Commentf("Should not contain 'test' network"))

@@ -1666,10 +1663,8 @@ func (s *DockerNetworkSuite) TestDockerNetworkCreateDeleteSpecialCharacters(c *c

 func (s *DockerDaemonSuite) TestDaemonRestartRestoreBridgeNetwork(t *check.C) {
 	testRequires(t, DaemonIsLinux)
-	if err := s.d.StartWithBusybox("--live-restore"); err != nil {
-		t.Fatal(err)
-	}
-	defer s.d.Stop()
+	s.d.StartWithBusybox(t, "--live-restore")
+	defer s.d.Stop(t)
 	oldCon := "old"

 	_, err := s.d.Cmd("run", "-d", "--name", oldCon, "-p", "80:80", "busybox", "top")
@@ -1686,9 +1681,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartRestoreBridgeNetwork(t *check.C) {
 	}

 	// restart the daemon
-	if err := s.d.Start("--live-restore"); err != nil {
-		t.Fatal(err)
-	}
+	s.d.Start(t, "--live-restore")

 	// start a new container, the new container's ip should not be the same with
 	// old running container.

|
|||
|
||||
c.Assert(ip, checker.Not(checker.Equals), "")
|
||||
|
||||
err = s.d.Start("-H", "tcp://"+ip+":2375")
|
||||
c.Assert(err, checker.IsNil)
|
||||
s.d.Start(c, "-H", "tcp://"+ip+":2375")
|
||||
cmd := exec.Command(dockerBinary, "info")
|
||||
cmd.Env = []string{"DOCKER_HOST=tcp://" + ip + ":2375", "HTTP_PROXY=127.0.0.1:9999"}
|
||||
out, _, err := runCommandWithOutput(cmd)
|
||||
|
|
|
@@ -62,7 +62,7 @@ func (s *DockerSwarmSuite) TestPruneNetwork(c *check.C) {
 }

 func (s *DockerDaemonSuite) TestPruneImageDangling(c *check.C) {
-	c.Assert(s.d.StartWithBusybox(), checker.IsNil)
+	s.d.StartWithBusybox(c)

 	out, _, err := s.d.BuildImageWithOut("test",
 		`FROM busybox

@@ -89,13 +89,12 @@ func (s *DockerRegistrySuite) TestUserAgentPassThrough(c *check.C) {
 	c.Assert(err, check.IsNil)
 	registerUserAgentHandler(loginReg, &loginUA)

-	err = s.d.Start(
+	s.d.Start(c,
 		"--insecure-registry", buildReg.hostport,
 		"--insecure-registry", pullReg.hostport,
 		"--insecure-registry", pushReg.hostport,
 		"--insecure-registry", loginReg.hostport,
 		"--disable-legacy-registry=true")
-	c.Assert(err, check.IsNil)

 	dockerfileName, cleanup1, err := makefile(fmt.Sprintf("FROM %s", buildRepoName))
 	c.Assert(err, check.IsNil, check.Commentf("Unable to create test dockerfile"))

@@ -4434,7 +4434,7 @@ exec "$@"`,
 }

 func (s *DockerDaemonSuite) TestRunWithUlimitAndDaemonDefault(c *check.C) {
-	c.Assert(s.d.StartWithBusybox("--debug", "--default-ulimit=nofile=65535"), checker.IsNil)
+	s.d.StartWithBusybox(c, "--debug", "--default-ulimit=nofile=65535")

 	name := "test-A"
 	_, err := s.d.Cmd("run", "--name", name, "-d", "busybox", "top")

@@ -1449,8 +1449,7 @@ func (s *DockerSuite) TestRunUserDeviceAllowed(c *check.C) {
 func (s *DockerDaemonSuite) TestRunSeccompJSONNewFormat(c *check.C) {
 	testRequires(c, SameHostDaemon, seccompEnabled)

-	err := s.d.StartWithBusybox()
-	c.Assert(err, check.IsNil)
+	s.d.StartWithBusybox(c)

 	jsonData := `{
 	"defaultAction": "SCMP_ACT_ALLOW",
@@ -1475,8 +1474,7 @@ func (s *DockerDaemonSuite) TestRunSeccompJSONNewFormat(c *check.C) {
 func (s *DockerDaemonSuite) TestRunSeccompJSONNoNameAndNames(c *check.C) {
 	testRequires(c, SameHostDaemon, seccompEnabled)

-	err := s.d.StartWithBusybox()
-	c.Assert(err, check.IsNil)
+	s.d.StartWithBusybox(c)

 	jsonData := `{
 	"defaultAction": "SCMP_ACT_ALLOW",
@@ -1502,8 +1500,7 @@ func (s *DockerDaemonSuite) TestRunSeccompJSONNoNameAndNames(c *check.C) {
 func (s *DockerDaemonSuite) TestRunSeccompJSONNoArchAndArchMap(c *check.C) {
 	testRequires(c, SameHostDaemon, seccompEnabled)

-	err := s.d.StartWithBusybox()
-	c.Assert(err, check.IsNil)
+	s.d.StartWithBusybox(c)

 	jsonData := `{
 	"archMap": [
@@ -1540,11 +1537,10 @@ func (s *DockerDaemonSuite) TestRunSeccompJSONNoArchAndArchMap(c *check.C) {
 func (s *DockerDaemonSuite) TestRunWithDaemonDefaultSeccompProfile(c *check.C) {
 	testRequires(c, SameHostDaemon, seccompEnabled)

-	err := s.d.StartWithBusybox()
-	c.Assert(err, check.IsNil)
+	s.d.StartWithBusybox(c)

 	// 1) verify I can run containers with the Docker default shipped profile which allows chmod
-	_, err = s.d.Cmd("run", "busybox", "chmod", "777", ".")
+	_, err := s.d.Cmd("run", "busybox", "chmod", "777", ".")
 	c.Assert(err, check.IsNil)

 	jsonData := `{
@@ -1563,8 +1559,7 @@ func (s *DockerDaemonSuite) TestRunWithDaemonDefaultSeccompProfile(c *check.C) {
 	c.Assert(err, check.IsNil)

 	// 2) restart the daemon and add a custom seccomp profile in which we deny chmod
-	err = s.d.Restart("--seccomp-profile=" + tmpFile.Name())
-	c.Assert(err, check.IsNil)
+	s.d.Restart(c, "--seccomp-profile="+tmpFile.Name())

 	out, err := s.d.Cmd("run", "busybox", "chmod", "777", ".")
 	c.Assert(err, check.NotNil)

@@ -100,23 +100,23 @@ func (s *DockerSwarmSuite) TestSwarmIncompatibleDaemon(c *check.C) {
 	info, err := d.SwarmInfo()
 	c.Assert(err, checker.IsNil)
 	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
-	c.Assert(d.Stop(), checker.IsNil)
+	d.Stop(c)

 	// start a daemon with --cluster-store and --cluster-advertise
-	err = d.Start("--cluster-store=consul://consuladdr:consulport/some/path", "--cluster-advertise=1.1.1.1:2375")
+	err = d.StartWithError("--cluster-store=consul://consuladdr:consulport/some/path", "--cluster-advertise=1.1.1.1:2375")
 	c.Assert(err, checker.NotNil)
 	content, err := d.ReadLogFile()
 	c.Assert(err, checker.IsNil)
 	c.Assert(string(content), checker.Contains, "--cluster-store and --cluster-advertise daemon configurations are incompatible with swarm mode")

 	// start a daemon with --live-restore
-	err = d.Start("--live-restore")
+	err = d.StartWithError("--live-restore")
 	c.Assert(err, checker.NotNil)
 	content, err = d.ReadLogFile()
 	c.Assert(err, checker.IsNil)
 	c.Assert(string(content), checker.Contains, "--live-restore daemon configuration is incompatible with swarm mode")
 	// restart for teardown
-	c.Assert(d.Start(), checker.IsNil)
+	d.Start(c)
 }

 // Test case for #24090
@@ -291,7 +291,7 @@ func (s *DockerSwarmSuite) TestSwarmContainerAutoStart(c *check.C) {
 	c.Assert(err, checker.IsNil, check.Commentf(out))
 	c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "")

-	d.Restart()
+	d.Restart(c)

 	out, err = d.Cmd("ps", "-q")
 	c.Assert(err, checker.IsNil, check.Commentf(out))
@@ -806,7 +806,7 @@ func getNodeStatus(c *check.C, d *daemon.Swarm) swarm.LocalNodeState {
 }

 func checkSwarmLockedToUnlocked(c *check.C, d *daemon.Swarm, unlockKey string) {
-	c.Assert(d.Restart(), checker.IsNil)
+	d.Restart(c)
 	status := getNodeStatus(c, d)
 	if status == swarm.LocalNodeStateLocked {
 		// it must not have updated to be unlocked in time - unlock, wait 3 seconds, and try again
@@ -818,19 +818,19 @@ func checkSwarmLockedToUnlocked(c *check.C, d *daemon.Swarm, unlockKey string) {
 		c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateActive)

 		time.Sleep(3 * time.Second)
-		c.Assert(d.Restart(), checker.IsNil)
+		d.Restart(c)
 	}

 	c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateActive)
 }

 func checkSwarmUnlockedToLocked(c *check.C, d *daemon.Swarm) {
-	c.Assert(d.Restart(), checker.IsNil)
+	d.Restart(c)
 	status := getNodeStatus(c, d)
 	if status == swarm.LocalNodeStateActive {
 		// it must not have updated to be unlocked in time - wait 3 seconds, and try again
 		time.Sleep(3 * time.Second)
-		c.Assert(d.Restart(), checker.IsNil)
+		d.Restart(c)
 	}
 	c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateLocked)
 }
@@ -859,7 +859,7 @@ func (s *DockerSwarmSuite) TestSwarmInitLocked(c *check.C) {
 	c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateActive)

 	// It starts off locked
-	c.Assert(d.Restart(), checker.IsNil)
+	d.Restart(c)
 	c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateLocked)

 	cmd := d.Command("swarm", "unlock")
@@ -898,7 +898,7 @@ func (s *DockerSwarmSuite) TestSwarmLeaveLocked(c *check.C) {
 	c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs))

 	// It starts off locked
-	c.Assert(d.Restart("--swarm-default-advertise-addr=lo"), checker.IsNil)
+	d.Restart(c, "--swarm-default-advertise-addr=lo")

 	info, err := d.SwarmInfo()
 	c.Assert(err, checker.IsNil)
@@ -933,11 +933,11 @@ func (s *DockerSwarmSuite) TestSwarmLockUnlockCluster(c *check.C) {
 	d3 := s.AddDaemon(c, true, true)

 	// they start off unlocked
-	c.Assert(d2.Restart(), checker.IsNil)
+	d2.Restart(c)
 	c.Assert(getNodeStatus(c, d2), checker.Equals, swarm.LocalNodeStateActive)

 	// stop this one so it does not get autolock info
-	c.Assert(d2.Stop(), checker.IsNil)
+	d2.Stop(c)

 	// enable autolock
 	outs, err := d1.Cmd("swarm", "update", "--autolock")
@@ -970,7 +970,7 @@ func (s *DockerSwarmSuite) TestSwarmLockUnlockCluster(c *check.C) {
 	}

 	// d2 never got the cluster update, so it is still set to unlocked
-	c.Assert(d2.Start(), checker.IsNil)
+	d2.Start(c)
 	c.Assert(getNodeStatus(c, d2), checker.Equals, swarm.LocalNodeStateActive)

 	// d2 is now set to lock
@@ -1000,7 +1000,7 @@ func (s *DockerSwarmSuite) TestSwarmLockUnlockCluster(c *check.C) {

 	// managers who join now are never set to locked in the first place
 	d4 := s.AddDaemon(c, true, true)
-	c.Assert(d4.Restart(), checker.IsNil)
+	d4.Restart(c)
 	c.Assert(getNodeStatus(c, d4), checker.Equals, swarm.LocalNodeStateActive)
 }

@@ -1028,7 +1028,7 @@ func (s *DockerSwarmSuite) TestSwarmJoinPromoteLocked(c *check.C) {

 	// joined workers start off unlocked
 	d2 := s.AddDaemon(c, true, false)
-	c.Assert(d2.Restart(), checker.IsNil)
+	d2.Restart(c)
 	c.Assert(getNodeStatus(c, d2), checker.Equals, swarm.LocalNodeStateActive)

 	// promote worker
@@ -1074,7 +1074,7 @@ func (s *DockerSwarmSuite) TestSwarmJoinPromoteLocked(c *check.C) {
 	}, checker.Not(checker.Equals), string(d3cert))

 	// by now, it should *never* be locked on restart
-	c.Assert(d3.Restart(), checker.IsNil)
+	d3.Restart(c)
 	c.Assert(getNodeStatus(c, d3), checker.Equals, swarm.LocalNodeStateActive)
 }

@@ -1108,7 +1108,7 @@ func (s *DockerSwarmSuite) TestSwarmRotateUnlockKey(c *check.C) {
 	c.Assert(newUnlockKey, checker.Not(checker.Equals), "")
 	c.Assert(newUnlockKey, checker.Not(checker.Equals), unlockKey)

-	c.Assert(d.Restart(), checker.IsNil)
+	d.Restart(c)
 	c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateLocked)

 	outs, _ = d.Cmd("node", "ls")
@@ -1129,7 +1129,7 @@ func (s *DockerSwarmSuite) TestSwarmRotateUnlockKey(c *check.C) {

 			time.Sleep(3 * time.Second)

-			c.Assert(d.Restart(), checker.IsNil)
+			d.Restart(c)

 			cmd = d.Command("swarm", "unlock")
 			cmd.Stdin = bytes.NewBufferString(unlockKey)
@@ -1191,8 +1191,8 @@ func (s *DockerSwarmSuite) TestSwarmClusterRotateUnlockKey(c *check.C) {
 	c.Assert(newUnlockKey, checker.Not(checker.Equals), "")
 	c.Assert(newUnlockKey, checker.Not(checker.Equals), unlockKey)

-	c.Assert(d2.Restart(), checker.IsNil)
-	c.Assert(d3.Restart(), checker.IsNil)
+	d2.Restart(c)
+	d3.Restart(c)

 	for _, d := range []*daemon.Swarm{d2, d3} {
 		c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateLocked)
@@ -1215,7 +1215,7 @@ func (s *DockerSwarmSuite) TestSwarmClusterRotateUnlockKey(c *check.C) {

 			time.Sleep(3 * time.Second)

-			c.Assert(d.Restart(), checker.IsNil)
+			d.Restart(c)

 			cmd = d.Command("swarm", "unlock")
 			cmd.Stdin = bytes.NewBufferString(unlockKey)

|
|||
func (s *DockerDaemonSuite) TestDaemonUserNamespaceRootSetting(c *check.C) {
|
||||
testRequires(c, DaemonIsLinux, SameHostDaemon, UserNamespaceInKernel)
|
||||
|
||||
c.Assert(s.d.StartWithBusybox("--userns-remap", "default"), checker.IsNil)
|
||||
s.d.StartWithBusybox(c, "--userns-remap", "default")
|
||||
|
||||
tmpDir, err := ioutil.TempDir("", "userns")
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
|
|
@@ -49,8 +49,7 @@ func (s *DockerRegistrySuite) TestV2Only(c *check.C) {

 	repoName := fmt.Sprintf("%s/busybox", reg.hostport)

-	err = s.d.Start("--insecure-registry", reg.hostport, "--disable-legacy-registry=true")
-	c.Assert(err, check.IsNil)
+	s.d.Start(c, "--insecure-registry", reg.hostport, "--disable-legacy-registry=true")

 	dockerfileName, cleanup, err := makefile(fmt.Sprintf("FROM %s/busybox", reg.hostport))
 	c.Assert(err, check.IsNil, check.Commentf("Unable to create test dockerfile"))
@@ -98,8 +97,7 @@ func (s *DockerRegistrySuite) TestV1(c *check.C) {
 		v1Repo++
 	})

-	err = s.d.Start("--insecure-registry", reg.hostport, "--disable-legacy-registry=false")
-	c.Assert(err, check.IsNil)
+	s.d.Start(c, "--insecure-registry", reg.hostport, "--disable-legacy-registry=false")

 	dockerfileName, cleanup, err := makefile(fmt.Sprintf("FROM %s/busybox", reg.hostport))
 	c.Assert(err, check.IsNil, check.Commentf("Unable to create test dockerfile"))

@@ -61,7 +61,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkMacvlanPersistance(c *check.C) {
 	dockerCmd(c, "network", "create", "--driver=macvlan", "-o", "parent=dm-dummy0.60", "dm-persist")
 	assertNwIsAvailable(c, "dm-persist")
 	// Restart docker daemon to test the config has persisted to disk
-	s.d.Restart()
+	s.d.Restart(c)
 	// verify network is recreated from persistence
 	assertNwIsAvailable(c, "dm-persist")
 	// cleanup the master interface that also collects the slave dev
@@ -80,7 +80,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkIpvlanPersistance(c *check.C) {
 	dockerCmd(c, "network", "create", "--driver=ipvlan", "-o", "parent=di-dummy0.70", "di-persist")
 	assertNwIsAvailable(c, "di-persist")
 	// Restart docker daemon to test the config has persisted to disk
-	s.d.Restart()
+	s.d.Restart(c)
 	// verify network is recreated from persistence
 	assertNwIsAvailable(c, "di-persist")
 	// cleanup the master interface that also collects the slave dev

|
|||
s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{
|
||||
Experimental: experimentalDaemon,
|
||||
})
|
||||
err := s.d.Start()
|
||||
c.Assert(err, checker.IsNil, check.Commentf("starting push/pull test daemon: %v", err))
|
||||
s.d.Start(c)
|
||||
}
|
||||
|
||||
// TearDownSuite stops the suite daemon.
|
||||
func (s *DockerHubPullSuite) TearDownSuite(c *check.C) {
|
||||
if s.d != nil {
|
||||
err := s.d.Stop()
|
||||
c.Assert(err, checker.IsNil, check.Commentf("stopping push/pull test daemon: %v", err))
|
||||
s.d.Stop(c)
|
||||
}
|
||||
}
|
||||
|
||||