integration/internal/container: use consistent name for api-client
The `client` variable was colliding with the `client` import. In some cases, the confusing `cli` name was used (it's not the "cli"). Given that such names can easily start spreading (through copy/paste, or "code by example"), let's make a one-time pass through all of them in this package to use the same name.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
This commit is contained in:
parent
26be2bc6b9
commit
3cb52a6359
4 changed files with 28 additions and 28 deletions
|
@ -28,7 +28,7 @@ type TestContainerConfig struct {
|
|||
}
|
||||
|
||||
// create creates a container with the specified options
|
||||
func create(ctx context.Context, t *testing.T, client client.APIClient, ops ...func(*TestContainerConfig)) (container.CreateResponse, error) {
|
||||
func create(ctx context.Context, t *testing.T, apiClient client.APIClient, ops ...func(*TestContainerConfig)) (container.CreateResponse, error) {
|
||||
t.Helper()
|
||||
cmd := []string{"top"}
|
||||
if runtime.GOOS == "windows" {
|
||||
|
@ -47,30 +47,30 @@ func create(ctx context.Context, t *testing.T, client client.APIClient, ops ...f
|
|||
op(config)
|
||||
}
|
||||
|
||||
return client.ContainerCreate(ctx, config.Config, config.HostConfig, config.NetworkingConfig, config.Platform, config.Name)
|
||||
return apiClient.ContainerCreate(ctx, config.Config, config.HostConfig, config.NetworkingConfig, config.Platform, config.Name)
|
||||
}
|
||||
|
||||
// Create creates a container with the specified options, asserting that there was no error
|
||||
func Create(ctx context.Context, t *testing.T, client client.APIClient, ops ...func(*TestContainerConfig)) string {
|
||||
func Create(ctx context.Context, t *testing.T, apiClient client.APIClient, ops ...func(*TestContainerConfig)) string {
|
||||
t.Helper()
|
||||
c, err := create(ctx, t, client, ops...)
|
||||
c, err := create(ctx, t, apiClient, ops...)
|
||||
assert.NilError(t, err)
|
||||
|
||||
return c.ID
|
||||
}
|
||||
|
||||
// CreateExpectingErr creates a container, expecting an error with the specified message
|
||||
func CreateExpectingErr(ctx context.Context, t *testing.T, client client.APIClient, errMsg string, ops ...func(*TestContainerConfig)) {
|
||||
_, err := create(ctx, t, client, ops...)
|
||||
func CreateExpectingErr(ctx context.Context, t *testing.T, apiClient client.APIClient, errMsg string, ops ...func(*TestContainerConfig)) {
|
||||
_, err := create(ctx, t, apiClient, ops...)
|
||||
assert.ErrorContains(t, err, errMsg)
|
||||
}
|
||||
|
||||
// Run creates and start a container with the specified options
|
||||
func Run(ctx context.Context, t *testing.T, client client.APIClient, ops ...func(*TestContainerConfig)) string {
|
||||
func Run(ctx context.Context, t *testing.T, apiClient client.APIClient, ops ...func(*TestContainerConfig)) string {
|
||||
t.Helper()
|
||||
id := Create(ctx, t, client, ops...)
|
||||
id := Create(ctx, t, apiClient, ops...)
|
||||
|
||||
err := client.ContainerStart(ctx, id, types.ContainerStartOptions{})
|
||||
err := apiClient.ContainerStart(ctx, id, types.ContainerStartOptions{})
|
||||
assert.NilError(t, err)
|
||||
|
||||
return id
|
||||
|
@ -83,23 +83,23 @@ type RunResult struct {
|
|||
Stderr *bytes.Buffer
|
||||
}
|
||||
|
||||
func RunAttach(ctx context.Context, t *testing.T, client client.APIClient, ops ...func(config *TestContainerConfig)) RunResult {
|
||||
func RunAttach(ctx context.Context, t *testing.T, apiClient client.APIClient, ops ...func(config *TestContainerConfig)) RunResult {
|
||||
t.Helper()
|
||||
|
||||
ops = append(ops, func(c *TestContainerConfig) {
|
||||
c.Config.AttachStdout = true
|
||||
c.Config.AttachStderr = true
|
||||
})
|
||||
id := Create(ctx, t, client, ops...)
|
||||
id := Create(ctx, t, apiClient, ops...)
|
||||
|
||||
aresp, err := client.ContainerAttach(ctx, id, types.ContainerAttachOptions{
|
||||
aresp, err := apiClient.ContainerAttach(ctx, id, types.ContainerAttachOptions{
|
||||
Stream: true,
|
||||
Stdout: true,
|
||||
Stderr: true,
|
||||
})
|
||||
assert.NilError(t, err)
|
||||
|
||||
err = client.ContainerStart(ctx, id, types.ContainerStartOptions{})
|
||||
err = apiClient.ContainerStart(ctx, id, types.ContainerStartOptions{})
|
||||
assert.NilError(t, err)
|
||||
|
||||
s, err := demultiplexStreams(ctx, aresp)
|
||||
|
@ -109,7 +109,7 @@ func RunAttach(ctx context.Context, t *testing.T, client client.APIClient, ops .
|
|||
|
||||
// Inspect to get the exit code. A new context is used here to make sure that if the context passed as argument as
|
||||
// reached timeout during the demultiplexStream call, we still return a RunResult.
|
||||
resp, err := client.ContainerInspect(context.Background(), id)
|
||||
resp, err := apiClient.ContainerInspect(context.Background(), id)
|
||||
assert.NilError(t, err)
|
||||
|
||||
return RunResult{ContainerID: id, ExitCode: resp.State.ExitCode, Stdout: &s.stdout, Stderr: &s.stderr}
|
||||
|
|
|
@ -34,7 +34,7 @@ func (res *ExecResult) Combined() string {
|
|||
// containing stdout, stderr, and exit code. Note:
|
||||
// - this is a synchronous operation;
|
||||
// - cmd stdin is closed.
|
||||
func Exec(ctx context.Context, cli client.APIClient, id string, cmd []string, ops ...func(*types.ExecConfig)) (ExecResult, error) {
|
||||
func Exec(ctx context.Context, apiClient client.APIClient, id string, cmd []string, ops ...func(*types.ExecConfig)) (ExecResult, error) {
|
||||
// prepare exec
|
||||
execConfig := types.ExecConfig{
|
||||
AttachStdout: true,
|
||||
|
@ -46,14 +46,14 @@ func Exec(ctx context.Context, cli client.APIClient, id string, cmd []string, op
|
|||
op(&execConfig)
|
||||
}
|
||||
|
||||
cresp, err := cli.ContainerExecCreate(ctx, id, execConfig)
|
||||
cresp, err := apiClient.ContainerExecCreate(ctx, id, execConfig)
|
||||
if err != nil {
|
||||
return ExecResult{}, err
|
||||
}
|
||||
execID := cresp.ID
|
||||
|
||||
// run it, with stdout/stderr attached
|
||||
aresp, err := cli.ContainerExecAttach(ctx, execID, types.ExecStartCheck{})
|
||||
aresp, err := apiClient.ContainerExecAttach(ctx, execID, types.ExecStartCheck{})
|
||||
if err != nil {
|
||||
return ExecResult{}, err
|
||||
}
|
||||
|
@ -65,7 +65,7 @@ func Exec(ctx context.Context, cli client.APIClient, id string, cmd []string, op
|
|||
}
|
||||
|
||||
// get the exit code
|
||||
iresp, err := cli.ContainerExecInspect(ctx, execID)
|
||||
iresp, err := apiClient.ContainerExecInspect(ctx, execID)
|
||||
if err != nil {
|
||||
return ExecResult{}, err
|
||||
}
|
||||
|
|
|
@ -11,9 +11,9 @@ import (
|
|||
)
|
||||
|
||||
// GetContainerNS gets the value of the specified namespace of a container
|
||||
func GetContainerNS(ctx context.Context, t *testing.T, client client.APIClient, cID, nsName string) string {
|
||||
func GetContainerNS(ctx context.Context, t *testing.T, apiClient client.APIClient, cID, nsName string) string {
|
||||
t.Helper()
|
||||
res, err := Exec(ctx, client, cID, []string{"readlink", "/proc/self/ns/" + nsName})
|
||||
res, err := Exec(ctx, apiClient, cID, []string{"readlink", "/proc/self/ns/" + nsName})
|
||||
assert.NilError(t, err)
|
||||
assert.Assert(t, is.Len(res.Stderr(), 0))
|
||||
assert.Equal(t, 0, res.ExitCode)
|
||||
|
|
|
@ -11,9 +11,9 @@ import (
|
|||
)
|
||||
|
||||
// IsStopped verifies the container is in stopped state.
|
||||
func IsStopped(ctx context.Context, client client.APIClient, containerID string) func(log poll.LogT) poll.Result {
|
||||
func IsStopped(ctx context.Context, apiClient client.APIClient, containerID string) func(log poll.LogT) poll.Result {
|
||||
return func(log poll.LogT) poll.Result {
|
||||
inspect, err := client.ContainerInspect(ctx, containerID)
|
||||
inspect, err := apiClient.ContainerInspect(ctx, containerID)
|
||||
|
||||
switch {
|
||||
case err != nil:
|
||||
|
@ -27,9 +27,9 @@ func IsStopped(ctx context.Context, client client.APIClient, containerID string)
|
|||
}
|
||||
|
||||
// IsInState verifies the container is in one of the specified state, e.g., "running", "exited", etc.
|
||||
func IsInState(ctx context.Context, client client.APIClient, containerID string, state ...string) func(log poll.LogT) poll.Result {
|
||||
func IsInState(ctx context.Context, apiClient client.APIClient, containerID string, state ...string) func(log poll.LogT) poll.Result {
|
||||
return func(log poll.LogT) poll.Result {
|
||||
inspect, err := client.ContainerInspect(ctx, containerID)
|
||||
inspect, err := apiClient.ContainerInspect(ctx, containerID)
|
||||
if err != nil {
|
||||
return poll.Error(err)
|
||||
}
|
||||
|
@ -43,9 +43,9 @@ func IsInState(ctx context.Context, client client.APIClient, containerID string,
|
|||
}
|
||||
|
||||
// IsSuccessful verifies state.Status == "exited" && state.ExitCode == 0
|
||||
func IsSuccessful(ctx context.Context, client client.APIClient, containerID string) func(log poll.LogT) poll.Result {
|
||||
func IsSuccessful(ctx context.Context, apiClient client.APIClient, containerID string) func(log poll.LogT) poll.Result {
|
||||
return func(log poll.LogT) poll.Result {
|
||||
inspect, err := client.ContainerInspect(ctx, containerID)
|
||||
inspect, err := apiClient.ContainerInspect(ctx, containerID)
|
||||
if err != nil {
|
||||
return poll.Error(err)
|
||||
}
|
||||
|
@ -60,9 +60,9 @@ func IsSuccessful(ctx context.Context, client client.APIClient, containerID stri
|
|||
}
|
||||
|
||||
// IsRemoved verifies the container has been removed
|
||||
func IsRemoved(ctx context.Context, cli client.APIClient, containerID string) func(log poll.LogT) poll.Result {
|
||||
func IsRemoved(ctx context.Context, apiClient client.APIClient, containerID string) func(log poll.LogT) poll.Result {
|
||||
return func(log poll.LogT) poll.Result {
|
||||
inspect, err := cli.ContainerInspect(ctx, containerID)
|
||||
inspect, err := apiClient.ContainerInspect(ctx, containerID)
|
||||
if err != nil {
|
||||
if errdefs.IsNotFound(err) {
|
||||
return poll.Success()
|
||||
|
|
Loading…
Add table
Reference in a new issue