Merge pull request #30452 from dnephin/cherry-compose-refactor
Compose fixes for 1.13.1
Commit 426c4cb747: 49 changed files with 5409 additions and 1058 deletions
|
@ -237,7 +237,7 @@ RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \
|
|||
# Please edit hack/dockerfile/install-binaries.sh to update them.
|
||||
COPY hack/dockerfile/binaries-commits /tmp/binaries-commits
|
||||
COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh
|
||||
RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy
|
||||
RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy bindata
|
||||
|
||||
# Wrap all commands in the "docker-in-docker" script to allow nested containers
|
||||
ENTRYPOINT ["hack/dind"]
|
||||
|
|
|
@ -50,6 +50,16 @@ const (
|
|||
PropagationSlave Propagation = "slave"
|
||||
)
|
||||
|
||||
// Propagations is the list of all valid mount propagations
|
||||
var Propagations = []Propagation{
|
||||
PropagationRPrivate,
|
||||
PropagationPrivate,
|
||||
PropagationRShared,
|
||||
PropagationShared,
|
||||
PropagationRSlave,
|
||||
PropagationSlave,
|
||||
}
|
||||
|
||||
// BindOptions defines options specific to mounts of type "bind".
|
||||
type BindOptions struct {
|
||||
Propagation Propagation `json:",omitempty"`
|
||||
|
|
|
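The new mount.Propagations slice gives callers a canonical list to validate against instead of substring matching. A minimal sketch of that use follows; isValidPropagation is a hypothetical helper name, and the refactored getBindOptions in cli/compose/convert/volume.go later in this diff follows the same pattern.

package main

import "github.com/docker/docker/api/types/mount"

// isValidPropagation is a hypothetical helper: a mode flag is a propagation
// only if it exactly matches one of the values in the new mount.Propagations
// list, rather than merely containing "private", "shared", or "slave".
func isValidPropagation(flag string) bool {
	for _, p := range mount.Propagations {
		if mount.Propagation(flag) == p {
			return true
		}
	}
	return false
}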
@ -6,24 +6,26 @@ import (
|
|||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
"github.com/docker/docker/cli/compose/convert"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/docker/docker/opts"
|
||||
)
|
||||
|
||||
const (
|
||||
labelNamespace = "com.docker.stack.namespace"
|
||||
)
|
||||
|
||||
func getStackLabels(namespace string, labels map[string]string) map[string]string {
|
||||
if labels == nil {
|
||||
labels = make(map[string]string)
|
||||
}
|
||||
labels[labelNamespace] = namespace
|
||||
return labels
|
||||
}
|
||||
|
||||
func getStackFilter(namespace string) filters.Args {
|
||||
filter := filters.NewArgs()
|
||||
filter.Add("label", labelNamespace+"="+namespace)
|
||||
filter.Add("label", convert.LabelNamespace+"="+namespace)
|
||||
return filter
|
||||
}
|
||||
|
||||
func getStackFilterFromOpt(namespace string, opt opts.FilterOpt) filters.Args {
|
||||
filter := opt.Value()
|
||||
filter.Add("label", convert.LabelNamespace+"="+namespace)
|
||||
return filter
|
||||
}
|
||||
|
||||
func getAllStacksFilter() filters.Args {
|
||||
filter := filters.NewArgs()
|
||||
filter.Add("label", convert.LabelNamespace)
|
||||
return filter
|
||||
}
|
||||
|
||||
|
@ -46,11 +48,3 @@ func getStackNetworks(
|
|||
ctx,
|
||||
types.NetworkListOptions{Filters: getStackFilter(namespace)})
|
||||
}
|
||||
|
||||
type namespace struct {
|
||||
name string
|
||||
}
|
||||
|
||||
func (n namespace) scope(name string) string {
|
||||
return n.name + "_" + name
|
||||
}
|
||||
|
|
|
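The stack helpers above all reduce to a single label filter keyed on convert.LabelNamespace. A minimal sketch of that pattern, assuming a context, an API client, and a stack name; only the filter construction comes from this change.

package main

import (
	"golang.org/x/net/context"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/cli/compose/convert"
	"github.com/docker/docker/client"
)

// listStackServices sketches what getStackFilter enables: every stack
// resource carries the com.docker.stack.namespace label, so one label
// filter selects a single stack's services.
func listStackServices(ctx context.Context, apiClient client.APIClient, namespace string) ([]swarm.Service, error) {
	filter := filters.NewArgs()
	filter.Add("label", convert.LabelNamespace+"="+namespace)
	return apiClient.ServiceList(ctx, types.ServiceListOptions{Filters: filter})
}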
@ -7,21 +7,15 @@ import (
|
|||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aanand/compose-file/loader"
|
||||
composetypes "github.com/aanand/compose-file/types"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/mount"
|
||||
networktypes "github.com/docker/docker/api/types/network"
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
"github.com/docker/docker/cli"
|
||||
"github.com/docker/docker/cli/command"
|
||||
"github.com/docker/docker/cli/compose/convert"
|
||||
"github.com/docker/docker/cli/compose/loader"
|
||||
composetypes "github.com/docker/docker/cli/compose/types"
|
||||
dockerclient "github.com/docker/docker/client"
|
||||
"github.com/docker/docker/opts"
|
||||
runconfigopts "github.com/docker/docker/runconfig/opts"
|
||||
"github.com/docker/go-connections/nat"
|
||||
"github.com/spf13/cobra"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
@ -120,17 +114,17 @@ func deployCompose(ctx context.Context, dockerCli *command.DockerCli, opts deplo
|
|||
return err
|
||||
}
|
||||
|
||||
namespace := namespace{name: opts.namespace}
|
||||
namespace := convert.NewNamespace(opts.namespace)
|
||||
|
||||
serviceNetworks := getServicesDeclaredNetworks(config.Services)
|
||||
networks, externalNetworks := convertNetworks(namespace, config.Networks, serviceNetworks)
|
||||
networks, externalNetworks := convert.Networks(namespace, config.Networks, serviceNetworks)
|
||||
if err := validateExternalNetworks(ctx, dockerCli, externalNetworks); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := createNetworks(ctx, dockerCli, namespace, networks); err != nil {
|
||||
return err
|
||||
}
|
||||
services, err := convertServices(namespace, config)
|
||||
services, err := convert.Services(namespace, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
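With this hunk the deploy command only orchestrates, and the compose-to-swarm translation moves into the cli/compose/convert package. A condensed sketch of that division of labour; convertStack, stackName, and serviceNets are assumed names, and error handling is trimmed.

package main

import (
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/cli/compose/convert"
	composetypes "github.com/docker/docker/cli/compose/types"
)

// convertStack is a sketch (not the CLI code) of what deployCompose now
// delegates to the convert package: one Namespace scopes every name, and
// Networks/Services produce the engine API objects to create.
func convertStack(stackName string, config *composetypes.Config, serviceNets map[string]struct{}) (
	map[string]types.NetworkCreate, map[string]swarm.ServiceSpec, error) {

	namespace := convert.NewNamespace(stackName)
	// The second return value lists external networks, which the real code
	// validates against the engine before creating anything.
	networks, _ := convert.Networks(namespace, config.Networks, serviceNets)
	services, err := convert.Services(namespace, config)
	if err != nil {
		return nil, nil, err
	}
	return networks, services, nil
}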
@ -192,50 +186,6 @@ func getConfigFile(filename string) (*composetypes.ConfigFile, error) {
|
|||
}, nil
|
||||
}
|
||||
|
||||
func convertNetworks(
|
||||
namespace namespace,
|
||||
networks map[string]composetypes.NetworkConfig,
|
||||
servicesNetworks map[string]struct{},
|
||||
) (map[string]types.NetworkCreate, []string) {
|
||||
if networks == nil {
|
||||
networks = make(map[string]composetypes.NetworkConfig)
|
||||
}
|
||||
|
||||
externalNetworks := []string{}
|
||||
result := make(map[string]types.NetworkCreate)
|
||||
|
||||
for internalName := range servicesNetworks {
|
||||
network := networks[internalName]
|
||||
if network.External.External {
|
||||
externalNetworks = append(externalNetworks, network.External.Name)
|
||||
continue
|
||||
}
|
||||
|
||||
createOpts := types.NetworkCreate{
|
||||
Labels: getStackLabels(namespace.name, network.Labels),
|
||||
Driver: network.Driver,
|
||||
Options: network.DriverOpts,
|
||||
}
|
||||
|
||||
if network.Ipam.Driver != "" || len(network.Ipam.Config) > 0 {
|
||||
createOpts.IPAM = &networktypes.IPAM{}
|
||||
}
|
||||
|
||||
if network.Ipam.Driver != "" {
|
||||
createOpts.IPAM.Driver = network.Ipam.Driver
|
||||
}
|
||||
for _, ipamConfig := range network.Ipam.Config {
|
||||
config := networktypes.IPAMConfig{
|
||||
Subnet: ipamConfig.Subnet,
|
||||
}
|
||||
createOpts.IPAM.Config = append(createOpts.IPAM.Config, config)
|
||||
}
|
||||
result[internalName] = createOpts
|
||||
}
|
||||
|
||||
return result, externalNetworks
|
||||
}
|
||||
|
||||
func validateExternalNetworks(
|
||||
ctx context.Context,
|
||||
dockerCli *command.DockerCli,
|
||||
|
@ -261,12 +211,12 @@ func validateExternalNetworks(
|
|||
func createNetworks(
|
||||
ctx context.Context,
|
||||
dockerCli *command.DockerCli,
|
||||
namespace namespace,
|
||||
namespace convert.Namespace,
|
||||
networks map[string]types.NetworkCreate,
|
||||
) error {
|
||||
client := dockerCli.Client()
|
||||
|
||||
existingNetworks, err := getStackNetworks(ctx, client, namespace.name)
|
||||
existingNetworks, err := getStackNetworks(ctx, client, namespace.Name())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -277,7 +227,7 @@ func createNetworks(
|
|||
}
|
||||
|
||||
for internalName, createOpts := range networks {
|
||||
name := namespace.scope(internalName)
|
||||
name := namespace.Scope(internalName)
|
||||
if _, exists := existingNetworkMap[name]; exists {
|
||||
continue
|
||||
}
|
||||
|
@ -295,176 +245,17 @@ func createNetworks(
|
|||
return nil
|
||||
}
|
||||
|
||||
func convertServiceNetworks(
|
||||
networks map[string]*composetypes.ServiceNetworkConfig,
|
||||
networkConfigs map[string]composetypes.NetworkConfig,
|
||||
namespace namespace,
|
||||
name string,
|
||||
) ([]swarm.NetworkAttachmentConfig, error) {
|
||||
if len(networks) == 0 {
|
||||
return []swarm.NetworkAttachmentConfig{
|
||||
{
|
||||
Target: namespace.scope("default"),
|
||||
Aliases: []string{name},
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
nets := []swarm.NetworkAttachmentConfig{}
|
||||
for networkName, network := range networks {
|
||||
networkConfig, ok := networkConfigs[networkName]
|
||||
if !ok {
|
||||
return []swarm.NetworkAttachmentConfig{}, fmt.Errorf("invalid network: %s", networkName)
|
||||
}
|
||||
var aliases []string
|
||||
if network != nil {
|
||||
aliases = network.Aliases
|
||||
}
|
||||
target := namespace.scope(networkName)
|
||||
if networkConfig.External.External {
|
||||
target = networkConfig.External.Name
|
||||
}
|
||||
nets = append(nets, swarm.NetworkAttachmentConfig{
|
||||
Target: target,
|
||||
Aliases: append(aliases, name),
|
||||
})
|
||||
}
|
||||
return nets, nil
|
||||
}
|
||||
|
||||
func convertVolumes(
|
||||
serviceVolumes []string,
|
||||
stackVolumes map[string]composetypes.VolumeConfig,
|
||||
namespace namespace,
|
||||
) ([]mount.Mount, error) {
|
||||
var mounts []mount.Mount
|
||||
|
||||
for _, volumeSpec := range serviceVolumes {
|
||||
mount, err := convertVolumeToMount(volumeSpec, stackVolumes, namespace)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mounts = append(mounts, mount)
|
||||
}
|
||||
return mounts, nil
|
||||
}
|
||||
|
||||
func convertVolumeToMount(
|
||||
volumeSpec string,
|
||||
stackVolumes map[string]composetypes.VolumeConfig,
|
||||
namespace namespace,
|
||||
) (mount.Mount, error) {
|
||||
var source, target string
|
||||
var mode []string
|
||||
|
||||
// TODO: split Windows path mappings properly
|
||||
parts := strings.SplitN(volumeSpec, ":", 3)
|
||||
|
||||
for _, part := range parts {
|
||||
if strings.TrimSpace(part) == "" {
|
||||
return mount.Mount{}, fmt.Errorf("invalid volume: %s", volumeSpec)
|
||||
}
|
||||
}
|
||||
|
||||
switch len(parts) {
|
||||
case 3:
|
||||
source = parts[0]
|
||||
target = parts[1]
|
||||
mode = strings.Split(parts[2], ",")
|
||||
case 2:
|
||||
source = parts[0]
|
||||
target = parts[1]
|
||||
case 1:
|
||||
target = parts[0]
|
||||
}
|
||||
|
||||
if source == "" {
|
||||
// Anonymous volume
|
||||
return mount.Mount{
|
||||
Type: mount.TypeVolume,
|
||||
Target: target,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// TODO: catch Windows paths here
|
||||
if strings.HasPrefix(source, "/") {
|
||||
return mount.Mount{
|
||||
Type: mount.TypeBind,
|
||||
Source: source,
|
||||
Target: target,
|
||||
ReadOnly: isReadOnly(mode),
|
||||
BindOptions: getBindOptions(mode),
|
||||
}, nil
|
||||
}
|
||||
|
||||
stackVolume, exists := stackVolumes[source]
|
||||
if !exists {
|
||||
return mount.Mount{}, fmt.Errorf("undefined volume: %s", source)
|
||||
}
|
||||
|
||||
var volumeOptions *mount.VolumeOptions
|
||||
if stackVolume.External.Name != "" {
|
||||
source = stackVolume.External.Name
|
||||
} else {
|
||||
volumeOptions = &mount.VolumeOptions{
|
||||
Labels: getStackLabels(namespace.name, stackVolume.Labels),
|
||||
NoCopy: isNoCopy(mode),
|
||||
}
|
||||
|
||||
if stackVolume.Driver != "" {
|
||||
volumeOptions.DriverConfig = &mount.Driver{
|
||||
Name: stackVolume.Driver,
|
||||
Options: stackVolume.DriverOpts,
|
||||
}
|
||||
}
|
||||
source = namespace.scope(source)
|
||||
}
|
||||
return mount.Mount{
|
||||
Type: mount.TypeVolume,
|
||||
Source: source,
|
||||
Target: target,
|
||||
ReadOnly: isReadOnly(mode),
|
||||
VolumeOptions: volumeOptions,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func modeHas(mode []string, field string) bool {
|
||||
for _, item := range mode {
|
||||
if item == field {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isReadOnly(mode []string) bool {
|
||||
return modeHas(mode, "ro")
|
||||
}
|
||||
|
||||
func isNoCopy(mode []string) bool {
|
||||
return modeHas(mode, "nocopy")
|
||||
}
|
||||
|
||||
func getBindOptions(mode []string) *mount.BindOptions {
|
||||
for _, item := range mode {
|
||||
if strings.Contains(item, "private") || strings.Contains(item, "shared") || strings.Contains(item, "slave") {
|
||||
return &mount.BindOptions{Propagation: mount.Propagation(item)}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func deployServices(
|
||||
ctx context.Context,
|
||||
dockerCli *command.DockerCli,
|
||||
services map[string]swarm.ServiceSpec,
|
||||
namespace namespace,
|
||||
namespace convert.Namespace,
|
||||
sendAuth bool,
|
||||
) error {
|
||||
apiClient := dockerCli.Client()
|
||||
out := dockerCli.Out()
|
||||
|
||||
existingServices, err := getServices(ctx, apiClient, namespace.name)
|
||||
existingServices, err := getServices(ctx, apiClient, namespace.Name())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -475,7 +266,7 @@ func deployServices(
|
|||
}
|
||||
|
||||
for internalName, serviceSpec := range services {
|
||||
name := namespace.scope(internalName)
|
||||
name := namespace.Scope(internalName)
|
||||
|
||||
encodedAuth := ""
|
||||
if sendAuth {
|
||||
|
@ -523,287 +314,3 @@ func deployServices(
|
|||
|
||||
return nil
|
||||
}
|
||||
|
||||
func convertServices(
|
||||
namespace namespace,
|
||||
config *composetypes.Config,
|
||||
) (map[string]swarm.ServiceSpec, error) {
|
||||
result := make(map[string]swarm.ServiceSpec)
|
||||
|
||||
services := config.Services
|
||||
volumes := config.Volumes
|
||||
networks := config.Networks
|
||||
|
||||
for _, service := range services {
|
||||
serviceSpec, err := convertService(namespace, service, networks, volumes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
result[service.Name] = serviceSpec
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func convertService(
|
||||
namespace namespace,
|
||||
service composetypes.ServiceConfig,
|
||||
networkConfigs map[string]composetypes.NetworkConfig,
|
||||
volumes map[string]composetypes.VolumeConfig,
|
||||
) (swarm.ServiceSpec, error) {
|
||||
name := namespace.scope(service.Name)
|
||||
|
||||
endpoint, err := convertEndpointSpec(service.Ports)
|
||||
if err != nil {
|
||||
return swarm.ServiceSpec{}, err
|
||||
}
|
||||
|
||||
mode, err := convertDeployMode(service.Deploy.Mode, service.Deploy.Replicas)
|
||||
if err != nil {
|
||||
return swarm.ServiceSpec{}, err
|
||||
}
|
||||
|
||||
mounts, err := convertVolumes(service.Volumes, volumes, namespace)
|
||||
if err != nil {
|
||||
// TODO: better error message (include service name)
|
||||
return swarm.ServiceSpec{}, err
|
||||
}
|
||||
|
||||
resources, err := convertResources(service.Deploy.Resources)
|
||||
if err != nil {
|
||||
return swarm.ServiceSpec{}, err
|
||||
}
|
||||
|
||||
restartPolicy, err := convertRestartPolicy(
|
||||
service.Restart, service.Deploy.RestartPolicy)
|
||||
if err != nil {
|
||||
return swarm.ServiceSpec{}, err
|
||||
}
|
||||
|
||||
healthcheck, err := convertHealthcheck(service.HealthCheck)
|
||||
if err != nil {
|
||||
return swarm.ServiceSpec{}, err
|
||||
}
|
||||
|
||||
networks, err := convertServiceNetworks(service.Networks, networkConfigs, namespace, service.Name)
|
||||
if err != nil {
|
||||
return swarm.ServiceSpec{}, err
|
||||
}
|
||||
|
||||
var logDriver *swarm.Driver
|
||||
if service.Logging != nil {
|
||||
logDriver = &swarm.Driver{
|
||||
Name: service.Logging.Driver,
|
||||
Options: service.Logging.Options,
|
||||
}
|
||||
}
|
||||
|
||||
serviceSpec := swarm.ServiceSpec{
|
||||
Annotations: swarm.Annotations{
|
||||
Name: name,
|
||||
Labels: getStackLabels(namespace.name, service.Deploy.Labels),
|
||||
},
|
||||
TaskTemplate: swarm.TaskSpec{
|
||||
ContainerSpec: swarm.ContainerSpec{
|
||||
Image: service.Image,
|
||||
Command: service.Entrypoint,
|
||||
Args: service.Command,
|
||||
Hostname: service.Hostname,
|
||||
Hosts: convertExtraHosts(service.ExtraHosts),
|
||||
Healthcheck: healthcheck,
|
||||
Env: convertEnvironment(service.Environment),
|
||||
Labels: getStackLabels(namespace.name, service.Labels),
|
||||
Dir: service.WorkingDir,
|
||||
User: service.User,
|
||||
Mounts: mounts,
|
||||
StopGracePeriod: service.StopGracePeriod,
|
||||
TTY: service.Tty,
|
||||
OpenStdin: service.StdinOpen,
|
||||
},
|
||||
LogDriver: logDriver,
|
||||
Resources: resources,
|
||||
RestartPolicy: restartPolicy,
|
||||
Placement: &swarm.Placement{
|
||||
Constraints: service.Deploy.Placement.Constraints,
|
||||
},
|
||||
},
|
||||
EndpointSpec: endpoint,
|
||||
Mode: mode,
|
||||
Networks: networks,
|
||||
UpdateConfig: convertUpdateConfig(service.Deploy.UpdateConfig),
|
||||
}
|
||||
|
||||
return serviceSpec, nil
|
||||
}
|
||||
|
||||
func convertExtraHosts(extraHosts map[string]string) []string {
|
||||
hosts := []string{}
|
||||
for host, ip := range extraHosts {
|
||||
hosts = append(hosts, fmt.Sprintf("%s %s", ip, host))
|
||||
}
|
||||
return hosts
|
||||
}
|
||||
|
||||
func convertHealthcheck(healthcheck *composetypes.HealthCheckConfig) (*container.HealthConfig, error) {
|
||||
if healthcheck == nil {
|
||||
return nil, nil
|
||||
}
|
||||
var (
|
||||
err error
|
||||
timeout, interval time.Duration
|
||||
retries int
|
||||
)
|
||||
if healthcheck.Disable {
|
||||
if len(healthcheck.Test) != 0 {
|
||||
return nil, fmt.Errorf("command and disable key can't be set at the same time")
|
||||
}
|
||||
return &container.HealthConfig{
|
||||
Test: []string{"NONE"},
|
||||
}, nil
|
||||
|
||||
}
|
||||
if healthcheck.Timeout != "" {
|
||||
timeout, err = time.ParseDuration(healthcheck.Timeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if healthcheck.Interval != "" {
|
||||
interval, err = time.ParseDuration(healthcheck.Interval)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if healthcheck.Retries != nil {
|
||||
retries = int(*healthcheck.Retries)
|
||||
}
|
||||
return &container.HealthConfig{
|
||||
Test: healthcheck.Test,
|
||||
Timeout: timeout,
|
||||
Interval: interval,
|
||||
Retries: retries,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func convertRestartPolicy(restart string, source *composetypes.RestartPolicy) (*swarm.RestartPolicy, error) {
|
||||
// TODO: log if restart is being ignored
|
||||
if source == nil {
|
||||
policy, err := runconfigopts.ParseRestartPolicy(restart)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// TODO: is this an accurate convertion?
|
||||
switch {
|
||||
case policy.IsNone():
|
||||
return nil, nil
|
||||
case policy.IsAlways(), policy.IsUnlessStopped():
|
||||
return &swarm.RestartPolicy{
|
||||
Condition: swarm.RestartPolicyConditionAny,
|
||||
}, nil
|
||||
case policy.IsOnFailure():
|
||||
attempts := uint64(policy.MaximumRetryCount)
|
||||
return &swarm.RestartPolicy{
|
||||
Condition: swarm.RestartPolicyConditionOnFailure,
|
||||
MaxAttempts: &attempts,
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
return &swarm.RestartPolicy{
|
||||
Condition: swarm.RestartPolicyCondition(source.Condition),
|
||||
Delay: source.Delay,
|
||||
MaxAttempts: source.MaxAttempts,
|
||||
Window: source.Window,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func convertUpdateConfig(source *composetypes.UpdateConfig) *swarm.UpdateConfig {
|
||||
if source == nil {
|
||||
return nil
|
||||
}
|
||||
parallel := uint64(1)
|
||||
if source.Parallelism != nil {
|
||||
parallel = *source.Parallelism
|
||||
}
|
||||
return &swarm.UpdateConfig{
|
||||
Parallelism: parallel,
|
||||
Delay: source.Delay,
|
||||
FailureAction: source.FailureAction,
|
||||
Monitor: source.Monitor,
|
||||
MaxFailureRatio: source.MaxFailureRatio,
|
||||
}
|
||||
}
|
||||
|
||||
func convertResources(source composetypes.Resources) (*swarm.ResourceRequirements, error) {
|
||||
resources := &swarm.ResourceRequirements{}
|
||||
var err error
|
||||
if source.Limits != nil {
|
||||
var cpus int64
|
||||
if source.Limits.NanoCPUs != "" {
|
||||
cpus, err = opts.ParseCPUs(source.Limits.NanoCPUs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
resources.Limits = &swarm.Resources{
|
||||
NanoCPUs: cpus,
|
||||
MemoryBytes: int64(source.Limits.MemoryBytes),
|
||||
}
|
||||
}
|
||||
if source.Reservations != nil {
|
||||
var cpus int64
|
||||
if source.Reservations.NanoCPUs != "" {
|
||||
cpus, err = opts.ParseCPUs(source.Reservations.NanoCPUs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
resources.Reservations = &swarm.Resources{
|
||||
NanoCPUs: cpus,
|
||||
MemoryBytes: int64(source.Reservations.MemoryBytes),
|
||||
}
|
||||
}
|
||||
return resources, nil
|
||||
}
|
||||
|
||||
func convertEndpointSpec(source []string) (*swarm.EndpointSpec, error) {
|
||||
portConfigs := []swarm.PortConfig{}
|
||||
ports, portBindings, err := nat.ParsePortSpecs(source)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for port := range ports {
|
||||
portConfigs = append(
|
||||
portConfigs,
|
||||
opts.ConvertPortToPortConfig(port, portBindings)...)
|
||||
}
|
||||
|
||||
return &swarm.EndpointSpec{Ports: portConfigs}, nil
|
||||
}
|
||||
|
||||
func convertEnvironment(source map[string]string) []string {
|
||||
var output []string
|
||||
|
||||
for name, value := range source {
|
||||
output = append(output, fmt.Sprintf("%s=%s", name, value))
|
||||
}
|
||||
|
||||
return output
|
||||
}
|
||||
|
||||
func convertDeployMode(mode string, replicas *uint64) (swarm.ServiceMode, error) {
|
||||
serviceMode := swarm.ServiceMode{}
|
||||
|
||||
switch mode {
|
||||
case "global":
|
||||
if replicas != nil {
|
||||
return serviceMode, fmt.Errorf("replicas can only be used with replicated mode")
|
||||
}
|
||||
serviceMode.Global = &swarm.GlobalService{}
|
||||
case "replicated", "":
|
||||
serviceMode.Replicated = &swarm.ReplicatedService{Replicas: replicas}
|
||||
default:
|
||||
return serviceMode, fmt.Errorf("Unknown mode: %s", mode)
|
||||
}
|
||||
return serviceMode, nil
|
||||
}
|
||||
|
|
|
@ -6,6 +6,7 @@ import (
|
|||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
"github.com/docker/docker/cli/command"
|
||||
"github.com/docker/docker/cli/compose/convert"
|
||||
)
|
||||
|
||||
func deployBundle(ctx context.Context, dockerCli *command.DockerCli, opts deployOptions) error {
|
||||
|
@ -18,20 +19,20 @@ func deployBundle(ctx context.Context, dockerCli *command.DockerCli, opts deploy
|
|||
return err
|
||||
}
|
||||
|
||||
namespace := namespace{name: opts.namespace}
|
||||
namespace := convert.NewNamespace(opts.namespace)
|
||||
|
||||
networks := make(map[string]types.NetworkCreate)
|
||||
for _, service := range bundle.Services {
|
||||
for _, networkName := range service.Networks {
|
||||
networks[networkName] = types.NetworkCreate{
|
||||
Labels: getStackLabels(namespace.name, nil),
|
||||
Labels: convert.AddStackLabel(namespace, nil),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
services := make(map[string]swarm.ServiceSpec)
|
||||
for internalName, service := range bundle.Services {
|
||||
name := namespace.scope(internalName)
|
||||
name := namespace.Scope(internalName)
|
||||
|
||||
var ports []swarm.PortConfig
|
||||
for _, portSpec := range service.Ports {
|
||||
|
@ -44,7 +45,7 @@ func deployBundle(ctx context.Context, dockerCli *command.DockerCli, opts deploy
|
|||
nets := []swarm.NetworkAttachmentConfig{}
|
||||
for _, networkName := range service.Networks {
|
||||
nets = append(nets, swarm.NetworkAttachmentConfig{
|
||||
Target: namespace.scope(networkName),
|
||||
Target: namespace.Scope(networkName),
|
||||
Aliases: []string{networkName},
|
||||
})
|
||||
}
|
||||
|
@ -52,7 +53,7 @@ func deployBundle(ctx context.Context, dockerCli *command.DockerCli, opts deploy
|
|||
serviceSpec := swarm.ServiceSpec{
|
||||
Annotations: swarm.Annotations{
|
||||
Name: name,
|
||||
Labels: getStackLabels(namespace.name, service.Labels),
|
||||
Labels: convert.AddStackLabel(namespace, service.Labels),
|
||||
},
|
||||
TaskTemplate: swarm.TaskSpec{
|
||||
ContainerSpec: swarm.ContainerSpec{
|
||||
|
@ -63,7 +64,7 @@ func deployBundle(ctx context.Context, dockerCli *command.DockerCli, opts deploy
|
|||
// Service Labels will not be copied to Containers
|
||||
// automatically during the deployment so we apply
|
||||
// it here.
|
||||
Labels: getStackLabels(namespace.name, nil),
|
||||
Labels: convert.AddStackLabel(namespace, nil),
|
||||
},
|
||||
},
|
||||
EndpointSpec: &swarm.EndpointSpec{
|
||||
|
|
|
@ -1,54 +0,0 @@
|
|||
package stack
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
composetypes "github.com/aanand/compose-file/types"
|
||||
"github.com/docker/docker/api/types/mount"
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
"github.com/docker/docker/pkg/testutil/assert"
|
||||
)
|
||||
|
||||
func TestConvertVolumeToMountAnonymousVolume(t *testing.T) {
|
||||
stackVolumes := map[string]composetypes.VolumeConfig{}
|
||||
namespace := namespace{name: "foo"}
|
||||
expected := mount.Mount{
|
||||
Type: mount.TypeVolume,
|
||||
Target: "/foo/bar",
|
||||
}
|
||||
mnt, err := convertVolumeToMount("/foo/bar", stackVolumes, namespace)
|
||||
assert.NilError(t, err)
|
||||
assert.DeepEqual(t, mnt, expected)
|
||||
}
|
||||
|
||||
func TestConvertVolumeToMountInvalidFormat(t *testing.T) {
|
||||
namespace := namespace{name: "foo"}
|
||||
invalids := []string{"::", "::cc", ":bb:", "aa::", "aa::cc", "aa:bb:", " : : ", " : :cc", " :bb: ", "aa: : ", "aa: :cc", "aa:bb: "}
|
||||
for _, vol := range invalids {
|
||||
_, err := convertVolumeToMount(vol, map[string]composetypes.VolumeConfig{}, namespace)
|
||||
assert.Error(t, err, "invalid volume: "+vol)
|
||||
}
|
||||
}
|
||||
|
||||
func TestConvertResourcesOnlyMemory(t *testing.T) {
|
||||
source := composetypes.Resources{
|
||||
Limits: &composetypes.Resource{
|
||||
MemoryBytes: composetypes.UnitBytes(300000000),
|
||||
},
|
||||
Reservations: &composetypes.Resource{
|
||||
MemoryBytes: composetypes.UnitBytes(200000000),
|
||||
},
|
||||
}
|
||||
resources, err := convertResources(source)
|
||||
assert.NilError(t, err)
|
||||
|
||||
expected := &swarm.ResourceRequirements{
|
||||
Limits: &swarm.Resources{
|
||||
MemoryBytes: 300000000,
|
||||
},
|
||||
Reservations: &swarm.Resources{
|
||||
MemoryBytes: 200000000,
|
||||
},
|
||||
}
|
||||
assert.DeepEqual(t, resources, expected)
|
||||
}
|
|
@ -9,9 +9,9 @@ import (
|
|||
"golang.org/x/net/context"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/cli"
|
||||
"github.com/docker/docker/cli/command"
|
||||
"github.com/docker/docker/cli/compose/convert"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
@ -81,23 +81,19 @@ func getStacks(
|
|||
ctx context.Context,
|
||||
apiclient client.APIClient,
|
||||
) ([]*stack, error) {
|
||||
|
||||
filter := filters.NewArgs()
|
||||
filter.Add("label", labelNamespace)
|
||||
|
||||
services, err := apiclient.ServiceList(
|
||||
ctx,
|
||||
types.ServiceListOptions{Filters: filter})
|
||||
types.ServiceListOptions{Filters: getAllStacksFilter()})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m := make(map[string]*stack, 0)
|
||||
for _, service := range services {
|
||||
labels := service.Spec.Labels
|
||||
name, ok := labels[labelNamespace]
|
||||
name, ok := labels[convert.LabelNamespace]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("cannot get label %s for service %s",
|
||||
labelNamespace, service.ID)
|
||||
convert.LabelNamespace, service.ID)
|
||||
}
|
||||
ztack, ok := m[name]
|
||||
if !ok {
|
||||
|
|
|
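A sketch of the grouping getStacks performs, assuming a plain count per stack rather than the command's own stack struct; the label lookup mirrors the lines above.

package main

import (
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/cli/compose/convert"
)

// countServicesPerStack buckets services by their stack namespace label.
func countServicesPerStack(services []swarm.Service) map[string]int {
	counts := map[string]int{}
	for _, service := range services {
		name, ok := service.Spec.Labels[convert.LabelNamespace]
		if !ok {
			continue // getStacks treats a missing label as an error
		}
		counts[name]++
	}
	return counts
}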
@ -46,9 +46,7 @@ func runPS(dockerCli *command.DockerCli, opts psOptions) error {
|
|||
client := dockerCli.Client()
|
||||
ctx := context.Background()
|
||||
|
||||
filter := opts.filter.Value()
|
||||
filter.Add("label", labelNamespace+"="+opts.namespace)
|
||||
|
||||
filter := getStackFilterFromOpt(opts.namespace, opts.filter)
|
||||
tasks, err := client.TaskList(ctx, types.TaskListOptions{Filters: filter})
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
|
@ -43,9 +43,7 @@ func runServices(dockerCli *command.DockerCli, opts servicesOptions) error {
|
|||
ctx := context.Background()
|
||||
client := dockerCli.Client()
|
||||
|
||||
filter := opts.filter.Value()
|
||||
filter.Add("label", labelNamespace+"="+opts.namespace)
|
||||
|
||||
filter := getStackFilterFromOpt(opts.namespace, opts.filter)
|
||||
services, err := client.ServiceList(ctx, types.ServiceListOptions{Filters: filter})
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
cli/compose/convert/compose.go (new file, 89 lines)
|
@ -0,0 +1,89 @@
|
|||
package convert
|
||||
|
||||
import (
|
||||
"github.com/docker/docker/api/types"
|
||||
networktypes "github.com/docker/docker/api/types/network"
|
||||
composetypes "github.com/docker/docker/cli/compose/types"
|
||||
)
|
||||
|
||||
const (
|
||||
// LabelNamespace is the label used to track stack resources
|
||||
LabelNamespace = "com.docker.stack.namespace"
|
||||
)
|
||||
|
||||
// Namespace mangles names by prepending the name
|
||||
type Namespace struct {
|
||||
name string
|
||||
}
|
||||
|
||||
// Scope prepends the namespace to a name
|
||||
func (n Namespace) Scope(name string) string {
|
||||
return n.name + "_" + name
|
||||
}
|
||||
|
||||
// Name returns the name of the namespace
|
||||
func (n Namespace) Name() string {
|
||||
return n.name
|
||||
}
|
||||
|
||||
// NewNamespace returns a new Namespace for scoping of names
|
||||
func NewNamespace(name string) Namespace {
|
||||
return Namespace{name: name}
|
||||
}
|
||||
|
||||
// AddStackLabel returns labels with the namespace label added
|
||||
func AddStackLabel(namespace Namespace, labels map[string]string) map[string]string {
|
||||
if labels == nil {
|
||||
labels = make(map[string]string)
|
||||
}
|
||||
labels[LabelNamespace] = namespace.name
|
||||
return labels
|
||||
}
|
||||
|
||||
type networkMap map[string]composetypes.NetworkConfig
|
||||
|
||||
// Networks converts networks from the compose-file type to the engine API type
|
||||
func Networks(
|
||||
namespace Namespace,
|
||||
networks networkMap,
|
||||
servicesNetworks map[string]struct{},
|
||||
) (map[string]types.NetworkCreate, []string) {
|
||||
if networks == nil {
|
||||
networks = make(map[string]composetypes.NetworkConfig)
|
||||
}
|
||||
|
||||
externalNetworks := []string{}
|
||||
result := make(map[string]types.NetworkCreate)
|
||||
|
||||
for internalName := range servicesNetworks {
|
||||
network := networks[internalName]
|
||||
if network.External.External {
|
||||
externalNetworks = append(externalNetworks, network.External.Name)
|
||||
continue
|
||||
}
|
||||
|
||||
createOpts := types.NetworkCreate{
|
||||
Labels: AddStackLabel(namespace, network.Labels),
|
||||
Driver: network.Driver,
|
||||
Options: network.DriverOpts,
|
||||
Internal: network.Internal,
|
||||
}
|
||||
|
||||
if network.Ipam.Driver != "" || len(network.Ipam.Config) > 0 {
|
||||
createOpts.IPAM = &networktypes.IPAM{}
|
||||
}
|
||||
|
||||
if network.Ipam.Driver != "" {
|
||||
createOpts.IPAM.Driver = network.Ipam.Driver
|
||||
}
|
||||
for _, ipamConfig := range network.Ipam.Config {
|
||||
config := networktypes.IPAMConfig{
|
||||
Subnet: ipamConfig.Subnet,
|
||||
}
|
||||
createOpts.IPAM.Config = append(createOpts.IPAM.Config, config)
|
||||
}
|
||||
result[internalName] = createOpts
|
||||
}
|
||||
|
||||
return result, externalNetworks
|
||||
}
|
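A short usage sketch for the new Namespace and AddStackLabel helpers; the stack name "mystack" and the extra label are illustrative only.

package main

import (
	"fmt"

	"github.com/docker/docker/cli/compose/convert"
)

// Names are scoped by prefixing the stack name, and AddStackLabel stamps
// resources with the namespace label used by every stack command.
func main() {
	ns := convert.NewNamespace("mystack")
	fmt.Println(ns.Scope("web")) // mystack_web
	fmt.Println(ns.Name())       // mystack

	labels := convert.AddStackLabel(ns, map[string]string{"tier": "frontend"})
	fmt.Println(labels[convert.LabelNamespace]) // mystack
}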
cli/compose/convert/compose_test.go (new file, 90 lines)
|
@ -0,0 +1,90 @@
|
|||
package convert
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/network"
|
||||
composetypes "github.com/docker/docker/cli/compose/types"
|
||||
"github.com/docker/docker/pkg/testutil/assert"
|
||||
)
|
||||
|
||||
func TestNamespaceScope(t *testing.T) {
|
||||
scoped := Namespace{name: "foo"}.Scope("bar")
|
||||
assert.Equal(t, scoped, "foo_bar")
|
||||
}
|
||||
|
||||
func TestAddStackLabel(t *testing.T) {
|
||||
labels := map[string]string{
|
||||
"something": "labeled",
|
||||
}
|
||||
actual := AddStackLabel(Namespace{name: "foo"}, labels)
|
||||
expected := map[string]string{
|
||||
"something": "labeled",
|
||||
LabelNamespace: "foo",
|
||||
}
|
||||
assert.DeepEqual(t, actual, expected)
|
||||
}
|
||||
|
||||
func TestNetworks(t *testing.T) {
|
||||
namespace := Namespace{name: "foo"}
|
||||
source := networkMap{
|
||||
"normal": composetypes.NetworkConfig{
|
||||
Driver: "overlay",
|
||||
DriverOpts: map[string]string{
|
||||
"opt": "value",
|
||||
},
|
||||
Ipam: composetypes.IPAMConfig{
|
||||
Driver: "driver",
|
||||
Config: []*composetypes.IPAMPool{
|
||||
{
|
||||
Subnet: "10.0.0.0",
|
||||
},
|
||||
},
|
||||
},
|
||||
Labels: map[string]string{
|
||||
"something": "labeled",
|
||||
},
|
||||
},
|
||||
"outside": composetypes.NetworkConfig{
|
||||
External: composetypes.External{
|
||||
External: true,
|
||||
Name: "special",
|
||||
},
|
||||
},
|
||||
}
|
||||
expected := map[string]types.NetworkCreate{
|
||||
"default": {
|
||||
Labels: map[string]string{
|
||||
LabelNamespace: "foo",
|
||||
},
|
||||
},
|
||||
"normal": {
|
||||
Driver: "overlay",
|
||||
IPAM: &network.IPAM{
|
||||
Driver: "driver",
|
||||
Config: []network.IPAMConfig{
|
||||
{
|
||||
Subnet: "10.0.0.0",
|
||||
},
|
||||
},
|
||||
},
|
||||
Options: map[string]string{
|
||||
"opt": "value",
|
||||
},
|
||||
Labels: map[string]string{
|
||||
LabelNamespace: "foo",
|
||||
"something": "labeled",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
serviceNetworks := map[string]struct{}{
|
||||
"default": {},
|
||||
"normal": {},
|
||||
"outside": {},
|
||||
}
|
||||
networks, externals := Networks(namespace, source, serviceNetworks)
|
||||
assert.DeepEqual(t, networks, expected)
|
||||
assert.DeepEqual(t, externals, []string{"special"})
|
||||
}
|
cli/compose/convert/service.go (new file, 338 lines)
|
@ -0,0 +1,338 @@
|
|||
package convert
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
composetypes "github.com/docker/docker/cli/compose/types"
|
||||
"github.com/docker/docker/opts"
|
||||
runconfigopts "github.com/docker/docker/runconfig/opts"
|
||||
"github.com/docker/go-connections/nat"
|
||||
)
|
||||
|
||||
// Services from compose-file types to engine API types
|
||||
func Services(
|
||||
namespace Namespace,
|
||||
config *composetypes.Config,
|
||||
) (map[string]swarm.ServiceSpec, error) {
|
||||
result := make(map[string]swarm.ServiceSpec)
|
||||
|
||||
services := config.Services
|
||||
volumes := config.Volumes
|
||||
networks := config.Networks
|
||||
|
||||
for _, service := range services {
|
||||
serviceSpec, err := convertService(namespace, service, networks, volumes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
result[service.Name] = serviceSpec
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func convertService(
|
||||
namespace Namespace,
|
||||
service composetypes.ServiceConfig,
|
||||
networkConfigs map[string]composetypes.NetworkConfig,
|
||||
volumes map[string]composetypes.VolumeConfig,
|
||||
) (swarm.ServiceSpec, error) {
|
||||
name := namespace.Scope(service.Name)
|
||||
|
||||
endpoint, err := convertEndpointSpec(service.Ports)
|
||||
if err != nil {
|
||||
return swarm.ServiceSpec{}, err
|
||||
}
|
||||
|
||||
mode, err := convertDeployMode(service.Deploy.Mode, service.Deploy.Replicas)
|
||||
if err != nil {
|
||||
return swarm.ServiceSpec{}, err
|
||||
}
|
||||
|
||||
mounts, err := Volumes(service.Volumes, volumes, namespace)
|
||||
if err != nil {
|
||||
// TODO: better error message (include service name)
|
||||
return swarm.ServiceSpec{}, err
|
||||
}
|
||||
|
||||
resources, err := convertResources(service.Deploy.Resources)
|
||||
if err != nil {
|
||||
return swarm.ServiceSpec{}, err
|
||||
}
|
||||
|
||||
restartPolicy, err := convertRestartPolicy(
|
||||
service.Restart, service.Deploy.RestartPolicy)
|
||||
if err != nil {
|
||||
return swarm.ServiceSpec{}, err
|
||||
}
|
||||
|
||||
healthcheck, err := convertHealthcheck(service.HealthCheck)
|
||||
if err != nil {
|
||||
return swarm.ServiceSpec{}, err
|
||||
}
|
||||
|
||||
networks, err := convertServiceNetworks(service.Networks, networkConfigs, namespace, service.Name)
|
||||
if err != nil {
|
||||
return swarm.ServiceSpec{}, err
|
||||
}
|
||||
|
||||
var logDriver *swarm.Driver
|
||||
if service.Logging != nil {
|
||||
logDriver = &swarm.Driver{
|
||||
Name: service.Logging.Driver,
|
||||
Options: service.Logging.Options,
|
||||
}
|
||||
}
|
||||
|
||||
serviceSpec := swarm.ServiceSpec{
|
||||
Annotations: swarm.Annotations{
|
||||
Name: name,
|
||||
Labels: AddStackLabel(namespace, service.Deploy.Labels),
|
||||
},
|
||||
TaskTemplate: swarm.TaskSpec{
|
||||
ContainerSpec: swarm.ContainerSpec{
|
||||
Image: service.Image,
|
||||
Command: service.Entrypoint,
|
||||
Args: service.Command,
|
||||
Hostname: service.Hostname,
|
||||
Hosts: convertExtraHosts(service.ExtraHosts),
|
||||
Healthcheck: healthcheck,
|
||||
Env: convertEnvironment(service.Environment),
|
||||
Labels: AddStackLabel(namespace, service.Labels),
|
||||
Dir: service.WorkingDir,
|
||||
User: service.User,
|
||||
Mounts: mounts,
|
||||
StopGracePeriod: service.StopGracePeriod,
|
||||
TTY: service.Tty,
|
||||
OpenStdin: service.StdinOpen,
|
||||
},
|
||||
LogDriver: logDriver,
|
||||
Resources: resources,
|
||||
RestartPolicy: restartPolicy,
|
||||
Placement: &swarm.Placement{
|
||||
Constraints: service.Deploy.Placement.Constraints,
|
||||
},
|
||||
},
|
||||
EndpointSpec: endpoint,
|
||||
Mode: mode,
|
||||
Networks: networks,
|
||||
UpdateConfig: convertUpdateConfig(service.Deploy.UpdateConfig),
|
||||
}
|
||||
|
||||
return serviceSpec, nil
|
||||
}
|
||||
|
||||
func convertServiceNetworks(
|
||||
networks map[string]*composetypes.ServiceNetworkConfig,
|
||||
networkConfigs networkMap,
|
||||
namespace Namespace,
|
||||
name string,
|
||||
) ([]swarm.NetworkAttachmentConfig, error) {
|
||||
if len(networks) == 0 {
|
||||
return []swarm.NetworkAttachmentConfig{
|
||||
{
|
||||
Target: namespace.Scope("default"),
|
||||
Aliases: []string{name},
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
nets := []swarm.NetworkAttachmentConfig{}
|
||||
for networkName, network := range networks {
|
||||
networkConfig, ok := networkConfigs[networkName]
|
||||
if !ok {
|
||||
return []swarm.NetworkAttachmentConfig{}, fmt.Errorf(
|
||||
"service %q references network %q, which is not declared", name, networkName)
|
||||
}
|
||||
var aliases []string
|
||||
if network != nil {
|
||||
aliases = network.Aliases
|
||||
}
|
||||
target := namespace.Scope(networkName)
|
||||
if networkConfig.External.External {
|
||||
target = networkConfig.External.Name
|
||||
}
|
||||
nets = append(nets, swarm.NetworkAttachmentConfig{
|
||||
Target: target,
|
||||
Aliases: append(aliases, name),
|
||||
})
|
||||
}
|
||||
return nets, nil
|
||||
}
|
||||
|
||||
func convertExtraHosts(extraHosts map[string]string) []string {
|
||||
hosts := []string{}
|
||||
for host, ip := range extraHosts {
|
||||
hosts = append(hosts, fmt.Sprintf("%s %s", ip, host))
|
||||
}
|
||||
return hosts
|
||||
}
|
||||
|
||||
func convertHealthcheck(healthcheck *composetypes.HealthCheckConfig) (*container.HealthConfig, error) {
|
||||
if healthcheck == nil {
|
||||
return nil, nil
|
||||
}
|
||||
var (
|
||||
err error
|
||||
timeout, interval time.Duration
|
||||
retries int
|
||||
)
|
||||
if healthcheck.Disable {
|
||||
if len(healthcheck.Test) != 0 {
|
||||
return nil, fmt.Errorf("test and disable can't be set at the same time")
|
||||
}
|
||||
return &container.HealthConfig{
|
||||
Test: []string{"NONE"},
|
||||
}, nil
|
||||
|
||||
}
|
||||
if healthcheck.Timeout != "" {
|
||||
timeout, err = time.ParseDuration(healthcheck.Timeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if healthcheck.Interval != "" {
|
||||
interval, err = time.ParseDuration(healthcheck.Interval)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if healthcheck.Retries != nil {
|
||||
retries = int(*healthcheck.Retries)
|
||||
}
|
||||
return &container.HealthConfig{
|
||||
Test: healthcheck.Test,
|
||||
Timeout: timeout,
|
||||
Interval: interval,
|
||||
Retries: retries,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func convertRestartPolicy(restart string, source *composetypes.RestartPolicy) (*swarm.RestartPolicy, error) {
|
||||
// TODO: log if restart is being ignored
|
||||
if source == nil {
|
||||
policy, err := runconfigopts.ParseRestartPolicy(restart)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch {
|
||||
case policy.IsNone():
|
||||
return nil, nil
|
||||
case policy.IsAlways(), policy.IsUnlessStopped():
|
||||
return &swarm.RestartPolicy{
|
||||
Condition: swarm.RestartPolicyConditionAny,
|
||||
}, nil
|
||||
case policy.IsOnFailure():
|
||||
attempts := uint64(policy.MaximumRetryCount)
|
||||
return &swarm.RestartPolicy{
|
||||
Condition: swarm.RestartPolicyConditionOnFailure,
|
||||
MaxAttempts: &attempts,
|
||||
}, nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown restart policy: %s", restart)
|
||||
}
|
||||
}
|
||||
return &swarm.RestartPolicy{
|
||||
Condition: swarm.RestartPolicyCondition(source.Condition),
|
||||
Delay: source.Delay,
|
||||
MaxAttempts: source.MaxAttempts,
|
||||
Window: source.Window,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func convertUpdateConfig(source *composetypes.UpdateConfig) *swarm.UpdateConfig {
|
||||
if source == nil {
|
||||
return nil
|
||||
}
|
||||
parallel := uint64(1)
|
||||
if source.Parallelism != nil {
|
||||
parallel = *source.Parallelism
|
||||
}
|
||||
return &swarm.UpdateConfig{
|
||||
Parallelism: parallel,
|
||||
Delay: source.Delay,
|
||||
FailureAction: source.FailureAction,
|
||||
Monitor: source.Monitor,
|
||||
MaxFailureRatio: source.MaxFailureRatio,
|
||||
}
|
||||
}
|
||||
|
||||
func convertResources(source composetypes.Resources) (*swarm.ResourceRequirements, error) {
|
||||
resources := &swarm.ResourceRequirements{}
|
||||
var err error
|
||||
if source.Limits != nil {
|
||||
var cpus int64
|
||||
if source.Limits.NanoCPUs != "" {
|
||||
cpus, err = opts.ParseCPUs(source.Limits.NanoCPUs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
resources.Limits = &swarm.Resources{
|
||||
NanoCPUs: cpus,
|
||||
MemoryBytes: int64(source.Limits.MemoryBytes),
|
||||
}
|
||||
}
|
||||
if source.Reservations != nil {
|
||||
var cpus int64
|
||||
if source.Reservations.NanoCPUs != "" {
|
||||
cpus, err = opts.ParseCPUs(source.Reservations.NanoCPUs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
resources.Reservations = &swarm.Resources{
|
||||
NanoCPUs: cpus,
|
||||
MemoryBytes: int64(source.Reservations.MemoryBytes),
|
||||
}
|
||||
}
|
||||
return resources, nil
|
||||
|
||||
}
|
||||
|
||||
func convertEndpointSpec(source []string) (*swarm.EndpointSpec, error) {
|
||||
portConfigs := []swarm.PortConfig{}
|
||||
ports, portBindings, err := nat.ParsePortSpecs(source)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for port := range ports {
|
||||
portConfigs = append(
|
||||
portConfigs,
|
||||
opts.ConvertPortToPortConfig(port, portBindings)...)
|
||||
}
|
||||
|
||||
return &swarm.EndpointSpec{Ports: portConfigs}, nil
|
||||
}
|
||||
|
||||
func convertEnvironment(source map[string]string) []string {
|
||||
var output []string
|
||||
|
||||
for name, value := range source {
|
||||
output = append(output, fmt.Sprintf("%s=%s", name, value))
|
||||
}
|
||||
|
||||
return output
|
||||
}
|
||||
|
||||
func convertDeployMode(mode string, replicas *uint64) (swarm.ServiceMode, error) {
|
||||
serviceMode := swarm.ServiceMode{}
|
||||
|
||||
switch mode {
|
||||
case "global":
|
||||
if replicas != nil {
|
||||
return serviceMode, fmt.Errorf("replicas can only be used with replicated mode")
|
||||
}
|
||||
serviceMode.Global = &swarm.GlobalService{}
|
||||
case "replicated", "":
|
||||
serviceMode.Replicated = &swarm.ReplicatedService{Replicas: replicas}
|
||||
default:
|
||||
return serviceMode, fmt.Errorf("Unknown mode: %s", mode)
|
||||
}
|
||||
return serviceMode, nil
|
||||
}
|
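convertEndpointSpec above leans on two existing helpers for port handling. A minimal sketch of that pipeline, with the port strings as illustrative input.

package main

import (
	"fmt"

	"github.com/docker/docker/opts"
	"github.com/docker/go-connections/nat"
)

// Compose port strings are parsed by nat.ParsePortSpecs, and each port is
// turned into swarm PortConfig entries by opts.ConvertPortToPortConfig.
func main() {
	ports, bindings, err := nat.ParsePortSpecs([]string{"8080:80", "9090:90/udp"})
	if err != nil {
		panic(err)
	}
	for port := range ports {
		for _, cfg := range opts.ConvertPortToPortConfig(port, bindings) {
			fmt.Printf("%s: published %d -> target %d\n", cfg.Protocol, cfg.PublishedPort, cfg.TargetPort)
		}
	}
}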
cli/compose/convert/service_test.go (new file, 216 lines)
|
@ -0,0 +1,216 @@
|
|||
package convert
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
composetypes "github.com/docker/docker/cli/compose/types"
|
||||
"github.com/docker/docker/pkg/testutil/assert"
|
||||
)
|
||||
|
||||
func TestConvertRestartPolicyFromNone(t *testing.T) {
|
||||
policy, err := convertRestartPolicy("no", nil)
|
||||
assert.NilError(t, err)
|
||||
assert.Equal(t, policy, (*swarm.RestartPolicy)(nil))
|
||||
}
|
||||
|
||||
func TestConvertRestartPolicyFromUnknown(t *testing.T) {
|
||||
_, err := convertRestartPolicy("unknown", nil)
|
||||
assert.Error(t, err, "unknown restart policy: unknown")
|
||||
}
|
||||
|
||||
func TestConvertRestartPolicyFromAlways(t *testing.T) {
|
||||
policy, err := convertRestartPolicy("always", nil)
|
||||
expected := &swarm.RestartPolicy{
|
||||
Condition: swarm.RestartPolicyConditionAny,
|
||||
}
|
||||
assert.NilError(t, err)
|
||||
assert.DeepEqual(t, policy, expected)
|
||||
}
|
||||
|
||||
func TestConvertRestartPolicyFromFailure(t *testing.T) {
|
||||
policy, err := convertRestartPolicy("on-failure:4", nil)
|
||||
attempts := uint64(4)
|
||||
expected := &swarm.RestartPolicy{
|
||||
Condition: swarm.RestartPolicyConditionOnFailure,
|
||||
MaxAttempts: &attempts,
|
||||
}
|
||||
assert.NilError(t, err)
|
||||
assert.DeepEqual(t, policy, expected)
|
||||
}
|
||||
|
||||
func TestConvertEnvironment(t *testing.T) {
|
||||
source := map[string]string{
|
||||
"foo": "bar",
|
||||
"key": "value",
|
||||
}
|
||||
env := convertEnvironment(source)
|
||||
sort.Strings(env)
|
||||
assert.DeepEqual(t, env, []string{"foo=bar", "key=value"})
|
||||
}
|
||||
|
||||
func TestConvertResourcesFull(t *testing.T) {
|
||||
source := composetypes.Resources{
|
||||
Limits: &composetypes.Resource{
|
||||
NanoCPUs: "0.003",
|
||||
MemoryBytes: composetypes.UnitBytes(300000000),
|
||||
},
|
||||
Reservations: &composetypes.Resource{
|
||||
NanoCPUs: "0.002",
|
||||
MemoryBytes: composetypes.UnitBytes(200000000),
|
||||
},
|
||||
}
|
||||
resources, err := convertResources(source)
|
||||
assert.NilError(t, err)
|
||||
|
||||
expected := &swarm.ResourceRequirements{
|
||||
Limits: &swarm.Resources{
|
||||
NanoCPUs: 3000000,
|
||||
MemoryBytes: 300000000,
|
||||
},
|
||||
Reservations: &swarm.Resources{
|
||||
NanoCPUs: 2000000,
|
||||
MemoryBytes: 200000000,
|
||||
},
|
||||
}
|
||||
assert.DeepEqual(t, resources, expected)
|
||||
}
|
||||
|
||||
func TestConvertResourcesOnlyMemory(t *testing.T) {
|
||||
source := composetypes.Resources{
|
||||
Limits: &composetypes.Resource{
|
||||
MemoryBytes: composetypes.UnitBytes(300000000),
|
||||
},
|
||||
Reservations: &composetypes.Resource{
|
||||
MemoryBytes: composetypes.UnitBytes(200000000),
|
||||
},
|
||||
}
|
||||
resources, err := convertResources(source)
|
||||
assert.NilError(t, err)
|
||||
|
||||
expected := &swarm.ResourceRequirements{
|
||||
Limits: &swarm.Resources{
|
||||
MemoryBytes: 300000000,
|
||||
},
|
||||
Reservations: &swarm.Resources{
|
||||
MemoryBytes: 200000000,
|
||||
},
|
||||
}
|
||||
assert.DeepEqual(t, resources, expected)
|
||||
}
|
||||
|
||||
func TestConvertHealthcheck(t *testing.T) {
|
||||
retries := uint64(10)
|
||||
source := &composetypes.HealthCheckConfig{
|
||||
Test: []string{"EXEC", "touch", "/foo"},
|
||||
Timeout: "30s",
|
||||
Interval: "2ms",
|
||||
Retries: &retries,
|
||||
}
|
||||
expected := &container.HealthConfig{
|
||||
Test: source.Test,
|
||||
Timeout: 30 * time.Second,
|
||||
Interval: 2 * time.Millisecond,
|
||||
Retries: 10,
|
||||
}
|
||||
|
||||
healthcheck, err := convertHealthcheck(source)
|
||||
assert.NilError(t, err)
|
||||
assert.DeepEqual(t, healthcheck, expected)
|
||||
}
|
||||
|
||||
func TestConvertHealthcheckDisable(t *testing.T) {
|
||||
source := &composetypes.HealthCheckConfig{Disable: true}
|
||||
expected := &container.HealthConfig{
|
||||
Test: []string{"NONE"},
|
||||
}
|
||||
|
||||
healthcheck, err := convertHealthcheck(source)
|
||||
assert.NilError(t, err)
|
||||
assert.DeepEqual(t, healthcheck, expected)
|
||||
}
|
||||
|
||||
func TestConvertHealthcheckDisableWithTest(t *testing.T) {
|
||||
source := &composetypes.HealthCheckConfig{
|
||||
Disable: true,
|
||||
Test: []string{"EXEC", "touch"},
|
||||
}
|
||||
_, err := convertHealthcheck(source)
|
||||
assert.Error(t, err, "test and disable can't be set")
|
||||
}
|
||||
|
||||
func TestConvertServiceNetworksOnlyDefault(t *testing.T) {
|
||||
networkConfigs := networkMap{}
|
||||
networks := map[string]*composetypes.ServiceNetworkConfig{}
|
||||
|
||||
configs, err := convertServiceNetworks(
|
||||
networks, networkConfigs, NewNamespace("foo"), "service")
|
||||
|
||||
expected := []swarm.NetworkAttachmentConfig{
|
||||
{
|
||||
Target: "foo_default",
|
||||
Aliases: []string{"service"},
|
||||
},
|
||||
}
|
||||
|
||||
assert.NilError(t, err)
|
||||
assert.DeepEqual(t, configs, expected)
|
||||
}
|
||||
|
||||
func TestConvertServiceNetworks(t *testing.T) {
|
||||
networkConfigs := networkMap{
|
||||
"front": composetypes.NetworkConfig{
|
||||
External: composetypes.External{
|
||||
External: true,
|
||||
Name: "fronttier",
|
||||
},
|
||||
},
|
||||
"back": composetypes.NetworkConfig{},
|
||||
}
|
||||
networks := map[string]*composetypes.ServiceNetworkConfig{
|
||||
"front": {
|
||||
Aliases: []string{"something"},
|
||||
},
|
||||
"back": {
|
||||
Aliases: []string{"other"},
|
||||
},
|
||||
}
|
||||
|
||||
configs, err := convertServiceNetworks(
|
||||
networks, networkConfigs, NewNamespace("foo"), "service")
|
||||
|
||||
expected := []swarm.NetworkAttachmentConfig{
|
||||
{
|
||||
Target: "foo_back",
|
||||
Aliases: []string{"other", "service"},
|
||||
},
|
||||
{
|
||||
Target: "fronttier",
|
||||
Aliases: []string{"something", "service"},
|
||||
},
|
||||
}
|
||||
|
||||
sortedConfigs := byTargetSort(configs)
|
||||
sort.Sort(&sortedConfigs)
|
||||
|
||||
assert.NilError(t, err)
|
||||
assert.DeepEqual(t, []swarm.NetworkAttachmentConfig(sortedConfigs), expected)
|
||||
}
|
||||
|
||||
type byTargetSort []swarm.NetworkAttachmentConfig
|
||||
|
||||
func (s byTargetSort) Len() int {
|
||||
return len(s)
|
||||
}
|
||||
|
||||
func (s byTargetSort) Less(i, j int) bool {
|
||||
return strings.Compare(s[i].Target, s[j].Target) < 0
|
||||
}
|
||||
|
||||
func (s byTargetSort) Swap(i, j int) {
|
||||
s[i], s[j] = s[j], s[i]
|
||||
}
|
cli/compose/convert/volume.go (new file, 128 lines)
|
@ -0,0 +1,128 @@
|
|||
package convert
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/api/types/mount"
|
||||
composetypes "github.com/docker/docker/cli/compose/types"
|
||||
)
|
||||
|
||||
type volumes map[string]composetypes.VolumeConfig
|
||||
|
||||
// Volumes from compose-file types to engine api types
|
||||
func Volumes(serviceVolumes []string, stackVolumes volumes, namespace Namespace) ([]mount.Mount, error) {
|
||||
var mounts []mount.Mount
|
||||
|
||||
for _, volumeSpec := range serviceVolumes {
|
||||
mount, err := convertVolumeToMount(volumeSpec, stackVolumes, namespace)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mounts = append(mounts, mount)
|
||||
}
|
||||
return mounts, nil
|
||||
}
|
||||
|
||||
func convertVolumeToMount(volumeSpec string, stackVolumes volumes, namespace Namespace) (mount.Mount, error) {
|
||||
var source, target string
|
||||
var mode []string
|
||||
|
||||
// TODO: split Windows path mappings properly
|
||||
parts := strings.SplitN(volumeSpec, ":", 3)
|
||||
|
||||
for _, part := range parts {
|
||||
if strings.TrimSpace(part) == "" {
|
||||
return mount.Mount{}, fmt.Errorf("invalid volume: %s", volumeSpec)
|
||||
}
|
||||
}
|
||||
|
||||
switch len(parts) {
|
||||
case 3:
|
||||
source = parts[0]
|
||||
target = parts[1]
|
||||
mode = strings.Split(parts[2], ",")
|
||||
case 2:
|
||||
source = parts[0]
|
||||
target = parts[1]
|
||||
case 1:
|
||||
target = parts[0]
|
||||
}
|
||||
|
||||
if source == "" {
|
||||
// Anonymous volume
|
||||
return mount.Mount{
|
||||
Type: mount.TypeVolume,
|
||||
Target: target,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// TODO: catch Windows paths here
|
||||
if strings.HasPrefix(source, "/") {
|
||||
return mount.Mount{
|
||||
Type: mount.TypeBind,
|
||||
Source: source,
|
||||
Target: target,
|
||||
ReadOnly: isReadOnly(mode),
|
||||
BindOptions: getBindOptions(mode),
|
||||
}, nil
|
||||
}
|
||||
|
||||
stackVolume, exists := stackVolumes[source]
|
||||
if !exists {
|
||||
return mount.Mount{}, fmt.Errorf("undefined volume: %s", source)
|
||||
}
|
||||
|
||||
var volumeOptions *mount.VolumeOptions
|
||||
if stackVolume.External.Name != "" {
|
||||
source = stackVolume.External.Name
|
||||
} else {
|
||||
volumeOptions = &mount.VolumeOptions{
|
||||
Labels: AddStackLabel(namespace, stackVolume.Labels),
|
||||
NoCopy: isNoCopy(mode),
|
||||
}
|
||||
|
||||
if stackVolume.Driver != "" {
|
||||
volumeOptions.DriverConfig = &mount.Driver{
|
||||
Name: stackVolume.Driver,
|
||||
Options: stackVolume.DriverOpts,
|
||||
}
|
||||
}
|
||||
source = namespace.Scope(source)
|
||||
}
|
||||
return mount.Mount{
|
||||
Type: mount.TypeVolume,
|
||||
Source: source,
|
||||
Target: target,
|
||||
ReadOnly: isReadOnly(mode),
|
||||
VolumeOptions: volumeOptions,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func modeHas(mode []string, field string) bool {
|
||||
for _, item := range mode {
|
||||
if item == field {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isReadOnly(mode []string) bool {
|
||||
return modeHas(mode, "ro")
|
||||
}
|
||||
|
||||
func isNoCopy(mode []string) bool {
|
||||
return modeHas(mode, "nocopy")
|
||||
}
|
||||
|
||||
func getBindOptions(mode []string) *mount.BindOptions {
|
||||
for _, item := range mode {
|
||||
for _, propagation := range mount.Propagations {
|
||||
if mount.Propagation(item) == propagation {
|
||||
return &mount.BindOptions{Propagation: mount.Propagation(item)}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
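A usage sketch for the exported Volumes entry point; the stack volumes, volume specs, and stack name are illustrative.

package main

import (
	"fmt"

	"github.com/docker/docker/cli/compose/convert"
	composetypes "github.com/docker/docker/cli/compose/types"
)

// A bind path stays a bind mount, a named volume is scoped to the stack
// ("data" becomes "mystack_data"), and mode flags such as "ro" are honoured.
func main() {
	stackVolumes := map[string]composetypes.VolumeConfig{
		"data": {Driver: "local"},
	}
	mounts, err := convert.Volumes(
		[]string{"/host/logs:/var/log:ro", "data:/var/lib/data"},
		stackVolumes,
		convert.NewNamespace("mystack"),
	)
	if err != nil {
		panic(err)
	}
	for _, m := range mounts {
		fmt.Printf("%s %s -> %s (ro=%v)\n", m.Type, m.Source, m.Target, m.ReadOnly)
	}
}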
cli/compose/convert/volume_test.go (new file, 133 lines)
|
@ -0,0 +1,133 @@
|
|||
package convert
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/api/types/mount"
|
||||
composetypes "github.com/docker/docker/cli/compose/types"
|
||||
"github.com/docker/docker/pkg/testutil/assert"
|
||||
)
|
||||
|
||||
func TestIsReadOnly(t *testing.T) {
|
||||
assert.Equal(t, isReadOnly([]string{"foo", "bar", "ro"}), true)
|
||||
assert.Equal(t, isReadOnly([]string{"ro"}), true)
|
||||
assert.Equal(t, isReadOnly([]string{}), false)
|
||||
assert.Equal(t, isReadOnly([]string{"foo", "rw"}), false)
|
||||
assert.Equal(t, isReadOnly([]string{"foo"}), false)
|
||||
}
|
||||
|
||||
func TestIsNoCopy(t *testing.T) {
|
||||
assert.Equal(t, isNoCopy([]string{"foo", "bar", "nocopy"}), true)
|
||||
assert.Equal(t, isNoCopy([]string{"nocopy"}), true)
|
||||
assert.Equal(t, isNoCopy([]string{}), false)
|
||||
assert.Equal(t, isNoCopy([]string{"foo", "rw"}), false)
|
||||
}
|
||||
|
||||
func TestGetBindOptions(t *testing.T) {
|
||||
opts := getBindOptions([]string{"slave"})
|
||||
expected := mount.BindOptions{Propagation: mount.PropagationSlave}
|
||||
assert.Equal(t, *opts, expected)
|
||||
}
|
||||
|
||||
func TestGetBindOptionsNone(t *testing.T) {
|
||||
opts := getBindOptions([]string{"ro"})
|
||||
assert.Equal(t, opts, (*mount.BindOptions)(nil))
|
||||
}
|
||||
|
||||
func TestConvertVolumeToMountNamedVolume(t *testing.T) {
|
||||
stackVolumes := volumes{
|
||||
"normal": composetypes.VolumeConfig{
|
||||
Driver: "glusterfs",
|
||||
DriverOpts: map[string]string{
|
||||
"opt": "value",
|
||||
},
|
||||
Labels: map[string]string{
|
||||
"something": "labeled",
|
||||
},
|
||||
},
|
||||
}
|
||||
namespace := NewNamespace("foo")
|
||||
expected := mount.Mount{
|
||||
Type: mount.TypeVolume,
|
||||
Source: "foo_normal",
|
||||
Target: "/foo",
|
||||
ReadOnly: true,
|
||||
VolumeOptions: &mount.VolumeOptions{
|
||||
Labels: map[string]string{
|
||||
LabelNamespace: "foo",
|
||||
"something": "labeled",
|
||||
},
|
||||
DriverConfig: &mount.Driver{
|
||||
Name: "glusterfs",
|
||||
Options: map[string]string{
|
||||
"opt": "value",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
mount, err := convertVolumeToMount("normal:/foo:ro", stackVolumes, namespace)
|
||||
assert.NilError(t, err)
|
||||
assert.DeepEqual(t, mount, expected)
|
||||
}
|
||||
|
||||
func TestConvertVolumeToMountNamedVolumeExternal(t *testing.T) {
|
||||
stackVolumes := volumes{
|
||||
"outside": composetypes.VolumeConfig{
|
||||
External: composetypes.External{
|
||||
External: true,
|
||||
Name: "special",
|
||||
},
|
||||
},
|
||||
}
|
||||
namespace := NewNamespace("foo")
|
||||
expected := mount.Mount{
|
||||
Type: mount.TypeVolume,
|
||||
Source: "special",
|
||||
Target: "/foo",
|
||||
}
|
||||
mount, err := convertVolumeToMount("outside:/foo", stackVolumes, namespace)
|
||||
assert.NilError(t, err)
|
||||
assert.DeepEqual(t, mount, expected)
|
||||
}
|
||||
|
||||
func TestConvertVolumeToMountBind(t *testing.T) {
|
||||
stackVolumes := volumes{}
|
||||
namespace := NewNamespace("foo")
|
||||
expected := mount.Mount{
|
||||
Type: mount.TypeBind,
|
||||
Source: "/bar",
|
||||
Target: "/foo",
|
||||
ReadOnly: true,
|
||||
BindOptions: &mount.BindOptions{Propagation: mount.PropagationShared},
|
||||
}
|
||||
mount, err := convertVolumeToMount("/bar:/foo:ro,shared", stackVolumes, namespace)
|
||||
assert.NilError(t, err)
|
||||
assert.DeepEqual(t, mount, expected)
|
||||
}
|
||||
|
||||
func TestConvertVolumeToMountVolumeDoesNotExist(t *testing.T) {
|
||||
namespace := NewNamespace("foo")
|
||||
_, err := convertVolumeToMount("unknown:/foo:ro", volumes{}, namespace)
|
||||
assert.Error(t, err, "undefined volume: unknown")
|
||||
}
|
||||
|
||||
func TestConvertVolumeToMountAnonymousVolume(t *testing.T) {
|
||||
stackVolumes := map[string]composetypes.VolumeConfig{}
|
||||
namespace := NewNamespace("foo")
|
||||
expected := mount.Mount{
|
||||
Type: mount.TypeVolume,
|
||||
Target: "/foo/bar",
|
||||
}
|
||||
mnt, err := convertVolumeToMount("/foo/bar", stackVolumes, namespace)
|
||||
assert.NilError(t, err)
|
||||
assert.DeepEqual(t, mnt, expected)
|
||||
}
|
||||
|
||||
func TestConvertVolumeToMountInvalidFormat(t *testing.T) {
|
||||
namespace := NewNamespace("foo")
|
||||
invalids := []string{"::", "::cc", ":bb:", "aa::", "aa::cc", "aa:bb:", " : : ", " : :cc", " :bb: ", "aa: : ", "aa: :cc", "aa:bb: "}
|
||||
for _, vol := range invalids {
|
||||
_, err := convertVolumeToMount(vol, map[string]composetypes.VolumeConfig{}, namespace)
|
||||
assert.Error(t, err, "invalid volume: "+vol)
|
||||
}
|
||||
}
|
|
@ -3,10 +3,11 @@ package interpolation
|
|||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/aanand/compose-file/template"
|
||||
"github.com/aanand/compose-file/types"
|
||||
"github.com/docker/docker/cli/compose/template"
|
||||
"github.com/docker/docker/cli/compose/types"
|
||||
)
|
||||
|
||||
// Interpolate replaces variables in a string with the values from a mapping
|
||||
func Interpolate(config types.Dict, section string, mapping template.Mapping) (types.Dict, error) {
|
||||
out := types.Dict{}
|
||||
|
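A hedged usage sketch for Interpolate (assumed caller code, not in this diff): resolveEnv is a hypothetical helper that resolves ${VAR} references in an already-parsed services section against the process environment.

    package composeexample // hypothetical caller package

    import (
        "os"

        "github.com/docker/docker/cli/compose/interpolation"
        "github.com/docker/docker/cli/compose/types"
    )

    // resolveEnv resolves ${VAR} references in a parsed "services" section.
    func resolveEnv(rawServices types.Dict) (types.Dict, error) {
        mapping := func(name string) (string, bool) {
            return os.LookupEnv(name) // value plus an "is set" flag, matching template.Mapping
        }
        return interpolation.Interpolate(rawServices, "service", mapping)
    }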
59 cli/compose/interpolation/interpolation_test.go (new file)
|
@ -0,0 +1,59 @@
|
|||
package interpolation
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/docker/docker/cli/compose/types"
|
||||
)
|
||||
|
||||
var defaults = map[string]string{
|
||||
"USER": "jenny",
|
||||
"FOO": "bar",
|
||||
}
|
||||
|
||||
func defaultMapping(name string) (string, bool) {
|
||||
val, ok := defaults[name]
|
||||
return val, ok
|
||||
}
|
||||
|
||||
func TestInterpolate(t *testing.T) {
|
||||
services := types.Dict{
|
||||
"servicea": types.Dict{
|
||||
"image": "example:${USER}",
|
||||
"volumes": []interface{}{"$FOO:/target"},
|
||||
"logging": types.Dict{
|
||||
"driver": "${FOO}",
|
||||
"options": types.Dict{
|
||||
"user": "$USER",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
expected := types.Dict{
|
||||
"servicea": types.Dict{
|
||||
"image": "example:jenny",
|
||||
"volumes": []interface{}{"bar:/target"},
|
||||
"logging": types.Dict{
|
||||
"driver": "bar",
|
||||
"options": types.Dict{
|
||||
"user": "jenny",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
result, err := Interpolate(services, "service", defaultMapping)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, expected, result)
|
||||
}
|
||||
|
||||
func TestInvalidInterpolation(t *testing.T) {
|
||||
services := types.Dict{
|
||||
"servicea": types.Dict{
|
||||
"image": "${",
|
||||
},
|
||||
}
|
||||
_, err := Interpolate(services, "service", defaultMapping)
|
||||
assert.EqualError(t, err, `Invalid interpolation format for "image" option in service "servicea": "${"`)
|
||||
}
|
8 cli/compose/loader/example1.env (new file)
|
@ -0,0 +1,8 @@
|
|||
# passed through
|
||||
FOO=1
|
||||
|
||||
# overridden in example2.env
|
||||
BAR=1
|
||||
|
||||
# overridden in full-example.yml
|
||||
BAZ=1
|
1 cli/compose/loader/example2.env (new file)
|
@ -0,0 +1 @@
|
|||
BAR=2
|
287 cli/compose/loader/full-example.yml (new file)
|
@ -0,0 +1,287 @@
|
|||
version: "3"
|
||||
|
||||
services:
|
||||
foo:
|
||||
cap_add:
|
||||
- ALL
|
||||
|
||||
cap_drop:
|
||||
- NET_ADMIN
|
||||
- SYS_ADMIN
|
||||
|
||||
cgroup_parent: m-executor-abcd
|
||||
|
||||
# String or list
|
||||
command: bundle exec thin -p 3000
|
||||
# command: ["bundle", "exec", "thin", "-p", "3000"]
|
||||
|
||||
container_name: my-web-container
|
||||
|
||||
depends_on:
|
||||
- db
|
||||
- redis
|
||||
|
||||
deploy:
|
||||
mode: replicated
|
||||
replicas: 6
|
||||
labels: [FOO=BAR]
|
||||
update_config:
|
||||
parallelism: 3
|
||||
delay: 10s
|
||||
failure_action: continue
|
||||
monitor: 60s
|
||||
max_failure_ratio: 0.3
|
||||
resources:
|
||||
limits:
|
||||
cpus: '0.001'
|
||||
memory: 50M
|
||||
reservations:
|
||||
cpus: '0.0001'
|
||||
memory: 20M
|
||||
restart_policy:
|
||||
condition: on_failure
|
||||
delay: 5s
|
||||
max_attempts: 3
|
||||
window: 120s
|
||||
placement:
|
||||
constraints: [node=foo]
|
||||
|
||||
devices:
|
||||
- "/dev/ttyUSB0:/dev/ttyUSB0"
|
||||
|
||||
# String or list
|
||||
# dns: 8.8.8.8
|
||||
dns:
|
||||
- 8.8.8.8
|
||||
- 9.9.9.9
|
||||
|
||||
# String or list
|
||||
# dns_search: example.com
|
||||
dns_search:
|
||||
- dc1.example.com
|
||||
- dc2.example.com
|
||||
|
||||
domainname: foo.com
|
||||
|
||||
# String or list
|
||||
# entrypoint: /code/entrypoint.sh -p 3000
|
||||
entrypoint: ["/code/entrypoint.sh", "-p", "3000"]
|
||||
|
||||
# String or list
|
||||
# env_file: .env
|
||||
env_file:
|
||||
- ./example1.env
|
||||
- ./example2.env
|
||||
|
||||
# Mapping or list
|
||||
# Mapping values can be strings, numbers or null
|
||||
# Booleans are not allowed - must be quoted
|
||||
environment:
|
||||
RACK_ENV: development
|
||||
SHOW: 'true'
|
||||
SESSION_SECRET:
|
||||
BAZ: 3
|
||||
# environment:
|
||||
# - RACK_ENV=development
|
||||
# - SHOW=true
|
||||
# - SESSION_SECRET
|
||||
|
||||
# Items can be strings or numbers
|
||||
expose:
|
||||
- "3000"
|
||||
- 8000
|
||||
|
||||
external_links:
|
||||
- redis_1
|
||||
- project_db_1:mysql
|
||||
- project_db_1:postgresql
|
||||
|
||||
# Mapping or list
|
||||
# Mapping values must be strings
|
||||
# extra_hosts:
|
||||
# somehost: "162.242.195.82"
|
||||
# otherhost: "50.31.209.229"
|
||||
extra_hosts:
|
||||
- "somehost:162.242.195.82"
|
||||
- "otherhost:50.31.209.229"
|
||||
|
||||
hostname: foo
|
||||
|
||||
healthcheck:
|
||||
test: echo "hello world"
|
||||
interval: 10s
|
||||
timeout: 1s
|
||||
retries: 5
|
||||
|
||||
# Any valid image reference - repo, tag, id, sha
|
||||
image: redis
|
||||
# image: ubuntu:14.04
|
||||
# image: tutum/influxdb
|
||||
# image: example-registry.com:4000/postgresql
|
||||
# image: a4bc65fd
|
||||
# image: busybox@sha256:38a203e1986cf79639cfb9b2e1d6e773de84002feea2d4eb006b52004ee8502d
|
||||
|
||||
ipc: host
|
||||
|
||||
# Mapping or list
|
||||
# Mapping values can be strings, numbers or null
|
||||
labels:
|
||||
com.example.description: "Accounting webapp"
|
||||
com.example.number: 42
|
||||
com.example.empty-label:
|
||||
# labels:
|
||||
# - "com.example.description=Accounting webapp"
|
||||
# - "com.example.number=42"
|
||||
# - "com.example.empty-label"
|
||||
|
||||
links:
|
||||
- db
|
||||
- db:database
|
||||
- redis
|
||||
|
||||
logging:
|
||||
driver: syslog
|
||||
options:
|
||||
syslog-address: "tcp://192.168.0.42:123"
|
||||
|
||||
mac_address: 02:42:ac:11:65:43
|
||||
|
||||
# network_mode: "bridge"
|
||||
# network_mode: "host"
|
||||
# network_mode: "none"
|
||||
# Use the network mode of an arbitrary container from another service
|
||||
# network_mode: "service:db"
|
||||
# Use the network mode of another container, specified by name or id
|
||||
# network_mode: "container:some-container"
|
||||
network_mode: "container:0cfeab0f748b9a743dc3da582046357c6ef497631c1a016d28d2bf9b4f899f7b"
|
||||
|
||||
networks:
|
||||
some-network:
|
||||
aliases:
|
||||
- alias1
|
||||
- alias3
|
||||
other-network:
|
||||
ipv4_address: 172.16.238.10
|
||||
ipv6_address: 2001:3984:3989::10
|
||||
other-other-network:
|
||||
|
||||
pid: "host"
|
||||
|
||||
ports:
|
||||
- 3000
|
||||
- "3000-3005"
|
||||
- "8000:8000"
|
||||
- "9090-9091:8080-8081"
|
||||
- "49100:22"
|
||||
- "127.0.0.1:8001:8001"
|
||||
- "127.0.0.1:5000-5010:5000-5010"
|
||||
|
||||
privileged: true
|
||||
|
||||
read_only: true
|
||||
|
||||
restart: always
|
||||
|
||||
security_opt:
|
||||
- label=level:s0:c100,c200
|
||||
- label=type:svirt_apache_t
|
||||
|
||||
stdin_open: true
|
||||
|
||||
stop_grace_period: 20s
|
||||
|
||||
stop_signal: SIGUSR1
|
||||
|
||||
# String or list
|
||||
# tmpfs: /run
|
||||
tmpfs:
|
||||
- /run
|
||||
- /tmp
|
||||
|
||||
tty: true
|
||||
|
||||
ulimits:
|
||||
# Single number or mapping with soft + hard limits
|
||||
nproc: 65535
|
||||
nofile:
|
||||
soft: 20000
|
||||
hard: 40000
|
||||
|
||||
user: someone
|
||||
|
||||
volumes:
|
||||
# Just specify a path and let the Engine create a volume
|
||||
- /var/lib/mysql
|
||||
# Specify an absolute path mapping
|
||||
- /opt/data:/var/lib/mysql
|
||||
# Path on the host, relative to the Compose file
|
||||
- .:/code
|
||||
- ./static:/var/www/html
|
||||
# User-relative path
|
||||
- ~/configs:/etc/configs/:ro
|
||||
# Named volume
|
||||
- datavolume:/var/lib/mysql
|
||||
|
||||
working_dir: /code
|
||||
|
||||
networks:
|
||||
# Entries can be null, which specifies simply that a network
|
||||
# called "{project name}_some-network" should be created and
|
||||
# use the default driver
|
||||
some-network:
|
||||
|
||||
other-network:
|
||||
driver: overlay
|
||||
|
||||
driver_opts:
|
||||
# Values can be strings or numbers
|
||||
foo: "bar"
|
||||
baz: 1
|
||||
|
||||
ipam:
|
||||
driver: overlay
|
||||
# driver_opts:
|
||||
# # Values can be strings or numbers
|
||||
# com.docker.network.enable_ipv6: "true"
|
||||
# com.docker.network.numeric_value: 1
|
||||
config:
|
||||
- subnet: 172.16.238.0/24
|
||||
# gateway: 172.16.238.1
|
||||
- subnet: 2001:3984:3989::/64
|
||||
# gateway: 2001:3984:3989::1
|
||||
|
||||
external-network:
|
||||
# Specifies that a pre-existing network called "external-network"
|
||||
# can be referred to within this file as "external-network"
|
||||
external: true
|
||||
|
||||
other-external-network:
|
||||
# Specifies that a pre-existing network called "my-cool-network"
|
||||
# can be referred to within this file as "other-external-network"
|
||||
external:
|
||||
name: my-cool-network
|
||||
|
||||
volumes:
|
||||
# Entries can be null, which specifies simply that a volume
|
||||
# called "{project name}_some-volume" should be created and
|
||||
# use the default driver
|
||||
some-volume:
|
||||
|
||||
other-volume:
|
||||
driver: flocker
|
||||
|
||||
driver_opts:
|
||||
# Values can be strings or numbers
|
||||
foo: "bar"
|
||||
baz: 1
|
||||
|
||||
external-volume:
|
||||
# Specifies that a pre-existing volume called "external-volume"
|
||||
# can be referred to within this file as "external-volume"
|
||||
external: true
|
||||
|
||||
other-external-volume:
|
||||
# Specifies that a pre-existing volume called "my-cool-volume"
|
||||
# can be referred to within this file as "other-external-volume"
|
||||
external:
|
||||
name: my-cool-volume
|
|
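To connect the fixture above with the new loader API, here is a rough end-to-end sketch (assumed caller code, not part of the diff; the relative path to the fixture is an assumption), doing by hand what TestFullExample further below does.

    package main

    import (
        "fmt"
        "io/ioutil"
        "os"

        "github.com/docker/docker/cli/compose/loader"
        "github.com/docker/docker/cli/compose/types"
    )

    func main() {
        bytes, err := ioutil.ReadFile("cli/compose/loader/full-example.yml")
        if err != nil {
            panic(err)
        }
        dict, err := loader.ParseYAML(bytes)
        if err != nil {
            panic(err)
        }
        workingDir, _ := os.Getwd()
        config, err := loader.Load(types.ConfigDetails{
            WorkingDir:  workingDir,
            ConfigFiles: []types.ConfigFile{{Filename: "full-example.yml", Config: dict}},
        })
        if err != nil {
            panic(err)
        }
        fmt.Println(len(config.Services), "services") // 1 services
    }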
@ -9,9 +9,9 @@ import (
|
|||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/aanand/compose-file/interpolation"
|
||||
"github.com/aanand/compose-file/schema"
|
||||
"github.com/aanand/compose-file/types"
|
||||
"github.com/docker/docker/cli/compose/interpolation"
|
||||
"github.com/docker/docker/cli/compose/schema"
|
||||
"github.com/docker/docker/cli/compose/types"
|
||||
"github.com/docker/docker/runconfig/opts"
|
||||
units "github.com/docker/go-units"
|
||||
shellwords "github.com/mattn/go-shellwords"
|
||||
|
@ -117,6 +117,8 @@ func Load(configDetails types.ConfigDetails) (*types.Config, error) {
|
|||
return &cfg, nil
|
||||
}
|
||||
|
||||
// GetUnsupportedProperties returns the list of any unsupported properties that are
|
||||
// used in the Compose files.
|
||||
func GetUnsupportedProperties(configDetails types.ConfigDetails) []string {
|
||||
unsupported := map[string]bool{}
|
||||
|
||||
|
@ -141,6 +143,8 @@ func sortedKeys(set map[string]bool) []string {
|
|||
return keys
|
||||
}
|
||||
|
||||
// GetDeprecatedProperties returns the list of any deprecated properties that
|
||||
// are used in the compose files.
|
||||
func GetDeprecatedProperties(configDetails types.ConfigDetails) map[string]string {
|
||||
return getProperties(getServices(getConfigDict(configDetails)), types.DeprecatedProperties)
|
||||
}
|
||||
|
@ -161,6 +165,8 @@ func getProperties(services types.Dict, propertyMap map[string]string) map[strin
|
|||
return output
|
||||
}
|
||||
|
||||
// ForbiddenPropertiesError is returned when there are properties in the Compose
|
||||
// file that are forbidden.
|
||||
type ForbiddenPropertiesError struct {
|
||||
Properties map[string]string
|
||||
}
|
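As an illustration of how a caller might drive these entry points together (assumed code, not part of the diff; the wrapper function and its package name are hypothetical):

    package stackdeploy // hypothetical caller package

    import (
        "fmt"

        "github.com/docker/docker/cli/compose/loader"
        "github.com/docker/docker/cli/compose/types"
    )

    // warnAndLoad loads a compose config and reports forbidden, unsupported
    // and deprecated properties along the way.
    func warnAndLoad(details types.ConfigDetails) (*types.Config, error) {
        config, err := loader.Load(details)
        if err != nil {
            if fpe, ok := err.(*loader.ForbiddenPropertiesError); ok {
                for prop, msg := range fpe.Properties {
                    fmt.Printf("forbidden property %q: %s\n", prop, msg)
                }
            }
            return nil, err
        }
        for _, prop := range loader.GetUnsupportedProperties(details) {
            fmt.Printf("ignoring unsupported property: %s\n", prop)
        }
        for prop, msg := range loader.GetDeprecatedProperties(details) {
            fmt.Printf("%s is deprecated: %s\n", prop, msg)
        }
        return config, nil
    }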
782 cli/compose/loader/loader_test.go (new file)
|
@ -0,0 +1,782 @@
|
|||
package loader
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"sort"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/cli/compose/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func buildConfigDetails(source types.Dict) types.ConfigDetails {
|
||||
workingDir, err := os.Getwd()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return types.ConfigDetails{
|
||||
WorkingDir: workingDir,
|
||||
ConfigFiles: []types.ConfigFile{
|
||||
{Filename: "filename.yml", Config: source},
|
||||
},
|
||||
Environment: nil,
|
||||
}
|
||||
}
|
||||
|
||||
var sampleYAML = `
|
||||
version: "3"
|
||||
services:
|
||||
foo:
|
||||
image: busybox
|
||||
networks:
|
||||
with_me:
|
||||
bar:
|
||||
image: busybox
|
||||
environment:
|
||||
- FOO=1
|
||||
networks:
|
||||
- with_ipam
|
||||
volumes:
|
||||
hello:
|
||||
driver: default
|
||||
driver_opts:
|
||||
beep: boop
|
||||
networks:
|
||||
default:
|
||||
driver: bridge
|
||||
driver_opts:
|
||||
beep: boop
|
||||
with_ipam:
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 172.28.0.0/16
|
||||
`
|
||||
|
||||
var sampleDict = types.Dict{
|
||||
"version": "3",
|
||||
"services": types.Dict{
|
||||
"foo": types.Dict{
|
||||
"image": "busybox",
|
||||
"networks": types.Dict{"with_me": nil},
|
||||
},
|
||||
"bar": types.Dict{
|
||||
"image": "busybox",
|
||||
"environment": []interface{}{"FOO=1"},
|
||||
"networks": []interface{}{"with_ipam"},
|
||||
},
|
||||
},
|
||||
"volumes": types.Dict{
|
||||
"hello": types.Dict{
|
||||
"driver": "default",
|
||||
"driver_opts": types.Dict{
|
||||
"beep": "boop",
|
||||
},
|
||||
},
|
||||
},
|
||||
"networks": types.Dict{
|
||||
"default": types.Dict{
|
||||
"driver": "bridge",
|
||||
"driver_opts": types.Dict{
|
||||
"beep": "boop",
|
||||
},
|
||||
},
|
||||
"with_ipam": types.Dict{
|
||||
"ipam": types.Dict{
|
||||
"driver": "default",
|
||||
"config": []interface{}{
|
||||
types.Dict{
|
||||
"subnet": "172.28.0.0/16",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
var sampleConfig = types.Config{
|
||||
Services: []types.ServiceConfig{
|
||||
{
|
||||
Name: "foo",
|
||||
Image: "busybox",
|
||||
Environment: map[string]string{},
|
||||
Networks: map[string]*types.ServiceNetworkConfig{
|
||||
"with_me": nil,
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "bar",
|
||||
Image: "busybox",
|
||||
Environment: map[string]string{"FOO": "1"},
|
||||
Networks: map[string]*types.ServiceNetworkConfig{
|
||||
"with_ipam": nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
Networks: map[string]types.NetworkConfig{
|
||||
"default": {
|
||||
Driver: "bridge",
|
||||
DriverOpts: map[string]string{
|
||||
"beep": "boop",
|
||||
},
|
||||
},
|
||||
"with_ipam": {
|
||||
Ipam: types.IPAMConfig{
|
||||
Driver: "default",
|
||||
Config: []*types.IPAMPool{
|
||||
{
|
||||
Subnet: "172.28.0.0/16",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Volumes: map[string]types.VolumeConfig{
|
||||
"hello": {
|
||||
Driver: "default",
|
||||
DriverOpts: map[string]string{
|
||||
"beep": "boop",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
func TestParseYAML(t *testing.T) {
|
||||
dict, err := ParseYAML([]byte(sampleYAML))
|
||||
if !assert.NoError(t, err) {
|
||||
return
|
||||
}
|
||||
assert.Equal(t, sampleDict, dict)
|
||||
}
|
||||
|
||||
func TestLoad(t *testing.T) {
|
||||
actual, err := Load(buildConfigDetails(sampleDict))
|
||||
if !assert.NoError(t, err) {
|
||||
return
|
||||
}
|
||||
assert.Equal(t, serviceSort(sampleConfig.Services), serviceSort(actual.Services))
|
||||
assert.Equal(t, sampleConfig.Networks, actual.Networks)
|
||||
assert.Equal(t, sampleConfig.Volumes, actual.Volumes)
|
||||
}
|
||||
|
||||
func TestParseAndLoad(t *testing.T) {
|
||||
actual, err := loadYAML(sampleYAML)
|
||||
if !assert.NoError(t, err) {
|
||||
return
|
||||
}
|
||||
assert.Equal(t, serviceSort(sampleConfig.Services), serviceSort(actual.Services))
|
||||
assert.Equal(t, sampleConfig.Networks, actual.Networks)
|
||||
assert.Equal(t, sampleConfig.Volumes, actual.Volumes)
|
||||
}
|
||||
|
||||
func TestInvalidTopLevelObjectType(t *testing.T) {
|
||||
_, err := loadYAML("1")
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "Top-level object must be a mapping")
|
||||
|
||||
_, err = loadYAML("\"hello\"")
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "Top-level object must be a mapping")
|
||||
|
||||
_, err = loadYAML("[\"hello\"]")
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "Top-level object must be a mapping")
|
||||
}
|
||||
|
||||
func TestNonStringKeys(t *testing.T) {
|
||||
_, err := loadYAML(`
|
||||
version: "3"
|
||||
123:
|
||||
foo:
|
||||
image: busybox
|
||||
`)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "Non-string key at top level: 123")
|
||||
|
||||
_, err = loadYAML(`
|
||||
version: "3"
|
||||
services:
|
||||
foo:
|
||||
image: busybox
|
||||
123:
|
||||
image: busybox
|
||||
`)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "Non-string key in services: 123")
|
||||
|
||||
_, err = loadYAML(`
|
||||
version: "3"
|
||||
services:
|
||||
foo:
|
||||
image: busybox
|
||||
networks:
|
||||
default:
|
||||
ipam:
|
||||
config:
|
||||
- 123: oh dear
|
||||
`)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "Non-string key in networks.default.ipam.config[0]: 123")
|
||||
|
||||
_, err = loadYAML(`
|
||||
version: "3"
|
||||
services:
|
||||
dict-env:
|
||||
image: busybox
|
||||
environment:
|
||||
1: FOO
|
||||
`)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "Non-string key in services.dict-env.environment: 1")
|
||||
}
|
||||
|
||||
func TestSupportedVersion(t *testing.T) {
|
||||
_, err := loadYAML(`
|
||||
version: "3"
|
||||
services:
|
||||
foo:
|
||||
image: busybox
|
||||
`)
|
||||
assert.NoError(t, err)
|
||||
|
||||
_, err = loadYAML(`
|
||||
version: "3.0"
|
||||
services:
|
||||
foo:
|
||||
image: busybox
|
||||
`)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestUnsupportedVersion(t *testing.T) {
|
||||
_, err := loadYAML(`
|
||||
version: "2"
|
||||
services:
|
||||
foo:
|
||||
image: busybox
|
||||
`)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "version")
|
||||
|
||||
_, err = loadYAML(`
|
||||
version: "2.0"
|
||||
services:
|
||||
foo:
|
||||
image: busybox
|
||||
`)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "version")
|
||||
}
|
||||
|
||||
func TestInvalidVersion(t *testing.T) {
|
||||
_, err := loadYAML(`
|
||||
version: 3
|
||||
services:
|
||||
foo:
|
||||
image: busybox
|
||||
`)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "version must be a string")
|
||||
}
|
||||
|
||||
func TestV1Unsupported(t *testing.T) {
|
||||
_, err := loadYAML(`
|
||||
foo:
|
||||
image: busybox
|
||||
`)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestNonMappingObject(t *testing.T) {
|
||||
_, err := loadYAML(`
|
||||
version: "3"
|
||||
services:
|
||||
- foo:
|
||||
image: busybox
|
||||
`)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "services must be a mapping")
|
||||
|
||||
_, err = loadYAML(`
|
||||
version: "3"
|
||||
services:
|
||||
foo: busybox
|
||||
`)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "services.foo must be a mapping")
|
||||
|
||||
_, err = loadYAML(`
|
||||
version: "3"
|
||||
networks:
|
||||
- default:
|
||||
driver: bridge
|
||||
`)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "networks must be a mapping")
|
||||
|
||||
_, err = loadYAML(`
|
||||
version: "3"
|
||||
networks:
|
||||
default: bridge
|
||||
`)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "networks.default must be a mapping")
|
||||
|
||||
_, err = loadYAML(`
|
||||
version: "3"
|
||||
volumes:
|
||||
- data:
|
||||
driver: local
|
||||
`)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "volumes must be a mapping")
|
||||
|
||||
_, err = loadYAML(`
|
||||
version: "3"
|
||||
volumes:
|
||||
data: local
|
||||
`)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "volumes.data must be a mapping")
|
||||
}
|
||||
|
||||
func TestNonStringImage(t *testing.T) {
|
||||
_, err := loadYAML(`
|
||||
version: "3"
|
||||
services:
|
||||
foo:
|
||||
image: ["busybox", "latest"]
|
||||
`)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "services.foo.image must be a string")
|
||||
}
|
||||
|
||||
func TestValidEnvironment(t *testing.T) {
|
||||
config, err := loadYAML(`
|
||||
version: "3"
|
||||
services:
|
||||
dict-env:
|
||||
image: busybox
|
||||
environment:
|
||||
FOO: "1"
|
||||
BAR: 2
|
||||
BAZ: 2.5
|
||||
QUUX:
|
||||
list-env:
|
||||
image: busybox
|
||||
environment:
|
||||
- FOO=1
|
||||
- BAR=2
|
||||
- BAZ=2.5
|
||||
- QUUX=
|
||||
`)
|
||||
assert.NoError(t, err)
|
||||
|
||||
expected := map[string]string{
|
||||
"FOO": "1",
|
||||
"BAR": "2",
|
||||
"BAZ": "2.5",
|
||||
"QUUX": "",
|
||||
}
|
||||
|
||||
assert.Equal(t, 2, len(config.Services))
|
||||
|
||||
for _, service := range config.Services {
|
||||
assert.Equal(t, expected, service.Environment)
|
||||
}
|
||||
}
|
||||
|
||||
func TestInvalidEnvironmentValue(t *testing.T) {
|
||||
_, err := loadYAML(`
|
||||
version: "3"
|
||||
services:
|
||||
dict-env:
|
||||
image: busybox
|
||||
environment:
|
||||
FOO: ["1"]
|
||||
`)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "services.dict-env.environment.FOO must be a string, number or null")
|
||||
}
|
||||
|
||||
func TestInvalidEnvironmentObject(t *testing.T) {
|
||||
_, err := loadYAML(`
|
||||
version: "3"
|
||||
services:
|
||||
dict-env:
|
||||
image: busybox
|
||||
environment: "FOO=1"
|
||||
`)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "services.dict-env.environment must be a mapping")
|
||||
}
|
||||
|
||||
func TestEnvironmentInterpolation(t *testing.T) {
|
||||
config, err := loadYAML(`
|
||||
version: "3"
|
||||
services:
|
||||
test:
|
||||
image: busybox
|
||||
labels:
|
||||
- home1=$HOME
|
||||
- home2=${HOME}
|
||||
- nonexistent=$NONEXISTENT
|
||||
- default=${NONEXISTENT-default}
|
||||
networks:
|
||||
test:
|
||||
driver: $HOME
|
||||
volumes:
|
||||
test:
|
||||
driver: $HOME
|
||||
`)
|
||||
|
||||
assert.NoError(t, err)
|
||||
|
||||
home := os.Getenv("HOME")
|
||||
|
||||
expectedLabels := map[string]string{
|
||||
"home1": home,
|
||||
"home2": home,
|
||||
"nonexistent": "",
|
||||
"default": "default",
|
||||
}
|
||||
|
||||
assert.Equal(t, expectedLabels, config.Services[0].Labels)
|
||||
assert.Equal(t, home, config.Networks["test"].Driver)
|
||||
assert.Equal(t, home, config.Volumes["test"].Driver)
|
||||
}
|
||||
|
||||
func TestUnsupportedProperties(t *testing.T) {
|
||||
dict, err := ParseYAML([]byte(`
|
||||
version: "3"
|
||||
services:
|
||||
web:
|
||||
image: web
|
||||
build: ./web
|
||||
links:
|
||||
- bar
|
||||
db:
|
||||
image: db
|
||||
build: ./db
|
||||
`))
|
||||
assert.NoError(t, err)
|
||||
|
||||
configDetails := buildConfigDetails(dict)
|
||||
|
||||
_, err = Load(configDetails)
|
||||
assert.NoError(t, err)
|
||||
|
||||
unsupported := GetUnsupportedProperties(configDetails)
|
||||
assert.Equal(t, []string{"build", "links"}, unsupported)
|
||||
}
|
||||
|
||||
func TestDeprecatedProperties(t *testing.T) {
|
||||
dict, err := ParseYAML([]byte(`
|
||||
version: "3"
|
||||
services:
|
||||
web:
|
||||
image: web
|
||||
container_name: web
|
||||
db:
|
||||
image: db
|
||||
container_name: db
|
||||
expose: ["5434"]
|
||||
`))
|
||||
assert.NoError(t, err)
|
||||
|
||||
configDetails := buildConfigDetails(dict)
|
||||
|
||||
_, err = Load(configDetails)
|
||||
assert.NoError(t, err)
|
||||
|
||||
deprecated := GetDeprecatedProperties(configDetails)
|
||||
assert.Equal(t, 2, len(deprecated))
|
||||
assert.Contains(t, deprecated, "container_name")
|
||||
assert.Contains(t, deprecated, "expose")
|
||||
}
|
||||
|
||||
func TestForbiddenProperties(t *testing.T) {
|
||||
_, err := loadYAML(`
|
||||
version: "3"
|
||||
services:
|
||||
foo:
|
||||
image: busybox
|
||||
volumes:
|
||||
- /data
|
||||
volume_driver: some-driver
|
||||
bar:
|
||||
extends:
|
||||
service: foo
|
||||
`)
|
||||
|
||||
assert.Error(t, err)
|
||||
assert.IsType(t, &ForbiddenPropertiesError{}, err)
|
||||
fmt.Println(err)
|
||||
forbidden := err.(*ForbiddenPropertiesError).Properties
|
||||
|
||||
assert.Equal(t, 2, len(forbidden))
|
||||
assert.Contains(t, forbidden, "volume_driver")
|
||||
assert.Contains(t, forbidden, "extends")
|
||||
}
|
||||
|
||||
func durationPtr(value time.Duration) *time.Duration {
|
||||
return &value
|
||||
}
|
||||
|
||||
func int64Ptr(value int64) *int64 {
|
||||
return &value
|
||||
}
|
||||
|
||||
func uint64Ptr(value uint64) *uint64 {
|
||||
return &value
|
||||
}
|
||||
|
||||
func TestFullExample(t *testing.T) {
|
||||
bytes, err := ioutil.ReadFile("full-example.yml")
|
||||
assert.NoError(t, err)
|
||||
|
||||
config, err := loadYAML(string(bytes))
|
||||
if !assert.NoError(t, err) {
|
||||
return
|
||||
}
|
||||
|
||||
workingDir, err := os.Getwd()
|
||||
assert.NoError(t, err)
|
||||
|
||||
homeDir := os.Getenv("HOME")
|
||||
stopGracePeriod := time.Duration(20 * time.Second)
|
||||
|
||||
expectedServiceConfig := types.ServiceConfig{
|
||||
Name: "foo",
|
||||
|
||||
CapAdd: []string{"ALL"},
|
||||
CapDrop: []string{"NET_ADMIN", "SYS_ADMIN"},
|
||||
CgroupParent: "m-executor-abcd",
|
||||
Command: []string{"bundle", "exec", "thin", "-p", "3000"},
|
||||
ContainerName: "my-web-container",
|
||||
DependsOn: []string{"db", "redis"},
|
||||
Deploy: types.DeployConfig{
|
||||
Mode: "replicated",
|
||||
Replicas: uint64Ptr(6),
|
||||
Labels: map[string]string{"FOO": "BAR"},
|
||||
UpdateConfig: &types.UpdateConfig{
|
||||
Parallelism: uint64Ptr(3),
|
||||
Delay: time.Duration(10 * time.Second),
|
||||
FailureAction: "continue",
|
||||
Monitor: time.Duration(60 * time.Second),
|
||||
MaxFailureRatio: 0.3,
|
||||
},
|
||||
Resources: types.Resources{
|
||||
Limits: &types.Resource{
|
||||
NanoCPUs: "0.001",
|
||||
MemoryBytes: 50 * 1024 * 1024,
|
||||
},
|
||||
Reservations: &types.Resource{
|
||||
NanoCPUs: "0.0001",
|
||||
MemoryBytes: 20 * 1024 * 1024,
|
||||
},
|
||||
},
|
||||
RestartPolicy: &types.RestartPolicy{
|
||||
Condition: "on_failure",
|
||||
Delay: durationPtr(5 * time.Second),
|
||||
MaxAttempts: uint64Ptr(3),
|
||||
Window: durationPtr(2 * time.Minute),
|
||||
},
|
||||
Placement: types.Placement{
|
||||
Constraints: []string{"node=foo"},
|
||||
},
|
||||
},
|
||||
Devices: []string{"/dev/ttyUSB0:/dev/ttyUSB0"},
|
||||
DNS: []string{"8.8.8.8", "9.9.9.9"},
|
||||
DNSSearch: []string{"dc1.example.com", "dc2.example.com"},
|
||||
DomainName: "foo.com",
|
||||
Entrypoint: []string{"/code/entrypoint.sh", "-p", "3000"},
|
||||
Environment: map[string]string{
|
||||
"RACK_ENV": "development",
|
||||
"SHOW": "true",
|
||||
"SESSION_SECRET": "",
|
||||
"FOO": "1",
|
||||
"BAR": "2",
|
||||
"BAZ": "3",
|
||||
},
|
||||
Expose: []string{"3000", "8000"},
|
||||
ExternalLinks: []string{
|
||||
"redis_1",
|
||||
"project_db_1:mysql",
|
||||
"project_db_1:postgresql",
|
||||
},
|
||||
ExtraHosts: map[string]string{
|
||||
"otherhost": "50.31.209.229",
|
||||
"somehost": "162.242.195.82",
|
||||
},
|
||||
HealthCheck: &types.HealthCheckConfig{
|
||||
Test: []string{
|
||||
"CMD-SHELL",
|
||||
"echo \"hello world\"",
|
||||
},
|
||||
Interval: "10s",
|
||||
Timeout: "1s",
|
||||
Retries: uint64Ptr(5),
|
||||
},
|
||||
Hostname: "foo",
|
||||
Image: "redis",
|
||||
Ipc: "host",
|
||||
Labels: map[string]string{
|
||||
"com.example.description": "Accounting webapp",
|
||||
"com.example.number": "42",
|
||||
"com.example.empty-label": "",
|
||||
},
|
||||
Links: []string{
|
||||
"db",
|
||||
"db:database",
|
||||
"redis",
|
||||
},
|
||||
Logging: &types.LoggingConfig{
|
||||
Driver: "syslog",
|
||||
Options: map[string]string{
|
||||
"syslog-address": "tcp://192.168.0.42:123",
|
||||
},
|
||||
},
|
||||
MacAddress: "02:42:ac:11:65:43",
|
||||
NetworkMode: "container:0cfeab0f748b9a743dc3da582046357c6ef497631c1a016d28d2bf9b4f899f7b",
|
||||
Networks: map[string]*types.ServiceNetworkConfig{
|
||||
"some-network": {
|
||||
Aliases: []string{"alias1", "alias3"},
|
||||
Ipv4Address: "",
|
||||
Ipv6Address: "",
|
||||
},
|
||||
"other-network": {
|
||||
Ipv4Address: "172.16.238.10",
|
||||
Ipv6Address: "2001:3984:3989::10",
|
||||
},
|
||||
"other-other-network": nil,
|
||||
},
|
||||
Pid: "host",
|
||||
Ports: []string{
|
||||
"3000",
|
||||
"3000-3005",
|
||||
"8000:8000",
|
||||
"9090-9091:8080-8081",
|
||||
"49100:22",
|
||||
"127.0.0.1:8001:8001",
|
||||
"127.0.0.1:5000-5010:5000-5010",
|
||||
},
|
||||
Privileged: true,
|
||||
ReadOnly: true,
|
||||
Restart: "always",
|
||||
SecurityOpt: []string{
|
||||
"label=level:s0:c100,c200",
|
||||
"label=type:svirt_apache_t",
|
||||
},
|
||||
StdinOpen: true,
|
||||
StopSignal: "SIGUSR1",
|
||||
StopGracePeriod: &stopGracePeriod,
|
||||
Tmpfs: []string{"/run", "/tmp"},
|
||||
Tty: true,
|
||||
Ulimits: map[string]*types.UlimitsConfig{
|
||||
"nproc": {
|
||||
Single: 65535,
|
||||
},
|
||||
"nofile": {
|
||||
Soft: 20000,
|
||||
Hard: 40000,
|
||||
},
|
||||
},
|
||||
User: "someone",
|
||||
Volumes: []string{
|
||||
"/var/lib/mysql",
|
||||
"/opt/data:/var/lib/mysql",
|
||||
fmt.Sprintf("%s:/code", workingDir),
|
||||
fmt.Sprintf("%s/static:/var/www/html", workingDir),
|
||||
fmt.Sprintf("%s/configs:/etc/configs/:ro", homeDir),
|
||||
"datavolume:/var/lib/mysql",
|
||||
},
|
||||
WorkingDir: "/code",
|
||||
}
|
||||
|
||||
assert.Equal(t, []types.ServiceConfig{expectedServiceConfig}, config.Services)
|
||||
|
||||
expectedNetworkConfig := map[string]types.NetworkConfig{
|
||||
"some-network": {},
|
||||
|
||||
"other-network": {
|
||||
Driver: "overlay",
|
||||
DriverOpts: map[string]string{
|
||||
"foo": "bar",
|
||||
"baz": "1",
|
||||
},
|
||||
Ipam: types.IPAMConfig{
|
||||
Driver: "overlay",
|
||||
Config: []*types.IPAMPool{
|
||||
{Subnet: "172.16.238.0/24"},
|
||||
{Subnet: "2001:3984:3989::/64"},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
"external-network": {
|
||||
External: types.External{
|
||||
Name: "external-network",
|
||||
External: true,
|
||||
},
|
||||
},
|
||||
|
||||
"other-external-network": {
|
||||
External: types.External{
|
||||
Name: "my-cool-network",
|
||||
External: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
assert.Equal(t, expectedNetworkConfig, config.Networks)
|
||||
|
||||
expectedVolumeConfig := map[string]types.VolumeConfig{
|
||||
"some-volume": {},
|
||||
"other-volume": {
|
||||
Driver: "flocker",
|
||||
DriverOpts: map[string]string{
|
||||
"foo": "bar",
|
||||
"baz": "1",
|
||||
},
|
||||
},
|
||||
"external-volume": {
|
||||
External: types.External{
|
||||
Name: "external-volume",
|
||||
External: true,
|
||||
},
|
||||
},
|
||||
"other-external-volume": {
|
||||
External: types.External{
|
||||
Name: "my-cool-volume",
|
||||
External: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
assert.Equal(t, expectedVolumeConfig, config.Volumes)
|
||||
}
|
||||
|
||||
func loadYAML(yaml string) (*types.Config, error) {
|
||||
dict, err := ParseYAML([]byte(yaml))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return Load(buildConfigDetails(dict))
|
||||
}
|
||||
|
||||
func serviceSort(services []types.ServiceConfig) []types.ServiceConfig {
|
||||
sort.Sort(servicesByName(services))
|
||||
return services
|
||||
}
|
||||
|
||||
type servicesByName []types.ServiceConfig
|
||||
|
||||
func (sbn servicesByName) Len() int { return len(sbn) }
|
||||
func (sbn servicesByName) Swap(i, j int) { sbn[i], sbn[j] = sbn[j], sbn[i] }
|
||||
func (sbn servicesByName) Less(i, j int) bool { return sbn[i].Name < sbn[j].Name }
|
237 cli/compose/schema/bindata.go (new file)
File diff suppressed because one or more lines are too long
383 cli/compose/schema/data/config_schema_v3.0.json (new file)
|
@ -0,0 +1,383 @@
|
|||
{
|
||||
"$schema": "http://json-schema.org/draft-04/schema#",
|
||||
"id": "config_schema_v3.0.json",
|
||||
"type": "object",
|
||||
"required": ["version"],
|
||||
|
||||
"properties": {
|
||||
"version": {
|
||||
"type": "string"
|
||||
},
|
||||
|
||||
"services": {
|
||||
"id": "#/properties/services",
|
||||
"type": "object",
|
||||
"patternProperties": {
|
||||
"^[a-zA-Z0-9._-]+$": {
|
||||
"$ref": "#/definitions/service"
|
||||
}
|
||||
},
|
||||
"additionalProperties": false
|
||||
},
|
||||
|
||||
"networks": {
|
||||
"id": "#/properties/networks",
|
||||
"type": "object",
|
||||
"patternProperties": {
|
||||
"^[a-zA-Z0-9._-]+$": {
|
||||
"$ref": "#/definitions/network"
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
"volumes": {
|
||||
"id": "#/properties/volumes",
|
||||
"type": "object",
|
||||
"patternProperties": {
|
||||
"^[a-zA-Z0-9._-]+$": {
|
||||
"$ref": "#/definitions/volume"
|
||||
}
|
||||
},
|
||||
"additionalProperties": false
|
||||
}
|
||||
},
|
||||
|
||||
"additionalProperties": false,
|
||||
|
||||
"definitions": {
|
||||
|
||||
"service": {
|
||||
"id": "#/definitions/service",
|
||||
"type": "object",
|
||||
|
||||
"properties": {
|
||||
"deploy": {"$ref": "#/definitions/deployment"},
|
||||
"build": {
|
||||
"oneOf": [
|
||||
{"type": "string"},
|
||||
{
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"context": {"type": "string"},
|
||||
"dockerfile": {"type": "string"},
|
||||
"args": {"$ref": "#/definitions/list_or_dict"}
|
||||
},
|
||||
"additionalProperties": false
|
||||
}
|
||||
]
|
||||
},
|
||||
"cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
|
||||
"cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
|
||||
"cgroup_parent": {"type": "string"},
|
||||
"command": {
|
||||
"oneOf": [
|
||||
{"type": "string"},
|
||||
{"type": "array", "items": {"type": "string"}}
|
||||
]
|
||||
},
|
||||
"container_name": {"type": "string"},
|
||||
"depends_on": {"$ref": "#/definitions/list_of_strings"},
|
||||
"devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
|
||||
"dns": {"$ref": "#/definitions/string_or_list"},
|
||||
"dns_search": {"$ref": "#/definitions/string_or_list"},
|
||||
"domainname": {"type": "string"},
|
||||
"entrypoint": {
|
||||
"oneOf": [
|
||||
{"type": "string"},
|
||||
{"type": "array", "items": {"type": "string"}}
|
||||
]
|
||||
},
|
||||
"env_file": {"$ref": "#/definitions/string_or_list"},
|
||||
"environment": {"$ref": "#/definitions/list_or_dict"},
|
||||
|
||||
"expose": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": ["string", "number"],
|
||||
"format": "expose"
|
||||
},
|
||||
"uniqueItems": true
|
||||
},
|
||||
|
||||
"external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
|
||||
"extra_hosts": {"$ref": "#/definitions/list_or_dict"},
|
||||
"healthcheck": {"$ref": "#/definitions/healthcheck"},
|
||||
"hostname": {"type": "string"},
|
||||
"image": {"type": "string"},
|
||||
"ipc": {"type": "string"},
|
||||
"labels": {"$ref": "#/definitions/list_or_dict"},
|
||||
"links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
|
||||
|
||||
"logging": {
|
||||
"type": "object",
|
||||
|
||||
"properties": {
|
||||
"driver": {"type": "string"},
|
||||
"options": {
|
||||
"type": "object",
|
||||
"patternProperties": {
|
||||
"^.+$": {"type": ["string", "number", "null"]}
|
||||
}
|
||||
}
|
||||
},
|
||||
"additionalProperties": false
|
||||
},
|
||||
|
||||
"mac_address": {"type": "string"},
|
||||
"network_mode": {"type": "string"},
|
||||
|
||||
"networks": {
|
||||
"oneOf": [
|
||||
{"$ref": "#/definitions/list_of_strings"},
|
||||
{
|
||||
"type": "object",
|
||||
"patternProperties": {
|
||||
"^[a-zA-Z0-9._-]+$": {
|
||||
"oneOf": [
|
||||
{
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"aliases": {"$ref": "#/definitions/list_of_strings"},
|
||||
"ipv4_address": {"type": "string"},
|
||||
"ipv6_address": {"type": "string"}
|
||||
},
|
||||
"additionalProperties": false
|
||||
},
|
||||
{"type": "null"}
|
||||
]
|
||||
}
|
||||
},
|
||||
"additionalProperties": false
|
||||
}
|
||||
]
|
||||
},
|
||||
"pid": {"type": ["string", "null"]},
|
||||
|
||||
"ports": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": ["string", "number"],
|
||||
"format": "ports"
|
||||
},
|
||||
"uniqueItems": true
|
||||
},
|
||||
|
||||
"privileged": {"type": "boolean"},
|
||||
"read_only": {"type": "boolean"},
|
||||
"restart": {"type": "string"},
|
||||
"security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
|
||||
"shm_size": {"type": ["number", "string"]},
|
||||
"sysctls": {"$ref": "#/definitions/list_or_dict"},
|
||||
"stdin_open": {"type": "boolean"},
|
||||
"stop_grace_period": {"type": "string", "format": "duration"},
|
||||
"stop_signal": {"type": "string"},
|
||||
"tmpfs": {"$ref": "#/definitions/string_or_list"},
|
||||
"tty": {"type": "boolean"},
|
||||
"ulimits": {
|
||||
"type": "object",
|
||||
"patternProperties": {
|
||||
"^[a-z]+$": {
|
||||
"oneOf": [
|
||||
{"type": "integer"},
|
||||
{
|
||||
"type":"object",
|
||||
"properties": {
|
||||
"hard": {"type": "integer"},
|
||||
"soft": {"type": "integer"}
|
||||
},
|
||||
"required": ["soft", "hard"],
|
||||
"additionalProperties": false
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"user": {"type": "string"},
|
||||
"userns_mode": {"type": "string"},
|
||||
"volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
|
||||
"working_dir": {"type": "string"}
|
||||
},
|
||||
"additionalProperties": false
|
||||
},
|
||||
|
||||
"healthcheck": {
|
||||
"id": "#/definitions/healthcheck",
|
||||
"type": "object",
|
||||
"additionalProperties": false,
|
||||
"properties": {
|
||||
"disable": {"type": "boolean"},
|
||||
"interval": {"type": "string"},
|
||||
"retries": {"type": "number"},
|
||||
"test": {
|
||||
"oneOf": [
|
||||
{"type": "string"},
|
||||
{"type": "array", "items": {"type": "string"}}
|
||||
]
|
||||
},
|
||||
"timeout": {"type": "string"}
|
||||
}
|
||||
},
|
||||
"deployment": {
|
||||
"id": "#/definitions/deployment",
|
||||
"type": ["object", "null"],
|
||||
"properties": {
|
||||
"mode": {"type": "string"},
|
||||
"replicas": {"type": "integer"},
|
||||
"labels": {"$ref": "#/definitions/list_or_dict"},
|
||||
"update_config": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"parallelism": {"type": "integer"},
|
||||
"delay": {"type": "string", "format": "duration"},
|
||||
"failure_action": {"type": "string"},
|
||||
"monitor": {"type": "string", "format": "duration"},
|
||||
"max_failure_ratio": {"type": "number"}
|
||||
},
|
||||
"additionalProperties": false
|
||||
},
|
||||
"resources": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"limits": {"$ref": "#/definitions/resource"},
|
||||
"reservations": {"$ref": "#/definitions/resource"}
|
||||
}
|
||||
},
|
||||
"restart_policy": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"condition": {"type": "string"},
|
||||
"delay": {"type": "string", "format": "duration"},
|
||||
"max_attempts": {"type": "integer"},
|
||||
"window": {"type": "string", "format": "duration"}
|
||||
},
|
||||
"additionalProperties": false
|
||||
},
|
||||
"placement": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"constraints": {"type": "array", "items": {"type": "string"}}
|
||||
},
|
||||
"additionalProperties": false
|
||||
}
|
||||
},
|
||||
"additionalProperties": false
|
||||
},
|
||||
|
||||
"resource": {
|
||||
"id": "#/definitions/resource",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"cpus": {"type": "string"},
|
||||
"memory": {"type": "string"}
|
||||
},
|
||||
"additionalProperties": false
|
||||
},
|
||||
|
||||
"network": {
|
||||
"id": "#/definitions/network",
|
||||
"type": ["object", "null"],
|
||||
"properties": {
|
||||
"driver": {"type": "string"},
|
||||
"driver_opts": {
|
||||
"type": "object",
|
||||
"patternProperties": {
|
||||
"^.+$": {"type": ["string", "number"]}
|
||||
}
|
||||
},
|
||||
"ipam": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"driver": {"type": "string"},
|
||||
"config": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"subnet": {"type": "string"}
|
||||
},
|
||||
"additionalProperties": false
|
||||
}
|
||||
}
|
||||
},
|
||||
"additionalProperties": false
|
||||
},
|
||||
"external": {
|
||||
"type": ["boolean", "object"],
|
||||
"properties": {
|
||||
"name": {"type": "string"}
|
||||
},
|
||||
"additionalProperties": false
|
||||
},
|
||||
"internal": {"type": "boolean"},
|
||||
"labels": {"$ref": "#/definitions/list_or_dict"}
|
||||
},
|
||||
"additionalProperties": false
|
||||
},
|
||||
|
||||
"volume": {
|
||||
"id": "#/definitions/volume",
|
||||
"type": ["object", "null"],
|
||||
"properties": {
|
||||
"driver": {"type": "string"},
|
||||
"driver_opts": {
|
||||
"type": "object",
|
||||
"patternProperties": {
|
||||
"^.+$": {"type": ["string", "number"]}
|
||||
}
|
||||
},
|
||||
"external": {
|
||||
"type": ["boolean", "object"],
|
||||
"properties": {
|
||||
"name": {"type": "string"}
|
||||
},
|
||||
"additionalProperties": false
|
||||
},
|
||||
"labels": {"$ref": "#/definitions/list_or_dict"}
|
||||
},
|
||||
"additionalProperties": false
|
||||
},
|
||||
|
||||
"string_or_list": {
|
||||
"oneOf": [
|
||||
{"type": "string"},
|
||||
{"$ref": "#/definitions/list_of_strings"}
|
||||
]
|
||||
},
|
||||
|
||||
"list_of_strings": {
|
||||
"type": "array",
|
||||
"items": {"type": "string"},
|
||||
"uniqueItems": true
|
||||
},
|
||||
|
||||
"list_or_dict": {
|
||||
"oneOf": [
|
||||
{
|
||||
"type": "object",
|
||||
"patternProperties": {
|
||||
".+": {
|
||||
"type": ["string", "number", "null"]
|
||||
}
|
||||
},
|
||||
"additionalProperties": false
|
||||
},
|
||||
{"type": "array", "items": {"type": "string"}, "uniqueItems": true}
|
||||
]
|
||||
},
|
||||
|
||||
"constraints": {
|
||||
"service": {
|
||||
"id": "#/definitions/constraints/service",
|
||||
"anyOf": [
|
||||
{"required": ["build"]},
|
||||
{"required": ["image"]}
|
||||
],
|
||||
"properties": {
|
||||
"build": {
|
||||
"required": ["context"]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,6 +1,6 @@
|
|||
package schema
|
||||
|
||||
//go:generate go-bindata -pkg schema data
|
||||
//go:generate go-bindata -pkg schema -nometadata data
|
||||
|
||||
import (
|
||||
"fmt"
|
35 cli/compose/schema/schema_test.go (new file)
|
@ -0,0 +1,35 @@
|
|||
package schema
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
type dict map[string]interface{}
|
||||
|
||||
func TestValid(t *testing.T) {
|
||||
config := dict{
|
||||
"version": "2.1",
|
||||
"services": dict{
|
||||
"foo": dict{
|
||||
"image": "busybox",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
assert.NoError(t, Validate(config))
|
||||
}
|
||||
|
||||
func TestUndefinedTopLevelOption(t *testing.T) {
|
||||
config := dict{
|
||||
"version": "2.1",
|
||||
"helicopters": dict{
|
||||
"foo": dict{
|
||||
"image": "busybox",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
assert.Error(t, Validate(config))
|
||||
}
|
|
@ -16,6 +16,8 @@ var patternString = fmt.Sprintf(
|
|||
|
||||
var pattern = regexp.MustCompile(patternString)
|
||||
|
||||
// InvalidTemplateError is returned when a variable template is not in a valid
|
||||
// format
|
||||
type InvalidTemplateError struct {
|
||||
Template string
|
||||
}
|
||||
|
@ -24,23 +26,14 @@ func (e InvalidTemplateError) Error() string {
|
|||
return fmt.Sprintf("Invalid template: %#v", e.Template)
|
||||
}
|
||||
|
||||
// A user-supplied function which maps from variable names to values.
|
||||
// Mapping is a user-supplied function which maps from variable names to values.
|
||||
// Returns the value as a string and a bool indicating whether
|
||||
// the value is present, to distinguish between an empty string
|
||||
// and the absence of a value.
|
||||
type Mapping func(string) (string, bool)
|
||||
|
||||
// Substitute variables in the string with their values
|
||||
func Substitute(template string, mapping Mapping) (result string, err *InvalidTemplateError) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
if e, ok := r.(*InvalidTemplateError); ok {
|
||||
err = e
|
||||
} else {
|
||||
panic(r)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
result = pattern.ReplaceAllStringFunc(template, func(substring string) string {
|
||||
matches := pattern.FindStringSubmatch(substring)
|
||||
groups := make(map[string]string)
|
||||
|
@ -87,11 +80,11 @@ func Substitute(template string, mapping Mapping) (result string, err *InvalidTe
|
|||
return escaped
|
||||
}
|
||||
|
||||
panic(&InvalidTemplateError{Template: template})
|
||||
err = &InvalidTemplateError{Template: template}
|
||||
return ""
|
||||
})
|
||||
|
||||
return
|
||||
return result, err
|
||||
}
|
||||
|
||||
// Split the string at the first occurrence of sep, and return the part before the separator,
|
||||
|
@ -102,7 +95,6 @@ func partition(s, sep string) (string, string) {
|
|||
if strings.Contains(s, sep) {
|
||||
parts := strings.SplitN(s, sep, 2)
|
||||
return parts[0], parts[1]
|
||||
} else {
|
||||
return s, ""
|
||||
}
|
||||
return s, ""
|
||||
}
|
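A small usage sketch for the updated Substitute (assumed caller code; the image reference and mapping values are made up for the example):

    package main

    import (
        "fmt"

        "github.com/docker/docker/cli/compose/template"
    )

    func main() {
        env := map[string]string{"TAG": "1.13"}
        mapping := func(name string) (string, bool) {
            val, ok := env[name]
            return val, ok
        }

        result, err := template.Substitute("redis:${TAG:-latest}", mapping)
        if err != nil {
            // err is a *template.InvalidTemplateError; the invalid-template case
            // is now reported through this return value rather than a panic
            panic(err)
        }
        fmt.Println(result) // redis:1.13; with TAG unset or empty it would print redis:latest
    }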
83 cli/compose/template/template_test.go (new file)
|
@ -0,0 +1,83 @@
|
|||
package template
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
var defaults = map[string]string{
|
||||
"FOO": "first",
|
||||
"BAR": "",
|
||||
}
|
||||
|
||||
func defaultMapping(name string) (string, bool) {
|
||||
val, ok := defaults[name]
|
||||
return val, ok
|
||||
}
|
||||
|
||||
func TestEscaped(t *testing.T) {
|
||||
result, err := Substitute("$${foo}", defaultMapping)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "${foo}", result)
|
||||
}
|
||||
|
||||
func TestInvalid(t *testing.T) {
|
||||
invalidTemplates := []string{
|
||||
"${",
|
||||
"$}",
|
||||
"${}",
|
||||
"${ }",
|
||||
"${ foo}",
|
||||
"${foo }",
|
||||
"${foo!}",
|
||||
}
|
||||
|
||||
for _, template := range invalidTemplates {
|
||||
_, err := Substitute(template, defaultMapping)
|
||||
assert.Error(t, err)
|
||||
assert.IsType(t, &InvalidTemplateError{}, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNoValueNoDefault(t *testing.T) {
|
||||
for _, template := range []string{"This ${missing} var", "This ${BAR} var"} {
|
||||
result, err := Substitute(template, defaultMapping)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "This var", result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestValueNoDefault(t *testing.T) {
|
||||
for _, template := range []string{"This $FOO var", "This ${FOO} var"} {
|
||||
result, err := Substitute(template, defaultMapping)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "This first var", result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNoValueWithDefault(t *testing.T) {
|
||||
for _, template := range []string{"ok ${missing:-def}", "ok ${missing-def}"} {
|
||||
result, err := Substitute(template, defaultMapping)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "ok def", result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEmptyValueWithSoftDefault(t *testing.T) {
|
||||
result, err := Substitute("ok ${BAR:-def}", defaultMapping)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "ok def", result)
|
||||
}
|
||||
|
||||
func TestEmptyValueWithHardDefault(t *testing.T) {
|
||||
result, err := Substitute("ok ${BAR-def}", defaultMapping)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "ok ", result)
|
||||
}
|
||||
|
||||
func TestNonAlphanumericDefault(t *testing.T) {
|
||||
result, err := Substitute("ok ${BAR:-/non:-alphanumeric}", defaultMapping)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "ok /non:-alphanumeric", result)
|
||||
}
|
|
@ -4,6 +4,7 @@ import (
|
|||
"time"
|
||||
)
|
||||
|
||||
// UnsupportedProperties not yet supported by this implementation of the compose file
|
||||
var UnsupportedProperties = []string{
|
||||
"build",
|
||||
"cap_add",
|
||||
|
@ -24,14 +25,20 @@ var UnsupportedProperties = []string{
|
|||
"security_opt",
|
||||
"shm_size",
|
||||
"stop_signal",
|
||||
"sysctls",
|
||||
"tmpfs",
|
||||
"userns_mode",
|
||||
}
|
||||
|
||||
// DeprecatedProperties that were removed from the v3 format, but their
|
||||
// use should not impact the behaviour of the application.
|
||||
var DeprecatedProperties = map[string]string{
|
||||
"container_name": "Setting the container name is not supported.",
|
||||
"expose": "Exposing ports is unnecessary - services on the same network can access each other's containers on any port.",
|
||||
}
|
||||
|
||||
// ForbiddenProperties that are not supported in this implementation of the
|
||||
// compose file.
|
||||
var ForbiddenProperties = map[string]string{
|
||||
"extends": "Support for `extends` is not implemented yet. Use `docker-compose config` to generate a configuration with all `extends` options resolved, and deploy from that.",
|
||||
"volume_driver": "Instead of setting the volume driver on the service, define a volume using the top-level `volumes` option and specify the driver there.",
|
||||
|
@ -43,25 +50,30 @@ var ForbiddenProperties = map[string]string{
|
|||
"memswap_limit": "Set resource limits using deploy.resources",
|
||||
}
|
||||
|
||||
// Dict is a mapping of strings to interface{}
|
||||
type Dict map[string]interface{}
|
||||
|
||||
// ConfigFile is a filename and the contents of the file as a Dict
|
||||
type ConfigFile struct {
|
||||
Filename string
|
||||
Config Dict
|
||||
}
|
||||
|
||||
// ConfigDetails are the details about a group of ConfigFiles
|
||||
type ConfigDetails struct {
|
||||
WorkingDir string
|
||||
ConfigFiles []ConfigFile
|
||||
Environment map[string]string
|
||||
}
|
||||
|
||||
// Config is a full compose file configuration
|
||||
type Config struct {
|
||||
Services []ServiceConfig
|
||||
Networks map[string]NetworkConfig
|
||||
Volumes map[string]VolumeConfig
|
||||
}
|
||||
|
||||
// ServiceConfig is the configuration of one service
|
||||
type ServiceConfig struct {
|
||||
Name string
|
||||
|
||||
|
@ -73,8 +85,8 @@ type ServiceConfig struct {
|
|||
DependsOn []string `mapstructure:"depends_on"`
|
||||
Deploy DeployConfig
|
||||
Devices []string
|
||||
Dns []string `compose:"string_or_list"`
|
||||
DnsSearch []string `mapstructure:"dns_search" compose:"string_or_list"`
|
||||
DNS []string `compose:"string_or_list"`
|
||||
DNSSearch []string `mapstructure:"dns_search" compose:"string_or_list"`
|
||||
DomainName string `mapstructure:"domainname"`
|
||||
Entrypoint []string `compose:"shell_command"`
|
||||
Environment map[string]string `compose:"list_or_dict_equals"`
|
||||
|
@ -108,11 +120,13 @@ type ServiceConfig struct {
|
|||
WorkingDir string `mapstructure:"working_dir"`
|
||||
}
|
||||
|
||||
// LoggingConfig the logging configuration for a service
|
||||
type LoggingConfig struct {
|
||||
Driver string
|
||||
Options map[string]string
|
||||
}
|
||||
|
||||
// DeployConfig the deployment configuration for a service
|
||||
type DeployConfig struct {
|
||||
Mode string
|
||||
Replicas *uint64
|
||||
|
@ -123,6 +137,7 @@ type DeployConfig struct {
|
|||
Placement Placement
|
||||
}
|
||||
|
||||
// HealthCheckConfig the healthcheck configuration for a service
|
||||
type HealthCheckConfig struct {
|
||||
Test []string `compose:"healthcheck"`
|
||||
Timeout string
|
||||
|
@ -131,6 +146,7 @@ type HealthCheckConfig struct {
|
|||
Disable bool
|
||||
}
|
||||
|
||||
// UpdateConfig the service update configuration
|
||||
type UpdateConfig struct {
|
||||
Parallelism *uint64
|
||||
Delay time.Duration
|
||||
|
@ -139,19 +155,23 @@ type UpdateConfig struct {
|
|||
MaxFailureRatio float32 `mapstructure:"max_failure_ratio"`
|
||||
}
|
||||
|
||||
// Resources the resource limits and reservations
|
||||
type Resources struct {
|
||||
Limits *Resource
|
||||
Reservations *Resource
|
||||
}
|
||||
|
||||
// Resource is a resource to be limited or reserved
|
||||
type Resource struct {
|
||||
// TODO: types to convert from units and ratios
|
||||
NanoCPUs string `mapstructure:"cpus"`
|
||||
MemoryBytes UnitBytes `mapstructure:"memory"`
|
||||
}
|
||||
|
||||
// UnitBytes is the bytes type
|
||||
type UnitBytes int64
|
||||
|
||||
// RestartPolicy the service restart policy
|
||||
type RestartPolicy struct {
|
||||
Condition string
|
||||
Delay *time.Duration
|
||||
|
@ -159,39 +179,47 @@ type RestartPolicy struct {
|
|||
Window *time.Duration
|
||||
}
|
||||
|
||||
// Placement constraints for the service
|
||||
type Placement struct {
|
||||
Constraints []string
|
||||
}
|
||||
|
||||
// ServiceNetworkConfig is the network configuration for a service
|
||||
type ServiceNetworkConfig struct {
|
||||
Aliases []string
|
||||
Ipv4Address string `mapstructure:"ipv4_address"`
|
||||
Ipv6Address string `mapstructure:"ipv6_address"`
|
||||
}
|
||||
|
||||
// UlimitsConfig the ulimit configuration
|
||||
type UlimitsConfig struct {
|
||||
Single int
|
||||
Soft int
|
||||
Hard int
|
||||
}
|
||||
|
||||
// NetworkConfig for a network
|
||||
type NetworkConfig struct {
|
||||
Driver string
|
||||
DriverOpts map[string]string `mapstructure:"driver_opts"`
|
||||
Ipam IPAMConfig
|
||||
External External
|
||||
Internal bool
|
||||
Labels map[string]string `compose:"list_or_dict_equals"`
|
||||
}
|
||||
|
||||
// IPAMConfig for a network
|
||||
type IPAMConfig struct {
|
||||
Driver string
|
||||
Config []*IPAMPool
|
||||
}
|
||||
|
||||
// IPAMPool for a network
|
||||
type IPAMPool struct {
|
||||
Subnet string
|
||||
}
|
||||
|
||||
// VolumeConfig for a volume
|
||||
type VolumeConfig struct {
|
||||
Driver string
|
||||
DriverOpts map[string]string `mapstructure:"driver_opts"`
|
|
@ -6,3 +6,4 @@ CONTAINERD_COMMIT=03e5862ec0d8d3b3f750e19fca3ee367e13c090e
|
|||
TINI_COMMIT=949e6facb77383876aeff8a6944dde66b3089574
|
||||
LIBNETWORK_COMMIT=0f534354b813003a754606689722fe253101bc4e
|
||||
VNDR_COMMIT=f56bd4504b4fad07a357913687fb652ee54bb3b0
|
||||
BINDATA_COMMIT=a0ff2567cfb70903282db057e799fd826784d41d
|
||||
|
|
|
@ -46,6 +46,14 @@ install_proxy() {
|
|||
go build -ldflags="$PROXY_LDFLAGS" -o /usr/local/bin/docker-proxy github.com/docker/libnetwork/cmd/proxy
|
||||
}
|
||||
|
||||
install_bindata() {
|
||||
echo "Install go-bindata version $BINDATA_COMMIT"
|
||||
git clone https://github.com/jteeuwen/go-bindata "$GOPATH/src/github.com/jteeuwen/go-bindata"
|
||||
cd "$GOPATH/src/github.com/jteeuwen/go-bindata"
|
||||
git checkout -q "$BINDATA_COMMIT"
|
||||
go build -o /usr/local/bin/go-bindata github.com/jteeuwen/go-bindata/go-bindata
|
||||
}
|
||||
|
||||
for prog in "$@"
|
||||
do
|
||||
case $prog in
|
||||
|
@@ -99,6 +107,10 @@ do
|
|||
go build -v -o /usr/local/bin/vndr .
|
||||
;;
|
||||
|
||||
bindata)
|
||||
install_bindata
|
||||
;;
|
||||
|
||||
*)
|
||||
echo echo "Usage: $0 [tomlv|runc|containerd|tini|proxy]"
|
||||
exit 1
|
||||
|
|
|
@@ -261,7 +261,7 @@ Function Validate-GoFormat($headCommit, $upstreamCommit) {
|
|||
|
||||
# Get a list of all go source-code files which have changed. Ignore exit code on next call - always process regardless
|
||||
$files=@(); $files = Invoke-Expression "git diff $upstreamCommit...$headCommit --diff-filter=ACMR --name-only -- `'*.go`'"
|
||||
$files = $files | Select-String -NotMatch "^vendor/"
|
||||
$files = $files | Select-String -NotMatch "^vendor/" | Select-String -NotMatch "^cli/compose/schema/bindata.go"
|
||||
$badFiles=@(); $files | %{
|
||||
# Deliberately ignore error on next line - treat as failed
|
||||
$content=Invoke-Expression "git show $headCommit`:$_"
|
||||
|
|
28 hack/validate/compose-bindata Executable file
|
@@ -0,0 +1,28 @@
|
|||
#!/bin/bash
|
||||
|
||||
export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
|
||||
source "${SCRIPTDIR}/.validate"
|
||||
|
||||
IFS=$'\n'
|
||||
files=( $(validate_diff --diff-filter=ACMR --name-only -- 'cli/compose/schema/data' || true) )
|
||||
unset IFS
|
||||
|
||||
if [ ${#files[@]} -gt 0 ]; then
|
||||
go generate github.com/docker/docker/cli/compose/schema 2> /dev/null
|
||||
# Let's see if the working directory is clean
|
||||
diffs="$(git status --porcelain -- cli/compose/schema 2>/dev/null)"
|
||||
if [ "$diffs" ]; then
|
||||
{
|
||||
echo 'The result of `go generate github.com/docker/docker/cli/compose/schema` differs'
|
||||
echo
|
||||
echo "$diffs"
|
||||
echo
|
||||
echo 'Please run `go generate github.com/docker/docker/cli/compose/schema`'
|
||||
} >&2
|
||||
false
|
||||
else
|
||||
echo 'Congratulations! cli/compose/schema/bindata.go is up-to-date.'
|
||||
fi
|
||||
else
|
||||
echo 'No cli/compose/schema/data changes in diff.'
|
||||
fi
|
|
@@ -4,7 +4,9 @@ export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
|
|||
source "${SCRIPTDIR}/.validate"
|
||||
|
||||
IFS=$'\n'
|
||||
files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' || true) )
|
||||
files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' |
|
||||
grep -v '^vendor/' |
|
||||
grep -v '^cli/compose/schema/bindata.go' || true) )
|
||||
unset IFS
|
||||
|
||||
badFiles=()
|
||||
|
|
|
@@ -4,7 +4,7 @@ export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
|
|||
source "${SCRIPTDIR}/.validate"
|
||||
|
||||
IFS=$'\n'
|
||||
files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' | grep -v '^api/types/container/' || true) )
|
||||
files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' | grep -v '^api/types/container/' | grep -v '^cli/compose/schema/bindata.go' || true) )
|
||||
unset IFS
|
||||
|
||||
errors=()
|
||||
|
|
|
@@ -8,7 +8,6 @@ files=( $(validate_diff --diff-filter=ACMR --name-only -- 'api/types/' 'api/swag
|
|||
unset IFS
|
||||
|
||||
if [ ${#files[@]} -gt 0 ]; then
|
||||
# We run vndr and see if we have a diff afterwards
|
||||
${SCRIPTDIR}/../generate-swagger-api.sh 2> /dev/null
|
||||
# Let see if the working directory is clean
|
||||
diffs="$(git status --porcelain -- api/types/ 2>/dev/null)"
|
||||
|
|
|
@@ -7,6 +7,8 @@ import (
|
|||
"reflect"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
)
|
||||
|
||||
// TestingT is an interface which defines the methods of testing.T that are
|
||||
|
@@ -49,7 +51,8 @@ func NilError(t TestingT, err error) {
|
|||
// they are not "deeply equal".
|
||||
func DeepEqual(t TestingT, actual, expected interface{}) {
|
||||
if !reflect.DeepEqual(actual, expected) {
|
||||
fatal(t, "Expected '%v' (%T) got '%v' (%T)", expected, expected, actual, actual)
|
||||
fatal(t, "Expected (%T):\n%v\n\ngot (%T):\n%s\n",
|
||||
expected, spew.Sdump(expected), actual, spew.Sdump(actual))
|
||||
}
|
||||
}
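A minimal sketch (not part of this diff) of why spew.Sdump is used in the new failure message above: unlike %v, it expands nested structs, slices and pointers, which makes DeepEqual failures readable. The probe type and values are illustrative only.

package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

type probe struct {
	Name  string
	Ports []int
}

func main() {
	expected := probe{Name: "web", Ports: []int{80}}
	actual := probe{Name: "web", Ports: []int{80, 443}}
	// Same message shape as the updated fatal() call above.
	fmt.Printf("Expected (%T):\n%v\n\ngot (%T):\n%s\n",
		expected, spew.Sdump(expected), actual, spew.Sdump(actual))
}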
|
||||
|
||||
|
|
|
@@ -132,7 +132,6 @@ github.com/flynn-archive/go-shlex 3f9db97f856818214da2e1057f8ad84803971cff
|
|||
github.com/docker/go-metrics 86138d05f285fd9737a99bee2d9be30866b59d72
|
||||
|
||||
# composefile
|
||||
github.com/aanand/compose-file a3e58764f50597b6217fec07e9bff7225c4a1719
|
||||
github.com/mitchellh/mapstructure f3009df150dadf309fdee4a54ed65c124afad715
|
||||
github.com/xeipuuv/gojsonpointer e0fe6f68307607d540ed8eac07a342c33fa1b54a
|
||||
github.com/xeipuuv/gojsonreference e02fc20de94c78484cd5ffb007f8af96be030a45
|
||||
|
|
191 vendor/github.com/aanand/compose-file/LICENSE generated vendored
|
@@ -1,191 +0,0 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
https://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
Copyright 2016 Docker, Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
https://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
237 vendor/github.com/aanand/compose-file/schema/bindata.go generated vendored
File diff suppressed because one or more lines are too long
27 vendor/github.com/pmezard/go-difflib/LICENSE generated vendored Normal file
|
@@ -0,0 +1,27 @@
|
|||
Copyright (c) 2013, Patrick Mezard
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
The names of its contributors may not be used to endorse or promote
|
||||
products derived from this software without specific prior written
|
||||
permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
||||
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
|
||||
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
772 vendor/github.com/pmezard/go-difflib/difflib/difflib.go generated vendored Normal file
|
@@ -0,0 +1,772 @@
|
|||
// Package difflib is a partial port of Python difflib module.
|
||||
//
|
||||
// It provides tools to compare sequences of strings and generate textual diffs.
|
||||
//
|
||||
// The following class and functions have been ported:
|
||||
//
|
||||
// - SequenceMatcher
|
||||
//
|
||||
// - unified_diff
|
||||
//
|
||||
// - context_diff
|
||||
//
|
||||
// Getting unified diffs was the main goal of the port. Keep in mind this code
|
||||
// is mostly suitable to output text differences in a human friendly way, there
|
||||
// are no guarantees generated diffs are consumable by patch(1).
|
||||
package difflib
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func min(a, b int) int {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func max(a, b int) int {
|
||||
if a > b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func calculateRatio(matches, length int) float64 {
|
||||
if length > 0 {
|
||||
return 2.0 * float64(matches) / float64(length)
|
||||
}
|
||||
return 1.0
|
||||
}
|
||||
|
||||
type Match struct {
|
||||
A int
|
||||
B int
|
||||
Size int
|
||||
}
|
||||
|
||||
type OpCode struct {
|
||||
Tag byte
|
||||
I1 int
|
||||
I2 int
|
||||
J1 int
|
||||
J2 int
|
||||
}
|
||||
|
||||
// SequenceMatcher compares sequence of strings. The basic
|
||||
// algorithm predates, and is a little fancier than, an algorithm
|
||||
// published in the late 1980's by Ratcliff and Obershelp under the
|
||||
// hyperbolic name "gestalt pattern matching". The basic idea is to find
|
||||
// the longest contiguous matching subsequence that contains no "junk"
|
||||
// elements (R-O doesn't address junk). The same idea is then applied
|
||||
// recursively to the pieces of the sequences to the left and to the right
|
||||
// of the matching subsequence. This does not yield minimal edit
|
||||
// sequences, but does tend to yield matches that "look right" to people.
|
||||
//
|
||||
// SequenceMatcher tries to compute a "human-friendly diff" between two
|
||||
// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
|
||||
// longest *contiguous* & junk-free matching subsequence. That's what
|
||||
// catches peoples' eyes. The Windows(tm) windiff has another interesting
|
||||
// notion, pairing up elements that appear uniquely in each sequence.
|
||||
// That, and the method here, appear to yield more intuitive difference
|
||||
// reports than does diff. This method appears to be the least vulnerable
|
||||
// to synching up on blocks of "junk lines", though (like blank lines in
|
||||
// ordinary text files, or maybe "<P>" lines in HTML files). That may be
|
||||
// because this is the only method of the 3 that has a *concept* of
|
||||
// "junk" <wink>.
|
||||
//
|
||||
// Timing: Basic R-O is cubic time worst case and quadratic time expected
|
||||
// case. SequenceMatcher is quadratic time for the worst case and has
|
||||
// expected-case behavior dependent in a complicated way on how many
|
||||
// elements the sequences have in common; best case time is linear.
|
||||
type SequenceMatcher struct {
|
||||
a []string
|
||||
b []string
|
||||
b2j map[string][]int
|
||||
IsJunk func(string) bool
|
||||
autoJunk bool
|
||||
bJunk map[string]struct{}
|
||||
matchingBlocks []Match
|
||||
fullBCount map[string]int
|
||||
bPopular map[string]struct{}
|
||||
opCodes []OpCode
|
||||
}
|
||||
|
||||
func NewMatcher(a, b []string) *SequenceMatcher {
|
||||
m := SequenceMatcher{autoJunk: true}
|
||||
m.SetSeqs(a, b)
|
||||
return &m
|
||||
}
|
||||
|
||||
func NewMatcherWithJunk(a, b []string, autoJunk bool,
|
||||
isJunk func(string) bool) *SequenceMatcher {
|
||||
|
||||
m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk}
|
||||
m.SetSeqs(a, b)
|
||||
return &m
|
||||
}
|
||||
|
||||
// Set two sequences to be compared.
|
||||
func (m *SequenceMatcher) SetSeqs(a, b []string) {
|
||||
m.SetSeq1(a)
|
||||
m.SetSeq2(b)
|
||||
}
|
||||
|
||||
// Set the first sequence to be compared. The second sequence to be compared is
|
||||
// not changed.
|
||||
//
|
||||
// SequenceMatcher computes and caches detailed information about the second
|
||||
// sequence, so if you want to compare one sequence S against many sequences,
|
||||
// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other
|
||||
// sequences.
|
||||
//
|
||||
// See also SetSeqs() and SetSeq2().
|
||||
func (m *SequenceMatcher) SetSeq1(a []string) {
|
||||
if &a == &m.a {
|
||||
return
|
||||
}
|
||||
m.a = a
|
||||
m.matchingBlocks = nil
|
||||
m.opCodes = nil
|
||||
}
|
||||
|
||||
// Set the second sequence to be compared. The first sequence to be compared is
|
||||
// not changed.
|
||||
func (m *SequenceMatcher) SetSeq2(b []string) {
|
||||
if &b == &m.b {
|
||||
return
|
||||
}
|
||||
m.b = b
|
||||
m.matchingBlocks = nil
|
||||
m.opCodes = nil
|
||||
m.fullBCount = nil
|
||||
m.chainB()
|
||||
}
|
||||
|
||||
func (m *SequenceMatcher) chainB() {
|
||||
// Populate line -> index mapping
|
||||
b2j := map[string][]int{}
|
||||
for i, s := range m.b {
|
||||
indices := b2j[s]
|
||||
indices = append(indices, i)
|
||||
b2j[s] = indices
|
||||
}
|
||||
|
||||
// Purge junk elements
|
||||
m.bJunk = map[string]struct{}{}
|
||||
if m.IsJunk != nil {
|
||||
junk := m.bJunk
|
||||
for s, _ := range b2j {
|
||||
if m.IsJunk(s) {
|
||||
junk[s] = struct{}{}
|
||||
}
|
||||
}
|
||||
for s, _ := range junk {
|
||||
delete(b2j, s)
|
||||
}
|
||||
}
|
||||
|
||||
// Purge remaining popular elements
|
||||
popular := map[string]struct{}{}
|
||||
n := len(m.b)
|
||||
if m.autoJunk && n >= 200 {
|
||||
ntest := n/100 + 1
|
||||
for s, indices := range b2j {
|
||||
if len(indices) > ntest {
|
||||
popular[s] = struct{}{}
|
||||
}
|
||||
}
|
||||
for s, _ := range popular {
|
||||
delete(b2j, s)
|
||||
}
|
||||
}
|
||||
m.bPopular = popular
|
||||
m.b2j = b2j
|
||||
}
|
||||
|
||||
func (m *SequenceMatcher) isBJunk(s string) bool {
|
||||
_, ok := m.bJunk[s]
|
||||
return ok
|
||||
}
|
||||
|
||||
// Find longest matching block in a[alo:ahi] and b[blo:bhi].
|
||||
//
|
||||
// If IsJunk is not defined:
|
||||
//
|
||||
// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
|
||||
// alo <= i <= i+k <= ahi
|
||||
// blo <= j <= j+k <= bhi
|
||||
// and for all (i',j',k') meeting those conditions,
|
||||
// k >= k'
|
||||
// i <= i'
|
||||
// and if i == i', j <= j'
|
||||
//
|
||||
// In other words, of all maximal matching blocks, return one that
|
||||
// starts earliest in a, and of all those maximal matching blocks that
|
||||
// start earliest in a, return the one that starts earliest in b.
|
||||
//
|
||||
// If IsJunk is defined, first the longest matching block is
|
||||
// determined as above, but with the additional restriction that no
|
||||
// junk element appears in the block. Then that block is extended as
|
||||
// far as possible by matching (only) junk elements on both sides. So
|
||||
// the resulting block never matches on junk except as identical junk
|
||||
// happens to be adjacent to an "interesting" match.
|
||||
//
|
||||
// If no blocks match, return (alo, blo, 0).
|
||||
func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match {
|
||||
// CAUTION: stripping common prefix or suffix would be incorrect.
|
||||
// E.g.,
|
||||
// ab
|
||||
// acab
|
||||
// Longest matching block is "ab", but if common prefix is
|
||||
// stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
|
||||
// strip, so ends up claiming that ab is changed to acab by
|
||||
// inserting "ca" in the middle. That's minimal but unintuitive:
|
||||
// "it's obvious" that someone inserted "ac" at the front.
|
||||
// Windiff ends up at the same place as diff, but by pairing up
|
||||
// the unique 'b's and then matching the first two 'a's.
|
||||
besti, bestj, bestsize := alo, blo, 0
|
||||
|
||||
// find longest junk-free match
|
||||
// during an iteration of the loop, j2len[j] = length of longest
|
||||
// junk-free match ending with a[i-1] and b[j]
|
||||
j2len := map[int]int{}
|
||||
for i := alo; i != ahi; i++ {
|
||||
// look at all instances of a[i] in b; note that because
|
||||
// b2j has no junk keys, the loop is skipped if a[i] is junk
|
||||
newj2len := map[int]int{}
|
||||
for _, j := range m.b2j[m.a[i]] {
|
||||
// a[i] matches b[j]
|
||||
if j < blo {
|
||||
continue
|
||||
}
|
||||
if j >= bhi {
|
||||
break
|
||||
}
|
||||
k := j2len[j-1] + 1
|
||||
newj2len[j] = k
|
||||
if k > bestsize {
|
||||
besti, bestj, bestsize = i-k+1, j-k+1, k
|
||||
}
|
||||
}
|
||||
j2len = newj2len
|
||||
}
|
||||
|
||||
// Extend the best by non-junk elements on each end. In particular,
|
||||
// "popular" non-junk elements aren't in b2j, which greatly speeds
|
||||
// the inner loop above, but also means "the best" match so far
|
||||
// doesn't contain any junk *or* popular non-junk elements.
|
||||
for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) &&
|
||||
m.a[besti-1] == m.b[bestj-1] {
|
||||
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
|
||||
}
|
||||
for besti+bestsize < ahi && bestj+bestsize < bhi &&
|
||||
!m.isBJunk(m.b[bestj+bestsize]) &&
|
||||
m.a[besti+bestsize] == m.b[bestj+bestsize] {
|
||||
bestsize += 1
|
||||
}
|
||||
|
||||
// Now that we have a wholly interesting match (albeit possibly
|
||||
// empty!), we may as well suck up the matching junk on each
|
||||
// side of it too. Can't think of a good reason not to, and it
|
||||
// saves post-processing the (possibly considerable) expense of
|
||||
// figuring out what to do with it. In the case of an empty
|
||||
// interesting match, this is clearly the right thing to do,
|
||||
// because no other kind of match is possible in the regions.
|
||||
for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) &&
|
||||
m.a[besti-1] == m.b[bestj-1] {
|
||||
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
|
||||
}
|
||||
for besti+bestsize < ahi && bestj+bestsize < bhi &&
|
||||
m.isBJunk(m.b[bestj+bestsize]) &&
|
||||
m.a[besti+bestsize] == m.b[bestj+bestsize] {
|
||||
bestsize += 1
|
||||
}
|
||||
|
||||
return Match{A: besti, B: bestj, Size: bestsize}
|
||||
}
|
||||
|
||||
// Return list of triples describing matching subsequences.
|
||||
//
|
||||
// Each triple is of the form (i, j, n), and means that
|
||||
// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
|
||||
// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are
|
||||
// adjacent triples in the list, and the second is not the last triple in the
|
||||
// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe
|
||||
// adjacent equal blocks.
|
||||
//
|
||||
// The last triple is a dummy, (len(a), len(b), 0), and is the only
|
||||
// triple with n==0.
|
||||
func (m *SequenceMatcher) GetMatchingBlocks() []Match {
|
||||
if m.matchingBlocks != nil {
|
||||
return m.matchingBlocks
|
||||
}
|
||||
|
||||
var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match
|
||||
matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match {
|
||||
match := m.findLongestMatch(alo, ahi, blo, bhi)
|
||||
i, j, k := match.A, match.B, match.Size
|
||||
if match.Size > 0 {
|
||||
if alo < i && blo < j {
|
||||
matched = matchBlocks(alo, i, blo, j, matched)
|
||||
}
|
||||
matched = append(matched, match)
|
||||
if i+k < ahi && j+k < bhi {
|
||||
matched = matchBlocks(i+k, ahi, j+k, bhi, matched)
|
||||
}
|
||||
}
|
||||
return matched
|
||||
}
|
||||
matched := matchBlocks(0, len(m.a), 0, len(m.b), nil)
|
||||
|
||||
// It's possible that we have adjacent equal blocks in the
|
||||
// matching_blocks list now.
|
||||
nonAdjacent := []Match{}
|
||||
i1, j1, k1 := 0, 0, 0
|
||||
for _, b := range matched {
|
||||
// Is this block adjacent to i1, j1, k1?
|
||||
i2, j2, k2 := b.A, b.B, b.Size
|
||||
if i1+k1 == i2 && j1+k1 == j2 {
|
||||
// Yes, so collapse them -- this just increases the length of
|
||||
// the first block by the length of the second, and the first
|
||||
// block so lengthened remains the block to compare against.
|
||||
k1 += k2
|
||||
} else {
|
||||
// Not adjacent. Remember the first block (k1==0 means it's
|
||||
// the dummy we started with), and make the second block the
|
||||
// new block to compare against.
|
||||
if k1 > 0 {
|
||||
nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
|
||||
}
|
||||
i1, j1, k1 = i2, j2, k2
|
||||
}
|
||||
}
|
||||
if k1 > 0 {
|
||||
nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
|
||||
}
|
||||
|
||||
nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0})
|
||||
m.matchingBlocks = nonAdjacent
|
||||
return m.matchingBlocks
|
||||
}
|
||||
|
||||
// Return list of 5-tuples describing how to turn a into b.
|
||||
//
|
||||
// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
|
||||
// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
|
||||
// tuple preceding it, and likewise for j1 == the previous j2.
|
||||
//
|
||||
// The tags are characters, with these meanings:
|
||||
//
|
||||
// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2]
|
||||
//
|
||||
// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case.
|
||||
//
|
||||
// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case.
|
||||
//
|
||||
// 'e' (equal): a[i1:i2] == b[j1:j2]
|
||||
func (m *SequenceMatcher) GetOpCodes() []OpCode {
|
||||
if m.opCodes != nil {
|
||||
return m.opCodes
|
||||
}
|
||||
i, j := 0, 0
|
||||
matching := m.GetMatchingBlocks()
|
||||
opCodes := make([]OpCode, 0, len(matching))
|
||||
for _, m := range matching {
|
||||
// invariant: we've pumped out correct diffs to change
|
||||
// a[:i] into b[:j], and the next matching block is
|
||||
// a[ai:ai+size] == b[bj:bj+size]. So we need to pump
|
||||
// out a diff to change a[i:ai] into b[j:bj], pump out
|
||||
// the matching block, and move (i,j) beyond the match
|
||||
ai, bj, size := m.A, m.B, m.Size
|
||||
tag := byte(0)
|
||||
if i < ai && j < bj {
|
||||
tag = 'r'
|
||||
} else if i < ai {
|
||||
tag = 'd'
|
||||
} else if j < bj {
|
||||
tag = 'i'
|
||||
}
|
||||
if tag > 0 {
|
||||
opCodes = append(opCodes, OpCode{tag, i, ai, j, bj})
|
||||
}
|
||||
i, j = ai+size, bj+size
|
||||
// the list of matching blocks is terminated by a
|
||||
// sentinel with size 0
|
||||
if size > 0 {
|
||||
opCodes = append(opCodes, OpCode{'e', ai, i, bj, j})
|
||||
}
|
||||
}
|
||||
m.opCodes = opCodes
|
||||
return m.opCodes
|
||||
}
|
||||
|
||||
// Isolate change clusters by eliminating ranges with no changes.
|
||||
//
|
||||
// Return a generator of groups with up to n lines of context.
|
||||
// Each group is in the same format as returned by GetOpCodes().
|
||||
func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
|
||||
if n < 0 {
|
||||
n = 3
|
||||
}
|
||||
codes := m.GetOpCodes()
|
||||
if len(codes) == 0 {
|
||||
codes = []OpCode{OpCode{'e', 0, 1, 0, 1}}
|
||||
}
|
||||
// Fixup leading and trailing groups if they show no changes.
|
||||
if codes[0].Tag == 'e' {
|
||||
c := codes[0]
|
||||
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
|
||||
codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2}
|
||||
}
|
||||
if codes[len(codes)-1].Tag == 'e' {
|
||||
c := codes[len(codes)-1]
|
||||
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
|
||||
codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)}
|
||||
}
|
||||
nn := n + n
|
||||
groups := [][]OpCode{}
|
||||
group := []OpCode{}
|
||||
for _, c := range codes {
|
||||
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
|
||||
// End the current group and start a new one whenever
|
||||
// there is a large range with no changes.
|
||||
if c.Tag == 'e' && i2-i1 > nn {
|
||||
group = append(group, OpCode{c.Tag, i1, min(i2, i1+n),
|
||||
j1, min(j2, j1+n)})
|
||||
groups = append(groups, group)
|
||||
group = []OpCode{}
|
||||
i1, j1 = max(i1, i2-n), max(j1, j2-n)
|
||||
}
|
||||
group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
|
||||
}
|
||||
if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') {
|
||||
groups = append(groups, group)
|
||||
}
|
||||
return groups
|
||||
}
|
||||
|
||||
// Return a measure of the sequences' similarity (float in [0,1]).
|
||||
//
|
||||
// Where T is the total number of elements in both sequences, and
|
||||
// M is the number of matches, this is 2.0*M / T.
|
||||
// Note that this is 1 if the sequences are identical, and 0 if
|
||||
// they have nothing in common.
|
||||
//
|
||||
// .Ratio() is expensive to compute if you haven't already computed
|
||||
// .GetMatchingBlocks() or .GetOpCodes(), in which case you may
|
||||
// want to try .QuickRatio() or .RealQuickRatio() first to get an
|
||||
// upper bound.
|
||||
func (m *SequenceMatcher) Ratio() float64 {
|
||||
matches := 0
|
||||
for _, m := range m.GetMatchingBlocks() {
|
||||
matches += m.Size
|
||||
}
|
||||
return calculateRatio(matches, len(m.a)+len(m.b))
|
||||
}
|
||||
|
||||
// Return an upper bound on ratio() relatively quickly.
|
||||
//
|
||||
// This isn't defined beyond that it is an upper bound on .Ratio(), and
|
||||
// is faster to compute.
|
||||
func (m *SequenceMatcher) QuickRatio() float64 {
|
||||
// viewing a and b as multisets, set matches to the cardinality
|
||||
// of their intersection; this counts the number of matches
|
||||
// without regard to order, so is clearly an upper bound
|
||||
if m.fullBCount == nil {
|
||||
m.fullBCount = map[string]int{}
|
||||
for _, s := range m.b {
|
||||
m.fullBCount[s] = m.fullBCount[s] + 1
|
||||
}
|
||||
}
|
||||
|
||||
// avail[x] is the number of times x appears in 'b' less the
|
||||
// number of times we've seen it in 'a' so far ... kinda
|
||||
avail := map[string]int{}
|
||||
matches := 0
|
||||
for _, s := range m.a {
|
||||
n, ok := avail[s]
|
||||
if !ok {
|
||||
n = m.fullBCount[s]
|
||||
}
|
||||
avail[s] = n - 1
|
||||
if n > 0 {
|
||||
matches += 1
|
||||
}
|
||||
}
|
||||
return calculateRatio(matches, len(m.a)+len(m.b))
|
||||
}
|
||||
|
||||
// Return an upper bound on ratio() very quickly.
|
||||
//
|
||||
// This isn't defined beyond that it is an upper bound on .Ratio(), and
|
||||
// is faster to compute than either .Ratio() or .QuickRatio().
|
||||
func (m *SequenceMatcher) RealQuickRatio() float64 {
|
||||
la, lb := len(m.a), len(m.b)
|
||||
return calculateRatio(min(la, lb), la+lb)
|
||||
}
|
||||
|
||||
// Convert range to the "ed" format
|
||||
func formatRangeUnified(start, stop int) string {
|
||||
// Per the diff spec at http://www.unix.org/single_unix_specification/
|
||||
beginning := start + 1 // lines start numbering with one
|
||||
length := stop - start
|
||||
if length == 1 {
|
||||
return fmt.Sprintf("%d", beginning)
|
||||
}
|
||||
if length == 0 {
|
||||
beginning -= 1 // empty ranges begin at line just before the range
|
||||
}
|
||||
return fmt.Sprintf("%d,%d", beginning, length)
|
||||
}
|
||||
|
||||
// Unified diff parameters
|
||||
type UnifiedDiff struct {
|
||||
A []string // First sequence lines
|
||||
FromFile string // First file name
|
||||
FromDate string // First file time
|
||||
B []string // Second sequence lines
|
||||
ToFile string // Second file name
|
||||
ToDate string // Second file time
|
||||
Eol string // Headers end of line, defaults to LF
|
||||
Context int // Number of context lines
|
||||
}
|
||||
|
||||
// Compare two sequences of lines; generate the delta as a unified diff.
|
||||
//
|
||||
// Unified diffs are a compact way of showing line changes and a few
|
||||
// lines of context. The number of context lines is set by 'n' which
|
||||
// defaults to three.
|
||||
//
|
||||
// By default, the diff control lines (those with ---, +++, or @@) are
|
||||
// created with a trailing newline. This is helpful so that inputs
|
||||
// created from file.readlines() result in diffs that are suitable for
|
||||
// file.writelines() since both the inputs and outputs have trailing
|
||||
// newlines.
|
||||
//
|
||||
// For inputs that do not have trailing newlines, set the lineterm
|
||||
// argument to "" so that the output will be uniformly newline free.
|
||||
//
|
||||
// The unidiff format normally has a header for filenames and modification
|
||||
// times. Any or all of these may be specified using strings for
|
||||
// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
|
||||
// The modification times are normally expressed in the ISO 8601 format.
|
||||
func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
|
||||
buf := bufio.NewWriter(writer)
|
||||
defer buf.Flush()
|
||||
wf := func(format string, args ...interface{}) error {
|
||||
_, err := buf.WriteString(fmt.Sprintf(format, args...))
|
||||
return err
|
||||
}
|
||||
ws := func(s string) error {
|
||||
_, err := buf.WriteString(s)
|
||||
return err
|
||||
}
|
||||
|
||||
if len(diff.Eol) == 0 {
|
||||
diff.Eol = "\n"
|
||||
}
|
||||
|
||||
started := false
|
||||
m := NewMatcher(diff.A, diff.B)
|
||||
for _, g := range m.GetGroupedOpCodes(diff.Context) {
|
||||
if !started {
|
||||
started = true
|
||||
fromDate := ""
|
||||
if len(diff.FromDate) > 0 {
|
||||
fromDate = "\t" + diff.FromDate
|
||||
}
|
||||
toDate := ""
|
||||
if len(diff.ToDate) > 0 {
|
||||
toDate = "\t" + diff.ToDate
|
||||
}
|
||||
if diff.FromFile != "" || diff.ToFile != "" {
|
||||
err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
first, last := g[0], g[len(g)-1]
|
||||
range1 := formatRangeUnified(first.I1, last.I2)
|
||||
range2 := formatRangeUnified(first.J1, last.J2)
|
||||
if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, c := range g {
|
||||
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
|
||||
if c.Tag == 'e' {
|
||||
for _, line := range diff.A[i1:i2] {
|
||||
if err := ws(" " + line); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
if c.Tag == 'r' || c.Tag == 'd' {
|
||||
for _, line := range diff.A[i1:i2] {
|
||||
if err := ws("-" + line); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
if c.Tag == 'r' || c.Tag == 'i' {
|
||||
for _, line := range diff.B[j1:j2] {
|
||||
if err := ws("+" + line); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Like WriteUnifiedDiff but returns the diff as a string.
|
||||
func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
|
||||
w := &bytes.Buffer{}
|
||||
err := WriteUnifiedDiff(w, diff)
|
||||
return string(w.Bytes()), err
|
||||
}
|
||||
|
||||
// Convert range to the "ed" format.
|
||||
func formatRangeContext(start, stop int) string {
|
||||
// Per the diff spec at http://www.unix.org/single_unix_specification/
|
||||
beginning := start + 1 // lines start numbering with one
|
||||
length := stop - start
|
||||
if length == 0 {
|
||||
beginning -= 1 // empty ranges begin at line just before the range
|
||||
}
|
||||
if length <= 1 {
|
||||
return fmt.Sprintf("%d", beginning)
|
||||
}
|
||||
return fmt.Sprintf("%d,%d", beginning, beginning+length-1)
|
||||
}
|
||||
|
||||
type ContextDiff UnifiedDiff
|
||||
|
||||
// Compare two sequences of lines; generate the delta as a context diff.
|
||||
//
|
||||
// Context diffs are a compact way of showing line changes and a few
|
||||
// lines of context. The number of context lines is set by diff.Context
|
||||
// which defaults to three.
|
||||
//
|
||||
// By default, the diff control lines (those with *** or ---) are
|
||||
// created with a trailing newline.
|
||||
//
|
||||
// For inputs that do not have trailing newlines, set the diff.Eol
|
||||
// argument to "" so that the output will be uniformly newline free.
|
||||
//
|
||||
// The context diff format normally has a header for filenames and
|
||||
// modification times. Any or all of these may be specified using
|
||||
// strings for diff.FromFile, diff.ToFile, diff.FromDate, diff.ToDate.
|
||||
// The modification times are normally expressed in the ISO 8601 format.
|
||||
// If not specified, the strings default to blanks.
|
||||
func WriteContextDiff(writer io.Writer, diff ContextDiff) error {
|
||||
buf := bufio.NewWriter(writer)
|
||||
defer buf.Flush()
|
||||
var diffErr error
|
||||
wf := func(format string, args ...interface{}) {
|
||||
_, err := buf.WriteString(fmt.Sprintf(format, args...))
|
||||
if diffErr == nil && err != nil {
|
||||
diffErr = err
|
||||
}
|
||||
}
|
||||
ws := func(s string) {
|
||||
_, err := buf.WriteString(s)
|
||||
if diffErr == nil && err != nil {
|
||||
diffErr = err
|
||||
}
|
||||
}
|
||||
|
||||
if len(diff.Eol) == 0 {
|
||||
diff.Eol = "\n"
|
||||
}
|
||||
|
||||
prefix := map[byte]string{
|
||||
'i': "+ ",
|
||||
'd': "- ",
|
||||
'r': "! ",
|
||||
'e': " ",
|
||||
}
|
||||
|
||||
started := false
|
||||
m := NewMatcher(diff.A, diff.B)
|
||||
for _, g := range m.GetGroupedOpCodes(diff.Context) {
|
||||
if !started {
|
||||
started = true
|
||||
fromDate := ""
|
||||
if len(diff.FromDate) > 0 {
|
||||
fromDate = "\t" + diff.FromDate
|
||||
}
|
||||
toDate := ""
|
||||
if len(diff.ToDate) > 0 {
|
||||
toDate = "\t" + diff.ToDate
|
||||
}
|
||||
if diff.FromFile != "" || diff.ToFile != "" {
|
||||
wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol)
|
||||
wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol)
|
||||
}
|
||||
}
|
||||
|
||||
first, last := g[0], g[len(g)-1]
|
||||
ws("***************" + diff.Eol)
|
||||
|
||||
range1 := formatRangeContext(first.I1, last.I2)
|
||||
wf("*** %s ****%s", range1, diff.Eol)
|
||||
for _, c := range g {
|
||||
if c.Tag == 'r' || c.Tag == 'd' {
|
||||
for _, cc := range g {
|
||||
if cc.Tag == 'i' {
|
||||
continue
|
||||
}
|
||||
for _, line := range diff.A[cc.I1:cc.I2] {
|
||||
ws(prefix[cc.Tag] + line)
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
range2 := formatRangeContext(first.J1, last.J2)
|
||||
wf("--- %s ----%s", range2, diff.Eol)
|
||||
for _, c := range g {
|
||||
if c.Tag == 'r' || c.Tag == 'i' {
|
||||
for _, cc := range g {
|
||||
if cc.Tag == 'd' {
|
||||
continue
|
||||
}
|
||||
for _, line := range diff.B[cc.J1:cc.J2] {
|
||||
ws(prefix[cc.Tag] + line)
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
return diffErr
|
||||
}
|
||||
|
||||
// Like WriteContextDiff but returns the diff as a string.
|
||||
func GetContextDiffString(diff ContextDiff) (string, error) {
|
||||
w := &bytes.Buffer{}
|
||||
err := WriteContextDiff(w, diff)
|
||||
return string(w.Bytes()), err
|
||||
}
|
||||
|
||||
// Split a string on "\n" while preserving them. The output can be used
|
||||
// as input for UnifiedDiff and ContextDiff structures.
|
||||
func SplitLines(s string) []string {
|
||||
lines := strings.SplitAfter(s, "\n")
|
||||
lines[len(lines)-1] += "\n"
|
||||
return lines
|
||||
}
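A minimal usage sketch (not part of the vendored file) for the difflib API above, showing how a unified diff is produced from two strings with the UnifiedDiff parameters and GetUnifiedDiffString defined earlier in this file. The input strings and file names are illustrative.

package main

import (
	"fmt"

	"github.com/pmezard/go-difflib/difflib"
)

func main() {
	diff := difflib.UnifiedDiff{
		A:        difflib.SplitLines("one\ntwo\nthree\n"), // original lines, newline-terminated
		B:        difflib.SplitLines("one\ntwo\nfour\n"),  // modified lines
		FromFile: "before",
		ToFile:   "after",
		Context:  2, // lines of context around each change
	}
	text, err := difflib.GetUnifiedDiffString(diff)
	if err != nil {
		panic(err)
	}
	fmt.Print(text)
}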
|
22 vendor/github.com/stretchr/testify/LICENSE generated vendored Normal file
|
@@ -0,0 +1,22 @@
|
|||
Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell
|
||||
|
||||
Please consider promoting this project if you find it useful.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person
|
||||
obtaining a copy of this software and associated documentation
|
||||
files (the "Software"), to deal in the Software without restriction,
|
||||
including without limitation the rights to use, copy, modify, merge,
|
||||
publish, distribute, sublicense, and/or sell copies of the Software,
|
||||
and to permit persons to whom the Software is furnished to do so,
|
||||
subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included
|
||||
in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
|
||||
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
|
||||
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT
|
||||
OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
|
||||
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
387 vendor/github.com/stretchr/testify/assert/assertion_forward.go generated vendored Normal file
|
@@ -0,0 +1,387 @@
|
|||
/*
|
||||
* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
|
||||
* THIS FILE MUST NOT BE EDITED BY HAND
|
||||
*/
|
||||
|
||||
package assert
|
||||
|
||||
import (
|
||||
|
||||
http "net/http"
|
||||
url "net/url"
|
||||
time "time"
|
||||
)
|
||||
|
||||
|
||||
// Condition uses a Comparison to assert a complex condition.
|
||||
func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool {
|
||||
return Condition(a.t, comp, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// Contains asserts that the specified string, list(array, slice...) or map contains the
|
||||
// specified substring or element.
|
||||
//
|
||||
// a.Contains("Hello World", "World", "But 'Hello World' does contain 'World'")
|
||||
// a.Contains(["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'")
|
||||
// a.Contains({"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'")
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool {
|
||||
return Contains(a.t, s, contains, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either
|
||||
// a slice or a channel with len == 0.
|
||||
//
|
||||
// a.Empty(obj)
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool {
|
||||
return Empty(a.t, object, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// Equal asserts that two objects are equal.
|
||||
//
|
||||
// a.Equal(123, 123, "123 and 123 should be equal")
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
|
||||
return Equal(a.t, expected, actual, msgAndArgs...)
|
||||
}
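A minimal sketch (not part of the vendored file) of how these forwarded assertions are typically used in a test. It assumes assert.New from the same testify/assert package, which builds the *Assertions receiver that these generated methods forward through; values are illustrative.

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestForwardedAssertions(t *testing.T) {
	a := assert.New(t) // wraps t so a.Equal, a.Contains, etc. forward to the package functions
	a.Equal(123, 123, "123 and 123 should be equal")
	a.Contains("Hello World", "World")
	a.NotNil(a)
}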
|
||||
|
||||
|
||||
// EqualError asserts that a function returned an error (i.e. not `nil`)
|
||||
// and that it is equal to the provided error.
|
||||
//
|
||||
// actualObj, err := SomeFunction()
|
||||
// if assert.Error(t, err, "An error was expected") {
|
||||
// assert.Equal(t, err, expectedError)
|
||||
// }
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool {
|
||||
return EqualError(a.t, theError, errString, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// EqualValues asserts that two objects are equal or convertible to the same types
|
||||
// and equal.
|
||||
//
|
||||
// a.EqualValues(uint32(123), int32(123), "123 and 123 should be equal")
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
|
||||
return EqualValues(a.t, expected, actual, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// Error asserts that a function returned an error (i.e. not `nil`).
|
||||
//
|
||||
// actualObj, err := SomeFunction()
|
||||
// if a.Error(err, "An error was expected") {
|
||||
// assert.Equal(t, err, expectedError)
|
||||
// }
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool {
|
||||
return Error(a.t, err, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// Exactly asserts that two objects are equal in value and type.
|
||||
//
|
||||
// a.Exactly(int32(123), int64(123), "123 and 123 should NOT be equal")
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
|
||||
return Exactly(a.t, expected, actual, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// Fail reports a failure through
|
||||
func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool {
|
||||
return Fail(a.t, failureMessage, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// FailNow fails test
|
||||
func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) bool {
|
||||
return FailNow(a.t, failureMessage, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// False asserts that the specified value is false.
|
||||
//
|
||||
// a.False(myBool, "myBool should be false")
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool {
|
||||
return False(a.t, value, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// HTTPBodyContains asserts that a specified handler returns a
|
||||
// body that contains a string.
|
||||
//
|
||||
// a.HTTPBodyContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky")
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) bool {
|
||||
return HTTPBodyContains(a.t, handler, method, url, values, str)
|
||||
}
|
||||
|
||||
|
||||
// HTTPBodyNotContains asserts that a specified handler returns a
|
||||
// body that does not contain a string.
|
||||
//
|
||||
// a.HTTPBodyNotContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky")
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) bool {
|
||||
return HTTPBodyNotContains(a.t, handler, method, url, values, str)
|
||||
}
|
||||
|
||||
|
||||
// HTTPError asserts that a specified handler returns an error status code.
|
||||
//
|
||||
// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values) bool {
|
||||
return HTTPError(a.t, handler, method, url, values)
|
||||
}
|
||||
|
||||
|
||||
// HTTPRedirect asserts that a specified handler returns a redirect status code.
|
||||
//
|
||||
// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values) bool {
|
||||
return HTTPRedirect(a.t, handler, method, url, values)
|
||||
}
|
||||
|
||||
|
||||
// HTTPSuccess asserts that a specified handler returns a success status code.
|
||||
//
|
||||
// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values) bool {
|
||||
return HTTPSuccess(a.t, handler, method, url, values)
|
||||
}
|
||||
|
||||
|
||||
// Implements asserts that an object is implemented by the specified interface.
|
||||
//
|
||||
// a.Implements((*MyInterface)(nil), new(MyObject), "MyObject")
|
||||
func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
|
||||
return Implements(a.t, interfaceObject, object, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// InDelta asserts that the two numerals are within delta of each other.
|
||||
//
|
||||
// a.InDelta(math.Pi, (22 / 7.0), 0.01)
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
|
||||
return InDelta(a.t, expected, actual, delta, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// InDeltaSlice is the same as InDelta, except it compares two slices.
|
||||
func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
|
||||
return InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// InEpsilon asserts that expected and actual have a relative error less than epsilon
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
|
||||
return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// InEpsilonSlice is the same as InEpsilon, except it compares two slices.
|
||||
func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
|
||||
return InEpsilonSlice(a.t, expected, actual, delta, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// IsType asserts that the specified objects are of the same type.
|
||||
func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
|
||||
return IsType(a.t, expectedType, object, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// JSONEq asserts that two JSON strings are equivalent.
|
||||
//
|
||||
// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool {
|
||||
return JSONEq(a.t, expected, actual, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// Len asserts that the specified object has specific length.
|
||||
// Len also fails if the object has a type that len() not accept.
|
||||
//
|
||||
// a.Len(mySlice, 3, "The size of slice is not 3")
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool {
|
||||
return Len(a.t, object, length, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// Nil asserts that the specified object is nil.
|
||||
//
|
||||
// a.Nil(err, "err should be nothing")
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool {
|
||||
return Nil(a.t, object, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// NoError asserts that a function returned no error (i.e. `nil`).
|
||||
//
|
||||
// actualObj, err := SomeFunction()
|
||||
// if a.NoError(err) {
|
||||
// assert.Equal(t, actualObj, expectedObj)
|
||||
// }
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool {
|
||||
return NoError(a.t, err, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
|
||||
// specified substring or element.
|
||||
//
|
||||
// a.NotContains("Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'")
|
||||
// a.NotContains(["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'")
|
||||
// a.NotContains({"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'")
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool {
|
||||
return NotContains(a.t, s, contains, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
|
||||
// a slice or a channel with len == 0.
|
||||
//
|
||||
// if a.NotEmpty(obj) {
|
||||
// assert.Equal(t, "two", obj[1])
|
||||
// }
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool {
|
||||
return NotEmpty(a.t, object, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// NotEqual asserts that the specified values are NOT equal.
|
||||
//
|
||||
// a.NotEqual(obj1, obj2, "two objects shouldn't be equal")
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
|
||||
return NotEqual(a.t, expected, actual, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// NotNil asserts that the specified object is not nil.
|
||||
//
|
||||
// a.NotNil(err, "err should be something")
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool {
|
||||
return NotNil(a.t, object, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
|
||||
//
|
||||
// a.NotPanics(func(){
|
||||
// RemainCalm()
|
||||
// }, "Calling RemainCalm() should NOT panic")
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
|
||||
return NotPanics(a.t, f, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// NotRegexp asserts that a specified regexp does not match a string.
|
||||
//
|
||||
// a.NotRegexp(regexp.MustCompile("starts"), "it's starting")
|
||||
// a.NotRegexp("^start", "it's not starting")
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
|
||||
return NotRegexp(a.t, rx, str, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// NotZero asserts that i is not the zero value for its type and returns the truth.
|
||||
func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool {
|
||||
return NotZero(a.t, i, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// Panics asserts that the code inside the specified PanicTestFunc panics.
|
||||
//
|
||||
// a.Panics(func(){
|
||||
// GoCrazy()
|
||||
// }, "Calling GoCrazy() should panic")
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
|
||||
return Panics(a.t, f, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// Regexp asserts that a specified regexp matches a string.
|
||||
//
|
||||
// a.Regexp(regexp.MustCompile("start"), "it's starting")
|
||||
// a.Regexp("start...$", "it's not starting")
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
|
||||
return Regexp(a.t, rx, str, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// True asserts that the specified value is true.
|
||||
//
|
||||
// a.True(myBool, "myBool should be true")
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool {
|
||||
return True(a.t, value, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// WithinDuration asserts that the two times are within duration delta of each other.
|
||||
//
|
||||
// a.WithinDuration(time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s")
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool {
|
||||
return WithinDuration(a.t, expected, actual, delta, msgAndArgs...)
|
||||
}
|
||||
|
||||
|
||||
// Zero asserts that i is the zero value for its type and returns the truth.
|
||||
func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool {
|
||||
return Zero(a.t, i, msgAndArgs...)
|
||||
}
|
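For orientation only (not part of the vendored diff): a minimal sketch of how these generated forwarder methods read in a test. The test name and values are invented for illustration; the calls are the methods shown above.

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestForwarders(t *testing.T) {
	a := assert.New(t) // bind *testing.T once, then drop the t argument on every call

	// InDelta checks an absolute tolerance; InEpsilon checks a relative one.
	a.InDelta(3.14159, 22.0/7.0, 0.01)
	a.InEpsilon(100.0, 101.0, 0.02)

	// JSONEq compares by JSON structure, so key order does not matter.
	a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)

	// Zero and NotZero compare against the type's zero value.
	a.Zero("")
	a.NotZero(42)
}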
1004 vendor/github.com/stretchr/testify/assert/assertions.go generated vendored Normal file
File diff suppressed because it is too large
45 vendor/github.com/stretchr/testify/assert/doc.go generated vendored Normal file
@ -0,0 +1,45 @@
// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system.
//
// Example Usage
//
// The following is a complete example using assert in a standard test function:
//    import (
//      "testing"
//      "github.com/stretchr/testify/assert"
//    )
//
//    func TestSomething(t *testing.T) {
//
//      var a string = "Hello"
//      var b string = "Hello"
//
//      assert.Equal(t, a, b, "The two words should be the same.")
//
//    }
//
// if you assert many times, use the format below:
//
//    import (
//      "testing"
//      "github.com/stretchr/testify/assert"
//    )
//
//    func TestSomething(t *testing.T) {
//      assert := assert.New(t)
//
//      var a string = "Hello"
//      var b string = "Hello"
//
//      assert.Equal(a, b, "The two words should be the same.")
//    }
//
// Assertions
//
// Assertions allow you to easily write test code, and are global funcs in the `assert` package.
// All assertion functions take, as the first argument, the `*testing.T` object provided by the
// testing framework. This allows the assertion funcs to write the failings and other details to
// the correct place.
//
// Every assertion function also takes an optional string message as the final argument,
// allowing custom error messages to be appended to the message the assertion method outputs.
package assert
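As a side note on the "optional string message" mentioned in the package doc: a sketch of the two message forms, assuming the usual behaviour where a single trailing string is used verbatim and several trailing arguments are formatted fmt.Sprintf-style. The lookup function is hypothetical.

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// lookup stands in for whatever function is under test.
func lookup(key string) (string, error) { return "value-for-" + key, nil }

func TestLookup(t *testing.T) {
	key := "hostname"
	got, err := lookup(key)

	// Single trailing string: used as the failure message as-is.
	assert.NoError(t, err, "lookup should not fail")

	// Several trailing arguments: the first is treated as a format string.
	assert.NotEmpty(t, got, "no value found for key %q", key)
}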
10 vendor/github.com/stretchr/testify/assert/errors.go generated vendored Normal file
@ -0,0 +1,10 @@
package assert

import (
	"errors"
)

// AnError is an error instance useful for testing. If the code does not care
// about error specifics, and only needs to return the error for example, this
// error should be used to make the test code more readable.
var AnError = errors.New("assert.AnError general error for testing")
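A sketch of the intended use of AnError: a stubbed dependency returns it so the test can check that an error is propagated without caring which error it is. The failingStore type and the test are illustrative only.

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// failingStore is a fake dependency that always fails.
type failingStore struct{}

func (failingStore) Get(string) (string, error) { return "", assert.AnError }

func TestGetPropagatesError(t *testing.T) {
	_, err := failingStore{}.Get("key")
	assert.Error(t, err)
	assert.Equal(t, assert.AnError, err) // which error it is does not matter, only that it came through
}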
16 vendor/github.com/stretchr/testify/assert/forward_assertions.go generated vendored Normal file
@ -0,0 +1,16 @@
package assert

// Assertions provides assertion methods around the
// TestingT interface.
type Assertions struct {
	t TestingT
}

// New makes a new Assertions object for the specified TestingT.
func New(t TestingT) *Assertions {
	return &Assertions{
		t: t,
	}
}

//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl
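Because Assertions depends only on the TestingT interface rather than on *testing.T directly, New can wrap anything that knows how to report a failure. A toy sketch, assuming TestingT's single Errorf method; the recorder type is invented for illustration.

package example

import (
	"fmt"

	"github.com/stretchr/testify/assert"
)

// recorder is a minimal TestingT that just collects failure messages.
type recorder struct {
	failures []string
}

func (r *recorder) Errorf(format string, args ...interface{}) {
	r.failures = append(r.failures, fmt.Sprintf(format, args...))
}

func demo() {
	r := &recorder{}
	a := assert.New(r)
	a.Equal(1, 2) // fails, but the message lands in r.failures instead of a real test
	fmt.Printf("recorded %d failure(s)\n", len(r.failures))
}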
106 vendor/github.com/stretchr/testify/assert/http_assertions.go generated vendored Normal file
@ -0,0 +1,106 @@
package assert

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"net/url"
	"strings"
)

// httpCode is a helper that returns HTTP code of the response. It returns -1
// if building a new request fails.
func httpCode(handler http.HandlerFunc, method, url string, values url.Values) int {
	w := httptest.NewRecorder()
	req, err := http.NewRequest(method, url+"?"+values.Encode(), nil)
	if err != nil {
		return -1
	}
	handler(w, req)
	return w.Code
}

// HTTPSuccess asserts that a specified handler returns a success status code.
//
// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil)
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool {
	code := httpCode(handler, method, url, values)
	if code == -1 {
		return false
	}
	return code >= http.StatusOK && code <= http.StatusPartialContent
}

// HTTPRedirect asserts that a specified handler returns a redirect status code.
//
// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool {
	code := httpCode(handler, method, url, values)
	if code == -1 {
		return false
	}
	return code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect
}

// HTTPError asserts that a specified handler returns an error status code.
//
// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool {
	code := httpCode(handler, method, url, values)
	if code == -1 {
		return false
	}
	return code >= http.StatusBadRequest
}

// HTTPBody is a helper that returns HTTP body of the response. It returns
// empty string if building a new request fails.
func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string {
	w := httptest.NewRecorder()
	req, err := http.NewRequest(method, url+"?"+values.Encode(), nil)
	if err != nil {
		return ""
	}
	handler(w, req)
	return w.Body.String()
}

// HTTPBodyContains asserts that a specified handler returns a
// body that contains a string.
//
// assert.HTTPBodyContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky")
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}) bool {
	body := HTTPBody(handler, method, url, values)

	contains := strings.Contains(body, fmt.Sprint(str))
	if !contains {
		Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
	}

	return contains
}

// HTTPBodyNotContains asserts that a specified handler returns a
// body that does not contain a string.
//
// assert.HTTPBodyNotContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky")
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}) bool {
	body := HTTPBody(handler, method, url, values)

	contains := strings.Contains(body, fmt.Sprint(str))
	if contains {
		Fail(t, "Expected response body for %s to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)
	}

	return !contains
}
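A sketch of how these HTTP helpers fit together in a test. The greet handler is invented for the example; the assert calls are the functions defined above, which drive the handler through an httptest recorder rather than a real server.

package example_test

import (
	"fmt"
	"net/http"
	"net/url"
	"testing"

	"github.com/stretchr/testify/assert"
)

// greet is a throwaway handler: 400 without a name, a greeting otherwise.
func greet(w http.ResponseWriter, r *http.Request) {
	name := r.URL.Query().Get("name")
	if name == "" {
		http.Error(w, "missing name", http.StatusBadRequest)
		return
	}
	fmt.Fprintf(w, "hello, %s", name)
}

func TestGreet(t *testing.T) {
	ok := url.Values{"name": []string{"gopher"}}

	assert.HTTPSuccess(t, greet, "GET", "/greet", ok)                       // 200 falls in the success range
	assert.HTTPBodyContains(t, greet, "GET", "/greet", ok, "hello, gopher") // body check goes through HTTPBody
	assert.HTTPError(t, greet, "GET", "/greet", nil)                        // missing name -> 400, i.e. >= StatusBadRequest
}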