
Merge pull request #27998 from dnephin/compose-on-swarm

Support `docker stack deploy` from a Compose file
Victor Vieux, 8 years ago
commit 750d634d62
55 changed files with 16548 additions and 158 deletions
  1. +3 -2      cli/command/service/opts.go
  2. +1 -1      cli/command/service/update.go
  3. +0 -1      cli/command/stack/cmd.go
  4. +8 -0      cli/command/stack/common.go
  5. +0 -39     cli/command/stack/config.go
  6. +477 -80   cli/command/stack/deploy.go
  7. +80 -0     cli/command/stack/deploy_bundlefile.go
  8. +5 -4      cli/command/stack/opts.go
  9. +1 -1      daemon/cluster/executor/container/container.go
 10. +30 -20    integration-cli/docker_cli_stack_test.go
 11. +9 -0      integration-cli/fixtures/deploy/default.yaml
 12. +16 -10    opts/opts.go
 13. +8 -0      vendor.conf
 14. +191 -0    vendor/github.com/aanand/compose-file/LICENSE
 15. +89 -0     vendor/github.com/aanand/compose-file/interpolation/interpolation.go
 16. +597 -0    vendor/github.com/aanand/compose-file/loader/loader.go
 17. +70 -0     vendor/github.com/aanand/compose-file/schema/bindata.go
 18. +113 -0    vendor/github.com/aanand/compose-file/schema/schema.go
 19. +108 -0    vendor/github.com/aanand/compose-file/template/template.go
 20. +200 -0    vendor/github.com/aanand/compose-file/types/types.go
 21. +21 -0     vendor/github.com/mitchellh/mapstructure/LICENSE
 22. +154 -0    vendor/github.com/mitchellh/mapstructure/decode_hooks.go
 23. +50 -0     vendor/github.com/mitchellh/mapstructure/error.go
 24. +782 -0    vendor/github.com/mitchellh/mapstructure/mapstructure.go
 25. +217 -0    vendor/github.com/xeipuuv/gojsonpointer/pointer.go
 26. +141 -0    vendor/github.com/xeipuuv/gojsonreference/reference.go
 27. +242 -0    vendor/github.com/xeipuuv/gojsonschema/errors.go
 28. +178 -0    vendor/github.com/xeipuuv/gojsonschema/format_checkers.go
 29. +37 -0     vendor/github.com/xeipuuv/gojsonschema/internalLog.go
 30. +72 -0     vendor/github.com/xeipuuv/gojsonschema/jsonContext.go
 31. +286 -0    vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go
 32. +275 -0    vendor/github.com/xeipuuv/gojsonschema/locales.go
 33. +171 -0    vendor/github.com/xeipuuv/gojsonschema/result.go
 34. +908 -0    vendor/github.com/xeipuuv/gojsonschema/schema.go
 35. +107 -0    vendor/github.com/xeipuuv/gojsonschema/schemaPool.go
 36. +67 -0     vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go
 37. +83 -0     vendor/github.com/xeipuuv/gojsonschema/schemaType.go
 38. +227 -0    vendor/github.com/xeipuuv/gojsonschema/subSchema.go
 39. +58 -0     vendor/github.com/xeipuuv/gojsonschema/types.go
 40. +202 -0    vendor/github.com/xeipuuv/gojsonschema/utils.go
 41. +829 -0    vendor/github.com/xeipuuv/gojsonschema/validation.go
 42. +188 -0    vendor/gopkg.in/yaml.v2/LICENSE
 43. +742 -0    vendor/gopkg.in/yaml.v2/apic.go
 44. +683 -0    vendor/gopkg.in/yaml.v2/decode.go
 45. +1685 -0   vendor/gopkg.in/yaml.v2/emitterc.go
 46. +306 -0    vendor/gopkg.in/yaml.v2/encode.go
 47. +1096 -0   vendor/gopkg.in/yaml.v2/parserc.go
 48. +394 -0    vendor/gopkg.in/yaml.v2/readerc.go
 49. +203 -0    vendor/gopkg.in/yaml.v2/resolve.go
 50. +2710 -0   vendor/gopkg.in/yaml.v2/scannerc.go
 51. +104 -0    vendor/gopkg.in/yaml.v2/sorter.go
 52. +89 -0     vendor/gopkg.in/yaml.v2/writerc.go
 53. +346 -0    vendor/gopkg.in/yaml.v2/yaml.go
 54. +716 -0    vendor/gopkg.in/yaml.v2/yamlh.go
 55. +173 -0    vendor/gopkg.in/yaml.v2/yamlprivateh.go

+ 3 - 2
cli/command/service/opts.go

@@ -297,7 +297,7 @@ func (e *endpointOptions) ToEndpointSpec() *swarm.EndpointSpec {
 	ports, portBindings, _ := nat.ParsePortSpecs(e.ports.GetAll())
 
 	for port := range ports {
-		portConfigs = append(portConfigs, convertPortToPortConfig(port, portBindings)...)
+		portConfigs = append(portConfigs, ConvertPortToPortConfig(port, portBindings)...)
 	}
 
 	return &swarm.EndpointSpec{
@@ -306,7 +306,8 @@ func (e *endpointOptions) ToEndpointSpec() *swarm.EndpointSpec {
 	}
 }
 
-func convertPortToPortConfig(
+// ConvertPortToPortConfig converts ports to the swarm type
+func ConvertPortToPortConfig(
 	port nat.Port,
 	portBindings map[nat.Port][]nat.PortBinding,
 ) []swarm.PortConfig {
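
Exporting this helper is what lets the new Compose code path (in `deploy.go` below) reuse the service command's port conversion. A minimal, self-contained sketch of the `nat.ParsePortSpecs` input it consumes, assuming the `github.com/docker/go-connections/nat` package already vendored in this repo:

```go
package main

import (
	"fmt"

	"github.com/docker/go-connections/nat"
)

func main() {
	// "8080:80/tcp" publishes container port 80 on host port 8080; this is
	// the same spec format convertEndpointSpec feeds to ConvertPortToPortConfig.
	ports, bindings, err := nat.ParsePortSpecs([]string{"8080:80/tcp"})
	if err != nil {
		panic(err)
	}
	for port := range ports {
		fmt.Printf("target=%s proto=%s bindings=%v\n", port.Port(), port.Proto(), bindings[port])
	}
}
```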

+ 1 - 1
cli/command/service/update.go

@@ -639,7 +639,7 @@ func updatePorts(flags *pflag.FlagSet, portConfig *[]swarm.PortConfig) error {
 		ports, portBindings, _ := nat.ParsePortSpecs(values)
 
 		for port := range ports {
-			newConfigs := convertPortToPortConfig(port, portBindings)
+			newConfigs := ConvertPortToPortConfig(port, portBindings)
 			for _, entry := range newConfigs {
 				if v, ok := portSet[portConfigToString(&entry)]; ok && v != entry {
 					return fmt.Errorf("conflicting port mapping between %v:%v/%s and %v:%v/%s", entry.PublishedPort, entry.TargetPort, entry.Protocol, v.PublishedPort, v.TargetPort, v.Protocol)

+ 0 - 1
cli/command/stack/cmd.go

@@ -19,7 +19,6 @@ func NewStackCommand(dockerCli *command.DockerCli) *cobra.Command {
 		Tags: map[string]string{"experimental": "", "version": "1.25"},
 	}
 	cmd.AddCommand(
-		newConfigCommand(dockerCli),
 		newDeployCommand(dockerCli),
 		newListCommand(dockerCli),
 		newRemoveCommand(dockerCli),

+ 8 - 0
cli/command/stack/common.go

@@ -46,3 +46,11 @@ func getNetworks(
 		ctx,
 		types.NetworkListOptions{Filters: getStackFilter(namespace)})
 }
+
+type namespace struct {
+	name string
+}
+
+func (n namespace) scope(name string) string {
+	return n.name + "_" + name
+}
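
The new `namespace` helper is the core of stack scoping: every network, volume, and service the deploy command creates gets the stack name prefixed with an underscore. A standalone sketch of the naming it produces:

```go
package main

import "fmt"

// Copy of the namespace helper above, shown in isolation.
type namespace struct {
	name string
}

func (n namespace) scope(name string) string {
	return n.name + "_" + name
}

func main() {
	ns := namespace{name: "mystack"}
	fmt.Println(ns.scope("default")) // mystack_default
	fmt.Println(ns.scope("web"))     // mystack_web
}
```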

+ 0 - 39
cli/command/stack/config.go

@@ -1,39 +0,0 @@
-package stack
-
-import (
-	"github.com/docker/docker/cli"
-	"github.com/docker/docker/cli/command"
-	"github.com/docker/docker/cli/command/bundlefile"
-	"github.com/spf13/cobra"
-)
-
-type configOptions struct {
-	bundlefile string
-	namespace  string
-}
-
-func newConfigCommand(dockerCli *command.DockerCli) *cobra.Command {
-	var opts configOptions
-
-	cmd := &cobra.Command{
-		Use:   "config [OPTIONS] STACK",
-		Short: "Print the stack configuration",
-		Args:  cli.ExactArgs(1),
-		RunE: func(cmd *cobra.Command, args []string) error {
-			opts.namespace = args[0]
-			return runConfig(dockerCli, opts)
-		},
-	}
-
-	flags := cmd.Flags()
-	addBundlefileFlag(&opts.bundlefile, flags)
-	return cmd
-}
-
-func runConfig(dockerCli *command.DockerCli, opts configOptions) error {
-	bundle, err := loadBundlefile(dockerCli.Err(), opts.namespace, opts.bundlefile)
-	if err != nil {
-		return err
-	}
-	return bundlefile.Print(dockerCli.Out(), bundle)
-}

+ 477 - 80
cli/command/stack/deploy.go

@@ -2,16 +2,26 @@ package stack
 
 import (
 	"fmt"
+	"io/ioutil"
+	"os"
+	"sort"
 	"strings"
 
 	"github.com/spf13/cobra"
 	"golang.org/x/net/context"
 
+	"github.com/aanand/compose-file/loader"
+	composetypes "github.com/aanand/compose-file/types"
 	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/mount"
+	networktypes "github.com/docker/docker/api/types/network"
 	"github.com/docker/docker/api/types/swarm"
 	"github.com/docker/docker/cli"
 	"github.com/docker/docker/cli/command"
-	"github.com/docker/docker/cli/command/bundlefile"
+	servicecmd "github.com/docker/docker/cli/command/service"
+	"github.com/docker/docker/opts"
+	runconfigopts "github.com/docker/docker/runconfig/opts"
+	"github.com/docker/go-connections/nat"
 )
 
 const (
@@ -20,6 +30,7 @@ const (
 
 type deployOptions struct {
 	bundlefile       string
+	composefile      string
 	namespace        string
 	sendRegistryAuth bool
 }
@@ -30,10 +41,10 @@ func newDeployCommand(dockerCli *command.DockerCli) *cobra.Command {
 	cmd := &cobra.Command{
 		Use:     "deploy [OPTIONS] STACK",
 		Aliases: []string{"up"},
-		Short:   "Create and update a stack from a Distributed Application Bundle (DAB)",
+		Short:   "Deploy a new stack or update an existing stack",
 		Args:    cli.ExactArgs(1),
 		RunE: func(cmd *cobra.Command, args []string) error {
-			opts.namespace = strings.TrimSuffix(args[0], ".dab")
+			opts.namespace = args[0]
 			return runDeploy(dockerCli, opts)
 		},
 		Tags: map[string]string{"experimental": "", "version": "1.25"},
@@ -41,57 +52,160 @@ func newDeployCommand(dockerCli *command.DockerCli) *cobra.Command {
 
 	flags := cmd.Flags()
 	addBundlefileFlag(&opts.bundlefile, flags)
+	addComposefileFlag(&opts.composefile, flags)
 	addRegistryAuthFlag(&opts.sendRegistryAuth, flags)
 	return cmd
 }
 
 func runDeploy(dockerCli *command.DockerCli, opts deployOptions) error {
-	bundle, err := loadBundlefile(dockerCli.Err(), opts.namespace, opts.bundlefile)
+	switch {
+	case opts.bundlefile == "" && opts.composefile == "":
+		return fmt.Errorf("Please specify either a bundle file (with --bundle-file) or a Compose file (with --compose-file).")
+	case opts.bundlefile != "" && opts.composefile != "":
+		return fmt.Errorf("You cannot specify both a bundle file and a Compose file.")
+	case opts.bundlefile != "":
+		return deployBundle(dockerCli, opts)
+	default:
+		return deployCompose(dockerCli, opts)
+	}
+}
+
+func deployCompose(dockerCli *command.DockerCli, opts deployOptions) error {
+	configDetails, err := getConfigDetails(opts)
 	if err != nil {
 		return err
 	}
 
-	info, err := dockerCli.Client().Info(context.Background())
+	config, err := loader.Load(configDetails)
 	if err != nil {
+		if fpe, ok := err.(*loader.ForbiddenPropertiesError); ok {
+			return fmt.Errorf("Compose file contains unsupported options:\n\n%s\n",
+				propertyWarnings(fpe.Properties))
+		}
+
 		return err
 	}
-	if !info.Swarm.ControlAvailable {
-		return fmt.Errorf("This node is not a swarm manager. Use \"docker swarm init\" or \"docker swarm join\" to connect this node to swarm and try again.")
+
+	unsupportedProperties := loader.GetUnsupportedProperties(configDetails)
+	if len(unsupportedProperties) > 0 {
+		fmt.Fprintf(dockerCli.Err(), "Ignoring unsupported options: %s\n\n",
+			strings.Join(unsupportedProperties, ", "))
+	}
+
+	deprecatedProperties := loader.GetDeprecatedProperties(configDetails)
+	if len(deprecatedProperties) > 0 {
+		fmt.Fprintf(dockerCli.Err(), "Ignoring deprecated options:\n\n%s\n\n",
+			propertyWarnings(deprecatedProperties))
 	}
 
-	networks := getUniqueNetworkNames(bundle.Services)
 	ctx := context.Background()
+	namespace := namespace{name: opts.namespace}
 
-	if err := updateNetworks(ctx, dockerCli, networks, opts.namespace); err != nil {
+	networks := convertNetworks(namespace, config.Networks)
+	if err := createNetworks(ctx, dockerCli, namespace, networks); err != nil {
+		return err
+	}
+	services, err := convertServices(namespace, config)
+	if err != nil {
 		return err
 	}
-	return deployServices(ctx, dockerCli, bundle.Services, opts.namespace, opts.sendRegistryAuth)
+	return deployServices(ctx, dockerCli, services, namespace, opts.sendRegistryAuth)
 }
 
-func getUniqueNetworkNames(services map[string]bundlefile.Service) []string {
-	networkSet := make(map[string]bool)
-	for _, service := range services {
-		for _, network := range service.Networks {
-			networkSet[network] = true
-		}
+func propertyWarnings(properties map[string]string) string {
+	var msgs []string
+	for name, description := range properties {
+		msgs = append(msgs, fmt.Sprintf("%s: %s", name, description))
+	}
+	sort.Strings(msgs)
+	return strings.Join(msgs, "\n\n")
+}
+
+func getConfigDetails(opts deployOptions) (composetypes.ConfigDetails, error) {
+	var details composetypes.ConfigDetails
+	var err error
+
+	details.WorkingDir, err = os.Getwd()
+	if err != nil {
+		return details, err
+	}
+
+	configFile, err := getConfigFile(opts.composefile)
+	if err != nil {
+		return details, err
+	}
+	// TODO: support multiple files
+	details.ConfigFiles = []composetypes.ConfigFile{*configFile}
+	return details, nil
+}
+
+func getConfigFile(filename string) (*composetypes.ConfigFile, error) {
+	bytes, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return nil, err
+	}
+	config, err := loader.ParseYAML(bytes)
+	if err != nil {
+		return nil, err
+	}
+	return &composetypes.ConfigFile{
+		Filename: filename,
+		Config:   config,
+	}, nil
+}
+
+func convertNetworks(
+	namespace namespace,
+	networks map[string]composetypes.NetworkConfig,
+) map[string]types.NetworkCreate {
+	if networks == nil {
+		networks = make(map[string]composetypes.NetworkConfig)
 	}
 
-	networks := []string{}
-	for network := range networkSet {
-		networks = append(networks, network)
+	// TODO: only add default network if it's used
+	networks["default"] = composetypes.NetworkConfig{}
+
+	result := make(map[string]types.NetworkCreate)
+
+	for internalName, network := range networks {
+		if network.External.Name != "" {
+			continue
+		}
+
+		createOpts := types.NetworkCreate{
+			Labels:  getStackLabels(namespace.name, network.Labels),
+			Driver:  network.Driver,
+			Options: network.DriverOpts,
+		}
+
+		if network.Ipam.Driver != "" || len(network.Ipam.Config) > 0 {
+			createOpts.IPAM = &networktypes.IPAM{}
+		}
+
+		if network.Ipam.Driver != "" {
+			createOpts.IPAM.Driver = network.Ipam.Driver
+		}
+		for _, ipamConfig := range network.Ipam.Config {
+			config := networktypes.IPAMConfig{
+				Subnet: ipamConfig.Subnet,
+			}
+			createOpts.IPAM.Config = append(createOpts.IPAM.Config, config)
+		}
+		result[internalName] = createOpts
 	}
-	return networks
+
+	return result
 }
 
-func updateNetworks(
+func createNetworks(
 	ctx context.Context,
 	dockerCli *command.DockerCli,
-	networks []string,
-	namespace string,
+	namespace namespace,
+	networks map[string]types.NetworkCreate,
 ) error {
 	client := dockerCli.Client()
 
-	existingNetworks, err := getNetworks(ctx, client, namespace)
+	existingNetworks, err := getNetworks(ctx, client, namespace.name)
 	if err != nil {
 		return err
 	}
@@ -101,47 +215,170 @@ func updateNetworks(
 		existingNetworkMap[network.Name] = network
 	}
 
-	createOpts := types.NetworkCreate{
-		Labels: getStackLabels(namespace, nil),
-		Driver: defaultNetworkDriver,
-	}
-
-	for _, internalName := range networks {
-		name := fmt.Sprintf("%s_%s", namespace, internalName)
-
+	for internalName, createOpts := range networks {
+		name := namespace.scope(internalName)
 		if _, exists := existingNetworkMap[name]; exists {
 		if _, exists := existingNetworkMap[name]; exists {
 			continue
 		}
+		if createOpts.Driver == "" {
+			createOpts.Driver = defaultNetworkDriver
+		}
+
 		fmt.Fprintf(dockerCli.Out(), "Creating network %s\n", name)
 		fmt.Fprintf(dockerCli.Out(), "Creating network %s\n", name)
 		if _, err := client.NetworkCreate(ctx, name, createOpts); err != nil {
 			return err
 		}
 	}
+
 	return nil
 }
 
+func convertServiceNetworks(
+	networks map[string]*composetypes.ServiceNetworkConfig,
+	namespace namespace,
+	name string,
+) []swarm.NetworkAttachmentConfig {
+	if len(networks) == 0 {
+		return []swarm.NetworkAttachmentConfig{
+			{
+				Target:  namespace.scope("default"),
+				Aliases: []string{name},
+			},
+		}
+	}
+
 	nets := []swarm.NetworkAttachmentConfig{}
 	nets := []swarm.NetworkAttachmentConfig{}
-	for _, network := range networks {
+	for networkName, network := range networks {
 		nets = append(nets, swarm.NetworkAttachmentConfig{
-			Target:  namespace + "_" + network,
-			Aliases: []string{name},
+			Target:  namespace.scope(networkName),
+			Aliases: append(network.Aliases, name),
 		})
 	}
 	return nets
 }
 
+	serviceVolumes []string,
+	stackVolumes map[string]composetypes.VolumeConfig,
+	namespace namespace,
+) ([]mount.Mount, error) {
+	var mounts []mount.Mount
+
+	for _, volumeSpec := range serviceVolumes {
+		mount, err := convertVolumeToMount(volumeSpec, stackVolumes, namespace)
+		if err != nil {
+			return nil, err
+		}
+		mounts = append(mounts, mount)
+	}
+	return mounts, nil
+}
+
+func convertVolumeToMount(
+	volumeSpec string,
+	stackVolumes map[string]composetypes.VolumeConfig,
+	namespace namespace,
+) (mount.Mount, error) {
+	var source, target string
+	var mode []string
+
+	// TODO: split Windows path mappings properly
+	parts := strings.SplitN(volumeSpec, ":", 3)
+
+	switch len(parts) {
+	case 3:
+		source = parts[0]
+		target = parts[1]
+		mode = strings.Split(parts[2], ",")
+	case 2:
+		source = parts[0]
+		target = parts[1]
+	case 1:
+		target = parts[0]
+	default:
+		return mount.Mount{}, fmt.Errorf("invalid volume: %s", volumeSpec)
+	}
+
+	// TODO: catch Windows paths here
+	if strings.HasPrefix(source, "/") {
+		return mount.Mount{
+			Type:        mount.TypeBind,
+			Source:      source,
+			Target:      target,
+			ReadOnly:    isReadOnly(mode),
+			BindOptions: getBindOptions(mode),
+		}, nil
+	}
+
+	stackVolume, exists := stackVolumes[source]
+	if !exists {
+		return mount.Mount{}, fmt.Errorf("undefined volume: %s", source)
+	}
+
+	var volumeOptions *mount.VolumeOptions
+	if stackVolume.External.Name != "" {
+		source = stackVolume.External.Name
+	} else {
+		volumeOptions = &mount.VolumeOptions{
+			Labels: stackVolume.Labels,
+			NoCopy: isNoCopy(mode),
+		}
+
+		if stackVolume.Driver != "" {
+			volumeOptions.DriverConfig = &mount.Driver{
+				Name:    stackVolume.Driver,
+				Options: stackVolume.DriverOpts,
+			}
+		}
+		source = namespace.scope(source)
+	}
+	return mount.Mount{
+		Type:          mount.TypeVolume,
+		Source:        source,
+		Target:        target,
+		ReadOnly:      isReadOnly(mode),
+		VolumeOptions: volumeOptions,
+	}, nil
+}
+
+func modeHas(mode []string, field string) bool {
+	for _, item := range mode {
+		if item == field {
+			return true
+		}
+	}
+	return false
+}
+
+func isReadOnly(mode []string) bool {
+	return modeHas(mode, "ro")
+}
+
+func isNoCopy(mode []string) bool {
+	return modeHas(mode, "nocopy")
+}
+
+func getBindOptions(mode []string) *mount.BindOptions {
+	for _, item := range mode {
+		if strings.Contains(item, "private") || strings.Contains(item, "shared") || strings.Contains(item, "slave") {
+			return &mount.BindOptions{Propagation: mount.Propagation(item)}
+		}
+	}
+	return nil
+}
+
 func deployServices(
 	ctx context.Context,
 	dockerCli *command.DockerCli,
-	services map[string]bundlefile.Service,
-	namespace string,
+	services map[string]swarm.ServiceSpec,
+	namespace namespace,
 	sendAuth bool,
 ) error {
 	apiClient := dockerCli.Client()
 	out := dockerCli.Out()
 
-	existingServices, err := getServices(ctx, apiClient, namespace)
+	existingServices, err := getServices(ctx, apiClient, namespace.name)
 	if err != nil {
 		return err
 	}
@@ -151,47 +388,8 @@ func deployServices(
 		existingServiceMap[service.Spec.Name] = service
 	}
 
-	for internalName, service := range services {
-		name := fmt.Sprintf("%s_%s", namespace, internalName)
-
-		var ports []swarm.PortConfig
-		for _, portSpec := range service.Ports {
-			ports = append(ports, swarm.PortConfig{
-				Protocol:   swarm.PortConfigProtocol(portSpec.Protocol),
-				TargetPort: portSpec.Port,
-			})
-		}
-
-		serviceSpec := swarm.ServiceSpec{
-			Annotations: swarm.Annotations{
-				Name:   name,
-				Labels: getStackLabels(namespace, service.Labels),
-			},
-			TaskTemplate: swarm.TaskSpec{
-				ContainerSpec: swarm.ContainerSpec{
-					Image:   service.Image,
-					Command: service.Command,
-					Args:    service.Args,
-					Env:     service.Env,
-					// Service Labels will not be copied to Containers
-					// automatically during the deployment so we apply
-					// it here.
-					Labels: getStackLabels(namespace, nil),
-				},
-			},
-			EndpointSpec: &swarm.EndpointSpec{
-				Ports: ports,
-			},
-			Networks: convertNetworks(service.Networks, namespace, internalName),
-		}
-
-		cspec := &serviceSpec.TaskTemplate.ContainerSpec
-		if service.WorkingDir != nil {
-			cspec.Dir = *service.WorkingDir
-		}
-		if service.User != nil {
-			cspec.User = *service.User
-		}
+	for internalName, serviceSpec := range services {
+		name := namespace.scope(internalName)
 
 		encodedAuth := ""
 		if sendAuth {
@@ -234,3 +432,202 @@ func deployServices(
 
 	return nil
 }
+
+func convertServices(
+	namespace namespace,
+	config *composetypes.Config,
+) (map[string]swarm.ServiceSpec, error) {
+	result := make(map[string]swarm.ServiceSpec)
+
+	services := config.Services
+	volumes := config.Volumes
+
+	for _, service := range services {
+		serviceSpec, err := convertService(namespace, service, volumes)
+		if err != nil {
+			return nil, err
+		}
+		result[service.Name] = serviceSpec
+	}
+
+	return result, nil
+}
+
+func convertService(
+	namespace namespace,
+	service composetypes.ServiceConfig,
+	volumes map[string]composetypes.VolumeConfig,
+) (swarm.ServiceSpec, error) {
+	name := namespace.scope(service.Name)
+
+	endpoint, err := convertEndpointSpec(service.Ports)
+	if err != nil {
+		return swarm.ServiceSpec{}, err
+	}
+
+	mode, err := convertDeployMode(service.Deploy.Mode, service.Deploy.Replicas)
+	if err != nil {
+		return swarm.ServiceSpec{}, err
+	}
+
+	mounts, err := convertVolumes(service.Volumes, volumes, namespace)
+	if err != nil {
+		// TODO: better error message (include service name)
+		return swarm.ServiceSpec{}, err
+	}
+
+	resources, err := convertResources(service.Deploy.Resources)
+	if err != nil {
+		return swarm.ServiceSpec{}, err
+	}
+
+	restartPolicy, err := convertRestartPolicy(
+		service.Restart, service.Deploy.RestartPolicy)
+	if err != nil {
+		return swarm.ServiceSpec{}, err
+	}
+
+	serviceSpec := swarm.ServiceSpec{
+		Annotations: swarm.Annotations{
+			Name:   name,
+			Labels: getStackLabels(namespace.name, service.Deploy.Labels),
+		},
+		TaskTemplate: swarm.TaskSpec{
+			ContainerSpec: swarm.ContainerSpec{
+				Image:           service.Image,
+				Command:         service.Entrypoint,
+				Args:            service.Command,
+				Hostname:        service.Hostname,
+				Env:             convertEnvironment(service.Environment),
+				Labels:          getStackLabels(namespace.name, service.Labels),
+				Dir:             service.WorkingDir,
+				User:            service.User,
+				Mounts:          mounts,
+				StopGracePeriod: service.StopGracePeriod,
+			},
+			Resources:     resources,
+			RestartPolicy: restartPolicy,
+			Placement: &swarm.Placement{
+				Constraints: service.Deploy.Placement.Constraints,
+			},
+		},
+		EndpointSpec: endpoint,
+		Mode:         mode,
+		Networks:     convertServiceNetworks(service.Networks, namespace, service.Name),
+		UpdateConfig: convertUpdateConfig(service.Deploy.UpdateConfig),
+	}
+
+	return serviceSpec, nil
+}
+
+func convertRestartPolicy(restart string, source *composetypes.RestartPolicy) (*swarm.RestartPolicy, error) {
+	// TODO: log if restart is being ignored
+	if source == nil {
+		policy, err := runconfigopts.ParseRestartPolicy(restart)
+		if err != nil {
+			return nil, err
+		}
+		// TODO: is this an accurate conversion?
+		switch {
+		case policy.IsNone():
+			return nil, nil
+		case policy.IsAlways(), policy.IsUnlessStopped():
+			return &swarm.RestartPolicy{
+				Condition: swarm.RestartPolicyConditionAny,
+			}, nil
+		case policy.IsOnFailure():
+			attempts := uint64(policy.MaximumRetryCount)
+			return &swarm.RestartPolicy{
+				Condition:   swarm.RestartPolicyConditionOnFailure,
+				MaxAttempts: &attempts,
+			}, nil
+		}
+	}
+	return &swarm.RestartPolicy{
+		Condition:   swarm.RestartPolicyCondition(source.Condition),
+		Delay:       source.Delay,
+		MaxAttempts: source.MaxAttempts,
+		Window:      source.Window,
+	}, nil
+}
+
+func convertUpdateConfig(source *composetypes.UpdateConfig) *swarm.UpdateConfig {
+	if source == nil {
+		return nil
+	}
+	return &swarm.UpdateConfig{
+		Parallelism:     source.Parallelism,
+		Delay:           source.Delay,
+		FailureAction:   source.FailureAction,
+		Monitor:         source.Monitor,
+		MaxFailureRatio: source.MaxFailureRatio,
+	}
+}
+
+func convertResources(source composetypes.Resources) (*swarm.ResourceRequirements, error) {
+	resources := &swarm.ResourceRequirements{}
+	if source.Limits != nil {
+		cpus, err := opts.ParseCPUs(source.Limits.NanoCPUs)
+		if err != nil {
+			return nil, err
+		}
+		resources.Limits = &swarm.Resources{
+			NanoCPUs:    cpus,
+			MemoryBytes: int64(source.Limits.MemoryBytes),
+		}
+	}
+	if source.Reservations != nil {
+		cpus, err := opts.ParseCPUs(source.Reservations.NanoCPUs)
+		if err != nil {
+			return nil, err
+		}
+		resources.Reservations = &swarm.Resources{
+			NanoCPUs:    cpus,
+			MemoryBytes: int64(source.Reservations.MemoryBytes),
+		}
+	}
+	return resources, nil
+}
+
+func convertEndpointSpec(source []string) (*swarm.EndpointSpec, error) {
+	portConfigs := []swarm.PortConfig{}
+	ports, portBindings, err := nat.ParsePortSpecs(source)
+	if err != nil {
+		return nil, err
+	}
+
+	for port := range ports {
+		portConfigs = append(
+			portConfigs,
+			servicecmd.ConvertPortToPortConfig(port, portBindings)...)
+	}
+
+	return &swarm.EndpointSpec{Ports: portConfigs}, nil
+}
+
+func convertEnvironment(source map[string]string) []string {
+	var output []string
+
+	for name, value := range source {
+		output = append(output, fmt.Sprintf("%s=%s", name, value))
+	}
+
+	return output
+}
+
+func convertDeployMode(mode string, replicas *uint64) (swarm.ServiceMode, error) {
+	serviceMode := swarm.ServiceMode{}
+
+	switch mode {
+	case "global":
+		if replicas != nil {
+			return serviceMode, fmt.Errorf("replicas can only be used with replicated mode")
+		}
+		serviceMode.Global = &swarm.GlobalService{}
+	case "replicated", "":
+		serviceMode.Replicated = &swarm.ReplicatedService{Replicas: replicas}
+	default:
+		return serviceMode, fmt.Errorf("Unknown mode: %s", mode)
+	}
+	return serviceMode, nil
+}
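
`convertVolumeToMount` above parses the short Compose volume syntax `[SOURCE:]TARGET[:MODE]` before choosing between a bind mount and a named volume. A self-contained sketch of just the split step (a simplification; the TODOs above note that Windows path mappings are not yet handled):

```go
package main

import (
	"fmt"
	"strings"
)

// splitVolumeSpec mirrors the SplitN logic in convertVolumeToMount: one part
// is a bare target, two parts add a source, and three parts add a
// comma-separated mode list such as "ro,nocopy".
func splitVolumeSpec(spec string) (source, target string, mode []string) {
	parts := strings.SplitN(spec, ":", 3)
	switch len(parts) {
	case 3:
		source, target, mode = parts[0], parts[1], strings.Split(parts[2], ",")
	case 2:
		source, target = parts[0], parts[1]
	case 1:
		target = parts[0]
	}
	return
}

func main() {
	for _, spec := range []string{"/data", "dbdata:/var/lib/db", "/src:/app:ro"} {
		source, target, mode := splitVolumeSpec(spec)
		// A leading "/" on source selects a bind mount; otherwise the source
		// names a volume from the top-level "volumes" section.
		fmt.Printf("%-22s source=%q target=%q mode=%v\n", spec, source, target, mode)
	}
}
```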

+ 80 - 0
cli/command/stack/deploy_bundlefile.go

@@ -0,0 +1,80 @@
+package stack
+
+import (
+	"golang.org/x/net/context"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/swarm"
+	"github.com/docker/docker/cli/command"
+)
+
+func deployBundle(dockerCli *command.DockerCli, opts deployOptions) error {
+	bundle, err := loadBundlefile(dockerCli.Err(), opts.namespace, opts.bundlefile)
+	if err != nil {
+		return err
+	}
+
+	namespace := namespace{name: opts.namespace}
+
+	networks := make(map[string]types.NetworkCreate)
+	for _, service := range bundle.Services {
+		for _, networkName := range service.Networks {
+			networks[networkName] = types.NetworkCreate{
+				Labels: getStackLabels(namespace.name, nil),
+			}
+		}
+	}
+
+	services := make(map[string]swarm.ServiceSpec)
+	for internalName, service := range bundle.Services {
+		name := namespace.scope(internalName)
+
+		var ports []swarm.PortConfig
+		for _, portSpec := range service.Ports {
+			ports = append(ports, swarm.PortConfig{
+				Protocol:   swarm.PortConfigProtocol(portSpec.Protocol),
+				TargetPort: portSpec.Port,
+			})
+		}
+
+		nets := []swarm.NetworkAttachmentConfig{}
+		for _, networkName := range service.Networks {
+			nets = append(nets, swarm.NetworkAttachmentConfig{
+				Target:  namespace.scope(networkName),
+				Aliases: []string{networkName},
+			})
+		}
+
+		serviceSpec := swarm.ServiceSpec{
+			Annotations: swarm.Annotations{
+				Name:   name,
+				Labels: getStackLabels(namespace.name, service.Labels),
+			},
+			TaskTemplate: swarm.TaskSpec{
+				ContainerSpec: swarm.ContainerSpec{
+					Image:   service.Image,
+					Command: service.Command,
+					Args:    service.Args,
+					Env:     service.Env,
+					// Service Labels will not be copied to Containers
+					// automatically during the deployment so we apply
+					// it here.
+					Labels: getStackLabels(namespace.name, nil),
+				},
+			},
+			EndpointSpec: &swarm.EndpointSpec{
+				Ports: ports,
+			},
+			Networks: nets,
+		}
+
+		services[internalName] = serviceSpec
+	}
+
+	ctx := context.Background()
+
+	if err := createNetworks(ctx, dockerCli, namespace, networks); err != nil {
+		return err
+	}
+	return deployServices(ctx, dockerCli, services, namespace, opts.sendRegistryAuth)
+}

+ 5 - 4
cli/command/stack/opts.go

@@ -9,11 +9,12 @@ import (
 	"github.com/spf13/pflag"
 	"github.com/spf13/pflag"
 )
 )
 
 
+func addComposefileFlag(opt *string, flags *pflag.FlagSet) {
+	flags.StringVar(opt, "compose-file", "", "Path to a Compose file")
+}
+
 func addBundlefileFlag(opt *string, flags *pflag.FlagSet) {
 func addBundlefileFlag(opt *string, flags *pflag.FlagSet) {
-	flags.StringVar(
-		opt,
-		"file", "",
-		"Path to a Distributed Application Bundle file (Default: STACK.dab)")
+	flags.StringVar(opt, "bundle-file", "", "Path to a Distributed Application Bundle file")
 }
 }
 
 
 func addRegistryAuthFlag(opt *bool, flags *pflag.FlagSet) {
 func addRegistryAuthFlag(opt *bool, flags *pflag.FlagSet) {

+ 1 - 1
daemon/cluster/executor/container/container.go

@@ -141,8 +141,8 @@ func (c *containerConfig) config() *enginecontainer.Config {
 		Labels:      c.labels(),
 		Tty:         c.spec().TTY,
 		User:        c.spec().User,
-		Hostname:    c.spec().Hostname,
 		Env:         c.spec().Env,
+		Hostname:    c.spec().Hostname,
 		WorkingDir:  c.spec().Dir,
 		Image:       c.image(),
 		Volumes:     c.volumes(),

+ 30 - 20
integration-cli/docker_cli_stack_test.go

@@ -41,6 +41,30 @@ func (s *DockerSwarmSuite) TestStackServices(c *check.C) {
 	c.Assert(out, check.Equals, "Nothing found in stack: UNKNOWN_STACK\n")
 }
 
+func (s *DockerSwarmSuite) TestStackDeployComposeFile(c *check.C) {
+	testRequires(c, ExperimentalDaemon)
+	d := s.AddDaemon(c, true, true)
+
+	testStackName := "testdeploy"
+	stackArgs := []string{
+		"stack", "deploy",
+		"--compose-file", "fixtures/deploy/default.yaml",
+		testStackName,
+	}
+	out, err := d.Cmd(stackArgs...)
+	c.Assert(err, checker.IsNil, check.Commentf(out))
+
+	out, err = d.Cmd([]string{"stack", "ls"}...)
+	c.Assert(err, checker.IsNil)
+	c.Assert(out, check.Equals, "NAME        SERVICES\n"+"testdeploy  2\n")
+
+	out, err = d.Cmd([]string{"stack", "rm", testStackName}...)
+	c.Assert(err, checker.IsNil)
+	out, err = d.Cmd([]string{"stack", "ls"}...)
+	c.Assert(err, checker.IsNil)
+	c.Assert(out, check.Equals, "NAME  SERVICES\n")
+}
+
 // testDAB is the DAB JSON used for testing.
 // TODO: Use template/text and substitute "Image" with the result of
 // `docker inspect --format '{{index .RepoDigests 0}}' busybox:latest`
@@ -59,7 +83,7 @@ const testDAB = `{
     }
 }`
 
-func (s *DockerSwarmSuite) TestStackWithDAB(c *check.C) {
+func (s *DockerSwarmSuite) TestStackDeployWithDAB(c *check.C) {
 	testRequires(c, ExperimentalDaemon)
 	// setup
 	testStackName := "test"
@@ -69,7 +93,11 @@ func (s *DockerSwarmSuite) TestStackWithDAB(c *check.C) {
 	c.Assert(err, checker.IsNil)
 	d := s.AddDaemon(c, true, true)
 	// deploy
-	stackArgs := []string{"stack", "deploy", testStackName}
+	stackArgs := []string{
+		"stack", "deploy",
+		"--bundle-file", testDABFileName,
+		testStackName,
+	}
 	out, err := d.Cmd(stackArgs...)
 	c.Assert(err, checker.IsNil)
 	c.Assert(out, checker.Contains, "Loading bundle from test.dab\n")
@@ -92,21 +120,3 @@ func (s *DockerSwarmSuite) TestStackWithDAB(c *check.C) {
 	c.Assert(err, checker.IsNil)
 	c.Assert(out, check.Equals, "NAME  SERVICES\n")
 }
-
-func (s *DockerSwarmSuite) TestStackWithDABExtension(c *check.C) {
-	testRequires(c, ExperimentalDaemon)
-	// setup
-	testStackName := "test.dab"
-	testDABFileName := testStackName
-	defer os.RemoveAll(testDABFileName)
-	err := ioutil.WriteFile(testDABFileName, []byte(testDAB), 0444)
-	c.Assert(err, checker.IsNil)
-	d := s.AddDaemon(c, true, true)
-	// deploy
-	stackArgs := []string{"stack", "deploy", testStackName}
-	out, err := d.Cmd(stackArgs...)
-	c.Assert(err, checker.IsNil)
-	c.Assert(out, checker.Contains, "Loading bundle from test.dab\n")
-	c.Assert(out, checker.Contains, "Creating service test_srv1\n")
-	c.Assert(out, checker.Contains, "Creating service test_srv2\n")
-}

+ 9 - 0
integration-cli/fixtures/deploy/default.yaml

@@ -0,0 +1,9 @@
+
+version: "3"
+services:
+  web:
+    image: busybox@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0
+    command: top
+  db:
+    image: busybox@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0
+    command: "tail -f /dev/null"

+ 16 - 10
opts/opts.go

@@ -331,16 +331,9 @@ func (c *NanoCPUs) String() string {
 
 // Set sets the value of the NanoCPU by passing a string
 func (c *NanoCPUs) Set(value string) error {
-	cpu, ok := new(big.Rat).SetString(value)
-	if !ok {
-		return fmt.Errorf("Failed to parse %v as a rational number", value)
-	}
-	nano := cpu.Mul(cpu, big.NewRat(1e9, 1))
-	if !nano.IsInt() {
-		return fmt.Errorf("value is too precise")
-	}
-	*c = NanoCPUs(nano.Num().Int64())
-	return nil
+	cpus, err := ParseCPUs(value)
+	*c = NanoCPUs(cpus)
+	return err
 }
 
 // Type returns the type
@@ -352,3 +345,16 @@ func (c *NanoCPUs) Type() string {
 func (c *NanoCPUs) Value() int64 {
 	return int64(*c)
 }
+
+// ParseCPUs takes a string ratio and returns an integer value of nano cpus
+func ParseCPUs(value string) (int64, error) {
+	cpu, ok := new(big.Rat).SetString(value)
+	if !ok {
+		return 0, fmt.Errorf("failed to parse %v as a rational number", value)
+	}
+	nano := cpu.Mul(cpu, big.NewRat(1e9, 1))
+	if !nano.IsInt() {
+		return 0, fmt.Errorf("value is too precise")
+	}
+	return nano.Num().Int64(), nil
+}
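
`ParseCPUs` does the CPU-to-nano-CPU conversion with exact rational arithmetic rather than floats, so fractional values like `0.5` convert without rounding error. A standalone sketch of the same computation:

```go
package main

import (
	"fmt"
	"math/big"
)

// Same arithmetic as ParseCPUs above: multiply the rational CPU count by 1e9
// and reject anything finer than one nano-CPU.
func parseCPUs(value string) (int64, error) {
	cpu, ok := new(big.Rat).SetString(value)
	if !ok {
		return 0, fmt.Errorf("failed to parse %v as a rational number", value)
	}
	nano := cpu.Mul(cpu, big.NewRat(1e9, 1))
	if !nano.IsInt() {
		return 0, fmt.Errorf("value is too precise")
	}
	return nano.Num().Int64(), nil
}

func main() {
	fmt.Println(parseCPUs("0.5"))          // 500000000 <nil>
	fmt.Println(parseCPUs("2"))            // 2000000000 <nil>
	fmt.Println(parseCPUs("0.0000000001")) // 0, "value is too precise"
}
```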

+ 8 - 0
vendor.conf

@@ -130,3 +130,11 @@ github.com/flynn-archive/go-shlex 3f9db97f856818214da2e1057f8ad84803971cff
 
 # metrics
 github.com/docker/go-metrics 86138d05f285fd9737a99bee2d9be30866b59d72
+
+# composefile
+github.com/aanand/compose-file 480a79677acccb83b52c41161c22eaf4358460cc
+github.com/mitchellh/mapstructure f3009df150dadf309fdee4a54ed65c124afad715
+github.com/xeipuuv/gojsonpointer e0fe6f68307607d540ed8eac07a342c33fa1b54a
+github.com/xeipuuv/gojsonreference e02fc20de94c78484cd5ffb007f8af96be030a45
+github.com/xeipuuv/gojsonschema 93e72a773fade158921402d6a24c819b48aba29d
+gopkg.in/yaml.v2 a83829b6f1293c91addabc89d0571c246397bbf4

+ 191 - 0
vendor/github.com/aanand/compose-file/LICENSE

@@ -0,0 +1,191 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        https://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   Copyright 2016 Docker, Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       https://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.

+ 89 - 0
vendor/github.com/aanand/compose-file/interpolation/interpolation.go

@@ -0,0 +1,89 @@
+package interpolation
+
+import (
+	"fmt"
+
+	"github.com/aanand/compose-file/template"
+	"github.com/aanand/compose-file/types"
+)
+
+func Interpolate(config types.Dict, section string, mapping template.Mapping) (types.Dict, error) {
+	out := types.Dict{}
+
+	for name, item := range config {
+		if item == nil {
+			out[name] = nil
+			continue
+		}
+		interpolatedItem, err := interpolateSectionItem(name, item.(types.Dict), section, mapping)
+		if err != nil {
+			return nil, err
+		}
+		out[name] = interpolatedItem
+	}
+
+	return out, nil
+}
+
+func interpolateSectionItem(
+	name string,
+	item types.Dict,
+	section string,
+	mapping template.Mapping,
+) (types.Dict, error) {
+
+	out := types.Dict{}
+
+	for key, value := range item {
+		interpolatedValue, err := recursiveInterpolate(value, mapping)
+		if err != nil {
+			return nil, fmt.Errorf(
+				"Invalid interpolation format for %#v option in %s %#v: %#v",
+				key, section, name, err.Template,
+			)
+		}
+		out[key] = interpolatedValue
+	}
+
+	return out, nil
+
+}
+
+func recursiveInterpolate(
+	value interface{},
+	mapping template.Mapping,
+) (interface{}, *template.InvalidTemplateError) {
+
+	switch value := value.(type) {
+
+	case string:
+		return template.Substitute(value, mapping)
+
+	case types.Dict:
+		out := types.Dict{}
+		for key, elem := range value {
+			interpolatedElem, err := recursiveInterpolate(elem, mapping)
+			if err != nil {
+				return nil, err
+			}
+			out[key] = interpolatedElem
+		}
+		return out, nil
+
+	case []interface{}:
+		out := make([]interface{}, len(value))
+		for i, elem := range value {
+			interpolatedElem, err := recursiveInterpolate(elem, mapping)
+			if err != nil {
+				return nil, err
+			}
+			out[i] = interpolatedElem
+		}
+		return out, nil
+
+	default:
+		return value, nil
+
+	}
+
+}
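
`Interpolate` resolves `${VAR}` references through a `template.Mapping`; the loader passes `os.LookupEnv` directly, so the mapping contract is `func(string) (string, bool)`. A hedged sketch of that contract (illustrative only; the actual substitution logic lives in the vendored `template` package):

```go
package main

import (
	"fmt"
	"os"
)

// Mapping matches the shape the interpolation code expects: os.LookupEnv
// satisfies it directly, which is how Load resolves ${VAR} from the
// environment.
type Mapping func(key string) (string, bool)

func resolve(key string, mapping Mapping) string {
	if value, ok := mapping(key); ok {
		return value
	}
	return "" // unset variables interpolate to the empty string
}

func main() {
	os.Setenv("TAG", "3.1")
	fmt.Println("image: busybox:" + resolve("TAG", os.LookupEnv))
}
```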

+ 597 - 0
vendor/github.com/aanand/compose-file/loader/loader.go

@@ -0,0 +1,597 @@
+package loader
+
+import (
+	"fmt"
+	"os"
+	"path"
+	"reflect"
+	"regexp"
+	"sort"
+	"strings"
+
+	"github.com/aanand/compose-file/interpolation"
+	"github.com/aanand/compose-file/schema"
+	"github.com/aanand/compose-file/types"
+	"github.com/docker/docker/runconfig/opts"
+	units "github.com/docker/go-units"
+	shellwords "github.com/mattn/go-shellwords"
+	"github.com/mitchellh/mapstructure"
+	yaml "gopkg.in/yaml.v2"
+)
+
+var (
+	fieldNameRegexp = regexp.MustCompile("[A-Z][a-z0-9]+")
+)
+
+// ParseYAML reads the bytes from a file, parses the bytes into a mapping
+// structure, and returns it.
+func ParseYAML(source []byte) (types.Dict, error) {
+	var cfg interface{}
+	if err := yaml.Unmarshal(source, &cfg); err != nil {
+		return nil, err
+	}
+	cfgMap, ok := cfg.(map[interface{}]interface{})
+	if !ok {
+		return nil, fmt.Errorf("Top-level object must be a mapping")
+	}
+	converted, err := convertToStringKeysRecursive(cfgMap, "")
+	if err != nil {
+		return nil, err
+	}
+	return converted.(types.Dict), nil
+}
+
+// Load reads a ConfigDetails and returns a fully loaded configuration
+func Load(configDetails types.ConfigDetails) (*types.Config, error) {
+	if len(configDetails.ConfigFiles) < 1 {
+		return nil, fmt.Errorf("No files specified")
+	}
+	if len(configDetails.ConfigFiles) > 1 {
+		return nil, fmt.Errorf("Multiple files are not yet supported")
+	}
+
+	configDict := getConfigDict(configDetails)
+
+	if services, ok := configDict["services"]; ok {
+		if servicesDict, ok := services.(types.Dict); ok {
+			forbidden := getProperties(servicesDict, types.ForbiddenProperties)
+
+			if len(forbidden) > 0 {
+				return nil, &ForbiddenPropertiesError{Properties: forbidden}
+			}
+		}
+	}
+
+	if err := schema.Validate(configDict); err != nil {
+		return nil, err
+	}
+
+	cfg := types.Config{}
+	version := configDict["version"].(string)
+	if version != "3" {
+		return nil, fmt.Errorf("Unsupported version: %#v. The only supported version is 3", version)
+	}
+
+	if services, ok := configDict["services"]; ok {
+		servicesConfig, err := interpolation.Interpolate(services.(types.Dict), "service", os.LookupEnv)
+		if err != nil {
+			return nil, err
+		}
+
+		servicesList, err := loadServices(servicesConfig, configDetails.WorkingDir)
+		if err != nil {
+			return nil, err
+		}
+
+		cfg.Services = servicesList
+	}
+
+	if networks, ok := configDict["networks"]; ok {
+		networksConfig, err := interpolation.Interpolate(networks.(types.Dict), "network", os.LookupEnv)
+		if err != nil {
+			return nil, err
+		}
+
+		networksMapping, err := loadNetworks(networksConfig)
+		if err != nil {
+			return nil, err
+		}
+
+		cfg.Networks = networksMapping
+	}
+
+	if volumes, ok := configDict["volumes"]; ok {
+		volumesConfig, err := interpolation.Interpolate(volumes.(types.Dict), "volume", os.LookupEnv)
+		if err != nil {
+			return nil, err
+		}
+
+		volumesMapping, err := loadVolumes(volumesConfig)
+		if err != nil {
+			return nil, err
+		}
+
+		cfg.Volumes = volumesMapping
+	}
+
+	return &cfg, nil
+}
+
+func GetUnsupportedProperties(configDetails types.ConfigDetails) []string {
+	unsupported := map[string]bool{}
+
+	for _, service := range getServices(getConfigDict(configDetails)) {
+		serviceDict := service.(types.Dict)
+		for _, property := range types.UnsupportedProperties {
+			if _, isSet := serviceDict[property]; isSet {
+				unsupported[property] = true
+			}
+		}
+	}
+
+	return sortedKeys(unsupported)
+}
+
+func sortedKeys(set map[string]bool) []string {
+	var keys []string
+	for key, _ := range set {
+		keys = append(keys, key)
+	}
+	sort.Strings(keys)
+	return keys
+}
+
+func GetDeprecatedProperties(configDetails types.ConfigDetails) map[string]string {
+	return getProperties(getServices(getConfigDict(configDetails)), types.DeprecatedProperties)
+}
+
+func getProperties(services types.Dict, propertyMap map[string]string) map[string]string {
+	output := map[string]string{}
+
+	for _, service := range services {
+		if serviceDict, ok := service.(types.Dict); ok {
+			for property, description := range propertyMap {
+				if _, isSet := serviceDict[property]; isSet {
+					output[property] = description
+				}
+			}
+		}
+	}
+
+	return output
+}
+
+type ForbiddenPropertiesError struct {
+	Properties map[string]string
+}
+
+func (e *ForbiddenPropertiesError) Error() string {
+	return "Configuration contains forbidden properties"
+}
+
+// TODO: resolve multiple files into a single config
+func getConfigDict(configDetails types.ConfigDetails) types.Dict {
+	return configDetails.ConfigFiles[0].Config
+}
+
+func getServices(configDict types.Dict) types.Dict {
+	if services, ok := configDict["services"]; ok {
+		if servicesDict, ok := services.(types.Dict); ok {
+			return servicesDict
+		}
+	}
+
+	return types.Dict{}
+}
+
+func transform(source map[string]interface{}, target interface{}) error {
+	data := mapstructure.Metadata{}
+	config := &mapstructure.DecoderConfig{
+		DecodeHook: mapstructure.ComposeDecodeHookFunc(
+			transformHook,
+			mapstructure.StringToTimeDurationHookFunc()),
+		Result:   target,
+		Metadata: &data,
+	}
+	decoder, err := mapstructure.NewDecoder(config)
+	if err != nil {
+		return err
+	}
+	err = decoder.Decode(source)
+	// TODO: log unused keys
+	return err
+}
+
+func transformHook(
+	source reflect.Type,
+	target reflect.Type,
+	data interface{},
+) (interface{}, error) {
+	switch target {
+	case reflect.TypeOf(types.External{}):
+		return transformExternal(source, target, data)
+	case reflect.TypeOf(make(map[string]string, 0)):
+		return transformMapStringString(source, target, data)
+	case reflect.TypeOf(types.UlimitsConfig{}):
+		return transformUlimits(source, target, data)
+	case reflect.TypeOf(types.UnitBytes(0)):
+		return loadSize(data)
+	}
+	switch target.Kind() {
+	case reflect.Struct:
+		return transformStruct(source, target, data)
+	}
+	return data, nil
+}
+
+// keys needs to be converted to strings for jsonschema
+// TODO: don't use types.Dict
+func convertToStringKeysRecursive(value interface{}, keyPrefix string) (interface{}, error) {
+	if mapping, ok := value.(map[interface{}]interface{}); ok {
+		dict := make(types.Dict)
+		for key, entry := range mapping {
+			str, ok := key.(string)
+			if !ok {
+				var location string
+				if keyPrefix == "" {
+					location = "at top level"
+				} else {
+					location = fmt.Sprintf("in %s", keyPrefix)
+				}
+				return nil, fmt.Errorf("Non-string key %s: %#v", location, key)
+			}
+			var newKeyPrefix string
+			if keyPrefix == "" {
+				newKeyPrefix = str
+			} else {
+				newKeyPrefix = fmt.Sprintf("%s.%s", keyPrefix, str)
+			}
+			convertedEntry, err := convertToStringKeysRecursive(entry, newKeyPrefix)
+			if err != nil {
+				return nil, err
+			}
+			dict[str] = convertedEntry
+		}
+		return dict, nil
+	}
+	if list, ok := value.([]interface{}); ok {
+		var convertedList []interface{}
+		for index, entry := range list {
+			newKeyPrefix := fmt.Sprintf("%s[%d]", keyPrefix, index)
+			convertedEntry, err := convertToStringKeysRecursive(entry, newKeyPrefix)
+			if err != nil {
+				return nil, err
+			}
+			convertedList = append(convertedList, convertedEntry)
+		}
+		return convertedList, nil
+	}
+	return value, nil
+}
+
+func loadServices(servicesDict types.Dict, workingDir string) ([]types.ServiceConfig, error) {
+	var services []types.ServiceConfig
+
+	for name, serviceDef := range servicesDict {
+		serviceConfig, err := loadService(name, serviceDef.(types.Dict), workingDir)
+		if err != nil {
+			return nil, err
+		}
+		services = append(services, *serviceConfig)
+	}
+
+	return services, nil
+}
+
+func loadService(name string, serviceDict types.Dict, workingDir string) (*types.ServiceConfig, error) {
+	serviceConfig := &types.ServiceConfig{}
+	if err := transform(serviceDict, serviceConfig); err != nil {
+		return nil, err
+	}
+	serviceConfig.Name = name
+
+	if err := resolveEnvironment(serviceConfig, serviceDict, workingDir); err != nil {
+		return nil, err
+	}
+
+	if err := resolveVolumePaths(serviceConfig.Volumes, workingDir); err != nil {
+		return nil, err
+	}
+
+	return serviceConfig, nil
+}
+
+func resolveEnvironment(serviceConfig *types.ServiceConfig, serviceDict types.Dict, workingDir string) error {
+	environment := make(map[string]string)
+
+	if envFileVal, ok := serviceDict["env_file"]; ok {
+		envFiles := loadStringOrListOfStrings(envFileVal)
+
+		var envVars []string
+
+		for _, file := range envFiles {
+			filePath := path.Join(workingDir, file)
+			fileVars, err := opts.ParseEnvFile(filePath)
+			if err != nil {
+				return err
+			}
+			envVars = append(envVars, fileVars...)
+		}
+
+		for k, v := range opts.ConvertKVStringsToMap(envVars) {
+			environment[k] = v
+		}
+	}
+
+	for k, v := range serviceConfig.Environment {
+		environment[k] = v
+	}
+
+	serviceConfig.Environment = environment
+
+	return nil
+}
+
+func resolveVolumePaths(volumes []string, workingDir string) error {
+	for i, mapping := range volumes {
+		parts := strings.SplitN(mapping, ":", 2)
+		if len(parts) == 1 {
+			continue
+		}
+
+		if strings.HasPrefix(parts[0], ".") {
+			parts[0] = path.Join(workingDir, parts[0])
+		}
+		parts[0] = expandUser(parts[0])
+
+		volumes[i] = strings.Join(parts, ":")
+	}
+
+	return nil
+}
+
+// TODO: make this more robust
+func expandUser(path string) string {
+	if strings.HasPrefix(path, "~") {
+		return strings.Replace(path, "~", os.Getenv("HOME"), 1)
+	}
+	return path
+}
+
+func transformUlimits(
+	source reflect.Type,
+	target reflect.Type,
+	data interface{},
+) (interface{}, error) {
+	switch value := data.(type) {
+	case int:
+		return types.UlimitsConfig{Single: value}, nil
+	case types.Dict:
+		ulimit := types.UlimitsConfig{}
+		ulimit.Soft = value["soft"].(int)
+		ulimit.Hard = value["hard"].(int)
+		return ulimit, nil
+	default:
+		return data, fmt.Errorf("invalid type %T for ulimits", value)
+	}
+}
+
+func loadNetworks(source types.Dict) (map[string]types.NetworkConfig, error) {
+	networks := make(map[string]types.NetworkConfig)
+	err := transform(source, &networks)
+	if err != nil {
+		return networks, err
+	}
+	for name, network := range networks {
+		if network.External.External && network.External.Name == "" {
+			network.External.Name = name
+			networks[name] = network
+		}
+	}
+	return networks, nil
+}
+
+func loadVolumes(source types.Dict) (map[string]types.VolumeConfig, error) {
+	volumes := make(map[string]types.VolumeConfig)
+	err := transform(source, &volumes)
+	if err != nil {
+		return volumes, err
+	}
+	for name, volume := range volumes {
+		if volume.External.External && volume.External.Name == "" {
+			volume.External.Name = name
+			volumes[name] = volume
+		}
+	}
+	return volumes, nil
+}
+
+func transformStruct(
+	source reflect.Type,
+	target reflect.Type,
+	data interface{},
+) (interface{}, error) {
+	structValue, ok := data.(map[string]interface{})
+	if !ok {
+		// FIXME: this is necessary because of convertToStringKeysRecursive
+		structValue, ok = data.(types.Dict)
+		if !ok {
+			panic(fmt.Sprintf(
+				"transformStruct called with non-map type: %T, %s", data, data))
+		}
+	}
+
+	var err error
+	for i := 0; i < target.NumField(); i++ {
+		field := target.Field(i)
+		fieldTag := field.Tag.Get("compose")
+
+		yamlName := toYAMLName(field.Name)
+		value, ok := structValue[yamlName]
+		if !ok {
+			continue
+		}
+
+		structValue[yamlName], err = convertField(
+			fieldTag, reflect.TypeOf(value), field.Type, value)
+		if err != nil {
+			return nil, fmt.Errorf("field %s: %s", yamlName, err.Error())
+		}
+	}
+	return structValue, nil
+}
+
+func transformMapStringString(
+	source reflect.Type,
+	target reflect.Type,
+	data interface{},
+) (interface{}, error) {
+	switch value := data.(type) {
+	case map[string]interface{}:
+		return toMapStringString(value), nil
+	case types.Dict:
+		return toMapStringString(value), nil
+	case map[string]string:
+		return value, nil
+	default:
+		return data, fmt.Errorf("invalid type %T for map[string]string", value)
+	}
+}
+
+func convertField(
+	fieldTag string,
+	source reflect.Type,
+	target reflect.Type,
+	data interface{},
+) (interface{}, error) {
+	switch fieldTag {
+	case "":
+		return data, nil
+	case "list_or_dict_equals":
+		return loadMappingOrList(data, "="), nil
+	case "list_or_dict_colon":
+		return loadMappingOrList(data, ":"), nil
+	case "list_or_struct_map":
+		return loadListOrStructMap(data, target)
+	case "string_or_list":
+		return loadStringOrListOfStrings(data), nil
+	case "list_of_strings_or_numbers":
+		return loadListOfStringsOrNumbers(data), nil
+	case "shell_command":
+		return loadShellCommand(data)
+	case "size":
+		return loadSize(data)
+	case "-":
+		return nil, nil
+	}
+	return data, nil
+}
+
+func transformExternal(
+	source reflect.Type,
+	target reflect.Type,
+	data interface{},
+) (interface{}, error) {
+	switch value := data.(type) {
+	case bool:
+		return map[string]interface{}{"external": value}, nil
+	case types.Dict:
+		return map[string]interface{}{"external": true, "name": value["name"]}, nil
+	case map[string]interface{}:
+		return map[string]interface{}{"external": true, "name": value["name"]}, nil
+	default:
+		return data, fmt.Errorf("invalid type %T for external", value)
+	}
+}
+
+func toYAMLName(name string) string {
+	nameParts := fieldNameRegexp.FindAllString(name, -1)
+	for i, p := range nameParts {
+		nameParts[i] = strings.ToLower(p)
+	}
+	return strings.Join(nameParts, "_")
+}
+
+func loadListOrStructMap(value interface{}, target reflect.Type) (interface{}, error) {
+	mapValue := reflect.MakeMap(target)
+
+	if list, ok := value.([]interface{}); ok {
+		for _, name := range list {
+			mapValue.SetMapIndex(reflect.ValueOf(name), reflect.Zero(target.Elem()))
+		}
+		return mapValue.Interface(), nil
+	}
+
+	return value, nil
+}
+
+func loadListOfStringsOrNumbers(value interface{}) []string {
+	list := value.([]interface{})
+	result := make([]string, len(list))
+	for i, item := range list {
+		result[i] = fmt.Sprint(item)
+	}
+	return result
+}
+
+func loadStringOrListOfStrings(value interface{}) []string {
+	if list, ok := value.([]interface{}); ok {
+		result := make([]string, len(list))
+		for i, item := range list {
+			result[i] = fmt.Sprint(item)
+		}
+		return result
+	}
+	return []string{value.(string)}
+}
+
+func loadMappingOrList(mappingOrList interface{}, sep string) map[string]string {
+	if mapping, ok := mappingOrList.(types.Dict); ok {
+		return toMapStringString(mapping)
+	}
+	if list, ok := mappingOrList.([]interface{}); ok {
+		result := make(map[string]string)
+		for _, value := range list {
+			parts := strings.SplitN(value.(string), sep, 2)
+			if len(parts) == 1 {
+				result[parts[0]] = ""
+			} else {
+				result[parts[0]] = parts[1]
+			}
+		}
+		return result
+	}
+	panic(fmt.Errorf("expected a map or a slice, got: %#v", mappingOrList))
+}
+
+func loadShellCommand(value interface{}) (interface{}, error) {
+	if str, ok := value.(string); ok {
+		return shellwords.Parse(str)
+	}
+	return value, nil
+}
+
+func loadSize(value interface{}) (int64, error) {
+	switch value := value.(type) {
+	case int:
+		return int64(value), nil
+	case string:
+		return units.RAMInBytes(value)
+	}
+	return 0, fmt.Errorf("invalid type for size %T", value)
+}
+
+func toMapStringString(value map[string]interface{}) map[string]string {
+	output := make(map[string]string)
+	for key, value := range value {
+		output[key] = toString(value)
+	}
+	return output
+}
+
+func toString(value interface{}) string {
+	if value == nil {
+		return ""
+	}
+	return fmt.Sprint(value)
+}
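For reference, convertToStringKeysRecursive exists because gopkg.in/yaml.v2 decodes every YAML mapping as map[interface{}]interface{}, which gojsonschema cannot walk. A minimal standalone sketch of the same conversion idea (illustrative only, not code from this change):

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// toStringKeys rewrites yaml.v2's map[interface{}]interface{} mappings
// into map[string]interface{}, recursing through nested maps and lists.
func toStringKeys(value interface{}) (interface{}, error) {
	switch v := value.(type) {
	case map[interface{}]interface{}:
		dict := make(map[string]interface{}, len(v))
		for key, entry := range v {
			str, ok := key.(string)
			if !ok {
				return nil, fmt.Errorf("non-string key: %#v", key)
			}
			converted, err := toStringKeys(entry)
			if err != nil {
				return nil, err
			}
			dict[str] = converted
		}
		return dict, nil
	case []interface{}:
		for i, entry := range v {
			converted, err := toStringKeys(entry)
			if err != nil {
				return nil, err
			}
			v[i] = converted
		}
		return v, nil
	default:
		return value, nil
	}
}

func main() {
	var parsed map[interface{}]interface{}
	source := "services:\n  web:\n    image: nginx\n"
	if err := yaml.Unmarshal([]byte(source), &parsed); err != nil {
		panic(err)
	}
	converted, err := toStringKeys(parsed)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%#v\n", converted)
}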

File diff suppressed because it is too large
+ 70 - 0
vendor/github.com/aanand/compose-file/schema/bindata.go


+ 113 - 0
vendor/github.com/aanand/compose-file/schema/schema.go

@@ -0,0 +1,113 @@
+package schema
+
+//go:generate go-bindata -pkg schema data
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/xeipuuv/gojsonschema"
+)
+
+type portsFormatChecker struct{}
+
+func (checker portsFormatChecker) IsFormat(input string) bool {
+	// TODO: implement this
+	return true
+}
+
+type durationFormatChecker struct{}
+
+func (checker durationFormatChecker) IsFormat(input string) bool {
+	_, err := time.ParseDuration(input)
+	return err == nil
+}
+
+func init() {
+	gojsonschema.FormatCheckers.Add("expose", portsFormatChecker{})
+	gojsonschema.FormatCheckers.Add("ports", portsFormatChecker{})
+	gojsonschema.FormatCheckers.Add("duration", durationFormatChecker{})
+}
+
+// Validate uses the jsonschema to validate the configuration
+func Validate(config map[string]interface{}) error {
+	schemaData, err := Asset("data/config_schema_v3.json")
+	if err != nil {
+		return err
+	}
+
+	schemaLoader := gojsonschema.NewStringLoader(string(schemaData))
+	dataLoader := gojsonschema.NewGoLoader(config)
+
+	result, err := gojsonschema.Validate(schemaLoader, dataLoader)
+	if err != nil {
+		return err
+	}
+
+	if !result.Valid() {
+		return toError(result)
+	}
+
+	return nil
+}
+
+func toError(result *gojsonschema.Result) error {
+	err := getMostSpecificError(result.Errors())
+	description := getDescription(err)
+	return fmt.Errorf("%s %s", err.Field(), description)
+}
+
+func getDescription(err gojsonschema.ResultError) string {
+	if err.Type() == "invalid_type" {
+		if expectedType, ok := err.Details()["expected"].(string); ok {
+			return fmt.Sprintf("must be a %s", humanReadableType(expectedType))
+		}
+	}
+
+	return err.Description()
+}
+
+func humanReadableType(definition string) string {
+	if definition[0:1] == "[" {
+		allTypes := strings.Split(definition[1:len(definition)-1], ",")
+		for i, t := range allTypes {
+			allTypes[i] = humanReadableType(t)
+		}
+		return fmt.Sprintf(
+			"%s or %s",
+			strings.Join(allTypes[0:len(allTypes)-1], ", "),
+			allTypes[len(allTypes)-1],
+		)
+	}
+	if definition == "object" {
+		return "mapping"
+	}
+	if definition == "array" {
+		return "list"
+	}
+	return definition
+}
+
+func getMostSpecificError(errors []gojsonschema.ResultError) gojsonschema.ResultError {
+	var mostSpecificError gojsonschema.ResultError
+
+	for _, err := range errors {
+		if mostSpecificError == nil {
+			mostSpecificError = err
+		} else if specificity(err) > specificity(mostSpecificError) {
+			mostSpecificError = err
+		} else if specificity(err) == specificity(mostSpecificError) {
+			// Invalid type errors win the tie-break between equally specific fields
+			if err.Type() == "invalid_type" && mostSpecificError.Type() != "invalid_type" {
+				mostSpecificError = err
+			}
+		}
+	}
+
+	return mostSpecificError
+}
+
+func specificity(err gojsonschema.ResultError) int {
+	return len(strings.Split(err.Field(), "."))
+}
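For illustration, the validation flow above can be exercised from outside the package roughly as follows. This is a sketch: it assumes the v3 schema JSON has been extracted to a local config_schema_v3.json file (a hypothetical path) instead of being compiled in via go-bindata's Asset:

package main

import (
	"fmt"
	"io/ioutil"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	// Hypothetical path; the real package embeds data/config_schema_v3.json.
	schemaData, err := ioutil.ReadFile("config_schema_v3.json")
	if err != nil {
		panic(err)
	}

	config := map[string]interface{}{
		"version": "3",
		"services": map[string]interface{}{
			"web": map[string]interface{}{"image": "nginx"},
		},
	}

	result, err := gojsonschema.Validate(
		gojsonschema.NewStringLoader(string(schemaData)),
		gojsonschema.NewGoLoader(config),
	)
	if err != nil {
		panic(err)
	}

	fmt.Println("valid:", result.Valid())
	for _, resultError := range result.Errors() {
		fmt.Println(resultError.Field(), resultError.Description())
	}
}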

+ 108 - 0
vendor/github.com/aanand/compose-file/template/template.go

@@ -0,0 +1,108 @@
+package template
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+)
+
+var delimiter = "\\$"
+var substitution = "[_a-z][_a-z0-9]*(?::?-[^}]+)?"
+
+var patternString = fmt.Sprintf(
+	"%s(?i:(?P<escaped>%s)|(?P<named>%s)|{(?P<braced>%s)}|(?P<invalid>))",
+	delimiter, delimiter, substitution, substitution,
+)
+
+var pattern = regexp.MustCompile(patternString)
+
+type InvalidTemplateError struct {
+	Template string
+}
+
+func (e InvalidTemplateError) Error() string {
+	return fmt.Sprintf("Invalid template: %#v", e.Template)
+}
+
+// Mapping is a user-supplied function that maps variable names to
+// values. It returns the value as a string and a bool indicating
+// whether the value is present, to distinguish an empty string from
+// an absent value.
+type Mapping func(string) (string, bool)
+
+func Substitute(template string, mapping Mapping) (result string, err *InvalidTemplateError) {
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(*InvalidTemplateError); ok {
+				err = e
+			} else {
+				panic(r)
+			}
+		}
+	}()
+
+	result = pattern.ReplaceAllStringFunc(template, func(substring string) string {
+		matches := pattern.FindStringSubmatch(substring)
+		groups := make(map[string]string)
+		for i, name := range pattern.SubexpNames() {
+			if i != 0 {
+				groups[name] = matches[i]
+			}
+		}
+
+		substitution := groups["named"]
+		if substitution == "" {
+			substitution = groups["braced"]
+		}
+		if substitution != "" {
+			// Soft default (fall back if unset or empty)
+			if strings.Contains(substitution, ":-") {
+				name, defaultValue := partition(substitution, ":-")
+				value, ok := mapping(name)
+				if !ok || value == "" {
+					return defaultValue
+				}
+				return value
+			}
+
+			// Hard default (fall back if-and-only-if unset)
+			if strings.Contains(substitution, "-") {
+				name, defaultValue := partition(substitution, "-")
+				value, ok := mapping(name)
+				if !ok {
+					return defaultValue
+				}
+				return value
+			}
+
+			// No default (fall back to empty string)
+			value, ok := mapping(substitution)
+			if !ok {
+				return ""
+			}
+			return value
+		}
+
+		if escaped := groups["escaped"]; escaped != "" {
+			return escaped
+		}
+
+		panic(&InvalidTemplateError{Template: template})
+	})
+
+	return
+}
+
+// Split the string at the first occurrence of sep, and return the part before the separator,
+// and the part after the separator.
+//
+// If the separator is not found, return the string itself, followed by an empty string.
+func partition(s, sep string) (string, string) {
+	if strings.Contains(s, sep) {
+		parts := strings.SplitN(s, sep, 2)
+		return parts[0], parts[1]
+	}
+	return s, ""
+}
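A usage sketch for Substitute, showing how a hard default ("-", applied only when the variable is unset) differs from a soft default (":-", applied when it is unset or empty). The mapping here is an explicit map rather than os.LookupEnv:

package main

import (
	"fmt"

	"github.com/aanand/compose-file/template"
)

func main() {
	env := map[string]string{"TAG": "", "IMAGE": "nginx"}
	mapping := func(name string) (string, bool) {
		value, ok := env[name]
		return value, ok
	}

	// TAG is set but empty: the hard default keeps the empty value,
	// while the soft default replaces it.
	for _, tmpl := range []string{"${IMAGE}:${TAG-latest}", "${IMAGE}:${TAG:-latest}"} {
		result, err := template.Substitute(tmpl, mapping)
		if err != nil {
			panic(err)
		}
		fmt.Println(result) // "nginx:" then "nginx:latest"
	}
}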

+ 200 - 0
vendor/github.com/aanand/compose-file/types/types.go

@@ -0,0 +1,200 @@
+package types
+
+import (
+	"time"
+)
+
+var UnsupportedProperties = []string{
+	"build",
+	"cap_add",
+	"cap_drop",
+	"cgroup_parent",
+	"devices",
+	"dns",
+	"dns_search",
+	"domainname",
+	"external_links",
+	"extra_hosts",
+	"ipc",
+	"links",
+	"mac_address",
+	"network_mode",
+	"privileged",
+	"read_only",
+	"restart",
+	"security_opt",
+	"shm_size",
+	"stdin_open",
+	"stop_signal",
+	"tmpfs",
+}
+
+var DeprecatedProperties = map[string]string{
+	"container_name": "Setting the container name is not supported.",
+	"expose":         "Exposing ports is unnecessary - services on the same network can access each other's containers on any port.",
+}
+
+var ForbiddenProperties = map[string]string{
+	"extends":       "Support for `extends` is not implemented yet. Use `docker-compose config` to generate a configuration with all `extends` options resolved, and deploy from that.",
+	"volume_driver": "Instead of setting the volume driver on the service, define a volume using the top-level `volumes` option and specify the driver there.",
+	"volumes_from":  "To share a volume between services, define it using the top-level `volumes` option and reference it from each service that shares it using the service-level `volumes` option.",
+	"cpu_quota":     "Set resource limits using deploy.resources",
+	"cpu_shares":    "Set resource limits using deploy.resources",
+	"cpuset":        "Set resource limits using deploy.resources",
+	"mem_limit":     "Set resource limits using deploy.resources",
+	"memswap_limit": "Set resource limits using deploy.resources",
+}
+
+type Dict map[string]interface{}
+
+type ConfigFile struct {
+	Filename string
+	Config   Dict
+}
+
+type ConfigDetails struct {
+	WorkingDir  string
+	ConfigFiles []ConfigFile
+	Environment map[string]string
+}
+
+type Config struct {
+	Services []ServiceConfig
+	Networks map[string]NetworkConfig
+	Volumes  map[string]VolumeConfig
+}
+
+type ServiceConfig struct {
+	Name string
+
+	CapAdd          []string `mapstructure:"cap_add"`
+	CapDrop         []string `mapstructure:"cap_drop"`
+	CgroupParent    string   `mapstructure:"cgroup_parent"`
+	Command         []string `compose:"shell_command"`
+	ContainerName   string   `mapstructure:"container_name"`
+	DependsOn       []string `mapstructure:"depends_on"`
+	Deploy          DeployConfig
+	Devices         []string
+	Dns             []string          `compose:"string_or_list"`
+	DnsSearch       []string          `mapstructure:"dns_search" compose:"string_or_list"`
+	DomainName      string            `mapstructure:"domainname"`
+	Entrypoint      []string          `compose:"shell_command"`
+	Environment     map[string]string `compose:"list_or_dict_equals"`
+	Expose          []string          `compose:"list_of_strings_or_numbers"`
+	ExternalLinks   []string          `mapstructure:"external_links"`
+	ExtraHosts      map[string]string `mapstructure:"extra_hosts" compose:"list_or_dict_colon"`
+	Hostname        string
+	Image           string
+	Ipc             string
+	Labels          map[string]string `compose:"list_or_dict_equals"`
+	Links           []string
+	Logging         *LoggingConfig
+	MacAddress      string                           `mapstructure:"mac_address"`
+	NetworkMode     string                           `mapstructure:"network_mode"`
+	Networks        map[string]*ServiceNetworkConfig `compose:"list_or_struct_map"`
+	Pid             string
+	Ports           []string `compose:"list_of_strings_or_numbers"`
+	Privileged      bool
+	ReadOnly        bool `mapstructure:"read_only"`
+	Restart         string
+	SecurityOpt     []string       `mapstructure:"security_opt"`
+	StdinOpen       bool           `mapstructure:"stdin_open"`
+	StopGracePeriod *time.Duration `mapstructure:"stop_grace_period"`
+	StopSignal      string         `mapstructure:"stop_signal"`
+	Tmpfs           []string       `compose:"string_or_list"`
+	Tty             bool           `mapstructure:"tty"`
+	Ulimits         map[string]*UlimitsConfig
+	User            string
+	Volumes         []string
+	WorkingDir      string `mapstructure:"working_dir"`
+}
+
+type LoggingConfig struct {
+	Driver  string
+	Options map[string]string
+}
+
+type DeployConfig struct {
+	Mode          string
+	Replicas      *uint64
+	Labels        map[string]string `compose:"list_or_dict_equals"`
+	UpdateConfig  *UpdateConfig     `mapstructure:"update_config"`
+	Resources     Resources
+	RestartPolicy *RestartPolicy `mapstructure:"restart_policy"`
+	Placement     Placement
+}
+
+type UpdateConfig struct {
+	Parallelism     uint64
+	Delay           time.Duration
+	FailureAction   string `mapstructure:"failure_action"`
+	Monitor         time.Duration
+	MaxFailureRatio float32 `mapstructure:"max_failure_ratio"`
+}
+
+type Resources struct {
+	Limits       *Resource
+	Reservations *Resource
+}
+
+type Resource struct {
+	// TODO: types to convert from units and ratios
+	NanoCPUs    string    `mapstructure:"cpus"`
+	MemoryBytes UnitBytes `mapstructure:"memory"`
+}
+
+type UnitBytes int64
+
+type RestartPolicy struct {
+	Condition   string
+	Delay       *time.Duration
+	MaxAttempts *uint64 `mapstructure:"max_attempts"`
+	Window      *time.Duration
+}
+
+type Placement struct {
+	Constraints []string
+}
+
+type ServiceNetworkConfig struct {
+	Aliases     []string
+	Ipv4Address string `mapstructure:"ipv4_address"`
+	Ipv6Address string `mapstructure:"ipv6_address"`
+}
+
+type UlimitsConfig struct {
+	Single int
+	Soft   int
+	Hard   int
+}
+
+type NetworkConfig struct {
+	Driver     string
+	DriverOpts map[string]string `mapstructure:"driver_opts"`
+	Ipam       IPAMConfig
+	External   External
+	Labels     map[string]string `compose:"list_or_dict_equals"`
+}
+
+type IPAMConfig struct {
+	Driver string
+	Config []*IPAMPool
+}
+
+type IPAMPool struct {
+	Subnet string
+}
+
+type VolumeConfig struct {
+	Driver     string
+	DriverOpts map[string]string `mapstructure:"driver_opts"`
+	External   External
+	Labels     map[string]string `compose:"list_or_dict_equals"`
+}
+
+// External identifies a Volume or Network as a reference to a resource that is
+// not managed, and should already exist.
+type External struct {
+	Name     string
+	External bool
+}
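The External normalization performed by loadNetworks and loadVolumes earlier in this diff reduces to one rule: an external resource with no explicit name takes its mapping key as the name. A self-contained sketch, with the types inlined for brevity:

package main

import "fmt"

type External struct {
	Name     string
	External bool
}

type VolumeConfig struct {
	External External
}

func main() {
	volumes := map[string]VolumeConfig{
		"data":   {External: External{External: true}},
		"shared": {External: External{External: true, Name: "prod-shared"}},
	}

	// Same loop shape as loadVolumes: fill in missing external names.
	for name, volume := range volumes {
		if volume.External.External && volume.External.Name == "" {
			volume.External.Name = name
			volumes[name] = volume
		}
	}

	// "data" gains Name "data"; "shared" keeps "prod-shared".
	fmt.Printf("%+v\n", volumes)
}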

+ 21 - 0
vendor/github.com/mitchellh/mapstructure/LICENSE

@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.

+ 154 - 0
vendor/github.com/mitchellh/mapstructure/decode_hooks.go

@@ -0,0 +1,154 @@
+package mapstructure
+
+import (
+	"errors"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns
+// it into the proper DecodeHookFunc type, such as DecodeHookFuncType.
+func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc {
+	// Create variables here so we can reference them with the reflect pkg
+	var f1 DecodeHookFuncType
+	var f2 DecodeHookFuncKind
+
+	// Fill in the variables into this interface and the rest is done
+	// automatically using the reflect package.
+	potential := []interface{}{f1, f2}
+
+	v := reflect.ValueOf(h)
+	vt := v.Type()
+	for _, raw := range potential {
+		pt := reflect.ValueOf(raw).Type()
+		if vt.ConvertibleTo(pt) {
+			return v.Convert(pt).Interface()
+		}
+	}
+
+	return nil
+}
+
+// DecodeHookExec executes the given decode hook. It should be used in
+// preference to calling a hook directly, since it degrades gracefully to
+// the older, backwards-compatible DecodeHookFunc variant that takes
+// reflect.Kind instead of reflect.Type.
+func DecodeHookExec(
+	raw DecodeHookFunc,
+	from reflect.Type, to reflect.Type,
+	data interface{}) (interface{}, error) {
+	// Build our arguments that reflect expects
+	argVals := make([]reflect.Value, 3)
+	argVals[0] = reflect.ValueOf(from)
+	argVals[1] = reflect.ValueOf(to)
+	argVals[2] = reflect.ValueOf(data)
+
+	switch f := typedDecodeHook(raw).(type) {
+	case DecodeHookFuncType:
+		return f(from, to, data)
+	case DecodeHookFuncKind:
+		return f(from.Kind(), to.Kind(), data)
+	default:
+		return nil, errors.New("invalid decode hook signature")
+	}
+}
+
+// ComposeDecodeHookFunc creates a single DecodeHookFunc that
+// automatically composes multiple DecodeHookFuncs.
+//
+// The composed funcs are called in order, with the result of the
+// previous transformation.
+func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
+	return func(
+		f reflect.Type,
+		t reflect.Type,
+		data interface{}) (interface{}, error) {
+		var err error
+		for _, f1 := range fs {
+			data, err = DecodeHookExec(f1, f, t, data)
+			if err != nil {
+				return nil, err
+			}
+
+			// Modify the from kind to be correct with the new data
+			f = nil
+			if val := reflect.ValueOf(data); val.IsValid() {
+				f = val.Type()
+			}
+		}
+
+		return data, nil
+	}
+}
+
+// StringToSliceHookFunc returns a DecodeHookFunc that converts
+// string to []string by splitting on the given sep.
+func StringToSliceHookFunc(sep string) DecodeHookFunc {
+	return func(
+		f reflect.Kind,
+		t reflect.Kind,
+		data interface{}) (interface{}, error) {
+		if f != reflect.String || t != reflect.Slice {
+			return data, nil
+		}
+
+		raw := data.(string)
+		if raw == "" {
+			return []string{}, nil
+		}
+
+		return strings.Split(raw, sep), nil
+	}
+}
+
+// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts
+// strings to time.Duration.
+func StringToTimeDurationHookFunc() DecodeHookFunc {
+	return func(
+		f reflect.Type,
+		t reflect.Type,
+		data interface{}) (interface{}, error) {
+		if f.Kind() != reflect.String {
+			return data, nil
+		}
+		if t != reflect.TypeOf(time.Duration(5)) {
+			return data, nil
+		}
+
+		// Convert it by parsing
+		return time.ParseDuration(data.(string))
+	}
+}
+
+func WeaklyTypedHook(
+	f reflect.Kind,
+	t reflect.Kind,
+	data interface{}) (interface{}, error) {
+	dataVal := reflect.ValueOf(data)
+	switch t {
+	case reflect.String:
+		switch f {
+		case reflect.Bool:
+			if dataVal.Bool() {
+				return "1", nil
+			} else {
+				return "0", nil
+			}
+		case reflect.Float32:
+			return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil
+		case reflect.Int:
+			return strconv.FormatInt(dataVal.Int(), 10), nil
+		case reflect.Slice:
+			dataType := dataVal.Type()
+			elemKind := dataType.Elem().Kind()
+			if elemKind == reflect.Uint8 {
+				return string(dataVal.Interface().([]uint8)), nil
+			}
+		case reflect.Uint:
+			return strconv.FormatUint(dataVal.Uint(), 10), nil
+		}
+	}
+
+	return data, nil
+}
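Illustrative usage of the hook helpers above: compose StringToTimeDurationHookFunc with StringToSliceHookFunc and decode a loosely typed map. Both hook constructors appear in this file; the Config struct and input values are hypothetical.

package main

import (
	"fmt"
	"time"

	"github.com/mitchellh/mapstructure"
)

type Config struct {
	Timeout time.Duration
	Tags    []string
}

func main() {
	input := map[string]interface{}{
		"timeout": "1m30s",
		"tags":    "a,b,c",
	}

	var result Config
	decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		DecodeHook: mapstructure.ComposeDecodeHookFunc(
			mapstructure.StringToTimeDurationHookFunc(),
			mapstructure.StringToSliceHookFunc(","),
		),
		Result: &result,
	})
	if err != nil {
		panic(err)
	}
	if err := decoder.Decode(input); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", result) // {Timeout:1m30s Tags:[a b c]}
}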

+ 50 - 0
vendor/github.com/mitchellh/mapstructure/error.go

@@ -0,0 +1,50 @@
+package mapstructure
+
+import (
+	"errors"
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// Error implements the error interface and can represent multiple
+// errors that occur in the course of a single decode.
+type Error struct {
+	Errors []string
+}
+
+func (e *Error) Error() string {
+	points := make([]string, len(e.Errors))
+	for i, err := range e.Errors {
+		points[i] = fmt.Sprintf("* %s", err)
+	}
+
+	sort.Strings(points)
+	return fmt.Sprintf(
+		"%d error(s) decoding:\n\n%s",
+		len(e.Errors), strings.Join(points, "\n"))
+}
+
+// WrappedErrors implements the errwrap.Wrapper interface to make this
+// return value more useful with the errwrap and go-multierror libraries.
+func (e *Error) WrappedErrors() []error {
+	if e == nil {
+		return nil
+	}
+
+	result := make([]error, len(e.Errors))
+	for i, e := range e.Errors {
+		result[i] = errors.New(e)
+	}
+
+	return result
+}
+
+func appendErrors(errors []string, err error) []string {
+	switch e := err.(type) {
+	case *Error:
+		return append(errors, e.Errors...)
+	default:
+		return append(errors, e.Error())
+	}
+}
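A short sketch of how the aggregated Error surfaces to callers when more than one field fails to decode (illustrative only):

package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

func main() {
	var result struct {
		Port int
		Name string
	}
	input := map[string]interface{}{
		"port": "not-a-number", // string into int: fails
		"name": 12345,          // int into string: fails
	}
	err := mapstructure.Decode(input, &result)
	fmt.Println(err)
	// 2 error(s) decoding:
	//
	// * 'Name' expected type 'string', got unconvertible type 'int'
	// * 'Port' expected type 'int', got unconvertible type 'string'
}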

+ 782 - 0
vendor/github.com/mitchellh/mapstructure/mapstructure.go

@@ -0,0 +1,782 @@
+// Package mapstructure exposes functionality to convert an
+// arbitrary map[string]interface{} into a native Go structure.
+//
+// The Go structure can be arbitrarily complex, containing slices,
+// other structs, etc. and the decoder will properly decode nested
+// maps and so on into the proper structures in the native Go struct.
+// See the examples to see what the decoder is capable of.
+package mapstructure
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+)
+
+// DecodeHookFunc is the callback function that can be used for
+// data transformations. See "DecodeHook" in the DecoderConfig
+// struct.
+//
+// The type should be DecodeHookFuncType or DecodeHookFuncKind.
+// Either is accepted. Types are a superset of Kinds (Types can return
+// Kinds) and are generally a richer thing to use, but Kinds are simpler
+// if you only need those.
+//
+// The reason DecodeHookFunc is multi-typed is for backwards compatibility:
+// we started with Kinds and then realized Types were the better solution,
+// but have a promise to not break backwards compat so we now support
+// both.
+type DecodeHookFunc interface{}
+
+type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error)
+type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error)
+
+// DecoderConfig is the configuration that is used to create a new decoder
+// and allows customization of various aspects of decoding.
+type DecoderConfig struct {
+	// DecodeHook, if set, will be called before any decoding and any
+	// type conversion (if WeaklyTypedInput is on). This lets you modify
+	// the values before they're set down onto the resulting struct.
+	//
+	// If an error is returned, the entire decode will fail with that
+	// error.
+	DecodeHook DecodeHookFunc
+
+	// If ErrorUnused is true, then it is an error for there to exist
+	// keys in the original map that were unused in the decoding process
+	// (extra keys).
+	ErrorUnused bool
+
+	// ZeroFields, if set to true, will zero fields before writing them.
+	// For example, a map will be emptied before decoded values are put in
+	// it. If this is false, a map will be merged.
+	ZeroFields bool
+
+	// If WeaklyTypedInput is true, the decoder will make the following
+	// "weak" conversions:
+	//
+	//   - bools to string (true = "1", false = "0")
+	//   - numbers to string (base 10)
+	//   - bools to int/uint (true = 1, false = 0)
+	//   - strings to int/uint (base implied by prefix)
+	//   - int to bool (true if value != 0)
+	//   - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F,
+	//     FALSE, false, False. Anything else is an error)
+	//   - empty array = empty map and vice versa
+	//   - negative numbers to overflowed uint values (base 10)
+	//   - slice of maps to a merged map
+	//
+	WeaklyTypedInput bool
+
+	// Metadata is the struct that will contain extra metadata about
+	// the decoding. If this is nil, then no metadata will be tracked.
+	Metadata *Metadata
+
+	// Result is a pointer to the struct that will contain the decoded
+	// value.
+	Result interface{}
+
+	// The tag name that mapstructure reads for field names. This
+	// defaults to "mapstructure"
+	TagName string
+}
+
+// A Decoder takes a raw interface value and turns it into structured
+// data, keeping track of rich error information along the way in case
+// anything goes wrong. Unlike the basic top-level Decode method, you can
+// more finely control how the Decoder behaves using the DecoderConfig
+// structure. The top-level Decode method is just a convenience that sets
+// up the most basic Decoder.
+type Decoder struct {
+	config *DecoderConfig
+}
+
+// Metadata contains information about decoding a structure that
+// is tedious or difficult to get otherwise.
+type Metadata struct {
+	// Keys are the keys of the structure which were successfully decoded
+	Keys []string
+
+	// Unused is a slice of keys that were found in the raw value but
+	// weren't decoded since there was no matching field in the result interface
+	Unused []string
+}
+
+// Decode takes a map and uses reflection to convert it into the
+// given Go native structure. rawVal must be a pointer to a struct.
+func Decode(m interface{}, rawVal interface{}) error {
+	config := &DecoderConfig{
+		Metadata: nil,
+		Result:   rawVal,
+	}
+
+	decoder, err := NewDecoder(config)
+	if err != nil {
+		return err
+	}
+
+	return decoder.Decode(m)
+}
+
+// WeakDecode is the same as Decode but is shorthand to enable
+// WeaklyTypedInput. See DecoderConfig for more info.
+func WeakDecode(input, output interface{}) error {
+	config := &DecoderConfig{
+		Metadata:         nil,
+		Result:           output,
+		WeaklyTypedInput: true,
+	}
+
+	decoder, err := NewDecoder(config)
+	if err != nil {
+		return err
+	}
+
+	return decoder.Decode(input)
+}
+
+// NewDecoder returns a new decoder for the given configuration. Once
+// a decoder has been returned, the same configuration must not be used
+// again.
+func NewDecoder(config *DecoderConfig) (*Decoder, error) {
+	val := reflect.ValueOf(config.Result)
+	if val.Kind() != reflect.Ptr {
+		return nil, errors.New("result must be a pointer")
+	}
+
+	val = val.Elem()
+	if !val.CanAddr() {
+		return nil, errors.New("result must be addressable (a pointer)")
+	}
+
+	if config.Metadata != nil {
+		if config.Metadata.Keys == nil {
+			config.Metadata.Keys = make([]string, 0)
+		}
+
+		if config.Metadata.Unused == nil {
+			config.Metadata.Unused = make([]string, 0)
+		}
+	}
+
+	if config.TagName == "" {
+		config.TagName = "mapstructure"
+	}
+
+	result := &Decoder{
+		config: config,
+	}
+
+	return result, nil
+}
+
+// Decode decodes the given raw interface to the target pointer specified
+// by the configuration.
+func (d *Decoder) Decode(raw interface{}) error {
+	return d.decode("", raw, reflect.ValueOf(d.config.Result).Elem())
+}
+
+// Decodes an unknown data type into a specific reflection value.
+func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error {
+	if data == nil {
+		// If the data is nil, then we don't set anything.
+		return nil
+	}
+
+	dataVal := reflect.ValueOf(data)
+	if !dataVal.IsValid() {
+		// If the data value is invalid, then we just set the value
+		// to be the zero value.
+		val.Set(reflect.Zero(val.Type()))
+		return nil
+	}
+
+	if d.config.DecodeHook != nil {
+		// We have a DecodeHook, so let's pre-process the data.
+		var err error
+		data, err = DecodeHookExec(
+			d.config.DecodeHook,
+			dataVal.Type(), val.Type(), data)
+		if err != nil {
+			return err
+		}
+	}
+
+	var err error
+	dataKind := getKind(val)
+	switch dataKind {
+	case reflect.Bool:
+		err = d.decodeBool(name, data, val)
+	case reflect.Interface:
+		err = d.decodeBasic(name, data, val)
+	case reflect.String:
+		err = d.decodeString(name, data, val)
+	case reflect.Int:
+		err = d.decodeInt(name, data, val)
+	case reflect.Uint:
+		err = d.decodeUint(name, data, val)
+	case reflect.Float32:
+		err = d.decodeFloat(name, data, val)
+	case reflect.Struct:
+		err = d.decodeStruct(name, data, val)
+	case reflect.Map:
+		err = d.decodeMap(name, data, val)
+	case reflect.Ptr:
+		err = d.decodePtr(name, data, val)
+	case reflect.Slice:
+		err = d.decodeSlice(name, data, val)
+	default:
+		// If we reached this point then we weren't able to decode it
+		return fmt.Errorf("%s: unsupported type: %s", name, dataKind)
+	}
+
+	// If we reached here, then we successfully decoded SOMETHING, so
+	// mark the key as used if we're tracking metadata.
+	if d.config.Metadata != nil && name != "" {
+		d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
+	}
+
+	return err
+}
+
+// This decodes a basic type (bool, int, string, etc.) and sets the
+// value to "data" of that type.
+func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.ValueOf(data)
+	if !dataVal.IsValid() {
+		dataVal = reflect.Zero(val.Type())
+	}
+
+	dataValType := dataVal.Type()
+	if !dataValType.AssignableTo(val.Type()) {
+		return fmt.Errorf(
+			"'%s' expected type '%s', got '%s'",
+			name, val.Type(), dataValType)
+	}
+
+	val.Set(dataVal)
+	return nil
+}
+
+func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.ValueOf(data)
+	dataKind := getKind(dataVal)
+
+	converted := true
+	switch {
+	case dataKind == reflect.String:
+		val.SetString(dataVal.String())
+	case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+		if dataVal.Bool() {
+			val.SetString("1")
+		} else {
+			val.SetString("0")
+		}
+	case dataKind == reflect.Int && d.config.WeaklyTypedInput:
+		val.SetString(strconv.FormatInt(dataVal.Int(), 10))
+	case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
+		val.SetString(strconv.FormatUint(dataVal.Uint(), 10))
+	case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
+		val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64))
+	case dataKind == reflect.Slice && d.config.WeaklyTypedInput:
+		dataType := dataVal.Type()
+		elemKind := dataType.Elem().Kind()
+		switch {
+		case elemKind == reflect.Uint8:
+			val.SetString(string(dataVal.Interface().([]uint8)))
+		default:
+			converted = false
+		}
+	default:
+		converted = false
+	}
+
+	if !converted {
+		return fmt.Errorf(
+			"'%s' expected type '%s', got unconvertible type '%s'",
+			name, val.Type(), dataVal.Type())
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.ValueOf(data)
+	dataKind := getKind(dataVal)
+	dataType := dataVal.Type()
+
+	switch {
+	case dataKind == reflect.Int:
+		val.SetInt(dataVal.Int())
+	case dataKind == reflect.Uint:
+		val.SetInt(int64(dataVal.Uint()))
+	case dataKind == reflect.Float32:
+		val.SetInt(int64(dataVal.Float()))
+	case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+		if dataVal.Bool() {
+			val.SetInt(1)
+		} else {
+			val.SetInt(0)
+		}
+	case dataKind == reflect.String && d.config.WeaklyTypedInput:
+		i, err := strconv.ParseInt(dataVal.String(), 0, val.Type().Bits())
+		if err == nil {
+			val.SetInt(i)
+		} else {
+			return fmt.Errorf("cannot parse '%s' as int: %s", name, err)
+		}
+	case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
+		jn := data.(json.Number)
+		i, err := jn.Int64()
+		if err != nil {
+			return fmt.Errorf(
+				"error decoding json.Number into %s: %s", name, err)
+		}
+		val.SetInt(i)
+	default:
+		return fmt.Errorf(
+			"'%s' expected type '%s', got unconvertible type '%s'",
+			name, val.Type(), dataVal.Type())
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.ValueOf(data)
+	dataKind := getKind(dataVal)
+
+	switch {
+	case dataKind == reflect.Int:
+		i := dataVal.Int()
+		if i < 0 && !d.config.WeaklyTypedInput {
+			return fmt.Errorf("cannot parse '%s', %d overflows uint",
+				name, i)
+		}
+		val.SetUint(uint64(i))
+	case dataKind == reflect.Uint:
+		val.SetUint(dataVal.Uint())
+	case dataKind == reflect.Float32:
+		f := dataVal.Float()
+		if f < 0 && !d.config.WeaklyTypedInput {
+			return fmt.Errorf("cannot parse '%s', %f overflows uint",
+				name, f)
+		}
+		val.SetUint(uint64(f))
+	case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+		if dataVal.Bool() {
+			val.SetUint(1)
+		} else {
+			val.SetUint(0)
+		}
+	case dataKind == reflect.String && d.config.WeaklyTypedInput:
+		i, err := strconv.ParseUint(dataVal.String(), 0, val.Type().Bits())
+		if err == nil {
+			val.SetUint(i)
+		} else {
+			return fmt.Errorf("cannot parse '%s' as uint: %s", name, err)
+		}
+	default:
+		return fmt.Errorf(
+			"'%s' expected type '%s', got unconvertible type '%s'",
+			name, val.Type(), dataVal.Type())
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.ValueOf(data)
+	dataKind := getKind(dataVal)
+
+	switch {
+	case dataKind == reflect.Bool:
+		val.SetBool(dataVal.Bool())
+	case dataKind == reflect.Int && d.config.WeaklyTypedInput:
+		val.SetBool(dataVal.Int() != 0)
+	case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
+		val.SetBool(dataVal.Uint() != 0)
+	case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
+		val.SetBool(dataVal.Float() != 0)
+	case dataKind == reflect.String && d.config.WeaklyTypedInput:
+		b, err := strconv.ParseBool(dataVal.String())
+		if err == nil {
+			val.SetBool(b)
+		} else if dataVal.String() == "" {
+			val.SetBool(false)
+		} else {
+			return fmt.Errorf("cannot parse '%s' as bool: %s", name, err)
+		}
+	default:
+		return fmt.Errorf(
+			"'%s' expected type '%s', got unconvertible type '%s'",
+			name, val.Type(), dataVal.Type())
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.ValueOf(data)
+	dataKind := getKind(dataVal)
+	dataType := dataVal.Type()
+
+	switch {
+	case dataKind == reflect.Int:
+		val.SetFloat(float64(dataVal.Int()))
+	case dataKind == reflect.Uint:
+		val.SetFloat(float64(dataVal.Uint()))
+	case dataKind == reflect.Float32:
+		val.SetFloat(float64(dataVal.Float()))
+	case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+		if dataVal.Bool() {
+			val.SetFloat(1)
+		} else {
+			val.SetFloat(0)
+		}
+	case dataKind == reflect.String && d.config.WeaklyTypedInput:
+		f, err := strconv.ParseFloat(dataVal.String(), val.Type().Bits())
+		if err == nil {
+			val.SetFloat(f)
+		} else {
+			return fmt.Errorf("cannot parse '%s' as float: %s", name, err)
+		}
+	case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
+		jn := data.(json.Number)
+		i, err := jn.Float64()
+		if err != nil {
+			return fmt.Errorf(
+				"error decoding json.Number into %s: %s", name, err)
+		}
+		val.SetFloat(i)
+	default:
+		return fmt.Errorf(
+			"'%s' expected type '%s', got unconvertible type '%s'",
+			name, val.Type(), dataVal.Type())
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error {
+	valType := val.Type()
+	valKeyType := valType.Key()
+	valElemType := valType.Elem()
+
+	// By default we overwrite keys in the current map
+	valMap := val
+
+	// If the map is nil or we're purposely zeroing fields, make a new map
+	if valMap.IsNil() || d.config.ZeroFields {
+		// Make a new map to hold our result
+		mapType := reflect.MapOf(valKeyType, valElemType)
+		valMap = reflect.MakeMap(mapType)
+	}
+
+	// Check input type
+	dataVal := reflect.Indirect(reflect.ValueOf(data))
+	if dataVal.Kind() != reflect.Map {
+		// In weak mode, we accept a slice of maps as an input...
+		if d.config.WeaklyTypedInput {
+			switch dataVal.Kind() {
+			case reflect.Array, reflect.Slice:
+				// Special case for BC reasons (covered by tests)
+				if dataVal.Len() == 0 {
+					val.Set(valMap)
+					return nil
+				}
+
+				for i := 0; i < dataVal.Len(); i++ {
+					err := d.decode(
+						fmt.Sprintf("%s[%d]", name, i),
+						dataVal.Index(i).Interface(), val)
+					if err != nil {
+						return err
+					}
+				}
+
+				return nil
+			}
+		}
+
+		return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind())
+	}
+
+	// Accumulate errors
+	errors := make([]string, 0)
+
+	for _, k := range dataVal.MapKeys() {
+		fieldName := fmt.Sprintf("%s[%s]", name, k)
+
+		// First decode the key into the proper type
+		currentKey := reflect.Indirect(reflect.New(valKeyType))
+		if err := d.decode(fieldName, k.Interface(), currentKey); err != nil {
+			errors = appendErrors(errors, err)
+			continue
+		}
+
+		// Next decode the data into the proper type
+		v := dataVal.MapIndex(k).Interface()
+		currentVal := reflect.Indirect(reflect.New(valElemType))
+		if err := d.decode(fieldName, v, currentVal); err != nil {
+			errors = appendErrors(errors, err)
+			continue
+		}
+
+		valMap.SetMapIndex(currentKey, currentVal)
+	}
+
+	// Set the built up map to the value
+	val.Set(valMap)
+
+	// If we had errors, return those
+	if len(errors) > 0 {
+		return &Error{errors}
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error {
+	// Create an element of the concrete (non-pointer) type and decode
+	// into that. Then set the value of the pointer to this type.
+	valType := val.Type()
+	valElemType := valType.Elem()
+	realVal := reflect.New(valElemType)
+	if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil {
+		return err
+	}
+
+	val.Set(realVal)
+	return nil
+}
+
+func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.Indirect(reflect.ValueOf(data))
+	dataValKind := dataVal.Kind()
+	valType := val.Type()
+	valElemType := valType.Elem()
+	sliceType := reflect.SliceOf(valElemType)
+
+	// Check input type
+	if dataValKind != reflect.Array && dataValKind != reflect.Slice {
+		// Accept empty map instead of array/slice in weakly typed mode
+		if d.config.WeaklyTypedInput && dataVal.Kind() == reflect.Map && dataVal.Len() == 0 {
+			val.Set(reflect.MakeSlice(sliceType, 0, 0))
+			return nil
+		} else {
+			return fmt.Errorf(
+				"'%s': source data must be an array or slice, got %s", name, dataValKind)
+		}
+	}
+
+	// Make a new slice to hold our result, same size as the original data.
+	valSlice := reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len())
+
+	// Accumulate any errors
+	errors := make([]string, 0)
+
+	for i := 0; i < dataVal.Len(); i++ {
+		currentData := dataVal.Index(i).Interface()
+		currentField := valSlice.Index(i)
+
+		fieldName := fmt.Sprintf("%s[%d]", name, i)
+		if err := d.decode(fieldName, currentData, currentField); err != nil {
+			errors = appendErrors(errors, err)
+		}
+	}
+
+	// Finally, set the value to the slice we built up
+	val.Set(valSlice)
+
+	// If there were errors, we return those
+	if len(errors) > 0 {
+		return &Error{errors}
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.Indirect(reflect.ValueOf(data))
+
+	// If the type of the value to write to and the data match directly,
+	// then we just set it directly instead of recursing into the structure.
+	if dataVal.Type() == val.Type() {
+		val.Set(dataVal)
+		return nil
+	}
+
+	dataValKind := dataVal.Kind()
+	if dataValKind != reflect.Map {
+		return fmt.Errorf("'%s' expected a map, got '%s'", name, dataValKind)
+	}
+
+	dataValType := dataVal.Type()
+	if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface {
+		return fmt.Errorf(
+			"'%s' needs a map with string keys, has '%s' keys",
+			name, dataValType.Key().Kind())
+	}
+
+	dataValKeys := make(map[reflect.Value]struct{})
+	dataValKeysUnused := make(map[interface{}]struct{})
+	for _, dataValKey := range dataVal.MapKeys() {
+		dataValKeys[dataValKey] = struct{}{}
+		dataValKeysUnused[dataValKey.Interface()] = struct{}{}
+	}
+
+	errors := make([]string, 0)
+
+	// This slice will keep track of all the structs we'll be decoding.
+	// There can be more than one struct if there are embedded structs
+	// that are squashed.
+	structs := make([]reflect.Value, 1, 5)
+	structs[0] = val
+
+	// Compile the list of all the fields that we're going to be decoding
+	// from all the structs.
+	fields := make(map[*reflect.StructField]reflect.Value)
+	for len(structs) > 0 {
+		structVal := structs[0]
+		structs = structs[1:]
+
+		structType := structVal.Type()
+
+		for i := 0; i < structType.NumField(); i++ {
+			fieldType := structType.Field(i)
+			fieldKind := fieldType.Type.Kind()
+
+			// If "squash" is specified in the tag, we squash the field down.
+			squash := false
+			tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",")
+			for _, tag := range tagParts[1:] {
+				if tag == "squash" {
+					squash = true
+					break
+				}
+			}
+
+			if squash {
+				if fieldKind != reflect.Struct {
+					errors = appendErrors(errors,
+						fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldKind))
+				} else {
+					structs = append(structs, val.FieldByName(fieldType.Name))
+				}
+				continue
+			}
+
+			// Normal struct field, store it away
+			fields[&fieldType] = structVal.Field(i)
+		}
+	}
+
+	for fieldType, field := range fields {
+		fieldName := fieldType.Name
+
+		tagValue := fieldType.Tag.Get(d.config.TagName)
+		tagValue = strings.SplitN(tagValue, ",", 2)[0]
+		if tagValue != "" {
+			fieldName = tagValue
+		}
+
+		rawMapKey := reflect.ValueOf(fieldName)
+		rawMapVal := dataVal.MapIndex(rawMapKey)
+		if !rawMapVal.IsValid() {
+			// Do a slower search by iterating over each key and
+			// doing case-insensitive search.
+			for dataValKey := range dataValKeys {
+				mK, ok := dataValKey.Interface().(string)
+				if !ok {
+					// Not a string key
+					continue
+				}
+
+				if strings.EqualFold(mK, fieldName) {
+					rawMapKey = dataValKey
+					rawMapVal = dataVal.MapIndex(dataValKey)
+					break
+				}
+			}
+
+			if !rawMapVal.IsValid() {
+				// There was no matching key in the map for the value in
+				// the struct. Just ignore.
+				continue
+			}
+		}
+
+		// Delete the key we're using from the unused map so we stop tracking
+		delete(dataValKeysUnused, rawMapKey.Interface())
+
+		if !field.IsValid() {
+			// This should never happen
+			panic("field is not valid")
+		}
+
+		// If we can't set the field, then it is unexported or something,
+		// and we just continue onwards.
+		if !field.CanSet() {
+			continue
+		}
+
+		// If the name is empty string, then we're at the root, and we
+		// don't dot-join the fields.
+		if name != "" {
+			fieldName = fmt.Sprintf("%s.%s", name, fieldName)
+		}
+
+		if err := d.decode(fieldName, rawMapVal.Interface(), field); err != nil {
+			errors = appendErrors(errors, err)
+		}
+	}
+
+	if d.config.ErrorUnused && len(dataValKeysUnused) > 0 {
+		keys := make([]string, 0, len(dataValKeysUnused))
+		for rawKey := range dataValKeysUnused {
+			keys = append(keys, rawKey.(string))
+		}
+		sort.Strings(keys)
+
+		err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", "))
+		errors = appendErrors(errors, err)
+	}
+
+	if len(errors) > 0 {
+		return &Error{errors}
+	}
+
+	// Add the unused keys to the list of unused keys if we're tracking metadata
+	if d.config.Metadata != nil {
+		for rawKey := range dataValKeysUnused {
+			key := rawKey.(string)
+			if name != "" {
+				key = fmt.Sprintf("%s.%s", name, key)
+			}
+
+			d.config.Metadata.Unused = append(d.config.Metadata.Unused, key)
+		}
+	}
+
+	return nil
+}
+
+func getKind(val reflect.Value) reflect.Kind {
+	kind := val.Kind()
+
+	switch {
+	case kind >= reflect.Int && kind <= reflect.Int64:
+		return reflect.Int
+	case kind >= reflect.Uint && kind <= reflect.Uint64:
+		return reflect.Uint
+	case kind >= reflect.Float32 && kind <= reflect.Float64:
+		return reflect.Float32
+	default:
+		return kind
+	}
+}
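An illustrative sketch of Metadata tracking with the decoder above: keys that matched a struct field land in Keys, and input keys with no matching field land in Unused (which is one way a caller could implement the "log unused keys" TODO in the compose-file loader):

package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

func main() {
	var result struct {
		Image string
	}
	metadata := mapstructure.Metadata{}
	decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		Metadata: &metadata,
		Result:   &result,
	})
	if err != nil {
		panic(err)
	}

	input := map[string]interface{}{"image": "nginx", "build": "."}
	if err := decoder.Decode(input); err != nil {
		panic(err)
	}

	fmt.Println(metadata.Keys)   // [Image]
	fmt.Println(metadata.Unused) // [build]
}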

+ 217 - 0
vendor/github.com/xeipuuv/gojsonpointer/pointer.go

@@ -0,0 +1,217 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author  			xeipuuv
+// author-github 	https://github.com/xeipuuv
+// author-mail		xeipuuv@gmail.com
+//
+// repository-name	gojsonpointer
+// repository-desc	An implementation of JSON Pointer - Go language
+//
+// description		Main and unique file.
+//
+// created      	25-02-2013
+
+package gojsonpointer
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+)
+
+const (
+	const_empty_pointer     = ``
+	const_pointer_separator = `/`
+
+	const_invalid_start = `JSON pointer must be empty or start with a "` + const_pointer_separator + `"`
+)
+
+type implStruct struct {
+	mode string // "SET" or "GET"
+
+	inDocument interface{}
+
+	setInValue interface{}
+
+	getOutNode interface{}
+	getOutKind reflect.Kind
+	outError   error
+}
+
+func NewJsonPointer(jsonPointerString string) (JsonPointer, error) {
+
+	var p JsonPointer
+	err := p.parse(jsonPointerString)
+	return p, err
+
+}
+
+type JsonPointer struct {
+	referenceTokens []string
+}
+
+// "Constructor", parses the given string JSON pointer
+func (p *JsonPointer) parse(jsonPointerString string) error {
+
+	var err error
+
+	if jsonPointerString != const_empty_pointer {
+		if !strings.HasPrefix(jsonPointerString, const_pointer_separator) {
+			err = errors.New(const_invalid_start)
+		} else {
+			referenceTokens := strings.Split(jsonPointerString, const_pointer_separator)
+			for _, referenceToken := range referenceTokens[1:] {
+				p.referenceTokens = append(p.referenceTokens, referenceToken)
+			}
+		}
+	}
+
+	return err
+}
+
+// Uses the pointer to retrieve a value from a JSON document
+func (p *JsonPointer) Get(document interface{}) (interface{}, reflect.Kind, error) {
+
+	is := &implStruct{mode: "GET", inDocument: document}
+	p.implementation(is)
+	return is.getOutNode, is.getOutKind, is.outError
+
+}
+
+// Uses the pointer to update a value from a JSON document
+func (p *JsonPointer) Set(document interface{}, value interface{}) (interface{}, error) {
+
+	is := &implStruct{mode: "SET", inDocument: document, setInValue: value}
+	p.implementation(is)
+	return document, is.outError
+
+}
+
+// Both Get and Set functions use the same implementation to avoid code duplication
+func (p *JsonPointer) implementation(i *implStruct) {
+
+	kind := reflect.Invalid
+
+	// Full document when empty
+	if len(p.referenceTokens) == 0 {
+		i.getOutNode = i.inDocument
+		i.getOutKind = kind
+		i.outError = nil
+		return
+	}
+
+	node := i.inDocument
+
+	for ti, token := range p.referenceTokens {
+
+		decodedToken := decodeReferenceToken(token)
+		isLastToken := ti == len(p.referenceTokens)-1
+
+		rValue := reflect.ValueOf(node)
+		kind = rValue.Kind()
+
+		switch kind {
+
+		case reflect.Map:
+			m := node.(map[string]interface{})
+			if _, ok := m[decodedToken]; ok {
+				node = m[decodedToken]
+				if isLastToken && i.mode == "SET" {
+					m[decodedToken] = i.setInValue
+				}
+			} else {
+				i.outError = errors.New(fmt.Sprintf("Object has no key '%s'", token))
+				i.getOutKind = kind
+				i.getOutNode = nil
+				return
+			}
+
+		case reflect.Slice:
+			s := node.([]interface{})
+			tokenIndex, err := strconv.Atoi(token)
+			if err != nil {
+				i.outError = errors.New(fmt.Sprintf("Invalid array index '%s'", token))
+				i.getOutKind = kind
+				i.getOutNode = nil
+				return
+			}
+			sLength := len(s)
+			if tokenIndex < 0 || tokenIndex >= sLength {
+				i.outError = errors.New(fmt.Sprintf("Out of bound array[0,%d] index '%d'", sLength, tokenIndex))
+				i.getOutKind = kind
+				i.getOutNode = nil
+				return
+			}
+
+			node = s[tokenIndex]
+			if isLastToken && i.mode == "SET" {
+				s[tokenIndex] = i.setInValue
+			}
+
+		default:
+			i.outError = errors.New(fmt.Sprintf("Invalid token reference '%s'", token))
+			i.getOutKind = kind
+			i.getOutNode = nil
+			return
+		}
+
+	}
+
+	rValue := reflect.ValueOf(node)
+	kind = rValue.Kind()
+
+	i.getOutNode = node
+	i.getOutKind = kind
+	i.outError = nil
+}
+
+// Pointer to string representation function
+func (p *JsonPointer) String() string {
+
+	if len(p.referenceTokens) == 0 {
+		return const_empty_pointer
+	}
+
+	pointerString := const_pointer_separator + strings.Join(p.referenceTokens, const_pointer_separator)
+
+	return pointerString
+}
+
+// Specific JSON pointer encoding here
+// ~0 => ~
+// ~1 => /
+// ... and vice versa
+
+const (
+	const_encoded_reference_token_0 = `~0`
+	const_encoded_reference_token_1 = `~1`
+	const_decoded_reference_token_0 = `~`
+	const_decoded_reference_token_1 = `/`
+)
+
+func decodeReferenceToken(token string) string {
+	step1 := strings.Replace(token, const_encoded_reference_token_1, const_decoded_reference_token_1, -1)
+	step2 := strings.Replace(step1, const_encoded_reference_token_0, const_decoded_reference_token_0, -1)
+	return step2
+}
+
+func encodeReferenceToken(token string) string {
+	step1 := strings.Replace(token, const_decoded_reference_token_1, const_encoded_reference_token_1, -1)
+	step2 := strings.Replace(step1, const_decoded_reference_token_0, const_encoded_reference_token_0, -1)
+	return step2
+}
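A usage sketch for JsonPointer covering both a plain path and the ~1 escape for "/" inside a key (the document values are hypothetical):

package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonpointer"
)

func main() {
	document := map[string]interface{}{
		"services": map[string]interface{}{
			"web": map[string]interface{}{"image": "nginx"},
		},
		"a/b": []interface{}{"first", "second"},
	}

	pointer, err := gojsonpointer.NewJsonPointer("/services/web/image")
	if err != nil {
		panic(err)
	}
	node, kind, err := pointer.Get(document)
	fmt.Println(node, kind, err) // nginx string <nil>

	// "/a~1b/1" decodes to the key "a/b", then index 1 of the slice.
	escaped, _ := gojsonpointer.NewJsonPointer("/a~1b/1")
	node, _, _ = escaped.Get(document)
	fmt.Println(node) // second
}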

+ 141 - 0
vendor/github.com/xeipuuv/gojsonreference/reference.go

@@ -0,0 +1,141 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author           xeipuuv
+// author-github    https://github.com/xeipuuv
+// author-mail      xeipuuv@gmail.com
+//
+// repository-name  gojsonreference
+// repository-desc  An implementation of JSON Reference - Go language
+//
+// description      Main and only file.
+//
+// created          26-02-2013
+
+package gojsonreference
+
+import (
+	"errors"
+	"github.com/xeipuuv/gojsonpointer"
+	"net/url"
+	"path/filepath"
+	"runtime"
+	"strings"
+)
+
+const (
+	const_fragment_char = `#`
+)
+
+func NewJsonReference(jsonReferenceString string) (JsonReference, error) {
+
+	var r JsonReference
+	err := r.parse(jsonReferenceString)
+	return r, err
+
+}
+
+type JsonReference struct {
+	referenceUrl     *url.URL
+	referencePointer gojsonpointer.JsonPointer
+
+	HasFullUrl      bool
+	HasUrlPathOnly  bool
+	HasFragmentOnly bool
+	HasFileScheme   bool
+	HasFullFilePath bool
+}
+
+func (r *JsonReference) GetUrl() *url.URL {
+	return r.referenceUrl
+}
+
+func (r *JsonReference) GetPointer() *gojsonpointer.JsonPointer {
+	return &r.referencePointer
+}
+
+func (r *JsonReference) String() string {
+
+	if r.referenceUrl != nil {
+		return r.referenceUrl.String()
+	}
+
+	if r.HasFragmentOnly {
+		return const_fragment_char + r.referencePointer.String()
+	}
+
+	return r.referencePointer.String()
+}
+
+func (r *JsonReference) IsCanonical() bool {
+	return (r.HasFileScheme && r.HasFullFilePath) || (!r.HasFileScheme && r.HasFullUrl)
+}
+
+// parse parses the given JSON reference string
+func (r *JsonReference) parse(jsonReferenceString string) (err error) {
+
+	r.referenceUrl, err = url.Parse(jsonReferenceString)
+	if err != nil {
+		return
+	}
+	refUrl := r.referenceUrl
+
+	if refUrl.Scheme != "" && refUrl.Host != "" {
+		r.HasFullUrl = true
+	} else {
+		if refUrl.Path != "" {
+			r.HasUrlPathOnly = true
+		} else if refUrl.RawQuery == "" && refUrl.Fragment != "" {
+			r.HasFragmentOnly = true
+		}
+	}
+
+	r.HasFileScheme = refUrl.Scheme == "file"
+	if runtime.GOOS == "windows" {
+		// on Windows, a file URL may have an extra leading slash, and if it
+		// doesn't then its first component will be treated as the host by the
+		// Go runtime
+		if refUrl.Host == "" && strings.HasPrefix(refUrl.Path, "/") {
+			r.HasFullFilePath = filepath.IsAbs(refUrl.Path[1:])
+		} else {
+			r.HasFullFilePath = filepath.IsAbs(refUrl.Host + refUrl.Path)
+		}
+	} else {
+		r.HasFullFilePath = filepath.IsAbs(refUrl.Path)
+	}
+
+	// an invalid JSON pointer error means the URL has no JSON pointer fragment; ignore it
+	r.referencePointer, _ = gojsonpointer.NewJsonPointer(refUrl.Fragment)
+
+	return
+}
+
+// Inherits creates a new reference from a parent and a child.
+// If the child cannot inherit from the parent, an error is returned.
+func (r *JsonReference) Inherits(child JsonReference) (*JsonReference, error) {
+	childUrl := child.GetUrl()
+	parentUrl := r.GetUrl()
+	if childUrl == nil {
+		return nil, errors.New("childUrl is nil")
+	}
+	if parentUrl == nil {
+		return nil, errors.New("parentUrl is nil")
+	}
+
+	ref, err := NewJsonReference(parentUrl.ResolveReference(childUrl).String())
+	if err != nil {
+		return nil, err
+	}
+	return &ref, err
+}

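A minimal sketch of Inherits resolving a relative child reference against an absolute parent, using only the constructors and accessors shown above; the URLs are placeholders:

package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonreference"
)

func main() {
	parent, err := gojsonreference.NewJsonReference("http://example.com/schemas/root.json")
	if err != nil {
		panic(err)
	}

	child, err := gojsonreference.NewJsonReference("definitions.json#/service")
	if err != nil {
		panic(err)
	}

	// Inherits resolves the child against the parent,
	// like a relative URL against a base URL.
	resolved, err := parent.Inherits(*child_ptr_fix(child))
	if err != nil {
		panic(err)
	}

	fmt.Println(resolved.String())              // http://example.com/schemas/definitions.json#/service
	fmt.Println(resolved.GetPointer().String()) // /service
}

// child_ptr_fix is only here to keep the example compact: Inherits
// takes a JsonReference value, which NewJsonReference already returns.
func child_ptr_fix(r gojsonreference.JsonReference) *gojsonreference.JsonReference {
	return &r
}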
+ 242 - 0
vendor/github.com/xeipuuv/gojsonschema/errors.go

@@ -0,0 +1,242 @@
+package gojsonschema
+
+import (
+	"fmt"
+	"strings"
+)
+
+type (
+	// RequiredError. ErrorDetails: property
+	RequiredError struct {
+		ResultErrorFields
+	}
+
+	// InvalidTypeError. ErrorDetails: expected, given
+	InvalidTypeError struct {
+		ResultErrorFields
+	}
+
+	// NumberAnyOfError. ErrorDetails: -
+	NumberAnyOfError struct {
+		ResultErrorFields
+	}
+
+	// NumberOneOfError. ErrorDetails: -
+	NumberOneOfError struct {
+		ResultErrorFields
+	}
+
+	// NumberAllOfError. ErrorDetails: -
+	NumberAllOfError struct {
+		ResultErrorFields
+	}
+
+	// NumberNotError. ErrorDetails: -
+	NumberNotError struct {
+		ResultErrorFields
+	}
+
+	// MissingDependencyError. ErrorDetails: dependency
+	MissingDependencyError struct {
+		ResultErrorFields
+	}
+
+	// InternalError. ErrorDetails: error
+	InternalError struct {
+		ResultErrorFields
+	}
+
+	// EnumError. ErrorDetails: allowed
+	EnumError struct {
+		ResultErrorFields
+	}
+
+	// ArrayNoAdditionalItemsError. ErrorDetails: -
+	ArrayNoAdditionalItemsError struct {
+		ResultErrorFields
+	}
+
+	// ArrayMinItemsError. ErrorDetails: min
+	ArrayMinItemsError struct {
+		ResultErrorFields
+	}
+
+	// ArrayMaxItemsError. ErrorDetails: max
+	ArrayMaxItemsError struct {
+		ResultErrorFields
+	}
+
+	// ItemsMustBeUniqueError. ErrorDetails: type
+	ItemsMustBeUniqueError struct {
+		ResultErrorFields
+	}
+
+	// ArrayMinPropertiesError. ErrorDetails: min
+	ArrayMinPropertiesError struct {
+		ResultErrorFields
+	}
+
+	// ArrayMaxPropertiesError. ErrorDetails: max
+	ArrayMaxPropertiesError struct {
+		ResultErrorFields
+	}
+
+	// AdditionalPropertyNotAllowedError. ErrorDetails: property
+	AdditionalPropertyNotAllowedError struct {
+		ResultErrorFields
+	}
+
+	// InvalidPropertyPatternError. ErrorDetails: property, pattern
+	InvalidPropertyPatternError struct {
+		ResultErrorFields
+	}
+
+	// StringLengthGTEError. ErrorDetails: min
+	StringLengthGTEError struct {
+		ResultErrorFields
+	}
+
+	// StringLengthLTEError. ErrorDetails: max
+	StringLengthLTEError struct {
+		ResultErrorFields
+	}
+
+	// DoesNotMatchPatternError. ErrorDetails: pattern
+	DoesNotMatchPatternError struct {
+		ResultErrorFields
+	}
+
+	// DoesNotMatchFormatError. ErrorDetails: format
+	DoesNotMatchFormatError struct {
+		ResultErrorFields
+	}
+
+	// MultipleOfError. ErrorDetails: multiple
+	MultipleOfError struct {
+		ResultErrorFields
+	}
+
+	// NumberGTEError. ErrorDetails: min
+	NumberGTEError struct {
+		ResultErrorFields
+	}
+
+	// NumberGTError. ErrorDetails: min
+	NumberGTError struct {
+		ResultErrorFields
+	}
+
+	// NumberLTEError. ErrorDetails: max
+	NumberLTEError struct {
+		ResultErrorFields
+	}
+
+	// NumberLTError. ErrorDetails: max
+	NumberLTError struct {
+		ResultErrorFields
+	}
+)
+
+// newError takes a ResultError type and sets the type, context, description, details, value, and field
+func newError(err ResultError, context *jsonContext, value interface{}, locale locale, details ErrorDetails) {
+	var t string
+	var d string
+	switch err.(type) {
+	case *RequiredError:
+		t = "required"
+		d = locale.Required()
+	case *InvalidTypeError:
+		t = "invalid_type"
+		d = locale.InvalidType()
+	case *NumberAnyOfError:
+		t = "number_any_of"
+		d = locale.NumberAnyOf()
+	case *NumberOneOfError:
+		t = "number_one_of"
+		d = locale.NumberOneOf()
+	case *NumberAllOfError:
+		t = "number_all_of"
+		d = locale.NumberAllOf()
+	case *NumberNotError:
+		t = "number_not"
+		d = locale.NumberNot()
+	case *MissingDependencyError:
+		t = "missing_dependency"
+		d = locale.MissingDependency()
+	case *InternalError:
+		t = "internal"
+		d = locale.Internal()
+	case *EnumError:
+		t = "enum"
+		d = locale.Enum()
+	case *ArrayNoAdditionalItemsError:
+		t = "array_no_additional_items"
+		d = locale.ArrayNoAdditionalItems()
+	case *ArrayMinItemsError:
+		t = "array_min_items"
+		d = locale.ArrayMinItems()
+	case *ArrayMaxItemsError:
+		t = "array_max_items"
+		d = locale.ArrayMaxItems()
+	case *ItemsMustBeUniqueError:
+		t = "unique"
+		d = locale.Unique()
+	case *ArrayMinPropertiesError:
+		t = "array_min_properties"
+		d = locale.ArrayMinProperties()
+	case *ArrayMaxPropertiesError:
+		t = "array_max_properties"
+		d = locale.ArrayMaxProperties()
+	case *AdditionalPropertyNotAllowedError:
+		t = "additional_property_not_allowed"
+		d = locale.AdditionalPropertyNotAllowed()
+	case *InvalidPropertyPatternError:
+		t = "invalid_property_pattern"
+		d = locale.InvalidPropertyPattern()
+	case *StringLengthGTEError:
+		t = "string_gte"
+		d = locale.StringGTE()
+	case *StringLengthLTEError:
+		t = "string_lte"
+		d = locale.StringLTE()
+	case *DoesNotMatchPatternError:
+		t = "pattern"
+		d = locale.DoesNotMatchPattern()
+	case *DoesNotMatchFormatError:
+		t = "format"
+		d = locale.DoesNotMatchFormat()
+	case *MultipleOfError:
+		t = "multiple_of"
+		d = locale.MultipleOf()
+	case *NumberGTEError:
+		t = "number_gte"
+		d = locale.NumberGTE()
+	case *NumberGTError:
+		t = "number_gt"
+		d = locale.NumberGT()
+	case *NumberLTEError:
+		t = "number_lte"
+		d = locale.NumberLTE()
+	case *NumberLTError:
+		t = "number_lt"
+		d = locale.NumberLT()
+	}
+
+	err.SetType(t)
+	err.SetContext(context)
+	err.SetValue(value)
+	err.SetDetails(details)
+	details["field"] = err.Field()
+	err.SetDescription(formatErrorDescription(d, details))
+}
+
+// formatErrorDescription takes a string in this format: %field% is required
+// and converts it to a string with replacements. The replacement values come
+// from the ErrorDetails map and vary for each type of error.
+func formatErrorDescription(s string, details ErrorDetails) string {
+	for name, val := range details {
+		s = strings.Replace(s, "%"+strings.ToLower(name)+"%", fmt.Sprintf("%v", val), -1)
+	}
+
+	return s
+}

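formatErrorDescription is package-internal, so this standalone sketch inlines the same %name% replacement logic to show how a locale template becomes a final message; the template and details are illustrative:

package main

import (
	"fmt"
	"strings"
)

// format mirrors formatErrorDescription above: each %name%
// placeholder is replaced by details[name].
func format(s string, details map[string]interface{}) string {
	for name, val := range details {
		s = strings.Replace(s, "%"+strings.ToLower(name)+"%", fmt.Sprintf("%v", val), -1)
	}
	return s
}

func main() {
	// Extra keys (like "field") that do not appear in the template are harmless.
	msg := format("Array must have at least %min% items", map[string]interface{}{
		"min":   1,
		"field": "services",
	})
	fmt.Println(msg) // Array must have at least 1 items
}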
+ 178 - 0
vendor/github.com/xeipuuv/gojsonschema/format_checkers.go

@@ -0,0 +1,178 @@
+package gojsonschema
+
+import (
+	"net"
+	"net/url"
+	"reflect"
+	"regexp"
+	"strings"
+	"time"
+)
+
+type (
+	// FormatChecker is the interface all formatters added to FormatCheckerChain must implement
+	FormatChecker interface {
+		IsFormat(input string) bool
+	}
+
+	// FormatCheckerChain holds the formatters
+	FormatCheckerChain struct {
+		formatters map[string]FormatChecker
+	}
+
+	// EmailFormatChecker verifies email address formats
+	EmailFormatChecker struct{}
+
+	// IPV4FormatChecker verifies IP addresses in the ipv4 format
+	IPV4FormatChecker struct{}
+
+	// IPV6FormatChecker verifies IP addresses in the ipv6 format
+	IPV6FormatChecker struct{}
+
+	// DateTimeFormatChecker verifies date/time formats per RFC3339 5.6
+	//
+	// Valid formats:
+	// 		Partial Time: HH:MM:SS
+	//		Full Date: YYYY-MM-DD
+	// 		Full Time: HH:MM:SS±HH:MM (or HH:MM:SSZ)
+	//		Date Time: YYYY-MM-DDTHH:MM:SS±HH:MM (or ...Z)
+	//
+	// 	Where
+	//		YYYY = 4DIGIT year
+	//		MM = 2DIGIT month ; 01-12
+	//		DD = 2DIGIT day-month ; 01-28, 01-29, 01-30, 01-31 based on month/year
+	//		HH = 2DIGIT hour ; 00-23
+	//		MM = 2DIGIT minute ; 00-59
+	//		SS = 2DIGIT ; 00-58, 00-60 based on leap second rules
+	//		T = Literal
+	//		Z = Literal
+	//
+	//	Note: Nanoseconds are also supported in all formats
+	//
+	// http://tools.ietf.org/html/rfc3339#section-5.6
+	DateTimeFormatChecker struct{}
+
+	// URIFormatChecker validates a URI with a valid scheme per RFC3986
+	URIFormatChecker struct{}
+
+	// HostnameFormatChecker validates a hostname is in the correct format
+	HostnameFormatChecker struct{}
+
+	// UUIDFormatChecker validates a UUID is in the correct format
+	UUIDFormatChecker struct{}
+)
+
+var (
+	// FormatCheckers holds the valid format checkers, and is a public variable
+	// so library users can add custom format checkers
+	FormatCheckers = FormatCheckerChain{
+		formatters: map[string]FormatChecker{
+			"date-time": DateTimeFormatChecker{},
+			"hostname":  HostnameFormatChecker{},
+			"email":     EmailFormatChecker{},
+			"ipv4":      IPV4FormatChecker{},
+			"ipv6":      IPV6FormatChecker{},
+			"uri":       URIFormatChecker{},
+			"uuid":      UUIDFormatChecker{},
+		},
+	}
+
+	// Regex credit: https://github.com/asaskevich/govalidator
+	rxEmail = regexp.MustCompile("^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$")
+
+	// Regex credit: https://www.socketloop.com/tutorials/golang-validate-hostname
+	rxHostname = regexp.MustCompile(`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$`)
+
+	rxUUID = regexp.MustCompile("^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$")
+)
+
+// Add adds a FormatChecker to the FormatCheckerChain
+// The name used will be the value of the "format" key in your JSON schema
+func (c *FormatCheckerChain) Add(name string, f FormatChecker) *FormatCheckerChain {
+	c.formatters[name] = f
+
+	return c
+}
+
+// Remove deletes a FormatChecker from the FormatCheckerChain (if it exists)
+func (c *FormatCheckerChain) Remove(name string) *FormatCheckerChain {
+	delete(c.formatters, name)
+
+	return c
+}
+
+// Has checks to see if the FormatCheckerChain holds a FormatChecker with the given name
+func (c *FormatCheckerChain) Has(name string) bool {
+	_, ok := c.formatters[name]
+
+	return ok
+}
+
+// IsFormat will check an input against a FormatChecker with the given name
+// to see if it is the correct format
+func (c *FormatCheckerChain) IsFormat(name string, input interface{}) bool {
+	f, ok := c.formatters[name]
+
+	if !ok {
+		return false
+	}
+
+	if !isKind(input, reflect.String) {
+		return false
+	}
+
+	inputString := input.(string)
+
+	return f.IsFormat(inputString)
+}
+
+func (f EmailFormatChecker) IsFormat(input string) bool {
+	return rxEmail.MatchString(input)
+}
+
+// Credit: https://github.com/asaskevich/govalidator
+func (f IPV4FormatChecker) IsFormat(input string) bool {
+	ip := net.ParseIP(input)
+	return ip != nil && strings.Contains(input, ".")
+}
+
+// Credit: https://github.com/asaskevich/govalidator
+func (f IPV6FormatChecker) IsFormat(input string) bool {
+	ip := net.ParseIP(input)
+	return ip != nil && strings.Contains(input, ":")
+}
+
+func (f DateTimeFormatChecker) IsFormat(input string) bool {
+	formats := []string{
+		"15:04:05",
+		"15:04:05Z07:00",
+		"2006-01-02",
+		time.RFC3339,
+		time.RFC3339Nano,
+	}
+
+	for _, format := range formats {
+		if _, err := time.Parse(format, input); err == nil {
+			return true
+		}
+	}
+
+	return false
+}
+
+func (f URIFormatChecker) IsFormat(input string) bool {
+	u, err := url.Parse(input)
+	if err != nil || u.Scheme == "" {
+		return false
+	}
+
+	return true
+}
+
+func (f HostnameFormatChecker) IsFormat(input string) bool {
+	return rxHostname.MatchString(input) && len(input) < 256
+}
+
+func (f UUIDFormatChecker) IsFormat(input string) bool {
+	return rxUUID.MatchString(input)
+}

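Because FormatCheckers is exported, callers can register their own checkers. A sketch with a hypothetical "semver" checker; the name and the deliberately loose check are illustrative only:

package main

import (
	"strings"

	"github.com/xeipuuv/gojsonschema"
)

// SemverFormatChecker is a hypothetical custom checker; any type
// implementing IsFormat(string) bool satisfies FormatChecker.
type SemverFormatChecker struct{}

func (c SemverFormatChecker) IsFormat(input string) bool {
	// Loose check for illustration only.
	return strings.Count(input, ".") == 2
}

func main() {
	// "semver" becomes usable as a "format" value in schemas.
	gojsonschema.FormatCheckers.Add("semver", SemverFormatChecker{})
}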
+ 37 - 0
vendor/github.com/xeipuuv/gojsonschema/internalLog.go

@@ -0,0 +1,37 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author           xeipuuv
+// author-github    https://github.com/xeipuuv
+// author-mail      xeipuuv@gmail.com
+//
+// repository-name  gojsonschema
+// repository-desc  An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description      Very simple log wrapper.
+//                  Used for debugging/testing purposes.
+//
+// created          01-01-2015
+
+package gojsonschema
+
+import (
+	"log"
+)
+
+const internalLogEnabled = false
+
+func internalLog(format string, v ...interface{}) {
+	log.Printf(format, v...)
+}

+ 72 - 0
vendor/github.com/xeipuuv/gojsonschema/jsonContext.go

@@ -0,0 +1,72 @@
+// Copyright 2013 MongoDB, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author           tolsen
+// author-github    https://github.com/tolsen
+//
+// repository-name  gojsonschema
+// repository-desc  An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description      Implements a persistent (immutable w/ shared structure) singly-linked list of strings for the purpose of storing a JSON context
+//
+// created          04-09-2013
+
+package gojsonschema
+
+import "bytes"
+
+// jsonContext implements a persistent linked-list of strings
+type jsonContext struct {
+	head string
+	tail *jsonContext
+}
+
+func newJsonContext(head string, tail *jsonContext) *jsonContext {
+	return &jsonContext{head, tail}
+}
+
+// String displays the context in reverse (root first).
+// This plays well with the data structure's persistent nature
+// (Cons-style sharing) and a JSON document's tree structure.
+func (c *jsonContext) String(del ...string) string {
+	byteArr := make([]byte, 0, c.stringLen())
+	buf := bytes.NewBuffer(byteArr)
+	c.writeStringToBuffer(buf, del)
+
+	return buf.String()
+}
+
+func (c *jsonContext) stringLen() int {
+	length := 0
+	if c.tail != nil {
+		length = c.tail.stringLen() + 1 // add 1 for "."
+	}
+
+	length += len(c.head)
+	return length
+}
+
+func (c *jsonContext) writeStringToBuffer(buf *bytes.Buffer, del []string) {
+	if c.tail != nil {
+		c.tail.writeStringToBuffer(buf, del)
+
+		if len(del) > 0 {
+			buf.WriteString(del[0])
+		} else {
+			buf.WriteString(".")
+		}
+	}
+
+	buf.WriteString(c.head)
+}

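jsonContext is unexported, so outside code cannot use it directly; this standalone sketch reproduces the same persistent-list idea to show why String renders root-first:

package main

import "fmt"

// ctx is a standalone copy of the jsonContext idea: a persistent
// singly-linked list whose head is the deepest path element,
// printed tail-first so the root comes out first.
type ctx struct {
	head string
	tail *ctx
}

func (c *ctx) String() string {
	if c.tail == nil {
		return c.head
	}
	return c.tail.String() + "." + c.head
}

func main() {
	root := &ctx{head: "(root)"}
	services := &ctx{head: "services", tail: root}
	name := &ctx{head: "name", tail: services}
	fmt.Println(name.String()) // (root).services.name
}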
+ 286 - 0
vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go

@@ -0,0 +1,286 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author           xeipuuv
+// author-github    https://github.com/xeipuuv
+// author-mail      xeipuuv@gmail.com
+//
+// repository-name  gojsonschema
+// repository-desc  An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description      Different strategies to load JSON files.
+//                  Includes references (file and HTTP), JSON strings and Go types.
+//
+// created          01-02-2015
+
+package gojsonschema
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"path/filepath"
+	"runtime"
+	"strings"
+
+	"github.com/xeipuuv/gojsonreference"
+)
+
+// JSON loader interface
+
+type JSONLoader interface {
+	jsonSource() interface{}
+	loadJSON() (interface{}, error)
+	loadSchema() (*Schema, error)
+}
+
+// JSON Reference loader
+// References are used to load JSON documents from files and over HTTP.
+
+type jsonReferenceLoader struct {
+	source string
+}
+
+func (l *jsonReferenceLoader) jsonSource() interface{} {
+	return l.source
+}
+
+func NewReferenceLoader(source string) *jsonReferenceLoader {
+	return &jsonReferenceLoader{source: source}
+}
+
+func (l *jsonReferenceLoader) loadJSON() (interface{}, error) {
+
+	reference, err := gojsonreference.NewJsonReference(l.jsonSource().(string))
+	if err != nil {
+		return nil, err
+	}
+
+	refToUrl := reference
+	refToUrl.GetUrl().Fragment = ""
+
+	var document interface{}
+
+	if reference.HasFileScheme {
+
+		filename := strings.Replace(refToUrl.String(), "file://", "", -1)
+		if runtime.GOOS == "windows" {
+			// on Windows, a file URL may have an extra leading slash, uses
+			// forward slashes instead of backslashes, and has spaces escaped
+			if strings.HasPrefix(filename, "/") {
+				filename = filename[1:]
+			}
+			filename = filepath.FromSlash(filename)
+			filename = strings.Replace(filename, "%20", " ", -1)
+		}
+
+		document, err = l.loadFromFile(filename)
+		if err != nil {
+			return nil, err
+		}
+
+	} else {
+
+		document, err = l.loadFromHTTP(refToUrl.String())
+		if err != nil {
+			return nil, err
+		}
+
+	}
+
+	return document, nil
+
+}
+
+func (l *jsonReferenceLoader) loadSchema() (*Schema, error) {
+
+	var err error
+
+	d := Schema{}
+	d.pool = newSchemaPool()
+	d.referencePool = newSchemaReferencePool()
+
+	d.documentReference, err = gojsonreference.NewJsonReference(l.jsonSource().(string))
+	if err != nil {
+		return nil, err
+	}
+
+	spd, err := d.pool.GetDocument(d.documentReference)
+	if err != nil {
+		return nil, err
+	}
+
+	err = d.parse(spd.Document)
+	if err != nil {
+		return nil, err
+	}
+
+	return &d, nil
+
+}
+
+func (l *jsonReferenceLoader) loadFromHTTP(address string) (interface{}, error) {
+
+	resp, err := http.Get(address)
+	if err != nil {
+		return nil, err
+	}
+
+	// must return HTTP Status 200 OK
+	if resp.StatusCode != http.StatusOK {
+		return nil, errors.New(formatErrorDescription(Locale.httpBadStatus(), ErrorDetails{"status": resp.Status}))
+	}
+
+	bodyBuff, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	return decodeJsonUsingNumber(bytes.NewReader(bodyBuff))
+
+}
+
+func (l *jsonReferenceLoader) loadFromFile(path string) (interface{}, error) {
+
+	bodyBuff, err := ioutil.ReadFile(path)
+	if err != nil {
+		return nil, err
+	}
+
+	return decodeJsonUsingNumber(bytes.NewReader(bodyBuff))
+
+}
+
+// JSON string loader
+
+type jsonStringLoader struct {
+	source string
+}
+
+func (l *jsonStringLoader) jsonSource() interface{} {
+	return l.source
+}
+
+func NewStringLoader(source string) *jsonStringLoader {
+	return &jsonStringLoader{source: source}
+}
+
+func (l *jsonStringLoader) loadJSON() (interface{}, error) {
+
+	return decodeJsonUsingNumber(strings.NewReader(l.jsonSource().(string)))
+
+}
+
+func (l *jsonStringLoader) loadSchema() (*Schema, error) {
+
+	document, err := l.loadJSON()
+	if err != nil {
+		return nil, err
+	}
+
+	d := Schema{}
+	d.pool = newSchemaPool()
+	d.referencePool = newSchemaReferencePool()
+	d.documentReference, err = gojsonreference.NewJsonReference("#")
+	if err != nil {
+		return nil, err
+	}
+	d.pool.SetStandaloneDocument(document)
+
+	err = d.parse(document)
+	if err != nil {
+		return nil, err
+	}
+
+	return &d, nil
+
+}
+
+// JSON Go (types) loader
+// Used to load JSON from Go values such as maps, interface{} values, and structs.
+
+type jsonGoLoader struct {
+	source interface{}
+}
+
+func (l *jsonGoLoader) jsonSource() interface{} {
+	return l.source
+}
+
+func NewGoLoader(source interface{}) *jsonGoLoader {
+	return &jsonGoLoader{source: source}
+}
+
+func (l *jsonGoLoader) loadJSON() (interface{}, error) {
+
+	// marshal to compliant JSON first to avoid type mismatches
+
+	jsonBytes, err := json.Marshal(l.jsonSource())
+	if err != nil {
+		return nil, err
+	}
+
+	return decodeJsonUsingNumber(bytes.NewReader(jsonBytes))
+
+}
+
+func (l *jsonGoLoader) loadSchema() (*Schema, error) {
+
+	document, err := l.loadJSON()
+	if err != nil {
+		return nil, err
+	}
+
+	d := Schema{}
+	d.pool = newSchemaPool()
+	d.referencePool = newSchemaReferencePool()
+	d.documentReference, err = gojsonreference.NewJsonReference("#")
+	if err != nil {
+		return nil, err
+	}
+	d.pool.SetStandaloneDocument(document)
+
+	err = d.parse(document)
+	if err != nil {
+		return nil, err
+	}
+
+	return &d, nil
+
+}
+
+func decodeJsonUsingNumber(r io.Reader) (interface{}, error) {
+
+	var document interface{}
+
+	decoder := json.NewDecoder(r)
+	decoder.UseNumber()
+
+	err := decoder.Decode(&document)
+	if err != nil {
+		return nil, err
+	}
+
+	return document, nil
+
+}

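A sketch of the three loader strategies above feeding NewSchema (defined later in this package); the file path and schema literals are placeholders:

package main

import "github.com/xeipuuv/gojsonschema"

func main() {
	// Three loader strategies, all implementing JSONLoader:
	_ = gojsonschema.NewReferenceLoader("file:///tmp/schema.json") // file/HTTP reference
	_ = gojsonschema.NewStringLoader(`{"type": "object"}`)         // raw JSON string
	_ = gojsonschema.NewGoLoader(map[string]interface{}{           // Go value, marshalled first
		"type": "object",
	})

	// A loader can then back a schema:
	schema, err := gojsonschema.NewSchema(gojsonschema.NewStringLoader(`{"type": "object"}`))
	if err != nil {
		panic(err)
	}
	_ = schema
}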
+ 275 - 0
vendor/github.com/xeipuuv/gojsonschema/locales.go

@@ -0,0 +1,275 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author           xeipuuv
+// author-github    https://github.com/xeipuuv
+// author-mail      xeipuuv@gmail.com
+//
+// repository-name  gojsonschema
+// repository-desc  An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description      Contains const strings and messages.
+//
+// created          01-01-2015
+
+package gojsonschema
+
+type (
+	// locale is an interface for defining custom error strings
+	locale interface {
+		Required() string
+		InvalidType() string
+		NumberAnyOf() string
+		NumberOneOf() string
+		NumberAllOf() string
+		NumberNot() string
+		MissingDependency() string
+		Internal() string
+		Enum() string
+		ArrayNoAdditionalItems() string
+		ArrayMinItems() string
+		ArrayMaxItems() string
+		Unique() string
+		ArrayMinProperties() string
+		ArrayMaxProperties() string
+		AdditionalPropertyNotAllowed() string
+		InvalidPropertyPattern() string
+		StringGTE() string
+		StringLTE() string
+		DoesNotMatchPattern() string
+		DoesNotMatchFormat() string
+		MultipleOf() string
+		NumberGTE() string
+		NumberGT() string
+		NumberLTE() string
+		NumberLT() string
+
+		// Schema validations
+		RegexPattern() string
+		GreaterThanZero() string
+		MustBeOfA() string
+		MustBeOfAn() string
+		CannotBeUsedWithout() string
+		CannotBeGT() string
+		MustBeOfType() string
+		MustBeValidRegex() string
+		MustBeValidFormat() string
+		MustBeGTEZero() string
+		KeyCannotBeGreaterThan() string
+		KeyItemsMustBeOfType() string
+		KeyItemsMustBeUnique() string
+		ReferenceMustBeCanonical() string
+		NotAValidType() string
+		Duplicated() string
+		httpBadStatus() string
+
+		// ErrorFormat
+		ErrorFormat() string
+	}
+
+	// DefaultLocale is the default locale for this package
+	DefaultLocale struct{}
+)
+
+func (l DefaultLocale) Required() string {
+	return `%property% is required`
+}
+
+func (l DefaultLocale) InvalidType() string {
+	return `Invalid type. Expected: %expected%, given: %given%`
+}
+
+func (l DefaultLocale) NumberAnyOf() string {
+	return `Must validate at least one schema (anyOf)`
+}
+
+func (l DefaultLocale) NumberOneOf() string {
+	return `Must validate one and only one schema (oneOf)`
+}
+
+func (l DefaultLocale) NumberAllOf() string {
+	return `Must validate all the schemas (allOf)`
+}
+
+func (l DefaultLocale) NumberNot() string {
+	return `Must not validate the schema (not)`
+}
+
+func (l DefaultLocale) MissingDependency() string {
+	return `Has a dependency on %dependency%`
+}
+
+func (l DefaultLocale) Internal() string {
+	return `Internal Error %error%`
+}
+
+func (l DefaultLocale) Enum() string {
+	return `%field% must be one of the following: %allowed%`
+}
+
+func (l DefaultLocale) ArrayNoAdditionalItems() string {
+	return `No additional items allowed on array`
+}
+
+func (l DefaultLocale) ArrayMinItems() string {
+	return `Array must have at least %min% items`
+}
+
+func (l DefaultLocale) ArrayMaxItems() string {
+	return `Array must have at most %max% items`
+}
+
+func (l DefaultLocale) Unique() string {
+	return `%type% items must be unique`
+}
+
+func (l DefaultLocale) ArrayMinProperties() string {
+	return `Must have at least %min% properties`
+}
+
+func (l DefaultLocale) ArrayMaxProperties() string {
+	return `Must have at most %max% properties`
+}
+
+func (l DefaultLocale) AdditionalPropertyNotAllowed() string {
+	return `Additional property %property% is not allowed`
+}
+
+func (l DefaultLocale) InvalidPropertyPattern() string {
+	return `Property "%property%" does not match pattern %pattern%`
+}
+
+func (l DefaultLocale) StringGTE() string {
+	return `String length must be greater than or equal to %min%`
+}
+
+func (l DefaultLocale) StringLTE() string {
+	return `String length must be less than or equal to %max%`
+}
+
+func (l DefaultLocale) DoesNotMatchPattern() string {
+	return `Does not match pattern '%pattern%'`
+}
+
+func (l DefaultLocale) DoesNotMatchFormat() string {
+	return `Does not match format '%format%'`
+}
+
+func (l DefaultLocale) MultipleOf() string {
+	return `Must be a multiple of %multiple%`
+}
+
+func (l DefaultLocale) NumberGTE() string {
+	return `Must be greater than or equal to %min%`
+}
+
+func (l DefaultLocale) NumberGT() string {
+	return `Must be greater than %min%`
+}
+
+func (l DefaultLocale) NumberLTE() string {
+	return `Must be less than or equal to %max%`
+}
+
+func (l DefaultLocale) NumberLT() string {
+	return `Must be less than %max%`
+}
+
+// Schema validators
+func (l DefaultLocale) RegexPattern() string {
+	return `Invalid regex pattern '%pattern%'`
+}
+
+func (l DefaultLocale) GreaterThanZero() string {
+	return `%number% must be strictly greater than 0`
+}
+
+func (l DefaultLocale) MustBeOfA() string {
+	return `%x% must be of a %y%`
+}
+
+func (l DefaultLocale) MustBeOfAn() string {
+	return `%x% must be of an %y%`
+}
+
+func (l DefaultLocale) CannotBeUsedWithout() string {
+	return `%x% cannot be used without %y%`
+}
+
+func (l DefaultLocale) CannotBeGT() string {
+	return `%x% cannot be greater than %y%`
+}
+
+func (l DefaultLocale) MustBeOfType() string {
+	return `%key% must be of type %type%`
+}
+
+func (l DefaultLocale) MustBeValidRegex() string {
+	return `%key% must be a valid regex`
+}
+
+func (l DefaultLocale) MustBeValidFormat() string {
+	return `%key% must be a valid format %given%`
+}
+
+func (l DefaultLocale) MustBeGTEZero() string {
+	return `%key% must be greater than or equal to 0`
+}
+
+func (l DefaultLocale) KeyCannotBeGreaterThan() string {
+	return `%key% cannot be greater than %y%`
+}
+
+func (l DefaultLocale) KeyItemsMustBeOfType() string {
+	return `%key% items must be %type%`
+}
+
+func (l DefaultLocale) KeyItemsMustBeUnique() string {
+	return `%key% items must be unique`
+}
+
+func (l DefaultLocale) ReferenceMustBeCanonical() string {
+	return `Reference %reference% must be canonical`
+}
+
+func (l DefaultLocale) NotAValidType() string {
+	return `%type% is not a valid type -- `
+}
+
+func (l DefaultLocale) Duplicated() string {
+	return `%type% type is duplicated`
+}
+
+func (l DefaultLocale) httpBadStatus() string {
+	return `Could not read schema from HTTP, response status is %status%`
+}
+
+// Replacement options: field, description, context, value
+func (l DefaultLocale) ErrorFormat() string {
+	return `%field%: %description%`
+}
+
+const (
+	STRING_NUMBER                     = "number"
+	STRING_ARRAY_OF_STRINGS           = "array of strings"
+	STRING_ARRAY_OF_SCHEMAS           = "array of schemas"
+	STRING_SCHEMA                     = "schema"
+	STRING_SCHEMA_OR_ARRAY_OF_STRINGS = "schema or array of strings"
+	STRING_PROPERTIES                 = "properties"
+	STRING_DEPENDENCY                 = "dependency"
+	STRING_PROPERTY                   = "property"
+	STRING_UNDEFINED                  = "undefined"
+	STRING_CONTEXT_ROOT               = "(root)"
+	STRING_ROOT_SCHEMA_PROPERTY       = "(root)"
+)

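Since httpBadStatus is unexported, an external package cannot implement the locale interface from scratch, but it can embed DefaultLocale and override individual messages; a sketch, with an illustrative message:

package main

import "github.com/xeipuuv/gojsonschema"

// customLocale overrides a single message; embedding DefaultLocale
// supplies the remaining methods (including the unexported one,
// which an external package could not implement directly).
type customLocale struct {
	gojsonschema.DefaultLocale
}

func (l customLocale) Required() string {
	return `missing required property %property%`
}

func main() {
	gojsonschema.Locale = customLocale{}
}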
+ 171 - 0
vendor/github.com/xeipuuv/gojsonschema/result.go

@@ -0,0 +1,171 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author           xeipuuv
+// author-github    https://github.com/xeipuuv
+// author-mail      xeipuuv@gmail.com
+//
+// repository-name  gojsonschema
+// repository-desc  An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description      Result and ResultError implementations.
+//
+// created          01-01-2015
+
+package gojsonschema
+
+import (
+	"fmt"
+	"strings"
+)
+
+type (
+	// ErrorDetails is a map of details specific to each error.
+	// While the values will vary, every error will contain a "field" value
+	ErrorDetails map[string]interface{}
+
+	// ResultError is the interface that library errors must implement
+	ResultError interface {
+		Field() string
+		SetType(string)
+		Type() string
+		SetContext(*jsonContext)
+		Context() *jsonContext
+		SetDescription(string)
+		Description() string
+		SetValue(interface{})
+		Value() interface{}
+		SetDetails(ErrorDetails)
+		Details() ErrorDetails
+	}
+
+	// ResultErrorFields holds the fields for each ResultError implementation.
+	// ResultErrorFields implements the ResultError interface, so custom errors
+	// can be defined by just embedding this type
+	ResultErrorFields struct {
+		errorType   string       // A string with the type of error (i.e. invalid_type)
+		context     *jsonContext // Tree like notation of the part that failed the validation. ex (root).a.b ...
+		description string       // A human readable error message
+		value       interface{}  // Value given by the JSON file that is the source of the error
+		details     ErrorDetails
+	}
+
+	Result struct {
+		errors []ResultError
+		// Scores how well the validation matched. Useful in generating
+		// better error messages for anyOf and oneOf.
+		score int
+	}
+)
+
+// Field outputs the field name without the root context
+// i.e. firstName or person.firstName instead of (root).firstName or (root).person.firstName
+func (v *ResultErrorFields) Field() string {
+	if p, ok := v.Details()["property"]; ok {
+		if str, isString := p.(string); isString {
+			return str
+		}
+	}
+
+	return strings.TrimPrefix(v.context.String(), STRING_ROOT_SCHEMA_PROPERTY+".")
+}
+
+func (v *ResultErrorFields) SetType(errorType string) {
+	v.errorType = errorType
+}
+
+func (v *ResultErrorFields) Type() string {
+	return v.errorType
+}
+
+func (v *ResultErrorFields) SetContext(context *jsonContext) {
+	v.context = context
+}
+
+func (v *ResultErrorFields) Context() *jsonContext {
+	return v.context
+}
+
+func (v *ResultErrorFields) SetDescription(description string) {
+	v.description = description
+}
+
+func (v *ResultErrorFields) Description() string {
+	return v.description
+}
+
+func (v *ResultErrorFields) SetValue(value interface{}) {
+	v.value = value
+}
+
+func (v *ResultErrorFields) Value() interface{} {
+	return v.value
+}
+
+func (v *ResultErrorFields) SetDetails(details ErrorDetails) {
+	v.details = details
+}
+
+func (v *ResultErrorFields) Details() ErrorDetails {
+	return v.details
+}
+
+func (v ResultErrorFields) String() string {
+	// as a fallback, the value is displayed Go style
+	valueString := fmt.Sprintf("%v", v.value)
+
+	// marshal the Go value to JSON
+	if v.value == nil {
+		valueString = TYPE_NULL
+	} else {
+		if vs, err := marshalToJsonString(v.value); err == nil {
+			if vs == nil {
+				valueString = TYPE_NULL
+			} else {
+				valueString = *vs
+			}
+		}
+	}
+
+	return formatErrorDescription(Locale.ErrorFormat(), ErrorDetails{
+		"context":     v.context.String(),
+		"description": v.description,
+		"value":       valueString,
+		"field":       v.Field(),
+	})
+}
+
+func (v *Result) Valid() bool {
+	return len(v.errors) == 0
+}
+
+func (v *Result) Errors() []ResultError {
+	return v.errors
+}
+
+func (v *Result) addError(err ResultError, context *jsonContext, value interface{}, details ErrorDetails) {
+	newError(err, context, value, Locale, details)
+	v.errors = append(v.errors, err)
+	v.score -= 2 // results in a net -1 when added to the +1 we get at the end of the validation function
+}
+
+// Used to copy errors from a sub-schema to the main one
+func (v *Result) mergeErrors(otherResult *Result) {
+	v.errors = append(v.errors, otherResult.Errors()...)
+	v.score += otherResult.score
+}
+
+func (v *Result) incrementScore() {
+	v.score++
+}

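A sketch of consuming a Result, assuming the package's top-level Validate helper (defined elsewhere in the package); the schema and document literals are illustrative:

package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	schema := gojsonschema.NewStringLoader(`{
		"type": "object",
		"required": ["name"]
	}`)
	document := gojsonschema.NewStringLoader(`{}`)

	result, err := gojsonschema.Validate(schema, document)
	if err != nil {
		panic(err)
	}

	if !result.Valid() {
		for _, resultError := range result.Errors() {
			// Each ResultError carries a type, field, and formatted description.
			fmt.Printf("%s: %s\n", resultError.Type(), resultError.Description())
		}
	}
}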
+ 908 - 0
vendor/github.com/xeipuuv/gojsonschema/schema.go

@@ -0,0 +1,908 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author           xeipuuv
+// author-github    https://github.com/xeipuuv
+// author-mail      xeipuuv@gmail.com
+//
+// repository-name  gojsonschema
+// repository-desc  An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description      Defines Schema, the main entry to every subSchema.
+//                  Contains the parsing logic and error checking.
+//
+// created          26-02-2013
+
+package gojsonschema
+
+import (
+	"errors"
+	"reflect"
+	"regexp"
+
+	"github.com/xeipuuv/gojsonreference"
+)
+
+var (
+	// Locale is the default locale to use
+	// Library users can overwrite with their own implementation
+	// Library users can override it with their own implementation
+)
+
+func NewSchema(l JSONLoader) (*Schema, error) {
+	return l.loadSchema()
+}
+
+type Schema struct {
+	documentReference gojsonreference.JsonReference
+	rootSchema        *subSchema
+	pool              *schemaPool
+	referencePool     *schemaReferencePool
+}
+
+func (d *Schema) parse(document interface{}) error {
+	d.rootSchema = &subSchema{property: STRING_ROOT_SCHEMA_PROPERTY}
+	return d.parseSchema(document, d.rootSchema)
+}
+
+func (d *Schema) SetRootSchemaName(name string) {
+	d.rootSchema.property = name
+}
+
+// parseSchema parses a subSchema.
+//
+// A long function, but straightforward, repetitive and boring:
+// not much magic involved here, most of the job is to validate the key names
+// and their values, then the values are copied into the subSchema struct.
+//
+func (d *Schema) parseSchema(documentNode interface{}, currentSchema *subSchema) error {
+
+	if !isKind(documentNode, reflect.Map) {
+		return errors.New(formatErrorDescription(
+			Locale.InvalidType(),
+			ErrorDetails{
+				"expected": TYPE_OBJECT,
+				"given":    STRING_SCHEMA,
+			},
+		))
+	}
+
+	m := documentNode.(map[string]interface{})
+
+	if currentSchema == d.rootSchema {
+		currentSchema.ref = &d.documentReference
+	}
+
+	// $schema
+	if existsMapKey(m, KEY_SCHEMA) {
+		if !isKind(m[KEY_SCHEMA], reflect.String) {
+			return errors.New(formatErrorDescription(
+				Locale.InvalidType(),
+				ErrorDetails{
+					"expected": TYPE_STRING,
+					"given":    KEY_SCHEMA,
+				},
+			))
+		}
+		schemaRef := m[KEY_SCHEMA].(string)
+		schemaReference, err := gojsonreference.NewJsonReference(schemaRef)
+		if err != nil {
+			return err
+		}
+		currentSchema.subSchema = &schemaReference
+	}
+
+	// $ref
+	if existsMapKey(m, KEY_REF) && !isKind(m[KEY_REF], reflect.String) {
+		return errors.New(formatErrorDescription(
+			Locale.InvalidType(),
+			ErrorDetails{
+				"expected": TYPE_STRING,
+				"given":    KEY_REF,
+			},
+		))
+	}
+	if k, ok := m[KEY_REF].(string); ok {
+
+		if sch, ok := d.referencePool.Get(currentSchema.ref.String() + k); ok {
+
+			currentSchema.refSchema = sch
+
+		} else {
+
+			err := d.parseReference(documentNode, currentSchema, k)
+			if err != nil {
+				return err
+			}
+
+			return nil
+		}
+	}
+
+	// definitions
+	if existsMapKey(m, KEY_DEFINITIONS) {
+		if isKind(m[KEY_DEFINITIONS], reflect.Map) {
+			currentSchema.definitions = make(map[string]*subSchema)
+			for dk, dv := range m[KEY_DEFINITIONS].(map[string]interface{}) {
+				if isKind(dv, reflect.Map) {
+					newSchema := &subSchema{property: KEY_DEFINITIONS, parent: currentSchema, ref: currentSchema.ref}
+					currentSchema.definitions[dk] = newSchema
+					err := d.parseSchema(dv, newSchema)
+					if err != nil {
+						return err
+					}
+				} else {
+					return errors.New(formatErrorDescription(
+						Locale.InvalidType(),
+						ErrorDetails{
+							"expected": STRING_ARRAY_OF_SCHEMAS,
+							"given":    KEY_DEFINITIONS,
+						},
+					))
+				}
+			}
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.InvalidType(),
+				ErrorDetails{
+					"expected": STRING_ARRAY_OF_SCHEMAS,
+					"given":    KEY_DEFINITIONS,
+				},
+			))
+		}
+
+	}
+
+	// id
+	if existsMapKey(m, KEY_ID) && !isKind(m[KEY_ID], reflect.String) {
+		return errors.New(formatErrorDescription(
+			Locale.InvalidType(),
+			ErrorDetails{
+				"expected": TYPE_STRING,
+				"given":    KEY_ID,
+			},
+		))
+	}
+	if k, ok := m[KEY_ID].(string); ok {
+		currentSchema.id = &k
+	}
+
+	// title
+	if existsMapKey(m, KEY_TITLE) && !isKind(m[KEY_TITLE], reflect.String) {
+		return errors.New(formatErrorDescription(
+			Locale.InvalidType(),
+			ErrorDetails{
+				"expected": TYPE_STRING,
+				"given":    KEY_TITLE,
+			},
+		))
+	}
+	if k, ok := m[KEY_TITLE].(string); ok {
+		currentSchema.title = &k
+	}
+
+	// description
+	if existsMapKey(m, KEY_DESCRIPTION) && !isKind(m[KEY_DESCRIPTION], reflect.String) {
+		return errors.New(formatErrorDescription(
+			Locale.InvalidType(),
+			ErrorDetails{
+				"expected": TYPE_STRING,
+				"given":    KEY_DESCRIPTION,
+			},
+		))
+	}
+	if k, ok := m[KEY_DESCRIPTION].(string); ok {
+		currentSchema.description = &k
+	}
+
+	// type
+	if existsMapKey(m, KEY_TYPE) {
+		if isKind(m[KEY_TYPE], reflect.String) {
+			if k, ok := m[KEY_TYPE].(string); ok {
+				err := currentSchema.types.Add(k)
+				if err != nil {
+					return err
+				}
+			}
+		} else {
+			if isKind(m[KEY_TYPE], reflect.Slice) {
+				arrayOfTypes := m[KEY_TYPE].([]interface{})
+				for _, typeInArray := range arrayOfTypes {
+					if reflect.ValueOf(typeInArray).Kind() != reflect.String {
+						return errors.New(formatErrorDescription(
+							Locale.InvalidType(),
+							ErrorDetails{
+								"expected": TYPE_STRING + "/" + STRING_ARRAY_OF_STRINGS,
+								"given":    KEY_TYPE,
+							},
+						))
+					} else {
+						currentSchema.types.Add(typeInArray.(string))
+					}
+				}
+
+			} else {
+				return errors.New(formatErrorDescription(
+					Locale.InvalidType(),
+					ErrorDetails{
+						"expected": TYPE_STRING + "/" + STRING_ARRAY_OF_STRINGS,
+						"given":    KEY_TYPE,
+					},
+				))
+			}
+		}
+	}
+
+	// properties
+	if existsMapKey(m, KEY_PROPERTIES) {
+		err := d.parseProperties(m[KEY_PROPERTIES], currentSchema)
+		if err != nil {
+			return err
+		}
+	}
+
+	// additionalProperties
+	if existsMapKey(m, KEY_ADDITIONAL_PROPERTIES) {
+		if isKind(m[KEY_ADDITIONAL_PROPERTIES], reflect.Bool) {
+			currentSchema.additionalProperties = m[KEY_ADDITIONAL_PROPERTIES].(bool)
+		} else if isKind(m[KEY_ADDITIONAL_PROPERTIES], reflect.Map) {
+			newSchema := &subSchema{property: KEY_ADDITIONAL_PROPERTIES, parent: currentSchema, ref: currentSchema.ref}
+			currentSchema.additionalProperties = newSchema
+			err := d.parseSchema(m[KEY_ADDITIONAL_PROPERTIES], newSchema)
+			if err != nil {
+				return err
+			}
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.InvalidType(),
+				ErrorDetails{
+					"expected": TYPE_BOOLEAN + "/" + STRING_SCHEMA,
+					"given":    KEY_ADDITIONAL_PROPERTIES,
+				},
+			))
+		}
+	}
+
+	// patternProperties
+	if existsMapKey(m, KEY_PATTERN_PROPERTIES) {
+		if isKind(m[KEY_PATTERN_PROPERTIES], reflect.Map) {
+			patternPropertiesMap := m[KEY_PATTERN_PROPERTIES].(map[string]interface{})
+			if len(patternPropertiesMap) > 0 {
+				currentSchema.patternProperties = make(map[string]*subSchema)
+				for k, v := range patternPropertiesMap {
+					_, err := regexp.MatchString(k, "")
+					if err != nil {
+						return errors.New(formatErrorDescription(
+							Locale.RegexPattern(),
+							ErrorDetails{"pattern": k},
+						))
+					}
+					newSchema := &subSchema{property: k, parent: currentSchema, ref: currentSchema.ref}
+					err = d.parseSchema(v, newSchema)
+					if err != nil {
+						return err
+					}
+					currentSchema.patternProperties[k] = newSchema
+				}
+			}
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.InvalidType(),
+				ErrorDetails{
+					"expected": STRING_SCHEMA,
+					"given":    KEY_PATTERN_PROPERTIES,
+				},
+			))
+		}
+	}
+
+	// dependencies
+	if existsMapKey(m, KEY_DEPENDENCIES) {
+		err := d.parseDependencies(m[KEY_DEPENDENCIES], currentSchema)
+		if err != nil {
+			return err
+		}
+	}
+
+	// items
+	if existsMapKey(m, KEY_ITEMS) {
+		if isKind(m[KEY_ITEMS], reflect.Slice) {
+			for _, itemElement := range m[KEY_ITEMS].([]interface{}) {
+				if isKind(itemElement, reflect.Map) {
+					newSchema := &subSchema{parent: currentSchema, property: KEY_ITEMS}
+					newSchema.ref = currentSchema.ref
+					currentSchema.AddItemsChild(newSchema)
+					err := d.parseSchema(itemElement, newSchema)
+					if err != nil {
+						return err
+					}
+				} else {
+					return errors.New(formatErrorDescription(
+						Locale.InvalidType(),
+						ErrorDetails{
+							"expected": STRING_SCHEMA + "/" + STRING_ARRAY_OF_SCHEMAS,
+							"given":    KEY_ITEMS,
+						},
+					))
+				}
+				currentSchema.itemsChildrenIsSingleSchema = false
+			}
+		} else if isKind(m[KEY_ITEMS], reflect.Map) {
+			newSchema := &subSchema{parent: currentSchema, property: KEY_ITEMS}
+			newSchema.ref = currentSchema.ref
+			currentSchema.AddItemsChild(newSchema)
+			err := d.parseSchema(m[KEY_ITEMS], newSchema)
+			if err != nil {
+				return err
+			}
+			currentSchema.itemsChildrenIsSingleSchema = true
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.InvalidType(),
+				ErrorDetails{
+					"expected": STRING_SCHEMA + "/" + STRING_ARRAY_OF_SCHEMAS,
+					"given":    KEY_ITEMS,
+				},
+			))
+		}
+	}
+
+	// additionalItems
+	if existsMapKey(m, KEY_ADDITIONAL_ITEMS) {
+		if isKind(m[KEY_ADDITIONAL_ITEMS], reflect.Bool) {
+			currentSchema.additionalItems = m[KEY_ADDITIONAL_ITEMS].(bool)
+		} else if isKind(m[KEY_ADDITIONAL_ITEMS], reflect.Map) {
+			newSchema := &subSchema{property: KEY_ADDITIONAL_ITEMS, parent: currentSchema, ref: currentSchema.ref}
+			currentSchema.additionalItems = newSchema
+			err := d.parseSchema(m[KEY_ADDITIONAL_ITEMS], newSchema)
+			if err != nil {
+				return err
+			}
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.InvalidType(),
+				ErrorDetails{
+					"expected": TYPE_BOOLEAN + "/" + STRING_SCHEMA,
+					"given":    KEY_ADDITIONAL_ITEMS,
+				},
+			))
+		}
+	}
+
+	// validation : number / integer
+
+	if existsMapKey(m, KEY_MULTIPLE_OF) {
+		multipleOfValue := mustBeNumber(m[KEY_MULTIPLE_OF])
+		if multipleOfValue == nil {
+			return errors.New(formatErrorDescription(
+				Locale.InvalidType(),
+				ErrorDetails{
+					"expected": STRING_NUMBER,
+					"given":    KEY_MULTIPLE_OF,
+				},
+			))
+		}
+		if *multipleOfValue <= 0 {
+			return errors.New(formatErrorDescription(
+				Locale.GreaterThanZero(),
+				ErrorDetails{"number": KEY_MULTIPLE_OF},
+			))
+		}
+		currentSchema.multipleOf = multipleOfValue
+	}
+
+	if existsMapKey(m, KEY_MINIMUM) {
+		minimumValue := mustBeNumber(m[KEY_MINIMUM])
+		if minimumValue == nil {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfA(),
+				ErrorDetails{"x": KEY_MINIMUM, "y": STRING_NUMBER},
+			))
+		}
+		currentSchema.minimum = minimumValue
+	}
+
+	if existsMapKey(m, KEY_EXCLUSIVE_MINIMUM) {
+		if isKind(m[KEY_EXCLUSIVE_MINIMUM], reflect.Bool) {
+			if currentSchema.minimum == nil {
+				return errors.New(formatErrorDescription(
+					Locale.CannotBeUsedWithout(),
+					ErrorDetails{"x": KEY_EXCLUSIVE_MINIMUM, "y": KEY_MINIMUM},
+				))
+			}
+			exclusiveMinimumValue := m[KEY_EXCLUSIVE_MINIMUM].(bool)
+			currentSchema.exclusiveMinimum = exclusiveMinimumValue
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfA(),
+				ErrorDetails{"x": KEY_EXCLUSIVE_MINIMUM, "y": TYPE_BOOLEAN},
+			))
+		}
+	}
+
+	if existsMapKey(m, KEY_MAXIMUM) {
+		maximumValue := mustBeNumber(m[KEY_MAXIMUM])
+		if maximumValue == nil {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfA(),
+				ErrorDetails{"x": KEY_MAXIMUM, "y": STRING_NUMBER},
+			))
+		}
+		currentSchema.maximum = maximumValue
+	}
+
+	if existsMapKey(m, KEY_EXCLUSIVE_MAXIMUM) {
+		if isKind(m[KEY_EXCLUSIVE_MAXIMUM], reflect.Bool) {
+			if currentSchema.maximum == nil {
+				return errors.New(formatErrorDescription(
+					Locale.CannotBeUsedWithout(),
+					ErrorDetails{"x": KEY_EXCLUSIVE_MAXIMUM, "y": KEY_MAXIMUM},
+				))
+			}
+			exclusiveMaximumValue := m[KEY_EXCLUSIVE_MAXIMUM].(bool)
+			currentSchema.exclusiveMaximum = exclusiveMaximumValue
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfA(),
+				ErrorDetails{"x": KEY_EXCLUSIVE_MAXIMUM, "y": STRING_NUMBER},
+			))
+		}
+	}
+
+	if currentSchema.minimum != nil && currentSchema.maximum != nil {
+		if *currentSchema.minimum > *currentSchema.maximum {
+			return errors.New(formatErrorDescription(
+				Locale.CannotBeGT(),
+				ErrorDetails{"x": KEY_MINIMUM, "y": KEY_MAXIMUM},
+			))
+		}
+	}
+
+	// validation : string
+
+	if existsMapKey(m, KEY_MIN_LENGTH) {
+		minLengthIntegerValue := mustBeInteger(m[KEY_MIN_LENGTH])
+		if minLengthIntegerValue == nil {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfAn(),
+				ErrorDetails{"x": KEY_MIN_LENGTH, "y": TYPE_INTEGER},
+			))
+		}
+		if *minLengthIntegerValue < 0 {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeGTEZero(),
+				ErrorDetails{"key": KEY_MIN_LENGTH},
+			))
+		}
+		currentSchema.minLength = minLengthIntegerValue
+	}
+
+	if existsMapKey(m, KEY_MAX_LENGTH) {
+		maxLengthIntegerValue := mustBeInteger(m[KEY_MAX_LENGTH])
+		if maxLengthIntegerValue == nil {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfAn(),
+				ErrorDetails{"x": KEY_MAX_LENGTH, "y": TYPE_INTEGER},
+			))
+		}
+		if *maxLengthIntegerValue < 0 {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeGTEZero(),
+				ErrorDetails{"key": KEY_MAX_LENGTH},
+			))
+		}
+		currentSchema.maxLength = maxLengthIntegerValue
+	}
+
+	if currentSchema.minLength != nil && currentSchema.maxLength != nil {
+		if *currentSchema.minLength > *currentSchema.maxLength {
+			return errors.New(formatErrorDescription(
+				Locale.CannotBeGT(),
+				ErrorDetails{"x": KEY_MIN_LENGTH, "y": KEY_MAX_LENGTH},
+			))
+		}
+	}
+
+	if existsMapKey(m, KEY_PATTERN) {
+		if isKind(m[KEY_PATTERN], reflect.String) {
+			regexpObject, err := regexp.Compile(m[KEY_PATTERN].(string))
+			if err != nil {
+				return errors.New(formatErrorDescription(
+					Locale.MustBeValidRegex(),
+					ErrorDetails{"key": KEY_PATTERN},
+				))
+			}
+			currentSchema.pattern = regexpObject
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfA(),
+				ErrorDetails{"x": KEY_PATTERN, "y": TYPE_STRING},
+			))
+		}
+	}
+
+	if existsMapKey(m, KEY_FORMAT) {
+		formatString, ok := m[KEY_FORMAT].(string)
+		if ok && FormatCheckers.Has(formatString) {
+			currentSchema.format = formatString
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeValidFormat(),
+				ErrorDetails{"key": KEY_FORMAT, "given": m[KEY_FORMAT]},
+			))
+		}
+	}
+
+	// validation : object
+
+	if existsMapKey(m, KEY_MIN_PROPERTIES) {
+		minPropertiesIntegerValue := mustBeInteger(m[KEY_MIN_PROPERTIES])
+		if minPropertiesIntegerValue == nil {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfAn(),
+				ErrorDetails{"x": KEY_MIN_PROPERTIES, "y": TYPE_INTEGER},
+			))
+		}
+		if *minPropertiesIntegerValue < 0 {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeGTEZero(),
+				ErrorDetails{"key": KEY_MIN_PROPERTIES},
+			))
+		}
+		currentSchema.minProperties = minPropertiesIntegerValue
+	}
+
+	if existsMapKey(m, KEY_MAX_PROPERTIES) {
+		maxPropertiesIntegerValue := mustBeInteger(m[KEY_MAX_PROPERTIES])
+		if maxPropertiesIntegerValue == nil {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfAn(),
+				ErrorDetails{"x": KEY_MAX_PROPERTIES, "y": TYPE_INTEGER},
+			))
+		}
+		if *maxPropertiesIntegerValue < 0 {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeGTEZero(),
+				ErrorDetails{"key": KEY_MAX_PROPERTIES},
+			))
+		}
+		currentSchema.maxProperties = maxPropertiesIntegerValue
+	}
+
+	if currentSchema.minProperties != nil && currentSchema.maxProperties != nil {
+		if *currentSchema.minProperties > *currentSchema.maxProperties {
+			return errors.New(formatErrorDescription(
+				Locale.KeyCannotBeGreaterThan(),
+				ErrorDetails{"key": KEY_MIN_PROPERTIES, "y": KEY_MAX_PROPERTIES},
+			))
+		}
+	}
+
+	if existsMapKey(m, KEY_REQUIRED) {
+		if isKind(m[KEY_REQUIRED], reflect.Slice) {
+			requiredValues := m[KEY_REQUIRED].([]interface{})
+			for _, requiredValue := range requiredValues {
+				if isKind(requiredValue, reflect.String) {
+					err := currentSchema.AddRequired(requiredValue.(string))
+					if err != nil {
+						return err
+					}
+				} else {
+					return errors.New(formatErrorDescription(
+						Locale.KeyItemsMustBeOfType(),
+						ErrorDetails{"key": KEY_REQUIRED, "type": TYPE_STRING},
+					))
+				}
+			}
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfAn(),
+				ErrorDetails{"x": KEY_REQUIRED, "y": TYPE_ARRAY},
+			))
+		}
+	}
+
+	// validation : array
+
+	if existsMapKey(m, KEY_MIN_ITEMS) {
+		minItemsIntegerValue := mustBeInteger(m[KEY_MIN_ITEMS])
+		if minItemsIntegerValue == nil {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfAn(),
+				ErrorDetails{"x": KEY_MIN_ITEMS, "y": TYPE_INTEGER},
+			))
+		}
+		if *minItemsIntegerValue < 0 {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeGTEZero(),
+				ErrorDetails{"key": KEY_MIN_ITEMS},
+			))
+		}
+		currentSchema.minItems = minItemsIntegerValue
+	}
+
+	if existsMapKey(m, KEY_MAX_ITEMS) {
+		maxItemsIntegerValue := mustBeInteger(m[KEY_MAX_ITEMS])
+		if maxItemsIntegerValue == nil {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfAn(),
+				ErrorDetails{"x": KEY_MAX_ITEMS, "y": TYPE_INTEGER},
+			))
+		}
+		if *maxItemsIntegerValue < 0 {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeGTEZero(),
+				ErrorDetails{"key": KEY_MAX_ITEMS},
+			))
+		}
+		currentSchema.maxItems = maxItemsIntegerValue
+	}
+
+	if existsMapKey(m, KEY_UNIQUE_ITEMS) {
+		if isKind(m[KEY_UNIQUE_ITEMS], reflect.Bool) {
+			currentSchema.uniqueItems = m[KEY_UNIQUE_ITEMS].(bool)
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfA(),
+				ErrorDetails{"x": KEY_UNIQUE_ITEMS, "y": TYPE_BOOLEAN},
+			))
+		}
+	}
+
+	// validation : all
+
+	if existsMapKey(m, KEY_ENUM) {
+		if isKind(m[KEY_ENUM], reflect.Slice) {
+			for _, v := range m[KEY_ENUM].([]interface{}) {
+				err := currentSchema.AddEnum(v)
+				if err != nil {
+					return err
+				}
+			}
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfAn(),
+				ErrorDetails{"x": KEY_ENUM, "y": TYPE_ARRAY},
+			))
+		}
+	}
+
+	// validation : subSchema
+
+	if existsMapKey(m, KEY_ONE_OF) {
+		if isKind(m[KEY_ONE_OF], reflect.Slice) {
+			for _, v := range m[KEY_ONE_OF].([]interface{}) {
+				newSchema := &subSchema{property: KEY_ONE_OF, parent: currentSchema, ref: currentSchema.ref}
+				currentSchema.AddOneOf(newSchema)
+				err := d.parseSchema(v, newSchema)
+				if err != nil {
+					return err
+				}
+			}
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfAn(),
+				ErrorDetails{"x": KEY_ONE_OF, "y": TYPE_ARRAY},
+			))
+		}
+	}
+
+	if existsMapKey(m, KEY_ANY_OF) {
+		if isKind(m[KEY_ANY_OF], reflect.Slice) {
+			for _, v := range m[KEY_ANY_OF].([]interface{}) {
+				newSchema := &subSchema{property: KEY_ANY_OF, parent: currentSchema, ref: currentSchema.ref}
+				currentSchema.AddAnyOf(newSchema)
+				err := d.parseSchema(v, newSchema)
+				if err != nil {
+					return err
+				}
+			}
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfAn(),
+				ErrorDetails{"x": KEY_ANY_OF, "y": TYPE_ARRAY},
+			))
+		}
+	}
+
+	if existsMapKey(m, KEY_ALL_OF) {
+		if isKind(m[KEY_ALL_OF], reflect.Slice) {
+			for _, v := range m[KEY_ALL_OF].([]interface{}) {
+				newSchema := &subSchema{property: KEY_ALL_OF, parent: currentSchema, ref: currentSchema.ref}
+				currentSchema.AddAllOf(newSchema)
+				err := d.parseSchema(v, newSchema)
+				if err != nil {
+					return err
+				}
+			}
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfAn(),
+				ErrorDetails{"x": KEY_ALL_OF, "y": TYPE_ARRAY},
+			))
+		}
+	}
+
+	if existsMapKey(m, KEY_NOT) {
+		if isKind(m[KEY_NOT], reflect.Map) {
+			newSchema := &subSchema{property: KEY_NOT, parent: currentSchema, ref: currentSchema.ref}
+			currentSchema.SetNot(newSchema)
+			err := d.parseSchema(m[KEY_NOT], newSchema)
+			if err != nil {
+				return err
+			}
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfAn(),
+				ErrorDetails{"x": KEY_NOT, "y": TYPE_OBJECT},
+			))
+		}
+	}
+
+	return nil
+}
+
+func (d *Schema) parseReference(documentNode interface{}, currentSchema *subSchema, reference string) (e error) {
+
+	var err error
+
+	jsonReference, err := gojsonreference.NewJsonReference(reference)
+	if err != nil {
+		return err
+	}
+
+	standaloneDocument := d.pool.GetStandaloneDocument()
+
+	if jsonReference.HasFullUrl {
+		currentSchema.ref = &jsonReference
+	} else {
+		inheritedReference, err := currentSchema.ref.Inherits(jsonReference)
+		if err != nil {
+			return err
+		}
+		currentSchema.ref = inheritedReference
+	}
+
+	jsonPointer := currentSchema.ref.GetPointer()
+
+	var refdDocumentNode interface{}
+
+	if standaloneDocument != nil {
+
+		var err error
+		refdDocumentNode, _, err = jsonPointer.Get(standaloneDocument)
+		if err != nil {
+			return err
+		}
+
+	} else {
+
+		var err error
+		dsp, err := d.pool.GetDocument(*currentSchema.ref)
+		if err != nil {
+			return err
+		}
+
+		refdDocumentNode, _, err = jsonPointer.Get(dsp.Document)
+		if err != nil {
+			return err
+		}
+
+	}
+
+	if !isKind(refdDocumentNode, reflect.Map) {
+		return errors.New(formatErrorDescription(
+			Locale.MustBeOfType(),
+			ErrorDetails{"key": STRING_SCHEMA, "type": TYPE_OBJECT},
+		))
+	}
+
+	// parse the referenced document into a fresh subSchema; the caller reads it back via currentSchema.refSchema
+	newSchemaDocument := refdDocumentNode.(map[string]interface{})
+
+	newSchema := &subSchema{property: KEY_REF, parent: currentSchema, ref: currentSchema.ref}
+	d.referencePool.Add(currentSchema.ref.String()+reference, newSchema)
+
+	err = d.parseSchema(newSchemaDocument, newSchema)
+	if err != nil {
+		return err
+	}
+
+	currentSchema.refSchema = newSchema
+
+	return nil
+
+}
+
+func (d *Schema) parseProperties(documentNode interface{}, currentSchema *subSchema) error {
+
+	if !isKind(documentNode, reflect.Map) {
+		return errors.New(formatErrorDescription(
+			Locale.MustBeOfType(),
+			ErrorDetails{"key": STRING_PROPERTIES, "type": TYPE_OBJECT},
+		))
+	}
+
+	m := documentNode.(map[string]interface{})
+	for k := range m {
+		schemaProperty := k
+		newSchema := &subSchema{property: schemaProperty, parent: currentSchema, ref: currentSchema.ref}
+		currentSchema.AddPropertiesChild(newSchema)
+		err := d.parseSchema(m[k], newSchema)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *Schema) parseDependencies(documentNode interface{}, currentSchema *subSchema) error {
+
+	if !isKind(documentNode, reflect.Map) {
+		return errors.New(formatErrorDescription(
+			Locale.MustBeOfType(),
+			ErrorDetails{"key": KEY_DEPENDENCIES, "type": TYPE_OBJECT},
+		))
+	}
+
+	m := documentNode.(map[string]interface{})
+	currentSchema.dependencies = make(map[string]interface{})
+
+	for k := range m {
+		switch reflect.ValueOf(m[k]).Kind() {
+
+		case reflect.Slice:
+			values := m[k].([]interface{})
+			var valuesToRegister []string
+
+			for _, value := range values {
+				if !isKind(value, reflect.String) {
+					return errors.New(formatErrorDescription(
+						Locale.MustBeOfType(),
+						ErrorDetails{
+							"key":  STRING_DEPENDENCY,
+							"type": STRING_SCHEMA_OR_ARRAY_OF_STRINGS,
+						},
+					))
+				} else {
+					valuesToRegister = append(valuesToRegister, value.(string))
+				}
+				currentSchema.dependencies[k] = valuesToRegister
+			}
+
+		case reflect.Map:
+			depSchema := &subSchema{property: k, parent: currentSchema, ref: currentSchema.ref}
+			err := d.parseSchema(m[k], depSchema)
+			if err != nil {
+				return err
+			}
+			currentSchema.dependencies[k] = depSchema
+
+		default:
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfType(),
+				ErrorDetails{
+					"key":  STRING_DEPENDENCY,
+					"type": STRING_SCHEMA_OR_ARRAY_OF_STRINGS,
+				},
+			))
+		}
+
+	}
+
+	return nil
+}

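The schema-compilation code above wires the draft-v4 object, array, and combinator keywords (minProperties, required, enum, oneOf/anyOf/allOf, not, $ref, properties, dependencies) into subSchema nodes. As a minimal sketch of what that enables, assuming the exported NewStringLoader, Result.Errors, and ResultError.Description from this vendored copy, a document failing two keywords reports both:

package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	schema := gojsonschema.NewStringLoader(`{
		"type":          "object",
		"minProperties": 2,
		"required":      ["name"]
	}`)
	document := gojsonschema.NewStringLoader(`{"image": "nginx"}`)

	result, err := gojsonschema.Validate(schema, document)
	if err != nil {
		panic(err) // the schema itself failed to load or compile
	}
	for _, e := range result.Errors() {
		fmt.Println(e.Description()) // one line per failed keyword
	}
}
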
+ 107 - 0
vendor/github.com/xeipuuv/gojsonschema/schemaPool.go

@@ -0,0 +1,107 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author           xeipuuv
+// author-github    https://github.com/xeipuuv
+// author-mail      xeipuuv@gmail.com
+//
+// repository-name  gojsonschema
+// repository-desc  An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description		Defines resources pooling.
+//                  Eases referencing and avoids downloading the same resource twice.
+//
+// created          26-02-2013
+
+package gojsonschema
+
+import (
+	"errors"
+
+	"github.com/xeipuuv/gojsonreference"
+)
+
+type schemaPoolDocument struct {
+	Document interface{}
+}
+
+type schemaPool struct {
+	schemaPoolDocuments map[string]*schemaPoolDocument
+	standaloneDocument  interface{}
+}
+
+func newSchemaPool() *schemaPool {
+
+	p := &schemaPool{}
+	p.schemaPoolDocuments = make(map[string]*schemaPoolDocument)
+	p.standaloneDocument = nil
+
+	return p
+}
+
+func (p *schemaPool) SetStandaloneDocument(document interface{}) {
+	p.standaloneDocument = document
+}
+
+func (p *schemaPool) GetStandaloneDocument() (document interface{}) {
+	return p.standaloneDocument
+}
+
+func (p *schemaPool) GetDocument(reference gojsonreference.JsonReference) (*schemaPoolDocument, error) {
+
+	if internalLogEnabled {
+		internalLog("Get Document ( %s )", reference.String())
+	}
+
+	// Only canonical (absolute) references can be loaded on their own.
+	if !reference.IsCanonical() {
+		return nil, errors.New(formatErrorDescription(
+			Locale.ReferenceMustBeCanonical(),
+			ErrorDetails{"reference": reference},
+		))
+	}
+
+	refToUrl := reference
+	refToUrl.GetUrl().Fragment = ""
+
+	var spd *schemaPoolDocument
+
+	// Try to find the requested document in the pool
+	// (a plain map lookup; a missing key leaves spd nil)
+	spd = p.schemaPoolDocuments[refToUrl.String()]
+
+	if spd != nil {
+		if internalLogEnabled {
+			internalLog(" From pool")
+		}
+		return spd, nil
+	}
+
+	jsonReferenceLoader := NewReferenceLoader(reference.String())
+	document, err := jsonReferenceLoader.loadJSON()
+	if err != nil {
+		return nil, err
+	}
+
+	spd = &schemaPoolDocument{Document: document}
+	// add the document to the pool for potential later use
+	p.schemaPoolDocuments[refToUrl.String()] = spd
+
+	return spd, nil
+}

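schemaPool refuses to dereference anything that is not canonical, and memoizes every fetched document under its fragment-less URL, so repeated $refs into the same remote file cost one download. A rough sketch of the distinction the IsCanonical check draws, assuming the vendored gojsonreference API used above:

package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonreference"
)

func main() {
	abs, _ := gojsonreference.NewJsonReference("http://example.com/schema.json#/definitions/service")
	rel, _ := gojsonreference.NewJsonReference("#/definitions/service")

	fmt.Println(abs.IsCanonical()) // true: a full URL, loadable on its own
	fmt.Println(rel.IsCanonical()) // false: must first inherit a base reference
}
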
+ 67 - 0
vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go

@@ -0,0 +1,67 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author           xeipuuv
+// author-github    https://github.com/xeipuuv
+// author-mail      xeipuuv@gmail.com
+//
+// repository-name  gojsonschema
+// repository-desc  An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description      Pool of referenced schemas.
+//
+// created          25-06-2013
+
+package gojsonschema
+
+type schemaReferencePool struct {
+	documents map[string]*subSchema
+}
+
+func newSchemaReferencePool() *schemaReferencePool {
+
+	p := &schemaReferencePool{}
+	p.documents = make(map[string]*subSchema)
+
+	return p
+}
+
+func (p *schemaReferencePool) Get(ref string) (r *subSchema, o bool) {
+
+	if internalLogEnabled {
+		internalLog("Schema Reference ( %s )", ref)
+	}
+
+	if sch, ok := p.documents[ref]; ok {
+		if internalLogEnabled {
+			internalLog(" From pool")
+		}
+		return sch, true
+	}
+
+	return nil, false
+}
+
+func (p *schemaReferencePool) Add(ref string, sch *subSchema) {
+
+	if internalLogEnabled {
+		internalLog("Add Schema Reference %s to pool", ref)
+	}
+
+	p.documents[ref] = sch
+}

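parseReference (in schema.go above) registers every resolved $ref target here, keyed by the absolute base reference concatenated with the raw $ref string, so later lookups can reuse the parsed subSchema instead of re-parsing it. The user-visible effect is ordinary $ref resolution; a sketch, assuming the exported loaders from this vendored copy:

package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	schema := gojsonschema.NewStringLoader(`{
		"definitions": {"port": {"type": "integer", "minimum": 0}},
		"properties": {
			"http":  {"$ref": "#/definitions/port"},
			"https": {"$ref": "#/definitions/port"}
		}
	}`)
	document := gojsonschema.NewStringLoader(`{"http": 80, "https": -1}`)

	result, _ := gojsonschema.Validate(schema, document)
	fmt.Println(result.Valid()) // false: "https" violates the shared definition
}
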
+ 83 - 0
vendor/github.com/xeipuuv/gojsonschema/schemaType.go

@@ -0,0 +1,83 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author           xeipuuv
+// author-github    https://github.com/xeipuuv
+// author-mail      xeipuuv@gmail.com
+//
+// repository-name  gojsonschema
+// repository-desc  An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description      Helper structure to handle schema types, and the combination of them.
+//
+// created          28-02-2013
+
+package gojsonschema
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+)
+
+type jsonSchemaType struct {
+	types []string
+}
+
+// IsTyped reports whether the schema declares at least one type.
+// An untyped schema skips type validation entirely.
+func (t *jsonSchemaType) IsTyped() bool {
+	return len(t.types) > 0
+}
+
+func (t *jsonSchemaType) Add(etype string) error {
+
+	if !isStringInSlice(JSON_TYPES, etype) {
+		return errors.New(formatErrorDescription(Locale.NotAValidType(), ErrorDetails{"type": etype}))
+	}
+
+	if t.Contains(etype) {
+		return errors.New(formatErrorDescription(Locale.Duplicated(), ErrorDetails{"type": etype}))
+	}
+
+	t.types = append(t.types, etype)
+
+	return nil
+}
+
+func (t *jsonSchemaType) Contains(etype string) bool {
+
+	for _, v := range t.types {
+		if v == etype {
+			return true
+		}
+	}
+
+	return false
+}
+
+func (t *jsonSchemaType) String() string {
+
+	if len(t.types) == 0 {
+		return STRING_UNDEFINED // should never happen
+	}
+
+	// Displayed as a list [type1,type2,...]
+	if len(t.types) > 1 {
+		return fmt.Sprintf("[%s]", strings.Join(t.types, ","))
+	}
+
+	// Only one type: name only
+	return t.types[0]
+}

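jsonSchemaType backs the type keyword: Add rejects unknown and duplicated type names while the schema compiles, and String renders one type bare or several as a bracketed list, which is what InvalidTypeError reports as the expected type. A sketch, assuming NewStringLoader and ResultError.Description from this vendored copy:

package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	// Duplicated entries in "type" fail when the schema is compiled.
	_, err := gojsonschema.NewSchema(
		gojsonschema.NewStringLoader(`{"type": ["string", "string"]}`))
	fmt.Println(err != nil) // true

	// A multi-type schema reports the bracketed list on mismatch.
	result, _ := gojsonschema.Validate(
		gojsonschema.NewStringLoader(`{"type": ["string", "integer"]}`),
		gojsonschema.NewStringLoader(`true`))
	for _, e := range result.Errors() {
		fmt.Println(e.Description()) // mentions [string,integer]
	}
}
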
+ 227 - 0
vendor/github.com/xeipuuv/gojsonschema/subSchema.go

@@ -0,0 +1,227 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author           xeipuuv
+// author-github    https://github.com/xeipuuv
+// author-mail      xeipuuv@gmail.com
+//
+// repository-name  gojsonschema
+// repository-desc  An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description      Defines the structure of a sub-subSchema.
+//                  A sub-subSchema can contain other sub-schemas.
+//
+// created          27-02-2013
+
+package gojsonschema
+
+import (
+	"errors"
+	"regexp"
+	"strings"
+
+	"github.com/xeipuuv/gojsonreference"
+)
+
+const (
+	KEY_SCHEMA                = "$schema" // the draft-04 keyword; "$subSchema" here appeared to be a rename artifact
+	KEY_ID                    = "$id"
+	KEY_REF                   = "$ref"
+	KEY_TITLE                 = "title"
+	KEY_DESCRIPTION           = "description"
+	KEY_TYPE                  = "type"
+	KEY_ITEMS                 = "items"
+	KEY_ADDITIONAL_ITEMS      = "additionalItems"
+	KEY_PROPERTIES            = "properties"
+	KEY_PATTERN_PROPERTIES    = "patternProperties"
+	KEY_ADDITIONAL_PROPERTIES = "additionalProperties"
+	KEY_DEFINITIONS           = "definitions"
+	KEY_MULTIPLE_OF           = "multipleOf"
+	KEY_MINIMUM               = "minimum"
+	KEY_MAXIMUM               = "maximum"
+	KEY_EXCLUSIVE_MINIMUM     = "exclusiveMinimum"
+	KEY_EXCLUSIVE_MAXIMUM     = "exclusiveMaximum"
+	KEY_MIN_LENGTH            = "minLength"
+	KEY_MAX_LENGTH            = "maxLength"
+	KEY_PATTERN               = "pattern"
+	KEY_FORMAT                = "format"
+	KEY_MIN_PROPERTIES        = "minProperties"
+	KEY_MAX_PROPERTIES        = "maxProperties"
+	KEY_DEPENDENCIES          = "dependencies"
+	KEY_REQUIRED              = "required"
+	KEY_MIN_ITEMS             = "minItems"
+	KEY_MAX_ITEMS             = "maxItems"
+	KEY_UNIQUE_ITEMS          = "uniqueItems"
+	KEY_ENUM                  = "enum"
+	KEY_ONE_OF                = "oneOf"
+	KEY_ANY_OF                = "anyOf"
+	KEY_ALL_OF                = "allOf"
+	KEY_NOT                   = "not"
+)
+
+type subSchema struct {
+
+	// basic subSchema meta properties
+	id          *string
+	title       *string
+	description *string
+
+	property string
+
+	// Types associated with the subSchema
+	types jsonSchemaType
+
+	// Reference url
+	ref *gojsonreference.JsonReference
+	// Schema referenced
+	refSchema *subSchema
+	// Json reference
+	subSchema *gojsonreference.JsonReference
+
+	// hierarchy
+	parent                      *subSchema
+	definitions                 map[string]*subSchema
+	definitionsChildren         []*subSchema
+	itemsChildren               []*subSchema
+	itemsChildrenIsSingleSchema bool
+	propertiesChildren          []*subSchema
+
+	// validation : number / integer
+	multipleOf       *float64
+	maximum          *float64
+	exclusiveMaximum bool
+	minimum          *float64
+	exclusiveMinimum bool
+
+	// validation : string
+	minLength *int
+	maxLength *int
+	pattern   *regexp.Regexp
+	format    string
+
+	// validation : object
+	minProperties *int
+	maxProperties *int
+	required      []string
+
+	dependencies         map[string]interface{}
+	additionalProperties interface{}
+	patternProperties    map[string]*subSchema
+
+	// validation : array
+	minItems    *int
+	maxItems    *int
+	uniqueItems bool
+
+	additionalItems interface{}
+
+	// validation : all
+	enum []string
+
+	// validation : subSchema
+	oneOf []*subSchema
+	anyOf []*subSchema
+	allOf []*subSchema
+	not   *subSchema
+}
+
+func (s *subSchema) AddEnum(i interface{}) error {
+
+	is, err := marshalToJsonString(i)
+	if err != nil {
+		return err
+	}
+
+	if isStringInSlice(s.enum, *is) {
+		return errors.New(formatErrorDescription(
+			Locale.KeyItemsMustBeUnique(),
+			ErrorDetails{"key": KEY_ENUM},
+		))
+	}
+
+	s.enum = append(s.enum, *is)
+
+	return nil
+}
+
+func (s *subSchema) ContainsEnum(i interface{}) (bool, error) {
+
+	is, err := marshalToJsonString(i)
+	if err != nil {
+		return false, err
+	}
+
+	return isStringInSlice(s.enum, *is), nil
+}
+
+func (s *subSchema) AddOneOf(subSchema *subSchema) {
+	s.oneOf = append(s.oneOf, subSchema)
+}
+
+func (s *subSchema) AddAllOf(subSchema *subSchema) {
+	s.allOf = append(s.allOf, subSchema)
+}
+
+func (s *subSchema) AddAnyOf(subSchema *subSchema) {
+	s.anyOf = append(s.anyOf, subSchema)
+}
+
+func (s *subSchema) SetNot(subSchema *subSchema) {
+	s.not = subSchema
+}
+
+func (s *subSchema) AddRequired(value string) error {
+
+	if isStringInSlice(s.required, value) {
+		return errors.New(formatErrorDescription(
+			Locale.KeyItemsMustBeUnique(),
+			ErrorDetails{"key": KEY_REQUIRED},
+		))
+	}
+
+	s.required = append(s.required, value)
+
+	return nil
+}
+
+func (s *subSchema) AddDefinitionChild(child *subSchema) {
+	s.definitionsChildren = append(s.definitionsChildren, child)
+}
+
+func (s *subSchema) AddItemsChild(child *subSchema) {
+	s.itemsChildren = append(s.itemsChildren, child)
+}
+
+func (s *subSchema) AddPropertiesChild(child *subSchema) {
+	s.propertiesChildren = append(s.propertiesChildren, child)
+}
+
+func (s *subSchema) PatternPropertiesString() string {
+
+	if len(s.patternProperties) == 0 {
+		return STRING_UNDEFINED // should never happen
+	}
+
+	patternPropertiesKeySlice := []string{}
+	for pk := range s.patternProperties {
+		patternPropertiesKeySlice = append(patternPropertiesKeySlice, `"`+pk+`"`)
+	}
+
+	if len(patternPropertiesKeySlice) == 1 {
+		return patternPropertiesKeySlice[0]
+	}
+
+	return "[" + strings.Join(patternPropertiesKeySlice, ",") + "]"
+
+}

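AddEnum canonicalises each enum candidate to its JSON string before storing it, so duplicates fail at compile time and membership is compared by serialised value rather than Go identity. Since encoding/json sorts map keys, the canonical form is deterministic even for objects. A sketch of both effects, assuming the exported loaders:

package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	// Duplicate enum entries are a compile-time error...
	_, err := gojsonschema.NewSchema(
		gojsonschema.NewStringLoader(`{"enum": [1, 1]}`))
	fmt.Println(err != nil) // true

	// ...and matching compares canonical JSON, so objects work too.
	result, _ := gojsonschema.Validate(
		gojsonschema.NewStringLoader(`{"enum": [{"a": 1}, "b"]}`),
		gojsonschema.NewStringLoader(`{"a": 1}`))
	fmt.Println(result.Valid()) // true
}
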
+ 58 - 0
vendor/github.com/xeipuuv/gojsonschema/types.go

@@ -0,0 +1,58 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author           xeipuuv
+// author-github    https://github.com/xeipuuv
+// author-mail      xeipuuv@gmail.com
+//
+// repository-name  gojsonschema
+// repository-desc  An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description      Contains const types for schema and JSON.
+//
+// created          28-02-2013
+
+package gojsonschema
+
+const (
+	TYPE_ARRAY   = `array`
+	TYPE_BOOLEAN = `boolean`
+	TYPE_INTEGER = `integer`
+	TYPE_NUMBER  = `number`
+	TYPE_NULL    = `null`
+	TYPE_OBJECT  = `object`
+	TYPE_STRING  = `string`
+)
+
+var JSON_TYPES []string
+var SCHEMA_TYPES []string
+
+func init() {
+	JSON_TYPES = []string{
+		TYPE_ARRAY,
+		TYPE_BOOLEAN,
+		TYPE_INTEGER,
+		TYPE_NUMBER,
+		TYPE_NULL,
+		TYPE_OBJECT,
+		TYPE_STRING}
+
+	SCHEMA_TYPES = []string{
+		TYPE_ARRAY,
+		TYPE_BOOLEAN,
+		TYPE_INTEGER,
+		TYPE_NUMBER,
+		TYPE_OBJECT,
+		TYPE_STRING}
+}

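Note that JSON_TYPES, which jsonSchemaType.Add validates against, includes null while SCHEMA_TYPES does not: a JSON instance may be typed null, so a union like [integer, null] accepts an explicit null document. A sketch:

package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	schema := gojsonschema.NewStringLoader(`{"type": ["integer", "null"]}`)

	for _, doc := range []string{`42`, `null`, `"x"`} {
		result, err := gojsonschema.Validate(schema, gojsonschema.NewStringLoader(doc))
		if err != nil {
			panic(err)
		}
		fmt.Println(doc, result.Valid()) // 42 true, null true, "x" false
	}
}
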
+ 202 - 0
vendor/github.com/xeipuuv/gojsonschema/utils.go

@@ -0,0 +1,202 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author           xeipuuv
+// author-github    https://github.com/xeipuuv
+// author-mail      xeipuuv@gmail.com
+//
+// repository-name  gojsonschema
+// repository-desc  An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description      Various utility functions.
+//
+// created          26-02-2013
+
+package gojsonschema
+
+import (
+	"encoding/json"
+	"fmt"
+	"math"
+	"reflect"
+	"strconv"
+)
+
+func isKind(what interface{}, kind reflect.Kind) bool {
+	return reflect.ValueOf(what).Kind() == kind
+}
+
+func existsMapKey(m map[string]interface{}, k string) bool {
+	_, ok := m[k]
+	return ok
+}
+
+func isStringInSlice(s []string, what string) bool {
+	for i := range s {
+		if s[i] == what {
+			return true
+		}
+	}
+	return false
+}
+
+func marshalToJsonString(value interface{}) (*string, error) {
+
+	mBytes, err := json.Marshal(value)
+	if err != nil {
+		return nil, err
+	}
+
+	sBytes := string(mBytes)
+	return &sBytes, nil
+}
+
+func isJsonNumber(what interface{}) bool {
+
+	switch what.(type) {
+
+	case json.Number:
+		return true
+	}
+
+	return false
+}
+
+func checkJsonNumber(what interface{}) (isValidFloat64 bool, isValidInt64 bool, isValidInt32 bool) {
+
+	jsonNumber := what.(json.Number)
+
+	_, errFloat64 := jsonNumber.Float64()
+	_, errInt64 := jsonNumber.Int64()
+
+	isValidFloat64 = errFloat64 == nil
+	isValidInt64 = errInt64 == nil
+
+	_, errInt32 := strconv.ParseInt(jsonNumber.String(), 10, 32)
+	isValidInt32 = isValidInt64 && errInt32 == nil
+
+	return
+
+}
+
+// same as ECMA Number.MAX_SAFE_INTEGER and Number.MIN_SAFE_INTEGER
+const (
+	max_json_float = float64(1<<53 - 1)  // 9007199254740991.0 	 2^53 - 1
+	min_json_float = -float64(1<<53 - 1) // -9007199254740991.0	-(2^53 - 1)
+)
+
+func isFloat64AnInteger(f float64) bool {
+
+	if math.IsNaN(f) || math.IsInf(f, 0) || f < min_json_float || f > max_json_float {
+		return false
+	}
+
+	return f == float64(int64(f)) || f == float64(uint64(f))
+}
+
+func mustBeInteger(what interface{}) *int {
+
+	if !isJsonNumber(what) {
+		return nil
+	}
+
+	number := what.(json.Number)
+
+	// only values that fit in an int32 qualify
+	if _, _, isValidInt32 := checkJsonNumber(number); !isValidInt32 {
+		return nil
+	}
+
+	int64Value, err := number.Int64()
+	if err != nil {
+		return nil
+	}
+
+	intValue := int(int64Value)
+	return &intValue
+}
+
+func mustBeNumber(what interface{}) *float64 {
+
+	if !isJsonNumber(what) {
+		return nil
+	}
+
+	float64Value, err := what.(json.Number).Float64()
+	if err != nil {
+		return nil
+	}
+
+	return &float64Value
+}
+
+// formats a number so that it is displayed as the smallest string possible
+func resultErrorFormatJsonNumber(n json.Number) string {
+
+	if int64Value, err := n.Int64(); err == nil {
+		return fmt.Sprintf("%d", int64Value)
+	}
+
+	float64Value, _ := n.Float64()
+
+	return fmt.Sprintf("%g", float64Value)
+}
+
+// formats a number so that it is displayed as the smallest string possible
+func resultErrorFormatNumber(n float64) string {
+
+	if isFloat64AnInteger(n) {
+		return fmt.Sprintf("%d", int64(n))
+	}
+
+	return fmt.Sprintf("%g", n)
+}
+
+func convertDocumentNode(val interface{}) interface{} {
+
+	if lval, ok := val.([]interface{}); ok {
+
+		res := []interface{}{}
+		for _, v := range lval {
+			res = append(res, convertDocumentNode(v))
+		}
+
+		return res
+
+	}
+
+	if mval, ok := val.(map[interface{}]interface{}); ok {
+
+		res := map[string]interface{}{}
+
+		for k, v := range mval {
+			res[k.(string)] = convertDocumentNode(v)
+		}
+
+		return res
+
+	}
+
+	return val
+}

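The number helpers above deliberately route values through json.Number and cap integer detection at +/-(2^53 - 1), the widest range a float64 represents exactly; checkJsonNumber additionally narrows to int32 for keywords like minItems. The standard library alone shows why the cap matters (runnable as-is):

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	dec := json.NewDecoder(strings.NewReader(`9007199254740993`)) // 2^53 + 1
	dec.UseNumber()                                               // keep the raw token instead of a float64

	var n json.Number
	if err := dec.Decode(&n); err != nil {
		panic(err)
	}

	i, _ := n.Int64()
	f, _ := n.Float64()
	fmt.Println(i)                      // 9007199254740993: exact as int64
	fmt.Println(uint64(f) == uint64(i)) // false: the float64 rounded down to 2^53
}
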
+ 829 - 0
vendor/github.com/xeipuuv/gojsonschema/validation.go

@@ -0,0 +1,829 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author           xeipuuv
+// author-github    https://github.com/xeipuuv
+// author-mail      xeipuuv@gmail.com
+//
+// repository-name  gojsonschema
+// repository-desc  An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description      Extends Schema and subSchema, implements the validation phase.
+//
+// created          28-02-2013
+
+package gojsonschema
+
+import (
+	"encoding/json"
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+	"unicode/utf8"
+)
+
+func Validate(ls JSONLoader, ld JSONLoader) (*Result, error) {
+
+	var err error
+
+	// load schema
+
+	schema, err := NewSchema(ls)
+	if err != nil {
+		return nil, err
+	}
+
+	// begin validation
+
+	return schema.Validate(ld)
+
+}
+
+func (v *Schema) Validate(l JSONLoader) (*Result, error) {
+
+	// load document
+
+	root, err := l.loadJSON()
+	if err != nil {
+		return nil, err
+	}
+
+	// begin validation
+
+	result := &Result{}
+	context := newJsonContext(STRING_CONTEXT_ROOT, nil)
+	v.rootSchema.validateRecursive(v.rootSchema, root, result, context)
+
+	return result, nil
+
+}
+
+func (v *subSchema) subValidateWithContext(document interface{}, context *jsonContext) *Result {
+	result := &Result{}
+	v.validateRecursive(v, document, result, context)
+	return result
+}
+
+// Walker function to validate the json recursively against the subSchema
+func (v *subSchema) validateRecursive(currentSubSchema *subSchema, currentNode interface{}, result *Result, context *jsonContext) {
+
+	if internalLogEnabled {
+		internalLog("validateRecursive %s", context.String())
+		internalLog(" %v", currentNode)
+	}
+
+	// Handle referenced schemas, returns directly when a $ref is found
+	if currentSubSchema.refSchema != nil {
+		v.validateRecursive(currentSubSchema.refSchema, currentNode, result, context)
+		return
+	}
+
+	// Check for null value
+	if currentNode == nil {
+		if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_NULL) {
+			result.addError(
+				new(InvalidTypeError),
+				context,
+				currentNode,
+				ErrorDetails{
+					"expected": currentSubSchema.types.String(),
+					"given":    TYPE_NULL,
+				},
+			)
+			return
+		}
+
+		currentSubSchema.validateSchema(currentSubSchema, currentNode, result, context)
+		v.validateCommon(currentSubSchema, currentNode, result, context)
+
+	} else { // Not a null value
+
+		if isJsonNumber(currentNode) {
+
+			value := currentNode.(json.Number)
+
+			_, isValidInt64, _ := checkJsonNumber(value)
+
+			validType := currentSubSchema.types.Contains(TYPE_NUMBER) || (isValidInt64 && currentSubSchema.types.Contains(TYPE_INTEGER))
+
+			if currentSubSchema.types.IsTyped() && !validType {
+
+				givenType := TYPE_INTEGER
+				if !isValidInt64 {
+					givenType = TYPE_NUMBER
+				}
+
+				result.addError(
+					new(InvalidTypeError),
+					context,
+					currentNode,
+					ErrorDetails{
+						"expected": currentSubSchema.types.String(),
+						"given":    givenType,
+					},
+				)
+				return
+			}
+
+			currentSubSchema.validateSchema(currentSubSchema, value, result, context)
+			v.validateNumber(currentSubSchema, value, result, context)
+			v.validateCommon(currentSubSchema, value, result, context)
+			v.validateString(currentSubSchema, value, result, context)
+
+		} else {
+
+			rValue := reflect.ValueOf(currentNode)
+			rKind := rValue.Kind()
+
+			switch rKind {
+
+			// Slice => JSON array
+
+			case reflect.Slice:
+
+				if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_ARRAY) {
+					result.addError(
+						new(InvalidTypeError),
+						context,
+						currentNode,
+						ErrorDetails{
+							"expected": currentSubSchema.types.String(),
+							"given":    TYPE_ARRAY,
+						},
+					)
+					return
+				}
+
+				castCurrentNode := currentNode.([]interface{})
+
+				currentSubSchema.validateSchema(currentSubSchema, castCurrentNode, result, context)
+
+				v.validateArray(currentSubSchema, castCurrentNode, result, context)
+				v.validateCommon(currentSubSchema, castCurrentNode, result, context)
+
+			// Map => JSON object
+
+			case reflect.Map:
+				if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_OBJECT) {
+					result.addError(
+						new(InvalidTypeError),
+						context,
+						currentNode,
+						ErrorDetails{
+							"expected": currentSubSchema.types.String(),
+							"given":    TYPE_OBJECT,
+						},
+					)
+					return
+				}
+
+				castCurrentNode, ok := currentNode.(map[string]interface{})
+				if !ok {
+					castCurrentNode = convertDocumentNode(currentNode).(map[string]interface{})
+				}
+
+				currentSubSchema.validateSchema(currentSubSchema, castCurrentNode, result, context)
+
+				v.validateObject(currentSubSchema, castCurrentNode, result, context)
+				v.validateCommon(currentSubSchema, castCurrentNode, result, context)
+
+				for _, pSchema := range currentSubSchema.propertiesChildren {
+					nextNode, ok := castCurrentNode[pSchema.property]
+					if ok {
+						subContext := newJsonContext(pSchema.property, context)
+						v.validateRecursive(pSchema, nextNode, result, subContext)
+					}
+				}
+
+			// Simple JSON values : string, number, boolean
+
+			case reflect.Bool:
+
+				if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_BOOLEAN) {
+					result.addError(
+						new(InvalidTypeError),
+						context,
+						currentNode,
+						ErrorDetails{
+							"expected": currentSubSchema.types.String(),
+							"given":    TYPE_BOOLEAN,
+						},
+					)
+					return
+				}
+
+				value := currentNode.(bool)
+
+				currentSubSchema.validateSchema(currentSubSchema, value, result, context)
+				v.validateNumber(currentSubSchema, value, result, context)
+				v.validateCommon(currentSubSchema, value, result, context)
+				v.validateString(currentSubSchema, value, result, context)
+
+			case reflect.String:
+
+				if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_STRING) {
+					result.addError(
+						new(InvalidTypeError),
+						context,
+						currentNode,
+						ErrorDetails{
+							"expected": currentSubSchema.types.String(),
+							"given":    TYPE_STRING,
+						},
+					)
+					return
+				}
+
+				value := currentNode.(string)
+
+				currentSubSchema.validateSchema(currentSubSchema, value, result, context)
+				v.validateNumber(currentSubSchema, value, result, context)
+				v.validateCommon(currentSubSchema, value, result, context)
+				v.validateString(currentSubSchema, value, result, context)
+
+			}
+
+		}
+
+	}
+
+	result.incrementScore()
+}
+
+// The different kinds of validation: subSchema / common / array / object / string...
+func (v *subSchema) validateSchema(currentSubSchema *subSchema, currentNode interface{}, result *Result, context *jsonContext) {
+
+	if internalLogEnabled {
+		internalLog("validateSchema %s", context.String())
+		internalLog(" %v", currentNode)
+	}
+
+	if len(currentSubSchema.anyOf) > 0 {
+
+		validatedAnyOf := false
+		var bestValidationResult *Result
+
+		for _, anyOfSchema := range currentSubSchema.anyOf {
+			if !validatedAnyOf {
+				validationResult := anyOfSchema.subValidateWithContext(currentNode, context)
+				validatedAnyOf = validationResult.Valid()
+
+				if !validatedAnyOf && (bestValidationResult == nil || validationResult.score > bestValidationResult.score) {
+					bestValidationResult = validationResult
+				}
+			}
+		}
+		if !validatedAnyOf {
+
+			result.addError(new(NumberAnyOfError), context, currentNode, ErrorDetails{})
+
+			if bestValidationResult != nil {
+				// add error messages of closest matching subSchema as
+				// that's probably the one the user was trying to match
+				result.mergeErrors(bestValidationResult)
+			}
+		}
+	}
+
+	if len(currentSubSchema.oneOf) > 0 {
+
+		nbValidated := 0
+		var bestValidationResult *Result
+
+		for _, oneOfSchema := range currentSubSchema.oneOf {
+			validationResult := oneOfSchema.subValidateWithContext(currentNode, context)
+			if validationResult.Valid() {
+				nbValidated++
+			} else if nbValidated == 0 && (bestValidationResult == nil || validationResult.score > bestValidationResult.score) {
+				bestValidationResult = validationResult
+			}
+		}
+
+		if nbValidated != 1 {
+
+			result.addError(new(NumberOneOfError), context, currentNode, ErrorDetails{})
+
+			if nbValidated == 0 {
+				// add error messages of closest matching subSchema as
+				// that's probably the one the user was trying to match
+				result.mergeErrors(bestValidationResult)
+			}
+		}
+
+	}
+
+	if len(currentSubSchema.allOf) > 0 {
+		nbValidated := 0
+
+		for _, allOfSchema := range currentSubSchema.allOf {
+			validationResult := allOfSchema.subValidateWithContext(currentNode, context)
+			if validationResult.Valid() {
+				nbValidated++
+			}
+			result.mergeErrors(validationResult)
+		}
+
+		if nbValidated != len(currentSubSchema.allOf) {
+			result.addError(new(NumberAllOfError), context, currentNode, ErrorDetails{})
+		}
+	}
+
+	if currentSubSchema.not != nil {
+		validationResult := currentSubSchema.not.subValidateWithContext(currentNode, context)
+		if validationResult.Valid() {
+			result.addError(new(NumberNotError), context, currentNode, ErrorDetails{})
+		}
+	}
+
+	if len(currentSubSchema.dependencies) > 0 {
+		if isKind(currentNode, reflect.Map) {
+			for elementKey := range currentNode.(map[string]interface{}) {
+				if dependency, ok := currentSubSchema.dependencies[elementKey]; ok {
+					switch dependency := dependency.(type) {
+
+					case []string:
+						for _, dependOnKey := range dependency {
+							if _, dependencyResolved := currentNode.(map[string]interface{})[dependOnKey]; !dependencyResolved {
+								result.addError(
+									new(MissingDependencyError),
+									context,
+									currentNode,
+									ErrorDetails{"dependency": dependOnKey},
+								)
+							}
+						}
+
+					case *subSchema:
+						dependency.validateRecursive(dependency, currentNode, result, context)
+
+					}
+				}
+			}
+		}
+	}
+
+	result.incrementScore()
+}
+
+func (v *subSchema) validateCommon(currentSubSchema *subSchema, value interface{}, result *Result, context *jsonContext) {
+
+	if internalLogEnabled {
+		internalLog("validateCommon %s", context.String())
+		internalLog(" %v", value)
+	}
+
+	// enum:
+	if len(currentSubSchema.enum) > 0 {
+		has, err := currentSubSchema.ContainsEnum(value)
+		if err != nil {
+			result.addError(new(InternalError), context, value, ErrorDetails{"error": err})
+		}
+		if !has {
+			result.addError(
+				new(EnumError),
+				context,
+				value,
+				ErrorDetails{
+					"allowed": strings.Join(currentSubSchema.enum, ", "),
+				},
+			)
+		}
+	}
+
+	result.incrementScore()
+}
+
+func (v *subSchema) validateArray(currentSubSchema *subSchema, value []interface{}, result *Result, context *jsonContext) {
+
+	if internalLogEnabled {
+		internalLog("validateArray %s", context.String())
+		internalLog(" %v", value)
+	}
+
+	nbItems := len(value)
+
+	// "items" given as a single schema applies to every element of the
+	// array; "items" given as a list validates positionally, with
+	// "additionalItems" governing any extra elements (handled below).
+	if currentSubSchema.itemsChildrenIsSingleSchema {
+		for i := range value {
+			subContext := newJsonContext(strconv.Itoa(i), context)
+			validationResult := currentSubSchema.itemsChildren[0].subValidateWithContext(value[i], subContext)
+			result.mergeErrors(validationResult)
+		}
+	} else {
+		if len(currentSubSchema.itemsChildren) > 0 {
+
+			nbItems := len(currentSubSchema.itemsChildren)
+			nbValues := len(value)
+
+			if nbItems == nbValues {
+				for i := 0; i != nbItems; i++ {
+					subContext := newJsonContext(strconv.Itoa(i), context)
+					validationResult := currentSubSchema.itemsChildren[i].subValidateWithContext(value[i], subContext)
+					result.mergeErrors(validationResult)
+				}
+			} else if nbItems < nbValues {
+				switch currentSubSchema.additionalItems.(type) {
+				case bool:
+					if !currentSubSchema.additionalItems.(bool) {
+						result.addError(new(ArrayNoAdditionalItemsError), context, value, ErrorDetails{})
+					}
+				case *subSchema:
+					additionalItemSchema := currentSubSchema.additionalItems.(*subSchema)
+					for i := nbItems; i != nbValues; i++ {
+						subContext := newJsonContext(strconv.Itoa(i), context)
+						validationResult := additionalItemSchema.subValidateWithContext(value[i], subContext)
+						result.mergeErrors(validationResult)
+					}
+				}
+			}
+		}
+	}
+
+	// minItems & maxItems
+	if currentSubSchema.minItems != nil {
+		if nbItems < int(*currentSubSchema.minItems) {
+			result.addError(
+				new(ArrayMinItemsError),
+				context,
+				value,
+				ErrorDetails{"min": *currentSubSchema.minItems},
+			)
+		}
+	}
+	if currentSubSchema.maxItems != nil {
+		if nbItems > int(*currentSubSchema.maxItems) {
+			result.addError(
+				new(ArrayMaxItemsError),
+				context,
+				value,
+				ErrorDetails{"max": *currentSubSchema.maxItems},
+			)
+		}
+	}
+
+	// uniqueItems:
+	if currentSubSchema.uniqueItems {
+		var stringifiedItems []string
+		for _, item := range value { // avoid shadowing the receiver v
+			vString, err := marshalToJsonString(item)
+			if err != nil {
+				result.addError(new(InternalError), context, value, ErrorDetails{"err": err})
+			}
+			if isStringInSlice(stringifiedItems, *vString) {
+				result.addError(
+					new(ItemsMustBeUniqueError),
+					context,
+					value,
+					ErrorDetails{"type": TYPE_ARRAY},
+				)
+			}
+			stringifiedItems = append(stringifiedItems, *vString)
+		}
+	}
+
+	result.incrementScore()
+}
+
+func (v *subSchema) validateObject(currentSubSchema *subSchema, value map[string]interface{}, result *Result, context *jsonContext) {
+
+	if internalLogEnabled {
+		internalLog("validateObject %s", context.String())
+		internalLog(" %v", value)
+	}
+
+	// minProperties & maxProperties:
+	if currentSubSchema.minProperties != nil {
+		if len(value) < int(*currentSubSchema.minProperties) {
+			result.addError(
+				new(ArrayMinPropertiesError),
+				context,
+				value,
+				ErrorDetails{"min": *currentSubSchema.minProperties},
+			)
+		}
+	}
+	if currentSubSchema.maxProperties != nil {
+		if len(value) > int(*currentSubSchema.maxProperties) {
+			result.addError(
+				new(ArrayMaxPropertiesError),
+				context,
+				value,
+				ErrorDetails{"max": *currentSubSchema.maxProperties},
+			)
+		}
+	}
+
+	// required:
+	for _, requiredProperty := range currentSubSchema.required {
+		_, ok := value[requiredProperty]
+		if ok {
+			result.incrementScore()
+		} else {
+			result.addError(
+				new(RequiredError),
+				context,
+				value,
+				ErrorDetails{"property": requiredProperty},
+			)
+		}
+	}
+
+	// additionalProperty & patternProperty:
+	if currentSubSchema.additionalProperties != nil {
+
+		switch currentSubSchema.additionalProperties.(type) {
+		case bool:
+
+			if !currentSubSchema.additionalProperties.(bool) {
+
+				for pk := range value {
+
+					found := false
+					for _, spValue := range currentSubSchema.propertiesChildren {
+						if pk == spValue.property {
+							found = true
+						}
+					}
+
+					pp_has, pp_match := v.validatePatternProperty(currentSubSchema, pk, value[pk], result, context)
+
+					if found {
+
+						if pp_has && !pp_match {
+							result.addError(
+								new(AdditionalPropertyNotAllowedError),
+								context,
+								value,
+								ErrorDetails{"property": pk},
+							)
+						}
+
+					} else {
+
+						if !pp_has || !pp_match {
+							result.addError(
+								new(AdditionalPropertyNotAllowedError),
+								context,
+								value,
+								ErrorDetails{"property": pk},
+							)
+						}
+
+					}
+				}
+			}
+
+		case *subSchema:
+
+			additionalPropertiesSchema := currentSubSchema.additionalProperties.(*subSchema)
+			for pk := range value {
+
+				found := false
+				for _, spValue := range currentSubSchema.propertiesChildren {
+					if pk == spValue.property {
+						found = true
+					}
+				}
+
+				pp_has, pp_match := v.validatePatternProperty(currentSubSchema, pk, value[pk], result, context)
+
+				if found {
+
+					if pp_has && !pp_match {
+						validationResult := additionalPropertiesSchema.subValidateWithContext(value[pk], context)
+						result.mergeErrors(validationResult)
+					}
+
+				} else {
+
+					if !pp_has || !pp_match {
+						validationResult := additionalPropertiesSchema.subValidateWithContext(value[pk], context)
+						result.mergeErrors(validationResult)
+					}
+
+				}
+
+			}
+		}
+	} else {
+
+		for pk := range value {
+
+			pp_has, pp_match := v.validatePatternProperty(currentSubSchema, pk, value[pk], result, context)
+
+			if pp_has && !pp_match {
+
+				result.addError(
+					new(InvalidPropertyPatternError),
+					context,
+					value,
+					ErrorDetails{
+						"property": pk,
+						"pattern":  currentSubSchema.PatternPropertiesString(),
+					},
+				)
+			}
+
+		}
+	}
+
+	result.incrementScore()
+}
+
+func (v *subSchema) validatePatternProperty(currentSubSchema *subSchema, key string, value interface{}, result *Result, context *jsonContext) (has bool, matched bool) {
+
+	if internalLogEnabled {
+		internalLog("validatePatternProperty %s", context.String())
+		internalLog(" %s %v", key, value)
+	}
+
+	has = false
+
+	validatedkey := false
+
+	for pk, pv := range currentSubSchema.patternProperties {
+		if matches, _ := regexp.MatchString(pk, key); matches {
+			has = true
+			subContext := newJsonContext(key, context)
+			validationResult := pv.subValidateWithContext(value, subContext)
+			result.mergeErrors(validationResult)
+			if validationResult.Valid() {
+				validatedkey = true
+			}
+		}
+	}
+
+	if !validatedkey {
+		return has, false
+	}
+
+	result.incrementScore()
+
+	return has, true
+}
+
+func (v *subSchema) validateString(currentSubSchema *subSchema, value interface{}, result *Result, context *jsonContext) {
+
+	// Ignore JSON numbers
+	if isJsonNumber(value) {
+		return
+	}
+
+	// Ignore non strings
+	if !isKind(value, reflect.String) {
+		return
+	}
+
+	if internalLogEnabled {
+		internalLog("validateString %s", context.String())
+		internalLog(" %v", value)
+	}
+
+	stringValue := value.(string)
+
+	// minLength & maxLength:
+	if currentSubSchema.minLength != nil {
+		if utf8.RuneCount([]byte(stringValue)) < int(*currentSubSchema.minLength) {
+			result.addError(
+				new(StringLengthGTEError),
+				context,
+				value,
+				ErrorDetails{"min": *currentSubSchema.minLength},
+			)
+		}
+	}
+	if currentSubSchema.maxLength != nil {
+		if utf8.RuneCount([]byte(stringValue)) > int(*currentSubSchema.maxLength) {
+			result.addError(
+				new(StringLengthLTEError),
+				context,
+				value,
+				ErrorDetails{"max": *currentSubSchema.maxLength},
+			)
+		}
+	}
+
+	// pattern:
+	if currentSubSchema.pattern != nil {
+		if !currentSubSchema.pattern.MatchString(stringValue) {
+			result.addError(
+				new(DoesNotMatchPatternError),
+				context,
+				value,
+				ErrorDetails{"pattern": currentSubSchema.pattern},
+			)
+
+		}
+	}
+
+	// format
+	if currentSubSchema.format != "" {
+		if !FormatCheckers.IsFormat(currentSubSchema.format, stringValue) {
+			result.addError(
+				new(DoesNotMatchFormatError),
+				context,
+				value,
+				ErrorDetails{"format": currentSubSchema.format},
+			)
+		}
+	}
+
+	result.incrementScore()
+}
+
+func (v *subSchema) validateNumber(currentSubSchema *subSchema, value interface{}, result *Result, context *jsonContext) {
+
+	// Ignore non numbers
+	if !isJsonNumber(value) {
+		return
+	}
+
+	if internalLogEnabled {
+		internalLog("validateNumber %s", context.String())
+		internalLog(" %v", value)
+	}
+
+	number := value.(json.Number)
+	float64Value, _ := number.Float64()
+
+	// multipleOf:
+	if currentSubSchema.multipleOf != nil {
+
+		if !isFloat64AnInteger(float64Value / *currentSubSchema.multipleOf) {
+			result.addError(
+				new(MultipleOfError),
+				context,
+				resultErrorFormatJsonNumber(number),
+				ErrorDetails{"multiple": *currentSubSchema.multipleOf},
+			)
+		}
+	}
+
+	//maximum & exclusiveMaximum:
+	if currentSubSchema.maximum != nil {
+		if currentSubSchema.exclusiveMaximum {
+			if float64Value >= *currentSubSchema.maximum {
+				result.addError(
+					new(NumberLTError),
+					context,
+					resultErrorFormatJsonNumber(number),
+					ErrorDetails{
+						"max": resultErrorFormatNumber(*currentSubSchema.maximum),
+					},
+				)
+			}
+		} else {
+			if float64Value > *currentSubSchema.maximum {
+				result.addError(
+					new(NumberLTEError),
+					context,
+					resultErrorFormatJsonNumber(number),
+					ErrorDetails{
+						"max": resultErrorFormatNumber(*currentSubSchema.maximum),
+					},
+				)
+			}
+		}
+	}
+
+	//minimum & exclusiveMinimum:
+	if currentSubSchema.minimum != nil {
+		if currentSubSchema.exclusiveMinimum {
+			if float64Value <= *currentSubSchema.minimum {
+				result.addError(
+					new(NumberGTError),
+					context,
+					resultErrorFormatJsonNumber(number),
+					ErrorDetails{
+						"min": resultErrorFormatNumber(*currentSubSchema.minimum),
+					},
+				)
+			}
+		} else {
+			if float64Value < *currentSubSchema.minimum {
+				result.addError(
+					new(NumberGTEError),
+					context,
+					resultErrorFormatJsonNumber(number),
+					ErrorDetails{
+						"min": resultErrorFormatNumber(*currentSubSchema.minimum),
+					},
+				)
+			}
+		}
+	}
+
+	result.incrementScore()
+}

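validation.go supplies the package's two public entry points: the one-shot Validate helper and Schema.Validate for when a compiled schema is reused across many documents, which is the shape of the Compose-file case, where one schema checks every input file. A sketch of the reuse pattern (the "version" field is illustrative, not the real Compose schema):

package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	// Compile once, validate many times.
	schema, err := gojsonschema.NewSchema(
		gojsonschema.NewStringLoader(`{"type": "object", "required": ["version"]}`))
	if err != nil {
		panic(err)
	}

	for _, doc := range []string{`{"version": "3"}`, `{}`} {
		result, err := schema.Validate(gojsonschema.NewStringLoader(doc))
		if err != nil {
			panic(err)
		}
		fmt.Println(doc, "valid:", result.Valid())
	}
}
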
+ 188 - 0
vendor/gopkg.in/yaml.v2/LICENSE

@@ -0,0 +1,188 @@
+
+Copyright (c) 2011-2014 - Canonical Inc.
+
+This software is licensed under the LGPLv3, included below.
+
+As a special exception to the GNU Lesser General Public License version 3
+("LGPL3"), the copyright holders of this Library give you permission to
+convey to a third party a Combined Work that links statically or dynamically
+to this Library without providing any Minimal Corresponding Source or
+Minimal Application Code as set out in 4d or providing the installation
+information set out in section 4e, provided that you comply with the other
+provisions of LGPL3 and provided that you meet, for the Application the
+terms and conditions of the license(s) which apply to the Application.
+
+Except as stated in this special exception, the provisions of LGPL3 will
+continue to comply in full to this Library. If you modify this Library, you
+may apply this exception to your version of this Library, but you are not
+obliged to do so. If you do not wish to do so, delete this exception
+statement from your version. This exception does not (and cannot) modify any
+license terms which apply to the Application, with which you must still
+comply.
+
+
+                   GNU LESSER GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+  This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+  0. Additional Definitions.
+
+  As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+  "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+  An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+  A "Combined Work" is a work produced by combining or linking an
+Application with the Library.  The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+  The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+  The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+  1. Exception to Section 3 of the GNU GPL.
+
+  You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+  2. Conveying Modified Versions.
+
+  If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+   a) under this License, provided that you make a good faith effort to
+   ensure that, in the event an Application does not supply the
+   function or data, the facility still operates, and performs
+   whatever part of its purpose remains meaningful, or
+
+   b) under the GNU GPL, with none of the additional permissions of
+   this License applicable to that copy.
+
+  3. Object Code Incorporating Material from Library Header Files.
+
+  The object code form of an Application may incorporate material from
+a header file that is part of the Library.  You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+   a) Give prominent notice with each copy of the object code that the
+   Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the object code with a copy of the GNU GPL and this license
+   document.
+
+  4. Combined Works.
+
+  You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+   a) Give prominent notice with each copy of the Combined Work that
+   the Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the Combined Work with a copy of the GNU GPL and this license
+   document.
+
+   c) For a Combined Work that displays copyright notices during
+   execution, include the copyright notice for the Library among
+   these notices, as well as a reference directing the user to the
+   copies of the GNU GPL and this license document.
+
+   d) Do one of the following:
+
+       0) Convey the Minimal Corresponding Source under the terms of this
+       License, and the Corresponding Application Code in a form
+       suitable for, and under terms that permit, the user to
+       recombine or relink the Application with a modified version of
+       the Linked Version to produce a modified Combined Work, in the
+       manner specified by section 6 of the GNU GPL for conveying
+       Corresponding Source.
+
+       1) Use a suitable shared library mechanism for linking with the
+       Library.  A suitable mechanism is one that (a) uses at run time
+       a copy of the Library already present on the user's computer
+       system, and (b) will operate properly with a modified version
+       of the Library that is interface-compatible with the Linked
+       Version.
+
+   e) Provide Installation Information, but only if you would otherwise
+   be required to provide such information under section 6 of the
+   GNU GPL, and only to the extent that such information is
+   necessary to install and execute a modified version of the
+   Combined Work produced by recombining or relinking the
+   Application with a modified version of the Linked Version. (If
+   you use option 4d0, the Installation Information must accompany
+   the Minimal Corresponding Source and Corresponding Application
+   Code. If you use option 4d1, you must provide the Installation
+   Information in the manner specified by section 6 of the GNU GPL
+   for conveying Corresponding Source.)
+
+  5. Combined Libraries.
+
+  You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+   a) Accompany the combined library with a copy of the same work based
+   on the Library, uncombined with any other library facilities,
+   conveyed under the terms of this License.
+
+   b) Give prominent notice with the combined library that part of it
+   is a work based on the Library, and explaining where to find the
+   accompanying uncombined form of the same work.
+
+  6. Revised Versions of the GNU Lesser General Public License.
+
+  The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+  Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+  If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.

+ 742 - 0
vendor/gopkg.in/yaml.v2/apic.go

@@ -0,0 +1,742 @@
+package yaml
+
+import (
+	"io"
+	"os"
+)
+
+func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
+	//fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
+
+	// Check if we can move the queue at the beginning of the buffer.
+	if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
+		if parser.tokens_head != len(parser.tokens) {
+			copy(parser.tokens, parser.tokens[parser.tokens_head:])
+		}
+		parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
+		parser.tokens_head = 0
+	}
+	parser.tokens = append(parser.tokens, *token)
+	if pos < 0 {
+		return
+	}
+	copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
+	parser.tokens[parser.tokens_head+pos] = *token
+}
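+
+// A hedged sketch of how the token queue above behaves (illustrative only,
+// not part of the public API): parser.tokens_head marks the first live
+// element, so when the backing array is full the consumed prefix is compacted
+// away before the new token is appended (pos < 0) or spliced in at
+// tokens_head+pos (pos >= 0).
+//
+//	var p yaml_parser_t
+//	tok := yaml_token_t{typ: yaml_KEY_TOKEN}
+//	yaml_insert_token(&p, -1, &tok) // append at the back of the queue
+//	yaml_insert_token(&p, 0, &tok)  // splice in at the current head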
+
+// Create a new parser object.
+func yaml_parser_initialize(parser *yaml_parser_t) bool {
+	*parser = yaml_parser_t{
+		raw_buffer: make([]byte, 0, input_raw_buffer_size),
+		buffer:     make([]byte, 0, input_buffer_size),
+	}
+	return true
+}
+
+// Destroy a parser object.
+func yaml_parser_delete(parser *yaml_parser_t) {
+	*parser = yaml_parser_t{}
+}
+
+// String read handler.
+func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+	if parser.input_pos == len(parser.input) {
+		return 0, io.EOF
+	}
+	n = copy(buffer, parser.input[parser.input_pos:])
+	parser.input_pos += n
+	return n, nil
+}
+
+// File read handler.
+func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+	return parser.input_file.Read(buffer)
+}
+
+// Set a string input.
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
+	if parser.read_handler != nil {
+		panic("must set the input source only once")
+	}
+	parser.read_handler = yaml_string_read_handler
+	parser.input = input
+	parser.input_pos = 0
+}
+
+// Set a file input.
+func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) {
+	if parser.read_handler != nil {
+		panic("must set the input source only once")
+	}
+	parser.read_handler = yaml_file_read_handler
+	parser.input_file = file
+}
+
+// Set the source encoding.
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
+	if parser.encoding != yaml_ANY_ENCODING {
+		panic("must set the encoding only once")
+	}
+	parser.encoding = encoding
+}
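+
+// A minimal sketch of wiring the parser setup helpers above together; this
+// mirrors what newParser in decode.go does and is illustrative rather than a
+// supported entry point. Note the setters panic if the input source or
+// encoding is set more than once.
+//
+//	var parser yaml_parser_t
+//	if !yaml_parser_initialize(&parser) {
+//		panic("failed to initialize YAML parser")
+//	}
+//	defer yaml_parser_delete(&parser)
+//	yaml_parser_set_input_string(&parser, []byte("a: 1\n"))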
+
+// Create a new emitter object.
+func yaml_emitter_initialize(emitter *yaml_emitter_t) bool {
+	*emitter = yaml_emitter_t{
+		buffer:     make([]byte, output_buffer_size),
+		raw_buffer: make([]byte, 0, output_raw_buffer_size),
+		states:     make([]yaml_emitter_state_t, 0, initial_stack_size),
+		events:     make([]yaml_event_t, 0, initial_queue_size),
+	}
+	return true
+}
+
+// Destroy an emitter object.
+func yaml_emitter_delete(emitter *yaml_emitter_t) {
+	*emitter = yaml_emitter_t{}
+}
+
+// String write handler.
+func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+	*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
+	return nil
+}
+
+// File write handler.
+func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+	_, err := emitter.output_file.Write(buffer)
+	return err
+}
+
+// Set a string output.
+func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
+	if emitter.write_handler != nil {
+		panic("must set the output target only once")
+	}
+	emitter.write_handler = yaml_string_write_handler
+	emitter.output_buffer = output_buffer
+}
+
+// Set a file output.
+func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) {
+	if emitter.write_handler != nil {
+		panic("must set the output target only once")
+	}
+	emitter.write_handler = yaml_file_write_handler
+	emitter.output_file = file
+}
+
+// Set the output encoding.
+func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
+	if emitter.encoding != yaml_ANY_ENCODING {
+		panic("must set the output encoding only once")
+	}
+	emitter.encoding = encoding
+}
+
+// Set the canonical output style.
+func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
+	emitter.canonical = canonical
+}
+
+// Set the indentation increment.
+func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
+	if indent < 2 || indent > 9 {
+		indent = 2
+	}
+	emitter.best_indent = indent
+}
+
+// Set the preferred line width.
+func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
+	if width < 0 {
+		width = -1
+	}
+	emitter.best_width = width
+}
+
+// Set if unescaped non-ASCII characters are allowed.
+func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
+	emitter.unicode = unicode
+}
+
+// Set the preferred line break character.
+func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
+	emitter.line_break = line_break
+}
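+
+// A minimal sketch of the emitter setup helpers above; hypothetical usage,
+// assuming the caller then drives the emitter with yaml_emitter_emit:
+//
+//	var emitter yaml_emitter_t
+//	yaml_emitter_initialize(&emitter)
+//	defer yaml_emitter_delete(&emitter)
+//	var out []byte
+//	yaml_emitter_set_output_string(&emitter, &out) // output target is set once
+//	yaml_emitter_set_indent(&emitter, 4)           // values outside 2..9 fall back to 2
+//	yaml_emitter_set_width(&emitter, 100)
+//	yaml_emitter_set_unicode(&emitter, true)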
+
+///*
+// * Destroy a token object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_token_delete(yaml_token_t *token)
+//{
+//    assert(token);  // Non-NULL token object expected.
+//
+//    switch (token.type)
+//    {
+//        case YAML_TAG_DIRECTIVE_TOKEN:
+//            yaml_free(token.data.tag_directive.handle);
+//            yaml_free(token.data.tag_directive.prefix);
+//            break;
+//
+//        case YAML_ALIAS_TOKEN:
+//            yaml_free(token.data.alias.value);
+//            break;
+//
+//        case YAML_ANCHOR_TOKEN:
+//            yaml_free(token.data.anchor.value);
+//            break;
+//
+//        case YAML_TAG_TOKEN:
+//            yaml_free(token.data.tag.handle);
+//            yaml_free(token.data.tag.suffix);
+//            break;
+//
+//        case YAML_SCALAR_TOKEN:
+//            yaml_free(token.data.scalar.value);
+//            break;
+//
+//        default:
+//            break;
+//    }
+//
+//    memset(token, 0, sizeof(yaml_token_t));
+//}
+//
+///*
+// * Check if a string is a valid UTF-8 sequence.
+// *
+// * Check 'reader.c' for more details on UTF-8 encoding.
+// */
+//
+//static int
+//yaml_check_utf8(yaml_char_t *start, size_t length)
+//{
+//    yaml_char_t *end = start+length;
+//    yaml_char_t *pointer = start;
+//
+//    while (pointer < end) {
+//        unsigned char octet;
+//        unsigned int width;
+//        unsigned int value;
+//        size_t k;
+//
+//        octet = pointer[0];
+//        width = (octet & 0x80) == 0x00 ? 1 :
+//                (octet & 0xE0) == 0xC0 ? 2 :
+//                (octet & 0xF0) == 0xE0 ? 3 :
+//                (octet & 0xF8) == 0xF0 ? 4 : 0;
+//        value = (octet & 0x80) == 0x00 ? octet & 0x7F :
+//                (octet & 0xE0) == 0xC0 ? octet & 0x1F :
+//                (octet & 0xF0) == 0xE0 ? octet & 0x0F :
+//                (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
+//        if (!width) return 0;
+//        if (pointer+width > end) return 0;
+//        for (k = 1; k < width; k ++) {
+//            octet = pointer[k];
+//            if ((octet & 0xC0) != 0x80) return 0;
+//            value = (value << 6) + (octet & 0x3F);
+//        }
+//        if (!((width == 1) ||
+//            (width == 2 && value >= 0x80) ||
+//            (width == 3 && value >= 0x800) ||
+//            (width == 4 && value >= 0x10000))) return 0;
+//
+//        pointer += width;
+//    }
+//
+//    return 1;
+//}
+//
+
+// Create STREAM-START.
+func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool {
+	*event = yaml_event_t{
+		typ:      yaml_STREAM_START_EVENT,
+		encoding: encoding,
+	}
+	return true
+}
+
+// Create STREAM-END.
+func yaml_stream_end_event_initialize(event *yaml_event_t) bool {
+	*event = yaml_event_t{
+		typ: yaml_STREAM_END_EVENT,
+	}
+	return true
+}
+
+// Create DOCUMENT-START.
+func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t,
+	tag_directives []yaml_tag_directive_t, implicit bool) bool {
+	*event = yaml_event_t{
+		typ:               yaml_DOCUMENT_START_EVENT,
+		version_directive: version_directive,
+		tag_directives:    tag_directives,
+		implicit:          implicit,
+	}
+	return true
+}
+
+// Create DOCUMENT-END.
+func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool {
+	*event = yaml_event_t{
+		typ:      yaml_DOCUMENT_END_EVENT,
+		implicit: implicit,
+	}
+	return true
+}
+
+///*
+// * Create ALIAS.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
+//{
+//    mark yaml_mark_t = { 0, 0, 0 }
+//    anchor_copy *yaml_char_t = NULL
+//
+//    assert(event) // Non-NULL event object is expected.
+//    assert(anchor) // Non-NULL anchor is expected.
+//
+//    if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
+//
+//    anchor_copy = yaml_strdup(anchor)
+//    if (!anchor_copy)
+//        return 0
+//
+//    ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
+//
+//    return 1
+//}
+
+// Create SCALAR.
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
+	*event = yaml_event_t{
+		typ:             yaml_SCALAR_EVENT,
+		anchor:          anchor,
+		tag:             tag,
+		value:           value,
+		implicit:        plain_implicit,
+		quoted_implicit: quoted_implicit,
+		style:           yaml_style_t(style),
+	}
+	return true
+}
+
+// Create SEQUENCE-START.
+func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
+	*event = yaml_event_t{
+		typ:      yaml_SEQUENCE_START_EVENT,
+		anchor:   anchor,
+		tag:      tag,
+		implicit: implicit,
+		style:    yaml_style_t(style),
+	}
+	return true
+}
+
+// Create SEQUENCE-END.
+func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
+	*event = yaml_event_t{
+		typ: yaml_SEQUENCE_END_EVENT,
+	}
+	return true
+}
+
+// Create MAPPING-START.
+func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool {
+	*event = yaml_event_t{
+		typ:      yaml_MAPPING_START_EVENT,
+		anchor:   anchor,
+		tag:      tag,
+		implicit: implicit,
+		style:    yaml_style_t(style),
+	}
+	return true
+}
+
+// Create MAPPING-END.
+func yaml_mapping_end_event_initialize(event *yaml_event_t) bool {
+	*event = yaml_event_t{
+		typ: yaml_MAPPING_END_EVENT,
+	}
+	return true
+}
+
+// Destroy an event object.
+func yaml_event_delete(event *yaml_event_t) {
+	*event = yaml_event_t{}
+}
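+
+// A hedged sketch of how the event constructors above compose: emitting the
+// document "a: 1" takes a bracketed stream of events, each handed to
+// yaml_emitter_emit before the next constructor overwrites it, roughly:
+//
+//	var e yaml_event_t
+//	yaml_stream_start_event_initialize(&e, yaml_UTF8_ENCODING)
+//	yaml_document_start_event_initialize(&e, nil, nil, true)
+//	yaml_mapping_start_event_initialize(&e, nil, nil, true, yaml_BLOCK_MAPPING_STYLE)
+//	yaml_scalar_event_initialize(&e, nil, nil, []byte("a"), true, false, yaml_PLAIN_SCALAR_STYLE)
+//	yaml_scalar_event_initialize(&e, nil, nil, []byte("1"), true, false, yaml_PLAIN_SCALAR_STYLE)
+//	yaml_mapping_end_event_initialize(&e)
+//	yaml_document_end_event_initialize(&e, true)
+//	yaml_stream_end_event_initialize(&e)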
+
+///*
+// * Create a document object.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_initialize(document *yaml_document_t,
+//        version_directive *yaml_version_directive_t,
+//        tag_directives_start *yaml_tag_directive_t,
+//        tag_directives_end *yaml_tag_directive_t,
+//        start_implicit int, end_implicit int)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//    struct {
+//        start *yaml_node_t
+//        end *yaml_node_t
+//        top *yaml_node_t
+//    } nodes = { NULL, NULL, NULL }
+//    version_directive_copy *yaml_version_directive_t = NULL
+//    struct {
+//        start *yaml_tag_directive_t
+//        end *yaml_tag_directive_t
+//        top *yaml_tag_directive_t
+//    } tag_directives_copy = { NULL, NULL, NULL }
+//    value yaml_tag_directive_t = { NULL, NULL }
+//    mark yaml_mark_t = { 0, 0, 0 }
+//
+//    assert(document) // Non-NULL document object is expected.
+//    assert((tag_directives_start && tag_directives_end) ||
+//            (tag_directives_start == tag_directives_end))
+//                            // Valid tag directives are expected.
+//
+//    if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
+//
+//    if (version_directive) {
+//        version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
+//        if (!version_directive_copy) goto error
+//        version_directive_copy.major = version_directive.major
+//        version_directive_copy.minor = version_directive.minor
+//    }
+//
+//    if (tag_directives_start != tag_directives_end) {
+//        tag_directive *yaml_tag_directive_t
+//        if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
+//            goto error
+//        for (tag_directive = tag_directives_start
+//                tag_directive != tag_directives_end; tag_directive ++) {
+//            assert(tag_directive.handle)
+//            assert(tag_directive.prefix)
+//            if (!yaml_check_utf8(tag_directive.handle,
+//                        strlen((char *)tag_directive.handle)))
+//                goto error
+//            if (!yaml_check_utf8(tag_directive.prefix,
+//                        strlen((char *)tag_directive.prefix)))
+//                goto error
+//            value.handle = yaml_strdup(tag_directive.handle)
+//            value.prefix = yaml_strdup(tag_directive.prefix)
+//            if (!value.handle || !value.prefix) goto error
+//            if (!PUSH(&context, tag_directives_copy, value))
+//                goto error
+//            value.handle = NULL
+//            value.prefix = NULL
+//        }
+//    }
+//
+//    DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
+//            tag_directives_copy.start, tag_directives_copy.top,
+//            start_implicit, end_implicit, mark, mark)
+//
+//    return 1
+//
+//error:
+//    STACK_DEL(&context, nodes)
+//    yaml_free(version_directive_copy)
+//    while (!STACK_EMPTY(&context, tag_directives_copy)) {
+//        value yaml_tag_directive_t = POP(&context, tag_directives_copy)
+//        yaml_free(value.handle)
+//        yaml_free(value.prefix)
+//    }
+//    STACK_DEL(&context, tag_directives_copy)
+//    yaml_free(value.handle)
+//    yaml_free(value.prefix)
+//
+//    return 0
+//}
+//
+///*
+// * Destroy a document object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_document_delete(document *yaml_document_t)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//    tag_directive *yaml_tag_directive_t
+//
+//    context.error = YAML_NO_ERROR // Eliminate a compiler warning.
+//
+//    assert(document) // Non-NULL document object is expected.
+//
+//    while (!STACK_EMPTY(&context, document.nodes)) {
+//        node yaml_node_t = POP(&context, document.nodes)
+//        yaml_free(node.tag)
+//        switch (node.type) {
+//            case YAML_SCALAR_NODE:
+//                yaml_free(node.data.scalar.value)
+//                break
+//            case YAML_SEQUENCE_NODE:
+//                STACK_DEL(&context, node.data.sequence.items)
+//                break
+//            case YAML_MAPPING_NODE:
+//                STACK_DEL(&context, node.data.mapping.pairs)
+//                break
+//            default:
+//                assert(0) // Should not happen.
+//        }
+//    }
+//    STACK_DEL(&context, document.nodes)
+//
+//    yaml_free(document.version_directive)
+//    for (tag_directive = document.tag_directives.start
+//            tag_directive != document.tag_directives.end
+//            tag_directive++) {
+//        yaml_free(tag_directive.handle)
+//        yaml_free(tag_directive.prefix)
+//    }
+//    yaml_free(document.tag_directives.start)
+//
+//    memset(document, 0, sizeof(yaml_document_t))
+//}
+//
+///**
+// * Get a document node.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_node(document *yaml_document_t, index int)
+//{
+//    assert(document) // Non-NULL document object is expected.
+//
+//    if (index > 0 && document.nodes.start + index <= document.nodes.top) {
+//        return document.nodes.start + index - 1
+//    }
+//    return NULL
+//}
+//
+///**
+// * Get the root object.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_root_node(document *yaml_document_t)
+//{
+//    assert(document) // Non-NULL document object is expected.
+//
+//    if (document.nodes.top != document.nodes.start) {
+//        return document.nodes.start
+//    }
+//    return NULL
+//}
+//
+///*
+// * Add a scalar node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_scalar(document *yaml_document_t,
+//        tag *yaml_char_t, value *yaml_char_t, length int,
+//        style yaml_scalar_style_t)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//    mark yaml_mark_t = { 0, 0, 0 }
+//    tag_copy *yaml_char_t = NULL
+//    value_copy *yaml_char_t = NULL
+//    node yaml_node_t
+//
+//    assert(document) // Non-NULL document object is expected.
+//    assert(value) // Non-NULL value is expected.
+//
+//    if (!tag) {
+//        tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
+//    }
+//
+//    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+//    tag_copy = yaml_strdup(tag)
+//    if (!tag_copy) goto error
+//
+//    if (length < 0) {
+//        length = strlen((char *)value)
+//    }
+//
+//    if (!yaml_check_utf8(value, length)) goto error
+//    value_copy = yaml_malloc(length+1)
+//    if (!value_copy) goto error
+//    memcpy(value_copy, value, length)
+//    value_copy[length] = '\0'
+//
+//    SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
+//    if (!PUSH(&context, document.nodes, node)) goto error
+//
+//    return document.nodes.top - document.nodes.start
+//
+//error:
+//    yaml_free(tag_copy)
+//    yaml_free(value_copy)
+//
+//    return 0
+//}
+//
+///*
+// * Add a sequence node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_sequence(document *yaml_document_t,
+//        tag *yaml_char_t, style yaml_sequence_style_t)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//    mark yaml_mark_t = { 0, 0, 0 }
+//    tag_copy *yaml_char_t = NULL
+//    struct {
+//        start *yaml_node_item_t
+//        end *yaml_node_item_t
+//        top *yaml_node_item_t
+//    } items = { NULL, NULL, NULL }
+//    node yaml_node_t
+//
+//    assert(document) // Non-NULL document object is expected.
+//
+//    if (!tag) {
+//        tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
+//    }
+//
+//    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+//    tag_copy = yaml_strdup(tag)
+//    if (!tag_copy) goto error
+//
+//    if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
+//
+//    SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
+//            style, mark, mark)
+//    if (!PUSH(&context, document.nodes, node)) goto error
+//
+//    return document.nodes.top - document.nodes.start
+//
+//error:
+//    STACK_DEL(&context, items)
+//    yaml_free(tag_copy)
+//
+//    return 0
+//}
+//
+///*
+// * Add a mapping node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_mapping(document *yaml_document_t,
+//        tag *yaml_char_t, style yaml_mapping_style_t)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//    mark yaml_mark_t = { 0, 0, 0 }
+//    tag_copy *yaml_char_t = NULL
+//    struct {
+//        start *yaml_node_pair_t
+//        end *yaml_node_pair_t
+//        top *yaml_node_pair_t
+//    } pairs = { NULL, NULL, NULL }
+//    node yaml_node_t
+//
+//    assert(document) // Non-NULL document object is expected.
+//
+//    if (!tag) {
+//        tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
+//    }
+//
+//    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+//    tag_copy = yaml_strdup(tag)
+//    if (!tag_copy) goto error
+//
+//    if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
+//
+//    MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
+//            style, mark, mark)
+//    if (!PUSH(&context, document.nodes, node)) goto error
+//
+//    return document.nodes.top - document.nodes.start
+//
+//error:
+//    STACK_DEL(&context, pairs)
+//    yaml_free(tag_copy)
+//
+//    return 0
+//}
+//
+///*
+// * Append an item to a sequence node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_sequence_item(document *yaml_document_t,
+//        sequence int, item int)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//
+//    assert(document) // Non-NULL document is required.
+//    assert(sequence > 0
+//            && document.nodes.start + sequence <= document.nodes.top)
+//                            // Valid sequence id is required.
+//    assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
+//                            // A sequence node is required.
+//    assert(item > 0 && document.nodes.start + item <= document.nodes.top)
+//                            // Valid item id is required.
+//
+//    if (!PUSH(&context,
+//                document.nodes.start[sequence-1].data.sequence.items, item))
+//        return 0
+//
+//    return 1
+//}
+//
+///*
+// * Append a pair of a key and a value to a mapping node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_mapping_pair(document *yaml_document_t,
+//        mapping int, key int, value int)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//
+//    pair yaml_node_pair_t
+//
+//    assert(document) // Non-NULL document is required.
+//    assert(mapping > 0
+//            && document.nodes.start + mapping <= document.nodes.top)
+//                            // Valid mapping id is required.
+//    assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
+//                            // A mapping node is required.
+//    assert(key > 0 && document.nodes.start + key <= document.nodes.top)
+//                            // Valid key id is required.
+//    assert(value > 0 && document.nodes.start + value <= document.nodes.top)
+//                            // Valid value id is required.
+//
+//    pair.key = key
+//    pair.value = value
+//
+//    if (!PUSH(&context,
+//                document.nodes.start[mapping-1].data.mapping.pairs, pair))
+//        return 0
+//
+//    return 1
+//}
+//
+//

+ 683 - 0
vendor/gopkg.in/yaml.v2/decode.go

@@ -0,0 +1,683 @@
+package yaml
+
+import (
+	"encoding"
+	"encoding/base64"
+	"fmt"
+	"math"
+	"reflect"
+	"strconv"
+	"time"
+)
+
+const (
+	documentNode = 1 << iota
+	mappingNode
+	sequenceNode
+	scalarNode
+	aliasNode
+)
+
+type node struct {
+	kind         int
+	line, column int
+	tag          string
+	value        string
+	implicit     bool
+	children     []*node
+	anchors      map[string]*node
+}
+
+// ----------------------------------------------------------------------------
+// Parser, produces a node tree out of a libyaml event stream.
+
+type parser struct {
+	parser yaml_parser_t
+	event  yaml_event_t
+	doc    *node
+}
+
+func newParser(b []byte) *parser {
+	p := parser{}
+	if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML emitter")
+	}
+
+	if len(b) == 0 {
+		b = []byte{'\n'}
+	}
+
+	yaml_parser_set_input_string(&p.parser, b)
+
+	p.skip()
+	if p.event.typ != yaml_STREAM_START_EVENT {
+		panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ)))
+	}
+	p.skip()
+	return &p
+}
+
+func (p *parser) destroy() {
+	if p.event.typ != yaml_NO_EVENT {
+		yaml_event_delete(&p.event)
+	}
+	yaml_parser_delete(&p.parser)
+}
+
+func (p *parser) skip() {
+	if p.event.typ != yaml_NO_EVENT {
+		if p.event.typ == yaml_STREAM_END_EVENT {
+			failf("attempted to go past the end of stream; corrupted value?")
+		}
+		yaml_event_delete(&p.event)
+	}
+	if !yaml_parser_parse(&p.parser, &p.event) {
+		p.fail()
+	}
+}
+
+func (p *parser) fail() {
+	var where string
+	var line int
+	if p.parser.problem_mark.line != 0 {
+		line = p.parser.problem_mark.line
+	} else if p.parser.context_mark.line != 0 {
+		line = p.parser.context_mark.line
+	}
+	if line != 0 {
+		where = "line " + strconv.Itoa(line) + ": "
+	}
+	var msg string
+	if len(p.parser.problem) > 0 {
+		msg = p.parser.problem
+	} else {
+		msg = "unknown problem parsing YAML content"
+	}
+	failf("%s%s", where, msg)
+}
+
+func (p *parser) anchor(n *node, anchor []byte) {
+	if anchor != nil {
+		p.doc.anchors[string(anchor)] = n
+	}
+}
+
+func (p *parser) parse() *node {
+	switch p.event.typ {
+	case yaml_SCALAR_EVENT:
+		return p.scalar()
+	case yaml_ALIAS_EVENT:
+		return p.alias()
+	case yaml_MAPPING_START_EVENT:
+		return p.mapping()
+	case yaml_SEQUENCE_START_EVENT:
+		return p.sequence()
+	case yaml_DOCUMENT_START_EVENT:
+		return p.document()
+	case yaml_STREAM_END_EVENT:
+		// Happens when attempting to decode an empty buffer.
+		return nil
+	default:
+		panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ)))
+	}
+	panic("unreachable")
+}
+
+func (p *parser) node(kind int) *node {
+	return &node{
+		kind:   kind,
+		line:   p.event.start_mark.line,
+		column: p.event.start_mark.column,
+	}
+}
+
+func (p *parser) document() *node {
+	n := p.node(documentNode)
+	n.anchors = make(map[string]*node)
+	p.doc = n
+	p.skip()
+	n.children = append(n.children, p.parse())
+	if p.event.typ != yaml_DOCUMENT_END_EVENT {
+		panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ)))
+	}
+	p.skip()
+	return n
+}
+
+func (p *parser) alias() *node {
+	n := p.node(aliasNode)
+	n.value = string(p.event.anchor)
+	p.skip()
+	return n
+}
+
+func (p *parser) scalar() *node {
+	n := p.node(scalarNode)
+	n.value = string(p.event.value)
+	n.tag = string(p.event.tag)
+	n.implicit = p.event.implicit
+	p.anchor(n, p.event.anchor)
+	p.skip()
+	return n
+}
+
+func (p *parser) sequence() *node {
+	n := p.node(sequenceNode)
+	p.anchor(n, p.event.anchor)
+	p.skip()
+	for p.event.typ != yaml_SEQUENCE_END_EVENT {
+		n.children = append(n.children, p.parse())
+	}
+	p.skip()
+	return n
+}
+
+func (p *parser) mapping() *node {
+	n := p.node(mappingNode)
+	p.anchor(n, p.event.anchor)
+	p.skip()
+	for p.event.typ != yaml_MAPPING_END_EVENT {
+		n.children = append(n.children, p.parse(), p.parse())
+	}
+	p.skip()
+	return n
+}
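+
+// A hedged sketch of the node tree the parser above produces: for the
+// document "a: [1, 2]" the documentNode holds a single mappingNode whose
+// children alternate key, value, key, value, ...
+//
+//	p := newParser([]byte("a: [1, 2]"))
+//	defer p.destroy()
+//	doc := p.parse()             // documentNode
+//	m := doc.children[0]         // mappingNode
+//	key, val := m.children[0], m.children[1]
+//	_ = key.value                // "a" (scalarNode)
+//	_ = val.children             // two scalarNodes under a sequenceNode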
+
+// ----------------------------------------------------------------------------
+// Decoder, unmarshals a node into a provided value.
+
+type decoder struct {
+	doc     *node
+	aliases map[string]bool
+	mapType reflect.Type
+	terrors []string
+}
+
+var (
+	mapItemType    = reflect.TypeOf(MapItem{})
+	durationType   = reflect.TypeOf(time.Duration(0))
+	defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
+	ifaceType      = defaultMapType.Elem()
+)
+
+func newDecoder() *decoder {
+	d := &decoder{mapType: defaultMapType}
+	d.aliases = make(map[string]bool)
+	return d
+}
+
+func (d *decoder) terror(n *node, tag string, out reflect.Value) {
+	if n.tag != "" {
+		tag = n.tag
+	}
+	value := n.value
+	if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG {
+		if len(value) > 10 {
+			value = " `" + value[:7] + "...`"
+		} else {
+			value = " `" + value + "`"
+		}
+	}
+	d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type()))
+}
+
+func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
+	terrlen := len(d.terrors)
+	err := u.UnmarshalYAML(func(v interface{}) (err error) {
+		defer handleErr(&err)
+		d.unmarshal(n, reflect.ValueOf(v))
+		if len(d.terrors) > terrlen {
+			issues := d.terrors[terrlen:]
+			d.terrors = d.terrors[:terrlen]
+			return &TypeError{issues}
+		}
+		return nil
+	})
+	if e, ok := err.(*TypeError); ok {
+		d.terrors = append(d.terrors, e.Errors...)
+		return false
+	}
+	if err != nil {
+		fail(err)
+	}
+	return true
+}
+
+// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
+// if a value is found to implement it.
+// It returns the initialized and dereferenced out value, whether
+// unmarshalling was already done by UnmarshalYAML, and if so whether
+// its types unmarshalled appropriately.
+//
+// If n holds a null value, prepare returns before doing anything.
+func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
+	if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "") {
+		return out, false, false
+	}
+	again := true
+	for again {
+		again = false
+		if out.Kind() == reflect.Ptr {
+			if out.IsNil() {
+				out.Set(reflect.New(out.Type().Elem()))
+			}
+			out = out.Elem()
+			again = true
+		}
+		if out.CanAddr() {
+			if u, ok := out.Addr().Interface().(Unmarshaler); ok {
+				good = d.callUnmarshaler(n, u)
+				return out, true, good
+			}
+		}
+	}
+	return out, false, false
+}
+
+func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
+	switch n.kind {
+	case documentNode:
+		return d.document(n, out)
+	case aliasNode:
+		return d.alias(n, out)
+	}
+	out, unmarshaled, good := d.prepare(n, out)
+	if unmarshaled {
+		return good
+	}
+	switch n.kind {
+	case scalarNode:
+		good = d.scalar(n, out)
+	case mappingNode:
+		good = d.mapping(n, out)
+	case sequenceNode:
+		good = d.sequence(n, out)
+	default:
+		panic("internal error: unknown node kind: " + strconv.Itoa(n.kind))
+	}
+	return good
+}
+
+func (d *decoder) document(n *node, out reflect.Value) (good bool) {
+	if len(n.children) == 1 {
+		d.doc = n
+		d.unmarshal(n.children[0], out)
+		return true
+	}
+	return false
+}
+
+func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
+	an, ok := d.doc.anchors[n.value]
+	if !ok {
+		failf("unknown anchor '%s' referenced", n.value)
+	}
+	if d.aliases[n.value] {
+		failf("anchor '%s' value contains itself", n.value)
+	}
+	d.aliases[n.value] = true
+	good = d.unmarshal(an, out)
+	delete(d.aliases, n.value)
+	return good
+}
+
+var zeroValue reflect.Value
+
+func resetMap(out reflect.Value) {
+	for _, k := range out.MapKeys() {
+		out.SetMapIndex(k, zeroValue)
+	}
+}
+
+func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
+	var tag string
+	var resolved interface{}
+	if n.tag == "" && !n.implicit {
+		tag = yaml_STR_TAG
+		resolved = n.value
+	} else {
+		tag, resolved = resolve(n.tag, n.value)
+		if tag == yaml_BINARY_TAG {
+			data, err := base64.StdEncoding.DecodeString(resolved.(string))
+			if err != nil {
+				failf("!!binary value contains invalid base64 data")
+			}
+			resolved = string(data)
+		}
+	}
+	if resolved == nil {
+		if out.Kind() == reflect.Map && !out.CanAddr() {
+			resetMap(out)
+		} else {
+			out.Set(reflect.Zero(out.Type()))
+		}
+		return true
+	}
+	if s, ok := resolved.(string); ok && out.CanAddr() {
+		if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok {
+			err := u.UnmarshalText([]byte(s))
+			if err != nil {
+				fail(err)
+			}
+			return true
+		}
+	}
+	switch out.Kind() {
+	case reflect.String:
+		if tag == yaml_BINARY_TAG {
+			out.SetString(resolved.(string))
+			good = true
+		} else if resolved != nil {
+			out.SetString(n.value)
+			good = true
+		}
+	case reflect.Interface:
+		if resolved == nil {
+			out.Set(reflect.Zero(out.Type()))
+		} else {
+			out.Set(reflect.ValueOf(resolved))
+		}
+		good = true
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		switch resolved := resolved.(type) {
+		case int:
+			if !out.OverflowInt(int64(resolved)) {
+				out.SetInt(int64(resolved))
+				good = true
+			}
+		case int64:
+			if !out.OverflowInt(resolved) {
+				out.SetInt(resolved)
+				good = true
+			}
+		case uint64:
+			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+				out.SetInt(int64(resolved))
+				good = true
+			}
+		case float64:
+			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+				out.SetInt(int64(resolved))
+				good = true
+			}
+		case string:
+			if out.Type() == durationType {
+				d, err := time.ParseDuration(resolved)
+				if err == nil {
+					out.SetInt(int64(d))
+					good = true
+				}
+			}
+		}
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		switch resolved := resolved.(type) {
+		case int:
+			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				good = true
+			}
+		case int64:
+			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				good = true
+			}
+		case uint64:
+			if !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				good = true
+			}
+		case float64:
+			if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				good = true
+			}
+		}
+	case reflect.Bool:
+		switch resolved := resolved.(type) {
+		case bool:
+			out.SetBool(resolved)
+			good = true
+		}
+	case reflect.Float32, reflect.Float64:
+		switch resolved := resolved.(type) {
+		case int:
+			out.SetFloat(float64(resolved))
+			good = true
+		case int64:
+			out.SetFloat(float64(resolved))
+			good = true
+		case uint64:
+			out.SetFloat(float64(resolved))
+			good = true
+		case float64:
+			out.SetFloat(resolved)
+			good = true
+		}
+	case reflect.Ptr:
+		if out.Type().Elem() == reflect.TypeOf(resolved) {
+			// TODO Does this make sense? When is out a Ptr except when decoding a nil value?
+			elem := reflect.New(out.Type().Elem())
+			elem.Elem().Set(reflect.ValueOf(resolved))
+			out.Set(elem)
+			good = true
+		}
+	}
+	if !good {
+		d.terror(n, tag, out)
+	}
+	return good
+}
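+
+// A hedged sketch of the time.Duration special case above: string scalars
+// aimed at a time.Duration field are parsed with time.ParseDuration, so a
+// document such as
+//
+//	timeout: 1m30s
+//
+// decodes into a struct like the following (illustrative, not part of this
+// package):
+//
+//	type config struct {
+//		Timeout time.Duration `yaml:"timeout"`
+//	}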
+
+func settableValueOf(i interface{}) reflect.Value {
+	v := reflect.ValueOf(i)
+	sv := reflect.New(v.Type()).Elem()
+	sv.Set(v)
+	return sv
+}
+
+func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
+	l := len(n.children)
+
+	var iface reflect.Value
+	switch out.Kind() {
+	case reflect.Slice:
+		out.Set(reflect.MakeSlice(out.Type(), l, l))
+	case reflect.Interface:
+		// No type hints. Will have to use a generic sequence.
+		iface = out
+		out = settableValueOf(make([]interface{}, l))
+	default:
+		d.terror(n, yaml_SEQ_TAG, out)
+		return false
+	}
+	et := out.Type().Elem()
+
+	j := 0
+	for i := 0; i < l; i++ {
+		e := reflect.New(et).Elem()
+		if ok := d.unmarshal(n.children[i], e); ok {
+			out.Index(j).Set(e)
+			j++
+		}
+	}
+	out.Set(out.Slice(0, j))
+	if iface.IsValid() {
+		iface.Set(out)
+	}
+	return true
+}
+
+func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
+	switch out.Kind() {
+	case reflect.Struct:
+		return d.mappingStruct(n, out)
+	case reflect.Slice:
+		return d.mappingSlice(n, out)
+	case reflect.Map:
+		// okay
+	case reflect.Interface:
+		if d.mapType.Kind() == reflect.Map {
+			iface := out
+			out = reflect.MakeMap(d.mapType)
+			iface.Set(out)
+		} else {
+			slicev := reflect.New(d.mapType).Elem()
+			if !d.mappingSlice(n, slicev) {
+				return false
+			}
+			out.Set(slicev)
+			return true
+		}
+	default:
+		d.terror(n, yaml_MAP_TAG, out)
+		return false
+	}
+	outt := out.Type()
+	kt := outt.Key()
+	et := outt.Elem()
+
+	mapType := d.mapType
+	if outt.Key() == ifaceType && outt.Elem() == ifaceType {
+		d.mapType = outt
+	}
+
+	if out.IsNil() {
+		out.Set(reflect.MakeMap(outt))
+	}
+	l := len(n.children)
+	for i := 0; i < l; i += 2 {
+		if isMerge(n.children[i]) {
+			d.merge(n.children[i+1], out)
+			continue
+		}
+		k := reflect.New(kt).Elem()
+		if d.unmarshal(n.children[i], k) {
+			kkind := k.Kind()
+			if kkind == reflect.Interface {
+				kkind = k.Elem().Kind()
+			}
+			if kkind == reflect.Map || kkind == reflect.Slice {
+				failf("invalid map key: %#v", k.Interface())
+			}
+			e := reflect.New(et).Elem()
+			if d.unmarshal(n.children[i+1], e) {
+				out.SetMapIndex(k, e)
+			}
+		}
+	}
+	d.mapType = mapType
+	return true
+}
+
+func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
+	outt := out.Type()
+	if outt.Elem() != mapItemType {
+		d.terror(n, yaml_MAP_TAG, out)
+		return false
+	}
+
+	mapType := d.mapType
+	d.mapType = outt
+
+	var slice []MapItem
+	var l = len(n.children)
+	for i := 0; i < l; i += 2 {
+		if isMerge(n.children[i]) {
+			d.merge(n.children[i+1], out)
+			continue
+		}
+		item := MapItem{}
+		k := reflect.ValueOf(&item.Key).Elem()
+		if d.unmarshal(n.children[i], k) {
+			v := reflect.ValueOf(&item.Value).Elem()
+			if d.unmarshal(n.children[i+1], v) {
+				slice = append(slice, item)
+			}
+		}
+	}
+	out.Set(reflect.ValueOf(slice))
+	d.mapType = mapType
+	return true
+}
+
+func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
+	sinfo, err := getStructInfo(out.Type())
+	if err != nil {
+		panic(err)
+	}
+	name := settableValueOf("")
+	l := len(n.children)
+
+	var inlineMap reflect.Value
+	var elemType reflect.Type
+	if sinfo.InlineMap != -1 {
+		inlineMap = out.Field(sinfo.InlineMap)
+		inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
+		elemType = inlineMap.Type().Elem()
+	}
+
+	for i := 0; i < l; i += 2 {
+		ni := n.children[i]
+		if isMerge(ni) {
+			d.merge(n.children[i+1], out)
+			continue
+		}
+		if !d.unmarshal(ni, name) {
+			continue
+		}
+		if info, ok := sinfo.FieldsMap[name.String()]; ok {
+			var field reflect.Value
+			if info.Inline == nil {
+				field = out.Field(info.Num)
+			} else {
+				field = out.FieldByIndex(info.Inline)
+			}
+			d.unmarshal(n.children[i+1], field)
+		} else if sinfo.InlineMap != -1 {
+			if inlineMap.IsNil() {
+				inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
+			}
+			value := reflect.New(elemType).Elem()
+			d.unmarshal(n.children[i+1], value)
+			inlineMap.SetMapIndex(name, value)
+		}
+	}
+	return true
+}
+
+func failWantMap() {
+	failf("map merge requires map or sequence of maps as the value")
+}
+
+func (d *decoder) merge(n *node, out reflect.Value) {
+	switch n.kind {
+	case mappingNode:
+		d.unmarshal(n, out)
+	case aliasNode:
+		an, ok := d.doc.anchors[n.value]
+		if ok && an.kind != mappingNode {
+			failWantMap()
+		}
+		d.unmarshal(n, out)
+	case sequenceNode:
+		// Step backwards as earlier nodes take precedence.
+		for i := len(n.children) - 1; i >= 0; i-- {
+			ni := n.children[i]
+			if ni.kind == aliasNode {
+				an, ok := d.doc.anchors[ni.value]
+				if ok && an.kind != mappingNode {
+					failWantMap()
+				}
+			} else if ni.kind != mappingNode {
+				failWantMap()
+			}
+			d.unmarshal(ni, out)
+		}
+	default:
+		failWantMap()
+	}
+}
+
+func isMerge(n *node) bool {
+	return n.kind == scalarNode && n.value == "<<" && (n.implicit || n.tag == yaml_MERGE_TAG)
+}
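+
+// A hedged sketch of the merge semantics handled by d.merge and isMerge
+// above: a "<<" key splices another mapping (or a sequence of mappings) into
+// the current one, and explicit keys written after the merge win:
+//
+//	defaults: &defaults
+//	  retries: 3
+//	  timeout: 10
+//	service:
+//	  <<: *defaults
+//	  timeout: 30  # overrides the merged value; retries stays 3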

+ 1685 - 0
vendor/gopkg.in/yaml.v2/emitterc.go

@@ -0,0 +1,1685 @@
+package yaml
+
+import (
+	"bytes"
+)
+
+// Flush the buffer if needed.
+func flush(emitter *yaml_emitter_t) bool {
+	if emitter.buffer_pos+5 >= len(emitter.buffer) {
+		return yaml_emitter_flush(emitter)
+	}
+	return true
+}
+
+// Put a character to the output buffer.
+func put(emitter *yaml_emitter_t, value byte) bool {
+	if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+		return false
+	}
+	emitter.buffer[emitter.buffer_pos] = value
+	emitter.buffer_pos++
+	emitter.column++
+	return true
+}
+
+// Put a line break to the output buffer.
+func put_break(emitter *yaml_emitter_t) bool {
+	if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+		return false
+	}
+	switch emitter.line_break {
+	case yaml_CR_BREAK:
+		emitter.buffer[emitter.buffer_pos] = '\r'
+		emitter.buffer_pos += 1
+	case yaml_LN_BREAK:
+		emitter.buffer[emitter.buffer_pos] = '\n'
+		emitter.buffer_pos += 1
+	case yaml_CRLN_BREAK:
+		emitter.buffer[emitter.buffer_pos+0] = '\r'
+		emitter.buffer[emitter.buffer_pos+1] = '\n'
+		emitter.buffer_pos += 2
+	default:
+		panic("unknown line break setting")
+	}
+	emitter.column = 0
+	emitter.line++
+	return true
+}
+
+// Copy a character from a string into buffer.
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool {
+	if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+		return false
+	}
+	p := emitter.buffer_pos
+	w := width(s[*i])
+	switch w {
+	case 4:
+		emitter.buffer[p+3] = s[*i+3]
+		fallthrough
+	case 3:
+		emitter.buffer[p+2] = s[*i+2]
+		fallthrough
+	case 2:
+		emitter.buffer[p+1] = s[*i+1]
+		fallthrough
+	case 1:
+		emitter.buffer[p+0] = s[*i+0]
+	default:
+		panic("unknown character width")
+	}
+	emitter.column++
+	emitter.buffer_pos += w
+	*i += w
+	return true
+}
+
+// Write a whole string into buffer.
+func write_all(emitter *yaml_emitter_t, s []byte) bool {
+	for i := 0; i < len(s); {
+		if !write(emitter, s, &i) {
+			return false
+		}
+	}
+	return true
+}
+
+// Copy a line break character from a string into buffer.
+func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool {
+	if s[*i] == '\n' {
+		if !put_break(emitter) {
+			return false
+		}
+		*i++
+	} else {
+		if !write(emitter, s, i) {
+			return false
+		}
+		emitter.column = 0
+		emitter.line++
+	}
+	return true
+}
+
+// Set an emitter error and return false.
+func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool {
+	emitter.error = yaml_EMITTER_ERROR
+	emitter.problem = problem
+	return false
+}
+
+// Emit an event.
+func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	emitter.events = append(emitter.events, *event)
+	for !yaml_emitter_need_more_events(emitter) {
+		event := &emitter.events[emitter.events_head]
+		if !yaml_emitter_analyze_event(emitter, event) {
+			return false
+		}
+		if !yaml_emitter_state_machine(emitter, event) {
+			return false
+		}
+		yaml_event_delete(event)
+		emitter.events_head++
+	}
+	return true
+}
+
+// Check if we need to accumulate more events before emitting.
+//
+// We accumulate extra events before emitting:
+//  - 1 event for DOCUMENT-START
+//  - 2 events for SEQUENCE-START
+//  - 3 events for MAPPING-START
+//
+func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool {
+	if emitter.events_head == len(emitter.events) {
+		return true
+	}
+	var accumulate int
+	switch emitter.events[emitter.events_head].typ {
+	case yaml_DOCUMENT_START_EVENT:
+		accumulate = 1
+	case yaml_SEQUENCE_START_EVENT:
+		accumulate = 2
+	case yaml_MAPPING_START_EVENT:
+		accumulate = 3
+	default:
+		return false
+	}
+	if len(emitter.events)-emitter.events_head > accumulate {
+		return false
+	}
+	var level int
+	for i := emitter.events_head; i < len(emitter.events); i++ {
+		switch emitter.events[i].typ {
+		case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
+			level++
+		case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT:
+			level--
+		}
+		if level == 0 {
+			return false
+		}
+	}
+	return true
+}
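+
+// As a worked example of the accumulation rule above: after MAPPING-START the
+// emitter buffers up to three further events so that look-ahead checks such
+// as yaml_emitter_check_empty_mapping and yaml_emitter_check_simple_key can
+// inspect the first key before a flow or block style is committed to. The
+// level counter lets the scan stop early once the buffered events already
+// close the node that was opened.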
+
+// Append a directive to the directives stack.
+func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool {
+	for i := 0; i < len(emitter.tag_directives); i++ {
+		if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
+			if allow_duplicates {
+				return true
+			}
+			return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive")
+		}
+	}
+
+	// [Go] Do we actually need to copy this given garbage collection
+	// and the lack of deallocating destructors?
+	tag_copy := yaml_tag_directive_t{
+		handle: make([]byte, len(value.handle)),
+		prefix: make([]byte, len(value.prefix)),
+	}
+	copy(tag_copy.handle, value.handle)
+	copy(tag_copy.prefix, value.prefix)
+	emitter.tag_directives = append(emitter.tag_directives, tag_copy)
+	return true
+}
+
+// Increase the indentation level.
+func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
+	emitter.indents = append(emitter.indents, emitter.indent)
+	if emitter.indent < 0 {
+		if flow {
+			emitter.indent = emitter.best_indent
+		} else {
+			emitter.indent = 0
+		}
+	} else if !indentless {
+		emitter.indent += emitter.best_indent
+	}
+	return true
+}
+
+// State dispatcher.
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	switch emitter.state {
+	case yaml_EMIT_STREAM_START_STATE:
+		return yaml_emitter_emit_stream_start(emitter, event)
+
+	case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
+		return yaml_emitter_emit_document_start(emitter, event, true)
+
+	case yaml_EMIT_DOCUMENT_START_STATE:
+		return yaml_emitter_emit_document_start(emitter, event, false)
+
+	case yaml_EMIT_DOCUMENT_CONTENT_STATE:
+		return yaml_emitter_emit_document_content(emitter, event)
+
+	case yaml_EMIT_DOCUMENT_END_STATE:
+		return yaml_emitter_emit_document_end(emitter, event)
+
+	case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
+		return yaml_emitter_emit_flow_sequence_item(emitter, event, true)
+
+	case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
+		return yaml_emitter_emit_flow_sequence_item(emitter, event, false)
+
+	case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
+		return yaml_emitter_emit_flow_mapping_key(emitter, event, true)
+
+	case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
+		return yaml_emitter_emit_flow_mapping_key(emitter, event, false)
+
+	case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
+		return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
+
+	case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
+		return yaml_emitter_emit_flow_mapping_value(emitter, event, false)
+
+	case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
+		return yaml_emitter_emit_block_sequence_item(emitter, event, true)
+
+	case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
+		return yaml_emitter_emit_block_sequence_item(emitter, event, false)
+
+	case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return yaml_emitter_emit_block_mapping_key(emitter, event, true)
+
+	case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
+		return yaml_emitter_emit_block_mapping_key(emitter, event, false)
+
+	case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
+		return yaml_emitter_emit_block_mapping_value(emitter, event, true)
+
+	case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
+		return yaml_emitter_emit_block_mapping_value(emitter, event, false)
+
+	case yaml_EMIT_END_STATE:
+		return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
+	}
+	panic("invalid emitter state")
+}
+
+// Expect STREAM-START.
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if event.typ != yaml_STREAM_START_EVENT {
+		return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
+	}
+	if emitter.encoding == yaml_ANY_ENCODING {
+		emitter.encoding = event.encoding
+		if emitter.encoding == yaml_ANY_ENCODING {
+			emitter.encoding = yaml_UTF8_ENCODING
+		}
+	}
+	if emitter.best_indent < 2 || emitter.best_indent > 9 {
+		emitter.best_indent = 2
+	}
+	if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
+		emitter.best_width = 80
+	}
+	if emitter.best_width < 0 {
+		emitter.best_width = 1<<31 - 1
+	}
+	if emitter.line_break == yaml_ANY_BREAK {
+		emitter.line_break = yaml_LN_BREAK
+	}
+
+	emitter.indent = -1
+	emitter.line = 0
+	emitter.column = 0
+	emitter.whitespace = true
+	emitter.indention = true
+
+	if emitter.encoding != yaml_UTF8_ENCODING {
+		if !yaml_emitter_write_bom(emitter) {
+			return false
+		}
+	}
+	emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
+	return true
+}
+
+// Expect DOCUMENT-START or STREAM-END.
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+
+	if event.typ == yaml_DOCUMENT_START_EVENT {
+
+		if event.version_directive != nil {
+			if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
+				return false
+			}
+		}
+
+		for i := 0; i < len(event.tag_directives); i++ {
+			tag_directive := &event.tag_directives[i]
+			if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
+				return false
+			}
+			if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
+				return false
+			}
+		}
+
+		for i := 0; i < len(default_tag_directives); i++ {
+			tag_directive := &default_tag_directives[i]
+			if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
+				return false
+			}
+		}
+
+		implicit := event.implicit
+		if !first || emitter.canonical {
+			implicit = false
+		}
+
+		if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
+			if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+
+		if event.version_directive != nil {
+			implicit = false
+			if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+
+		if len(event.tag_directives) > 0 {
+			implicit = false
+			for i := 0; i < len(event.tag_directives); i++ {
+				tag_directive := &event.tag_directives[i]
+				if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
+					return false
+				}
+				if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
+					return false
+				}
+				if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
+					return false
+				}
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+			}
+		}
+
+		if yaml_emitter_check_empty_document(emitter) {
+			implicit = false
+		}
+		if !implicit {
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+			if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
+				return false
+			}
+			if emitter.canonical {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+			}
+		}
+
+		emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
+		return true
+	}
+
+	if event.typ == yaml_STREAM_END_EVENT {
+		if emitter.open_ended {
+			if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+		if !yaml_emitter_flush(emitter) {
+			return false
+		}
+		emitter.state = yaml_EMIT_END_STATE
+		return true
+	}
+
+	return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
+}
+
+// Expect the root node.
+func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
+	return yaml_emitter_emit_node(emitter, event, true, false, false, false)
+}
+
+// Expect DOCUMENT-END.
+func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if event.typ != yaml_DOCUMENT_END_EVENT {
+		return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !event.implicit {
+		// [Go] Allocate the slice elsewhere.
+		if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+			return false
+		}
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+	}
+	if !yaml_emitter_flush(emitter) {
+		return false
+	}
+	emitter.state = yaml_EMIT_DOCUMENT_START_STATE
+	emitter.tag_directives = emitter.tag_directives[:0]
+	return true
+}
+
+// Expect a flow item node.
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+	if first {
+		if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) {
+			return false
+		}
+		if !yaml_emitter_increase_indent(emitter, true, false) {
+			return false
+		}
+		emitter.flow_level++
+	}
+
+	if event.typ == yaml_SEQUENCE_END_EVENT {
+		emitter.flow_level--
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		if emitter.canonical && !first {
+			if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) {
+			return false
+		}
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+
+		return true
+	}
+
+	if !first {
+		if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+			return false
+		}
+	}
+
+	if emitter.canonical || emitter.column > emitter.best_width {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+}
+
+// Expect a flow key node.
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+	if first {
+		if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) {
+			return false
+		}
+		if !yaml_emitter_increase_indent(emitter, true, false) {
+			return false
+		}
+		emitter.flow_level++
+	}
+
+	if event.typ == yaml_MAPPING_END_EVENT {
+		emitter.flow_level--
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		if emitter.canonical && !first {
+			if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) {
+			return false
+		}
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+		return true
+	}
+
+	if !first {
+		if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+			return false
+		}
+	}
+	if emitter.canonical || emitter.column > emitter.best_width {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+	}
+
+	if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
+		emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
+		return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) {
+		return false
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a flow value node.
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+	if simple {
+		if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+			return false
+		}
+	} else {
+		if emitter.canonical || emitter.column > emitter.best_width {
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) {
+			return false
+		}
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block item node.
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+	if first {
+		if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) {
+			return false
+		}
+	}
+	if event.typ == yaml_SEQUENCE_END_EVENT {
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
+		return false
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+}
+
+// Expect a block key node.
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+	if first {
+		if !yaml_emitter_increase_indent(emitter, false, false) {
+			return false
+		}
+	}
+	if event.typ == yaml_MAPPING_END_EVENT {
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if yaml_emitter_check_simple_key(emitter) {
+		emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
+		return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
+		return false
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block value node.
+func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+	if simple {
+		if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+			return false
+		}
+	} else {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) {
+			return false
+		}
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a node.
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
+	root bool, sequence bool, mapping bool, simple_key bool) bool {
+
+	emitter.root_context = root
+	emitter.sequence_context = sequence
+	emitter.mapping_context = mapping
+	emitter.simple_key_context = simple_key
+
+	switch event.typ {
+	case yaml_ALIAS_EVENT:
+		return yaml_emitter_emit_alias(emitter, event)
+	case yaml_SCALAR_EVENT:
+		return yaml_emitter_emit_scalar(emitter, event)
+	case yaml_SEQUENCE_START_EVENT:
+		return yaml_emitter_emit_sequence_start(emitter, event)
+	case yaml_MAPPING_START_EVENT:
+		return yaml_emitter_emit_mapping_start(emitter, event)
+	default:
+		return yaml_emitter_set_emitter_error(emitter,
+			"expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS")
+	}
+}
+
+// Expect ALIAS.
+func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if !yaml_emitter_process_anchor(emitter) {
+		return false
+	}
+	emitter.state = emitter.states[len(emitter.states)-1]
+	emitter.states = emitter.states[:len(emitter.states)-1]
+	return true
+}
+
+// Expect SCALAR.
+func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if !yaml_emitter_select_scalar_style(emitter, event) {
+		return false
+	}
+	if !yaml_emitter_process_anchor(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_tag(emitter) {
+		return false
+	}
+	if !yaml_emitter_increase_indent(emitter, true, false) {
+		return false
+	}
+	if !yaml_emitter_process_scalar(emitter) {
+		return false
+	}
+	emitter.indent = emitter.indents[len(emitter.indents)-1]
+	emitter.indents = emitter.indents[:len(emitter.indents)-1]
+	emitter.state = emitter.states[len(emitter.states)-1]
+	emitter.states = emitter.states[:len(emitter.states)-1]
+	return true
+}
+
+// Expect SEQUENCE-START.
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if !yaml_emitter_process_anchor(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_tag(emitter) {
+		return false
+	}
+	if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE ||
+		yaml_emitter_check_empty_sequence(emitter) {
+		emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
+	} else {
+		emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
+	}
+	return true
+}
+
+// Expect MAPPING-START.
+func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if !yaml_emitter_process_anchor(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_tag(emitter) {
+		return false
+	}
+	if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE ||
+		yaml_emitter_check_empty_mapping(emitter) {
+		emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
+	} else {
+		emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
+	}
+	return true
+}
+
+// Check if the document content is an empty scalar.
+func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool {
+	return false // [Go] Huh?
+}
+
+// Check if the next events represent an empty sequence.
+func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool {
+	if len(emitter.events)-emitter.events_head < 2 {
+		return false
+	}
+	return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT &&
+		emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT
+}
+
+// Check if the next events represent an empty mapping.
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
+	if len(emitter.events)-emitter.events_head < 2 {
+		return false
+	}
+	return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
+		emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
+}
+
+// Check if the next node can be expressed as a simple key.
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
+	length := 0
+	switch emitter.events[emitter.events_head].typ {
+	case yaml_ALIAS_EVENT:
+		length += len(emitter.anchor_data.anchor)
+	case yaml_SCALAR_EVENT:
+		if emitter.scalar_data.multiline {
+			return false
+		}
+		length += len(emitter.anchor_data.anchor) +
+			len(emitter.tag_data.handle) +
+			len(emitter.tag_data.suffix) +
+			len(emitter.scalar_data.value)
+	case yaml_SEQUENCE_START_EVENT:
+		if !yaml_emitter_check_empty_sequence(emitter) {
+			return false
+		}
+		length += len(emitter.anchor_data.anchor) +
+			len(emitter.tag_data.handle) +
+			len(emitter.tag_data.suffix)
+	case yaml_MAPPING_START_EVENT:
+		if !yaml_emitter_check_empty_mapping(emitter) {
+			return false
+		}
+		length += len(emitter.anchor_data.anchor) +
+			len(emitter.tag_data.handle) +
+			len(emitter.tag_data.suffix)
+	default:
+		return false
+	}
+	return length <= 128
+}
+
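A key that fails this simple-key check — one longer than the 128-byte limit, say — falls back to the explicit "?"/":" block form. A minimal sketch, assuming fmt, strings, and the package's public Marshal:

long := strings.Repeat("k", 200) // over the 128-byte simple-key limit
out, _ := yaml.Marshal(map[string]string{long: "v"})
fmt.Print(string(out))
// Printed as an explicit entry:
// ? kkkk…k
// : v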
+// Determine an acceptable scalar style.
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+	no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
+	if no_tag && !event.implicit && !event.quoted_implicit {
+		return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
+	}
+
+	style := event.scalar_style()
+	if style == yaml_ANY_SCALAR_STYLE {
+		style = yaml_PLAIN_SCALAR_STYLE
+	}
+	if emitter.canonical {
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+	if emitter.simple_key_context && emitter.scalar_data.multiline {
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+
+	if style == yaml_PLAIN_SCALAR_STYLE {
+		if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
+			emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		}
+		if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		}
+		if no_tag && !event.implicit {
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		}
+	}
+	if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
+		if !emitter.scalar_data.single_quoted_allowed {
+			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+		}
+	}
+	if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
+		if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
+			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+		}
+	}
+
+	if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
+		emitter.tag_data.handle = []byte{'!'}
+	}
+	emitter.scalar_data.style = style
+	return true
+}
+
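The downgrade chain above (plain → single-quoted → double-quoted, with block styles only in block context) is visible through the package's public Marshal. A minimal sketch, assuming imports fmt and yaml "gopkg.in/yaml.v2":

for _, v := range []string{"plain", " leading space", "multi\nline"} {
	out, _ := yaml.Marshal(map[string]string{"k": v})
	fmt.Printf("%q => %s", v, out)
}
// "plain"          => k: plain              (plain style allowed)
// " leading space" => k: ' leading space'   (leading space forbids plain)
// "multi\nline"    => k: |-                 (multiline becomes a literal block)
//                        multi
//                        line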
+// Write an anchor.
+func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
+	if emitter.anchor_data.anchor == nil {
+		return true
+	}
+	c := []byte{'&'}
+	if emitter.anchor_data.alias {
+		c[0] = '*'
+	}
+	if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
+		return false
+	}
+	return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
+}
+
+// Write a tag.
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
+	if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
+		return true
+	}
+	if len(emitter.tag_data.handle) > 0 {
+		if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
+			return false
+		}
+		if len(emitter.tag_data.suffix) > 0 {
+			if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+				return false
+			}
+		}
+	} else {
+		// [Go] Allocate these slices elsewhere.
+		if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
+			return false
+		}
+		if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+			return false
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
+			return false
+		}
+	}
+	return true
+}
+
+// Write a scalar.
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
+	switch emitter.scalar_data.style {
+	case yaml_PLAIN_SCALAR_STYLE:
+		return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_SINGLE_QUOTED_SCALAR_STYLE:
+		return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
+		return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_LITERAL_SCALAR_STYLE:
+		return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
+
+	case yaml_FOLDED_SCALAR_STYLE:
+		return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
+	}
+	panic("unknown scalar style")
+}
+
+// Check if a %YAML directive is valid.
+func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
+	if version_directive.major != 1 || version_directive.minor != 1 {
+		return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
+	}
+	return true
+}
+
+// Check if a %TAG directive is valid.
+func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
+	handle := tag_directive.handle
+	prefix := tag_directive.prefix
+	if len(handle) == 0 {
+		return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
+	}
+	if handle[0] != '!' {
+		return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
+	}
+	if handle[len(handle)-1] != '!' {
+		return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'")
+	}
+	for i := 1; i < len(handle)-1; i += width(handle[i]) {
+		if !is_alpha(handle, i) {
+			return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only")
+		}
+	}
+	if len(prefix) == 0 {
+		return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty")
+	}
+	return true
+}
+
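For reference, %TAG directives these checks accept and reject:

// %TAG !e! tag:example.com,2000:app/   accepted: handle "!e!" is '!'-delimited and alphanumeric inside
// %TAG !!  tag:yaml.org,2002:          accepted: the secondary handle "!!"
// %TAG e!  tag:example.com,2000:app/   rejected: handle must start with '!'
// %TAG !e!                             rejected: prefix must not be empty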
+// Check if an anchor is valid.
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool {
+	if len(anchor) == 0 {
+		problem := "anchor value must not be empty"
+		if alias {
+			problem = "alias value must not be empty"
+		}
+		return yaml_emitter_set_emitter_error(emitter, problem)
+	}
+	for i := 0; i < len(anchor); i += width(anchor[i]) {
+		if !is_alpha(anchor, i) {
+			problem := "anchor value must contain alphanumerical characters only"
+			if alias {
+				problem = "alias value must contain alphanumerical characters only"
+			}
+			return yaml_emitter_set_emitter_error(emitter, problem)
+		}
+	}
+	emitter.anchor_data.anchor = anchor
+	emitter.anchor_data.alias = alias
+	return true
+}
+
+// Check if a tag is valid.
+func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
+	if len(tag) == 0 {
+		return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty")
+	}
+	for i := 0; i < len(emitter.tag_directives); i++ {
+		tag_directive := &emitter.tag_directives[i]
+		if bytes.HasPrefix(tag, tag_directive.prefix) {
+			emitter.tag_data.handle = tag_directive.handle
+			emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
+			return true
+		}
+	}
+	emitter.tag_data.suffix = tag
+	return true
+}
+
+// Check if a scalar is valid.
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
+	var (
+		block_indicators   = false
+		flow_indicators    = false
+		line_breaks        = false
+		special_characters = false
+
+		leading_space  = false
+		leading_break  = false
+		trailing_space = false
+		trailing_break = false
+		break_space    = false
+		space_break    = false
+
+		preceded_by_whitespace = false
+		followed_by_whitespace = false
+		previous_space         = false
+		previous_break         = false
+	)
+
+	emitter.scalar_data.value = value
+
+	if len(value) == 0 {
+		emitter.scalar_data.multiline = false
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = true
+		emitter.scalar_data.single_quoted_allowed = true
+		emitter.scalar_data.block_allowed = false
+		return true
+	}
+
+	if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
+		block_indicators = true
+		flow_indicators = true
+	}
+
+	preceded_by_whitespace = true
+	for i, w := 0, 0; i < len(value); i += w {
+		w = width(value[i])
+		followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
+
+		if i == 0 {
+			switch value[i] {
+			case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
+				flow_indicators = true
+				block_indicators = true
+			case '?', ':':
+				flow_indicators = true
+				if followed_by_whitespace {
+					block_indicators = true
+				}
+			case '-':
+				if followed_by_whitespace {
+					flow_indicators = true
+					block_indicators = true
+				}
+			}
+		} else {
+			switch value[i] {
+			case ',', '?', '[', ']', '{', '}':
+				flow_indicators = true
+			case ':':
+				flow_indicators = true
+				if followed_by_whitespace {
+					block_indicators = true
+				}
+			case '#':
+			if preceded_by_whitespace {
+					flow_indicators = true
+					block_indicators = true
+				}
+			}
+		}
+
+		if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode {
+			special_characters = true
+		}
+		if is_space(value, i) {
+			if i == 0 {
+				leading_space = true
+			}
+			if i+width(value[i]) == len(value) {
+				trailing_space = true
+			}
+			if previous_break {
+				break_space = true
+			}
+			previous_space = true
+			previous_break = false
+		} else if is_break(value, i) {
+			line_breaks = true
+			if i == 0 {
+				leading_break = true
+			}
+			if i+width(value[i]) == len(value) {
+				trailing_break = true
+			}
+			if previous_space {
+				space_break = true
+			}
+			previous_space = false
+			previous_break = true
+		} else {
+			previous_space = false
+			previous_break = false
+		}
+
+		// [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition.
+		preceded_by_whitespace = is_blankz(value, i)
+	}
+
+	emitter.scalar_data.multiline = line_breaks
+	emitter.scalar_data.flow_plain_allowed = true
+	emitter.scalar_data.block_plain_allowed = true
+	emitter.scalar_data.single_quoted_allowed = true
+	emitter.scalar_data.block_allowed = true
+
+	if leading_space || leading_break || trailing_space || trailing_break {
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = false
+	}
+	if trailing_space {
+		emitter.scalar_data.block_allowed = false
+	}
+	if break_space {
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = false
+		emitter.scalar_data.single_quoted_allowed = false
+	}
+	if space_break || special_characters {
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = false
+		emitter.scalar_data.single_quoted_allowed = false
+		emitter.scalar_data.block_allowed = false
+	}
+	if line_breaks {
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = false
+	}
+	if flow_indicators {
+		emitter.scalar_data.flow_plain_allowed = false
+	}
+	if block_indicators {
+		emitter.scalar_data.block_plain_allowed = false
+	}
+	return true
+}
+
+// Check if the event data is valid.
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+	emitter.anchor_data.anchor = nil
+	emitter.tag_data.handle = nil
+	emitter.tag_data.suffix = nil
+	emitter.scalar_data.value = nil
+
+	switch event.typ {
+	case yaml_ALIAS_EVENT:
+		if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) {
+			return false
+		}
+
+	case yaml_SCALAR_EVENT:
+		if len(event.anchor) > 0 {
+			if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+				return false
+			}
+		}
+		if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) {
+			if !yaml_emitter_analyze_tag(emitter, event.tag) {
+				return false
+			}
+		}
+		if !yaml_emitter_analyze_scalar(emitter, event.value) {
+			return false
+		}
+
+	case yaml_SEQUENCE_START_EVENT:
+		if len(event.anchor) > 0 {
+			if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+				return false
+			}
+		}
+		if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+			if !yaml_emitter_analyze_tag(emitter, event.tag) {
+				return false
+			}
+		}
+
+	case yaml_MAPPING_START_EVENT:
+		if len(event.anchor) > 0 {
+			if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+				return false
+			}
+		}
+		if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+			if !yaml_emitter_analyze_tag(emitter, event.tag) {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// Write the BOM character.
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool {
+	if !flush(emitter) {
+		return false
+	}
+	pos := emitter.buffer_pos
+	emitter.buffer[pos+0] = '\xEF'
+	emitter.buffer[pos+1] = '\xBB'
+	emitter.buffer[pos+2] = '\xBF'
+	emitter.buffer_pos += 3
+	return true
+}
+
+func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool {
+	indent := emitter.indent
+	if indent < 0 {
+		indent = 0
+	}
+	if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) {
+		if !put_break(emitter) {
+			return false
+		}
+	}
+	for emitter.column < indent {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+	emitter.whitespace = true
+	emitter.indention = true
+	return true
+}
+
+func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool {
+	if need_whitespace && !emitter.whitespace {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+	if !write_all(emitter, indicator) {
+		return false
+	}
+	emitter.whitespace = is_whitespace
+	emitter.indention = (emitter.indention && is_indention)
+	emitter.open_ended = false
+	return true
+}
+
+func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool {
+	if !write_all(emitter, value) {
+		return false
+	}
+	emitter.whitespace = false
+	emitter.indention = false
+	return true
+}
+
+func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool {
+	if !emitter.whitespace {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+	if !write_all(emitter, value) {
+		return false
+	}
+	emitter.whitespace = false
+	emitter.indention = false
+	return true
+}
+
+func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool {
+	if need_whitespace && !emitter.whitespace {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+	for i := 0; i < len(value); {
+		var must_write bool
+		switch value[i] {
+		case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']':
+			must_write = true
+		default:
+			must_write = is_alpha(value, i)
+		}
+		if must_write {
+			if !write(emitter, value, &i) {
+				return false
+			}
+		} else {
+			w := width(value[i])
+			for k := 0; k < w; k++ {
+				octet := value[i]
+				i++
+				if !put(emitter, '%') {
+					return false
+				}
+
+				c := octet >> 4
+				if c < 10 {
+					c += '0'
+				} else {
+					c += 'A' - 10
+				}
+				if !put(emitter, c) {
+					return false
+				}
+
+				c = octet & 0x0f
+				if c < 10 {
+					c += '0'
+				} else {
+					c += 'A' - 10
+				}
+				if !put(emitter, c) {
+					return false
+				}
+			}
+		}
+	}
+	emitter.whitespace = false
+	emitter.indention = false
+	return true
+}
+
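The fallback branch above is URI percent-encoding written longhand. The same transformation as a standalone helper (a hypothetical name, for illustration only):

// percentEncode renders one byte the way the loop above does:
// '%' followed by two uppercase hex digits.
func percentEncode(b byte) string {
	const hex = "0123456789ABCDEF"
	return string([]byte{'%', hex[b>>4], hex[b&0x0F]})
}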
+func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+	if !emitter.whitespace {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+
+	spaces := false
+	breaks := false
+	for i := 0; i < len(value); {
+		if is_space(value, i) {
+			if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+				i += width(value[i])
+			} else {
+				if !write(emitter, value, &i) {
+					return false
+				}
+			}
+			spaces = true
+		} else if is_break(value, i) {
+			if !breaks && value[i] == '\n' {
+				if !put_break(emitter) {
+					return false
+				}
+			}
+			if !write_break(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = true
+			breaks = true
+		} else {
+			if breaks {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+			}
+			if !write(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = false
+			spaces = false
+			breaks = false
+		}
+	}
+
+	emitter.whitespace = false
+	emitter.indention = false
+	if emitter.root_context {
+		emitter.open_ended = true
+	}
+
+	return true
+}
+
+func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+
+	if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) {
+		return false
+	}
+
+	spaces := false
+	breaks := false
+	for i := 0; i < len(value); {
+		if is_space(value, i) {
+			if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+				i += width(value[i])
+			} else {
+				if !write(emitter, value, &i) {
+					return false
+				}
+			}
+			spaces = true
+		} else if is_break(value, i) {
+			if !breaks && value[i] == '\n' {
+				if !put_break(emitter) {
+					return false
+				}
+			}
+			if !write_break(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = true
+			breaks = true
+		} else {
+			if breaks {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+			}
+			if value[i] == '\'' {
+				if !put(emitter, '\'') {
+					return false
+				}
+			}
+			if !write(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = false
+			spaces = false
+			breaks = false
+		}
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) {
+		return false
+	}
+	emitter.whitespace = false
+	emitter.indention = false
+	return true
+}
+
+func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+	spaces := false
+	if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) {
+		return false
+	}
+
+	for i := 0; i < len(value); {
+		if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) ||
+			is_bom(value, i) || is_break(value, i) ||
+			value[i] == '"' || value[i] == '\\' {
+
+			octet := value[i]
+
+			var w int
+			var v rune
+			switch {
+			case octet&0x80 == 0x00:
+				w, v = 1, rune(octet&0x7F)
+			case octet&0xE0 == 0xC0:
+				w, v = 2, rune(octet&0x1F)
+			case octet&0xF0 == 0xE0:
+				w, v = 3, rune(octet&0x0F)
+			case octet&0xF8 == 0xF0:
+				w, v = 4, rune(octet&0x07)
+			}
+			for k := 1; k < w; k++ {
+				octet = value[i+k]
+				v = (v << 6) + (rune(octet) & 0x3F)
+			}
+			i += w
+
+			if !put(emitter, '\\') {
+				return false
+			}
+
+			var ok bool
+			switch v {
+			case 0x00:
+				ok = put(emitter, '0')
+			case 0x07:
+				ok = put(emitter, 'a')
+			case 0x08:
+				ok = put(emitter, 'b')
+			case 0x09:
+				ok = put(emitter, 't')
+			case 0x0A:
+				ok = put(emitter, 'n')
+			case 0x0b:
+				ok = put(emitter, 'v')
+			case 0x0c:
+				ok = put(emitter, 'f')
+			case 0x0d:
+				ok = put(emitter, 'r')
+			case 0x1b:
+				ok = put(emitter, 'e')
+			case 0x22:
+				ok = put(emitter, '"')
+			case 0x5c:
+				ok = put(emitter, '\\')
+			case 0x85:
+				ok = put(emitter, 'N')
+			case 0xA0:
+				ok = put(emitter, '_')
+			case 0x2028:
+				ok = put(emitter, 'L')
+			case 0x2029:
+				ok = put(emitter, 'P')
+			default:
+				if v <= 0xFF {
+					ok = put(emitter, 'x')
+					w = 2
+				} else if v <= 0xFFFF {
+					ok = put(emitter, 'u')
+					w = 4
+				} else {
+					ok = put(emitter, 'U')
+					w = 8
+				}
+				for k := (w - 1) * 4; ok && k >= 0; k -= 4 {
+					digit := byte((v >> uint(k)) & 0x0F)
+					if digit < 10 {
+						ok = put(emitter, digit+'0')
+					} else {
+						ok = put(emitter, digit+'A'-10)
+					}
+				}
+			}
+			if !ok {
+				return false
+			}
+			spaces = false
+		} else if is_space(value, i) {
+			if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+				if is_space(value, i+1) {
+					if !put(emitter, '\\') {
+						return false
+					}
+				}
+				i += width(value[i])
+			} else if !write(emitter, value, &i) {
+				return false
+			}
+			spaces = true
+		} else {
+			if !write(emitter, value, &i) {
+				return false
+			}
+			spaces = false
+		}
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) {
+		return false
+	}
+	emitter.whitespace = false
+	emitter.indention = false
+	return true
+}
+
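The escape table above in action — a minimal sketch, assuming fmt and the package's public Marshal:

out, _ := yaml.Marshal(map[string]string{"k": "esc\x1b null\x00"})
fmt.Print(string(out))
// k: "esc\e null\0"   (unprintable bytes force the double-quoted style)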
+func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool {
+	if is_space(value, 0) || is_break(value, 0) {
+		indent_hint := []byte{'0' + byte(emitter.best_indent)}
+		if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) {
+			return false
+		}
+	}
+
+	emitter.open_ended = false
+
+	var chomp_hint [1]byte
+	if len(value) == 0 {
+		chomp_hint[0] = '-'
+	} else {
+		i := len(value) - 1
+		for value[i]&0xC0 == 0x80 {
+			i--
+		}
+		if !is_break(value, i) {
+			chomp_hint[0] = '-'
+		} else if i == 0 {
+			chomp_hint[0] = '+'
+			emitter.open_ended = true
+		} else {
+			i--
+			for value[i]&0xC0 == 0x80 {
+				i--
+			}
+			if is_break(value, i) {
+				chomp_hint[0] = '+'
+				emitter.open_ended = true
+			}
+		}
+	}
+	if chomp_hint[0] != 0 {
+		if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) {
+			return false
+		}
+	}
+	return true
+}
+
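For reference, the block-scalar headers this function produces:

// strip: |-   value has no trailing line break → '-' chomping indicator
// clip:  |    value ends in exactly one line break → no indicator
// keep:  |+   value ends in two or more line breaks → '+', and open_ended is set
// hint:  |2   value starts with a space or break → explicit indentation digit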
+func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool {
+	if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) {
+		return false
+	}
+	if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+		return false
+	}
+	if !put_break(emitter) {
+		return false
+	}
+	emitter.indention = true
+	emitter.whitespace = true
+	breaks := true
+	for i := 0; i < len(value); {
+		if is_break(value, i) {
+			if !write_break(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = true
+			breaks = true
+		} else {
+			if breaks {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+			}
+			if !write(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = false
+			breaks = false
+		}
+	}
+
+	return true
+}
+
+func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool {
+	if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) {
+		return false
+	}
+	if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+		return false
+	}
+
+	if !put_break(emitter) {
+		return false
+	}
+	emitter.indention = true
+	emitter.whitespace = true
+
+	breaks := true
+	leading_spaces := true
+	for i := 0; i < len(value); {
+		if is_break(value, i) {
+			if !breaks && !leading_spaces && value[i] == '\n' {
+				k := 0
+				for is_break(value, k) {
+					k += width(value[k])
+				}
+				if !is_blankz(value, k) {
+					if !put_break(emitter) {
+						return false
+					}
+				}
+			}
+			if !write_break(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = true
+			breaks = true
+		} else {
+			if breaks {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+				leading_spaces = is_blank(value, i)
+			}
+			if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+				i += width(value[i])
+			} else {
+				if !write(emitter, value, &i) {
+					return false
+				}
+			}
+			emitter.indention = false
+			breaks = false
+		}
+	}
+	return true
+}

+ 306 - 0
vendor/gopkg.in/yaml.v2/encode.go

@@ -0,0 +1,306 @@
+package yaml
+
+import (
+	"encoding"
+	"fmt"
+	"reflect"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+)
+
+type encoder struct {
+	emitter yaml_emitter_t
+	event   yaml_event_t
+	out     []byte
+	flow    bool
+}
+
+func newEncoder() (e *encoder) {
+	e = &encoder{}
+	e.must(yaml_emitter_initialize(&e.emitter))
+	yaml_emitter_set_output_string(&e.emitter, &e.out)
+	yaml_emitter_set_unicode(&e.emitter, true)
+	e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING))
+	e.emit()
+	e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true))
+	e.emit()
+	return e
+}
+
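A minimal in-package sketch of the lifecycle these methods implement (newEncoder emits STREAM-START and DOCUMENT-START; finish closes both):

e := newEncoder()                                      // STREAM-START, DOCUMENT-START
e.marshal("", reflect.ValueOf(map[string]int{"a": 1})) // node events
e.finish()                                             // DOCUMENT-END, STREAM-END
fmt.Print(string(e.out))                               // a: 1
e.destroy()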
+func (e *encoder) finish() {
+	e.must(yaml_document_end_event_initialize(&e.event, true))
+	e.emit()
+	e.emitter.open_ended = false
+	e.must(yaml_stream_end_event_initialize(&e.event))
+	e.emit()
+}
+
+func (e *encoder) destroy() {
+	yaml_emitter_delete(&e.emitter)
+}
+
+func (e *encoder) emit() {
+	// This will internally delete the e.event value.
+	if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT {
+		e.must(false)
+	}
+}
+
+func (e *encoder) must(ok bool) {
+	if !ok {
+		msg := e.emitter.problem
+		if msg == "" {
+			msg = "unknown problem generating YAML content"
+		}
+		failf("%s", msg)
+	}
+}
+
+func (e *encoder) marshal(tag string, in reflect.Value) {
+	if !in.IsValid() {
+		e.nilv()
+		return
+	}
+	iface := in.Interface()
+	if m, ok := iface.(Marshaler); ok {
+		v, err := m.MarshalYAML()
+		if err != nil {
+			fail(err)
+		}
+		if v == nil {
+			e.nilv()
+			return
+		}
+		in = reflect.ValueOf(v)
+	} else if m, ok := iface.(encoding.TextMarshaler); ok {
+		text, err := m.MarshalText()
+		if err != nil {
+			fail(err)
+		}
+		in = reflect.ValueOf(string(text))
+	}
+	switch in.Kind() {
+	case reflect.Interface:
+		if in.IsNil() {
+			e.nilv()
+		} else {
+			e.marshal(tag, in.Elem())
+		}
+	case reflect.Map:
+		e.mapv(tag, in)
+	case reflect.Ptr:
+		if in.IsNil() {
+			e.nilv()
+		} else {
+			e.marshal(tag, in.Elem())
+		}
+	case reflect.Struct:
+		e.structv(tag, in)
+	case reflect.Slice:
+		if in.Type().Elem() == mapItemType {
+			e.itemsv(tag, in)
+		} else {
+			e.slicev(tag, in)
+		}
+	case reflect.String:
+		e.stringv(tag, in)
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		if in.Type() == durationType {
+			e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String()))
+		} else {
+			e.intv(tag, in)
+		}
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		e.uintv(tag, in)
+	case reflect.Float32, reflect.Float64:
+		e.floatv(tag, in)
+	case reflect.Bool:
+		e.boolv(tag, in)
+	default:
+		panic("cannot marshal type: " + in.Type().String())
+	}
+}
+
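This reflection dispatch backs the package's public Marshal. A usage sketch — the Service type and its tags are illustrative, not part of the patch:

type Service struct {
	Image string   `yaml:"image"`
	Ports []string `yaml:"ports,flow,omitempty"`
}

out, _ := yaml.Marshal(Service{Image: "nginx", Ports: []string{"web"}})
fmt.Print(string(out))
// image: nginx
// ports: [web]   (",flow" selects the flow sequence style; an empty slice is omitted)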
+func (e *encoder) mapv(tag string, in reflect.Value) {
+	e.mappingv(tag, func() {
+		keys := keyList(in.MapKeys())
+		sort.Sort(keys)
+		for _, k := range keys {
+			e.marshal("", k)
+			e.marshal("", in.MapIndex(k))
+		}
+	})
+}
+
+func (e *encoder) itemsv(tag string, in reflect.Value) {
+	e.mappingv(tag, func() {
+		slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem)
+		for _, item := range slice {
+			e.marshal("", reflect.ValueOf(item.Key))
+			e.marshal("", reflect.ValueOf(item.Value))
+		}
+	})
+}
+
+func (e *encoder) structv(tag string, in reflect.Value) {
+	sinfo, err := getStructInfo(in.Type())
+	if err != nil {
+		panic(err)
+	}
+	e.mappingv(tag, func() {
+		for _, info := range sinfo.FieldsList {
+			var value reflect.Value
+			if info.Inline == nil {
+				value = in.Field(info.Num)
+			} else {
+				value = in.FieldByIndex(info.Inline)
+			}
+			if info.OmitEmpty && isZero(value) {
+				continue
+			}
+			e.marshal("", reflect.ValueOf(info.Key))
+			e.flow = info.Flow
+			e.marshal("", value)
+		}
+		if sinfo.InlineMap >= 0 {
+			m := in.Field(sinfo.InlineMap)
+			if m.Len() > 0 {
+				e.flow = false
+				keys := keyList(m.MapKeys())
+				sort.Sort(keys)
+				for _, k := range keys {
+					if _, found := sinfo.FieldsMap[k.String()]; found {
+						panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String()))
+					}
+					e.marshal("", k)
+					e.flow = false
+					e.marshal("", m.MapIndex(k))
+				}
+			}
+		}
+	})
+}
+
+func (e *encoder) mappingv(tag string, f func()) {
+	implicit := tag == ""
+	style := yaml_BLOCK_MAPPING_STYLE
+	if e.flow {
+		e.flow = false
+		style = yaml_FLOW_MAPPING_STYLE
+	}
+	e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+	e.emit()
+	f()
+	e.must(yaml_mapping_end_event_initialize(&e.event))
+	e.emit()
+}
+
+func (e *encoder) slicev(tag string, in reflect.Value) {
+	implicit := tag == ""
+	style := yaml_BLOCK_SEQUENCE_STYLE
+	if e.flow {
+		e.flow = false
+		style = yaml_FLOW_SEQUENCE_STYLE
+	}
+	e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+	e.emit()
+	n := in.Len()
+	for i := 0; i < n; i++ {
+		e.marshal("", in.Index(i))
+	}
+	e.must(yaml_sequence_end_event_initialize(&e.event))
+	e.emit()
+}
+
+// isBase60Float returns whether s is in base 60 notation as defined in YAML 1.1.
+//
+// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
+// in YAML 1.2 and by this package, but these should be marshalled quoted for
+// the time being for compatibility with other parsers.
+func isBase60Float(s string) (result bool) {
+	// Fast path.
+	if s == "" {
+		return false
+	}
+	c := s[0]
+	if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
+		return false
+	}
+	// Do the full match.
+	return base60float.MatchString(s)
+}
+
+// From http://yaml.org/type/float.html, except the regular expression there
+// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
+var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
+
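For reference, what the pattern matches and therefore which strings get quoted:

// base60float.MatchString("1:20")     == true   → marshalled quoted, as "1:20"
// base60float.MatchString("-10:30.5") == true   → sign and fraction are allowed
// base60float.MatchString("1:70")     == false  → each sexagesimal group is 0–59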
+func (e *encoder) stringv(tag string, in reflect.Value) {
+	var style yaml_scalar_style_t
+	s := in.String()
+	rtag, rs := resolve("", s)
+	if rtag == yaml_BINARY_TAG {
+		if tag == "" || tag == yaml_STR_TAG {
+			tag = rtag
+			s = rs.(string)
+		} else if tag == yaml_BINARY_TAG {
+			failf("explicitly tagged !!binary data must be base64-encoded")
+		} else {
+			failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
+		}
+	}
+	if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) {
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	} else if strings.Contains(s, "\n") {
+		style = yaml_LITERAL_SCALAR_STYLE
+	} else {
+		style = yaml_PLAIN_SCALAR_STYLE
+	}
+	e.emitScalar(s, "", tag, style)
+}
+
+func (e *encoder) boolv(tag string, in reflect.Value) {
+	var s string
+	if in.Bool() {
+		s = "true"
+	} else {
+		s = "false"
+	}
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) intv(tag string, in reflect.Value) {
+	s := strconv.FormatInt(in.Int(), 10)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) uintv(tag string, in reflect.Value) {
+	s := strconv.FormatUint(in.Uint(), 10)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) floatv(tag string, in reflect.Value) {
+	// FIXME: Handle 64 bits here.
+	s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32)
+	switch s {
+	case "+Inf":
+		s = ".inf"
+	case "-Inf":
+		s = "-.inf"
+	case "NaN":
+		s = ".nan"
+	}
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
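A minimal sketch of the special-value rewrites above, assuming fmt, math, and the package's public Marshal:

out, _ := yaml.Marshal(map[string]float64{"inf": math.Inf(1), "nan": math.NaN()})
fmt.Print(string(out))
// inf: .inf
// nan: .nan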
+func (e *encoder) nilv() {
+	e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
+	implicit := tag == ""
+	e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
+	e.emit()
+}

+ 1096 - 0
vendor/gopkg.in/yaml.v2/parserc.go

@@ -0,0 +1,1096 @@
+package yaml
+
+import (
+	"bytes"
+)
+
+// The parser implements the following grammar:
+//
+// stream               ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// implicit_document    ::= block_node DOCUMENT-END*
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// block_node_or_indentless_sequence    ::=
+//                          ALIAS
+//                          | properties (block_content | indentless_block_sequence)?
+//                          | block_content
+//                          | indentless_block_sequence
+// block_node           ::= ALIAS
+//                          | properties block_content?
+//                          | block_content
+// flow_node            ::= ALIAS
+//                          | properties flow_content?
+//                          | flow_content
+// properties           ::= TAG ANCHOR? | ANCHOR TAG?
+// block_content        ::= block_collection | flow_collection | SCALAR
+// flow_content         ::= flow_collection | SCALAR
+// block_collection     ::= block_sequence | block_mapping
+// flow_collection      ::= flow_sequence | flow_mapping
+// block_sequence       ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// indentless_sequence  ::= (BLOCK-ENTRY block_node?)+
+// block_mapping        ::= BLOCK-MAPPING_START
+//                          ((KEY block_node_or_indentless_sequence?)?
+//                          (VALUE block_node_or_indentless_sequence?)?)*
+//                          BLOCK-END
+// flow_sequence        ::= FLOW-SEQUENCE-START
+//                          (flow_sequence_entry FLOW-ENTRY)*
+//                          flow_sequence_entry?
+//                          FLOW-SEQUENCE-END
+// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// flow_mapping         ::= FLOW-MAPPING-START
+//                          (flow_mapping_entry FLOW-ENTRY)*
+//                          flow_mapping_entry?
+//                          FLOW-MAPPING-END
+// flow_mapping_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
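For orientation, a small document tied to the productions above:

// key:   ← implicit_document → block_node → block_mapping (KEY/VALUE pairs)
// - a    ← the value is an indentless_sequence: BLOCK-ENTRY tokens with no
// - b      surrounding BLOCK-SEQUENCE-START/BLOCK-END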
+// Peek the next token in the token queue.
+func peek_token(parser *yaml_parser_t) *yaml_token_t {
+	if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
+		return &parser.tokens[parser.tokens_head]
+	}
+	return nil
+}
+
+// Remove the next token from the queue (must be called after peek_token).
+func skip_token(parser *yaml_parser_t) {
+	parser.token_available = false
+	parser.tokens_parsed++
+	parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN
+	parser.tokens_head++
+}
+
+// Get the next event.
+func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
+	// Erase the event object.
+	*event = yaml_event_t{}
+
+	// No events after the end of the stream or error.
+	if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE {
+		return true
+	}
+
+	// Generate the next event.
+	return yaml_parser_state_machine(parser, event)
+}
+
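For orientation: parsing `a: [1, 2]` yields one event per yaml_parser_parse call, in this order:

// STREAM-START, DOCUMENT-START,
//   MAPPING-START, SCALAR "a", SEQUENCE-START, SCALAR "1", SCALAR "2",
//   SEQUENCE-END, MAPPING-END,
// DOCUMENT-END, STREAM-END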
+// Set parser error.
+func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool {
+	parser.error = yaml_PARSER_ERROR
+	parser.problem = problem
+	parser.problem_mark = problem_mark
+	return false
+}
+
+func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool {
+	parser.error = yaml_PARSER_ERROR
+	parser.context = context
+	parser.context_mark = context_mark
+	parser.problem = problem
+	parser.problem_mark = problem_mark
+	return false
+}
+
+// State dispatcher.
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
+	//trace("yaml_parser_state_machine", "state:", parser.state.String())
+
+	switch parser.state {
+	case yaml_PARSE_STREAM_START_STATE:
+		return yaml_parser_parse_stream_start(parser, event)
+
+	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+		return yaml_parser_parse_document_start(parser, event, true)
+
+	case yaml_PARSE_DOCUMENT_START_STATE:
+		return yaml_parser_parse_document_start(parser, event, false)
+
+	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+		return yaml_parser_parse_document_content(parser, event)
+
+	case yaml_PARSE_DOCUMENT_END_STATE:
+		return yaml_parser_parse_document_end(parser, event)
+
+	case yaml_PARSE_BLOCK_NODE_STATE:
+		return yaml_parser_parse_node(parser, event, true, false)
+
+	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+		return yaml_parser_parse_node(parser, event, true, true)
+
+	case yaml_PARSE_FLOW_NODE_STATE:
+		return yaml_parser_parse_node(parser, event, false, false)
+
+	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+		return yaml_parser_parse_block_sequence_entry(parser, event, true)
+
+	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+		return yaml_parser_parse_block_sequence_entry(parser, event, false)
+
+	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+		return yaml_parser_parse_indentless_sequence_entry(parser, event)
+
+	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return yaml_parser_parse_block_mapping_key(parser, event, true)
+
+	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+		return yaml_parser_parse_block_mapping_key(parser, event, false)
+
+	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+		return yaml_parser_parse_block_mapping_value(parser, event)
+
+	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+		return yaml_parser_parse_flow_sequence_entry(parser, event, true)
+
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+		return yaml_parser_parse_flow_sequence_entry(parser, event, false)
+
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+		return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
+
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+		return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
+
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+		return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
+
+	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+		return yaml_parser_parse_flow_mapping_key(parser, event, true)
+
+	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+		return yaml_parser_parse_flow_mapping_key(parser, event, false)
+
+	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+		return yaml_parser_parse_flow_mapping_value(parser, event, false)
+
+	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+		return yaml_parser_parse_flow_mapping_value(parser, event, true)
+
+	default:
+		panic("invalid parser state")
+	}
+}
+
+// Parse the production:
+// stream   ::= STREAM-START implicit_document? explicit_document* STREAM-END
+//              ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_STREAM_START_TOKEN {
+		return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+	}
+	parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+	*event = yaml_event_t{
+		typ:        yaml_STREAM_START_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+		encoding:   token.encoding,
+	}
+	skip_token(parser)
+	return true
+}
+
+// Parse the productions:
+// implicit_document    ::= block_node DOCUMENT-END*
+//                          *
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//                          *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	// Parse extra document end indicators.
+	if !implicit {
+		for token.typ == yaml_DOCUMENT_END_TOKEN {
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	}
+
+	if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+		token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+		token.typ != yaml_DOCUMENT_START_TOKEN &&
+		token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an implicit document.
+		if !yaml_parser_process_directives(parser, nil, nil) {
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+		*event = yaml_event_t{
+			typ:        yaml_DOCUMENT_START_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+
+	} else if token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an explicit document.
+		var version_directive *yaml_version_directive_t
+		var tag_directives []yaml_tag_directive_t
+		start_mark := token.start_mark
+		if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+			return false
+		}
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_DOCUMENT_START_TOKEN {
+			yaml_parser_set_parser_error(parser,
+				"did not find expected <document start>", token.start_mark)
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+		end_mark := token.end_mark
+
+		*event = yaml_event_t{
+			typ:               yaml_DOCUMENT_START_EVENT,
+			start_mark:        start_mark,
+			end_mark:          end_mark,
+			version_directive: version_directive,
+			tag_directives:    tag_directives,
+			implicit:          false,
+		}
+		skip_token(parser)
+
+	} else {
+		// Parse the stream end.
+		parser.state = yaml_PARSE_END_STATE
+		*event = yaml_event_t{
+			typ:        yaml_STREAM_END_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+		skip_token(parser)
+	}
+
+	return true
+}
+
+// Parse the productions:
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//                                                    ***********
+//
+func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ == yaml_VERSION_DIRECTIVE_TOKEN ||
+		token.typ == yaml_TAG_DIRECTIVE_TOKEN ||
+		token.typ == yaml_DOCUMENT_START_TOKEN ||
+		token.typ == yaml_DOCUMENT_END_TOKEN ||
+		token.typ == yaml_STREAM_END_TOKEN {
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+		return yaml_parser_process_empty_scalar(parser, event,
+			token.start_mark)
+	}
+	return yaml_parser_parse_node(parser, event, true, false)
+}
+
+// Parse the productions:
+// implicit_document    ::= block_node DOCUMENT-END*
+//                                     *************
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//
+func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	start_mark := token.start_mark
+	end_mark := token.start_mark
+
+	implicit := true
+	if token.typ == yaml_DOCUMENT_END_TOKEN {
+		end_mark = token.end_mark
+		skip_token(parser)
+		implicit = false
+	}
+
+	parser.tag_directives = parser.tag_directives[:0]
+
+	parser.state = yaml_PARSE_DOCUMENT_START_STATE
+	*event = yaml_event_t{
+		typ:        yaml_DOCUMENT_END_EVENT,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		implicit:   implicit,
+	}
+	return true
+}
+
+// Parse the productions:
+// block_node_or_indentless_sequence    ::=
+//                          ALIAS
+//                          *****
+//                          | properties (block_content | indentless_block_sequence)?
+//                            **********  *
+//                          | block_content | indentless_block_sequence
+//                            *
+// block_node           ::= ALIAS
+//                          *****
+//                          | properties block_content?
+//                            ********** *
+//                          | block_content
+//                            *
+// flow_node            ::= ALIAS
+//                          *****
+//                          | properties flow_content?
+//                            ********** *
+//                          | flow_content
+//                            *
+// properties           ::= TAG ANCHOR? | ANCHOR TAG?
+//                          *************************
+// block_content        ::= block_collection | flow_collection | SCALAR
+//                                                               ******
+// flow_content         ::= flow_collection | SCALAR
+//                                            ******
+func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
+	//defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.typ == yaml_ALIAS_TOKEN {
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+		*event = yaml_event_t{
+			typ:        yaml_ALIAS_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+			anchor:     token.value,
+		}
+		skip_token(parser)
+		return true
+	}
+
+	start_mark := token.start_mark
+	end_mark := token.start_mark
+
+	var tag_token bool
+	var tag_handle, tag_suffix, anchor []byte
+	var tag_mark yaml_mark_t
+	if token.typ == yaml_ANCHOR_TOKEN {
+		anchor = token.value
+		start_mark = token.start_mark
+		end_mark = token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ == yaml_TAG_TOKEN {
+			tag_token = true
+			tag_handle = token.value
+			tag_suffix = token.suffix
+			tag_mark = token.start_mark
+			end_mark = token.end_mark
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	} else if token.typ == yaml_TAG_TOKEN {
+		tag_token = true
+		tag_handle = token.value
+		tag_suffix = token.suffix
+		start_mark = token.start_mark
+		tag_mark = token.start_mark
+		end_mark = token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ == yaml_ANCHOR_TOKEN {
+			anchor = token.value
+			end_mark = token.end_mark
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	}
+
+	var tag []byte
+	if tag_token {
+		if len(tag_handle) == 0 {
+			tag = tag_suffix
+			tag_suffix = nil
+		} else {
+			for i := range parser.tag_directives {
+				if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
+					tag = append([]byte(nil), parser.tag_directives[i].prefix...)
+					tag = append(tag, tag_suffix...)
+					break
+				}
+			}
+			if len(tag) == 0 {
+				yaml_parser_set_parser_error_context(parser,
+					"while parsing a node", start_mark,
+					"found undefined tag handle", tag_mark)
+				return false
+			}
+		}
+	}
+
+	implicit := len(tag) == 0
+	if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN {
+		end_mark = token.end_mark
+		parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+		*event = yaml_event_t{
+			typ:        yaml_SEQUENCE_START_EVENT,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			anchor:     anchor,
+			tag:        tag,
+			implicit:   implicit,
+			style:      yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+		}
+		return true
+	}
+	if token.typ == yaml_SCALAR_TOKEN {
+		var plain_implicit, quoted_implicit bool
+		end_mark = token.end_mark
+		if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
+			plain_implicit = true
+		} else if len(tag) == 0 {
+			quoted_implicit = true
+		}
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+
+		*event = yaml_event_t{
+			typ:             yaml_SCALAR_EVENT,
+			start_mark:      start_mark,
+			end_mark:        end_mark,
+			anchor:          anchor,
+			tag:             tag,
+			value:           token.value,
+			implicit:        plain_implicit,
+			quoted_implicit: quoted_implicit,
+			style:           yaml_style_t(token.style),
+		}
+		skip_token(parser)
+		return true
+	}
+	if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN {
+		// [Go] Some of the events below can be merged as they differ only on style.
+		end_mark = token.end_mark
+		parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
+		*event = yaml_event_t{
+			typ:        yaml_SEQUENCE_START_EVENT,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			anchor:     anchor,
+			tag:        tag,
+			implicit:   implicit,
+			style:      yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
+		}
+		return true
+	}
+	if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
+		end_mark = token.end_mark
+		parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
+		*event = yaml_event_t{
+			typ:        yaml_MAPPING_START_EVENT,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			anchor:     anchor,
+			tag:        tag,
+			implicit:   implicit,
+			style:      yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+		}
+		return true
+	}
+	if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
+		end_mark = token.end_mark
+		parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
+		*event = yaml_event_t{
+			typ:        yaml_SEQUENCE_START_EVENT,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			anchor:     anchor,
+			tag:        tag,
+			implicit:   implicit,
+			style:      yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+		}
+		return true
+	}
+	if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
+		end_mark = token.end_mark
+		parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
+		*event = yaml_event_t{
+			typ:        yaml_MAPPING_START_EVENT,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			anchor:     anchor,
+			tag:        tag,
+			implicit:   implicit,
+			style:      yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
+		}
+		return true
+	}
+	if len(anchor) > 0 || len(tag) > 0 {
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+
+		*event = yaml_event_t{
+			typ:             yaml_SCALAR_EVENT,
+			start_mark:      start_mark,
+			end_mark:        end_mark,
+			anchor:          anchor,
+			tag:             tag,
+			implicit:        implicit,
+			quoted_implicit: false,
+			style:           yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+		}
+		return true
+	}
+
+	context := "while parsing a flow node"
+	if block {
+		context = "while parsing a block node"
+	}
+	yaml_parser_set_parser_error_context(parser, context, start_mark,
+		"did not find expected node content", token.start_mark)
+	return false
+}
+
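The properties production handled here accepts the anchor and tag in either order before the node content, for example:

// anchored: &a !!str value   ← ANCHOR then TAG, then SCALAR content
// tagged:   !!str &b value   ← TAG then ANCHOR is equally valid
// ref:      *a               ← ALIAS carries no properties or content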
+// Parse the productions:
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+//                    ********************  *********** *             *********
+//
+func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+	if first {
+		token := peek_token(parser)
+		parser.marks = append(parser.marks, token.start_mark)
+		skip_token(parser)
+	}
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+		mark := token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
+			return yaml_parser_parse_node(parser, event, true, false)
+		} else {
+			parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
+			return yaml_parser_process_empty_scalar(parser, event, mark)
+		}
+	}
+	if token.typ == yaml_BLOCK_END_TOKEN {
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+		parser.marks = parser.marks[:len(parser.marks)-1]
+
+		*event = yaml_event_t{
+			typ:        yaml_SEQUENCE_END_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+
+		skip_token(parser)
+		return true
+	}
+
+	context_mark := parser.marks[len(parser.marks)-1]
+	parser.marks = parser.marks[:len(parser.marks)-1]
+	return yaml_parser_set_parser_error_context(parser,
+		"while parsing a block collection", context_mark,
+		"did not find expected '-' indicator", token.start_mark)
+}
+
+// Parse the productions:
+// indentless_sequence  ::= (BLOCK-ENTRY block_node?)+
+//                           *********** *
+func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+		mark := token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_BLOCK_ENTRY_TOKEN &&
+			token.typ != yaml_KEY_TOKEN &&
+			token.typ != yaml_VALUE_TOKEN &&
+			token.typ != yaml_BLOCK_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
+			return yaml_parser_parse_node(parser, event, true, false)
+		}
+		parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+		return yaml_parser_process_empty_scalar(parser, event, mark)
+	}
+	parser.state = parser.states[len(parser.states)-1]
+	parser.states = parser.states[:len(parser.states)-1]
+
+	*event = yaml_event_t{
+		typ:        yaml_SEQUENCE_END_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.start_mark, // [Go] Shouldn't this be token.end_mark?
+	}
+	return true
+}
+
+// Parse the productions:
+// block_mapping        ::= BLOCK-MAPPING_START
+//                          *******************
+//                          ((KEY block_node_or_indentless_sequence?)?
+//                            *** *
+//                          (VALUE block_node_or_indentless_sequence?)?)*
+//
+//                          BLOCK-END
+//                          *********
+//
+func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+	if first {
+		token := peek_token(parser)
+		parser.marks = append(parser.marks, token.start_mark)
+		skip_token(parser)
+	}
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.typ == yaml_KEY_TOKEN {
+		mark := token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_KEY_TOKEN &&
+			token.typ != yaml_VALUE_TOKEN &&
+			token.typ != yaml_BLOCK_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
+			return yaml_parser_parse_node(parser, event, true, true)
+		} else {
+			parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
+			return yaml_parser_process_empty_scalar(parser, event, mark)
+		}
+	} else if token.typ == yaml_BLOCK_END_TOKEN {
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+		parser.marks = parser.marks[:len(parser.marks)-1]
+		*event = yaml_event_t{
+			typ:        yaml_MAPPING_END_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+		skip_token(parser)
+		return true
+	}
+
+	context_mark := parser.marks[len(parser.marks)-1]
+	parser.marks = parser.marks[:len(parser.marks)-1]
+	return yaml_parser_set_parser_error_context(parser,
+		"while parsing a block mapping", context_mark,
+		"did not find expected key", token.start_mark)
+}
+
+// Parse the productions:
+// block_mapping        ::= BLOCK-MAPPING_START
+//
+//                          ((KEY block_node_or_indentless_sequence?)?
+//
+//                          (VALUE block_node_or_indentless_sequence?)?)*
+//                           ***** *
+//                          BLOCK-END
+//
+//
+func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ == yaml_VALUE_TOKEN {
+		mark := token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_KEY_TOKEN &&
+			token.typ != yaml_VALUE_TOKEN &&
+			token.typ != yaml_BLOCK_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
+			return yaml_parser_parse_node(parser, event, true, true)
+		}
+		parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+		return yaml_parser_process_empty_scalar(parser, event, mark)
+	}
+	parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence        ::= FLOW-SEQUENCE-START
+//                          *******************
+//                          (flow_sequence_entry FLOW-ENTRY)*
+//                           *                   **********
+//                          flow_sequence_entry?
+//                          *
+//                          FLOW-SEQUENCE-END
+//                          *****************
+// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                          *
+//
+func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+	if first {
+		token := peek_token(parser)
+		parser.marks = append(parser.marks, token.start_mark)
+		skip_token(parser)
+	}
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+		if !first {
+			if token.typ == yaml_FLOW_ENTRY_TOKEN {
+				skip_token(parser)
+				token = peek_token(parser)
+				if token == nil {
+					return false
+				}
+			} else {
+				context_mark := parser.marks[len(parser.marks)-1]
+				parser.marks = parser.marks[:len(parser.marks)-1]
+				return yaml_parser_set_parser_error_context(parser,
+					"while parsing a flow sequence", context_mark,
+					"did not find expected ',' or ']'", token.start_mark)
+			}
+		}
+
+		if token.typ == yaml_KEY_TOKEN {
+			parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
+			*event = yaml_event_t{
+				typ:        yaml_MAPPING_START_EVENT,
+				start_mark: token.start_mark,
+				end_mark:   token.end_mark,
+				implicit:   true,
+				style:      yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+			}
+			skip_token(parser)
+			return true
+		} else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
+			return yaml_parser_parse_node(parser, event, false, false)
+		}
+	}
+
+	parser.state = parser.states[len(parser.states)-1]
+	parser.states = parser.states[:len(parser.states)-1]
+	parser.marks = parser.marks[:len(parser.marks)-1]
+
+	*event = yaml_event_t{
+		typ:        yaml_SEQUENCE_END_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+	}
+
+	skip_token(parser)
+	return true
+}
+
+//
+// Parse the productions:
+// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                                      *** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_VALUE_TOKEN &&
+		token.typ != yaml_FLOW_ENTRY_TOKEN &&
+		token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+		parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
+		return yaml_parser_parse_node(parser, event, false, false)
+	}
+	mark := token.end_mark
+	skip_token(parser)
+	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
+	return yaml_parser_process_empty_scalar(parser, event, mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                                                      ***** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ == yaml_VALUE_TOKEN {
+		skip_token(parser)
+		token := peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
+			return yaml_parser_parse_node(parser, event, false, false)
+		}
+	}
+	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
+	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                                                                      *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
+	*event = yaml_event_t{
+		typ:        yaml_MAPPING_END_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.start_mark, // [Go] Shouldn't this be end_mark?
+	}
+	return true
+}
+
+// Parse the productions:
+// flow_mapping         ::= FLOW-MAPPING-START
+//                          ******************
+//                          (flow_mapping_entry FLOW-ENTRY)*
+//                           *                  **********
+//                          flow_mapping_entry?
+//                          ******************
+//                          FLOW-MAPPING-END
+//                          ****************
+// flow_mapping_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                          *           *** *
+//
+func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+	if first {
+		token := peek_token(parser)
+		parser.marks = append(parser.marks, token.start_mark)
+		skip_token(parser)
+	}
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+		if !first {
+			if token.typ == yaml_FLOW_ENTRY_TOKEN {
+				skip_token(parser)
+				token = peek_token(parser)
+				if token == nil {
+					return false
+				}
+			} else {
+				context_mark := parser.marks[len(parser.marks)-1]
+				parser.marks = parser.marks[:len(parser.marks)-1]
+				return yaml_parser_set_parser_error_context(parser,
+					"while parsing a flow mapping", context_mark,
+					"did not find expected ',' or '}'", token.start_mark)
+			}
+		}
+
+		if token.typ == yaml_KEY_TOKEN {
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+			if token.typ != yaml_VALUE_TOKEN &&
+				token.typ != yaml_FLOW_ENTRY_TOKEN &&
+				token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+				parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
+				return yaml_parser_parse_node(parser, event, false, false)
+			} else {
+				parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
+				return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+			}
+		} else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
+			return yaml_parser_parse_node(parser, event, false, false)
+		}
+	}
+
+	parser.state = parser.states[len(parser.states)-1]
+	parser.states = parser.states[:len(parser.states)-1]
+	parser.marks = parser.marks[:len(parser.marks)-1]
+	*event = yaml_event_t{
+		typ:        yaml_MAPPING_END_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+	}
+	skip_token(parser)
+	return true
+}
+
+// Parse the productions:
+// flow_mapping_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                                   *                  ***** *
+//
+func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if empty {
+		parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+		return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+	}
+	if token.typ == yaml_VALUE_TOKEN {
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
+			return yaml_parser_parse_node(parser, event, false, false)
+		}
+	}
+	parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Generate an empty scalar event.
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
+	*event = yaml_event_t{
+		typ:        yaml_SCALAR_EVENT,
+		start_mark: mark,
+		end_mark:   mark,
+		value:      nil, // Empty
+		implicit:   true,
+		style:      yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+	}
+	return true
+}
+
+var default_tag_directives = []yaml_tag_directive_t{
+	{[]byte("!"), []byte("!")},
+	{[]byte("!!"), []byte("tag:yaml.org,2002:")},
+}
+
+// Parse directives.
+func yaml_parser_process_directives(parser *yaml_parser_t,
+	version_directive_ref **yaml_version_directive_t,
+	tag_directives_ref *[]yaml_tag_directive_t) bool {
+
+	var version_directive *yaml_version_directive_t
+	var tag_directives []yaml_tag_directive_t
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+		if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
+			if version_directive != nil {
+				yaml_parser_set_parser_error(parser,
+					"found duplicate %YAML directive", token.start_mark)
+				return false
+			}
+			if token.major != 1 || token.minor != 1 {
+				yaml_parser_set_parser_error(parser,
+					"found incompatible YAML document", token.start_mark)
+				return false
+			}
+			version_directive = &yaml_version_directive_t{
+				major: token.major,
+				minor: token.minor,
+			}
+		} else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+			value := yaml_tag_directive_t{
+				handle: token.value,
+				prefix: token.prefix,
+			}
+			if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
+				return false
+			}
+			tag_directives = append(tag_directives, value)
+		}
+
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+	}
+
+	for i := range default_tag_directives {
+		if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
+			return false
+		}
+	}
+
+	if version_directive_ref != nil {
+		*version_directive_ref = version_directive
+	}
+	if tag_directives_ref != nil {
+		*tag_directives_ref = tag_directives
+	}
+	return true
+}
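
Note the version check above: only documents declaring %YAML 1.1 pass; any other version is rejected as incompatible. A hedged sketch of the observable behaviour through Unmarshal:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	var v interface{}

	// Accepted: the declared version matches the 1.1 check above.
	fmt.Println(yaml.Unmarshal([]byte("%YAML 1.1\n---\na: 1\n"), &v)) // <nil>

	// Rejected: token.minor != 1, so directive processing fails.
	err := yaml.Unmarshal([]byte("%YAML 1.2\n---\na: 1\n"), &v)
	fmt.Println(err != nil) // true
}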
+
+// Append a tag directive to the directives stack.
+func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
+	for i := range parser.tag_directives {
+		if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
+			if allow_duplicates {
+				return true
+			}
+			return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
+		}
+	}
+
+	// [Go] I suspect the copy is unnecessary. This was likely done
+	// because there was no way to track ownership of the data.
+	value_copy := yaml_tag_directive_t{
+		handle: make([]byte, len(value.handle)),
+		prefix: make([]byte, len(value.prefix)),
+	}
+	copy(value_copy.handle, value.handle)
+	copy(value_copy.prefix, value.prefix)
+	parser.tag_directives = append(parser.tag_directives, value_copy)
+	return true
+}
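
The default_tag_directives table above is why the '!!' shorthand works without any %TAG declaration: '!!str' expands to 'tag:yaml.org,2002:str'. A small sketch, assuming only the public Unmarshal API:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	// "!!str 123" forces the scalar to resolve as a string rather
	// than an int, via the default "!!" -> "tag:yaml.org,2002:" prefix.
	var v interface{}
	if err := yaml.Unmarshal([]byte("!!str 123"), &v); err != nil {
		panic(err)
	}
	fmt.Printf("%T %v\n", v, v) // string 123
}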

+ 394 - 0
vendor/gopkg.in/yaml.v2/readerc.go

@@ -0,0 +1,394 @@
+package yaml
+
+import (
+	"io"
+)
+
+// Set the reader error and return false.
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
+	parser.error = yaml_READER_ERROR
+	parser.problem = problem
+	parser.problem_offset = offset
+	parser.problem_value = value
+	return false
+}
+
+// Byte order marks.
+const (
+	bom_UTF8    = "\xef\xbb\xbf"
+	bom_UTF16LE = "\xff\xfe"
+	bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM. If no BOM is
+// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+	// Ensure that we have enough bytes in the raw buffer.
+	for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+		if !yaml_parser_update_raw_buffer(parser) {
+			return false
+		}
+	}
+
+	// Determine the encoding.
+	buf := parser.raw_buffer
+	pos := parser.raw_buffer_pos
+	avail := len(buf) - pos
+	if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+		parser.encoding = yaml_UTF16LE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+		parser.encoding = yaml_UTF16BE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+		parser.encoding = yaml_UTF8_ENCODING
+		parser.raw_buffer_pos += 3
+		parser.offset += 3
+	} else {
+		parser.encoding = yaml_UTF8_ENCODING
+	}
+	return true
+}
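
The encoding sniffing above reduces to comparing the first two or three raw bytes against the BOM constants. A standalone sketch of the same decision; detectEncoding is an illustrative helper, not part of the vendored API:

package main

import (
	"bytes"
	"fmt"
)

// detectEncoding mirrors the BOM check above: UTF-16 BOMs are two
// bytes, the UTF-8 BOM is three, and no BOM defaults to UTF-8.
func detectEncoding(raw []byte) (enc string, bomLen int) {
	switch {
	case bytes.HasPrefix(raw, []byte("\xff\xfe")):
		return "UTF-16LE", 2
	case bytes.HasPrefix(raw, []byte("\xfe\xff")):
		return "UTF-16BE", 2
	case bytes.HasPrefix(raw, []byte("\xef\xbb\xbf")):
		return "UTF-8", 3
	default:
		return "UTF-8", 0
	}
}

func main() {
	fmt.Println(detectEncoding([]byte("\xef\xbb\xbfkey: value"))) // UTF-8 3
	fmt.Println(detectEncoding([]byte("key: value")))             // UTF-8 0
}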
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+	size_read := 0
+
+	// Return if the raw buffer is full.
+	if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+		return true
+	}
+
+	// Return on EOF.
+	if parser.eof {
+		return true
+	}
+
+	// Move the remaining bytes in the raw buffer to the beginning.
+	if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+		copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+	}
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+	parser.raw_buffer_pos = 0
+
+	// Call the read handler to fill the buffer.
+	size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+	if err == io.EOF {
+		parser.eof = true
+	} else if err != nil {
+		return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+	}
+	return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+	if parser.read_handler == nil {
+		panic("read handler must be set")
+	}
+
+	// If the EOF flag is set and the raw buffer is empty, do nothing.
+	if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+		return true
+	}
+
+	// Return if the buffer contains enough characters.
+	if parser.unread >= length {
+		return true
+	}
+
+	// Determine the input encoding if it is not known yet.
+	if parser.encoding == yaml_ANY_ENCODING {
+		if !yaml_parser_determine_encoding(parser) {
+			return false
+		}
+	}
+
+	// Move the unread characters to the beginning of the buffer.
+	buffer_len := len(parser.buffer)
+	if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
+		copy(parser.buffer, parser.buffer[parser.buffer_pos:])
+		buffer_len -= parser.buffer_pos
+		parser.buffer_pos = 0
+	} else if parser.buffer_pos == buffer_len {
+		buffer_len = 0
+		parser.buffer_pos = 0
+	}
+
+	// Open the whole buffer for writing, and cut it before returning.
+	parser.buffer = parser.buffer[:cap(parser.buffer)]
+
+	// Fill the buffer until it has enough characters.
+	first := true
+	for parser.unread < length {
+
+		// Fill the raw buffer if necessary.
+		if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
+			if !yaml_parser_update_raw_buffer(parser) {
+				parser.buffer = parser.buffer[:buffer_len]
+				return false
+			}
+		}
+		first = false
+
+		// Decode the raw buffer.
+	inner:
+		for parser.raw_buffer_pos != len(parser.raw_buffer) {
+			var value rune
+			var width int
+
+			raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
+
+			// Decode the next character.
+			switch parser.encoding {
+			case yaml_UTF8_ENCODING:
+				// Decode a UTF-8 character.  Check RFC 3629
+				// (http://www.ietf.org/rfc/rfc3629.txt) for more details.
+				//
+				// The following table (taken from the RFC) is used for
+				// decoding.
+				//
+				//    Char. number range |        UTF-8 octet sequence
+				//      (hexadecimal)    |              (binary)
+				//   --------------------+------------------------------------
+				//   0000 0000-0000 007F | 0xxxxxxx
+				//   0000 0080-0000 07FF | 110xxxxx 10xxxxxx
+				//   0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
+				//   0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+				//
+				// Additionally, the characters in the range 0xD800-0xDFFF
+				// are prohibited as they are reserved for use with UTF-16
+				// surrogate pairs.
+
+				// Determine the length of the UTF-8 sequence.
+				octet := parser.raw_buffer[parser.raw_buffer_pos]
+				switch {
+				case octet&0x80 == 0x00:
+					width = 1
+				case octet&0xE0 == 0xC0:
+					width = 2
+				case octet&0xF0 == 0xE0:
+					width = 3
+				case octet&0xF8 == 0xF0:
+					width = 4
+				default:
+					// The leading octet is invalid.
+					return yaml_parser_set_reader_error(parser,
+						"invalid leading UTF-8 octet",
+						parser.offset, int(octet))
+				}
+
+				// Check if the raw buffer contains an incomplete character.
+				if width > raw_unread {
+					if parser.eof {
+						return yaml_parser_set_reader_error(parser,
+							"incomplete UTF-8 octet sequence",
+							parser.offset, -1)
+					}
+					break inner
+				}
+
+				// Decode the leading octet.
+				switch {
+				case octet&0x80 == 0x00:
+					value = rune(octet & 0x7F)
+				case octet&0xE0 == 0xC0:
+					value = rune(octet & 0x1F)
+				case octet&0xF0 == 0xE0:
+					value = rune(octet & 0x0F)
+				case octet&0xF8 == 0xF0:
+					value = rune(octet & 0x07)
+				default:
+					value = 0
+				}
+
+				// Check and decode the trailing octets.
+				for k := 1; k < width; k++ {
+					octet = parser.raw_buffer[parser.raw_buffer_pos+k]
+
+					// Check if the octet is valid.
+					if (octet & 0xC0) != 0x80 {
+						return yaml_parser_set_reader_error(parser,
+							"invalid trailing UTF-8 octet",
+							parser.offset+k, int(octet))
+					}
+
+					// Decode the octet.
+					value = (value << 6) + rune(octet&0x3F)
+				}
+
+				// Check the length of the sequence against the value.
+				switch {
+				case width == 1:
+				case width == 2 && value >= 0x80:
+				case width == 3 && value >= 0x800:
+				case width == 4 && value >= 0x10000:
+				default:
+					return yaml_parser_set_reader_error(parser,
+						"invalid length of a UTF-8 sequence",
+						parser.offset, -1)
+				}
+
+				// Check the range of the value.
+				if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
+					return yaml_parser_set_reader_error(parser,
+						"invalid Unicode character",
+						parser.offset, int(value))
+				}
+
+			case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
+				var low, high int
+				if parser.encoding == yaml_UTF16LE_ENCODING {
+					low, high = 0, 1
+				} else {
+					low, high = 1, 0
+				}
+
+				// The UTF-16 encoding is not as simple as one might
+				// naively think.  Check RFC 2781
+				// (http://www.ietf.org/rfc/rfc2781.txt).
+				//
+				// Normally, two subsequent bytes describe a Unicode
+				// character.  However a special technique (called a
+				// surrogate pair) is used for specifying character
+				// values larger than 0xFFFF.
+				//
+				// A surrogate pair consists of two pseudo-characters:
+				//      high surrogate area (0xD800-0xDBFF)
+				//      low surrogate area (0xDC00-0xDFFF)
+				//
+				// The following formulas are used for decoding
+				// and encoding characters using surrogate pairs:
+				//
+				//  U  = U' + 0x10000   (0x01 00 00 <= U <= 0x10 FF FF)
+				//  U' = yyyyyyyyyyxxxxxxxxxx   (0 <= U' <= 0x0F FF FF)
+				//  W1 = 110110yyyyyyyyyy
+				//  W2 = 110111xxxxxxxxxx
+				//
+				// where U is the character value, W1 is the high surrogate
+				// area, W2 is the low surrogate area.
+
+				// Check for incomplete UTF-16 character.
+				if raw_unread < 2 {
+					if parser.eof {
+						return yaml_parser_set_reader_error(parser,
+							"incomplete UTF-16 character",
+							parser.offset, -1)
+					}
+					break inner
+				}
+
+				// Get the character.
+				value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
+					(rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
+
+				// Check for unexpected low surrogate area.
+				if value&0xFC00 == 0xDC00 {
+					return yaml_parser_set_reader_error(parser,
+						"unexpected low surrogate area",
+						parser.offset, int(value))
+				}
+
+				// Check for a high surrogate area.
+				if value&0xFC00 == 0xD800 {
+					width = 4
+
+					// Check for incomplete surrogate pair.
+					if raw_unread < 4 {
+						if parser.eof {
+							return yaml_parser_set_reader_error(parser,
+								"incomplete UTF-16 surrogate pair",
+								parser.offset, -1)
+						}
+						break inner
+					}
+
+					// Get the next character.
+					value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
+						(rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
+
+					// Check for a low surrogate area.
+					if value2&0xFC00 != 0xDC00 {
+						return yaml_parser_set_reader_error(parser,
+							"expected low surrogate area",
+							parser.offset+2, int(value2))
+					}
+
+					// Generate the value of the surrogate pair.
+					value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
+				} else {
+					width = 2
+				}
+
+			default:
+				panic("impossible")
+			}
+
+			// Check if the character is in the allowed range:
+			//      #x9 | #xA | #xD | [#x20-#x7E]               (8 bit)
+			//      | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD]    (16 bit)
+			//      | [#x10000-#x10FFFF]                        (32 bit)
+			switch {
+			case value == 0x09:
+			case value == 0x0A:
+			case value == 0x0D:
+			case value >= 0x20 && value <= 0x7E:
+			case value == 0x85:
+			case value >= 0xA0 && value <= 0xD7FF:
+			case value >= 0xE000 && value <= 0xFFFD:
+			case value >= 0x10000 && value <= 0x10FFFF:
+			default:
+				return yaml_parser_set_reader_error(parser,
+					"control characters are not allowed",
+					parser.offset, int(value))
+			}
+
+			// Move the raw pointers.
+			parser.raw_buffer_pos += width
+			parser.offset += width
+
+			// Finally put the character into the buffer.
+			if value <= 0x7F {
+				// 0000 0000-0000 007F . 0xxxxxxx
+				parser.buffer[buffer_len+0] = byte(value)
+				buffer_len += 1
+			} else if value <= 0x7FF {
+				// 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
+				parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
+				parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
+				buffer_len += 2
+			} else if value <= 0xFFFF {
+				// 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
+				parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
+				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
+				parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
+				buffer_len += 3
+			} else {
+				// 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+				parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
+				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
+				parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
+				parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
+				buffer_len += 4
+			}
+
+			parser.unread++
+		}
+
+		// On EOF, put NUL into the buffer and return.
+		if parser.eof {
+			parser.buffer[buffer_len] = 0
+			buffer_len++
+			parser.unread++
+			break
+		}
+	}
+	parser.buffer = parser.buffer[:buffer_len]
+	return true
+}
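
The surrogate-pair formulas in the UTF-16 branch above are easy to check by hand. A worked example for U+1F600 using the same arithmetic on a little-endian byte stream; this is plain Go, independent of the vendored parser:

package main

import "fmt"

func main() {
	// U+1F600 encoded as UTF-16LE: W1=0xD83D, W2=0xDE00.
	raw := []byte{0x3D, 0xD8, 0x00, 0xDE}

	w1 := rune(raw[0]) | rune(raw[1])<<8 // 0xD83D, high surrogate area
	w2 := rune(raw[2]) | rune(raw[3])<<8 // 0xDE00, low surrogate area

	// U = 0x10000 + (W1 & 0x3FF)<<10 + (W2 & 0x3FF), per the comment above.
	u := 0x10000 + (w1&0x3FF)<<10 + (w2 & 0x3FF)
	fmt.Printf("U+%04X %c\n", u, u) // U+1F600 and the emoji itself
}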

+ 203 - 0
vendor/gopkg.in/yaml.v2/resolve.go

@@ -0,0 +1,203 @@
+package yaml
+
+import (
+	"encoding/base64"
+	"math"
+	"strconv"
+	"strings"
+	"unicode/utf8"
+)
+
+type resolveMapItem struct {
+	value interface{}
+	tag   string
+}
+
+var resolveTable = make([]byte, 256)
+var resolveMap = make(map[string]resolveMapItem)
+
+func init() {
+	t := resolveTable
+	t[int('+')] = 'S' // Sign
+	t[int('-')] = 'S'
+	for _, c := range "0123456789" {
+		t[int(c)] = 'D' // Digit
+	}
+	for _, c := range "yYnNtTfFoO~" {
+		t[int(c)] = 'M' // In map
+	}
+	t[int('.')] = '.' // Float (potentially in map)
+
+	var resolveMapList = []struct {
+		v   interface{}
+		tag string
+		l   []string
+	}{
+		{true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
+		{true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
+		{true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
+		{false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
+		{false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
+		{false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
+		{nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
+		{math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
+		{math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
+		{math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
+		{math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
+		{"<<", yaml_MERGE_TAG, []string{"<<"}},
+	}
+
+	m := resolveMap
+	for _, item := range resolveMapList {
+		for _, s := range item.l {
+			m[s] = resolveMapItem{item.v, item.tag}
+		}
+	}
+}
+
+const longTagPrefix = "tag:yaml.org,2002:"
+
+func shortTag(tag string) string {
+	// TODO This can easily be made faster and produce less garbage.
+	if strings.HasPrefix(tag, longTagPrefix) {
+		return "!!" + tag[len(longTagPrefix):]
+	}
+	return tag
+}
+
+func longTag(tag string) string {
+	if strings.HasPrefix(tag, "!!") {
+		return longTagPrefix + tag[2:]
+	}
+	return tag
+}
+
+func resolvableTag(tag string) bool {
+	switch tag {
+	case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG:
+		return true
+	}
+	return false
+}
+
+func resolve(tag string, in string) (rtag string, out interface{}) {
+	if !resolvableTag(tag) {
+		return tag, in
+	}
+
+	defer func() {
+		switch tag {
+		case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
+			return
+		}
+		failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
+	}()
+
+	// Any data is accepted as a !!str or !!binary.
+	// Otherwise, the prefix is enough of a hint about what it might be.
+	hint := byte('N')
+	if in != "" {
+		hint = resolveTable[in[0]]
+	}
+	if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
+		// Handle things we can lookup in a map.
+		if item, ok := resolveMap[in]; ok {
+			return item.tag, item.value
+		}
+
+		// Base 60 floats are a bad idea, were dropped in YAML 1.2, and
+		// are purposefully unsupported here. They're still quoted on
+		// the way out for compatibility with other parsers, though.
+
+		switch hint {
+		case 'M':
+			// We've already checked the map above.
+
+		case '.':
+			// Not in the map, so maybe a normal float.
+			floatv, err := strconv.ParseFloat(in, 64)
+			if err == nil {
+				return yaml_FLOAT_TAG, floatv
+			}
+
+		case 'D', 'S':
+			// Int, float, or timestamp.
+			plain := strings.Replace(in, "_", "", -1)
+			intv, err := strconv.ParseInt(plain, 0, 64)
+			if err == nil {
+				if intv == int64(int(intv)) {
+					return yaml_INT_TAG, int(intv)
+				} else {
+					return yaml_INT_TAG, intv
+				}
+			}
+			uintv, err := strconv.ParseUint(plain, 0, 64)
+			if err == nil {
+				return yaml_INT_TAG, uintv
+			}
+			floatv, err := strconv.ParseFloat(plain, 64)
+			if err == nil {
+				return yaml_FLOAT_TAG, floatv
+			}
+			if strings.HasPrefix(plain, "0b") {
+				intv, err := strconv.ParseInt(plain[2:], 2, 64)
+				if err == nil {
+					if intv == int64(int(intv)) {
+						return yaml_INT_TAG, int(intv)
+					} else {
+						return yaml_INT_TAG, intv
+					}
+				}
+				uintv, err := strconv.ParseUint(plain[2:], 2, 64)
+				if err == nil {
+					return yaml_INT_TAG, uintv
+				}
+			} else if strings.HasPrefix(plain, "-0b") {
+				intv, err := strconv.ParseInt(plain[3:], 2, 64)
+				if err == nil {
+					if intv == int64(int(intv)) {
+						return yaml_INT_TAG, -int(intv)
+					} else {
+						return yaml_INT_TAG, -intv
+					}
+				}
+			}
+			// XXX Handle timestamps here.
+
+		default:
+			panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
+		}
+	}
+	if tag == yaml_BINARY_TAG {
+		return yaml_BINARY_TAG, in
+	}
+	if utf8.ValidString(in) {
+		return yaml_STR_TAG, in
+	}
+	return yaml_BINARY_TAG, encodeBase64(in)
+}
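
resolve is where YAML 1.1's permissive implicit typing lives: unquoted yes/on/off become booleans, '~' becomes null, and digit-leading scalars are tried as ints (base 0, so 0x... works) and floats, while quoting suppresses all of it. A quick sketch of the visible effect:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	src := []byte("a: yes\nb: off\nc: ~\nd: 0x1F\ne: 1.5\nf: 'yes'\n")
	var v map[string]interface{}
	if err := yaml.Unmarshal(src, &v); err != nil {
		panic(err)
	}
	// yes/off hit the resolve map; 0x1F goes through ParseInt with
	// base 0; quoting 'yes' suppresses implicit resolution entirely.
	fmt.Printf("%T %T %v %T %T %T\n", v["a"], v["b"], v["c"], v["d"], v["e"], v["f"])
	// bool bool <nil> int float64 string
}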
+
+// encodeBase64 encodes s as base64 that is broken up into multiple lines
+// as appropriate for the resulting length.
+func encodeBase64(s string) string {
+	const lineLen = 70
+	encLen := base64.StdEncoding.EncodedLen(len(s))
+	lines := encLen/lineLen + 1
+	buf := make([]byte, encLen*2+lines)
+	in := buf[0:encLen]
+	out := buf[encLen:]
+	base64.StdEncoding.Encode(in, []byte(s))
+	k := 0
+	for i := 0; i < len(in); i += lineLen {
+		j := i + lineLen
+		if j > len(in) {
+			j = len(in)
+		}
+		k += copy(out[k:], in[i:j])
+		if lines > 1 {
+			out[k] = '\n'
+			k++
+		}
+	}
+	return string(out[:k])
+}
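
encodeBase64 backs the !!binary path in resolve: byte slices (and non-UTF-8 strings) round-trip as base64-encoded binary scalars. A sketch with the public Marshal API; the output shown in the comment is illustrative:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	// A []byte value marshals as a !!binary scalar produced by the
	// line-wrapping base64 encoder above.
	out, err := yaml.Marshal(map[string][]byte{"blob": {0xDE, 0xAD, 0xBE, 0xEF}})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // expected: blob: !!binary 3q2+7w==
}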

+ 2710 - 0
vendor/gopkg.in/yaml.v2/scannerc.go

@@ -0,0 +1,2710 @@
+package yaml
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/cvs/current.html).  We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive-descent parser (or an
+// LL(1) parser, as it is usually called).
+//
+// Actually there are two aspects of Scanning that might be called "clever";
+// the rest is quite straightforward.  These are "block collection start" and
+// "simple keys".  Both are explained below in detail.
+//
+// Here the Scanning step is explained and implemented.  We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+//      STREAM-START(encoding)          # The stream start.
+//      STREAM-END                      # The stream end.
+//      VERSION-DIRECTIVE(major,minor)  # The '%YAML' directive.
+//      TAG-DIRECTIVE(handle,prefix)    # The '%TAG' directive.
+//      DOCUMENT-START                  # '---'
+//      DOCUMENT-END                    # '...'
+//      BLOCK-SEQUENCE-START            # Indentation increase denoting a block
+//      BLOCK-MAPPING-START             # sequence or a block mapping.
+//      BLOCK-END                       # Indentation decrease.
+//      FLOW-SEQUENCE-START             # '['
+//      FLOW-SEQUENCE-END               # ']'
+//      FLOW-MAPPING-START              # '{'
+//      FLOW-MAPPING-END                # '}'
+//      BLOCK-ENTRY                     # '-'
+//      FLOW-ENTRY                      # ','
+//      KEY                             # '?' or nothing (simple keys).
+//      VALUE                           # ':'
+//      ALIAS(anchor)                   # '*anchor'
+//      ANCHOR(anchor)                  # '&anchor'
+//      TAG(handle,suffix)              # '!handle!suffix'
+//      SCALAR(value,style)             # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+//      STREAM-START(encoding)
+//      STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for tags:
+//
+//      VERSION-DIRECTIVE(major,minor)
+//      TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+//      %YAML   1.1
+//      %TAG    !   !foo
+//      %TAG    !yaml!  tag:yaml.org,2002:
+//      ---
+//
+// The corresponding sequence of tokens:
+//
+//      STREAM-START(utf-8)
+//      VERSION-DIRECTIVE(1,1)
+//      TAG-DIRECTIVE("!","!foo")
+//      TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+//      DOCUMENT-START
+//      STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+//      DOCUMENT-START
+//      DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+//      1. An implicit document:
+//
+//          'a scalar'
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          SCALAR("a scalar",single-quoted)
+//          STREAM-END
+//
+//      2. An explicit document:
+//
+//          ---
+//          'a scalar'
+//          ...
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          DOCUMENT-START
+//          SCALAR("a scalar",single-quoted)
+//          DOCUMENT-END
+//          STREAM-END
+//
+//      3. Several documents in a stream:
+//
+//          'a scalar'
+//          ---
+//          'another scalar'
+//          ---
+//          'yet another scalar'
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          SCALAR("a scalar",single-quoted)
+//          DOCUMENT-START
+//          SCALAR("another scalar",single-quoted)
+//          DOCUMENT-START
+//          SCALAR("yet another scalar",single-quoted)
+//          STREAM-END
+//
+// We have already introduced the SCALAR token above.  The following tokens are
+// used to describe aliases, anchors, tag, and scalars:
+//
+//      ALIAS(anchor)
+//      ANCHOR(anchor)
+//      TAG(handle,suffix)
+//      SCALAR(value,style)
+//
+// The following series of examples illustrate the usage of these tokens:
+//
+//      1. A recursive sequence:
+//
+//          &A [ *A ]
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          ANCHOR("A")
+//          FLOW-SEQUENCE-START
+//          ALIAS("A")
+//          FLOW-SEQUENCE-END
+//          STREAM-END
+//
+//      2. A tagged scalar:
+//
+//          !!float "3.14"  # A good approximation.
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          TAG("!!","float")
+//          SCALAR("3.14",double-quoted)
+//          STREAM-END
+//
+//      3. Various scalar styles:
+//
+//          --- # Implicit empty plain scalars do not produce tokens.
+//          --- a plain scalar
+//          --- 'a single-quoted scalar'
+//          --- "a double-quoted scalar"
+//          --- |-
+//            a literal scalar
+//          --- >-
+//            a folded
+//            scalar
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          DOCUMENT-START
+//          DOCUMENT-START
+//          SCALAR("a plain scalar",plain)
+//          DOCUMENT-START
+//          SCALAR("a single-quoted scalar",single-quoted)
+//          DOCUMENT-START
+//          SCALAR("a double-quoted scalar",double-quoted)
+//          DOCUMENT-START
+//          SCALAR("a literal scalar",literal)
+//          DOCUMENT-START
+//          SCALAR("a folded scalar",folded)
+//          STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+//      FLOW-SEQUENCE-START
+//      FLOW-SEQUENCE-END
+//      FLOW-MAPPING-START
+//      FLOW-MAPPING-END
+//      FLOW-ENTRY
+//      KEY
+//      VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+// correspondingly.  FLOW-ENTRY represents the ',' indicator.  Finally, the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+//      1. A flow sequence:
+//
+//          [item 1, item 2, item 3]
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          FLOW-SEQUENCE-START
+//          SCALAR("item 1",plain)
+//          FLOW-ENTRY
+//          SCALAR("item 2",plain)
+//          FLOW-ENTRY
+//          SCALAR("item 3",plain)
+//          FLOW-SEQUENCE-END
+//          STREAM-END
+//
+//      2. A flow mapping:
+//
+//          {
+//              a simple key: a value,  # Note that the KEY token is produced.
+//              ? a complex key: another value,
+//          }
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          FLOW-MAPPING-START
+//          KEY
+//          SCALAR("a simple key",plain)
+//          VALUE
+//          SCALAR("a value",plain)
+//          FLOW-ENTRY
+//          KEY
+//          SCALAR("a complex key",plain)
+//          VALUE
+//          SCALAR("another value",plain)
+//          FLOW-ENTRY
+//          FLOW-MAPPING-END
+//          STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator.  Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+//      BLOCK-SEQUENCE-START
+//      BLOCK-MAPPING-START
+//      BLOCK-END
+//      BLOCK-ENTRY
+//      KEY
+//      VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote the indentation
+// increase that precedes a block collection (cf. the INDENT token in Python).
+// The token BLOCK-END denotes the indentation decrease that ends a block
+// collection (cf. the DEDENT token in Python).  However, YAML has some syntax
+// peculiarities that make detection of these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' correspondingly.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+//      1. Block sequences:
+//
+//          - item 1
+//          - item 2
+//          -
+//            - item 3.1
+//            - item 3.2
+//          -
+//            key 1: value 1
+//            key 2: value 2
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 2",plain)
+//          BLOCK-ENTRY
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 3.1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 3.2",plain)
+//          BLOCK-END
+//          BLOCK-ENTRY
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("key 1",plain)
+//          VALUE
+//          SCALAR("value 1",plain)
+//          KEY
+//          SCALAR("key 2",plain)
+//          VALUE
+//          SCALAR("value 2",plain)
+//          BLOCK-END
+//          BLOCK-END
+//          STREAM-END
+//
+//      2. Block mappings:
+//
+//          a simple key: a value   # The KEY token is produced here.
+//          ? a complex key
+//          : another value
+//          a mapping:
+//            key 1: value 1
+//            key 2: value 2
+//          a sequence:
+//            - item 1
+//            - item 2
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("a simple key",plain)
+//          VALUE
+//          SCALAR("a value",plain)
+//          KEY
+//          SCALAR("a complex key",plain)
+//          VALUE
+//          SCALAR("another value",plain)
+//          KEY
+//          SCALAR("a mapping",plain)
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("key 1",plain)
+//          VALUE
+//          SCALAR("value 1",plain)
+//          KEY
+//          SCALAR("key 2",plain)
+//          VALUE
+//          SCALAR("value 2",plain)
+//          BLOCK-END
+//          KEY
+//          SCALAR("a sequence",plain)
+//          VALUE
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 2",plain)
+//          BLOCK-END
+//          BLOCK-END
+//          STREAM-END
+//
+// YAML does not always require a new block collection to start on a new
+// line.  If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start at the current line.  The following examples
+// illustrate this case:
+//
+//      1. Collections in a sequence:
+//
+//          - - item 1
+//            - item 2
+//          - key 1: value 1
+//            key 2: value 2
+//          - ? complex key
+//            : complex value
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 2",plain)
+//          BLOCK-END
+//          BLOCK-ENTRY
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("key 1",plain)
+//          VALUE
+//          SCALAR("value 1",plain)
+//          KEY
+//          SCALAR("key 2",plain)
+//          VALUE
+//          SCALAR("value 2",plain)
+//          BLOCK-END
+//          BLOCK-ENTRY
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("complex key")
+//          VALUE
+//          SCALAR("complex value")
+//          BLOCK-END
+//          BLOCK-END
+//          STREAM-END
+//
+//      2. Collections in a mapping:
+//
+//          ? a sequence
+//          : - item 1
+//            - item 2
+//          ? a mapping
+//          : key 1: value 1
+//            key 2: value 2
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("a sequence",plain)
+//          VALUE
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 2",plain)
+//          BLOCK-END
+//          KEY
+//          SCALAR("a mapping",plain)
+//          VALUE
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("key 1",plain)
+//          VALUE
+//          SCALAR("value 1",plain)
+//          KEY
+//          SCALAR("key 2",plain)
+//          VALUE
+//          SCALAR("value 2",plain)
+//          BLOCK-END
+//          BLOCK-END
+//          STREAM-END
+//
+// YAML also permits non-indented sequences if they are included in a block
+// mapping.  In this case, the token BLOCK-SEQUENCE-START is not produced:
+//
+//      key:
+//      - item 1    # BLOCK-SEQUENCE-START is NOT produced here.
+//      - item 2
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("key",plain)
+//      VALUE
+//      BLOCK-ENTRY
+//      SCALAR("item 1",plain)
+//      BLOCK-ENTRY
+//      SCALAR("item 2",plain)
+//      BLOCK-END
+//
+
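
The token streams sketched above are internal, but their effect is observable: flow and block styles scan to different tokens yet parse to identical values. A small check using only the public API:

package main

import (
	"fmt"
	"reflect"

	"gopkg.in/yaml.v2"
)

func main() {
	// FLOW-SEQUENCE-START/-END versus BLOCK-ENTRY tokens: different
	// scans, same resulting value.
	var flow, block []int
	if err := yaml.Unmarshal([]byte("[1, 2, 3]"), &flow); err != nil {
		panic(err)
	}
	if err := yaml.Unmarshal([]byte("- 1\n- 2\n- 3\n"), &block); err != nil {
		panic(err)
	}
	fmt.Println(reflect.DeepEqual(flow, block)) // true
}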
+// Ensure that the buffer contains the required number of characters.
+// Return true on success, false on failure (reader error or memory error).
+func cache(parser *yaml_parser_t, length int) bool {
+	// [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B)
+	return parser.unread >= length || yaml_parser_update_buffer(parser, length)
+}
+
+// Advance the buffer pointer.
+func skip(parser *yaml_parser_t) {
+	parser.mark.index++
+	parser.mark.column++
+	parser.unread--
+	parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+}
+
+func skip_line(parser *yaml_parser_t) {
+	if is_crlf(parser.buffer, parser.buffer_pos) {
+		parser.mark.index += 2
+		parser.mark.column = 0
+		parser.mark.line++
+		parser.unread -= 2
+		parser.buffer_pos += 2
+	} else if is_break(parser.buffer, parser.buffer_pos) {
+		parser.mark.index++
+		parser.mark.column = 0
+		parser.mark.line++
+		parser.unread--
+		parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+	}
+}
+
+// Copy a character to a string buffer and advance pointers.
+func read(parser *yaml_parser_t, s []byte) []byte {
+	w := width(parser.buffer[parser.buffer_pos])
+	if w == 0 {
+		panic("invalid character sequence")
+	}
+	if len(s) == 0 {
+		s = make([]byte, 0, 32)
+	}
+	if w == 1 && len(s)+w <= cap(s) {
+		s = s[:len(s)+1]
+		s[len(s)-1] = parser.buffer[parser.buffer_pos]
+		parser.buffer_pos++
+	} else {
+		s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
+		parser.buffer_pos += w
+	}
+	parser.mark.index++
+	parser.mark.column++
+	parser.unread--
+	return s
+}
+
+// Copy a line break character to a string buffer and advance pointers.
+func read_line(parser *yaml_parser_t, s []byte) []byte {
+	buf := parser.buffer
+	pos := parser.buffer_pos
+	switch {
+	case buf[pos] == '\r' && buf[pos+1] == '\n':
+		// CR LF . LF
+		s = append(s, '\n')
+		parser.buffer_pos += 2
+		parser.mark.index++
+		parser.unread--
+	case buf[pos] == '\r' || buf[pos] == '\n':
+		// CR|LF . LF
+		s = append(s, '\n')
+		parser.buffer_pos += 1
+	case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
+		// NEL . LF
+		s = append(s, '\n')
+		parser.buffer_pos += 2
+	case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
+		// LS|PS . LS|PS
+		s = append(s, buf[parser.buffer_pos:pos+3]...)
+		parser.buffer_pos += 3
+	default:
+		return s
+	}
+	parser.mark.index++
+	parser.mark.column = 0
+	parser.mark.line++
+	parser.unread--
+	return s
+}
+
+// Get the next token.
+func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
+	// Erase the token object.
+	*token = yaml_token_t{} // [Go] Is this necessary?
+
+	// No tokens after STREAM-END or error.
+	if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
+		return true
+	}
+
+	// Ensure that the tokens queue contains enough tokens.
+	if !parser.token_available {
+		if !yaml_parser_fetch_more_tokens(parser) {
+			return false
+		}
+	}
+
+	// Fetch the next token from the queue.
+	*token = parser.tokens[parser.tokens_head]
+	parser.tokens_head++
+	parser.tokens_parsed++
+	parser.token_available = false
+
+	if token.typ == yaml_STREAM_END_TOKEN {
+		parser.stream_end_produced = true
+	}
+	return true
+}
+
+// Set the scanner error and return false.
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
+	parser.error = yaml_SCANNER_ERROR
+	parser.context = context
+	parser.context_mark = context_mark
+	parser.problem = problem
+	parser.problem_mark = parser.mark
+	return false
+}
+
+func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
+	context := "while parsing a tag"
+	if directive {
+		context = "while parsing a %TAG directive"
+	}
+	return yaml_parser_set_scanner_error(parser, context, context_mark, "did not find URI escaped octet")
+}
+
+func trace(args ...interface{}) func() {
+	pargs := append([]interface{}{"+++"}, args...)
+	fmt.Println(pargs...)
+	pargs = append([]interface{}{"---"}, args...)
+	return func() { fmt.Println(pargs...) }
+}
+
+// Ensure that the tokens queue contains at least one token which can be
+// returned to the Parser.
+func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
+	// While we need more tokens to fetch, do it.
+	for {
+		// Check if we really need to fetch more tokens.
+		need_more_tokens := false
+
+		if parser.tokens_head == len(parser.tokens) {
+			// Queue is empty.
+			need_more_tokens = true
+		} else {
+			// Check if any potential simple key may occupy the head position.
+			if !yaml_parser_stale_simple_keys(parser) {
+				return false
+			}
+
+			for i := range parser.simple_keys {
+				simple_key := &parser.simple_keys[i]
+				if simple_key.possible && simple_key.token_number == parser.tokens_parsed {
+					need_more_tokens = true
+					break
+				}
+			}
+		}
+
+		// We are finished.
+		if !need_more_tokens {
+			break
+		}
+		// Fetch the next token.
+		if !yaml_parser_fetch_next_token(parser) {
+			return false
+		}
+	}
+
+	parser.token_available = true
+	return true
+}
+
+// The dispatcher for token fetchers.
+func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool {
+	// Ensure that the buffer is initialized.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	// Check if we just started scanning.  Fetch STREAM-START then.
+	if !parser.stream_start_produced {
+		return yaml_parser_fetch_stream_start(parser)
+	}
+
+	// Eat whitespace and comments until we reach the next token.
+	if !yaml_parser_scan_to_next_token(parser) {
+		return false
+	}
+
+	// Remove obsolete potential simple keys.
+	if !yaml_parser_stale_simple_keys(parser) {
+		return false
+	}
+
+	// Check the indentation level against the current column.
+	if !yaml_parser_unroll_indent(parser, parser.mark.column) {
+		return false
+	}
+
+	// Ensure that the buffer contains at least 4 characters.  4 is the length
+	// of the longest indicators ('--- ' and '... ').
+	if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+		return false
+	}
+
+	// Is it the end of the stream?
+	if is_z(parser.buffer, parser.buffer_pos) {
+		return yaml_parser_fetch_stream_end(parser)
+	}
+
+	// Is it a directive?
+	if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
+		return yaml_parser_fetch_directive(parser)
+	}
+
+	buf := parser.buffer
+	pos := parser.buffer_pos
+
+	// Is it the document start indicator?
+	if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
+		return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
+	}
+
+	// Is it the document end indicator?
+	if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) {
+		return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN)
+	}
+
+	// Is it the flow sequence start indicator?
+	if buf[pos] == '[' {
+		return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN)
+	}
+
+	// Is it the flow mapping start indicator?
+	if parser.buffer[parser.buffer_pos] == '{' {
+		return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN)
+	}
+
+	// Is it the flow sequence end indicator?
+	if parser.buffer[parser.buffer_pos] == ']' {
+		return yaml_parser_fetch_flow_collection_end(parser,
+			yaml_FLOW_SEQUENCE_END_TOKEN)
+	}
+
+	// Is it the flow mapping end indicator?
+	if parser.buffer[parser.buffer_pos] == '}' {
+		return yaml_parser_fetch_flow_collection_end(parser,
+			yaml_FLOW_MAPPING_END_TOKEN)
+	}
+
+	// Is it the flow entry indicator?
+	if parser.buffer[parser.buffer_pos] == ',' {
+		return yaml_parser_fetch_flow_entry(parser)
+	}
+
+	// Is it the block entry indicator?
+	if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) {
+		return yaml_parser_fetch_block_entry(parser)
+	}
+
+	// Is it the key indicator?
+	if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+		return yaml_parser_fetch_key(parser)
+	}
+
+	// Is it the value indicator?
+	if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+		return yaml_parser_fetch_value(parser)
+	}
+
+	// Is it an alias?
+	if parser.buffer[parser.buffer_pos] == '*' {
+		return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN)
+	}
+
+	// Is it an anchor?
+	if parser.buffer[parser.buffer_pos] == '&' {
+		return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN)
+	}
+
+	// Is it a tag?
+	if parser.buffer[parser.buffer_pos] == '!' {
+		return yaml_parser_fetch_tag(parser)
+	}
+
+	// Is it a literal scalar?
+	if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 {
+		return yaml_parser_fetch_block_scalar(parser, true)
+	}
+
+	// Is it a folded scalar?
+	if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 {
+		return yaml_parser_fetch_block_scalar(parser, false)
+	}
+
+	// Is it a single-quoted scalar?
+	if parser.buffer[parser.buffer_pos] == '\'' {
+		return yaml_parser_fetch_flow_scalar(parser, true)
+	}
+
+	// Is it a double-quoted scalar?
+	if parser.buffer[parser.buffer_pos] == '"' {
+		return yaml_parser_fetch_flow_scalar(parser, false)
+	}
+
+	// Is it a plain scalar?
+	//
+	// A plain scalar may start with any non-blank characters except
+	//
+	//      '-', '?', ':', ',', '[', ']', '{', '}',
+	//      '#', '&', '*', '!', '|', '>', '\'', '\"',
+	//      '%', '@', '`'.
+	//
+	// In the block context (and, for the '-' indicator, in the flow context
+	// too), it may also start with the characters
+	//
+	//      '-', '?', ':'
+	//
+	// if it is followed by a non-space character.
+	//
+	// The last rule is more restrictive than the specification requires.
+	// [Go] Make this logic more reasonable.
+	//switch parser.buffer[parser.buffer_pos] {
+	//case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`':
+	//}
+	if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' ||
+		parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' ||
+		parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' ||
+		parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+		parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' ||
+		parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' ||
+		parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' ||
+		parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' ||
+		parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' ||
+		parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') ||
+		(parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) ||
+		(parser.flow_level == 0 &&
+			(parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') &&
+			!is_blankz(parser.buffer, parser.buffer_pos+1)) {
+		return yaml_parser_fetch_plain_scalar(parser)
+	}
+
+	// If we couldn't determine the token type by this point, it is an error.
+	return yaml_parser_set_scanner_error(parser,
+		"while scanning for the next token", parser.mark,
+		"found character that cannot start any token")
+}
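
The indicator chain above is a direct port of libyaml's scanner dispatch. For the single-byte flow indicators, a table-driven equivalent is possible; the sketch below is hypothetical (not part of the vendored file) and only reuses the fetch functions defined later in this file. The real scanner keeps the explicit if-chain because several checks also inspect the following byte.

// Hypothetical, within this package: map the flow indicator bytes to the
// fetchers they dispatch to. Values close over the token type argument.
var flowIndicatorFetchers = map[byte]func(*yaml_parser_t) bool{
	'[': func(p *yaml_parser_t) bool {
		return yaml_parser_fetch_flow_collection_start(p, yaml_FLOW_SEQUENCE_START_TOKEN)
	},
	'{': func(p *yaml_parser_t) bool {
		return yaml_parser_fetch_flow_collection_start(p, yaml_FLOW_MAPPING_START_TOKEN)
	},
	']': func(p *yaml_parser_t) bool {
		return yaml_parser_fetch_flow_collection_end(p, yaml_FLOW_SEQUENCE_END_TOKEN)
	},
	'}': func(p *yaml_parser_t) bool {
		return yaml_parser_fetch_flow_collection_end(p, yaml_FLOW_MAPPING_END_TOKEN)
	},
	',': yaml_parser_fetch_flow_entry,
}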
+
+// Check the list of potential simple keys and remove the positions that
+// cannot contain simple keys anymore.
+func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool {
+	// Check for a potential simple key for each flow level.
+	for i := range parser.simple_keys {
+		simple_key := &parser.simple_keys[i]
+
+		// The specification requires that a simple key
+		//
+		//  - is limited to a single line,
+		//  - is shorter than 1024 characters.
+		if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) {
+
+			// Check if the potential simple key to be removed is required.
+			if simple_key.required {
+				return yaml_parser_set_scanner_error(parser,
+					"while scanning a simple key", simple_key.mark,
+					"could not find expected ':'")
+			}
+			simple_key.possible = false
+		}
+	}
+	return true
+}
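
The two staleness conditions read naturally as a standalone predicate. A minimal sketch, assuming the yaml_simple_key_t and yaml_mark_t types used above (the function name is hypothetical):

// simpleKeyIsStale reports whether a saved simple key can no longer be used:
// the scanner has left its line, or has moved more than 1024 characters
// past where the key started.
func simpleKeyIsStale(key *yaml_simple_key_t, cur yaml_mark_t) bool {
	return key.possible &&
		(key.mark.line < cur.line || key.mark.index+1024 < cur.index)
}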
+
+// Check if a simple key may start at the current position and add it if
+// needed.
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+	// A simple key is required at the current position if the scanner is in
+	// the block context and the current column coincides with the indentation
+	// level.
+
+	required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+	// A simple key is required only when it is the first token in the current
+	// line.  Therefore it is always allowed.  But we add a check anyway.
+	if required && !parser.simple_key_allowed {
+		panic("should not happen")
+	}
+
+	//
+	// If the current position may start a simple key, save it.
+	//
+	if parser.simple_key_allowed {
+		simple_key := yaml_simple_key_t{
+			possible:     true,
+			required:     required,
+			token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+		}
+		simple_key.mark = parser.mark
+
+		if !yaml_parser_remove_simple_key(parser) {
+			return false
+		}
+		parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+	}
+	return true
+}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+	i := len(parser.simple_keys) - 1
+	if parser.simple_keys[i].possible {
+		// If the key is required, it is an error.
+		if parser.simple_keys[i].required {
+			return yaml_parser_set_scanner_error(parser,
+				"while scanning a simple key", parser.simple_keys[i].mark,
+				"could not find expected ':'")
+		}
+	}
+	// Remove the key from the stack.
+	parser.simple_keys[i].possible = false
+	return true
+}
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+	// Reset the simple key on the next level.
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+	// Increase the flow level.
+	parser.flow_level++
+	return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+	if parser.flow_level > 0 {
+		parser.flow_level--
+		parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
+	}
+	return true
+}
+
+// Push the current indentation level to the stack and set the new
+// indentation level if the current column is greater than the indentation
+// level.  In this case, append or insert the specified token into the
+// token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	if parser.indent < column {
+		// Push the current indentation level to the stack and set the new
+		// indentation level.
+		parser.indents = append(parser.indents, parser.indent)
+		parser.indent = column
+
+		// Create a token and insert it into the queue.
+		token := yaml_token_t{
+			typ:        typ,
+			start_mark: mark,
+			end_mark:   mark,
+		}
+		if number > -1 {
+			number -= parser.tokens_parsed
+		}
+		yaml_insert_token(parser, number, &token)
+	}
+	return true
+}
+
+// Pop indentation levels from the indents stack until the current level
+// becomes less than or equal to the column.  For each popped level, append
+// a BLOCK-END token.
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	// Loop through the indentation levels in the stack.
+	for parser.indent > column {
+		// Create a token and append it to the queue.
+		token := yaml_token_t{
+			typ:        yaml_BLOCK_END_TOKEN,
+			start_mark: parser.mark,
+			end_mark:   parser.mark,
+		}
+		yaml_insert_token(parser, -1, &token)
+
+		// Pop the indentation level.
+		parser.indent = parser.indents[len(parser.indents)-1]
+		parser.indents = parser.indents[:len(parser.indents)-1]
+	}
+	return true
+}
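
roll_indent and unroll_indent maintain a stack of indentation columns; one BLOCK-END token is emitted per popped level. A self-contained sketch of the unroll side, with plain ints instead of parser state (names are illustrative only):

// unrollBlockEnds counts the BLOCK-END tokens that unrolling to the given
// column would produce: pop levels until the current indent is <= column.
func unrollBlockEnds(indents []int, indent, column int) int {
	popped := 0
	for indent > column && len(indents) > 0 {
		indent = indents[len(indents)-1]
		indents = indents[:len(indents)-1]
		popped++
	}
	return popped
}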
+
+// Initialize the scanner and produce the STREAM-START token.
+func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
+
+	// Set the initial indentation.
+	parser.indent = -1
+
+	// Initialize the simple key stack.
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+	// A simple key is allowed at the beginning of the stream.
+	parser.simple_key_allowed = true
+
+	// We have started.
+	parser.stream_start_produced = true
+
+	// Create the STREAM-START token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_STREAM_START_TOKEN,
+		start_mark: parser.mark,
+		end_mark:   parser.mark,
+		encoding:   parser.encoding,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the STREAM-END token and shut down the scanner.
+func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
+
+	// Force new line.
+	if parser.mark.column != 0 {
+		parser.mark.column = 0
+		parser.mark.line++
+	}
+
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Create the STREAM-END token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_STREAM_END_TOKEN,
+		start_mark: parser.mark,
+		end_mark:   parser.mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Create the VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+	token := yaml_token_t{}
+	if !yaml_parser_scan_directive(parser, &token) {
+		return false
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the DOCUMENT-START or DOCUMENT-END token.
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Consume the token.
+	start_mark := parser.mark
+
+	skip(parser)
+	skip(parser)
+	skip(parser)
+
+	end_mark := parser.mark
+
+	// Create the DOCUMENT-START or DOCUMENT-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// The indicators '[' and '{' may start a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// Increase the flow level.
+	if !yaml_parser_increase_flow_level(parser) {
+		return false
+	}
+
+	// A simple key may follow the indicators '[' and '{'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// Reset any potential simple key on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Decrease the flow level.
+	if !yaml_parser_decrease_flow_level(parser) {
+		return false
+	}
+
+	// No simple keys after the indicators ']' and '}'.
+	parser.simple_key_allowed = false
+
+	// Consume the token.
+
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-ENTRY token.
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after ','.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_FLOW_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the BLOCK-ENTRY token.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+	// Check if the scanner is in the block context.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new entry.
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"block sequence entries are not allowed in this context")
+		}
+		// Add the BLOCK-SEQUENCE-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+			return false
+		}
+	} else {
+		// It is an error for the '-' indicator to occur in the flow context,
+		// but we let the Parser detect and report about it because the Parser
+		// is able to point to the context.
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '-'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the BLOCK-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_BLOCK_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+	// In the block context, additional checks are required.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new key (not necessarily simple).
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"mapping keys are not allowed in this context")
+		}
+		// Add the BLOCK-MAPPING-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+			return false
+		}
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '?' in the block context.
+	parser.simple_key_allowed = parser.flow_level == 0
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the KEY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_KEY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the VALUE token.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+	simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+	// Have we found a simple key?
+	if simple_key.possible {
+		// Create the KEY token and insert it into the queue.
+		token := yaml_token_t{
+			typ:        yaml_KEY_TOKEN,
+			start_mark: simple_key.mark,
+			end_mark:   simple_key.mark,
+		}
+		yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
+
+		// In the block context, we may need to add the BLOCK-MAPPING-START token.
+		if !yaml_parser_roll_indent(parser, simple_key.mark.column,
+			simple_key.token_number,
+			yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
+			return false
+		}
+
+		// Remove the simple key.
+		simple_key.possible = false
+
+		// A simple key cannot follow another simple key.
+		parser.simple_key_allowed = false
+
+	} else {
+		// The ':' indicator follows a complex key.
+
+		// In the block context, extra checks are required.
+		if parser.flow_level == 0 {
+
+			// Check if we are allowed to start a complex value.
+			if !parser.simple_key_allowed {
+				return yaml_parser_set_scanner_error(parser, "", parser.mark,
+					"mapping values are not allowed in this context")
+			}
+
+			// Add the BLOCK-MAPPING-START token if needed.
+			if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+				return false
+			}
+		}
+
+		// Simple keys after ':' are allowed in the block context.
+		parser.simple_key_allowed = parser.flow_level == 0
+	}
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the VALUE token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_VALUE_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
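
This retroactive insertion is what lets `a: b` scan left to right: the scalar `a` is only promoted to a key once the `:` is seen, at the mark saved by yaml_parser_save_simple_key. Illustratively, the token stream for the document `a: b` comes out roughly as:

BLOCK-MAPPING-START   (inserted retroactively by roll_indent)
KEY                   (inserted retroactively at the saved mark)
SCALAR("a", plain)
VALUE
SCALAR("b", plain)
BLOCK-END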
+
+// Produce the ALIAS or ANCHOR token.
+func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// An anchor or an alias could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow an anchor or an alias.
+	parser.simple_key_allowed = false
+
+	// Create the ALIAS or ANCHOR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_anchor(parser, &token, typ) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the TAG token.
+func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
+	// A tag could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow a tag.
+	parser.simple_key_allowed = false
+
+	// Create the TAG token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_tag(parser, &token) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
+func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
+	// Remove any potential simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// A simple key may follow a block scalar.
+	parser.simple_key_allowed = true
+
+	// Create the SCALAR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_block_scalar(parser, &token, literal) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
+func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
+	// A quoted scalar could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow a flow scalar.
+	parser.simple_key_allowed = false
+
+	// Create the SCALAR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_flow_scalar(parser, &token, single) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the SCALAR(...,plain) token.
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
+	// A plain scalar could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow a plain scalar.
+	parser.simple_key_allowed = false
+
+	// Create the SCALAR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_plain_scalar(parser, &token) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Eat whitespaces and comments until the next token is found.
+func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
+
+	// Loop until the next token is found.
+	for {
+		// Allow the BOM mark to start a line.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+		}
+
+		// Eat whitespaces.
+		// Tabs are allowed:
+		//  - in the flow context
+		//  - in the block context, but not at the beginning of the line or
+		//  after '-', '?', or ':' (complex value).
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Eat a comment until a line break.
+		if parser.buffer[parser.buffer_pos] == '#' {
+			for !is_breakz(parser.buffer, parser.buffer_pos) {
+				skip(parser)
+				if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+					return false
+				}
+			}
+		}
+
+		// If it is a line break, eat it.
+		if is_break(parser.buffer, parser.buffer_pos) {
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+			skip_line(parser)
+
+			// In the block context, a new line may start a simple key.
+			if parser.flow_level == 0 {
+				parser.simple_key_allowed = true
+			}
+		} else {
+			break // We have found a token.
+		}
+	}
+
+	return true
+}
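
Note the tab rule above: at the start of a block-context line (where a simple key is allowed) a tab is not skipped as whitespace, so indenting with tabs surfaces as a scanner error. A usage sketch through the public API; the exact error text is not guaranteed here, only that the parse fails:

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	var v map[string]interface{}
	// The nested mapping is indented with a tab, which the scanner rejects.
	err := yaml.Unmarshal([]byte("a:\n\tb: 1\n"), &v)
	fmt.Println(err) // expected: a non-nil scanner error
}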
+
+// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
+//
+// Scope:
+//      %YAML    1.1    # a comment \n
+//      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//      %TAG    !yaml!  tag:yaml.org,2002:  \n
+//      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
+	// Eat '%'.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Scan the directive name.
+	var name []byte
+	if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
+		return false
+	}
+
+	// Is it a YAML directive?
+	if bytes.Equal(name, []byte("YAML")) {
+		// Scan the VERSION directive value.
+		var major, minor int8
+		if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
+			return false
+		}
+		end_mark := parser.mark
+
+		// Create a VERSION-DIRECTIVE token.
+		*token = yaml_token_t{
+			typ:        yaml_VERSION_DIRECTIVE_TOKEN,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			major:      major,
+			minor:      minor,
+		}
+
+		// Is it a TAG directive?
+	} else if bytes.Equal(name, []byte("TAG")) {
+		// Scan the TAG directive value.
+		var handle, prefix []byte
+		if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
+			return false
+		}
+		end_mark := parser.mark
+
+		// Create a TAG-DIRECTIVE token.
+		*token = yaml_token_t{
+			typ:        yaml_TAG_DIRECTIVE_TOKEN,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			value:      handle,
+			prefix:     prefix,
+		}
+
+		// Unknown directive.
+	} else {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "found unknown directive name")
+		return false
+	}
+
+	// Eat the rest of the line including any comments.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	if parser.buffer[parser.buffer_pos] == '#' {
+		for !is_breakz(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+	}
+
+	// Check if we are at the end of the line.
+	if !is_breakz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "did not find expected comment or line break")
+		return false
+	}
+
+	// Eat a line break.
+	if is_break(parser.buffer, parser.buffer_pos) {
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+		skip_line(parser)
+	}
+
+	return true
+}
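
End to end, this is the code path exercised by a document that opens with a %YAML directive. A usage sketch through the public API, assuming nothing beyond what the vendored package exports:

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	doc := []byte("%YAML 1.1\n---\nname: compose\n")
	out := map[string]string{}
	if err := yaml.Unmarshal(doc, &out); err != nil {
		panic(err)
	}
	fmt.Println(out["name"]) // compose
}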
+
+// Scan the directive name.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//       ^^^^
+//      %TAG    !yaml!  tag:yaml.org,2002:  \n
+//       ^^^
+//
+func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
+	// Consume the directive name.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	var s []byte
+	for is_alpha(parser.buffer, parser.buffer_pos) {
+		s = read(parser, s)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the name is empty.
+	if len(s) == 0 {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "could not find expected directive name")
+		return false
+	}
+
+	// Check for a blank character after the name.
+	if !is_blankz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "found unexpected non-alphabetical character")
+		return false
+	}
+	*name = s
+	return true
+}
+
+// Scan the value of VERSION-DIRECTIVE.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//           ^^^^^^
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
+	// Eat whitespaces.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Consume the major version number.
+	if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
+		return false
+	}
+
+	// Eat '.'.
+	if parser.buffer[parser.buffer_pos] != '.' {
+		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+			start_mark, "did not find expected digit or '.' character")
+	}
+
+	skip(parser)
+
+	// Consume the minor version number.
+	if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
+		return false
+	}
+	return true
+}
+
+const max_number_length = 2
+
+// Scan the version number of VERSION-DIRECTIVE.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//              ^
+//      %YAML   1.1     # a comment \n
+//                ^
+func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
+
+	// Repeat while the next character is a digit.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	var value, length int8
+	for is_digit(parser.buffer, parser.buffer_pos) {
+		// Check if the number is too long.
+		length++
+		if length > max_number_length {
+			return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+				start_mark, "found extremely long version number")
+		}
+		value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the number was present.
+	if length == 0 {
+		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+			start_mark, "did not find expected version number")
+	}
+	*number = value
+	return true
+}
+
+// Scan the value of a TAG-DIRECTIVE token.
+//
+// Scope:
+//      %TAG    !yaml!  tag:yaml.org,2002:  \n
+//          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
+	var handle_value, prefix_value []byte
+
+	// Eat whitespaces.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Scan a handle.
+	if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) {
+		return false
+	}
+
+	// Expect a whitespace.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	if !is_blank(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+			start_mark, "did not find expected whitespace")
+		return false
+	}
+
+	// Eat whitespaces.
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Scan a prefix.
+	if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) {
+		return false
+	}
+
+	// Expect a whitespace or line break.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	if !is_blankz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+			start_mark, "did not find expected whitespace or line break")
+		return false
+	}
+
+	*handle = handle_value
+	*prefix = prefix_value
+	return true
+}
+
+func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool {
+	var s []byte
+
+	// Eat the indicator character.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Consume the value.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	for is_alpha(parser.buffer, parser.buffer_pos) {
+		s = read(parser, s)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	end_mark := parser.mark
+
+	/*
+	 * Check if length of the anchor is greater than 0 and it is followed by
+	 * a whitespace character or one of the indicators:
+	 *
+	 *      '?', ':', ',', ']', '}', '%', '@', '`'.
+	 */
+
+	if len(s) == 0 ||
+		!(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' ||
+			parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' ||
+			parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' ||
+			parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' ||
+			parser.buffer[parser.buffer_pos] == '`') {
+		context := "while scanning an alias"
+		if typ == yaml_ANCHOR_TOKEN {
+			context = "while scanning an anchor"
+		}
+		yaml_parser_set_scanner_error(parser, context, start_mark,
+			"did not find expected alphabetic or numeric character")
+		return false
+	}
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+	}
+
+	return true
+}
+
+/*
+ * Scan a TAG token.
+ */
+
+func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
+	var handle, suffix []byte
+
+	start_mark := parser.mark
+
+	// Check if the tag is in the canonical form.
+	if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+		return false
+	}
+
+	if parser.buffer[parser.buffer_pos+1] == '<' {
+		// Keep the handle as ''
+
+		// Eat '!<'
+		skip(parser)
+		skip(parser)
+
+		// Consume the tag value.
+		if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+			return false
+		}
+
+		// Check for '>' and eat it.
+		if parser.buffer[parser.buffer_pos] != '>' {
+			yaml_parser_set_scanner_error(parser, "while scanning a tag",
+				start_mark, "did not find the expected '>'")
+			return false
+		}
+
+		skip(parser)
+	} else {
+		// The tag has either the '!suffix' or the '!handle!suffix' form.
+
+		// First, try to scan a handle.
+		if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
+			return false
+		}
+
+		// Check if it is, indeed, a handle.
+		if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
+			// Scan the suffix now.
+			if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+				return false
+			}
+		} else {
+			// It wasn't a handle after all.  Scan the rest of the tag.
+			if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
+				return false
+			}
+
+			// Set the handle to '!'.
+			handle = []byte{'!'}
+
+			// A special case: the '!' tag.  Set the handle to '' and the
+			// suffix to '!'.
+			if len(suffix) == 0 {
+				handle, suffix = suffix, handle
+			}
+		}
+	}
+
+	// Check the character which ends the tag.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	if !is_blankz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a tag",
+			start_mark, "did not find expected whitespace or line break")
+		return false
+	}
+
+	end_mark := parser.mark
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_TAG_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      handle,
+		suffix:     suffix,
+	}
+	return true
+}
+
+// Scan a tag handle.
+func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
+	// Check the initial '!' character.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	if parser.buffer[parser.buffer_pos] != '!' {
+		yaml_parser_set_scanner_tag_error(parser, directive,
+			start_mark, "did not find expected '!'")
+		return false
+	}
+
+	var s []byte
+
+	// Copy the '!' character.
+	s = read(parser, s)
+
+	// Copy all subsequent alphabetical and numerical characters.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	for is_alpha(parser.buffer, parser.buffer_pos) {
+		s = read(parser, s)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the trailing character is '!' and copy it.
+	if parser.buffer[parser.buffer_pos] == '!' {
+		s = read(parser, s)
+	} else {
+		// It's either the '!' tag or not really a tag handle.  If it's a %TAG
+		// directive, it's an error.  If it's a tag token, it must be part of a URI.
+		if directive && !(s[0] == '!' && s[1] == 0) {
+			yaml_parser_set_scanner_tag_error(parser, directive,
+				start_mark, "did not find expected '!'")
+			return false
+		}
+	}
+
+	*handle = s
+	return true
+}
+
+// Scan a tag.
+func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
+	//size_t length = head ? strlen((char *)head) : 0
+	var s []byte
+
+	// Copy the head if needed.
+	//
+	// Note that we don't copy the leading '!' character.
+	if len(head) > 1 {
+		s = append(s, head[1:]...)
+	}
+
+	// Scan the tag.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	// The set of characters that may appear in URI is as follows:
+	//
+	//      '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
+	//      '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
+	//      '%'.
+	// [Go] Convert this into more reasonable logic.
+	for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
+		parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
+		parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
+		parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
+		parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
+		parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
+		parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
+		parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
+		parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
+		parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
+		parser.buffer[parser.buffer_pos] == '%' {
+		// Check if it is a URI-escape sequence.
+		if parser.buffer[parser.buffer_pos] == '%' {
+			if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
+				return false
+			}
+		} else {
+			s = read(parser, s)
+		}
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the tag is non-empty.
+	if len(s) == 0 {
+		yaml_parser_set_scanner_tag_error(parser, directive,
+			start_mark, "did not find expected tag URI")
+		return false
+	}
+	*uri = s
+	return true
+}
+
+// Decode a URI-escape sequence corresponding to a single UTF-8 character.
+func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
+
+	// Decode the required number of characters.
+	w := 1024
+	for w > 0 {
+		// Check for a URI-escaped octet.
+		if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+			return false
+		}
+
+		if !(parser.buffer[parser.buffer_pos] == '%' &&
+			is_hex(parser.buffer, parser.buffer_pos+1) &&
+			is_hex(parser.buffer, parser.buffer_pos+2)) {
+			return yaml_parser_set_scanner_tag_error(parser, directive,
+				start_mark, "did not find URI escaped octet")
+		}
+
+		// Get the octet.
+		octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
+
+		// If it is the leading octet, determine the length of the UTF-8 sequence.
+		if w == 1024 {
+			w = width(octet)
+			if w == 0 {
+				return yaml_parser_set_scanner_tag_error(parser, directive,
+					start_mark, "found an incorrect leading UTF-8 octet")
+			}
+		} else {
+			// Check if the trailing octet is correct.
+			if octet&0xC0 != 0x80 {
+				return yaml_parser_set_scanner_tag_error(parser, directive,
+					start_mark, "found an incorrect trailing UTF-8 octet")
+			}
+		}
+
+		// Copy the octet and move the pointers.
+		*s = append(*s, octet)
+		skip(parser)
+		skip(parser)
+		skip(parser)
+		w--
+	}
+	return true
+}
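
The same decoding, detached from the parser's buffer machinery, as a self-contained sketch (the helper names are hypothetical; the leading/trailing octet checks mirror the ones above):

package main

import "fmt"

func unhex(c byte) int {
	switch {
	case c >= '0' && c <= '9':
		return int(c - '0')
	case c >= 'a' && c <= 'f':
		return int(c-'a') + 10
	case c >= 'A' && c <= 'F':
		return int(c-'A') + 10
	}
	return -1
}

// decodeEscapedRune decodes one UTF-8 character's worth of %XX octets,
// validating the leading octet's width and each trailing octet's 10xxxxxx
// pattern the same way the scanner does.
func decodeEscapedRune(in string) ([]byte, error) {
	var out []byte
	w := 0
	for i := 0; w == 0 || len(out) < w; i += 3 {
		if i+2 >= len(in) || in[i] != '%' {
			return nil, fmt.Errorf("did not find URI escaped octet")
		}
		hi, lo := unhex(in[i+1]), unhex(in[i+2])
		if hi < 0 || lo < 0 {
			return nil, fmt.Errorf("did not find URI escaped octet")
		}
		octet := byte(hi<<4 | lo)
		if len(out) == 0 {
			switch {
			case octet&0x80 == 0x00:
				w = 1
			case octet&0xE0 == 0xC0:
				w = 2
			case octet&0xF0 == 0xE0:
				w = 3
			case octet&0xF8 == 0xF0:
				w = 4
			default:
				return nil, fmt.Errorf("found an incorrect leading UTF-8 octet")
			}
		} else if octet&0xC0 != 0x80 {
			return nil, fmt.Errorf("found an incorrect trailing UTF-8 octet")
		}
		out = append(out, octet)
	}
	return out, nil
}

func main() {
	b, err := decodeEscapedRune("%C3%A9")
	fmt.Println(string(b), err) // é <nil>
}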
+
+// Scan a block scalar.
+func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
+	// Eat the indicator '|' or '>'.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Scan the additional block scalar indicators.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	// Check for a chomping indicator.
+	var chomping, increment int
+	if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+		// Set the chomping method and eat the indicator.
+		if parser.buffer[parser.buffer_pos] == '+' {
+			chomping = +1
+		} else {
+			chomping = -1
+		}
+		skip(parser)
+
+		// Check for an indentation indicator.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		if is_digit(parser.buffer, parser.buffer_pos) {
+			// Check that the indentation is greater than 0.
+			if parser.buffer[parser.buffer_pos] == '0' {
+				yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+					start_mark, "found an indentation indicator equal to 0")
+				return false
+			}
+
+			// Get the indentation level and eat the indicator.
+			increment = as_digit(parser.buffer, parser.buffer_pos)
+			skip(parser)
+		}
+
+	} else if is_digit(parser.buffer, parser.buffer_pos) {
+		// Do the same as above, but in the opposite order.
+
+		if parser.buffer[parser.buffer_pos] == '0' {
+			yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+				start_mark, "found an indentation indicator equal to 0")
+			return false
+		}
+		increment = as_digit(parser.buffer, parser.buffer_pos)
+		skip(parser)
+
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+			if parser.buffer[parser.buffer_pos] == '+' {
+				chomping = +1
+			} else {
+				chomping = -1
+			}
+			skip(parser)
+		}
+	}
+
+	// Eat whitespaces and comments to the end of the line.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+	if parser.buffer[parser.buffer_pos] == '#' {
+		for !is_breakz(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+	}
+
+	// Check if we are at the end of the line.
+	if !is_breakz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+			start_mark, "did not find expected comment or line break")
+		return false
+	}
+
+	// Eat a line break.
+	if is_break(parser.buffer, parser.buffer_pos) {
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+		skip_line(parser)
+	}
+
+	end_mark := parser.mark
+
+	// Set the indentation level if it was specified.
+	var indent int
+	if increment > 0 {
+		if parser.indent >= 0 {
+			indent = parser.indent + increment
+		} else {
+			indent = increment
+		}
+	}
+
+	// Scan the leading line breaks and determine the indentation level if needed.
+	var s, leading_break, trailing_breaks []byte
+	if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+		return false
+	}
+
+	// Scan the block scalar content.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	var leading_blank, trailing_blank bool
+	for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
+		// We are at the beginning of a non-empty line.
+
+		// Is it a trailing whitespace?
+		trailing_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+		// Check if we need to fold the leading line break.
+		if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
+			// Do we need to join the lines by space?
+			if len(trailing_breaks) == 0 {
+				s = append(s, ' ')
+			}
+		} else {
+			s = append(s, leading_break...)
+		}
+		leading_break = leading_break[:0]
+
+		// Append the remaining line breaks.
+		s = append(s, trailing_breaks...)
+		trailing_breaks = trailing_breaks[:0]
+
+		// Is it a leading whitespace?
+		leading_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+		// Consume the current line.
+		for !is_breakz(parser.buffer, parser.buffer_pos) {
+			s = read(parser, s)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Consume the line break.
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+
+		leading_break = read_line(parser, leading_break)
+
+		// Eat the following indentation spaces and line breaks.
+		if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+			return false
+		}
+	}
+
+	// Chomp the tail.
+	if chomping != -1 {
+		s = append(s, leading_break...)
+	}
+	if chomping == 1 {
+		s = append(s, trailing_breaks...)
+	}
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_SCALAR_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+		style:      yaml_LITERAL_SCALAR_STYLE,
+	}
+	if !literal {
+		token.style = yaml_FOLDED_SCALAR_STYLE
+	}
+	return true
+}
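
The header indicators scanned here control the scalar's final shape: '|' keeps line breaks, '>' folds them into spaces, '-' strips the final break, '+' keeps trailing breaks. A usage sketch of literal versus folded style with strip chomping:

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	var v struct {
		Lit    string `yaml:"lit"`
		Folded string `yaml:"folded"`
	}
	doc := "lit: |-\n  a\n  b\nfolded: >-\n  a\n  b\n"
	if err := yaml.Unmarshal([]byte(doc), &v); err != nil {
		panic(err)
	}
	fmt.Printf("%q %q\n", v.Lit, v.Folded) // "a\nb" "a b"
}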
+
+// Scan indentation spaces and line breaks for a block scalar.  Determine the
+// indentation level if needed.
+func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
+	*end_mark = parser.mark
+
+	// Eat the indentation spaces and line breaks.
+	max_indent := 0
+	for {
+		// Eat the indentation spaces.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+		if parser.mark.column > max_indent {
+			max_indent = parser.mark.column
+		}
+
+		// Check for a tab character messing the indentation.
+		if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
+			return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+				start_mark, "found a tab character where an indentation space is expected")
+		}
+
+		// Have we found a non-empty line?
+		if !is_break(parser.buffer, parser.buffer_pos) {
+			break
+		}
+
+		// Consume the line break.
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+		// [Go] Should really be returning breaks instead.
+		*breaks = read_line(parser, *breaks)
+		*end_mark = parser.mark
+	}
+
+	// Determine the indentation level if needed.
+	if *indent == 0 {
+		*indent = max_indent
+		if *indent < parser.indent+1 {
+			*indent = parser.indent + 1
+		}
+		if *indent < 1 {
+			*indent = 1
+		}
+	}
+	return true
+}
+
+// Scan a quoted scalar.
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
+	// Eat the left quote.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Consume the content of the quoted scalar.
+	var s, leading_break, trailing_breaks, whitespaces []byte
+	for {
+		// Check that there are no document indicators at the beginning of the line.
+		if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+			return false
+		}
+
+		if parser.mark.column == 0 &&
+			((parser.buffer[parser.buffer_pos+0] == '-' &&
+				parser.buffer[parser.buffer_pos+1] == '-' &&
+				parser.buffer[parser.buffer_pos+2] == '-') ||
+				(parser.buffer[parser.buffer_pos+0] == '.' &&
+					parser.buffer[parser.buffer_pos+1] == '.' &&
+					parser.buffer[parser.buffer_pos+2] == '.')) &&
+			is_blankz(parser.buffer, parser.buffer_pos+3) {
+			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+				start_mark, "found unexpected document indicator")
+			return false
+		}
+
+		// Check for EOF.
+		if is_z(parser.buffer, parser.buffer_pos) {
+			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+				start_mark, "found unexpected end of stream")
+			return false
+		}
+
+		// Consume non-blank characters.
+		leading_blanks := false
+		for !is_blankz(parser.buffer, parser.buffer_pos) {
+			if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+				// It is an escaped single quote.
+				s = append(s, '\'')
+				skip(parser)
+				skip(parser)
+
+			} else if single && parser.buffer[parser.buffer_pos] == '\'' {
+				// It is a right single quote.
+				break
+			} else if !single && parser.buffer[parser.buffer_pos] == '"' {
+				// It is a right double quote.
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+				// It is an escaped line break.
+				if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+					return false
+				}
+				skip(parser)
+				skip_line(parser)
+				leading_blanks = true
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+				// It is an escape sequence.
+				code_length := 0
+
+				// Check the escape character.
+				switch parser.buffer[parser.buffer_pos+1] {
+				case '0':
+					s = append(s, 0)
+				case 'a':
+					s = append(s, '\x07')
+				case 'b':
+					s = append(s, '\x08')
+				case 't', '\t':
+					s = append(s, '\x09')
+				case 'n':
+					s = append(s, '\x0A')
+				case 'v':
+					s = append(s, '\x0B')
+				case 'f':
+					s = append(s, '\x0C')
+				case 'r':
+					s = append(s, '\x0D')
+				case 'e':
+					s = append(s, '\x1B')
+				case ' ':
+					s = append(s, '\x20')
+				case '"':
+					s = append(s, '"')
+				case '\'':
+					s = append(s, '\'')
+				case '\\':
+					s = append(s, '\\')
+				case 'N': // NEL (#x85)
+					s = append(s, '\xC2')
+					s = append(s, '\x85')
+				case '_': // #xA0
+					s = append(s, '\xC2')
+					s = append(s, '\xA0')
+				case 'L': // LS (#x2028)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA8')
+				case 'P': // PS (#x2029)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA9')
+				case 'x':
+					code_length = 2
+				case 'u':
+					code_length = 4
+				case 'U':
+					code_length = 8
+				default:
+					yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+						start_mark, "found unknown escape character")
+					return false
+				}
+
+				skip(parser)
+				skip(parser)
+
+				// Consume an arbitrary escape code.
+				if code_length > 0 {
+					var value int
+
+					// Scan the character value.
+					if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+						return false
+					}
+					for k := 0; k < code_length; k++ {
+						if !is_hex(parser.buffer, parser.buffer_pos+k) {
+							yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+								start_mark, "did not find expected hexadecimal number")
+							return false
+						}
+						value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+					}
+
+					// Check the value and write the character.
+					if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+						yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+							start_mark, "found invalid Unicode character escape code")
+						return false
+					}
+					if value <= 0x7F {
+						s = append(s, byte(value))
+					} else if value <= 0x7FF {
+						s = append(s, byte(0xC0+(value>>6)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					} else if value <= 0xFFFF {
+						s = append(s, byte(0xE0+(value>>12)))
+						s = append(s, byte(0x80+((value>>6)&0x3F)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					} else {
+						s = append(s, byte(0xF0+(value>>18)))
+						s = append(s, byte(0x80+((value>>12)&0x3F)))
+						s = append(s, byte(0x80+((value>>6)&0x3F)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					}
+
+					// Advance the pointer.
+					for k := 0; k < code_length; k++ {
+						skip(parser)
+					}
+				}
+			} else {
+				// It is a non-escaped non-blank character.
+				s = read(parser, s)
+			}
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+		}
+
+		// Check if we are at the end of the scalar.
+		if single {
+			if parser.buffer[parser.buffer_pos] == '\'' {
+				break
+			}
+		} else {
+			if parser.buffer[parser.buffer_pos] == '"' {
+				break
+			}
+		}
+
+		// Consume blank characters.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+			if is_blank(parser.buffer, parser.buffer_pos) {
+				// Consume a space or a tab character.
+				if !leading_blanks {
+					whitespaces = read(parser, whitespaces)
+				} else {
+					skip(parser)
+				}
+			} else {
+				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+					return false
+				}
+
+				// Check if it is the first line break.
+				if !leading_blanks {
+					whitespaces = whitespaces[:0]
+					leading_break = read_line(parser, leading_break)
+					leading_blanks = true
+				} else {
+					trailing_breaks = read_line(parser, trailing_breaks)
+				}
+			}
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Join the whitespaces or fold line breaks.
+		if leading_blanks {
+			// Do we need to fold line breaks?
+			if len(leading_break) > 0 && leading_break[0] == '\n' {
+				if len(trailing_breaks) == 0 {
+					s = append(s, ' ')
+				} else {
+					s = append(s, trailing_breaks...)
+				}
+			} else {
+				s = append(s, leading_break...)
+				s = append(s, trailing_breaks...)
+			}
+			trailing_breaks = trailing_breaks[:0]
+			leading_break = leading_break[:0]
+		} else {
+			s = append(s, whitespaces...)
+			whitespaces = whitespaces[:0]
+		}
+	}
+
+	// Eat the right quote.
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_SCALAR_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+		style:      yaml_SINGLE_QUOTED_SCALAR_STYLE,
+	}
+	if !single {
+		token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+	return true
+}
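
Only double-quoted scalars run the escape table above; single-quoted scalars recognise just the doubled quote. A usage sketch:

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	var v struct {
		D string `yaml:"d"`
		S string `yaml:"s"`
	}
	doc := "d: \"caf\\u00e9\"\ns: 'it''s'\n"
	if err := yaml.Unmarshal([]byte(doc), &v); err != nil {
		panic(err)
	}
	fmt.Println(v.D, v.S) // café it's
}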
+
+// Scan a plain scalar.
+func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {
+
+	var s, leading_break, trailing_breaks, whitespaces []byte
+	var leading_blanks bool
+	var indent = parser.indent + 1
+
+	start_mark := parser.mark
+	end_mark := parser.mark
+
+	// Consume the content of the plain scalar.
+	for {
+		// Check for a document indicator.
+		if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+			return false
+		}
+		if parser.mark.column == 0 &&
+			((parser.buffer[parser.buffer_pos+0] == '-' &&
+				parser.buffer[parser.buffer_pos+1] == '-' &&
+				parser.buffer[parser.buffer_pos+2] == '-') ||
+				(parser.buffer[parser.buffer_pos+0] == '.' &&
+					parser.buffer[parser.buffer_pos+1] == '.' &&
+					parser.buffer[parser.buffer_pos+2] == '.')) &&
+			is_blankz(parser.buffer, parser.buffer_pos+3) {
+			break
+		}
+
+		// Check for a comment.
+		if parser.buffer[parser.buffer_pos] == '#' {
+			break
+		}
+
+		// Consume non-blank characters.
+		for !is_blankz(parser.buffer, parser.buffer_pos) {
+
+			// Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13".
+			if parser.flow_level > 0 &&
+				parser.buffer[parser.buffer_pos] == ':' &&
+				!is_blankz(parser.buffer, parser.buffer_pos+1) {
+				yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+					start_mark, "found unexpected ':'")
+				return false
+			}
+
+			// Check for indicators that may end a plain scalar.
+			if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
+				(parser.flow_level > 0 &&
+					(parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' ||
+						parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
+						parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+						parser.buffer[parser.buffer_pos] == '}')) {
+				break
+			}
+
+			// Check if we need to join whitespaces and breaks.
+			if leading_blanks || len(whitespaces) > 0 {
+				if leading_blanks {
+					// Do we need to fold line breaks?
+					if leading_break[0] == '\n' {
+						if len(trailing_breaks) == 0 {
+							s = append(s, ' ')
+						} else {
+							s = append(s, trailing_breaks...)
+						}
+					} else {
+						s = append(s, leading_break...)
+						s = append(s, trailing_breaks...)
+					}
+					trailing_breaks = trailing_breaks[:0]
+					leading_break = leading_break[:0]
+					leading_blanks = false
+				} else {
+					s = append(s, whitespaces...)
+					whitespaces = whitespaces[:0]
+				}
+			}
+
+			// Copy the character.
+			s = read(parser, s)
+
+			end_mark = parser.mark
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+		}
+
+		// Is it the end?
+		if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
+			break
+		}
+
+		// Consume blank characters.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+			if is_blank(parser.buffer, parser.buffer_pos) {
+
+				// Check for tab characters that abuse indentation.
+				if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
+					yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+						start_mark, "found a tab character that violates indentation")
+					return false
+				}
+
+				// Consume a space or a tab character.
+				if !leading_blanks {
+					whitespaces = read(parser, whitespaces)
+				} else {
+					skip(parser)
+				}
+			} else {
+				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+					return false
+				}
+
+				// Check if it is the first line break.
+				if !leading_blanks {
+					whitespaces = whitespaces[:0]
+					leading_break = read_line(parser, leading_break)
+					leading_blanks = true
+				} else {
+					trailing_breaks = read_line(parser, trailing_breaks)
+				}
+			}
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Check indentation level.
+		if parser.flow_level == 0 && parser.mark.column < indent {
+			break
+		}
+	}
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_SCALAR_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+		style:      yaml_PLAIN_SCALAR_STYLE,
+	}
+
+	// Note that we change the 'simple_key_allowed' flag.
+	if leading_blanks {
+		parser.simple_key_allowed = true
+	}
+	return true
+}
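
The same break-folding applies to plain scalars: a continuation line indented past the key folds into a single space. A usage sketch:

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	var v map[string]string
	if err := yaml.Unmarshal([]byte("key: one\n  two\n"), &v); err != nil {
		panic(err)
	}
	fmt.Println(v["key"]) // one two
}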

+ 104 - 0
vendor/gopkg.in/yaml.v2/sorter.go

@@ -0,0 +1,104 @@
+package yaml
+
+import (
+	"reflect"
+	"unicode"
+)
+
+type keyList []reflect.Value
+
+func (l keyList) Len() int      { return len(l) }
+func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l keyList) Less(i, j int) bool {
+	a := l[i]
+	b := l[j]
+	ak := a.Kind()
+	bk := b.Kind()
+	for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
+		a = a.Elem()
+		ak = a.Kind()
+	}
+	for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
+		b = b.Elem()
+		bk = b.Kind()
+	}
+	af, aok := keyFloat(a)
+	bf, bok := keyFloat(b)
+	if aok && bok {
+		if af != bf {
+			return af < bf
+		}
+		if ak != bk {
+			return ak < bk
+		}
+		return numLess(a, b)
+	}
+	if ak != reflect.String || bk != reflect.String {
+		return ak < bk
+	}
+	ar, br := []rune(a.String()), []rune(b.String())
+	for i := 0; i < len(ar) && i < len(br); i++ {
+		if ar[i] == br[i] {
+			continue
+		}
+		al := unicode.IsLetter(ar[i])
+		bl := unicode.IsLetter(br[i])
+		if al && bl {
+			return ar[i] < br[i]
+		}
+		if al || bl {
+			return bl
+		}
+		var ai, bi int
+		var an, bn int64
+		for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
+			an = an*10 + int64(ar[ai]-'0')
+		}
+		for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
+			bn = bn*10 + int64(br[bi]-'0')
+		}
+		if an != bn {
+			return an < bn
+		}
+		if ai != bi {
+			return ai < bi
+		}
+		return ar[i] < br[i]
+	}
+	return len(ar) < len(br)
+}
+
+// keyFloat returns a float value for v if v is a number or bool,
+// along with a flag reporting whether it was one.
+func keyFloat(v reflect.Value) (f float64, ok bool) {
+	switch v.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return float64(v.Int()), true
+	case reflect.Float32, reflect.Float64:
+		return v.Float(), true
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return float64(v.Uint()), true
+	case reflect.Bool:
+		if v.Bool() {
+			return 1, true
+		}
+		return 0, true
+	}
+	return 0, false
+}
+
+// numLess returns whether a < b.
+// a and b must have the same kind.
+func numLess(a, b reflect.Value) bool {
+	switch a.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return a.Int() < b.Int()
+	case reflect.Float32, reflect.Float64:
+		return a.Float() < b.Float()
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return a.Uint() < b.Uint()
+	case reflect.Bool:
+		return !a.Bool() && b.Bool()
+	}
+	panic("not a number")
+}

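Less gives the encoder a natural ordering for map keys: runs of digits compare by numeric value rather than byte by byte, so "item2" sorts before "item10". A small sketch of the resulting output, again assuming the vendored import path:

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	out, err := yaml.Marshal(map[string]int{
		"item10": 10,
		"item2":  2,
		"item1":  1,
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// item1: 1
	// item2: 2
	// item10: 10   (digit runs compare numerically, so 2 < 10)
}
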
+ 89 - 0
vendor/gopkg.in/yaml.v2/writerc.go

@@ -0,0 +1,89 @@
+package yaml
+
+// Set the writer error and return false.
+func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
+	emitter.error = yaml_WRITER_ERROR
+	emitter.problem = problem
+	return false
+}
+
+// Flush the output buffer.
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
+	if emitter.write_handler == nil {
+		panic("write handler not set")
+	}
+
+	// Check if the buffer is empty.
+	if emitter.buffer_pos == 0 {
+		return true
+	}
+
+	// If the output encoding is UTF-8, we don't need to recode the buffer.
+	if emitter.encoding == yaml_UTF8_ENCODING {
+		if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
+			return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+		}
+		emitter.buffer_pos = 0
+		return true
+	}
+
+	// Recode the buffer into the raw buffer.
+	var low, high int
+	if emitter.encoding == yaml_UTF16LE_ENCODING {
+		low, high = 0, 1
+	} else {
+		high, low = 1, 0
+	}
+
+	pos := 0
+	for pos < emitter.buffer_pos {
+		// See the "reader.c" code for more details on UTF-8 encoding.  Note
+		// that we assume that the buffer contains a valid UTF-8 sequence.
+
+		// Read the next UTF-8 character.
+		octet := emitter.buffer[pos]
+
+		var w int
+		var value rune
+		switch {
+		case octet&0x80 == 0x00:
+			w, value = 1, rune(octet&0x7F)
+		case octet&0xE0 == 0xC0:
+			w, value = 2, rune(octet&0x1F)
+		case octet&0xF0 == 0xE0:
+			w, value = 3, rune(octet&0x0F)
+		case octet&0xF8 == 0xF0:
+			w, value = 4, rune(octet&0x07)
+		}
+		for k := 1; k < w; k++ {
+			octet = emitter.buffer[pos+k]
+			value = (value << 6) + (rune(octet) & 0x3F)
+		}
+		pos += w
+
+		// Write the character.
+		if value < 0x10000 {
+			var b [2]byte
+			b[high] = byte(value >> 8)
+			b[low] = byte(value & 0xFF)
+			emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1])
+		} else {
+			// Write the character using a surrogate pair (check "reader.c").
+			var b [4]byte
+			value -= 0x10000
+			b[high] = byte(0xD8 + (value >> 18))
+			b[low] = byte((value >> 10) & 0xFF)
+			b[high+2] = byte(0xDC + ((value >> 8) & 0x3)) // low surrogate high byte: 0xDC plus payload bits 8-9
+			b[low+2] = byte(value & 0xFF)
+			emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3])
+		}
+	}
+
+	// Write the raw buffer.
+	if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil {
+		return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+	}
+	emitter.buffer_pos = 0
+	emitter.raw_buffer = emitter.raw_buffer[:0]
+	return true
+}

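The surrogate-pair arithmetic in the recode loop can be sanity-checked against the standard library. A standalone sketch of the big-endian case (high byte first), using the corrected low-surrogate mask from above:

package main

import (
	"fmt"
	"unicode/utf16"
)

// encodeUTF16BE mirrors the recode loop in yaml_emitter_flush for the
// big-endian layout, including the surrogate-pair math.
func encodeUTF16BE(value rune) []byte {
	if value < 0x10000 {
		return []byte{byte(value >> 8), byte(value & 0xFF)}
	}
	value -= 0x10000
	return []byte{
		byte(0xD8 + (value >> 18)),        // high surrogate, top byte
		byte((value >> 10) & 0xFF),        // high surrogate, low byte
		byte(0xDC + ((value >> 8) & 0x3)), // low surrogate, top byte
		byte(value & 0xFF),                // low surrogate, low byte
	}
}

func main() {
	r := rune(0x1F600)
	w1, w2 := utf16.EncodeRune(r) // reference surrogate pair
	fmt.Printf("% X vs %04X %04X\n", encodeUTF16BE(r), w1, w2)
	// Output: D8 3D DE 00 vs D83D DE00
}
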
+ 346 - 0
vendor/gopkg.in/yaml.v2/yaml.go

@@ -0,0 +1,346 @@
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+//   https://github.com/go-yaml/yaml
+//
+package yaml
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+)
+
+// MapSlice encodes and decodes as a YAML map.
+// The order of keys is preserved when encoding and decoding.
+type MapSlice []MapItem
+
+// MapItem is an item in a MapSlice.
+type MapItem struct {
+	Key, Value interface{}
+}
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
+// method receives a function that may be called to unmarshal the original
+// YAML value into a field or variable. It is safe to call the unmarshal
+// function parameter more than once if necessary.
+type Unmarshaler interface {
+	UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+	MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to a type
+// mismatch, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+//     type T struct {
+//         F int `yaml:"a,omitempty"`
+//         B int
+//     }
+//     var t T
+//     yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+	defer handleErr(&err)
+	d := newDecoder()
+	p := newParser(in)
+	defer p.destroy()
+	node := p.parse()
+	if node != nil {
+		v := reflect.ValueOf(out)
+		if v.Kind() == reflect.Ptr && !v.IsNil() {
+			v = v.Elem()
+		}
+		d.unmarshal(node, v)
+	}
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+//     `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+//     omitempty    Only include the field if it's not set to the zero
+//                  value for the type or to empty slices or maps.
+//                  Does not apply to zero valued structs.
+//
+//     flow         Marshal using a flow style (useful for structs,
+//                  sequences and maps).
+//
+//     inline       Inline the field, which must be a struct or a map,
+//                  causing all of its fields or keys to be processed as if
+//                  they were part of the outer struct. For maps, keys must
+//                  not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+//     type T struct {
+//         F int "a,omitempty"
+//         B int
+//     }
+//     yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+//     yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+	defer handleErr(&err)
+	e := newEncoder()
+	defer e.destroy()
+	e.marshal("", reflect.ValueOf(in))
+	e.finish()
+	out = e.out
+	return
+}
+
+func handleErr(err *error) {
+	if v := recover(); v != nil {
+		if e, ok := v.(yamlError); ok {
+			*err = e.err
+		} else {
+			panic(v)
+		}
+	}
+}
+
+type yamlError struct {
+	err error
+}
+
+func fail(err error) {
+	panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+	panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+	Errors []string
+}
+
+func (e *TypeError) Error() string {
+	return fmt.Sprintf("yaml: unmarshal errors:\n  %s", strings.Join(e.Errors, "\n  "))
+}
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+	FieldsMap  map[string]fieldInfo
+	FieldsList []fieldInfo
+
+	// InlineMap is the number of the field in the struct that
+	// contains an ,inline map, or -1 if there's none.
+	InlineMap int
+}
+
+type fieldInfo struct {
+	Key       string
+	Num       int
+	OmitEmpty bool
+	Flow      bool
+
+	// Inline holds the field index if the field is part of an inlined struct.
+	Inline []int
+}
+
+var structMap = make(map[reflect.Type]*structInfo)
+var fieldMapMutex sync.RWMutex
+
+func getStructInfo(st reflect.Type) (*structInfo, error) {
+	fieldMapMutex.RLock()
+	sinfo, found := structMap[st]
+	fieldMapMutex.RUnlock()
+	if found {
+		return sinfo, nil
+	}
+
+	n := st.NumField()
+	fieldsMap := make(map[string]fieldInfo)
+	fieldsList := make([]fieldInfo, 0, n)
+	inlineMap := -1
+	for i := 0; i != n; i++ {
+		field := st.Field(i)
+		if field.PkgPath != "" && !field.Anonymous {
+			continue // Private field
+		}
+
+		info := fieldInfo{Num: i}
+
+		tag := field.Tag.Get("yaml")
+		if tag == "" && !strings.Contains(string(field.Tag), ":") {
+			tag = string(field.Tag)
+		}
+		if tag == "-" {
+			continue
+		}
+
+		inline := false
+		fields := strings.Split(tag, ",")
+		if len(fields) > 1 {
+			for _, flag := range fields[1:] {
+				switch flag {
+				case "omitempty":
+					info.OmitEmpty = true
+				case "flow":
+					info.Flow = true
+				case "inline":
+					inline = true
+				default:
+					return nil, fmt.Errorf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
+				}
+			}
+			tag = fields[0]
+		}
+
+		if inline {
+			switch field.Type.Kind() {
+			case reflect.Map:
+				if inlineMap >= 0 {
+					return nil, errors.New("Multiple ,inline maps in struct " + st.String())
+				}
+				if field.Type.Key() != reflect.TypeOf("") {
+					return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
+				}
+				inlineMap = info.Num
+			case reflect.Struct:
+				sinfo, err := getStructInfo(field.Type)
+				if err != nil {
+					return nil, err
+				}
+				for _, finfo := range sinfo.FieldsList {
+					if _, found := fieldsMap[finfo.Key]; found {
+						msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
+						return nil, errors.New(msg)
+					}
+					if finfo.Inline == nil {
+						finfo.Inline = []int{i, finfo.Num}
+					} else {
+						finfo.Inline = append([]int{i}, finfo.Inline...)
+					}
+					fieldsMap[finfo.Key] = finfo
+					fieldsList = append(fieldsList, finfo)
+				}
+			default:
+				//return nil, errors.New("Option ,inline needs a struct value or map field")
+				return nil, errors.New("Option ,inline needs a struct value field")
+			}
+			continue
+		}
+
+		if tag != "" {
+			info.Key = tag
+		} else {
+			info.Key = strings.ToLower(field.Name)
+		}
+
+		if _, found = fieldsMap[info.Key]; found {
+			msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
+			return nil, errors.New(msg)
+		}
+
+		fieldsList = append(fieldsList, info)
+		fieldsMap[info.Key] = info
+	}
+
+	sinfo = &structInfo{fieldsMap, fieldsList, inlineMap}
+
+	fieldMapMutex.Lock()
+	structMap[st] = sinfo
+	fieldMapMutex.Unlock()
+	return sinfo, nil
+}
+
+func isZero(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.String:
+		return len(v.String()) == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	case reflect.Slice:
+		return v.Len() == 0
+	case reflect.Map:
+		return v.Len() == 0
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Struct:
+		vt := v.Type()
+		for i := v.NumField() - 1; i >= 0; i-- {
+			if vt.Field(i).PkgPath != "" {
+				continue // Private field
+			}
+			if !isZero(v.Field(i)) {
+				return false
+			}
+		}
+		return true
+	}
+	return false
+}

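The tag flags documented on Marshal compose. A short sketch combining omitempty, flow, and inline; the Config and Addr types here are illustrative only, not part of the package:

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

type Addr struct {
	Host string `yaml:"host"`
	Port int    `yaml:"port"`
}

type Config struct {
	Name  string   `yaml:"name"`
	Tags  []string `yaml:"tags,flow"`       // emitted as [a, b]
	Debug bool     `yaml:"debug,omitempty"` // dropped when false
	Addr  `yaml:",inline"`                  // fields hoisted into Config
}

func main() {
	out, err := yaml.Marshal(Config{
		Name: "api",
		Tags: []string{"web", "prod"},
		Addr: Addr{Host: "0.0.0.0", Port: 8080},
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// name: api
	// tags: [web, prod]
	// host: 0.0.0.0
	// port: 8080
}
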
+ 716 - 0
vendor/gopkg.in/yaml.v2/yamlh.go

@@ -0,0 +1,716 @@
+package yaml
+
+import (
+	"io"
+)
+
+// The version directive data.
+type yaml_version_directive_t struct {
+	major int8 // The major version number.
+	minor int8 // The minor version number.
+}
+
+// The tag directive data.
+type yaml_tag_directive_t struct {
+	handle []byte // The tag handle.
+	prefix []byte // The tag prefix.
+}
+
+type yaml_encoding_t int
+
+// The stream encoding.
+const (
+	// Let the parser choose the encoding.
+	yaml_ANY_ENCODING yaml_encoding_t = iota
+
+	yaml_UTF8_ENCODING    // The default UTF-8 encoding.
+	yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
+	yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
+)
+
+type yaml_break_t int
+
+// Line break types.
+const (
+	// Let the parser choose the break type.
+	yaml_ANY_BREAK yaml_break_t = iota
+
+	yaml_CR_BREAK   // Use CR for line breaks (Mac style).
+	yaml_LN_BREAK   // Use LN for line breaks (Unix style).
+	yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
+)
+
+type yaml_error_type_t int
+
+// Many bad things could happen with the parser and emitter.
+const (
+	// No error is produced.
+	yaml_NO_ERROR yaml_error_type_t = iota
+
+	yaml_MEMORY_ERROR   // Cannot allocate or reallocate a block of memory.
+	yaml_READER_ERROR   // Cannot read or decode the input stream.
+	yaml_SCANNER_ERROR  // Cannot scan the input stream.
+	yaml_PARSER_ERROR   // Cannot parse the input stream.
+	yaml_COMPOSER_ERROR // Cannot compose a YAML document.
+	yaml_WRITER_ERROR   // Cannot write to the output stream.
+	yaml_EMITTER_ERROR  // Cannot emit a YAML stream.
+)
+
+// The pointer position.
+type yaml_mark_t struct {
+	index  int // The position index.
+	line   int // The position line.
+	column int // The position column.
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
+
+	yaml_PLAIN_SCALAR_STYLE         // The plain scalar style.
+	yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+	yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+	yaml_LITERAL_SCALAR_STYLE       // The literal scalar style.
+	yaml_FOLDED_SCALAR_STYLE        // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+	yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+	yaml_FLOW_SEQUENCE_STYLE  // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+	yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+	yaml_FLOW_MAPPING_STYLE  // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+	// An empty token.
+	yaml_NO_TOKEN yaml_token_type_t = iota
+
+	yaml_STREAM_START_TOKEN // A STREAM-START token.
+	yaml_STREAM_END_TOKEN   // A STREAM-END token.
+
+	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+	yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
+	yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
+	yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.
+
+	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
+	yaml_BLOCK_END_TOKEN            // A BLOCK-END token.
+
+	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+	yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
+	yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
+	yaml_FLOW_MAPPING_END_TOKEN    // A FLOW-MAPPING-END token.
+
+	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+	yaml_FLOW_ENTRY_TOKEN  // A FLOW-ENTRY token.
+	yaml_KEY_TOKEN         // A KEY token.
+	yaml_VALUE_TOKEN       // A VALUE token.
+
+	yaml_ALIAS_TOKEN  // An ALIAS token.
+	yaml_ANCHOR_TOKEN // An ANCHOR token.
+	yaml_TAG_TOKEN    // A TAG token.
+	yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+	switch tt {
+	case yaml_NO_TOKEN:
+		return "yaml_NO_TOKEN"
+	case yaml_STREAM_START_TOKEN:
+		return "yaml_STREAM_START_TOKEN"
+	case yaml_STREAM_END_TOKEN:
+		return "yaml_STREAM_END_TOKEN"
+	case yaml_VERSION_DIRECTIVE_TOKEN:
+		return "yaml_VERSION_DIRECTIVE_TOKEN"
+	case yaml_TAG_DIRECTIVE_TOKEN:
+		return "yaml_TAG_DIRECTIVE_TOKEN"
+	case yaml_DOCUMENT_START_TOKEN:
+		return "yaml_DOCUMENT_START_TOKEN"
+	case yaml_DOCUMENT_END_TOKEN:
+		return "yaml_DOCUMENT_END_TOKEN"
+	case yaml_BLOCK_SEQUENCE_START_TOKEN:
+		return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+	case yaml_BLOCK_MAPPING_START_TOKEN:
+		return "yaml_BLOCK_MAPPING_START_TOKEN"
+	case yaml_BLOCK_END_TOKEN:
+		return "yaml_BLOCK_END_TOKEN"
+	case yaml_FLOW_SEQUENCE_START_TOKEN:
+		return "yaml_FLOW_SEQUENCE_START_TOKEN"
+	case yaml_FLOW_SEQUENCE_END_TOKEN:
+		return "yaml_FLOW_SEQUENCE_END_TOKEN"
+	case yaml_FLOW_MAPPING_START_TOKEN:
+		return "yaml_FLOW_MAPPING_START_TOKEN"
+	case yaml_FLOW_MAPPING_END_TOKEN:
+		return "yaml_FLOW_MAPPING_END_TOKEN"
+	case yaml_BLOCK_ENTRY_TOKEN:
+		return "yaml_BLOCK_ENTRY_TOKEN"
+	case yaml_FLOW_ENTRY_TOKEN:
+		return "yaml_FLOW_ENTRY_TOKEN"
+	case yaml_KEY_TOKEN:
+		return "yaml_KEY_TOKEN"
+	case yaml_VALUE_TOKEN:
+		return "yaml_VALUE_TOKEN"
+	case yaml_ALIAS_TOKEN:
+		return "yaml_ALIAS_TOKEN"
+	case yaml_ANCHOR_TOKEN:
+		return "yaml_ANCHOR_TOKEN"
+	case yaml_TAG_TOKEN:
+		return "yaml_TAG_TOKEN"
+	case yaml_SCALAR_TOKEN:
+		return "yaml_SCALAR_TOKEN"
+	}
+	return "<unknown token>"
+}
+
+// The token structure.
+type yaml_token_t struct {
+	// The token type.
+	typ yaml_token_type_t
+
+	// The start/end of the token.
+	start_mark, end_mark yaml_mark_t
+
+	// The stream encoding (for yaml_STREAM_START_TOKEN).
+	encoding yaml_encoding_t
+
+	// The alias/anchor/scalar value or tag/tag directive handle
+	// (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
+	value []byte
+
+	// The tag suffix (for yaml_TAG_TOKEN).
+	suffix []byte
+
+	// The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
+	prefix []byte
+
+	// The scalar style (for yaml_SCALAR_TOKEN).
+	style yaml_scalar_style_t
+
+	// The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
+	major, minor int8
+}
+
+// Events
+
+type yaml_event_type_t int8
+
+// Event types.
+const (
+	// An empty event.
+	yaml_NO_EVENT yaml_event_type_t = iota
+
+	yaml_STREAM_START_EVENT   // A STREAM-START event.
+	yaml_STREAM_END_EVENT     // A STREAM-END event.
+	yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
+	yaml_DOCUMENT_END_EVENT   // A DOCUMENT-END event.
+	yaml_ALIAS_EVENT          // An ALIAS event.
+	yaml_SCALAR_EVENT         // A SCALAR event.
+	yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
+	yaml_SEQUENCE_END_EVENT   // A SEQUENCE-END event.
+	yaml_MAPPING_START_EVENT  // A MAPPING-START event.
+	yaml_MAPPING_END_EVENT    // A MAPPING-END event.
+)
+
+// The event structure.
+type yaml_event_t struct {
+
+	// The event type.
+	typ yaml_event_type_t
+
+	// The start and end of the event.
+	start_mark, end_mark yaml_mark_t
+
+	// The document encoding (for yaml_STREAM_START_EVENT).
+	encoding yaml_encoding_t
+
+	// The version directive (for yaml_DOCUMENT_START_EVENT).
+	version_directive *yaml_version_directive_t
+
+	// The list of tag directives (for yaml_DOCUMENT_START_EVENT).
+	tag_directives []yaml_tag_directive_t
+
+	// The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
+	anchor []byte
+
+	// The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+	tag []byte
+
+	// The scalar value (for yaml_SCALAR_EVENT).
+	value []byte
+
+	// Is the document start/end indicator implicit, or the tag optional?
+	// (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
+	implicit bool
+
+	// Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
+	quoted_implicit bool
+
+	// The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+	style yaml_style_t
+}
+
+func (e *yaml_event_t) scalar_style() yaml_scalar_style_t     { return yaml_scalar_style_t(e.style) }
+func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
+func (e *yaml_event_t) mapping_style() yaml_mapping_style_t   { return yaml_mapping_style_t(e.style) }
+
+// Nodes
+
+const (
+	yaml_NULL_TAG      = "tag:yaml.org,2002:null"      // The tag !!null with the only possible value: null.
+	yaml_BOOL_TAG      = "tag:yaml.org,2002:bool"      // The tag !!bool with the values: true and false.
+	yaml_STR_TAG       = "tag:yaml.org,2002:str"       // The tag !!str for string values.
+	yaml_INT_TAG       = "tag:yaml.org,2002:int"       // The tag !!int for integer values.
+	yaml_FLOAT_TAG     = "tag:yaml.org,2002:float"     // The tag !!float for float values.
+	yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
+
+	yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
+	yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
+
+	// Not in original libyaml.
+	yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
+	yaml_MERGE_TAG  = "tag:yaml.org,2002:merge"
+
+	yaml_DEFAULT_SCALAR_TAG   = yaml_STR_TAG // The default scalar tag is !!str.
+	yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
+	yaml_DEFAULT_MAPPING_TAG  = yaml_MAP_TAG // The default mapping tag is !!map.
+)
+
+type yaml_node_type_t int
+
+// Node types.
+const (
+	// An empty node.
+	yaml_NO_NODE yaml_node_type_t = iota
+
+	yaml_SCALAR_NODE   // A scalar node.
+	yaml_SEQUENCE_NODE // A sequence node.
+	yaml_MAPPING_NODE  // A mapping node.
+)
+
+// An element of a sequence node.
+type yaml_node_item_t int
+
+// An element of a mapping node.
+type yaml_node_pair_t struct {
+	key   int // The key of the element.
+	value int // The value of the element.
+}
+
+// The node structure.
+type yaml_node_t struct {
+	typ yaml_node_type_t // The node type.
+	tag []byte           // The node tag.
+
+	// The node data.
+
+	// The scalar parameters (for yaml_SCALAR_NODE).
+	scalar struct {
+		value  []byte              // The scalar value.
+		length int                 // The length of the scalar value.
+		style  yaml_scalar_style_t // The scalar style.
+	}
+
+	// The sequence parameters (for YAML_SEQUENCE_NODE).
+	sequence struct {
+		items_data []yaml_node_item_t    // The stack of sequence items.
+		style      yaml_sequence_style_t // The sequence style.
+	}
+
+	// The mapping parameters (for yaml_MAPPING_NODE).
+	mapping struct {
+		pairs_data  []yaml_node_pair_t   // The stack of mapping pairs (key, value).
+		pairs_start *yaml_node_pair_t    // The beginning of the stack.
+		pairs_end   *yaml_node_pair_t    // The end of the stack.
+		pairs_top   *yaml_node_pair_t    // The top of the stack.
+		style       yaml_mapping_style_t // The mapping style.
+	}
+
+	start_mark yaml_mark_t // The beginning of the node.
+	end_mark   yaml_mark_t // The end of the node.
+
+}
+
+// The document structure.
+type yaml_document_t struct {
+
+	// The document nodes.
+	nodes []yaml_node_t
+
+	// The version directive.
+	version_directive *yaml_version_directive_t
+
+	// The list of tag directives.
+	tag_directives_data  []yaml_tag_directive_t
+	tag_directives_start int // The beginning of the tag directives list.
+	tag_directives_end   int // The end of the tag directives list.
+
+	start_implicit int // Is the document start indicator implicit?
+	end_implicit   int // Is the document end indicator implicit?
+
+	// The start/end of the document.
+	start_mark, end_mark yaml_mark_t
+}
+
+// The prototype of a read handler.
+//
+// The read handler is called when the parser needs to read more bytes from the
+// source. The handler should write no more than len(buffer) bytes to the
+// buffer and return the number of bytes written.
+//
+// On EOF, the handler should return 0 bytes written along with io.EOF;
+// any other non-nil error aborts the parse.
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+	possible     bool        // Is a simple key possible?
+	required     bool        // Is a simple key required?
+	token_number int         // The number of the token.
+	mark         yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+	yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+	yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE           // Expect the beginning of an implicit document.
+	yaml_PARSE_DOCUMENT_START_STATE                    // Expect DOCUMENT-START.
+	yaml_PARSE_DOCUMENT_CONTENT_STATE                  // Expect the content of a document.
+	yaml_PARSE_DOCUMENT_END_STATE                      // Expect DOCUMENT-END.
+	yaml_PARSE_BLOCK_NODE_STATE                        // Expect a block node.
+	yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+	yaml_PARSE_FLOW_NODE_STATE                         // Expect a flow node.
+	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
+	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
+	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
+	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
+	yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
+	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
+	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
+	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
+	yaml_PARSE_END_STATE                               // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+	switch ps {
+	case yaml_PARSE_STREAM_START_STATE:
+		return "yaml_PARSE_STREAM_START_STATE"
+	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+		return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_START_STATE:
+		return "yaml_PARSE_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+		return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+	case yaml_PARSE_DOCUMENT_END_STATE:
+		return "yaml_PARSE_DOCUMENT_END_STATE"
+	case yaml_PARSE_BLOCK_NODE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_STATE"
+	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+	case yaml_PARSE_FLOW_NODE_STATE:
+		return "yaml_PARSE_FLOW_NODE_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
+	case yaml_PARSE_END_STATE:
+		return "yaml_PARSE_END_STATE"
+	}
+	return "<unknown parser state>"
+}
+
+// This structure holds aliases data.
+type yaml_alias_data_t struct {
+	anchor []byte      // The anchor.
+	index  int         // The node id.
+	mark   yaml_mark_t // The anchor mark.
+}
+
+// The parser structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_parser_ family of functions.
+type yaml_parser_t struct {
+
+	// Error handling
+
+	error yaml_error_type_t // Error type.
+
+	problem string // Error description.
+
+	// The byte about which the problem occurred.
+	problem_offset int
+	problem_value  int
+	problem_mark   yaml_mark_t
+
+	// The error context.
+	context      string
+	context_mark yaml_mark_t
+
+	// Reader stuff
+
+	read_handler yaml_read_handler_t // Read handler.
+
+	input_file io.Reader // File input data.
+	input      []byte    // String input data.
+	input_pos  int
+
+	eof bool // EOF flag
+
+	buffer     []byte // The working buffer.
+	buffer_pos int    // The current position of the buffer.
+
+	unread int // The number of unread characters in the buffer.
+
+	raw_buffer     []byte // The raw buffer.
+	raw_buffer_pos int    // The current position of the buffer.
+
+	encoding yaml_encoding_t // The input encoding.
+
+	offset int         // The offset of the current position (in bytes).
+	mark   yaml_mark_t // The mark of the current position.
+
+	// Scanner stuff
+
+	stream_start_produced bool // Have we started to scan the input stream?
+	stream_end_produced   bool // Have we reached the end of the input stream?
+
+	flow_level int // The number of unclosed '[' and '{' indicators.
+
+	tokens          []yaml_token_t // The tokens queue.
+	tokens_head     int            // The head of the tokens queue.
+	tokens_parsed   int            // The number of tokens fetched from the queue.
+	token_available bool           // Does the tokens queue contain a token ready for dequeueing.
+
+	indent  int   // The current indentation level.
+	indents []int // The indentation levels stack.
+
+	simple_key_allowed bool                // May a simple key occur at the current position?
+	simple_keys        []yaml_simple_key_t // The stack of simple keys.
+
+	// Parser stuff
+
+	state          yaml_parser_state_t    // The current parser state.
+	states         []yaml_parser_state_t  // The parser states stack.
+	marks          []yaml_mark_t          // The stack of marks.
+	tag_directives []yaml_tag_directive_t // The list of TAG directives.
+
+	// Dumper stuff
+
+	aliases []yaml_alias_data_t // The alias data.
+
+	document *yaml_document_t // The currently parsed document.
+}
+
+// Emitter Definitions
+
+// The prototype of a write handler.
+//
+// The write handler is called when the emitter needs to flush the accumulated
+// characters to the output. The handler should write all of the bytes in
+// buffer to the output, returning a non-nil error on failure.
+//
+type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
+
+type yaml_emitter_state_t int
+
+// The emitter states.
+const (
+	// Expect STREAM-START.
+	yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
+
+	yaml_EMIT_FIRST_DOCUMENT_START_STATE       // Expect the first DOCUMENT-START or STREAM-END.
+	yaml_EMIT_DOCUMENT_START_STATE             // Expect DOCUMENT-START or STREAM-END.
+	yaml_EMIT_DOCUMENT_CONTENT_STATE           // Expect the content of a document.
+	yaml_EMIT_DOCUMENT_END_STATE               // Expect DOCUMENT-END.
+	yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE   // Expect the first item of a flow sequence.
+	yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE         // Expect an item of a flow sequence.
+	yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE     // Expect the first key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_KEY_STATE           // Expect a key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE  // Expect a value for a simple key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_VALUE_STATE         // Expect a value of a flow mapping.
+	yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE  // Expect the first item of a block sequence.
+	yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE        // Expect an item of a block sequence.
+	yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE    // Expect the first key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_KEY_STATE          // Expect the key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_VALUE_STATE        // Expect a value of a block mapping.
+	yaml_EMIT_END_STATE                        // Expect nothing.
+)
+
+// The emitter structure.
+//
+// All members are internal.  Manage the structure using the @c yaml_emitter_
+// family of functions.
+type yaml_emitter_t struct {
+
+	// Error handling
+
+	error   yaml_error_type_t // Error type.
+	problem string            // Error description.
+
+	// Writer stuff
+
+	write_handler yaml_write_handler_t // Write handler.
+
+	output_buffer *[]byte   // String output data.
+	output_file   io.Writer // File output data.
+
+	buffer     []byte // The working buffer.
+	buffer_pos int    // The current position of the buffer.
+
+	raw_buffer     []byte // The raw buffer.
+	raw_buffer_pos int    // The current position of the buffer.
+
+	encoding yaml_encoding_t // The stream encoding.
+
+	// Emitter stuff
+
+	canonical   bool         // If the output is in the canonical style?
+	best_indent int          // The number of indentation spaces.
+	best_width  int          // The preferred width of the output lines.
+	unicode     bool         // Allow unescaped non-ASCII characters?
+	line_break  yaml_break_t // The preferred line break.
+
+	state  yaml_emitter_state_t   // The current emitter state.
+	states []yaml_emitter_state_t // The stack of states.
+
+	events      []yaml_event_t // The event queue.
+	events_head int            // The head of the event queue.
+
+	indents []int // The stack of indentation levels.
+
+	tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+	indent int // The current indentation level.
+
+	flow_level int // The current flow level.
+
+	root_context       bool // Is it the document root context?
+	sequence_context   bool // Is it a sequence context?
+	mapping_context    bool // Is it a mapping context?
+	simple_key_context bool // Is it a simple mapping key context?
+
+	line       int  // The current line.
+	column     int  // The current column.
+	whitespace bool // If the last character was a whitespace?
+	indention  bool // If the last character was an indentation character (' ', '-', '?', ':')?
+	open_ended bool // If an explicit document end is required?
+
+	// Anchor analysis.
+	anchor_data struct {
+		anchor []byte // The anchor value.
+		alias  bool   // Is it an alias?
+	}
+
+	// Tag analysis.
+	tag_data struct {
+		handle []byte // The tag handle.
+		suffix []byte // The tag suffix.
+	}
+
+	// Scalar analysis.
+	scalar_data struct {
+		value                 []byte              // The scalar value.
+		multiline             bool                // Does the scalar contain line breaks?
+		flow_plain_allowed    bool                // Can the scalar be expressed in the flow plain style?
+		block_plain_allowed   bool                // Can the scalar be expressed in the block plain style?
+		single_quoted_allowed bool                // Can the scalar be expressed in the single quoted style?
+		block_allowed         bool                // Can the scalar be expressed in the literal or folded styles?
+		style                 yaml_scalar_style_t // The output style.
+	}
+
+	// Dumper stuff
+
+	opened bool // If the stream was already opened?
+	closed bool // If the stream was already closed?
+
+	// The information associated with the document nodes.
+	anchors *struct {
+		references int  // The number of references.
+		anchor     int  // The anchor id.
+		serialized bool // If the node has been emitted?
+	}
+
+	last_anchor_id int // The last assigned anchor id.
+
+	document *yaml_document_t // The currently emitted document.
+}

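yaml_write_handler_t is the seam between the emitter and its output. A minimal handler, sketched as it might look inside this package (the package's real handlers live in apic.go, which is not shown in this section), appends each flushed chunk to the registered buffer:

package yaml

// Sketch of a string write handler matching yaml_write_handler_t:
// append every flushed chunk to the buffer the caller registered in
// emitter.output_buffer. (Illustrative only; see apic.go for the
// package's own constructors.)
func sketch_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
	*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
	return nil
}
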
+ 173 - 0
vendor/gopkg.in/yaml.v2/yamlprivateh.go

@@ -0,0 +1,173 @@
+package yaml
+
+const (
+	// The size of the input raw buffer.
+	input_raw_buffer_size = 512
+
+	// The size of the input buffer.
+	// It should be possible to decode the whole raw buffer.
+	input_buffer_size = input_raw_buffer_size * 3
+
+	// The size of the output buffer.
+	output_buffer_size = 128
+
+	// The size of the output raw buffer.
+	// It should be possible to encode the whole output buffer.
+	output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+	// The size of other stacks and queues.
+	initial_stack_size  = 16
+	initial_queue_size  = 16
+	initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int {
+	return int(b[i]) - '0'
+}
+
+// Check if the character at the specified position is a hex-digit.
+func is_hex(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
+}
+
+// Get the value of a hex-digit.
+func as_hex(b []byte, i int) int {
+	bi := b[i]
+	if bi >= 'A' && bi <= 'F' {
+		return int(bi) - 'A' + 10
+	}
+	if bi >= 'a' && bi <= 'f' {
+		return int(bi) - 'a' + 10
+	}
+	return int(bi) - '0'
+}
+
+// Check if the character is ASCII.
+func is_ascii(b []byte, i int) bool {
+	return b[i] <= 0x7F
+}
+
+// Check if the character at the start of the buffer can be printed unescaped.
+func is_printable(b []byte, i int) bool {
+	return ((b[i] == 0x0A) || // . == #x0A
+		(b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
+		(b[i] == 0xC2 && b[i+1] >= 0xA0) || // #xA0 <= . <= #xD7FF
+		(b[i] > 0xC2 && b[i] < 0xED) ||
+		(b[i] == 0xED && b[i+1] < 0xA0) ||
+		(b[i] == 0xEE) ||
+		(b[i] == 0xEF && // #xE000 <= . <= #xFFFD
+			!(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
+			!(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
+}
+
+// Check if the character at the specified position is NUL.
+func is_z(b []byte, i int) bool {
+	return b[i] == 0x00
+}
+
+// Check if the beginning of the buffer is a BOM.
+func is_bom(b []byte, i int) bool {
+	return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
+}
+
+// Check if the character at the specified position is space.
+func is_space(b []byte, i int) bool {
+	return b[i] == ' '
+}
+
+// Check if the character at the specified position is tab.
+func is_tab(b []byte, i int) bool {
+	return b[i] == '\t'
+}
+
+// Check if the character at the specified position is blank (space or tab).
+func is_blank(b []byte, i int) bool {
+	//return is_space(b, i) || is_tab(b, i)
+	return b[i] == ' ' || b[i] == '\t'
+}
+
+// Check if the character at the specified position is a line break.
+func is_break(b []byte, i int) bool {
+	return (b[i] == '\r' || // CR (#xD)
+		b[i] == '\n' || // LF (#xA)
+		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
+}
+
+func is_crlf(b []byte, i int) bool {
+	return b[i] == '\r' && b[i+1] == '\n'
+}
+
+// Check if the character is a line break or NUL.
+func is_breakz(b []byte, i int) bool {
+	//return is_break(b, i) || is_z(b, i)
+	return (        // is_break:
+	b[i] == '\r' || // CR (#xD)
+		b[i] == '\n' || // LF (#xA)
+		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+		// is_z:
+		b[i] == 0)
+}
+
+// Check if the character is a line break, space, or NUL.
+func is_spacez(b []byte, i int) bool {
+	//return is_space(b, i) || is_breakz(b, i)
+	return ( // is_space:
+	b[i] == ' ' ||
+		// is_breakz:
+		b[i] == '\r' || // CR (#xD)
+		b[i] == '\n' || // LF (#xA)
+		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+		b[i] == 0)
+}
+
+// Check if the character is a line break, space, tab, or NUL.
+func is_blankz(b []byte, i int) bool {
+	//return is_blank(b, i) || is_breakz(b, i)
+	return ( // is_blank:
+	b[i] == ' ' || b[i] == '\t' ||
+		// is_breakz:
+		b[i] == '\r' || // CR (#xD)
+		b[i] == '\n' || // LF (#xA)
+		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+		b[i] == 0)
+}
+
+// Determine the width of the character.
+func width(b byte) int {
+	// Don't replace these by a switch without first
+	// confirming that it is being inlined.
+	if b&0x80 == 0x00 {
+		return 1
+	}
+	if b&0xE0 == 0xC0 {
+		return 2
+	}
+	if b&0xF0 == 0xE0 {
+		return 3
+	}
+	if b&0xF8 == 0xF0 {
+		return 4
+	}
+	return 0
+}

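width derives a UTF-8 sequence's byte length from its lead byte alone. A quick standalone check of that lead-byte table against unicode/utf8:

package main

import (
	"fmt"
	"unicode/utf8"
)

// leadWidth mirrors width() above: the byte count of a UTF-8 sequence,
// derived from the lead byte alone (0 for continuation/invalid bytes).
func leadWidth(b byte) int {
	switch {
	case b&0x80 == 0x00:
		return 1
	case b&0xE0 == 0xC0:
		return 2
	case b&0xF0 == 0xE0:
		return 3
	case b&0xF8 == 0xF0:
		return 4
	}
	return 0
}

func main() {
	for _, r := range []rune{'A', 'é', '中', '😀'} {
		buf := make([]byte, 4)
		n := utf8.EncodeRune(buf, r)
		fmt.Println(string(r), leadWidth(buf[0]) == n) // all true
	}
}
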
Some files were not shown because too many files changed in this diff