Преглед на файлове

Merge pull request #23063 from yongtang/22961-aws-sdk-go

Update aws-sdk-go to v1.1.30
Brian Goff преди 9 години
родител
ревизия
3f970819bb
променени са 83 файла, в които са добавени 8574 реда и са изтрити 1414 реда
  1. 14 15
      daemon/logger/awslogs/cloudwatchlogs.go
  2. 2 2
      daemon/logger/awslogs/cloudwatchlogs_test.go
  3. 3 2
      hack/vendor.sh
  4. 48 8
      vendor/src/github.com/aws/aws-sdk-go/aws/awserr/error.go
  5. 72 13
      vendor/src/github.com/aws/aws-sdk-go/aws/awserr/types.go
  6. 4 7
      vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
  7. 27 0
      vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
  8. 70 35
      vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
  9. 4 0
      vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
  10. 89 0
      vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
  11. 120 0
      vendor/src/github.com/aws/aws-sdk-go/aws/client/client.go
  12. 90 0
      vendor/src/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
  13. 12 0
      vendor/src/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
  14. 135 16
      vendor/src/github.com/aws/aws-sdk-go/aws/config.go
  15. 12 0
      vendor/src/github.com/aws/aws-sdk-go/aws/convert_types.go
  16. 50 34
      vendor/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
  17. 8 135
      vendor/src/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go
  18. 23 8
      vendor/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
  19. 5 2
      vendor/src/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
  20. 49 39
      vendor/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
  21. 6 2
      vendor/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
  22. 4 0
      vendor/src/github.com/aws/aws-sdk-go/aws/credentials/example.ini
  23. 25 17
      vendor/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
  24. 6 2
      vendor/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
  25. 84 25
      vendor/src/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
  26. 102 5
      vendor/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
  27. 78 89
      vendor/src/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
  28. 2 2
      vendor/src/github.com/aws/aws-sdk-go/aws/errors.go
  29. 14 0
      vendor/src/github.com/aws/aws-sdk-go/aws/logger.go
  30. 78 3
      vendor/src/github.com/aws/aws-sdk-go/aws/request/handlers.go
  31. 33 0
      vendor/src/github.com/aws/aws-sdk-go/aws/request/http_request.go
  32. 31 0
      vendor/src/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go
  33. 49 0
      vendor/src/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
  34. 100 119
      vendor/src/github.com/aws/aws-sdk-go/aws/request/request.go
  35. 104 0
      vendor/src/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
  36. 32 2
      vendor/src/github.com/aws/aws-sdk-go/aws/request/retryer.go
  37. 234 0
      vendor/src/github.com/aws/aws-sdk-go/aws/request/validation.go
  38. 0 51
      vendor/src/github.com/aws/aws-sdk-go/aws/service/default_retryer.go
  39. 0 133
      vendor/src/github.com/aws/aws-sdk-go/aws/service/service.go
  40. 0 15
      vendor/src/github.com/aws/aws-sdk-go/aws/service/serviceinfo/service_info.go
  41. 120 0
      vendor/src/github.com/aws/aws-sdk-go/aws/session/session.go
  42. 25 7
      vendor/src/github.com/aws/aws-sdk-go/aws/types.go
  43. 1 1
      vendor/src/github.com/aws/aws-sdk-go/aws/version.go
  44. 27 0
      vendor/src/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/LICENSE
  45. 0 31
      vendor/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints.go
  46. 65 0
      vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go
  47. 21 23
      vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json
  48. 19 20
      vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go
  49. 75 0
      vendor/src/github.com/aws/aws-sdk-go/private/protocol/idempotency.go
  50. 81 45
      vendor/src/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go
  51. 2 2
      vendor/src/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go
  52. 21 9
      vendor/src/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go
  53. 74 35
      vendor/src/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
  54. 2 2
      vendor/src/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go
  55. 16 1
      vendor/src/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
  56. 21 0
      vendor/src/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go
  57. 82 0
      vendor/src/github.com/aws/aws-sdk-go/private/signer/v4/header_rules.go
  58. 141 41
      vendor/src/github.com/aws/aws-sdk-go/private/signer/v4/v4.go
  59. 382 151
      vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go
  60. 76 56
      vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go
  61. 4 0
      vendor/src/github.com/go-ini/ini/.gitignore
  62. 191 0
      vendor/src/github.com/go-ini/ini/LICENSE
  63. 560 0
      vendor/src/github.com/go-ini/ini/README.md
  64. 547 0
      vendor/src/github.com/go-ini/ini/README_ZH.md
  65. 1226 0
      vendor/src/github.com/go-ini/ini/ini.go
  66. 350 0
      vendor/src/github.com/go-ini/ini/struct.go
  67. 4 0
      vendor/src/github.com/jmespath/go-jmespath/.gitignore
  68. 9 0
      vendor/src/github.com/jmespath/go-jmespath/.travis.yml
  69. 13 0
      vendor/src/github.com/jmespath/go-jmespath/LICENSE
  70. 44 0
      vendor/src/github.com/jmespath/go-jmespath/Makefile
  71. 7 0
      vendor/src/github.com/jmespath/go-jmespath/README.md
  72. 49 0
      vendor/src/github.com/jmespath/go-jmespath/api.go
  73. 16 0
      vendor/src/github.com/jmespath/go-jmespath/astnodetype_string.go
  74. 842 0
      vendor/src/github.com/jmespath/go-jmespath/functions.go
  75. 418 0
      vendor/src/github.com/jmespath/go-jmespath/interpreter.go
  76. 420 0
      vendor/src/github.com/jmespath/go-jmespath/lexer.go
  77. 603 0
      vendor/src/github.com/jmespath/go-jmespath/parser.go
  78. 16 0
      vendor/src/github.com/jmespath/go-jmespath/toktype_string.go
  79. 185 0
      vendor/src/github.com/jmespath/go-jmespath/util.go
  80. 0 14
      vendor/src/github.com/vaughan0/go-ini/LICENSE
  81. 0 70
      vendor/src/github.com/vaughan0/go-ini/README.md
  82. 0 123
      vendor/src/github.com/vaughan0/go-ini/ini.go
  83. 0 2
      vendor/src/github.com/vaughan0/go-ini/test.ini

+ 14 - 15
daemon/logger/awslogs/cloudwatchlogs.go

@@ -14,9 +14,9 @@ import (
 	"github.com/Sirupsen/logrus"
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/awserr"
-	"github.com/aws/aws-sdk-go/aws/defaults"
 	"github.com/aws/aws-sdk-go/aws/ec2metadata"
 	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/session"
 	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
 	"github.com/docker/docker/daemon/logger"
 	"github.com/docker/docker/dockerversion"
@@ -66,11 +66,8 @@ type regionFinder interface {
 
 type byTimestamp []*cloudwatchlogs.InputLogEvent
 
-// init registers the awslogs driver and sets the default region, if provided
+// init registers the awslogs driver
 func init() {
-	if os.Getenv(regionEnvKey) != "" {
-		defaults.DefaultConfig.Region = aws.String(os.Getenv(regionEnvKey))
-	}
 	if err := logger.RegisterLogDriver(name, New); err != nil {
 		logrus.Fatal(err)
 	}
@@ -113,7 +110,7 @@ func New(ctx logger.Context) (logger.Logger, error) {
 // newRegionFinder is a variable such that the implementation
 // can be swapped out for unit tests.
 var newRegionFinder = func() regionFinder {
-	return ec2metadata.New(nil)
+	return ec2metadata.New(session.New())
 }
 
 // newAWSLogsClient creates the service client for Amazon CloudWatch Logs.
@@ -121,28 +118,30 @@ var newRegionFinder = func() regionFinder {
 // User-Agent string and automatic region detection using the EC2 Instance
 // Metadata Service when region is otherwise unspecified.
 func newAWSLogsClient(ctx logger.Context) (api, error) {
-	config := defaults.DefaultConfig
+	var region *string
+	if os.Getenv(regionEnvKey) != "" {
+		region = aws.String(os.Getenv(regionEnvKey))
+	}
 	if ctx.Config[regionKey] != "" {
-		config = defaults.DefaultConfig.Merge(&aws.Config{
-			Region: aws.String(ctx.Config[regionKey]),
-		})
+		region = aws.String(ctx.Config[regionKey])
 	}
-	if config.Region == nil || *config.Region == "" {
+	if region == nil || *region == "" {
 		logrus.Info("Trying to get region from EC2 Metadata")
 		ec2MetadataClient := newRegionFinder()
-		region, err := ec2MetadataClient.Region()
+		r, err := ec2MetadataClient.Region()
 		if err != nil {
 			logrus.WithFields(logrus.Fields{
 				"error": err,
 			}).Error("Could not get region from EC2 metadata, environment, or log option")
 			return nil, errors.New("Cannot determine region for awslogs driver")
 		}
-		config.Region = &region
+		region = &r
 	}
 	logrus.WithFields(logrus.Fields{
-		"region": *config.Region,
+		"region": *region,
 	}).Debug("Created awslogs client")
-	client := cloudwatchlogs.New(config)
+
+	client := cloudwatchlogs.New(session.New(), aws.NewConfig().WithRegion(*region))
 
 	client.Handlers.Build.PushBackNamed(request.NamedHandler{
 		Name: "DockerUserAgentHandler",

+ 2 - 2
daemon/logger/awslogs/cloudwatchlogs_test.go

@@ -47,8 +47,8 @@ func TestNewAWSLogsClientUserAgentHandler(t *testing.T) {
 		},
 	}
 	buildHandlerList.Run(request)
-	expectedUserAgentString := fmt.Sprintf("Docker %s (%s) %s/%s",
-		dockerversion.Version, runtime.GOOS, aws.SDKName, aws.SDKVersion)
+	expectedUserAgentString := fmt.Sprintf("Docker %s (%s) %s/%s (%s; %s; %s)",
+		dockerversion.Version, runtime.GOOS, aws.SDKName, aws.SDKVersion, runtime.Version(), runtime.GOOS, runtime.GOARCH)
 	userAgent := request.HTTPRequest.Header.Get("User-Agent")
 	if userAgent != expectedUserAgentString {
 		t.Errorf("Wrong User-Agent string, expected \"%s\" but was \"%s\"",

+ 3 - 2
hack/vendor.sh

@@ -123,8 +123,9 @@ clone git github.com/tinylib/msgp 75ee40d2601edf122ef667e2a07d600d4c44490c
 clone git gopkg.in/fsnotify.v1 v1.2.11
 
 # awslogs deps
-clone git github.com/aws/aws-sdk-go v0.9.9
-clone git github.com/vaughan0/go-ini a98ad7ee00ec53921f08832bc06ecf7fd600e6a1
+clone git github.com/aws/aws-sdk-go v1.1.30
+clone git github.com/go-ini/ini 060d7da055ba6ec5ea7a31f116332fe5efa04ce0
+clone git github.com/jmespath/go-jmespath 0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74
 
 # gcplogs deps
 clone git golang.org/x/oauth2 2baa8a1b9338cf13d9eeb27696d761155fa480be https://github.com/golang/oauth2.git

+ 48 - 8
vendor/src/github.com/aws/aws-sdk-go/aws/awserr/error.go

@@ -14,13 +14,13 @@ package awserr
 //     if err != nil {
 //         if awsErr, ok := err.(awserr.Error); ok {
 //             // Get error details
-//             log.Println("Error:", err.Code(), err.Message())
+//             log.Println("Error:", awsErr.Code(), awsErr.Message())
 //
 //             // Prints out full error message, including original error if there was one.
-//             log.Println("Error:", err.Error())
+//             log.Println("Error:", awsErr.Error())
 //
 //             // Get original error
-//             if origErr := err.Err(); origErr != nil {
+//             if origErr := awsErr.OrigErr(); origErr != nil {
 //                 // operate on original error.
 //             }
 //         } else {
@@ -42,15 +42,55 @@ type Error interface {
 	OrigErr() error
 }
 
+// BatchError is a batch of errors which also wraps lower level errors with
+// code, message, and original errors. Calling Error() will include all errors
+// that occured in the batch.
+//
+// Deprecated: Replaced with BatchedErrors. Only defined for backwards
+// compatibility.
+type BatchError interface {
+	// Satisfy the generic error interface.
+	error
+
+	// Returns the short phrase depicting the classification of the error.
+	Code() string
+
+	// Returns the error details message.
+	Message() string
+
+	// Returns the original error if one was set.  Nil is returned if not set.
+	OrigErrs() []error
+}
+
+// BatchedErrors is a batch of errors which also wraps lower level errors with
+// code, message, and original errors. Calling Error() will include all errors
+// that occured in the batch.
+//
+// Replaces BatchError
+type BatchedErrors interface {
+	// Satisfy the base Error interface.
+	Error
+
+	// Returns the original error if one was set.  Nil is returned if not set.
+	OrigErrs() []error
+}
+
 // New returns an Error object described by the code, message, and origErr.
 //
 // If origErr satisfies the Error interface it will not be wrapped within a new
 // Error object and will instead be returned.
 func New(code, message string, origErr error) Error {
-	if e, ok := origErr.(Error); ok && e != nil {
-		return e
+	var errs []error
+	if origErr != nil {
+		errs = append(errs, origErr)
 	}
-	return newBaseError(code, message, origErr)
+	return newBaseError(code, message, errs)
+}
+
+// NewBatchError returns an BatchedErrors with a collection of errors as an
+// array of errors.
+func NewBatchError(code, message string, errs []error) BatchedErrors {
+	return newBaseError(code, message, errs)
 }
 
 // A RequestFailure is an interface to extract request failure information from
@@ -63,9 +103,9 @@ func New(code, message string, origErr error) Error {
 //     output, err := s3manage.Upload(svc, input, opts)
 //     if err != nil {
 //         if reqerr, ok := err.(RequestFailure); ok {
-//             log.Printf("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID())
+//             log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID())
 //         } else {
-//             log.Printf("Error:", err.Error()
+//             log.Println("Error:", err.Error())
 //         }
 //     }
 //

+ 72 - 13
vendor/src/github.com/aws/aws-sdk-go/aws/awserr/types.go

@@ -31,23 +31,27 @@ type baseError struct {
 
 	// Optional original error this error is based off of. Allows building
 	// chained errors.
-	origErr error
+	errs []error
 }
 
-// newBaseError returns an error object for the code, message, and err.
+// newBaseError returns an error object for the code, message, and errors.
 //
 // code is a short no whitespace phrase depicting the classification of
 // the error that is being created.
 //
-// message is the free flow string containing detailed information about the error.
+// message is the free flow string containing detailed information about the
+// error.
 //
-// origErr is the error object which will be nested under the new error to be returned.
-func newBaseError(code, message string, origErr error) *baseError {
-	return &baseError{
+// origErrs is the error objects which will be nested under the new errors to
+// be returned.
+func newBaseError(code, message string, origErrs []error) *baseError {
+	b := &baseError{
 		code:    code,
 		message: message,
-		origErr: origErr,
+		errs:    origErrs,
 	}
+
+	return b
 }
 
 // Error returns the string representation of the error.
@@ -56,7 +60,12 @@ func newBaseError(code, message string, origErr error) *baseError {
 //
 // Satisfies the error interface.
 func (b baseError) Error() string {
-	return SprintError(b.code, b.message, "", b.origErr)
+	size := len(b.errs)
+	if size > 0 {
+		return SprintError(b.code, b.message, "", errorList(b.errs))
+	}
+
+	return SprintError(b.code, b.message, "", nil)
 }
 
 // String returns the string representation of the error.
@@ -75,10 +84,28 @@ func (b baseError) Message() string {
 	return b.message
 }
 
-// OrigErr returns the original error if one was set. Nil is returned if no error
-// was set.
+// OrigErr returns the original error if one was set. Nil is returned if no
+// error was set. This only returns the first element in the list. If the full
+// list is needed, use BatchedErrors.
 func (b baseError) OrigErr() error {
-	return b.origErr
+	switch len(b.errs) {
+	case 0:
+		return nil
+	case 1:
+		return b.errs[0]
+	default:
+		if err, ok := b.errs[0].(Error); ok {
+			return NewBatchError(err.Code(), err.Message(), b.errs[1:])
+		}
+		return NewBatchError("BatchedErrors",
+			"multiple errors occured", b.errs)
+	}
+}
+
+// OrigErrs returns the original errors if one was set. An empty slice is
+// returned if no error was set.
+func (b baseError) OrigErrs() []error {
+	return b.errs
 }
 
 // So that the Error interface type can be included as an anonymous field
@@ -94,8 +121,8 @@ type requestError struct {
 	requestID  string
 }
 
-// newRequestError returns a wrapped error with additional information for request
-// status code, and service requestID.
+// newRequestError returns a wrapped error with additional information for
+// request status code, and service requestID.
 //
 // Should be used to wrap all request which involve service requests. Even if
 // the request failed without a service response, but had an HTTP status code
@@ -133,3 +160,35 @@ func (r requestError) StatusCode() int {
 func (r requestError) RequestID() string {
 	return r.requestID
 }
+
+// OrigErrs returns the original errors if one was set. An empty slice is
+// returned if no error was set.
+func (r requestError) OrigErrs() []error {
+	if b, ok := r.awsError.(BatchedErrors); ok {
+		return b.OrigErrs()
+	}
+	return []error{r.OrigErr()}
+}
+
+// An error list that satisfies the golang interface
+type errorList []error
+
+// Error returns the string representation of the error.
+//
+// Satisfies the error interface.
+func (e errorList) Error() string {
+	msg := ""
+	// How do we want to handle the array size being zero
+	if size := len(e); size > 0 {
+		for i := 0; i < size; i++ {
+			msg += fmt.Sprintf("%s", e[i].Error())
+			// We check the next index to see if it is within the slice.
+			// If it is, then we append a newline. We do this, because unit tests
+			// could be broken with the additional '\n'
+			if i+1 < size {
+				msg += "\n"
+			}
+		}
+	}
+	return msg
+}

+ 4 - 7
vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go

@@ -57,16 +57,13 @@ func rcopy(dst, src reflect.Value, root bool) {
 			}
 		}
 	case reflect.Struct:
-		if !root {
-			dst.Set(reflect.New(src.Type()).Elem())
-		}
-
 		t := dst.Type()
 		for i := 0; i < t.NumField(); i++ {
 			name := t.Field(i).Name
-			srcval := src.FieldByName(name)
-			if srcval.IsValid() {
-				rcopy(dst.FieldByName(name), srcval, false)
+			srcVal := src.FieldByName(name)
+			dstVal := dst.FieldByName(name)
+			if srcVal.IsValid() && dstVal.CanSet() {
+				rcopy(dstVal, srcVal, false)
 			}
 		}
 	case reflect.Slice:

+ 27 - 0
vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/equal.go

@@ -0,0 +1,27 @@
+package awsutil
+
+import (
+	"reflect"
+)
+
+// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual.
+// In addition to this, this method will also dereference the input values if
+// possible so the DeepEqual performed will not fail if one parameter is a
+// pointer and the other is not.
+//
+// DeepEqual will not perform indirection of nested values of the input parameters.
+func DeepEqual(a, b interface{}) bool {
+	ra := reflect.Indirect(reflect.ValueOf(a))
+	rb := reflect.Indirect(reflect.ValueOf(b))
+
+	if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid {
+		// If the elements are both nil, and of the same type the are equal
+		// If they are of different types they are not equal
+		return reflect.TypeOf(a) == reflect.TypeOf(b)
+	} else if raValid != rbValid {
+		// Both values must be valid to be equal
+		return false
+	}
+
+	return reflect.DeepEqual(ra.Interface(), rb.Interface())
+}

+ 70 - 35
vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go

@@ -5,18 +5,20 @@ import (
 	"regexp"
 	"strconv"
 	"strings"
+
+	"github.com/jmespath/go-jmespath"
 )
 
 var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`)
 
 // rValuesAtPath returns a slice of values found in value v. The values
 // in v are explored recursively so all nested values are collected.
-func rValuesAtPath(v interface{}, path string, create bool, caseSensitive bool) []reflect.Value {
+func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value {
 	pathparts := strings.Split(path, "||")
 	if len(pathparts) > 1 {
 		for _, pathpart := range pathparts {
-			vals := rValuesAtPath(v, pathpart, create, caseSensitive)
-			if vals != nil && len(vals) > 0 {
+			vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm)
+			if len(vals) > 0 {
 				return vals
 			}
 		}
@@ -74,7 +76,16 @@ func rValuesAtPath(v interface{}, path string, create bool, caseSensitive bool)
 				return false
 			})
 
-			if create && value.Kind() == reflect.Ptr && value.IsNil() {
+			if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 {
+				if !value.IsNil() {
+					value.Set(reflect.Zero(value.Type()))
+				}
+				return []reflect.Value{value}
+			}
+
+			if createPath && value.Kind() == reflect.Ptr && value.IsNil() {
+				// TODO if the value is the terminus it should not be created
+				// if the value to be set to its position is nil.
 				value.Set(reflect.New(value.Type().Elem()))
 				value = value.Elem()
 			} else {
@@ -82,7 +93,7 @@ func rValuesAtPath(v interface{}, path string, create bool, caseSensitive bool)
 			}
 
 			if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
-				if !create && value.IsNil() {
+				if !createPath && value.IsNil() {
 					value = reflect.ValueOf(nil)
 				}
 			}
@@ -114,7 +125,7 @@ func rValuesAtPath(v interface{}, path string, create bool, caseSensitive bool)
 				// pull out index
 				i := int(*index)
 				if i >= value.Len() { // check out of bounds
-					if create {
+					if createPath {
 						// TODO resize slice
 					} else {
 						continue
@@ -125,7 +136,7 @@ func rValuesAtPath(v interface{}, path string, create bool, caseSensitive bool)
 				value = reflect.Indirect(value.Index(i))
 
 				if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
-					if !create && value.IsNil() {
+					if !createPath && value.IsNil() {
 						value = reflect.ValueOf(nil)
 					}
 				}
@@ -142,46 +153,70 @@ func rValuesAtPath(v interface{}, path string, create bool, caseSensitive bool)
 	return values
 }
 
-// ValuesAtPath returns a list of objects at the lexical path inside of a structure
-func ValuesAtPath(i interface{}, path string) []interface{} {
-	if rvals := rValuesAtPath(i, path, false, true); rvals != nil {
-		vals := make([]interface{}, len(rvals))
-		for i, rval := range rvals {
-			vals[i] = rval.Interface()
-		}
-		return vals
+// ValuesAtPath returns a list of values at the case insensitive lexical
+// path inside of a structure.
+func ValuesAtPath(i interface{}, path string) ([]interface{}, error) {
+	result, err := jmespath.Search(path, i)
+	if err != nil {
+		return nil, err
 	}
-	return nil
-}
 
-// ValuesAtAnyPath returns a list of objects at the case-insensitive lexical
-// path inside of a structure
-func ValuesAtAnyPath(i interface{}, path string) []interface{} {
-	if rvals := rValuesAtPath(i, path, false, false); rvals != nil {
-		vals := make([]interface{}, len(rvals))
-		for i, rval := range rvals {
-			vals[i] = rval.Interface()
+	v := reflect.ValueOf(result)
+	if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) {
+		return nil, nil
+	}
+	if s, ok := result.([]interface{}); ok {
+		return s, err
+	}
+	if v.Kind() == reflect.Map && v.Len() == 0 {
+		return nil, nil
+	}
+	if v.Kind() == reflect.Slice {
+		out := make([]interface{}, v.Len())
+		for i := 0; i < v.Len(); i++ {
+			out[i] = v.Index(i).Interface()
 		}
-		return vals
+		return out, nil
 	}
-	return nil
+
+	return []interface{}{result}, nil
 }
 
-// SetValueAtPath sets an object at the lexical path inside of a structure
+// SetValueAtPath sets a value at the case insensitive lexical path inside
+// of a structure.
 func SetValueAtPath(i interface{}, path string, v interface{}) {
-	if rvals := rValuesAtPath(i, path, true, true); rvals != nil {
+	if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil {
 		for _, rval := range rvals {
-			rval.Set(reflect.ValueOf(v))
+			if rval.Kind() == reflect.Ptr && rval.IsNil() {
+				continue
+			}
+			setValue(rval, v)
 		}
 	}
 }
 
-// SetValueAtAnyPath sets an object at the case insensitive lexical path inside
-// of a structure
-func SetValueAtAnyPath(i interface{}, path string, v interface{}) {
-	if rvals := rValuesAtPath(i, path, true, false); rvals != nil {
-		for _, rval := range rvals {
-			rval.Set(reflect.ValueOf(v))
+func setValue(dstVal reflect.Value, src interface{}) {
+	if dstVal.Kind() == reflect.Ptr {
+		dstVal = reflect.Indirect(dstVal)
+	}
+	srcVal := reflect.ValueOf(src)
+
+	if !srcVal.IsValid() { // src is literal nil
+		if dstVal.CanAddr() {
+			// Convert to pointer so that pointer's value can be nil'ed
+			//                     dstVal = dstVal.Addr()
+		}
+		dstVal.Set(reflect.Zero(dstVal.Type()))
+
+	} else if srcVal.Kind() == reflect.Ptr {
+		if srcVal.IsNil() {
+			srcVal = reflect.Zero(dstVal.Type())
+		} else {
+			srcVal = reflect.ValueOf(src).Elem()
 		}
+		dstVal.Set(srcVal)
+	} else {
+		dstVal.Set(srcVal)
 	}
+
 }

+ 4 - 0
vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go

@@ -91,6 +91,10 @@ func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
 
 		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
 	default:
+		if !v.IsValid() {
+			fmt.Fprint(buf, "<invalid value>")
+			return
+		}
 		format := "%v"
 		switch v.Interface().(type) {
 		case string:

+ 89 - 0
vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go

@@ -0,0 +1,89 @@
+package awsutil
+
+import (
+	"bytes"
+	"fmt"
+	"reflect"
+	"strings"
+)
+
+// StringValue returns the string representation of a value.
+func StringValue(i interface{}) string {
+	var buf bytes.Buffer
+	stringValue(reflect.ValueOf(i), 0, &buf)
+	return buf.String()
+}
+
+func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) {
+	for v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+
+	switch v.Kind() {
+	case reflect.Struct:
+		buf.WriteString("{\n")
+
+		names := []string{}
+		for i := 0; i < v.Type().NumField(); i++ {
+			name := v.Type().Field(i).Name
+			f := v.Field(i)
+			if name[0:1] == strings.ToLower(name[0:1]) {
+				continue // ignore unexported fields
+			}
+			if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() {
+				continue // ignore unset fields
+			}
+			names = append(names, name)
+		}
+
+		for i, n := range names {
+			val := v.FieldByName(n)
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(n + ": ")
+			stringValue(val, indent+2, buf)
+
+			if i < len(names)-1 {
+				buf.WriteString(",\n")
+			}
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	case reflect.Slice:
+		nl, id, id2 := "", "", ""
+		if v.Len() > 3 {
+			nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+		}
+		buf.WriteString("[" + nl)
+		for i := 0; i < v.Len(); i++ {
+			buf.WriteString(id2)
+			stringValue(v.Index(i), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString("," + nl)
+			}
+		}
+
+		buf.WriteString(nl + id + "]")
+	case reflect.Map:
+		buf.WriteString("{\n")
+
+		for i, k := range v.MapKeys() {
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(k.String() + ": ")
+			stringValue(v.MapIndex(k), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString(",\n")
+			}
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	default:
+		format := "%v"
+		switch v.Interface().(type) {
+		case string:
+			format = "%q"
+		}
+		fmt.Fprintf(buf, format, v.Interface())
+	}
+}

+ 120 - 0
vendor/src/github.com/aws/aws-sdk-go/aws/client/client.go

@@ -0,0 +1,120 @@
+package client
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net/http/httputil"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// A Config provides configuration to a service client instance.
+type Config struct {
+	Config                  *aws.Config
+	Handlers                request.Handlers
+	Endpoint, SigningRegion string
+}
+
+// ConfigProvider provides a generic way for a service client to receive
+// the ClientConfig without circular dependencies.
+type ConfigProvider interface {
+	ClientConfig(serviceName string, cfgs ...*aws.Config) Config
+}
+
+// A Client implements the base client request and response handling
+// used by all service clients.
+type Client struct {
+	request.Retryer
+	metadata.ClientInfo
+
+	Config   aws.Config
+	Handlers request.Handlers
+}
+
+// New will return a pointer to a new initialized service client.
+func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client {
+	svc := &Client{
+		Config:     cfg,
+		ClientInfo: info,
+		Handlers:   handlers,
+	}
+
+	switch retryer, ok := cfg.Retryer.(request.Retryer); {
+	case ok:
+		svc.Retryer = retryer
+	case cfg.Retryer != nil && cfg.Logger != nil:
+		s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer)
+		cfg.Logger.Log(s)
+		fallthrough
+	default:
+		maxRetries := aws.IntValue(cfg.MaxRetries)
+		if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
+			maxRetries = 3
+		}
+		svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries}
+	}
+
+	svc.AddDebugHandlers()
+
+	for _, option := range options {
+		option(svc)
+	}
+
+	return svc
+}
+
+// NewRequest returns a new Request pointer for the service API
+// operation and parameters.
+func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request {
+	return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data)
+}
+
+// AddDebugHandlers injects debug logging handlers into the service to log request
+// debug information.
+func (c *Client) AddDebugHandlers() {
+	if !c.Config.LogLevel.AtLeast(aws.LogDebug) {
+		return
+	}
+
+	c.Handlers.Send.PushFront(logRequest)
+	c.Handlers.Send.PushBack(logResponse)
+}
+
+const logReqMsg = `DEBUG: Request %s/%s Details:
+---[ REQUEST POST-SIGN ]-----------------------------
+%s
+-----------------------------------------------------`
+
+func logRequest(r *request.Request) {
+	logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
+	dumpedBody, _ := httputil.DumpRequestOut(r.HTTPRequest, logBody)
+
+	if logBody {
+		// Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's
+		// Body as a NoOpCloser and will not be reset after read by the HTTP
+		// client reader.
+		r.Body.Seek(r.BodyStart, 0)
+		r.HTTPRequest.Body = ioutil.NopCloser(r.Body)
+	}
+
+	r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody)))
+}
+
+const logRespMsg = `DEBUG: Response %s/%s Details:
+---[ RESPONSE ]--------------------------------------
+%s
+-----------------------------------------------------`
+
+func logResponse(r *request.Request) {
+	var msg = "no response data"
+	if r.HTTPResponse != nil {
+		logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
+		dumpedBody, _ := httputil.DumpResponse(r.HTTPResponse, logBody)
+		msg = string(dumpedBody)
+	} else if r.Error != nil {
+		msg = r.Error.Error()
+	}
+	r.Config.Logger.Log(fmt.Sprintf(logRespMsg, r.ClientInfo.ServiceName, r.Operation.Name, msg))
+}

+ 90 - 0
vendor/src/github.com/aws/aws-sdk-go/aws/client/default_retryer.go

@@ -0,0 +1,90 @@
+package client
+
+import (
+	"math/rand"
+	"sync"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// DefaultRetryer implements basic retry logic using exponential backoff for
+// most services. If you want to implement custom retry logic, implement the
+// request.Retryer interface or create a structure type that composes this
+// struct and override the specific methods. For example, to override only
+// the MaxRetries method:
+//
+//		type retryer struct {
+//      service.DefaultRetryer
+//    }
+//
+//    // This implementation always has 100 max retries
+//    func (d retryer) MaxRetries() uint { return 100 }
+type DefaultRetryer struct {
+	NumMaxRetries int
+}
+
+// MaxRetries returns the maximum number of retries the service will make for
+// an individual API request.
+func (d DefaultRetryer) MaxRetries() int {
+	return d.NumMaxRetries
+}
+
+var seededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())})
+
+// RetryRules returns the delay duration before retrying this request again
+func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
+	// Set the upper limit of delay in retrying at ~five minutes
+	minTime := 30
+	throttle := d.shouldThrottle(r)
+	if throttle {
+		minTime = 500
+	}
+
+	retryCount := r.RetryCount
+	if retryCount > 13 {
+		retryCount = 13
+	} else if throttle && retryCount > 8 {
+		retryCount = 8
+	}
+
+	delay := (1 << uint(retryCount)) * (seededRand.Intn(minTime) + minTime)
+	return time.Duration(delay) * time.Millisecond
+}
+
+// ShouldRetry returns true if the request should be retried.
+func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
+	if r.HTTPResponse.StatusCode >= 500 {
+		return true
+	}
+	return r.IsErrorRetryable() || d.shouldThrottle(r)
+}
+
+// ShouldThrottle returns true if the request should be throttled.
+func (d DefaultRetryer) shouldThrottle(r *request.Request) bool {
+	if r.HTTPResponse.StatusCode == 502 ||
+		r.HTTPResponse.StatusCode == 503 ||
+		r.HTTPResponse.StatusCode == 504 {
+		return true
+	}
+	return r.IsErrorThrottle()
+}
+
+// lockedSource is a thread-safe implementation of rand.Source
+type lockedSource struct {
+	lk  sync.Mutex
+	src rand.Source
+}
+
+func (r *lockedSource) Int63() (n int64) {
+	r.lk.Lock()
+	n = r.src.Int63()
+	r.lk.Unlock()
+	return
+}
+
+func (r *lockedSource) Seed(seed int64) {
+	r.lk.Lock()
+	r.src.Seed(seed)
+	r.lk.Unlock()
+}

+ 12 - 0
vendor/src/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go

@@ -0,0 +1,12 @@
+package metadata
+
+// ClientInfo wraps immutable data from the client.Client structure.
+type ClientInfo struct {
+	ServiceName   string
+	APIVersion    string
+	Endpoint      string
+	SigningName   string
+	SigningRegion string
+	JSONVersion   string
+	TargetPrefix  string
+}

+ 135 - 16
vendor/src/github.com/aws/aws-sdk-go/aws/config.go

@@ -7,15 +7,25 @@ import (
 	"github.com/aws/aws-sdk-go/aws/credentials"
 )
 
-// The default number of retries for a service. The value of -1 indicates that
-// the service specific retry default will be used.
-const DefaultRetries = -1
+// UseServiceDefaultRetries instructs the config to use the service's own default
+// number of retries. This will be the default action if Config.MaxRetries
+// is nil also.
+const UseServiceDefaultRetries = -1
+
+// RequestRetryer is an alias for a type that implements the request.Retryer interface.
+type RequestRetryer interface{}
 
 // A Config provides service configuration for service clients. By default,
 // all clients will use the {defaults.DefaultConfig} structure.
 type Config struct {
+	// Enables verbose error printing of all credential chain errors.
+	// Should be used when wanting to see all errors while attempting to retreive
+	// credentials.
+	CredentialsChainVerboseErrors *bool
+
 	// The credentials object to use when signing requests. Defaults to
-	// {defaults.DefaultChainCredentials}.
+	// a chain of credential providers to search for credentials in environment
+	// variables, shared credential file, and EC2 Instance Roles.
 	Credentials *credentials.Credentials
 
 	// An optional endpoint URL (hostname only or fully qualified URI)
@@ -57,6 +67,21 @@ type Config struct {
 	// configuration.
 	MaxRetries *int
 
+	// Retryer guides how HTTP requests should be retried in case of recoverable failures.
+	//
+	// When nil or the value does not implement the request.Retryer interface,
+	// the request.DefaultRetryer will be used.
+	//
+	// When both Retryer and MaxRetries are non-nil, the former is used and
+	// the latter ignored.
+	//
+	// To set the Retryer field in a type-safe manner and with chaining, use
+	// the request.WithRetryer helper function:
+	//
+	//   cfg := request.WithRetryer(aws.NewConfig(), myRetryer)
+	//
+	Retryer RequestRetryer
+
 	// Disables semantic parameter validation, which validates input for missing
 	// required fields and/or other semantic request input errors.
 	DisableParamValidation *bool
@@ -75,6 +100,45 @@ type Config struct {
 	//   Amazon S3: Virtual Hosting of Buckets
 	S3ForcePathStyle *bool
 
+	// Set this to `true` to disable the SDK adding the `Expect: 100-Continue`
+	// header to PUT requests over 2MB of content. 100-Continue instructs the
+	// HTTP client not to send the body until the service responds with a
+	// `continue` status. This is useful to prevent sending the request body
+	// until after the request is authenticated, and validated.
+	//
+	// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
+	//
+	// 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s
+	// `ExpectContinueTimeout` for information on adjusting the continue wait timeout.
+	// https://golang.org/pkg/net/http/#Transport
+	//
+	// You should use this flag to disable 100-Continue if you experience issues
+	// with proxies or third party S3 compatible services.
+	S3Disable100Continue *bool
+
+	// Set this to `true` to enable S3 Accelerate feature. For all operations compatible
+	// with S3 Accelerate will use the accelerate endpoint for requests. Requests not compatible
+	// will fall back to normal S3 requests.
+	//
+	// The bucket must be enabled for accelerate to be used with S3 client with accelerate
+	// enabled. If the bucket is not enabled for accelerate an error will be returned.
+	// The bucket name must be DNS compatible to also work with accelerate.
+	S3UseAccelerate *bool
+
+	// Set this to `true` to disable the EC2Metadata client from overriding the
+	// default http.Client's Timeout. This is helpful if you do not want the EC2Metadata
+	// client to create a new http.Client. This option is only meaningful if you're not
+	// already using a custom HTTP client with the SDK. Enabled by default.
+	//
+	// Must be set and provided to the session.New() in order to disable the EC2Metadata
+	// overriding the timeout for default credentials chain.
+	//
+	// Example:
+	//    sess := session.New(aws.NewConfig().WithEC2MetadataDisableTimeoutOverride(true))
+	//    svc := s3.New(sess)
+	//
+	EC2MetadataDisableTimeoutOverride *bool
+
 	SleepDelay func(time.Duration)
 }
 
@@ -87,6 +151,13 @@ func NewConfig() *Config {
 	return &Config{}
 }
 
+// WithCredentialsChainVerboseErrors sets a config verbose errors boolean and returning
+// a Config pointer.
+func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config {
+	c.CredentialsChainVerboseErrors = &verboseErrs
+	return c
+}
+
 // WithCredentials sets a config Credentials value returning a Config pointer
 // for chaining.
 func (c *Config) WithCredentials(creds *credentials.Credentials) *Config {
@@ -164,6 +235,27 @@ func (c *Config) WithS3ForcePathStyle(force bool) *Config {
 	return c
 }
 
+// WithS3Disable100Continue sets a config S3Disable100Continue value returning
+// a Config pointer for chaining.
+func (c *Config) WithS3Disable100Continue(disable bool) *Config {
+	c.S3Disable100Continue = &disable
+	return c
+}
+
+// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config
+// pointer for chaining.
+func (c *Config) WithS3UseAccelerate(enable bool) *Config {
+	c.S3UseAccelerate = &enable
+	return c
+}
+
+// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value
+// returning a Config pointer for chaining.
+func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config {
+	c.EC2MetadataDisableTimeoutOverride = &enable
+	return c
+}
+
 // WithSleepDelay overrides the function used to sleep while waiting for the
 // next retry. Defaults to time.Sleep.
 func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config {
@@ -171,15 +263,21 @@ func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config {
 	return c
 }
 
-// Merge returns a new Config with the other Config's attribute values merged into
-// this Config. If the other Config's attribute is nil it will not be merged into
-// the new Config to be returned.
-func (c Config) Merge(other *Config) *Config {
+// MergeIn merges the passed in configs into the existing config object.
+func (c *Config) MergeIn(cfgs ...*Config) {
+	for _, other := range cfgs {
+		mergeInConfig(c, other)
+	}
+}
+
+func mergeInConfig(dst *Config, other *Config) {
 	if other == nil {
-		return &c
+		return
 	}
 
-	dst := c
+	if other.CredentialsChainVerboseErrors != nil {
+		dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors
+	}
 
 	if other.Credentials != nil {
 		dst.Credentials = other.Credentials
@@ -213,6 +311,10 @@ func (c Config) Merge(other *Config) *Config {
 		dst.MaxRetries = other.MaxRetries
 	}
 
+	if other.Retryer != nil {
+		dst.Retryer = other.Retryer
+	}
+
 	if other.DisableParamValidation != nil {
 		dst.DisableParamValidation = other.DisableParamValidation
 	}
@@ -225,15 +327,32 @@ func (c Config) Merge(other *Config) *Config {
 		dst.S3ForcePathStyle = other.S3ForcePathStyle
 	}
 
+	if other.S3Disable100Continue != nil {
+		dst.S3Disable100Continue = other.S3Disable100Continue
+	}
+
+	if other.S3UseAccelerate != nil {
+		dst.S3UseAccelerate = other.S3UseAccelerate
+	}
+
+	if other.EC2MetadataDisableTimeoutOverride != nil {
+		dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride
+	}
+
 	if other.SleepDelay != nil {
 		dst.SleepDelay = other.SleepDelay
 	}
-
-	return &dst
 }
 
-// Copy will return a shallow copy of the Config object.
-func (c Config) Copy() *Config {
-	dst := c
-	return &dst
+// Copy will return a shallow copy of the Config object. If any additional
+// configurations are provided they will be merged into the new config returned.
+func (c *Config) Copy(cfgs ...*Config) *Config {
+	dst := &Config{}
+	dst.MergeIn(c)
+
+	for _, cfg := range cfgs {
+		dst.MergeIn(cfg)
+	}
+
+	return dst
 }

+ 12 - 0
vendor/src/github.com/aws/aws-sdk-go/aws/convert_types.go

@@ -311,6 +311,18 @@ func TimeValue(v *time.Time) time.Time {
 	return time.Time{}
 }
 
+// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC".
+// The result is undefined if the Unix time cannot be represented by an int64.
+// This includes calling TimeUnixMilli on a zero Time, which is undefined.
+//
+// This utility is useful for service API's such as CloudWatch Logs which require
+// their unix time values to be in milliseconds.
+//
+// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information.
+func TimeUnixMilli(t time.Time) int64 {
+	return t.UnixNano() / int64(time.Millisecond/time.Nanosecond)
+}
+
 // TimeSlice converts a slice of time.Time values into a slice of
 // time.Time pointers
 func TimeSlice(src []time.Time) []*time.Time {

+ 50 - 34
vendor/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go

@@ -8,6 +8,7 @@ import (
 	"net/http"
 	"net/url"
 	"regexp"
+	"runtime"
 	"strconv"
 
 	"github.com/aws/aws-sdk-go/aws"
@@ -20,47 +21,62 @@ type lener interface {
 	Len() int
 }
 
-// BuildContentLength builds the content length of a request based on the body,
+// BuildContentLengthHandler builds the content length of a request based on the body,
 // or will use the HTTPRequest.Header's "Content-Length" if defined. If unable
 // to determine request body length and no "Content-Length" was specified it will panic.
-var BuildContentLengthHandler = request.NamedHandler{"core.BuildContentLengthHandler", func(r *request.Request) {
+//
+// The Content-Length will only be added to the request if the length of the body
+// is greater than 0. If the body is empty or the current `Content-Length`
+// header is <= 0, the header will also be stripped.
+var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) {
+	var length int64
+
 	if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" {
-		length, _ := strconv.ParseInt(slength, 10, 64)
-		r.HTTPRequest.ContentLength = length
-		return
+		length, _ = strconv.ParseInt(slength, 10, 64)
+	} else {
+		switch body := r.Body.(type) {
+		case nil:
+			length = 0
+		case lener:
+			length = int64(body.Len())
+		case io.Seeker:
+			r.BodyStart, _ = body.Seek(0, 1)
+			end, _ := body.Seek(0, 2)
+			body.Seek(r.BodyStart, 0) // make sure to seek back to original location
+			length = end - r.BodyStart
+		default:
+			panic("Cannot get length of body, must provide `ContentLength`")
+		}
 	}
 
-	var length int64
-	switch body := r.Body.(type) {
-	case nil:
-		length = 0
-	case lener:
-		length = int64(body.Len())
-	case io.Seeker:
-		r.BodyStart, _ = body.Seek(0, 1)
-		end, _ := body.Seek(0, 2)
-		body.Seek(r.BodyStart, 0) // make sure to seek back to original location
-		length = end - r.BodyStart
-	default:
-		panic("Cannot get length of body, must provide `ContentLength`")
+	if length > 0 {
+		r.HTTPRequest.ContentLength = length
+		r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length))
+	} else {
+		r.HTTPRequest.ContentLength = 0
+		r.HTTPRequest.Header.Del("Content-Length")
 	}
-
-	r.HTTPRequest.ContentLength = length
-	r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length))
 }}
 
-// UserAgentHandler is a request handler for injecting User agent into requests.
-var UserAgentHandler = request.NamedHandler{"core.UserAgentHandler", func(r *request.Request) {
-	r.HTTPRequest.Header.Set("User-Agent", aws.SDKName+"/"+aws.SDKVersion)
-}}
+// SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent.
+var SDKVersionUserAgentHandler = request.NamedHandler{
+	Name: "core.SDKVersionUserAgentHandler",
+	Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion,
+		runtime.Version(), runtime.GOOS, runtime.GOARCH),
+}
 
 var reStatusCode = regexp.MustCompile(`^(\d{3})`)
 
 // SendHandler is a request handler to send service request using HTTP client.
-var SendHandler = request.NamedHandler{"core.SendHandler", func(r *request.Request) {
+var SendHandler = request.NamedHandler{Name: "core.SendHandler", Fn: func(r *request.Request) {
 	var err error
-	r.HTTPResponse, err = r.Service.Config.HTTPClient.Do(r.HTTPRequest)
+	r.HTTPResponse, err = r.Config.HTTPClient.Do(r.HTTPRequest)
 	if err != nil {
+		// Prevent leaking if an HTTPResponse was returned. Clean up
+		// the body.
+		if r.HTTPResponse != nil {
+			r.HTTPResponse.Body.Close()
+		}
 		// Capture the case where url.Error is returned for error processing
 		// response. e.g. 301 without location header comes back as string
 		// error and r.HTTPResponse is nil. Other url redirect errors will
@@ -92,7 +108,7 @@ var SendHandler = request.NamedHandler{"core.SendHandler", func(r *request.Reque
 }}
 
 // ValidateResponseHandler is a request handler to validate service response.
-var ValidateResponseHandler = request.NamedHandler{"core.ValidateResponseHandler", func(r *request.Request) {
+var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) {
 	if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 {
 		// this may be replaced by an UnmarshalError handler
 		r.Error = awserr.New("UnknownError", "unknown error", nil)
@@ -101,7 +117,7 @@ var ValidateResponseHandler = request.NamedHandler{"core.ValidateResponseHandler
 
 // AfterRetryHandler performs final checks to determine if the request should
 // be retried and how long to delay.
-var AfterRetryHandler = request.NamedHandler{"core.AfterRetryHandler", func(r *request.Request) {
+var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) {
 	// If one of the other handlers already set the retry state
 	// we don't want to override it based on the service's state
 	if r.Retryable == nil {
@@ -110,13 +126,13 @@ var AfterRetryHandler = request.NamedHandler{"core.AfterRetryHandler", func(r *r
 
 	if r.WillRetry() {
 		r.RetryDelay = r.RetryRules(r)
-		r.Service.Config.SleepDelay(r.RetryDelay)
+		r.Config.SleepDelay(r.RetryDelay)
 
 		// when the expired token exception occurs the credentials
 		// need to be expired locally so that the next request to
 		// get credentials will trigger a credentials refresh.
 		if r.IsErrorExpired() {
-			r.Service.Config.Credentials.Expire()
+			r.Config.Credentials.Expire()
 		}
 
 		r.RetryCount++
@@ -127,10 +143,10 @@ var AfterRetryHandler = request.NamedHandler{"core.AfterRetryHandler", func(r *r
 // ValidateEndpointHandler is a request handler to validate a request had the
 // appropriate Region and Endpoint set. Will set r.Error if the endpoint or
 // region is not valid.
-var ValidateEndpointHandler = request.NamedHandler{"core.ValidateEndpointHandler", func(r *request.Request) {
-	if r.Service.SigningRegion == "" && aws.StringValue(r.Service.Config.Region) == "" {
+var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) {
+	if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" {
 		r.Error = aws.ErrMissingRegion
-	} else if r.Service.Endpoint == "" {
+	} else if r.ClientInfo.Endpoint == "" {
 		r.Error = aws.ErrMissingEndpoint
 	}
 }}

+ 8 - 135
vendor/src/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go

@@ -1,144 +1,17 @@
 package corehandlers
 
-import (
-	"fmt"
-	"reflect"
-	"strconv"
-	"strings"
+import "github.com/aws/aws-sdk-go/aws/request"
 
-	"github.com/aws/aws-sdk-go/aws/awserr"
-	"github.com/aws/aws-sdk-go/aws/request"
-)
-
-// ValidateParameters is a request handler to validate the input parameters.
+// ValidateParametersHandler is a request handler to validate the input parameters.
 // Validating parameters only has meaning if done prior to the request being sent.
-var ValidateParametersHandler = request.NamedHandler{"core.ValidateParametersHandler", func(r *request.Request) {
-	if r.ParamsFilled() {
-		v := validator{errors: []string{}}
-		v.validateAny(reflect.ValueOf(r.Params), "")
-
-		if count := len(v.errors); count > 0 {
-			format := "%d validation errors:\n- %s"
-			msg := fmt.Sprintf(format, count, strings.Join(v.errors, "\n- "))
-			r.Error = awserr.New("InvalidParameter", msg, nil)
-		}
-	}
-}}
-
-// A validator validates values. Collects validations errors which occurs.
-type validator struct {
-	errors []string
-}
-
-// validateAny will validate any struct, slice or map type. All validations
-// are also performed recursively for nested types.
-func (v *validator) validateAny(value reflect.Value, path string) {
-	value = reflect.Indirect(value)
-	if !value.IsValid() {
+var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) {
+	if !r.ParamsFilled() {
 		return
 	}
 
-	switch value.Kind() {
-	case reflect.Struct:
-		v.validateStruct(value, path)
-	case reflect.Slice:
-		for i := 0; i < value.Len(); i++ {
-			v.validateAny(value.Index(i), path+fmt.Sprintf("[%d]", i))
-		}
-	case reflect.Map:
-		for _, n := range value.MapKeys() {
-			v.validateAny(value.MapIndex(n), path+fmt.Sprintf("[%q]", n.String()))
-		}
-	}
-}
-
-// validateStruct will validate the struct value's fields. If the structure has
-// nested types those types will be validated also.
-func (v *validator) validateStruct(value reflect.Value, path string) {
-	prefix := "."
-	if path == "" {
-		prefix = ""
-	}
-
-	for i := 0; i < value.Type().NumField(); i++ {
-		f := value.Type().Field(i)
-		if strings.ToLower(f.Name[0:1]) == f.Name[0:1] {
-			continue
-		}
-		fvalue := value.FieldByName(f.Name)
-
-		err := validateField(f, fvalue, validateFieldRequired, validateFieldMin)
-		if err != nil {
-			v.errors = append(v.errors, fmt.Sprintf("%s: %s", err.Error(), path+prefix+f.Name))
-			continue
-		}
-
-		v.validateAny(fvalue, path+prefix+f.Name)
-	}
-}
-
-type validatorFunc func(f reflect.StructField, fvalue reflect.Value) error
-
-func validateField(f reflect.StructField, fvalue reflect.Value, funcs ...validatorFunc) error {
-	for _, fn := range funcs {
-		if err := fn(f, fvalue); err != nil {
-			return err
+	if v, ok := r.Params.(request.Validator); ok {
+		if err := v.Validate(); err != nil {
+			r.Error = err
 		}
 	}
-	return nil
-}
-
-// Validates that a field has a valid value provided for required fields.
-func validateFieldRequired(f reflect.StructField, fvalue reflect.Value) error {
-	if f.Tag.Get("required") == "" {
-		return nil
-	}
-
-	switch fvalue.Kind() {
-	case reflect.Ptr, reflect.Slice, reflect.Map:
-		if fvalue.IsNil() {
-			return fmt.Errorf("missing required parameter")
-		}
-	default:
-		if !fvalue.IsValid() {
-			return fmt.Errorf("missing required parameter")
-		}
-	}
-	return nil
-}
-
-// Validates that if a value is provided for a field, that value must be at
-// least a minimum length.
-func validateFieldMin(f reflect.StructField, fvalue reflect.Value) error {
-	minStr := f.Tag.Get("min")
-	if minStr == "" {
-		return nil
-	}
-	min, _ := strconv.ParseInt(minStr, 10, 64)
-
-	kind := fvalue.Kind()
-	if kind == reflect.Ptr {
-		if fvalue.IsNil() {
-			return nil
-		}
-		fvalue = fvalue.Elem()
-	}
-
-	switch fvalue.Kind() {
-	case reflect.String:
-		if int64(fvalue.Len()) < min {
-			return fmt.Errorf("field too short, minimum length %d", min)
-		}
-	case reflect.Slice, reflect.Map:
-		if fvalue.IsNil() {
-			return nil
-		}
-		if int64(fvalue.Len()) < min {
-			return fmt.Errorf("field too short, minimum length %d", min)
-		}
-
-		// TODO min can also apply to number minimum value.
-
-	}
-	return nil
-}
+}}

+ 23 - 8
vendor/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go

@@ -8,8 +8,14 @@ var (
 	// ErrNoValidProvidersFoundInChain Is returned when there are no valid
 	// providers in the ChainProvider.
 	//
+	// This has been deprecated. For verbose error messaging set
+	// aws.Config.CredentialsChainVerboseErrors to true
+	//
 	// @readonly
-	ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders", "no valid providers in chain", nil)
+	ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders",
+		`no valid providers in chain. Deprecated. 
+	For verbose messaging see aws.Config.CredentialsChainVerboseErrors`,
+		nil)
 )
 
 // A ChainProvider will search for a provider which returns credentials
@@ -36,15 +42,18 @@ var (
 //     creds := NewChainCredentials(
 //         []Provider{
 //             &EnvProvider{},
-//             &EC2RoleProvider{},
+//             &EC2RoleProvider{
+//                 Client: ec2metadata.New(sess),
+//             },
 //         })
 //
 //     // Usage of ChainCredentials with aws.Config
 //     svc := ec2.New(&aws.Config{Credentials: creds})
 //
 type ChainProvider struct {
-	Providers []Provider
-	curr      Provider
+	Providers     []Provider
+	curr          Provider
+	VerboseErrors bool
 }
 
 // NewChainCredentials returns a pointer to a new Credentials object
@@ -61,17 +70,23 @@ func NewChainCredentials(providers []Provider) *Credentials {
 // If a provider is found it will be cached and any calls to IsExpired()
 // will return the expired state of the cached provider.
 func (c *ChainProvider) Retrieve() (Value, error) {
+	var errs []error
 	for _, p := range c.Providers {
-		if creds, err := p.Retrieve(); err == nil {
+		creds, err := p.Retrieve()
+		if err == nil {
 			c.curr = p
 			return creds, nil
 		}
+		errs = append(errs, err)
 	}
 	c.curr = nil
 
-	// TODO better error reporting. maybe report error for each failed retrieve?
-
-	return Value{}, ErrNoValidProvidersFoundInChain
+	var err error
+	err = ErrNoValidProvidersFoundInChain
+	if c.VerboseErrors {
+		err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs)
+	}
+	return Value{}, err
 }
 
 // IsExpired will returned the expired state of the currently cached provider

+ 5 - 2
vendor/src/github.com/aws/aws-sdk-go/aws/credentials/credentials.go

@@ -53,8 +53,8 @@ import (
 	"time"
 )
 
-// Create an empty Credential object that can be used as dummy placeholder
-// credentials for requests that do not need signed.
+// AnonymousCredentials is an empty Credential object that can be used as
+// dummy placeholder credentials for requests that do not need signed.
 //
 // This Credentials can be used to configure a service to not sign requests
 // when making service API calls. For example, when accessing public
@@ -76,6 +76,9 @@ type Value struct {
 
 	// AWS Session Token
 	SessionToken string
+
+	// Provider used to get credentials
+	ProviderName string
 }
 
 // A Provider is the interface for any component which will provide credentials

+ 49 - 39
vendor/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go

@@ -9,10 +9,14 @@ import (
 	"time"
 
 	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/client"
 	"github.com/aws/aws-sdk-go/aws/credentials"
 	"github.com/aws/aws-sdk-go/aws/ec2metadata"
 )
 
+// ProviderName provides a name of EC2Role provider
+const ProviderName = "EC2RoleProvider"
+
 // A EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if
 // those credentials are expired.
 //
@@ -22,12 +26,10 @@ import (
 //     p := &ec2rolecreds.EC2RoleProvider{
 //         // Pass in a custom timeout to be used when requesting
 //         // IAM EC2 Role credentials.
-//         Client: &http.Client{
-//             Timeout: 10 * time.Second,
-//         },
-//         // Use default EC2 Role metadata endpoint, Alternate endpoints can be
-//         // specified setting Endpoint to something else.
-//         Endpoint: "",
+//         Client: ec2metadata.New(sess, aws.Config{
+//             HTTPClient: &http.Client{Timeout: 10 * time.Second},
+//         }),
+//
 //         // Do not use early expiry of credentials. If a non zero value is
 //         // specified the credentials will be expired early
 //         ExpiryWindow: 0,
@@ -35,8 +37,8 @@ import (
 type EC2RoleProvider struct {
 	credentials.Expiry
 
-	// EC2Metadata client to use when connecting to EC2 metadata service
-	Client *ec2metadata.Client
+	// Required EC2Metadata client to use when connecting to EC2 metadata service.
+	Client *ec2metadata.EC2Metadata
 
 	// ExpiryWindow will allow the credentials to trigger refreshing prior to
 	// the credentials actually expiring. This is beneficial so race conditions
@@ -50,46 +52,53 @@ type EC2RoleProvider struct {
 	ExpiryWindow time.Duration
 }
 
-// NewCredentials returns a pointer to a new Credentials object
-// wrapping the EC2RoleProvider.
-//
-// Takes a custom http.Client which can be configured for custom handling of
-// things such as timeout.
-//
-// Endpoint is the URL that the EC2RoleProvider will connect to when retrieving
-// role and credentials.
-//
-// Window is the expiry window that will be subtracted from the expiry returned
-// by the role credential request. This is done so that the credentials will
-// expire sooner than their actual lifespan.
-func NewCredentials(client *ec2metadata.Client, window time.Duration) *credentials.Credentials {
-	return credentials.NewCredentials(&EC2RoleProvider{
-		Client:       client,
-		ExpiryWindow: window,
-	})
+// NewCredentials returns a pointer to a new Credentials object wrapping
+// the EC2RoleProvider. Takes a ConfigProvider to create a EC2Metadata client.
+// The ConfigProvider is satisfied by the session.Session type.
+func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials {
+	p := &EC2RoleProvider{
+		Client: ec2metadata.New(c),
+	}
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return credentials.NewCredentials(p)
+}
+
+// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping
+// the EC2RoleProvider. Takes a EC2Metadata client to use when connecting to EC2
+// metadata service.
+func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials {
+	p := &EC2RoleProvider{
+		Client: client,
+	}
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return credentials.NewCredentials(p)
 }
 
 // Retrieve retrieves credentials from the EC2 service.
 // Error will be returned if the request fails, or unable to extract
 // the desired credentials.
 func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) {
-	if m.Client == nil {
-		m.Client = ec2metadata.New(nil)
-	}
-
 	credsList, err := requestCredList(m.Client)
 	if err != nil {
-		return credentials.Value{}, err
+		return credentials.Value{ProviderName: ProviderName}, err
 	}
 
 	if len(credsList) == 0 {
-		return credentials.Value{}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil)
+		return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil)
 	}
 	credsName := credsList[0]
 
 	roleCreds, err := requestCred(m.Client, credsName)
 	if err != nil {
-		return credentials.Value{}, err
+		return credentials.Value{ProviderName: ProviderName}, err
 	}
 
 	m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow)
@@ -98,10 +107,11 @@ func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) {
 		AccessKeyID:     roleCreds.AccessKeyID,
 		SecretAccessKey: roleCreds.SecretAccessKey,
 		SessionToken:    roleCreds.Token,
+		ProviderName:    ProviderName,
 	}, nil
 }
 
-// A ec2RoleCredRespBody provides the shape for deserializing credential
+// A ec2RoleCredRespBody provides the shape for unmarshalling credential
 // request responses.
 type ec2RoleCredRespBody struct {
 	// Success State
@@ -119,10 +129,10 @@ const iamSecurityCredsPath = "/iam/security-credentials"
 
 // requestCredList requests a list of credentials from the EC2 service.
 // If there are no credentials, or there is an error making or receiving the request
-func requestCredList(client *ec2metadata.Client) ([]string, error) {
+func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
 	resp, err := client.GetMetadata(iamSecurityCredsPath)
 	if err != nil {
-		return nil, awserr.New("EC2RoleRequestError", "failed to list EC2 Roles", err)
+		return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err)
 	}
 
 	credsList := []string{}
@@ -132,7 +142,7 @@ func requestCredList(client *ec2metadata.Client) ([]string, error) {
 	}
 
 	if err := s.Err(); err != nil {
-		return nil, awserr.New("SerializationError", "failed to read list of EC2 Roles", err)
+		return nil, awserr.New("SerializationError", "failed to read EC2 instance role from metadata service", err)
 	}
 
 	return credsList, nil
@@ -142,12 +152,12 @@ func requestCredList(client *ec2metadata.Client) ([]string, error) {
 //
 // If the credentials cannot be found, or there is an error reading the response
 // and error will be returned.
-func requestCred(client *ec2metadata.Client, credsName string) (ec2RoleCredRespBody, error) {
+func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) {
 	resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName))
 	if err != nil {
 		return ec2RoleCredRespBody{},
 			awserr.New("EC2RoleRequestError",
-				fmt.Sprintf("failed to get %s EC2 Role credentials", credsName),
+				fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName),
 				err)
 	}
 
@@ -155,7 +165,7 @@ func requestCred(client *ec2metadata.Client, credsName string) (ec2RoleCredRespB
 	if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil {
 		return ec2RoleCredRespBody{},
 			awserr.New("SerializationError",
-				fmt.Sprintf("failed to decode %s EC2 Role credentials", credsName),
+				fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName),
 				err)
 	}
 

+ 6 - 2
vendor/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go

@@ -6,6 +6,9 @@ import (
 	"github.com/aws/aws-sdk-go/aws/awserr"
 )
 
+// EnvProviderName provides a name of Env provider
+const EnvProviderName = "EnvProvider"
+
 var (
 	// ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be
 	// found in the process's environment.
@@ -52,11 +55,11 @@ func (e *EnvProvider) Retrieve() (Value, error) {
 	}
 
 	if id == "" {
-		return Value{}, ErrAccessKeyIDNotFound
+		return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound
 	}
 
 	if secret == "" {
-		return Value{}, ErrSecretAccessKeyNotFound
+		return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound
 	}
 
 	e.retrieved = true
@@ -64,6 +67,7 @@ func (e *EnvProvider) Retrieve() (Value, error) {
 		AccessKeyID:     id,
 		SecretAccessKey: secret,
 		SessionToken:    os.Getenv("AWS_SESSION_TOKEN"),
+		ProviderName:    EnvProviderName,
 	}, nil
 }
 

+ 4 - 0
vendor/src/github.com/aws/aws-sdk-go/aws/credentials/example.ini

@@ -6,3 +6,7 @@ aws_session_token = token
 [no_token]
 aws_access_key_id = accessKey
 aws_secret_access_key = secret
+
+[with_colon]
+aws_access_key_id: accessKey
+aws_secret_access_key: secret

+ 25 - 17
vendor/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go

@@ -5,11 +5,14 @@ import (
 	"os"
 	"path/filepath"
 
-	"github.com/vaughan0/go-ini"
+	"github.com/go-ini/ini"
 
 	"github.com/aws/aws-sdk-go/aws/awserr"
 )
 
+// SharedCredsProviderName provides a name of SharedCreds provider
+const SharedCredsProviderName = "SharedCredentialsProvider"
+
 var (
 	// ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found.
 	//
@@ -55,12 +58,12 @@ func (p *SharedCredentialsProvider) Retrieve() (Value, error) {
 
 	filename, err := p.filename()
 	if err != nil {
-		return Value{}, err
+		return Value{ProviderName: SharedCredsProviderName}, err
 	}
 
 	creds, err := loadProfile(filename, p.profile())
 	if err != nil {
-		return Value{}, err
+		return Value{ProviderName: SharedCredsProviderName}, err
 	}
 
 	p.retrieved = true
@@ -76,32 +79,37 @@ func (p *SharedCredentialsProvider) IsExpired() bool {
 // The credentials retrieved from the profile will be returned or error. Error will be
 // returned if it fails to read from the file, or the data is invalid.
 func loadProfile(filename, profile string) (Value, error) {
-	config, err := ini.LoadFile(filename)
+	config, err := ini.Load(filename)
+	if err != nil {
+		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err)
+	}
+	iniProfile, err := config.GetSection(profile)
 	if err != nil {
-		return Value{}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err)
+		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", err)
 	}
-	iniProfile := config.Section(profile)
 
-	id, ok := iniProfile["aws_access_key_id"]
-	if !ok {
-		return Value{}, awserr.New("SharedCredsAccessKey",
+	id, err := iniProfile.GetKey("aws_access_key_id")
+	if err != nil {
+		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey",
 			fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename),
-			nil)
+			err)
 	}
 
-	secret, ok := iniProfile["aws_secret_access_key"]
-	if !ok {
-		return Value{}, awserr.New("SharedCredsSecret",
+	secret, err := iniProfile.GetKey("aws_secret_access_key")
+	if err != nil {
+		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret",
 			fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename),
 			nil)
 	}
 
-	token := iniProfile["aws_session_token"]
+	// Default to empty string if not found
+	token := iniProfile.Key("aws_session_token")
 
 	return Value{
-		AccessKeyID:     id,
-		SecretAccessKey: secret,
-		SessionToken:    token,
+		AccessKeyID:     id.String(),
+		SecretAccessKey: secret.String(),
+		SessionToken:    token.String(),
+		ProviderName:    SharedCredsProviderName,
 	}, nil
 }
 

+ 6 - 2
vendor/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go

@@ -4,6 +4,9 @@ import (
 	"github.com/aws/aws-sdk-go/aws/awserr"
 )
 
+// StaticProviderName provides a name of Static provider
+const StaticProviderName = "StaticProvider"
+
 var (
 	// ErrStaticCredentialsEmpty is emitted when static credentials are empty.
 	//
@@ -11,7 +14,7 @@ var (
 	ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil)
 )
 
-// A StaticProvider is a set of credentials which are set pragmatically,
+// A StaticProvider is a set of credentials which are set programmatically,
 // and will never expire.
 type StaticProvider struct {
 	Value
@@ -30,9 +33,10 @@ func NewStaticCredentials(id, secret, token string) *Credentials {
 // Retrieve returns the credentials or error if the credentials are invalid.
 func (s *StaticProvider) Retrieve() (Value, error) {
 	if s.AccessKeyID == "" || s.SecretAccessKey == "" {
-		return Value{}, ErrStaticCredentialsEmpty
+		return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty
 	}
 
+	s.Value.ProviderName = StaticProviderName
 	return s.Value, nil
 }
 

+ 84 - 25
vendor/src/github.com/aws/aws-sdk-go/aws/defaults/defaults.go

@@ -1,3 +1,10 @@
+// Package defaults is a collection of helpers to retrieve the SDK's default
+// configuration and handlers.
+//
+// Generally this package shouldn't be used directly; use session.Session
+// instead. This package is useful when you need to reset the defaults
+// of a session or service client to the SDK defaults before setting
+// additional parameters.
 package defaults
 
 import (
@@ -6,34 +13,86 @@ import (
 	"time"
 
 	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/corehandlers"
 	"github.com/aws/aws-sdk-go/aws/credentials"
 	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
+	"github.com/aws/aws-sdk-go/aws/ec2metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/endpoints"
 )
 
-// DefaultChainCredentials is a Credentials which will find the first available
-// credentials Value from the list of Providers.
+// A Defaults provides a collection of default values for SDK clients.
+type Defaults struct {
+	Config   *aws.Config
+	Handlers request.Handlers
+}
+
+// Get returns the SDK's default values with Config and handlers pre-configured.
+func Get() Defaults {
+	cfg := Config()
+	handlers := Handlers()
+	cfg.Credentials = CredChain(cfg, handlers)
+
+	return Defaults{
+		Config:   cfg,
+		Handlers: handlers,
+	}
+}
+
+// Config returns the default configuration without credentials.
+// To retrieve a config with credentials also included use
+// `defaults.Get().Config` instead.
+//
+// Generally you shouldn't need to use this method directly, but
+// it is available if you need to reset the configuration of an
+// existing service client or session.
+func Config() *aws.Config {
+	return aws.NewConfig().
+		WithCredentials(credentials.AnonymousCredentials).
+		WithRegion(os.Getenv("AWS_REGION")).
+		WithHTTPClient(http.DefaultClient).
+		WithMaxRetries(aws.UseServiceDefaultRetries).
+		WithLogger(aws.NewDefaultLogger()).
+		WithLogLevel(aws.LogOff).
+		WithSleepDelay(time.Sleep)
+}
+
+// Handlers returns the default request handlers.
 //
-// This should be used in the default case. Once the type of credentials are
-// known switching to the specific Credentials will be more efficient.
-var DefaultChainCredentials = credentials.NewChainCredentials(
-	[]credentials.Provider{
-		&credentials.EnvProvider{},
-		&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
-		&ec2rolecreds.EC2RoleProvider{ExpiryWindow: 5 * time.Minute},
-	})
-
-// DefaultConfig is the default all service configuration will be based off of.
-// By default, all clients use this structure for initialization options unless
-// a custom configuration object is passed in.
+// Generally you shouldn't need to use this method directly, but
+// it is available if you need to reset the request handlers of an
+// existing service client or session.
+func Handlers() request.Handlers {
+	var handlers request.Handlers
+
+	handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
+	handlers.Validate.AfterEachFn = request.HandlerListStopOnError
+	handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
+	handlers.Build.AfterEachFn = request.HandlerListStopOnError
+	handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
+	handlers.Send.PushBackNamed(corehandlers.SendHandler)
+	handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
+	handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler)
+
+	return handlers
+}
+
+// CredChain returns the default credential chain.
 //
-// You may modify this global structure to change all default configuration
-// in the SDK. Note that configuration options are copied by value, so any
-// modifications must happen before constructing a client.
-var DefaultConfig = aws.NewConfig().
-	WithCredentials(DefaultChainCredentials).
-	WithRegion(os.Getenv("AWS_REGION")).
-	WithHTTPClient(http.DefaultClient).
-	WithMaxRetries(aws.DefaultRetries).
-	WithLogger(aws.NewDefaultLogger()).
-	WithLogLevel(aws.LogOff).
-	WithSleepDelay(time.Sleep)
+// Generally you shouldn't need to use this method directly, but
+// it is available if you need to reset the credentials of an
+// existing service client or session's Config.
+func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
+	endpoint, signingRegion := endpoints.EndpointForRegion(ec2metadata.ServiceName, *cfg.Region, true)
+
+	return credentials.NewCredentials(&credentials.ChainProvider{
+		VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
+		Providers: []credentials.Provider{
+			&credentials.EnvProvider{},
+			&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
+			&ec2rolecreds.EC2RoleProvider{
+				Client:       ec2metadata.NewClient(*cfg, handlers, endpoint, signingRegion),
+				ExpiryWindow: 5 * time.Minute,
+			},
+		}})
+}

+ 102 - 5
vendor/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go

@@ -1,13 +1,20 @@
 package ec2metadata
 
 import (
+	"encoding/json"
+	"fmt"
 	"path"
+	"strings"
+	"time"
 
+	"github.com/aws/aws-sdk-go/aws/awserr"
 	"github.com/aws/aws-sdk-go/aws/request"
 )
 
-// GetMetadata uses the path provided to request
-func (c *Client) GetMetadata(p string) (string, error) {
+// GetMetadata uses the path provided to request information from the EC2
+// instance metadata service. The content will be returned as a string, or
+// error if the request failed.
+func (c *EC2Metadata) GetMetadata(p string) (string, error) {
 	op := &request.Operation{
 		Name:       "GetMetadata",
 		HTTPMethod: "GET",
@@ -15,13 +22,75 @@ func (c *Client) GetMetadata(p string) (string, error) {
 	}
 
 	output := &metadataOutput{}
-	req := request.New(c.Service.ServiceInfo, c.Service.Handlers, c.Service.Retryer, op, nil, output)
+	req := c.NewRequest(op, nil, output)
 
 	return output.Content, req.Send()
 }
 
+// GetDynamicData uses the path provided to request information from the EC2
+// instance metadata service for dynamic data. The content will be returned
+// as a string, or error if the request failed.
+func (c *EC2Metadata) GetDynamicData(p string) (string, error) {
+	op := &request.Operation{
+		Name:       "GetDynamicData",
+		HTTPMethod: "GET",
+		HTTPPath:   path.Join("/", "dynamic", p),
+	}
+
+	output := &metadataOutput{}
+	req := c.NewRequest(op, nil, output)
+
+	return output.Content, req.Send()
+}
+
+// GetInstanceIdentityDocument retrieves an identity document describing an
+// instance. Error is returned if the request fails or is unable to parse
+// the response.
+func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) {
+	resp, err := c.GetDynamicData("instance-identity/document")
+	if err != nil {
+		return EC2InstanceIdentityDocument{},
+			awserr.New("EC2MetadataRequestError",
+				"failed to get EC2 instance identity document", err)
+	}
+
+	doc := EC2InstanceIdentityDocument{}
+	if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil {
+		return EC2InstanceIdentityDocument{},
+			awserr.New("SerializationError",
+				"failed to decode EC2 instance identity document", err)
+	}
+
+	return doc, nil
+}
+
+// IAMInfo retrieves IAM info from the metadata API
+func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) {
+	resp, err := c.GetMetadata("iam/info")
+	if err != nil {
+		return EC2IAMInfo{},
+			awserr.New("EC2MetadataRequestError",
+				"failed to get EC2 IAM info", err)
+	}
+
+	info := EC2IAMInfo{}
+	if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil {
+		return EC2IAMInfo{},
+			awserr.New("SerializationError",
+				"failed to decode EC2 IAM info", err)
+	}
+
+	if info.Code != "Success" {
+		errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code)
+		return EC2IAMInfo{},
+			awserr.New("EC2MetadataError", errMsg, nil)
+	}
+
+	return info, nil
+}
+
 // Region returns the region the instance is running in.
-func (c *Client) Region() (string, error) {
+func (c *EC2Metadata) Region() (string, error) {
 	resp, err := c.GetMetadata("placement/availability-zone")
 	if err != nil {
 		return "", err
@@ -34,10 +103,38 @@ func (c *Client) Region() (string, error) {
 // Available returns if the application has access to the EC2 Metadata service.
 // Can be used to determine if application is running within an EC2 Instance and
 // the metadata service is available.
-func (c *Client) Available() bool {
+func (c *EC2Metadata) Available() bool {
 	if _, err := c.GetMetadata("instance-id"); err != nil {
 		return false
 	}
 
 	return true
 }
+
+// An EC2IAMInfo provides the shape for unmarshalling
+// an IAM info from the metadata API
+type EC2IAMInfo struct {
+	Code               string
+	LastUpdated        time.Time
+	InstanceProfileArn string
+	InstanceProfileID  string
+}
+
+// An EC2InstanceIdentityDocument provides the shape for unmarshalling
+// an instance identity document
+type EC2InstanceIdentityDocument struct {
+	DevpayProductCodes []string  `json:"devpayProductCodes"`
+	AvailabilityZone   string    `json:"availabilityZone"`
+	PrivateIP          string    `json:"privateIp"`
+	Version            string    `json:"version"`
+	Region             string    `json:"region"`
+	InstanceID         string    `json:"instanceId"`
+	BillingProducts    []string  `json:"billingProducts"`
+	InstanceType       string    `json:"instanceType"`
+	AccountID          string    `json:"accountId"`
+	PendingTime        time.Time `json:"pendingTime"`
+	ImageID            string    `json:"imageId"`
+	KernelID           string    `json:"kernelId"`
+	RamdiskID          string    `json:"ramdiskId"`
+	Architecture       string    `json:"architecture"`
+}

+ 78 - 89
vendor/src/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go

@@ -1,106 +1,90 @@
+// Package ec2metadata provides the client for making API calls to the
+// EC2 Metadata service.
 package ec2metadata
 
 import (
-	"io/ioutil"
+	"bytes"
+	"errors"
+	"io"
 	"net/http"
+	"time"
 
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/awserr"
-	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
 	"github.com/aws/aws-sdk-go/aws/request"
-	"github.com/aws/aws-sdk-go/aws/service"
-	"github.com/aws/aws-sdk-go/aws/service/serviceinfo"
 )
 
-// DefaultRetries states the default number of times the service client will
-// attempt to retry a failed request before failing.
-const DefaultRetries = 3
-
-// A Config provides the configuration for the EC2 Metadata service.
-type Config struct {
-	// An optional endpoint URL (hostname only or fully qualified URI)
-	// that overrides the default service endpoint for a client. Set this
-	// to nil, or `""` to use the default service endpoint.
-	Endpoint *string
-
-	// The HTTP client to use when sending requests. Defaults to
-	// `http.DefaultClient`.
-	HTTPClient *http.Client
-
-	// An integer value representing the logging level. The default log level
-	// is zero (LogOff), which represents no logging. To enable logging set
-	// to a LogLevel Value.
-	Logger aws.Logger
-
-	// The logger writer interface to write logging messages to. Defaults to
-	// standard out.
-	LogLevel *aws.LogLevelType
-
-	// The maximum number of times that a request will be retried for failures.
-	// Defaults to DefaultRetries for the number of retries to be performed
-	// per request.
-	MaxRetries *int
-}
+// ServiceName is the name of the service.
+const ServiceName = "ec2metadata"
 
-// A Client is an EC2 Metadata service Client.
-type Client struct {
-	*service.Service
+// An EC2Metadata is an EC2 Metadata service Client.
+type EC2Metadata struct {
+	*client.Client
 }
 
-// New creates a new instance of the EC2 Metadata service client.
+// New creates a new instance of the EC2Metadata client with a session.
+// This client is safe to use across multiple goroutines.
 //
-// In the general use case the configuration for this service client should not
-// be needed and `nil` can be provided. Configuration is only needed if the
-// `ec2metadata.Config` defaults need to be overridden. Eg. Setting LogLevel.
 //
-// @note This configuration will NOT be merged with the default AWS service
-// client configuration `defaults.DefaultConfig`. Due to circular dependencies
-// with the defaults package and credentials EC2 Role Provider.
-func New(config *Config) *Client {
-	service := &service.Service{
-		ServiceInfo: serviceinfo.ServiceInfo{
-			Config:      copyConfig(config),
-			ServiceName: "Client",
-			Endpoint:    "http://169.254.169.254/latest",
-			APIVersion:  "latest",
-		},
-	}
-	service.Initialize()
-	service.Handlers.Unmarshal.PushBack(unmarshalHandler)
-	service.Handlers.UnmarshalError.PushBack(unmarshalError)
-	service.Handlers.Validate.Clear()
-	service.Handlers.Validate.PushBack(validateEndpointHandler)
-
-	return &Client{service}
+// Example:
+//     // Create a EC2Metadata client from just a session.
+//     svc := ec2metadata.New(mySession)
+//
+//     // Create a EC2Metadata client with additional configuration
+//     svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
 }
 
-func copyConfig(config *Config) *aws.Config {
-	if config == nil {
-		config = &Config{}
-	}
-	c := &aws.Config{
-		Credentials: credentials.AnonymousCredentials,
-		Endpoint:    config.Endpoint,
-		HTTPClient:  config.HTTPClient,
-		Logger:      config.Logger,
-		LogLevel:    config.LogLevel,
-		MaxRetries:  config.MaxRetries,
+// NewClient returns a new EC2Metadata client. Should be used to create
+// a client when not using a session. Generally using just New with a session
+// is preferred.
+//
+// If an unmodified HTTP client is provided from the stdlib default, or no client,
+// the EC2RoleProvider's EC2Metadata HTTP client's timeout will be shortened.
+// To disable this set Config.EC2MetadataDisableTimeoutOverride to true. Enabled by default.
+func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata {
+	if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) {
+		// If the http client is unmodified and this feature is not disabled
+		// set custom timeouts for EC2Metadata requests.
+		cfg.HTTPClient = &http.Client{
+			// use a shorter timeout than default because the metadata
+			// service is local if it is running, and to fail faster
+			// if not running on an ec2 instance.
+			Timeout: 5 * time.Second,
+		}
 	}
 
-	if c.HTTPClient == nil {
-		c.HTTPClient = http.DefaultClient
-	}
-	if c.Logger == nil {
-		c.Logger = aws.NewDefaultLogger()
+	svc := &EC2Metadata{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName: ServiceName,
+				Endpoint:    endpoint,
+				APIVersion:  "latest",
+			},
+			handlers,
+		),
 	}
-	if c.LogLevel == nil {
-		c.LogLevel = aws.LogLevel(aws.LogOff)
-	}
-	if c.MaxRetries == nil {
-		c.MaxRetries = aws.Int(DefaultRetries)
+
+	svc.Handlers.Unmarshal.PushBack(unmarshalHandler)
+	svc.Handlers.UnmarshalError.PushBack(unmarshalError)
+	svc.Handlers.Validate.Clear()
+	svc.Handlers.Validate.PushBack(validateEndpointHandler)
+
+	// Add additional options to the service config
+	for _, option := range opts {
+		option(svc.Client)
 	}
 
-	return c
+	return svc
+}
+
+func httpClientZero(c *http.Client) bool {
+	return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0)
 }
 
 type metadataOutput struct {
@@ -109,27 +93,32 @@ type metadataOutput struct {
 
 func unmarshalHandler(r *request.Request) {
 	defer r.HTTPResponse.Body.Close()
-	b, err := ioutil.ReadAll(r.HTTPResponse.Body)
-	if err != nil {
+	b := &bytes.Buffer{}
+	if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
 		r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata respose", err)
+		return
 	}
 
-	data := r.Data.(*metadataOutput)
-	data.Content = string(b)
+	if data, ok := r.Data.(*metadataOutput); ok {
+		data.Content = b.String()
+	}
 }
 
 func unmarshalError(r *request.Request) {
 	defer r.HTTPResponse.Body.Close()
-	_, err := ioutil.ReadAll(r.HTTPResponse.Body)
-	if err != nil {
+	b := &bytes.Buffer{}
+	if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
 		r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error respose", err)
+		return
 	}
 
-	// TODO extract the error...
+	// Response body format is not consistent between metadata endpoints.
+	// Grab the error message as a string and include that as the source error
+	r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String()))
 }
 
 func validateEndpointHandler(r *request.Request) {
-	if r.Service.Endpoint == "" {
+	if r.ClientInfo.Endpoint == "" {
 		r.Error = aws.ErrMissingEndpoint
 	}
 }

+ 2 - 2
vendor/src/github.com/aws/aws-sdk-go/aws/errors.go

@@ -7,11 +7,11 @@ var (
 	// not found.
 	//
 	// @readonly
-	ErrMissingRegion error = awserr.New("MissingRegion", "could not find region configuration", nil)
+	ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil)
 
 	// ErrMissingEndpoint is an error that is returned if an endpoint cannot be
 	// resolved for a service.
 	//
 	// @readonly
-	ErrMissingEndpoint error = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil)
+	ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil)
 )

+ 14 - 0
vendor/src/github.com/aws/aws-sdk-go/aws/logger.go

@@ -79,6 +79,20 @@ type Logger interface {
 	Log(...interface{})
 }
 
+// A LoggerFunc is a convenience type to convert a function taking a variadic
+// list of arguments and wrap it so the Logger interface can be used.
+//
+// Example:
+//     s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) {
+//         fmt.Fprintln(os.Stdout, args...)
+//     })})
+type LoggerFunc func(...interface{})
+
+// Log calls the wrapped function with the arguments provided
+func (f LoggerFunc) Log(args ...interface{}) {
+	f(args...)
+}
+
 // NewDefaultLogger returns a Logger which will write log messages to stdout, and
 // use same formatting runes as the stdlib log.Logger
 func NewDefaultLogger() Logger {

+ 78 - 3
vendor/src/github.com/aws/aws-sdk-go/aws/request/handlers.go

@@ -1,5 +1,10 @@
 package request
 
+import (
+	"fmt"
+	"strings"
+)
+
 // A Handlers provides a collection of request handlers for various
 // stages of handling requests.
 type Handlers struct {
@@ -45,9 +50,28 @@ func (h *Handlers) Clear() {
 	h.AfterRetry.Clear()
 }
 
+// A HandlerListRunItem represents an entry in the HandlerList which
+// is being run.
+type HandlerListRunItem struct {
+	Index   int
+	Handler NamedHandler
+	Request *Request
+}
+
 // A HandlerList manages zero or more handlers in a list.
 type HandlerList struct {
 	list []NamedHandler
+
+	// Called after each request handler in the list is called. If set
+	// and the func returns true the HandlerList will continue to iterate
+	// over the request handlers. If false is returned the HandlerList
+	// will stop iterating.
+	//
+	// Should be used if extra logic to be performed between each handler
+	// in the list. This can be used to terminate a list's iteration
+	// based on a condition such as error like, HandlerListStopOnError.
+	// Or for logging like HandlerListLogItem.
+	AfterEachFn func(item HandlerListRunItem) bool
 }
 
 // A NamedHandler is a struct that contains a name and function callback.
@@ -58,7 +82,9 @@ type NamedHandler struct {
 
 // copy creates a copy of the handler list.
 func (l *HandlerList) copy() HandlerList {
-	var n HandlerList
+	n := HandlerList{
+		AfterEachFn: l.AfterEachFn,
+	}
 	n.list = append([]NamedHandler{}, l.list...)
 	return n
 }
@@ -106,7 +132,56 @@ func (l *HandlerList) Remove(n NamedHandler) {
 
 // Run executes all handlers in the list with a given request object.
 func (l *HandlerList) Run(r *Request) {
-	for _, f := range l.list {
-		f.Fn(r)
+	for i, h := range l.list {
+		h.Fn(r)
+		item := HandlerListRunItem{
+			Index: i, Handler: h, Request: r,
+		}
+		if l.AfterEachFn != nil && !l.AfterEachFn(item) {
+			return
+		}
+	}
+}
+
+// HandlerListLogItem logs the request handler and the state of the
+// request's Error value. Always returns true to continue iterating
+// request handlers in a HandlerList.
+func HandlerListLogItem(item HandlerListRunItem) bool {
+	if item.Request.Config.Logger == nil {
+		return true
+	}
+	item.Request.Config.Logger.Log("DEBUG: RequestHandler",
+		item.Index, item.Handler.Name, item.Request.Error)
+
+	return true
+}
+
+// HandlerListStopOnError returns false to stop the HandlerList iterating
+// over request handlers if Request.Error is not nil. True otherwise
+// to continue iterating.
+func HandlerListStopOnError(item HandlerListRunItem) bool {
+	return item.Request.Error == nil
+}
+
+// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request
+// header. If the extra parameters are provided they will be added as metadata to the
+// name/version pair resulting in the following format.
+// "name/version (extra0; extra1; ...)"
+// The user agent part will be concatenated with this current request's user agent string.
+func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) {
+	ua := fmt.Sprintf("%s/%s", name, version)
+	if len(extra) > 0 {
+		ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; "))
+	}
+	return func(r *Request) {
+		AddToUserAgent(r, ua)
+	}
+}
+
+// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header.
+// The input string will be concatenated with the current request's user agent string.
+func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) {
+	return func(r *Request) {
+		AddToUserAgent(r, s)
 	}
 }

+ 33 - 0
vendor/src/github.com/aws/aws-sdk-go/aws/request/http_request.go

@@ -0,0 +1,33 @@
+// +build go1.5
+
+package request
+
+import (
+	"io"
+	"net/http"
+	"net/url"
+)
+
+func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request {
+	req := &http.Request{
+		URL:           &url.URL{},
+		Header:        http.Header{},
+		Close:         r.Close,
+		Body:          body,
+		Host:          r.Host,
+		Method:        r.Method,
+		Proto:         r.Proto,
+		ContentLength: r.ContentLength,
+		// Cancel will be deprecated in 1.7 and will be replaced with Context
+		Cancel: r.Cancel,
+	}
+
+	*req.URL = *r.URL
+	for k, v := range r.Header {
+		for _, vv := range v {
+			req.Header.Add(k, vv)
+		}
+	}
+
+	return req
+}

+ 31 - 0
vendor/src/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go

@@ -0,0 +1,31 @@
+// +build !go1.5
+
+package request
+
+import (
+	"io"
+	"net/http"
+	"net/url"
+)
+
+func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request {
+	req := &http.Request{
+		URL:           &url.URL{},
+		Header:        http.Header{},
+		Close:         r.Close,
+		Body:          body,
+		Host:          r.Host,
+		Method:        r.Method,
+		Proto:         r.Proto,
+		ContentLength: r.ContentLength,
+	}
+
+	*req.URL = *r.URL
+	for k, v := range r.Header {
+		for _, vv := range v {
+			req.Header.Add(k, vv)
+		}
+	}
+
+	return req
+}

+ 49 - 0
vendor/src/github.com/aws/aws-sdk-go/aws/request/offset_reader.go

@@ -0,0 +1,49 @@
+package request
+
+import (
+	"io"
+	"sync"
+)
+
+// offsetReader is a thread-safe io.ReadCloser to prevent racing
+// with retrying requests
+type offsetReader struct {
+	buf    io.ReadSeeker
+	lock   sync.RWMutex
+	closed bool
+}
+
+func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader {
+	reader := &offsetReader{}
+	buf.Seek(offset, 0)
+
+	reader.buf = buf
+	return reader
+}
+
+// Close is a thread-safe close. Uses the write lock.
+func (o *offsetReader) Close() error {
+	o.lock.Lock()
+	defer o.lock.Unlock()
+	o.closed = true
+	return nil
+}
+
+// Read is a thread-safe read using a read lock.
+func (o *offsetReader) Read(p []byte) (int, error) {
+	o.lock.RLock()
+	defer o.lock.RUnlock()
+
+	if o.closed {
+		return 0, io.EOF
+	}
+
+	return o.buf.Read(p)
+}
+
+// CloseAndCopy will return a new offsetReader with a copy of the old buffer
+// and close the old buffer.
+func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader {
+	o.Close()
+	return newOffsetReader(o.buf, offset)
+}

+ 100 - 119
vendor/src/github.com/aws/aws-sdk-go/aws/request/request.go

@@ -12,29 +12,33 @@ import (
 	"time"
 
 	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/aws/awsutil"
-	"github.com/aws/aws-sdk-go/aws/service/serviceinfo"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
 )
 
 // A Request is the service request to be made.
 type Request struct {
+	Config     aws.Config
+	ClientInfo metadata.ClientInfo
+	Handlers   Handlers
+
 	Retryer
-	Service      serviceinfo.ServiceInfo
-	Handlers     Handlers
-	Time         time.Time
-	ExpireTime   time.Duration
-	Operation    *Operation
-	HTTPRequest  *http.Request
-	HTTPResponse *http.Response
-	Body         io.ReadSeeker
-	BodyStart    int64 // offset from beginning of Body that the request body starts
-	Params       interface{}
-	Error        error
-	Data         interface{}
-	RequestID    string
-	RetryCount   uint
-	Retryable    *bool
-	RetryDelay   time.Duration
+	Time             time.Time
+	ExpireTime       time.Duration
+	Operation        *Operation
+	HTTPRequest      *http.Request
+	HTTPResponse     *http.Response
+	Body             io.ReadSeeker
+	BodyStart        int64 // offset from beginning of Body that the request body starts
+	Params           interface{}
+	Error            error
+	Data             interface{}
+	RequestID        string
+	RetryCount       int
+	Retryable        *bool
+	RetryDelay       time.Duration
+	NotHoist         bool
+	SignedHeaderVals http.Header
 
 	built bool
 }
@@ -61,7 +65,9 @@ type Paginator struct {
 // Params is any value of input parameters to be the request payload.
 // Data is pointer value to an object which the request's response
 // payload will be deserialized to.
-func New(service serviceinfo.ServiceInfo, handlers Handlers, retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request {
+func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
+	retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request {
+
 	method := operation.HTTPMethod
 	if method == "" {
 		method = "POST"
@@ -72,19 +78,27 @@ func New(service serviceinfo.ServiceInfo, handlers Handlers, retryer Retryer, op
 	}
 
 	httpReq, _ := http.NewRequest(method, "", nil)
-	httpReq.URL, _ = url.Parse(service.Endpoint + p)
+
+	var err error
+	httpReq.URL, err = url.Parse(clientInfo.Endpoint + p)
+	if err != nil {
+		httpReq.URL = &url.URL{}
+		err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err)
+	}
 
 	r := &Request{
+		Config:     cfg,
+		ClientInfo: clientInfo,
+		Handlers:   handlers.Copy(),
+
 		Retryer:     retryer,
-		Service:     service,
-		Handlers:    handlers.Copy(),
 		Time:        time.Now(),
 		ExpireTime:  0,
 		Operation:   operation,
 		HTTPRequest: httpReq,
 		Body:        nil,
 		Params:      params,
-		Error:       nil,
+		Error:       err,
 		Data:        data,
 	}
 	r.SetBufferBody([]byte{})
@@ -124,7 +138,7 @@ func (r *Request) SetStringBody(s string) {
 
 // SetReaderBody will set the request's body reader.
 func (r *Request) SetReaderBody(reader io.ReadSeeker) {
-	r.HTTPRequest.Body = ioutil.NopCloser(reader)
+	r.HTTPRequest.Body = newOffsetReader(reader, 0)
 	r.Body = reader
 }
 
@@ -132,6 +146,7 @@ func (r *Request) SetReaderBody(reader io.ReadSeeker) {
 // if the signing fails.
 func (r *Request) Presign(expireTime time.Duration) (string, error) {
 	r.ExpireTime = expireTime
+	r.NotHoist = false
 	r.Sign()
 	if r.Error != nil {
 		return "", r.Error
@@ -139,8 +154,20 @@ func (r *Request) Presign(expireTime time.Duration) (string, error) {
 	return r.HTTPRequest.URL.String(), nil
 }
 
+// PresignRequest behaves just like presign, but hoists all headers and signs them.
+// Also returns the signed hash back to the user
+func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) {
+	r.ExpireTime = expireTime
+	r.NotHoist = true
+	r.Sign()
+	if r.Error != nil {
+		return "", nil, r.Error
+	}
+	return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil
+}
+
 func debugLogReqError(r *Request, stage string, retrying bool, err error) {
-	if !r.Service.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) {
+	if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) {
 		return
 	}
 
@@ -149,8 +176,8 @@ func debugLogReqError(r *Request, stage string, retrying bool, err error) {
 		retryStr = "will retry"
 	}
 
-	r.Service.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v",
-		stage, r.Service.ServiceName, r.Operation.Name, retryStr, err))
+	r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v",
+		stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err))
 }
 
 // Build will build the request's object so it can be signed and sent
@@ -165,13 +192,16 @@ func debugLogReqError(r *Request, stage string, retrying bool, err error) {
 // which occurred will be returned.
 func (r *Request) Build() error {
 	if !r.built {
-		r.Error = nil
 		r.Handlers.Validate.Run(r)
 		if r.Error != nil {
 			debugLogReqError(r, "Validate Request", false, r.Error)
 			return r.Error
 		}
 		r.Handlers.Build.Run(r)
+		if r.Error != nil {
+			debugLogReqError(r, "Build Request", false, r.Error)
+			return r.Error
+		}
 		r.built = true
 	}
 
@@ -197,28 +227,53 @@ func (r *Request) Sign() error {
 //
 // Send will sign the request prior to sending. All Send Handlers will
 // be executed in the order they were set.
+//
+// Canceling a request is non-deterministic. If a request has been canceled,
+// then the transport will choose, randomly, one of the state channels during
+// reads or getting the connection.
+//
+// readLoop() and getConn(req *Request, cm connectMethod)
+// https://github.com/golang/go/blob/master/src/net/http/transport.go
 func (r *Request) Send() error {
 	for {
+		if aws.BoolValue(r.Retryable) {
+			if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
+				r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",
+					r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount))
+			}
+
+			var body io.ReadCloser
+			if reader, ok := r.HTTPRequest.Body.(*offsetReader); ok {
+				body = reader.CloseAndCopy(r.BodyStart)
+			} else {
+				if r.Config.Logger != nil {
+					r.Config.Logger.Log("Request body type has been overwritten. May cause race conditions")
+				}
+				r.Body.Seek(r.BodyStart, 0)
+				body = ioutil.NopCloser(r.Body)
+			}
+
+			r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, body)
+			if r.HTTPResponse != nil && r.HTTPResponse.Body != nil {
+				// Closing response body. Since we are setting a new request to send off, this
+				// response will get squashed and leaked.
+				r.HTTPResponse.Body.Close()
+			}
+		}
+
 		r.Sign()
 		if r.Error != nil {
 			return r.Error
 		}
 
-		if aws.BoolValue(r.Retryable) {
-			if r.Service.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
-				r.Service.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",
-					r.Service.ServiceName, r.Operation.Name, r.RetryCount))
-			}
-
-			// Re-seek the body back to the original point in for a retry so that
-			// send will send the body's contents again in the upcoming request.
-			r.Body.Seek(r.BodyStart, 0)
-			r.HTTPRequest.Body = ioutil.NopCloser(r.Body)
-		}
 		r.Retryable = nil
 
 		r.Handlers.Send.Run(r)
 		if r.Error != nil {
+			if strings.Contains(r.Error.Error(), "net/http: request canceled") {
+				return r.Error
+			}
+
 			err := r.Error
 			r.Handlers.Retry.Run(r)
 			r.Handlers.AfterRetry.Run(r)
@@ -264,85 +319,11 @@ func (r *Request) Send() error {
 	return nil
 }
 
-// HasNextPage returns true if this request has more pages of data available.
-func (r *Request) HasNextPage() bool {
-	return r.nextPageTokens() != nil
-}
-
-// nextPageTokens returns the tokens to use when asking for the next page of
-// data.
-func (r *Request) nextPageTokens() []interface{} {
-	if r.Operation.Paginator == nil {
-		return nil
-	}
-
-	if r.Operation.TruncationToken != "" {
-		tr := awsutil.ValuesAtAnyPath(r.Data, r.Operation.TruncationToken)
-		if tr == nil || len(tr) == 0 {
-			return nil
-		}
-		switch v := tr[0].(type) {
-		case bool:
-			if v == false {
-				return nil
-			}
-		}
-	}
-
-	found := false
-	tokens := make([]interface{}, len(r.Operation.OutputTokens))
-
-	for i, outtok := range r.Operation.OutputTokens {
-		v := awsutil.ValuesAtAnyPath(r.Data, outtok)
-		if v != nil && len(v) > 0 {
-			found = true
-			tokens[i] = v[0]
-		}
-	}
-
-	if found {
-		return tokens
+// AddToUserAgent adds the string to the end of the request's current user agent.
+func AddToUserAgent(r *Request, s string) {
+	curUA := r.HTTPRequest.Header.Get("User-Agent")
+	if len(curUA) > 0 {
+		s = curUA + " " + s
 	}
-	return nil
-}
-
-// NextPage returns a new Request that can be executed to return the next
-// page of result data. Call .Send() on this request to execute it.
-func (r *Request) NextPage() *Request {
-	tokens := r.nextPageTokens()
-	if tokens == nil {
-		return nil
-	}
-
-	data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface()
-	nr := New(r.Service, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data)
-	for i, intok := range nr.Operation.InputTokens {
-		awsutil.SetValueAtAnyPath(nr.Params, intok, tokens[i])
-	}
-	return nr
-}
-
-// EachPage iterates over each page of a paginated request object. The fn
-// parameter should be a function with the following sample signature:
-//
-//   func(page *T, lastPage bool) bool {
-//       return true // return false to stop iterating
-//   }
-//
-// Where "T" is the structure type matching the output structure of the given
-// operation. For example, a request object generated by
-// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput
-// as the structure "T". The lastPage value represents whether the page is
-// the last page of data or not. The return value of this function should
-// return true to keep iterating or false to stop.
-func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error {
-	for page := r; page != nil; page = page.NextPage() {
-		page.Send()
-		shouldContinue := fn(page.Data, !page.HasNextPage())
-		if page.Error != nil || !shouldContinue {
-			return page.Error
-		}
-	}
-
-	return nil
+	r.HTTPRequest.Header.Set("User-Agent", s)
 }

+ 104 - 0
vendor/src/github.com/aws/aws-sdk-go/aws/request/request_pagination.go

@@ -0,0 +1,104 @@
+package request
+
+import (
+	"reflect"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awsutil"
+)
+
+//type Paginater interface {
+//	HasNextPage() bool
+//	NextPage() *Request
+//	EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error
+//}
+
+// HasNextPage returns true if this request has more pages of data available.
+func (r *Request) HasNextPage() bool {
+	return len(r.nextPageTokens()) > 0
+}
+
+// nextPageTokens returns the tokens to use when asking for the next page of
+// data.
+func (r *Request) nextPageTokens() []interface{} {
+	if r.Operation.Paginator == nil {
+		return nil
+	}
+
+	if r.Operation.TruncationToken != "" {
+		tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken)
+		if len(tr) == 0 {
+			return nil
+		}
+
+		switch v := tr[0].(type) {
+		case *bool:
+			if !aws.BoolValue(v) {
+				return nil
+			}
+		case bool:
+			if v == false {
+				return nil
+			}
+		}
+	}
+
+	tokens := []interface{}{}
+	tokenAdded := false
+	for _, outToken := range r.Operation.OutputTokens {
+		v, _ := awsutil.ValuesAtPath(r.Data, outToken)
+		if len(v) > 0 {
+			tokens = append(tokens, v[0])
+			tokenAdded = true
+		} else {
+			tokens = append(tokens, nil)
+		}
+	}
+	if !tokenAdded {
+		return nil
+	}
+
+	return tokens
+}
+
+// NextPage returns a new Request that can be executed to return the next
+// page of result data. Call .Send() on this request to execute it.
+func (r *Request) NextPage() *Request {
+	tokens := r.nextPageTokens()
+	if len(tokens) == 0 {
+		return nil
+	}
+
+	data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface()
+	nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data)
+	for i, intok := range nr.Operation.InputTokens {
+		awsutil.SetValueAtPath(nr.Params, intok, tokens[i])
+	}
+	return nr
+}
+
+// EachPage iterates over each page of a paginated request object. The fn
+// parameter should be a function with the following sample signature:
+//
+//   func(page *T, lastPage bool) bool {
+//       return true // return false to stop iterating
+//   }
+//
+// Where "T" is the structure type matching the output structure of the given
+// operation. For example, a request object generated by
+// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput
+// as the structure "T". The lastPage value represents whether the page is
+// the last page of data or not. The return value of this function should
+// return true to keep iterating or false to stop.
+func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error {
+	for page := r; page != nil; page = page.NextPage() {
+		if err := page.Send(); err != nil {
+			return err
+		}
+		if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage {
+			return page.Error
+		}
+	}
+
+	return nil
+}

+ 32 - 2
vendor/src/github.com/aws/aws-sdk-go/aws/request/retryer.go

@@ -3,6 +3,7 @@ package request
 import (
 	"time"
 
+	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/awserr"
 )
 
@@ -12,18 +13,31 @@ import (
 type Retryer interface {
 	RetryRules(*Request) time.Duration
 	ShouldRetry(*Request) bool
-	MaxRetries() uint
+	MaxRetries() int
+}
+
+// WithRetryer sets a config Retryer value to the given Config returning it
+// for chaining.
+func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config {
+	cfg.Retryer = retryer
+	return cfg
 }
 
 // retryableCodes is a collection of service response codes which are retry-able
 // without any further action.
 var retryableCodes = map[string]struct{}{
-	"RequestError":                           {},
+	"RequestError":   {},
+	"RequestTimeout": {},
+}
+
+var throttleCodes = map[string]struct{}{
 	"ProvisionedThroughputExceededException": {},
 	"Throttling":                             {},
 	"ThrottlingException":                    {},
 	"RequestLimitExceeded":                   {},
 	"RequestThrottled":                       {},
+	"LimitExceededException":                 {}, // Deleting 10+ DynamoDb tables at once
+	"TooManyRequestsException":               {}, // Lambda functions
 }
 
 // credsExpiredCodes is a collection of error codes which signify the credentials
@@ -35,6 +49,11 @@ var credsExpiredCodes = map[string]struct{}{
 	"RequestExpired":        {}, // EC2 Only
 }
 
+func isCodeThrottle(code string) bool {
+	_, ok := throttleCodes[code]
+	return ok
+}
+
 func isCodeRetryable(code string) bool {
 	if _, ok := retryableCodes[code]; ok {
 		return true
@@ -59,6 +78,17 @@ func (r *Request) IsErrorRetryable() bool {
 	return false
 }
 
+// IsErrorThrottle returns whether the error is to be throttled based on its code.
+// Returns false if the request has no Error set
+func (r *Request) IsErrorThrottle() bool {
+	if r.Error != nil {
+		if err, ok := r.Error.(awserr.Error); ok {
+			return isCodeThrottle(err.Code())
+		}
+	}
+	return false
+}
+
 // IsErrorExpired returns whether the error code is a credential expiry error.
 // Returns false if the request has no Error set.
 func (r *Request) IsErrorExpired() bool {

+ 234 - 0
vendor/src/github.com/aws/aws-sdk-go/aws/request/validation.go

@@ -0,0 +1,234 @@
+package request
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+const (
+	// InvalidParameterErrCode is the error code for invalid parameters errors
+	InvalidParameterErrCode = "InvalidParameter"
+	// ParamRequiredErrCode is the error code for required parameter errors
+	ParamRequiredErrCode = "ParamRequiredError"
+	// ParamMinValueErrCode is the error code for fields with too low of a
+	// number value.
+	ParamMinValueErrCode = "ParamMinValueError"
+	// ParamMinLenErrCode is the error code for fields without enough elements.
+	ParamMinLenErrCode = "ParamMinLenError"
+)
+
+// Validator provides a way for types to perform validation logic on their
+// input values that external code can use to determine if a type's values
+// are valid.
+type Validator interface {
+	Validate() error
+}
+
+// An ErrInvalidParams provides wrapping of invalid parameter errors found when
+// validating API operation input parameters.
+type ErrInvalidParams struct {
+	// Context is the base context of the invalid parameter group.
+	Context string
+	errs    []ErrInvalidParam
+}
+
+// Add adds a new invalid parameter error to the collection of invalid
+// parameters. The context of the invalid parameter will be updated to reflect
+// this collection.
+func (e *ErrInvalidParams) Add(err ErrInvalidParam) {
+	err.SetContext(e.Context)
+	e.errs = append(e.errs, err)
+}
+
+// AddNested adds the invalid parameter errors from another ErrInvalidParams
+// value into this collection. The nested errors will have their nested context
+// updated and base context to reflect the merging.
+//
+// Use for nested validations errors.
+func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) {
+	for _, err := range nested.errs {
+		err.SetContext(e.Context)
+		err.AddNestedContext(nestedCtx)
+		e.errs = append(e.errs, err)
+	}
+}
+
+// Len returns the number of invalid parameter errors
+func (e ErrInvalidParams) Len() int {
+	return len(e.errs)
+}
+
+// Code returns the code of the error
+func (e ErrInvalidParams) Code() string {
+	return InvalidParameterErrCode
+}
+
+// Message returns the message of the error
+func (e ErrInvalidParams) Message() string {
+	return fmt.Sprintf("%d validation error(s) found.", len(e.errs))
+}
+
+// Error returns the string formatted form of the invalid parameters.
+func (e ErrInvalidParams) Error() string {
+	w := &bytes.Buffer{}
+	fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message())
+
+	for _, err := range e.errs {
+		fmt.Fprintf(w, "- %s\n", err.Message())
+	}
+
+	return w.String()
+}
+
+// OrigErr returns the invalid parameters as a awserr.BatchedErrors value
+func (e ErrInvalidParams) OrigErr() error {
+	return awserr.NewBatchError(
+		InvalidParameterErrCode, e.Message(), e.OrigErrs())
+}
+
+// OrigErrs returns a slice of the invalid parameters
+func (e ErrInvalidParams) OrigErrs() []error {
+	errs := make([]error, len(e.errs))
+	for i := 0; i < len(errs); i++ {
+		errs[i] = e.errs[i]
+	}
+
+	return errs
+}
+
+// An ErrInvalidParam represents an invalid parameter error type.
+type ErrInvalidParam interface {
+	awserr.Error
+
+	// Field name the error occurred on.
+	Field() string
+
+	// SetContext updates the context of the error.
+	SetContext(string)
+
+	// AddNestedContext updates the error's context to include a nested level.
+	AddNestedContext(string)
+}
+
+type errInvalidParam struct {
+	context       string
+	nestedContext string
+	field         string
+	code          string
+	msg           string
+}
+
+// Code returns the error code for the type of invalid parameter.
+func (e *errInvalidParam) Code() string {
+	return e.code
+}
+
+// Message returns the reason the parameter was invalid, and its context.
+func (e *errInvalidParam) Message() string {
+	return fmt.Sprintf("%s, %s.", e.msg, e.Field())
+}
+
+// Error returns the string version of the invalid parameter error.
+func (e *errInvalidParam) Error() string {
+	return fmt.Sprintf("%s: %s", e.code, e.Message())
+}
+
+// OrigErr returns nil, Implemented for awserr.Error interface.
+func (e *errInvalidParam) OrigErr() error {
+	return nil
+}
+
+// Field Returns the field and context the error occurred.
+func (e *errInvalidParam) Field() string {
+	field := e.context
+	if len(field) > 0 {
+		field += "."
+	}
+	if len(e.nestedContext) > 0 {
+		field += fmt.Sprintf("%s.", e.nestedContext)
+	}
+	field += e.field
+
+	return field
+}
+
+// SetContext updates the base context of the error.
+func (e *errInvalidParam) SetContext(ctx string) {
+	e.context = ctx
+}
+
+// AddNestedContext prepends a context to the field's path.
+func (e *errInvalidParam) AddNestedContext(ctx string) {
+	if len(e.nestedContext) == 0 {
+		e.nestedContext = ctx
+	} else {
+		e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext)
+	}
+
+}
+
+// An ErrParamRequired represents a required parameter error.
+type ErrParamRequired struct {
+	errInvalidParam
+}
+
+// NewErrParamRequired creates a new required parameter error.
+func NewErrParamRequired(field string) *ErrParamRequired {
+	return &ErrParamRequired{
+		errInvalidParam{
+			code:  ParamRequiredErrCode,
+			field: field,
+			msg:   fmt.Sprintf("missing required field"),
+		},
+	}
+}
+
+// An ErrParamMinValue represents a minimum value parameter error.
+type ErrParamMinValue struct {
+	errInvalidParam
+	min float64
+}
+
+// NewErrParamMinValue creates a new minimum value parameter error.
+func NewErrParamMinValue(field string, min float64) *ErrParamMinValue {
+	return &ErrParamMinValue{
+		errInvalidParam: errInvalidParam{
+			code:  ParamMinValueErrCode,
+			field: field,
+			msg:   fmt.Sprintf("minimum field value of %v", min),
+		},
+		min: min,
+	}
+}
+
+// MinValue returns the field's required minimum value.
+//
+// float64 is returned for both int and float min values.
+func (e *ErrParamMinValue) MinValue() float64 {
+	return e.min
+}
+
+// An ErrParamMinLen represents a minimum length parameter error.
+type ErrParamMinLen struct {
+	errInvalidParam
+	min int
+}
+
+// NewErrParamMinLen creates a new minimum length parameter error.
+func NewErrParamMinLen(field string, min int) *ErrParamMinLen {
+	return &ErrParamMinLen{
+		errInvalidParam: errInvalidParam{
+			code:  ParamMinValueErrCode,
+			field: field,
+			msg:   fmt.Sprintf("minimum field size of %v", min),
+		},
+		min: min,
+	}
+}
+
+// MinLen returns the field's required minimum length.
+func (e *ErrParamMinLen) MinLen() int {
+	return e.min
+}

+ 0 - 51
vendor/src/github.com/aws/aws-sdk-go/aws/service/default_retryer.go

@@ -1,51 +0,0 @@
-package service
-
-import (
-	"math"
-	"math/rand"
-	"time"
-
-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/aws/request"
-)
-
-// DefaultRetryer implements basic retry logic using exponential backoff for
-// most services. If you want to implement custom retry logic, implement the
-// request.Retryer interface or create a structure type that composes this
-// struct and override the specific methods. For example, to override only
-// the MaxRetries method:
-//
-//		type retryer struct {
-//      service.DefaultRetryer
-//    }
-//
-//    // This implementation always has 100 max retries
-//    func (d retryer) MaxRetries() uint { return 100 }
-type DefaultRetryer struct {
-	*Service
-}
-
-// MaxRetries returns the number of maximum returns the service will use to make
-// an individual API request.
-func (d DefaultRetryer) MaxRetries() uint {
-	if aws.IntValue(d.Service.Config.MaxRetries) < 0 {
-		return d.DefaultMaxRetries
-	}
-	return uint(aws.IntValue(d.Service.Config.MaxRetries))
-}
-
-var seededRand = rand.New(rand.NewSource(time.Now().UnixNano()))
-
-// RetryRules returns the delay duration before retrying this request again
-func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
-	delay := int(math.Pow(2, float64(r.RetryCount))) * (seededRand.Intn(30) + 30)
-	return time.Duration(delay) * time.Millisecond
-}
-
-// ShouldRetry returns if the request should be retried.
-func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
-	if r.HTTPResponse.StatusCode >= 500 {
-		return true
-	}
-	return r.IsErrorRetryable()
-}

+ 0 - 133
vendor/src/github.com/aws/aws-sdk-go/aws/service/service.go

@@ -1,133 +0,0 @@
-package service
-
-import (
-	"fmt"
-	"io/ioutil"
-	"net/http"
-	"net/http/httputil"
-	"regexp"
-	"time"
-
-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/aws/corehandlers"
-	"github.com/aws/aws-sdk-go/aws/request"
-	"github.com/aws/aws-sdk-go/aws/service/serviceinfo"
-	"github.com/aws/aws-sdk-go/internal/endpoints"
-)
-
-// A Service implements the base service request and response handling
-// used by all services.
-type Service struct {
-	serviceinfo.ServiceInfo
-	request.Retryer
-	DefaultMaxRetries uint
-	Handlers          request.Handlers
-}
-
-var schemeRE = regexp.MustCompile("^([^:]+)://")
-
-// New will return a pointer to a new Server object initialized.
-func New(config *aws.Config) *Service {
-	svc := &Service{ServiceInfo: serviceinfo.ServiceInfo{Config: config}}
-	svc.Initialize()
-	return svc
-}
-
-// Initialize initializes the service.
-func (s *Service) Initialize() {
-	if s.Config == nil {
-		s.Config = &aws.Config{}
-	}
-	if s.Config.HTTPClient == nil {
-		s.Config.HTTPClient = http.DefaultClient
-	}
-	if s.Config.SleepDelay == nil {
-		s.Config.SleepDelay = time.Sleep
-	}
-
-	s.Retryer = DefaultRetryer{s}
-	s.DefaultMaxRetries = 3
-	s.Handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
-	s.Handlers.Build.PushBackNamed(corehandlers.UserAgentHandler)
-	s.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
-	s.Handlers.Send.PushBackNamed(corehandlers.SendHandler)
-	s.Handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
-	s.Handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler)
-	if !aws.BoolValue(s.Config.DisableParamValidation) {
-		s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler)
-	}
-	s.AddDebugHandlers()
-	s.buildEndpoint()
-}
-
-// NewRequest returns a new Request pointer for the service API
-// operation and parameters.
-func (s *Service) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request {
-	return request.New(s.ServiceInfo, s.Handlers, s.Retryer, operation, params, data)
-}
-
-// buildEndpoint builds the endpoint values the service will use to make requests with.
-func (s *Service) buildEndpoint() {
-	if aws.StringValue(s.Config.Endpoint) != "" {
-		s.Endpoint = *s.Config.Endpoint
-	} else if s.Endpoint == "" {
-		s.Endpoint, s.SigningRegion =
-			endpoints.EndpointForRegion(s.ServiceName, aws.StringValue(s.Config.Region))
-	}
-
-	if s.Endpoint != "" && !schemeRE.MatchString(s.Endpoint) {
-		scheme := "https"
-		if aws.BoolValue(s.Config.DisableSSL) {
-			scheme = "http"
-		}
-		s.Endpoint = scheme + "://" + s.Endpoint
-	}
-}
-
-// AddDebugHandlers injects debug logging handlers into the service to log request
-// debug information.
-func (s *Service) AddDebugHandlers() {
-	if !s.Config.LogLevel.AtLeast(aws.LogDebug) {
-		return
-	}
-
-	s.Handlers.Send.PushFront(logRequest)
-	s.Handlers.Send.PushBack(logResponse)
-}
-
-const logReqMsg = `DEBUG: Request %s/%s Details:
----[ REQUEST POST-SIGN ]-----------------------------
-%s
------------------------------------------------------`
-
-func logRequest(r *request.Request) {
-	logBody := r.Service.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
-	dumpedBody, _ := httputil.DumpRequestOut(r.HTTPRequest, logBody)
-
-	if logBody {
-		// Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's
-		// Body as a NoOpCloser and will not be reset after read by the HTTP
-		// client reader.
-		r.Body.Seek(r.BodyStart, 0)
-		r.HTTPRequest.Body = ioutil.NopCloser(r.Body)
-	}
-
-	r.Service.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.Service.ServiceName, r.Operation.Name, string(dumpedBody)))
-}
-
-const logRespMsg = `DEBUG: Response %s/%s Details:
----[ RESPONSE ]--------------------------------------
-%s
------------------------------------------------------`
-
-func logResponse(r *request.Request) {
-	var msg = "no reponse data"
-	if r.HTTPResponse != nil {
-		logBody := r.Service.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
-		dumpedBody, _ := httputil.DumpResponse(r.HTTPResponse, logBody)
-		msg = string(dumpedBody)
-	} else if r.Error != nil {
-		msg = r.Error.Error()
-	}
-	r.Service.Config.Logger.Log(fmt.Sprintf(logRespMsg, r.Service.ServiceName, r.Operation.Name, msg))
-}

+ 0 - 15
vendor/src/github.com/aws/aws-sdk-go/aws/service/serviceinfo/service_info.go

@@ -1,15 +0,0 @@
-package serviceinfo
-
-import "github.com/aws/aws-sdk-go/aws"
-
-// ServiceInfo wraps immutable data from the service.Service structure.
-type ServiceInfo struct {
-	Config        *aws.Config
-	ServiceName   string
-	APIVersion    string
-	Endpoint      string
-	SigningName   string
-	SigningRegion string
-	JSONVersion   string
-	TargetPrefix  string
-}

+ 120 - 0
vendor/src/github.com/aws/aws-sdk-go/aws/session/session.go

@@ -0,0 +1,120 @@
+// Package session provides a way to create service clients with shared configuration
+// and handlers.
+//
+// Generally this package should be used instead of the `defaults` package.
+//
+// A session should be used to share configurations and request handlers between multiple
+// service clients. When service clients need specific configuration aws.Config can be
+// used to provide additional configuration directly to the service client.
+package session
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/corehandlers"
+	"github.com/aws/aws-sdk-go/aws/defaults"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/endpoints"
+)
+
+// A Session provides a central location to create service clients from and
+// store configurations and request handlers for those services.
+//
+// Sessions are safe to create service clients concurrently, but it is not safe
+// to mutate the session concurrently.
+type Session struct {
+	Config   *aws.Config
+	Handlers request.Handlers
+}
+
+// New creates a new instance of the handlers merging in the provided Configs
+// on top of the SDK's default configurations. Once the session is created it
+// can be mutated to modify Configs or Handlers. The session is safe to be read
+// concurrently, but it should not be written to concurrently.
+//
+// Example:
+//     // Create a session with the default config and request handlers.
+//     sess := session.New()
+//
+//     // Create a session with a custom region
+//     sess := session.New(&aws.Config{Region: aws.String("us-east-1")})
+//
+//     // Create a session, and add additional handlers for all service
+//     // clients created with the session to inherit. Adds logging handler.
+//     sess := session.New()
+//     sess.Handlers.Send.PushFront(func(r *request.Request) {
+//          // Log every request made and its payload
+//          logger.Println("Request: %s/%s, Payload: %s", r.ClientInfo.ServiceName, r.Operation, r.Params)
+//     })
+//
+//     // Create a S3 client instance from a session
+//     sess := session.New()
+//     svc := s3.New(sess)
+func New(cfgs ...*aws.Config) *Session {
+	cfg := defaults.Config()
+	handlers := defaults.Handlers()
+
+	// Apply the passed in configs so the configuration can be applied to the
+	// default credential chain
+	cfg.MergeIn(cfgs...)
+	cfg.Credentials = defaults.CredChain(cfg, handlers)
+
+	// Reapply any passed in configs to override credentials if set
+	cfg.MergeIn(cfgs...)
+
+	s := &Session{
+		Config:   cfg,
+		Handlers: handlers,
+	}
+
+	initHandlers(s)
+
+	return s
+}
+
+func initHandlers(s *Session) {
+	// Add the Validate parameter handler if it is not disabled.
+	s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler)
+	if !aws.BoolValue(s.Config.DisableParamValidation) {
+		s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler)
+	}
+}
+
+// Copy creates and returns a copy of the current session, copying the config
+// and handlers. If any additional configs are provided they will be merged
+// on top of the session's copied config.
+//
+// Example:
+//     // Create a copy of the current session, configured for the us-west-2 region.
+//     sess.Copy(&aws.Config{Region: aws.String("us-west-2")})
+func (s *Session) Copy(cfgs ...*aws.Config) *Session {
+	newSession := &Session{
+		Config:   s.Config.Copy(cfgs...),
+		Handlers: s.Handlers.Copy(),
+	}
+
+	initHandlers(newSession)
+
+	return newSession
+}
+
+// ClientConfig satisfies the client.ConfigProvider interface and is used to
+// configure the service client instances. Passing the Session to the service
+// client's constructor (New) will use this method to configure the client.
+//
+// Example:
+//     sess := session.New()
+//     s3.New(sess)
+func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config {
+	s = s.Copy(cfgs...)
+	endpoint, signingRegion := endpoints.NormalizeEndpoint(
+		aws.StringValue(s.Config.Endpoint), serviceName,
+		aws.StringValue(s.Config.Region), aws.BoolValue(s.Config.DisableSSL))
+
+	return client.Config{
+		Config:        s.Config,
+		Handlers:      s.Handlers,
+		Endpoint:      endpoint,
+		SigningRegion: signingRegion,
+	}
+}

+ 25 - 7
vendor/src/github.com/aws/aws-sdk-go/aws/types.go

@@ -5,7 +5,7 @@ import (
 	"sync"
 )
 
-// ReadSeekCloser wraps a io.Reader returning a ReaderSeakerCloser
+// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser
 func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
 	return ReaderSeekerCloser{r}
 }
@@ -61,23 +61,41 @@ func (r ReaderSeekerCloser) Close() error {
 type WriteAtBuffer struct {
 	buf []byte
 	m   sync.Mutex
+
+	// GrowthCoeff defines the growth rate of the internal buffer. By
+	// default, the growth rate is 1, where expanding the internal
+	// buffer will allocate only enough capacity to fit the new expected
+	// length.
+	GrowthCoeff float64
+}
+
+// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer
+// provided by buf.
+func NewWriteAtBuffer(buf []byte) *WriteAtBuffer {
+	return &WriteAtBuffer{buf: buf}
 }
 
 // WriteAt writes a slice of bytes to a buffer starting at the position provided
 // The number of bytes written will be returned, or error. Can overwrite previous
 // written slices if the write ats overlap.
 func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) {
+	pLen := len(p)
+	expLen := pos + int64(pLen)
 	b.m.Lock()
 	defer b.m.Unlock()
-
-	expLen := pos + int64(len(p))
 	if int64(len(b.buf)) < expLen {
-		newBuf := make([]byte, expLen)
-		copy(newBuf, b.buf)
-		b.buf = newBuf
+		if int64(cap(b.buf)) < expLen {
+			if b.GrowthCoeff < 1 {
+				b.GrowthCoeff = 1
+			}
+			newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen)))
+			copy(newBuf, b.buf)
+			b.buf = newBuf
+		}
+		b.buf = b.buf[:expLen]
 	}
 	copy(b.buf[pos:], p)
-	return len(p), nil
+	return pLen, nil
 }
 
 // Bytes returns a slice of bytes written to the buffer.

+ 1 - 1
vendor/src/github.com/aws/aws-sdk-go/aws/version.go

@@ -5,4 +5,4 @@ package aws
 const SDKName = "aws-sdk-go"
 
 // SDKVersion is the version of this SDK
-const SDKVersion = "0.9.9"
+const SDKVersion = "1.1.30"

+ 27 - 0
vendor/src/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/LICENSE

@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 0 - 31
vendor/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints.go

@@ -1,31 +0,0 @@
-// Package endpoints validates regional endpoints for services.
-package endpoints
-
-//go:generate go run ../model/cli/gen-endpoints/main.go endpoints.json endpoints_map.go
-//go:generate gofmt -s -w endpoints_map.go
-
-import "strings"
-
-// EndpointForRegion returns an endpoint and its signing region for a service and region.
-// if the service and region pair are not found endpoint and signingRegion will be empty.
-func EndpointForRegion(svcName, region string) (endpoint, signingRegion string) {
-	derivedKeys := []string{
-		region + "/" + svcName,
-		region + "/*",
-		"*/" + svcName,
-		"*/*",
-	}
-
-	for _, key := range derivedKeys {
-		if val, ok := endpointsMap.Endpoints[key]; ok {
-			ep := val.Endpoint
-			ep = strings.Replace(ep, "{region}", region, -1)
-			ep = strings.Replace(ep, "{service}", svcName, -1)
-
-			endpoint = ep
-			signingRegion = val.SigningRegion
-			return
-		}
-	}
-	return
-}

+ 65 - 0
vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go

@@ -0,0 +1,65 @@
+// Package endpoints validates regional endpoints for services.
+package endpoints
+
+//go:generate go run ../model/cli/gen-endpoints/main.go endpoints.json endpoints_map.go
+//go:generate gofmt -s -w endpoints_map.go
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+)
+
+// NormalizeEndpoint takes an endpoint and service API information to return a
+// normalized endpoint and signing region.  If the endpoint is an empty string
+// the service name and region will be used to look up the service's API endpoint.
+// If the endpoint is provided the scheme will be added if it is not present.
+func NormalizeEndpoint(endpoint, serviceName, region string, disableSSL bool) (normEndpoint, signingRegion string) {
+	if endpoint == "" {
+		return EndpointForRegion(serviceName, region, disableSSL)
+	}
+
+	return AddScheme(endpoint, disableSSL), ""
+}
+
+// EndpointForRegion returns an endpoint and its signing region for a service and region.
+// if the service and region pair are not found endpoint and signingRegion will be empty.
+func EndpointForRegion(svcName, region string, disableSSL bool) (endpoint, signingRegion string) {
+	derivedKeys := []string{
+		region + "/" + svcName,
+		region + "/*",
+		"*/" + svcName,
+		"*/*",
+	}
+
+	for _, key := range derivedKeys {
+		if val, ok := endpointsMap.Endpoints[key]; ok {
+			ep := val.Endpoint
+			ep = strings.Replace(ep, "{region}", region, -1)
+			ep = strings.Replace(ep, "{service}", svcName, -1)
+
+			endpoint = ep
+			signingRegion = val.SigningRegion
+			break
+		}
+	}
+
+	return AddScheme(endpoint, disableSSL), signingRegion
+}
+
+// Regular expression to determine if the endpoint string is prefixed with a scheme.
+var schemeRE = regexp.MustCompile("^([^:]+)://")
+
+// AddScheme adds the HTTP or HTTPS schemes to a endpoint URL if there is no
+// scheme. If disableSSL is true HTTP will be added instead of the default HTTPS.
+func AddScheme(endpoint string, disableSSL bool) string {
+	if endpoint != "" && !schemeRE.MatchString(endpoint) {
+		scheme := "https"
+		if disableSSL {
+			scheme = "http"
+		}
+		endpoint = fmt.Sprintf("%s://%s", scheme, endpoint)
+	}
+
+	return endpoint
+}

+ 21 - 23
vendor/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints.json → vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json

@@ -8,6 +8,9 @@
       "endpoint": "{service}.{region}.amazonaws.com.cn",
       "signatureVersion": "v4"
     },
+    "cn-north-1/ec2metadata": {
+      "endpoint": "http://169.254.169.254/latest"
+    },
     "us-gov-west-1/iam": {
       "endpoint": "iam.us-gov.amazonaws.com"
     },
@@ -17,6 +20,9 @@
     "us-gov-west-1/s3": {
       "endpoint": "s3-{region}.amazonaws.com"
     },
+    "us-gov-west-1/ec2metadata": {
+      "endpoint": "http://169.254.169.254/latest"
+    },
     "*/cloudfront": {
       "endpoint": "cloudfront.amazonaws.com",
       "signingRegion": "us-east-1"
@@ -25,6 +31,13 @@
       "endpoint": "",
       "signingRegion": "us-east-1"
     },
+    "*/data.iot": {
+      "endpoint": "",
+      "signingRegion": "us-east-1"
+    },
+    "*/ec2metadata": {
+      "endpoint": "http://169.254.169.254/latest"
+    },
     "*/iam": {
       "endpoint": "iam.amazonaws.com",
       "signingRegion": "us-east-1"
@@ -41,37 +54,22 @@
       "endpoint": "sts.amazonaws.com",
       "signingRegion": "us-east-1"
     },
+    "*/waf": {
+      "endpoint": "waf.amazonaws.com",
+      "signingRegion": "us-east-1"
+    },
     "us-east-1/sdb": {
       "endpoint": "sdb.amazonaws.com",
       "signingRegion": "us-east-1"
     },
-    "us-east-1/s3": {
-      "endpoint": "s3.amazonaws.com"
-    },
-    "us-west-1/s3": {
-      "endpoint": "s3-{region}.amazonaws.com"
-    },
-    "us-west-2/s3": {
+    "*/s3": {
       "endpoint": "s3-{region}.amazonaws.com"
     },
-    "eu-west-1/s3": {
-      "endpoint": "s3-{region}.amazonaws.com"
-    },
-    "ap-southeast-1/s3": {
-      "endpoint": "s3-{region}.amazonaws.com"
-    },
-    "ap-southeast-2/s3": {
-      "endpoint": "s3-{region}.amazonaws.com"
-    },
-    "ap-northeast-1/s3": {
-      "endpoint": "s3-{region}.amazonaws.com"
-    },
-    "sa-east-1/s3": {
-      "endpoint": "s3-{region}.amazonaws.com"
+    "us-east-1/s3": {
+      "endpoint": "s3.amazonaws.com"
     },
     "eu-central-1/s3": {
-      "endpoint": "{service}.{region}.amazonaws.com",
-      "signatureVersion": "v4"
+      "endpoint": "{service}.{region}.amazonaws.com"
     }
   }
 }

+ 19 - 20
vendor/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints_map.go → vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go

@@ -26,6 +26,13 @@ var endpointsMap = endpointStruct{
 			Endpoint:      "",
 			SigningRegion: "us-east-1",
 		},
+		"*/data.iot": {
+			Endpoint:      "",
+			SigningRegion: "us-east-1",
+		},
+		"*/ec2metadata": {
+			Endpoint: "http://169.254.169.254/latest",
+		},
 		"*/iam": {
 			Endpoint:      "iam.amazonaws.com",
 			SigningRegion: "us-east-1",
@@ -38,31 +45,26 @@ var endpointsMap = endpointStruct{
 			Endpoint:      "route53.amazonaws.com",
 			SigningRegion: "us-east-1",
 		},
+		"*/s3": {
+			Endpoint: "s3-{region}.amazonaws.com",
+		},
 		"*/sts": {
 			Endpoint:      "sts.amazonaws.com",
 			SigningRegion: "us-east-1",
 		},
-		"ap-northeast-1/s3": {
-			Endpoint: "s3-{region}.amazonaws.com",
-		},
-		"ap-southeast-1/s3": {
-			Endpoint: "s3-{region}.amazonaws.com",
-		},
-		"ap-southeast-2/s3": {
-			Endpoint: "s3-{region}.amazonaws.com",
+		"*/waf": {
+			Endpoint:      "waf.amazonaws.com",
+			SigningRegion: "us-east-1",
 		},
 		"cn-north-1/*": {
 			Endpoint: "{service}.{region}.amazonaws.com.cn",
 		},
+		"cn-north-1/ec2metadata": {
+			Endpoint: "http://169.254.169.254/latest",
+		},
 		"eu-central-1/s3": {
 			Endpoint: "{service}.{region}.amazonaws.com",
 		},
-		"eu-west-1/s3": {
-			Endpoint: "s3-{region}.amazonaws.com",
-		},
-		"sa-east-1/s3": {
-			Endpoint: "s3-{region}.amazonaws.com",
-		},
 		"us-east-1/s3": {
 			Endpoint: "s3.amazonaws.com",
 		},
@@ -70,6 +72,9 @@ var endpointsMap = endpointStruct{
 			Endpoint:      "sdb.amazonaws.com",
 			SigningRegion: "us-east-1",
 		},
+		"us-gov-west-1/ec2metadata": {
+			Endpoint: "http://169.254.169.254/latest",
+		},
 		"us-gov-west-1/iam": {
 			Endpoint: "iam.us-gov.amazonaws.com",
 		},
@@ -79,11 +84,5 @@ var endpointsMap = endpointStruct{
 		"us-gov-west-1/sts": {
 			Endpoint: "sts.us-gov-west-1.amazonaws.com",
 		},
-		"us-west-1/s3": {
-			Endpoint: "s3-{region}.amazonaws.com",
-		},
-		"us-west-2/s3": {
-			Endpoint: "s3-{region}.amazonaws.com",
-		},
 	},
 }

+ 75 - 0
vendor/src/github.com/aws/aws-sdk-go/private/protocol/idempotency.go

@@ -0,0 +1,75 @@
+package protocol
+
+import (
+	"crypto/rand"
+	"fmt"
+	"reflect"
+)
+
+// RandReader is the random reader the protocol package will use to read
+// random bytes from. This is exported for testing, and should not be used.
+var RandReader = rand.Reader
+
+const idempotencyTokenFillTag = `idempotencyToken`
+
+// CanSetIdempotencyToken returns true if the struct field should be
+// automatically populated with an Idempotency token.
+//
+// Only *string and string type fields that are tagged with idempotencyToken
+// which are not already set can be auto filled.
+func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool {
+	switch u := v.Interface().(type) {
+	// To auto fill an Idempotency token the field must be a string,
+	// tagged for auto fill, and have a zero value.
+	case *string:
+		return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
+	case string:
+		return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
+	}
+
+	return false
+}
+
+// GetIdempotencyToken returns a randomly generated idempotency token.
+func GetIdempotencyToken() string {
+	b := make([]byte, 16)
+	RandReader.Read(b)
+
+	return UUIDVersion4(b)
+}
+
+// SetIdempotencyToken will set the value provided with an Idempotency Token,
+// provided that the value can be set. Will panic if the value is not settable.
+func SetIdempotencyToken(v reflect.Value) {
+	if v.Kind() == reflect.Ptr {
+		if v.IsNil() && v.CanSet() {
+			v.Set(reflect.New(v.Type().Elem()))
+		}
+		v = v.Elem()
+	}
+	v = reflect.Indirect(v)
+
+	if !v.CanSet() {
+		panic(fmt.Sprintf("unable to set idempotnecy token %v", v))
+	}
+
+	b := make([]byte, 16)
+	_, err := rand.Read(b)
+	if err != nil {
+		// TODO handle error
+		return
+	}
+
+	v.Set(reflect.ValueOf(UUIDVersion4(b)))
+}
+
+// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided
+func UUIDVersion4(u []byte) string {
+	// https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29
+	// 13th character is "4"
+	u[6] = (u[6] | 0x40) & 0x4F
+	// 17th character is "8", "9", "a", or "b"
+	u[8] = (u[8] | 0x80) & 0xBF
+
+	return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
+}

+ 81 - 45
vendor/src/github.com/aws/aws-sdk-go/internal/protocol/json/jsonutil/build.go → vendor/src/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go

@@ -8,10 +8,14 @@ import (
 	"reflect"
 	"sort"
 	"strconv"
-	"strings"
 	"time"
+
+	"github.com/aws/aws-sdk-go/private/protocol"
 )
 
+var timeType = reflect.ValueOf(time.Time{}).Type()
+var byteSliceType = reflect.ValueOf([]byte{}).Type()
+
 // BuildJSON builds a JSON string for a given object v.
 func BuildJSON(v interface{}) ([]byte, error) {
 	var buf bytes.Buffer
@@ -33,7 +37,7 @@ func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) err
 		switch vtype.Kind() {
 		case reflect.Struct:
 			// also it can't be a time object
-			if _, ok := value.Interface().(time.Time); !ok {
+			if value.Type() != timeType {
 				t = "structure"
 			}
 		case reflect.Slice:
@@ -48,7 +52,7 @@ func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) err
 
 	switch t {
 	case "structure":
-		if field, ok := vtype.FieldByName("SDKShapeTraits"); ok {
+		if field, ok := vtype.FieldByName("_"); ok {
 			tag = field.Tag
 		}
 		return buildStruct(value, buf, tag)
@@ -77,27 +81,38 @@ func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag)
 		}
 	}
 
-	buf.WriteString("{")
+	buf.WriteByte('{')
 
-	t, fields := value.Type(), []*reflect.StructField{}
+	t := value.Type()
+	first := true
 	for i := 0; i < t.NumField(); i++ {
+		member := value.Field(i)
 		field := t.Field(i)
-		member := value.FieldByName(field.Name)
-		if (member.Kind() == reflect.Ptr || member.Kind() == reflect.Slice || member.Kind() == reflect.Map) && member.IsNil() {
-			continue // ignore unset fields
-		}
-		if c := field.Name[0:1]; strings.ToLower(c) == c {
+
+		if field.PkgPath != "" {
 			continue // ignore unexported fields
 		}
+		if field.Tag.Get("json") == "-" {
+			continue
+		}
 		if field.Tag.Get("location") != "" {
 			continue // ignore non-body elements
 		}
 
-		fields = append(fields, &field)
-	}
+		if protocol.CanSetIdempotencyToken(member, field) {
+			token := protocol.GetIdempotencyToken()
+			member = reflect.ValueOf(&token)
+		}
+
+		if (member.Kind() == reflect.Ptr || member.Kind() == reflect.Slice || member.Kind() == reflect.Map) && member.IsNil() {
+			continue // ignore unset fields
+		}
 
-	for i, field := range fields {
-		member := value.FieldByName(field.Name)
+		if first {
+			first = false
+		} else {
+			buf.WriteByte(',')
+		}
 
 		// figure out what this field is called
 		name := field.Name
@@ -105,16 +120,14 @@ func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag)
 			name = locName
 		}
 
-		buf.WriteString(fmt.Sprintf("%q:", name))
+		writeString(name, buf)
+		buf.WriteString(`:`)
 
 		err := buildAny(member, buf, field.Tag)
 		if err != nil {
 			return err
 		}
 
-		if i < len(fields)-1 {
-			buf.WriteString(",")
-		}
 	}
 
 	buf.WriteString("}")
@@ -138,22 +151,27 @@ func buildList(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) er
 	return nil
 }
 
+type sortedValues []reflect.Value
+
+func (sv sortedValues) Len() int           { return len(sv) }
+func (sv sortedValues) Swap(i, j int)      { sv[i], sv[j] = sv[j], sv[i] }
+func (sv sortedValues) Less(i, j int) bool { return sv[i].String() < sv[j].String() }
+
 func buildMap(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
 	buf.WriteString("{")
 
-	keys := make([]string, value.Len())
-	for i, n := range value.MapKeys() {
-		keys[i] = n.String()
-	}
-	sort.Strings(keys)
-
-	for i, k := range keys {
-		buf.WriteString(fmt.Sprintf("%q:", k))
-		buildAny(value.MapIndex(reflect.ValueOf(k)), buf, "")
+	var sv sortedValues = value.MapKeys()
+	sort.Sort(sv)
 
-		if i < len(keys)-1 {
-			buf.WriteString(",")
+	for i, k := range sv {
+		if i > 0 {
+			buf.WriteByte(',')
 		}
+
+		writeString(k.String(), buf)
+		buf.WriteString(`:`)
+
+		buildAny(value.MapIndex(k), buf, "")
 	}
 
 	buf.WriteString("}")
@@ -162,23 +180,41 @@ func buildMap(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) err
 }
 
 func buildScalar(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
-	switch converted := value.Interface().(type) {
-	case string:
-		writeString(converted, buf)
-	case []byte:
-		if !value.IsNil() {
-			buf.WriteString(fmt.Sprintf("%q", base64.StdEncoding.EncodeToString(converted)))
-		}
-	case bool:
-		buf.WriteString(strconv.FormatBool(converted))
-	case int64:
-		buf.WriteString(strconv.FormatInt(converted, 10))
-	case float64:
-		buf.WriteString(strconv.FormatFloat(converted, 'f', -1, 64))
-	case time.Time:
-		buf.WriteString(strconv.FormatInt(converted.UTC().Unix(), 10))
+	switch value.Kind() {
+	case reflect.String:
+		writeString(value.String(), buf)
+	case reflect.Bool:
+		buf.WriteString(strconv.FormatBool(value.Bool()))
+	case reflect.Int64:
+		buf.WriteString(strconv.FormatInt(value.Int(), 10))
+	case reflect.Float64:
+		buf.WriteString(strconv.FormatFloat(value.Float(), 'f', -1, 64))
 	default:
-		return fmt.Errorf("unsupported JSON value %v (%s)", value.Interface(), value.Type())
+		switch value.Type() {
+		case timeType:
+			converted := value.Interface().(time.Time)
+			buf.WriteString(strconv.FormatInt(converted.UTC().Unix(), 10))
+		case byteSliceType:
+			if !value.IsNil() {
+				converted := value.Interface().([]byte)
+				buf.WriteByte('"')
+				if len(converted) < 1024 {
+					// for small buffers, using Encode directly is much faster.
+					dst := make([]byte, base64.StdEncoding.EncodedLen(len(converted)))
+					base64.StdEncoding.Encode(dst, converted)
+					buf.Write(dst)
+				} else {
+					// for large buffers, avoid unnecessary extra temporary
+					// buffer space.
+					enc := base64.NewEncoder(base64.StdEncoding, buf)
+					enc.Write(converted)
+					enc.Close()
+				}
+				buf.WriteByte('"')
+			}
+		default:
+			return fmt.Errorf("unsupported JSON value %v (%s)", value.Interface(), value.Type())
+		}
 	}
 	return nil
 }

+ 2 - 2
vendor/src/github.com/aws/aws-sdk-go/internal/protocol/json/jsonutil/unmarshal.go → vendor/src/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go

@@ -56,7 +56,7 @@ func unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag)
 
 	switch t {
 	case "structure":
-		if field, ok := vtype.FieldByName("SDKShapeTraits"); ok {
+		if field, ok := vtype.FieldByName("_"); ok {
 			tag = field.Tag
 		}
 		return unmarshalStruct(value, data, tag)
@@ -108,7 +108,7 @@ func unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTa
 			name = locName
 		}
 
-		member := value.FieldByName(field.Name)
+		member := value.FieldByIndex(field.Index)
 		err := unmarshalAny(member, mapData[name], field.Tag)
 		if err != nil {
 			return err

+ 21 - 9
vendor/src/github.com/aws/aws-sdk-go/internal/protocol/jsonrpc/jsonrpc.go → vendor/src/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go

@@ -2,8 +2,8 @@
 // requests and responses.
 package jsonrpc
 
-//go:generate go run ../../fixtures/protocol/generate.go ../../fixtures/protocol/input/json.json build_test.go
-//go:generate go run ../../fixtures/protocol/generate.go ../../fixtures/protocol/output/json.json unmarshal_test.go
+//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/json.json build_test.go
+//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/json.json unmarshal_test.go
 
 import (
 	"encoding/json"
@@ -12,12 +12,24 @@ import (
 
 	"github.com/aws/aws-sdk-go/aws/awserr"
 	"github.com/aws/aws-sdk-go/aws/request"
-	"github.com/aws/aws-sdk-go/internal/protocol/json/jsonutil"
-	"github.com/aws/aws-sdk-go/internal/protocol/rest"
+	"github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
+	"github.com/aws/aws-sdk-go/private/protocol/rest"
 )
 
 var emptyJSON = []byte("{}")
 
+// BuildHandler is a named request handler for building jsonrpc protocol requests
+var BuildHandler = request.NamedHandler{Name: "awssdk.jsonrpc.Build", Fn: Build}
+
+// UnmarshalHandler is a named request handler for unmarshaling jsonrpc protocol requests
+var UnmarshalHandler = request.NamedHandler{Name: "awssdk.jsonrpc.Unmarshal", Fn: Unmarshal}
+
+// UnmarshalMetaHandler is a named request handler for unmarshaling jsonrpc protocol request metadata
+var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.jsonrpc.UnmarshalMeta", Fn: UnmarshalMeta}
+
+// UnmarshalErrorHandler is a named request handler for unmarshaling jsonrpc protocol request errors
+var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.jsonrpc.UnmarshalError", Fn: UnmarshalError}
+
 // Build builds a JSON payload for a JSON RPC request.
 func Build(req *request.Request) {
 	var buf []byte
@@ -32,16 +44,16 @@ func Build(req *request.Request) {
 		buf = emptyJSON
 	}
 
-	if req.Service.TargetPrefix != "" || string(buf) != "{}" {
+	if req.ClientInfo.TargetPrefix != "" || string(buf) != "{}" {
 		req.SetBufferBody(buf)
 	}
 
-	if req.Service.TargetPrefix != "" {
-		target := req.Service.TargetPrefix + "." + req.Operation.Name
+	if req.ClientInfo.TargetPrefix != "" {
+		target := req.ClientInfo.TargetPrefix + "." + req.Operation.Name
 		req.HTTPRequest.Header.Add("X-Amz-Target", target)
 	}
-	if req.Service.JSONVersion != "" {
-		jsonVersion := req.Service.JSONVersion
+	if req.ClientInfo.JSONVersion != "" {
+		jsonVersion := req.ClientInfo.JSONVersion
 		req.HTTPRequest.Header.Add("Content-Type", "application/x-amz-json-"+jsonVersion)
 	}
 }

+ 74 - 35
vendor/src/github.com/aws/aws-sdk-go/internal/protocol/rest/build.go → vendor/src/github.com/aws/aws-sdk-go/private/protocol/rest/build.go

@@ -6,6 +6,7 @@ import (
 	"encoding/base64"
 	"fmt"
 	"io"
+	"net/http"
 	"net/url"
 	"path"
 	"reflect"
@@ -23,6 +24,8 @@ const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT"
 // Whether the byte value can be sent without escaping in AWS URLs
 var noEscape [256]bool
 
+var errValueNotSet = fmt.Errorf("value not set")
+
 func init() {
 	for i := 0; i < len(noEscape); i++ {
 		// AWS expects every character except these to be escaped
@@ -36,6 +39,9 @@ func init() {
 	}
 }
 
+// BuildHandler is a named request handler for building rest protocol requests
+var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build}
+
 // Build builds the REST component of a service request.
 func Build(r *request.Request) {
 	if r.ParamsFilled() {
@@ -67,16 +73,18 @@ func buildLocationElements(r *request.Request, v reflect.Value) {
 				continue
 			}
 
+			var err error
 			switch field.Tag.Get("location") {
 			case "headers": // header maps
-				buildHeaderMap(r, m, field.Tag.Get("locationName"))
+				err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag.Get("locationName"))
 			case "header":
-				buildHeader(r, m, name)
+				err = buildHeader(&r.HTTPRequest.Header, m, name)
 			case "uri":
-				buildURI(r, m, name)
+				err = buildURI(r.HTTPRequest.URL, m, name)
 			case "querystring":
-				buildQueryString(r, m, name, query)
+				err = buildQueryString(query, m, name)
 			}
+			r.Error = err
 		}
 		if r.Error != nil {
 			return
@@ -88,7 +96,7 @@ func buildLocationElements(r *request.Request, v reflect.Value) {
 }
 
 func buildBody(r *request.Request, v reflect.Value) {
-	if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok {
+	if field, ok := v.Type().FieldByName("_"); ok {
 		if payloadName := field.Tag.Get("payload"); payloadName != "" {
 			pfield, _ := v.Type().FieldByName(payloadName)
 			if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" {
@@ -112,45 +120,77 @@ func buildBody(r *request.Request, v reflect.Value) {
 	}
 }
 
-func buildHeader(r *request.Request, v reflect.Value, name string) {
+func buildHeader(header *http.Header, v reflect.Value, name string) error {
 	str, err := convertType(v)
-	if err != nil {
-		r.Error = awserr.New("SerializationError", "failed to encode REST request", err)
-	} else if str != nil {
-		r.HTTPRequest.Header.Add(name, *str)
+	if err == errValueNotSet {
+		return nil
+	} else if err != nil {
+		return awserr.New("SerializationError", "failed to encode REST request", err)
 	}
+
+	header.Add(name, str)
+
+	return nil
 }
 
-func buildHeaderMap(r *request.Request, v reflect.Value, prefix string) {
+func buildHeaderMap(header *http.Header, v reflect.Value, prefix string) error {
 	for _, key := range v.MapKeys() {
 		str, err := convertType(v.MapIndex(key))
-		if err != nil {
-			r.Error = awserr.New("SerializationError", "failed to encode REST request", err)
-		} else if str != nil {
-			r.HTTPRequest.Header.Add(prefix+key.String(), *str)
+		if err == errValueNotSet {
+			continue
+		} else if err != nil {
+			return awserr.New("SerializationError", "failed to encode REST request", err)
+
 		}
+
+		header.Add(prefix+key.String(), str)
 	}
+	return nil
 }
 
-func buildURI(r *request.Request, v reflect.Value, name string) {
+func buildURI(u *url.URL, v reflect.Value, name string) error {
 	value, err := convertType(v)
-	if err != nil {
-		r.Error = awserr.New("SerializationError", "failed to encode REST request", err)
-	} else if value != nil {
-		uri := r.HTTPRequest.URL.Path
-		uri = strings.Replace(uri, "{"+name+"}", EscapePath(*value, true), -1)
-		uri = strings.Replace(uri, "{"+name+"+}", EscapePath(*value, false), -1)
-		r.HTTPRequest.URL.Path = uri
+	if err == errValueNotSet {
+		return nil
+	} else if err != nil {
+		return awserr.New("SerializationError", "failed to encode REST request", err)
 	}
+
+	uri := u.Path
+	uri = strings.Replace(uri, "{"+name+"}", EscapePath(value, true), -1)
+	uri = strings.Replace(uri, "{"+name+"+}", EscapePath(value, false), -1)
+	u.Path = uri
+
+	return nil
 }
 
-func buildQueryString(r *request.Request, v reflect.Value, name string, query url.Values) {
-	str, err := convertType(v)
-	if err != nil {
-		r.Error = awserr.New("SerializationError", "failed to encode REST request", err)
-	} else if str != nil {
-		query.Set(name, *str)
+func buildQueryString(query url.Values, v reflect.Value, name string) error {
+	switch value := v.Interface().(type) {
+	case []*string:
+		for _, item := range value {
+			query.Add(name, *item)
+		}
+	case map[string]*string:
+		for key, item := range value {
+			query.Add(key, *item)
+		}
+	case map[string][]*string:
+		for key, items := range value {
+			for _, item := range items {
+				query.Add(key, *item)
+			}
+		}
+	default:
+		str, err := convertType(v)
+		if err == errValueNotSet {
+			return nil
+		} else if err != nil {
+			return awserr.New("SerializationError", "failed to encode REST request", err)
+		}
+		query.Set(name, str)
 	}
+
+	return nil
 }
 
 func updatePath(url *url.URL, urlPath string) {
@@ -182,17 +222,16 @@ func EscapePath(path string, encodeSep bool) string {
 		if noEscape[c] || (c == '/' && !encodeSep) {
 			buf.WriteByte(c)
 		} else {
-			buf.WriteByte('%')
-			buf.WriteString(strings.ToUpper(strconv.FormatUint(uint64(c), 16)))
+			fmt.Fprintf(&buf, "%%%02X", c)
 		}
 	}
 	return buf.String()
 }
 
-func convertType(v reflect.Value) (*string, error) {
+func convertType(v reflect.Value) (string, error) {
 	v = reflect.Indirect(v)
 	if !v.IsValid() {
-		return nil, nil
+		return "", errValueNotSet
 	}
 
 	var str string
@@ -211,7 +250,7 @@ func convertType(v reflect.Value) (*string, error) {
 		str = value.UTC().Format(RFC822)
 	default:
 		err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type())
-		return nil, err
+		return "", err
 	}
-	return &str, nil
+	return str, nil
 }

+ 2 - 2
vendor/src/github.com/aws/aws-sdk-go/internal/protocol/rest/payload.go → vendor/src/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go

@@ -12,7 +12,7 @@ func PayloadMember(i interface{}) interface{} {
 	if !v.IsValid() {
 		return nil
 	}
-	if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok {
+	if field, ok := v.Type().FieldByName("_"); ok {
 		if payloadName := field.Tag.Get("payload"); payloadName != "" {
 			field, _ := v.Type().FieldByName(payloadName)
 			if field.Tag.Get("type") != "structure" {
@@ -34,7 +34,7 @@ func PayloadType(i interface{}) string {
 	if !v.IsValid() {
 		return ""
 	}
-	if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok {
+	if field, ok := v.Type().FieldByName("_"); ok {
 		if payloadName := field.Tag.Get("payload"); payloadName != "" {
 			if member, ok := v.Type().FieldByName(payloadName); ok {
 				return member.Tag.Get("type")

+ 16 - 1
vendor/src/github.com/aws/aws-sdk-go/internal/protocol/rest/unmarshal.go → vendor/src/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go

@@ -3,6 +3,7 @@ package rest
 import (
 	"encoding/base64"
 	"fmt"
+	"io"
 	"io/ioutil"
 	"net/http"
 	"reflect"
@@ -15,6 +16,12 @@ import (
 	"github.com/aws/aws-sdk-go/aws/request"
 )
 
+// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests
+var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal}
+
+// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol request metadata
+var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta}
+
 // Unmarshal unmarshals the REST component of a response in a REST service.
 func Unmarshal(r *request.Request) {
 	if r.DataFilled() {
@@ -26,6 +33,10 @@ func Unmarshal(r *request.Request) {
 // UnmarshalMeta unmarshals the REST metadata of a response in a REST service
 func UnmarshalMeta(r *request.Request) {
 	r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid")
+	if r.RequestID == "" {
+		// Alternative version of request id in the header
+		r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id")
+	}
 	if r.DataFilled() {
 		v := reflect.Indirect(reflect.ValueOf(r.Data))
 		unmarshalLocationElements(r, v)
@@ -33,7 +44,7 @@ func UnmarshalMeta(r *request.Request) {
 }
 
 func unmarshalBody(r *request.Request, v reflect.Value) {
-	if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok {
+	if field, ok := v.Type().FieldByName("_"); ok {
 		if payloadName := field.Tag.Get("payload"); payloadName != "" {
 			pfield, _ := v.Type().FieldByName(payloadName)
 			if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" {
@@ -41,6 +52,7 @@ func unmarshalBody(r *request.Request, v reflect.Value) {
 				if payload.IsValid() {
 					switch payload.Interface().(type) {
 					case []byte:
+						defer r.HTTPResponse.Body.Close()
 						b, err := ioutil.ReadAll(r.HTTPResponse.Body)
 						if err != nil {
 							r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
@@ -48,6 +60,7 @@ func unmarshalBody(r *request.Request, v reflect.Value) {
 							payload.Set(reflect.ValueOf(b))
 						}
 					case *string:
+						defer r.HTTPResponse.Body.Close()
 						b, err := ioutil.ReadAll(r.HTTPResponse.Body)
 						if err != nil {
 							r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
@@ -62,6 +75,8 @@ func unmarshalBody(r *request.Request, v reflect.Value) {
 						case "aws.ReadSeekCloser", "io.ReadCloser":
 							payload.Set(reflect.ValueOf(r.HTTPResponse.Body))
 						default:
+							io.Copy(ioutil.Discard, r.HTTPResponse.Body)
+							defer r.HTTPResponse.Body.Close()
 							r.Error = awserr.New("SerializationError",
 								"failed to decode REST response",
 								fmt.Errorf("unknown payload type %s", payload.Type()))

+ 21 - 0
vendor/src/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go

@@ -0,0 +1,21 @@
+package protocol
+
+import (
+	"io"
+	"io/ioutil"
+
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body
+var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody}
+
+// UnmarshalDiscardBody is a request handler that empties and closes a response's body.
+func UnmarshalDiscardBody(r *request.Request) {
+	if r.HTTPResponse == nil || r.HTTPResponse.Body == nil {
+		return
+	}
+
+	io.Copy(ioutil.Discard, r.HTTPResponse.Body)
+	r.HTTPResponse.Body.Close()
+}

+ 82 - 0
vendor/src/github.com/aws/aws-sdk-go/private/signer/v4/header_rules.go

@@ -0,0 +1,82 @@
+package v4
+
+import (
+	"net/http"
+	"strings"
+)
+
+// rules houses the set of rules needed for validation of a
+// string value
+type rules []rule
+
+// rule interface allows for more flexible rules and just simply
+// checks whether or not a value adheres to that rule
+type rule interface {
+	IsValid(value string) bool
+}
+
+// IsValid will iterate through all rules and return true if any rule
+// applies to the value; nested rules are supported
+func (r rules) IsValid(value string) bool {
+	for _, rule := range r {
+		if rule.IsValid(value) {
+			return true
+		}
+	}
+	return false
+}
+
+// mapRule generic rule for maps
+type mapRule map[string]struct{}
+
+// IsValid for mapRule checks whether the value exists in the map
+func (m mapRule) IsValid(value string) bool {
+	_, ok := m[value]
+	return ok
+}
+
+// whitelist is a generic rule for whitelisting
+type whitelist struct {
+	rule
+}
+
+// IsValid for whitelist checks if the value is within the whitelist
+func (w whitelist) IsValid(value string) bool {
+	return w.rule.IsValid(value)
+}
+
+// blacklist is a generic rule for blacklisting
+type blacklist struct {
+	rule
+}
+
+// IsValid for blacklist checks if the value is NOT within the blacklist
+func (b blacklist) IsValid(value string) bool {
+	return !b.rule.IsValid(value)
+}
+
+type patterns []string
+
+// IsValid for patterns checks each pattern and returns if a match has
+// been found
+func (p patterns) IsValid(value string) bool {
+	for _, pattern := range p {
+		if strings.HasPrefix(http.CanonicalHeaderKey(value), pattern) {
+			return true
+		}
+	}
+	return false
+}
+
+// inclusiveRules rules allow for rules to depend on one another
+type inclusiveRules []rule
+
+// IsValid will return true if all rules are true
+func (r inclusiveRules) IsValid(value string) bool {
+	for _, rule := range r {
+		if !rule.IsValid(value) {
+			return false
+		}
+	}
+	return true
+}

+ 141 - 41
vendor/src/github.com/aws/aws-sdk-go/internal/signer/v4/v4.go → vendor/src/github.com/aws/aws-sdk-go/private/signer/v4/v4.go

@@ -17,7 +17,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/credentials"
 	"github.com/aws/aws-sdk-go/aws/request"
-	"github.com/aws/aws-sdk-go/internal/protocol/rest"
+	"github.com/aws/aws-sdk-go/private/protocol/rest"
 )
 
 const (
@@ -26,11 +26,66 @@ const (
 	shortTimeFormat  = "20060102"
 )
 
-var ignoredHeaders = map[string]bool{
-	"Authorization":  true,
-	"Content-Type":   true,
-	"Content-Length": true,
-	"User-Agent":     true,
+var ignoredHeaders = rules{
+	blacklist{
+		mapRule{
+			"Authorization": struct{}{},
+			"User-Agent":    struct{}{},
+		},
+	},
+}
+
+// requiredSignedHeaders is a whitelist for building canonical headers.
+var requiredSignedHeaders = rules{
+	whitelist{
+		mapRule{
+			"Cache-Control":                                               struct{}{},
+			"Content-Disposition":                                         struct{}{},
+			"Content-Encoding":                                            struct{}{},
+			"Content-Language":                                            struct{}{},
+			"Content-Md5":                                                 struct{}{},
+			"Content-Type":                                                struct{}{},
+			"Expires":                                                     struct{}{},
+			"If-Match":                                                    struct{}{},
+			"If-Modified-Since":                                           struct{}{},
+			"If-None-Match":                                               struct{}{},
+			"If-Unmodified-Since":                                         struct{}{},
+			"Range":                                                       struct{}{},
+			"X-Amz-Acl":                                                   struct{}{},
+			"X-Amz-Copy-Source":                                           struct{}{},
+			"X-Amz-Copy-Source-If-Match":                                  struct{}{},
+			"X-Amz-Copy-Source-If-Modified-Since":                         struct{}{},
+			"X-Amz-Copy-Source-If-None-Match":                             struct{}{},
+			"X-Amz-Copy-Source-If-Unmodified-Since":                       struct{}{},
+			"X-Amz-Copy-Source-Range":                                     struct{}{},
+			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
+			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key":       struct{}{},
+			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5":   struct{}{},
+			"X-Amz-Grant-Full-control":                                    struct{}{},
+			"X-Amz-Grant-Read":                                            struct{}{},
+			"X-Amz-Grant-Read-Acp":                                        struct{}{},
+			"X-Amz-Grant-Write":                                           struct{}{},
+			"X-Amz-Grant-Write-Acp":                                       struct{}{},
+			"X-Amz-Metadata-Directive":                                    struct{}{},
+			"X-Amz-Mfa":                                                   struct{}{},
+			"X-Amz-Request-Payer":                                         struct{}{},
+			"X-Amz-Server-Side-Encryption":                                struct{}{},
+			"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id":                 struct{}{},
+			"X-Amz-Server-Side-Encryption-Customer-Algorithm":             struct{}{},
+			"X-Amz-Server-Side-Encryption-Customer-Key":                   struct{}{},
+			"X-Amz-Server-Side-Encryption-Customer-Key-Md5":               struct{}{},
+			"X-Amz-Storage-Class":                                         struct{}{},
+			"X-Amz-Website-Redirect-Location":                             struct{}{},
+		},
+	},
+	patterns{"X-Amz-Meta-"},
+}
+
+// allowedQueryHoisting is a whitelist for building query headers. Headers
+// not in requiredSignedHeaders but matching the X-Amz- pattern may be hoisted.
+var allowedQueryHoisting = inclusiveRules{
+	blacklist{requiredSignedHeaders},
+	patterns{"X-Amz-"},
 }
 
 type signer struct {
@@ -57,6 +112,8 @@ type signer struct {
 	stringToSign     string
 	signature        string
 	authorization    string
+	notHoist         bool
+	signedHeaderVals http.Header
 }
 
 // Sign requests with signature version 4.
@@ -67,18 +124,18 @@ type signer struct {
 func Sign(req *request.Request) {
 	// If the request does not need to be signed ignore the signing of the
 	// request if the AnonymousCredentials object is used.
-	if req.Service.Config.Credentials == credentials.AnonymousCredentials {
+	if req.Config.Credentials == credentials.AnonymousCredentials {
 		return
 	}
 
-	region := req.Service.SigningRegion
+	region := req.ClientInfo.SigningRegion
 	if region == "" {
-		region = aws.StringValue(req.Service.Config.Region)
+		region = aws.StringValue(req.Config.Region)
 	}
 
-	name := req.Service.SigningName
+	name := req.ClientInfo.SigningName
 	if name == "" {
-		name = req.Service.ServiceName
+		name = req.ClientInfo.ServiceName
 	}
 
 	s := signer{
@@ -89,12 +146,15 @@ func Sign(req *request.Request) {
 		Body:        req.Body,
 		ServiceName: name,
 		Region:      region,
-		Credentials: req.Service.Config.Credentials,
-		Debug:       req.Service.Config.LogLevel.Value(),
-		Logger:      req.Service.Config.Logger,
+		Credentials: req.Config.Credentials,
+		Debug:       req.Config.LogLevel.Value(),
+		Logger:      req.Config.Logger,
+		notHoist:    req.NotHoist,
 	}
 
 	req.Error = s.sign()
+	req.Time = s.Time
+	req.SignedHeaderVals = s.signedHeaderVals
 }
 
 func (v4 *signer) sign() error {
@@ -103,11 +163,12 @@ func (v4 *signer) sign() error {
 	}
 
 	if v4.isRequestSigned() {
-		if !v4.Credentials.IsExpired() {
+		if !v4.Credentials.IsExpired() && time.Now().Before(v4.Time.Add(10*time.Minute)) {
 			// If the request is already signed, and the credentials have not
-			// expired yet ignore the signing request.
+			// expired, and the request is not too old ignore the signing request.
 			return nil
 		}
+		v4.Time = time.Now()
 
 		// The credentials have expired for this request. The current signing
 		// is invalid, and needs to be request because the request will fail.
@@ -165,15 +226,25 @@ func (v4 *signer) logSigningInfo() {
 }
 
 func (v4 *signer) build() {
+
 	v4.buildTime()             // no depends
 	v4.buildCredentialString() // no depends
+
+	unsignedHeaders := v4.Request.Header
 	if v4.isPresign {
-		v4.buildQuery() // no depends
+		if !v4.notHoist {
+			urlValues := url.Values{}
+			urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends
+			for k := range urlValues {
+				v4.Query[k] = urlValues[k]
+			}
+		}
 	}
-	v4.buildCanonicalHeaders() // depends on cred string
-	v4.buildCanonicalString()  // depends on canon headers / signed headers
-	v4.buildStringToSign()     // depends on canon string
-	v4.buildSignature()        // depends on string to sign
+
+	v4.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders)
+	v4.buildCanonicalString() // depends on canon headers / signed headers
+	v4.buildStringToSign()    // depends on canon string
+	v4.buildSignature()       // depends on string to sign
 
 	if v4.isPresign {
 		v4.Request.URL.RawQuery += "&X-Amz-Signature=" + v4.signature
@@ -213,31 +284,40 @@ func (v4 *signer) buildCredentialString() {
 	}
 }
 
-func (v4 *signer) buildQuery() {
-	for k, h := range v4.Request.Header {
-		if strings.HasPrefix(http.CanonicalHeaderKey(k), "X-Amz-") {
-			continue // never hoist x-amz-* headers, they must be signed
-		}
-		if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
-			continue // never hoist ignored headers
-		}
-
-		v4.Request.Header.Del(k)
-		v4.Query.Del(k)
-		for _, v := range h {
-			v4.Query.Add(k, v)
+func buildQuery(r rule, header http.Header) (url.Values, http.Header) {
+	query := url.Values{}
+	unsignedHeaders := http.Header{}
+	for k, h := range header {
+		if r.IsValid(k) {
+			query[k] = h
+		} else {
+			unsignedHeaders[k] = h
 		}
 	}
-}
 
-func (v4 *signer) buildCanonicalHeaders() {
+	return query, unsignedHeaders
+}
+func (v4 *signer) buildCanonicalHeaders(r rule, header http.Header) {
 	var headers []string
 	headers = append(headers, "host")
-	for k := range v4.Request.Header {
-		if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
+	for k, v := range header {
+		canonicalKey := http.CanonicalHeaderKey(k)
+		if !r.IsValid(canonicalKey) {
 			continue // ignored header
 		}
-		headers = append(headers, strings.ToLower(k))
+		if v4.signedHeaderVals == nil {
+			v4.signedHeaderVals = make(http.Header)
+		}
+
+		lowerCaseKey := strings.ToLower(k)
+		if _, ok := v4.signedHeaderVals[lowerCaseKey]; ok {
+			// include additional values
+			v4.signedHeaderVals[lowerCaseKey] = append(v4.signedHeaderVals[lowerCaseKey], v...)
+			continue
+		}
+
+		headers = append(headers, lowerCaseKey)
+		v4.signedHeaderVals[lowerCaseKey] = v
 	}
 	sort.Strings(headers)
 
@@ -253,11 +333,11 @@ func (v4 *signer) buildCanonicalHeaders() {
 			headerValues[i] = "host:" + v4.Request.URL.Host
 		} else {
 			headerValues[i] = k + ":" +
-				strings.Join(v4.Request.Header[http.CanonicalHeaderKey(k)], ",")
+				strings.Join(v4.signedHeaderVals[k], ",")
 		}
 	}
 
-	v4.canonicalHeaders = strings.Join(headerValues, "\n")
+	v4.canonicalHeaders = strings.Join(stripExcessSpaces(headerValues), "\n")
 }
 
 func (v4 *signer) buildCanonicalString() {
@@ -363,3 +443,23 @@ func makeSha256Reader(reader io.ReadSeeker) []byte {
 	io.Copy(hash, reader)
 	return hash.Sum(nil)
 }
+
+func stripExcessSpaces(headerVals []string) []string {
+	vals := make([]string, len(headerVals))
+	for i, str := range headerVals {
+		stripped := ""
+		found := false
+		str = strings.TrimSpace(str)
+		for _, c := range str {
+			if !found && c == ' ' {
+				stripped += string(c)
+				found = true
+			} else if c != ' ' {
+				stripped += string(c)
+				found = false
+			}
+		}
+		vals[i] = stripped
+	}
+	return vals
+}

Файловите разлики са ограничени, защото са твърде много
+ 382 - 151
vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go


+ 76 - 56
vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go

@@ -4,82 +4,102 @@ package cloudwatchlogs
 
 import (
 	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/aws/defaults"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
 	"github.com/aws/aws-sdk-go/aws/request"
-	"github.com/aws/aws-sdk-go/aws/service"
-	"github.com/aws/aws-sdk-go/aws/service/serviceinfo"
-	"github.com/aws/aws-sdk-go/internal/protocol/jsonrpc"
-	"github.com/aws/aws-sdk-go/internal/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+	"github.com/aws/aws-sdk-go/private/signer/v4"
 )
 
-// This is the Amazon CloudWatch Logs API Reference. Amazon CloudWatch Logs
-// enables you to monitor, store, and access your system, application, and custom
-// log files. This guide provides detailed information about Amazon CloudWatch
-// Logs actions, data types, parameters, and errors. For detailed information
-// about Amazon CloudWatch Logs features and their associated API calls, go
-// to the Amazon CloudWatch Developer Guide (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide).
+// You can use Amazon CloudWatch Logs to monitor, store, and access your log
+// files from Amazon Elastic Compute Cloud (Amazon EC2) instances, Amazon CloudTrail,
+// or other sources. You can then retrieve the associated log data from CloudWatch
+// Logs using the Amazon CloudWatch console, the CloudWatch Logs commands in
+// the AWS CLI, the CloudWatch Logs API, or the CloudWatch Logs SDK.
 //
-// Use the following links to get started using the Amazon CloudWatch Logs
-// API Reference:
+// You can use CloudWatch Logs to:
 //
-//   Actions (http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_Operations.html):
-// An alphabetical list of all Amazon CloudWatch Logs actions.  Data Types (http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_Types.html):
-// An alphabetical list of all Amazon CloudWatch Logs data types.  Common Parameters
-// (http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/CommonParameters.html):
-// Parameters that all Query actions can use.  Common Errors (http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/CommonErrors.html):
-// Client and server errors that all actions can return.  Regions and Endpoints
-// (http://docs.aws.amazon.com/general/latest/gr/index.html?rande.html): Itemized
-// regions and endpoints for all AWS products.  In addition to using the Amazon
-// CloudWatch Logs API, you can also use the following SDKs and third-party
-// libraries to access Amazon CloudWatch Logs programmatically.
+//   Monitor Logs from Amazon EC2 Instances in Real-time: You can use CloudWatch
+// Logs to monitor applications and systems using log data. For example, CloudWatch
+// Logs can track the number of errors that occur in your application logs and
+// send you a notification whenever the rate of errors exceeds a threshold you
+// specify. CloudWatch Logs uses your log data for monitoring; so, no code changes
+// are required. For example, you can monitor application logs for specific
+// literal terms (such as "NullReferenceException") or count the number of occurrences
+// of a literal term at a particular position in log data (such as "404" status
+// codes in an Apache access log). When the term you are searching for is found,
+// CloudWatch Logs reports the data to a Amazon CloudWatch metric that you specify.
 //
-//  AWS SDK for Java Documentation (http://aws.amazon.com/documentation/sdkforjava/)
-// AWS SDK for .NET Documentation (http://aws.amazon.com/documentation/sdkfornet/)
-// AWS SDK for PHP Documentation (http://aws.amazon.com/documentation/sdkforphp/)
-// AWS SDK for Ruby Documentation (http://aws.amazon.com/documentation/sdkforruby/)
-//  Developers in the AWS developer community also provide their own libraries,
-// which you can find at the following AWS developer centers:
+//   Monitor Amazon CloudTrail Logged Events: You can create alarms in Amazon
+// CloudWatch and receive notifications of particular API activity as captured
+// by CloudTrail and use the notification to perform troubleshooting.
 //
-//  AWS Java Developer Center (http://aws.amazon.com/java/) AWS PHP Developer
-// Center (http://aws.amazon.com/php/) AWS Python Developer Center (http://aws.amazon.com/python/)
-// AWS Ruby Developer Center (http://aws.amazon.com/ruby/) AWS Windows and .NET
-// Developer Center (http://aws.amazon.com/net/)
+//   Archive Log Data: You can use CloudWatch Logs to store your log data in
+// highly durable storage. You can change the log retention setting so that
+// any log events older than this setting are automatically deleted. The CloudWatch
+// Logs agent makes it easy to quickly send both rotated and non-rotated log
+// data off of a host and into the log service. You can then access the raw
+// log data when you need it.
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
 type CloudWatchLogs struct {
-	*service.Service
+	*client.Client
 }
 
-// Used for custom service initialization logic
-var initService func(*service.Service)
+// Used for custom client initialization logic
+var initClient func(*client.Client)
 
 // Used for custom request initialization logic
 var initRequest func(*request.Request)
 
-// New returns a new CloudWatchLogs client.
-func New(config *aws.Config) *CloudWatchLogs {
-	service := &service.Service{
-		ServiceInfo: serviceinfo.ServiceInfo{
-			Config:       defaults.DefaultConfig.Merge(config),
-			ServiceName:  "logs",
-			APIVersion:   "2014-03-28",
-			JSONVersion:  "1.1",
-			TargetPrefix: "Logs_20140328",
-		},
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "logs"
+
+// New creates a new instance of the CloudWatchLogs client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create a CloudWatchLogs client from just a session.
+//     svc := cloudwatchlogs.New(mySession)
+//
+//     // Create a CloudWatchLogs client with additional configuration
+//     svc := cloudwatchlogs.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudWatchLogs {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *CloudWatchLogs {
+	svc := &CloudWatchLogs{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   ServiceName,
+				SigningRegion: signingRegion,
+				Endpoint:      endpoint,
+				APIVersion:    "2014-03-28",
+				JSONVersion:   "1.1",
+				TargetPrefix:  "Logs_20140328",
+			},
+			handlers,
+		),
 	}
-	service.Initialize()
 
 	// Handlers
-	service.Handlers.Sign.PushBack(v4.Sign)
-	service.Handlers.Build.PushBack(jsonrpc.Build)
-	service.Handlers.Unmarshal.PushBack(jsonrpc.Unmarshal)
-	service.Handlers.UnmarshalMeta.PushBack(jsonrpc.UnmarshalMeta)
-	service.Handlers.UnmarshalError.PushBack(jsonrpc.UnmarshalError)
+	svc.Handlers.Sign.PushBack(v4.Sign)
+	svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler)
+	svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler)
+	svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler)
+	svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler)
 
-	// Run custom service initialization if present
-	if initService != nil {
-		initService(service)
+	// Run custom client initialization if present
+	if initClient != nil {
+		initClient(svc.Client)
 	}
 
-	return &CloudWatchLogs{service}
+	return svc
 }
 
 // newRequest creates a new request for a CloudWatchLogs operation and runs any

+ 4 - 0
vendor/src/github.com/go-ini/ini/.gitignore

@@ -0,0 +1,4 @@
+testdata/conf_out.ini
+ini.sublime-project
+ini.sublime-workspace
+testdata/conf_reflect.ini

+ 191 - 0
vendor/src/github.com/go-ini/ini/LICENSE

@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.

+ 560 - 0
vendor/src/github.com/go-ini/ini/README.md

@@ -0,0 +1,560 @@
+ini [![Build Status](https://drone.io/github.com/go-ini/ini/status.png)](https://drone.io/github.com/go-ini/ini/latest) [![](http://gocover.io/_badge/github.com/go-ini/ini)](http://gocover.io/github.com/go-ini/ini)
+===
+
+![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200)
+
+Package ini provides INI file read and write functionality in Go.
+
+[简体中文](README_ZH.md)
+
+## Feature
+
+- Load multiple data sources(`[]byte` or file) with overwrites.
+- Read with recursion values.
+- Read with parent-child sections.
+- Read with auto-increment key names.
+- Read with multiple-line values.
+- Read with tons of helper methods.
+- Read and convert values to Go types.
+- Read and **WRITE** comments of sections and keys.
+- Manipulate sections, keys and comments with ease.
+- Keep sections and keys in order as you parse and save.
+
+## Installation
+
+	go get gopkg.in/ini.v1
+
+## Getting Started
+
+### Loading from data sources
+
+A **Data Source** is either raw data in type `[]byte` or a file name with type `string` and you can load **as many** data sources **as** you want. Passing other types will simply return an error.
+
+```go
+cfg, err := ini.Load([]byte("raw data"), "filename")
+```
+
+Or start with an empty object:
+
+```go
+cfg := ini.Empty()
+```
+
+When you cannot decide how many data sources to load at the beginning, you are still able to **Append()** them later.
+
+```go
+err := cfg.Append("other file", []byte("other raw data"))
+```
+
+### Working with sections
+
+To get a section, you would need to:
+
+```go
+section, err := cfg.GetSection("section name")
+```
+
+For a shortcut for default section, just give an empty string as name:
+
+```go
+section, err := cfg.GetSection("")
+```
+
+When you're pretty sure the section exists, following code could make your life easier:
+
+```go
+section := cfg.Section("")
+```
+
+What happens when the section somehow does not exist? Don't panic, it automatically creates and returns a new section to you.
+
+To create a new section:
+
+```go
+err := cfg.NewSection("new section")
+```
+
+To get a list of sections or section names:
+
+```go
+sections := cfg.Sections()
+names := cfg.SectionStrings()
+```
+
+### Working with keys
+
+To get a key under a section:
+
+```go
+key, err := cfg.Section("").GetKey("key name")
+```
+
+Same rule applies to key operations:
+
+```go
+key := cfg.Section("").Key("key name")
+```
+
+To create a new key:
+
+```go
+err := cfg.Section("").NewKey("name", "value")
+```
+
+To get a list of keys or key names:
+
+```go
+keys := cfg.Section("").Keys()
+names := cfg.Section("").KeyStrings()
+```
+
+To get a clone hash of keys and corresponding values:
+
+```go
+hash := cfg.GetSection("").KeysHash()
+```
+
+### Working with values
+
+To get a string value:
+
+```go
+val := cfg.Section("").Key("key name").String()
+```
+
+To validate key value on the fly:
+
+```go
+val := cfg.Section("").Key("key name").Validate(func(in string) string {
+	if len(in) == 0 {
+		return "default"
+	}
+	return in
+})
+```
+
+To get value with types:
+
+```go
+// For boolean values:
+// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On
+// false when value is: 0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off
+v, err = cfg.Section("").Key("BOOL").Bool()
+v, err = cfg.Section("").Key("FLOAT64").Float64()
+v, err = cfg.Section("").Key("INT").Int()
+v, err = cfg.Section("").Key("INT64").Int64()
+v, err = cfg.Section("").Key("UINT").Uint()
+v, err = cfg.Section("").Key("UINT64").Uint64()
+v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339)
+v, err = cfg.Section("").Key("TIME").Time() // RFC3339
+
+v = cfg.Section("").Key("BOOL").MustBool()
+v = cfg.Section("").Key("FLOAT64").MustFloat64()
+v = cfg.Section("").Key("INT").MustInt()
+v = cfg.Section("").Key("INT64").MustInt64()
+v = cfg.Section("").Key("UINT").MustUint()
+v = cfg.Section("").Key("UINT64").MustUint64()
+v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339)
+v = cfg.Section("").Key("TIME").MustTime() // RFC3339
+
+// Methods start with Must also accept one argument for default value
+// when key not found or fail to parse value to given type.
+// Except method MustString, which you have to pass a default value.
+
+v = cfg.Section("").Key("String").MustString("default")
+v = cfg.Section("").Key("BOOL").MustBool(true)
+v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25)
+v = cfg.Section("").Key("INT").MustInt(10)
+v = cfg.Section("").Key("INT64").MustInt64(99)
+v = cfg.Section("").Key("UINT").MustUint(3)
+v = cfg.Section("").Key("UINT64").MustUint64(6)
+v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now())
+v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339
+```
+
+What if my value is three-line long?
+
+```ini
+[advance]
+ADDRESS = """404 road,
+NotFound, State, 5000
+Earth"""
+```
+
+Not a problem!
+
+```go
+cfg.Section("advance").Key("ADDRESS").String()
+
+/* --- start ---
+404 road,
+NotFound, State, 5000
+Earth
+------  end  --- */
+```
+
+That's cool, how about continuation lines?
+
+```ini
+[advance]
+two_lines = how about \
+	continuation lines?
+lots_of_lines = 1 \
+	2 \
+	3 \
+	4
+```
+
+Piece of cake!
+
+```go
+cfg.Section("advance").Key("two_lines").String() // how about continuation lines?
+cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4
+```
+
+Note that single quotes around values will be stripped:
+
+```ini
+foo = "some value" // foo: some value
+bar = 'some value' // bar: some value
+```
+
+That's all? Hmm, no.
+
+#### Helper methods of working with values
+
+To get value with given candidates:
+
+```go
+v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"})
+v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75})
+v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30})
+v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30})
+v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9})
+v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9})
+v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3})
+v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339
+```
+
+The default value will be returned if the value of the key is not in the candidates you gave, and the default value does not need to be one of the candidates.
+
+To validate value in a given range:
+
+```go
+vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2)
+vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20)
+vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20)
+vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9)
+vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9)
+vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime)
+vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339
+```
+
+To auto-split value into slice:
+
+```go
+vals = cfg.Section("").Key("STRINGS").Strings(",")
+vals = cfg.Section("").Key("FLOAT64S").Float64s(",")
+vals = cfg.Section("").Key("INTS").Ints(",")
+vals = cfg.Section("").Key("INT64S").Int64s(",")
+vals = cfg.Section("").Key("UINTS").Uints(",")
+vals = cfg.Section("").Key("UINT64S").Uint64s(",")
+vals = cfg.Section("").Key("TIMES").Times(",")
+```
+
+### Save your configuration
+
+Finally, it's time to save your configuration to somewhere.
+
+A typical way to save configuration is writing it to a file:
+
+```go
+// ...
+err = cfg.SaveTo("my.ini")
+err = cfg.SaveToIndent("my.ini", "\t")
+```
+
+Another way to save is writing to a `io.Writer` interface:
+
+```go
+// ...
+cfg.WriteTo(writer)
+cfg.WriteToIndent(writer, "\t")
+```
+
+## Advanced Usage
+
+### Recursive Values
+
+For all values of keys, there is a special syntax `%(<name>)s`, where `<name>` is the key name in the same section or the default section, and `%(<name>)s` will be replaced by the corresponding value (an empty string if the key is not found). You can use this syntax with at most 99 levels of recursion.
+
+```ini
+NAME = ini
+
+[author]
+NAME = Unknwon
+GITHUB = https://github.com/%(NAME)s
+
+[package]
+FULL_NAME = github.com/go-ini/%(NAME)s
+```
+
+```go
+cfg.Section("author").Key("GITHUB").String()		// https://github.com/Unknwon
+cfg.Section("package").Key("FULL_NAME").String()	// github.com/go-ini/ini
+```
+
+### Parent-child Sections
+
+You can use `.` in a section name to indicate a parent-child relationship between two or more sections. If a key is not found in the child section, the library will try again in its parent section until there is no parent section.
+
+```ini
+NAME = ini
+VERSION = v1
+IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s
+
+[package]
+CLONE_URL = https://%(IMPORT_PATH)s
+
+[package.sub]
+```
+
+```go
+cfg.Section("package.sub").Key("CLONE_URL").String()	// https://gopkg.in/ini.v1
+```
+
+### Auto-increment Key Names
+
+If a key name is `-` in the data source, it is treated as special syntax for an auto-increment key name starting from 1, and every section keeps its own independent counter.
+
+```ini
+[features]
+-: Support read/write comments of keys and sections
+-: Support auto-increment of key names
+-: Support load multiple files to overwrite key values
+```
+
+```go
+cfg.Section("features").KeyStrings()	// []{"#1", "#2", "#3"}
+```
+
+### Map To Struct
+
+Want more objective way to play with INI? Cool.
+
+```ini
+Name = Unknwon
+age = 21
+Male = true
+Born = 1993-01-01T20:17:05Z
+
+[Note]
+Content = Hi is a good man!
+Cities = HangZhou, Boston
+```
+
+```go
+type Note struct {
+	Content string
+	Cities  []string
+}
+
+type Person struct {
+	Name string
+	Age  int `ini:"age"`
+	Male bool
+	Born time.Time
+	Note
+	Created time.Time `ini:"-"`
+}
+
+func main() {
+	cfg, err := ini.Load("path/to/ini")
+	// ...
+	p := new(Person)
+	err = cfg.MapTo(p)
+	// ...
+
+	// Things can be simpler.
+	err = ini.MapTo(p, "path/to/ini")
+	// ...
+
+	// Just map a section? Fine.
+	n := new(Note)
+	err = cfg.Section("Note").MapTo(n)
+	// ...
+}
+```
+
+Can I have default value for field? Absolutely.
+
+Assign it before you map to the struct. The value will be kept as it is if the key is not present or has the wrong type.
+
+```go
+// ...
+p := &Person{
+	Name: "Joe",
+}
+// ...
+```
+
+It's really cool, but what's the point if you can't give me my file back from struct?
+
+### Reflect From Struct
+
+Why not?
+
+```go
+type Embeded struct {
+	Dates  []time.Time `delim:"|"`
+	Places []string
+	None   []int
+}
+
+type Author struct {
+	Name      string `ini:"NAME"`
+	Male      bool
+	Age       int
+	GPA       float64
+	NeverMind string `ini:"-"`
+	*Embeded
+}
+
+func main() {
+	a := &Author{"Unknwon", true, 21, 2.8, "",
+		&Embeded{
+			[]time.Time{time.Now(), time.Now()},
+			[]string{"HangZhou", "Boston"},
+			[]int{},
+		}}
+	cfg := ini.Empty()
+	err = ini.ReflectFrom(cfg, a)
+	// ...
+}
+```
+
+So, what do I get?
+
+```ini
+NAME = Unknwon
+Male = true
+Age = 21
+GPA = 2.8
+
+[Embeded]
+Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
+Places = HangZhou,Boston
+None =
+```
+
+#### Name Mapper
+
+To save your time and make your code cleaner, this library supports [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) between struct field and actual section and key name.
+
+There are 2 built-in name mappers:
+
+- `AllCapsUnderscore`: it converts to format `ALL_CAPS_UNDERSCORE` then match section or key.
+- `TitleUnderscore`: it converts to format `title_underscore` then match section or key.
+
+To use them:
+
+```go
+type Info struct {
+	PackageName string
+}
+
+func main() {
+	err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("packag_name=ini"))
+	// ...
+
+	cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
+	// ...
+	info := new(Info)
+	cfg.NameMapper = ini.AllCapsUnderscore
+	err = cfg.MapTo(info)
+	// ...
+}
+```
+
+Same rules of name mapper apply to `ini.ReflectFromWithMapper` function.
+
+#### Other Notes On Map/Reflect
+
+Any embedded struct is treated as a section by default, and there is no automatic parent-child relations in map/reflect feature:
+
+```go
+type Child struct {
+	Age string
+}
+
+type Parent struct {
+	Name string
+	Child
+}
+
+type Config struct {
+	City string
+	Parent
+}
+```
+
+Example configuration:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+
+[Child]
+Age = 21
+```
+
+What if, yes, I'm paranoid, I want embedded struct to be in the same section. Well, all roads lead to Rome.
+
+```go
+type Child struct {
+	Age string
+}
+
+type Parent struct {
+	Name string
+	Child `ini:"Parent"`
+}
+
+type Config struct {
+	City string
+	Parent
+}
+```
+
+Example configuration:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+Age = 21
+```
+
+## Getting Help
+
+- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
+- [File An Issue](https://github.com/go-ini/ini/issues/new)
+
+## FAQs
+
+### What does `BlockMode` field do?
+
+By default, library lets you read and write values so we need a locker to make sure your data is safe. But in cases that you are very sure about only reading data through the library, you can set `cfg.BlockMode = false` to speed up read operations about **50-70%** faster.
+
+### Why another INI library?
+
+Many people are using my another INI library [goconfig](https://github.com/Unknwon/goconfig), so the reason for this one is I would like to make more Go style code. Also when you set `cfg.BlockMode = false`, this one is about **10-30%** faster.
+
+To make those changes I have to confirm API broken, so it's safer to keep it in another place and start using `gopkg.in` to version my package at this time.(PS: shorter import path)
+
+## License
+
+This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.

+ 547 - 0
vendor/src/github.com/go-ini/ini/README_ZH.md

@@ -0,0 +1,547 @@
+本包提供了 Go 语言中读写 INI 文件的功能。
+
+## 功能特性
+
+- 支持覆盖加载多个数据源(`[]byte` 或文件)
+- 支持递归读取键值
+- 支持读取父子分区
+- 支持读取自增键名
+- 支持读取多行的键值
+- 支持大量辅助方法
+- 支持在读取时直接转换为 Go 语言类型
+- 支持读取和 **写入** 分区和键的注释
+- 轻松操作分区、键值和注释
+- 在保存文件时分区和键值会保持原有的顺序
+
+## 下载安装
+
+    go get gopkg.in/ini.v1
+
+## 开始使用
+
+### 从数据源加载
+
+一个 **数据源** 可以是 `[]byte` 类型的原始数据,或 `string` 类型的文件路径。您可以加载 **任意多个** 数据源。如果您传递其它类型的数据源,则会直接返回错误。
+
+```go
+cfg, err := ini.Load([]byte("raw data"), "filename")
+```
+
+或者从一个空白的文件开始:
+
+```go
+cfg := ini.Empty()
+```
+
+当您在一开始无法决定需要加载哪些数据源时,仍可以使用 **Append()** 在需要的时候加载它们。
+
+```go
+err := cfg.Append("other file", []byte("other raw data"))
+```
+
+### 操作分区(Section)
+
+获取指定分区:
+
+```go
+section, err := cfg.GetSection("section name")
+```
+
+如果您想要获取默认分区,则可以用空字符串代替分区名:
+
+```go
+section, err := cfg.GetSection("")
+```
+
+当您非常确定某个分区是存在的,可以使用以下简便方法:
+
+```go
+section := cfg.Section("")
+```
+
+如果不小心判断错了,要获取的分区其实是不存在的,那会发生什么呢?没事的,它会自动创建并返回一个对应的分区对象给您。
+
+创建一个分区:
+
+```go
+err := cfg.NewSection("new section")
+```
+
+获取所有分区对象或名称:
+
+```go
+sections := cfg.Sections()
+names := cfg.SectionStrings()
+```
+
+### 操作键(Key)
+
+获取某个分区下的键:
+
+```go
+key, err := cfg.Section("").GetKey("key name")
+```
+
+和分区一样,您也可以直接获取键而忽略错误处理:
+
+```go
+key := cfg.Section("").Key("key name")
+```
+
+创建一个新的键:
+
+```go
+err := cfg.Section("").NewKey("name", "value")
+```
+
+获取分区下的所有键或键名:
+
+```go
+keys := cfg.Section("").Keys()
+names := cfg.Section("").KeyStrings()
+```
+
+获取分区下的所有键值对的克隆:
+
+```go
+hash := cfg.GetSection("").KeysHash()
+```
+
+### 操作键值(Value)
+
+获取一个类型为字符串(string)的值:
+
+```go
+val := cfg.Section("").Key("key name").String()
+```
+
+获取值的同时通过自定义函数进行处理验证:
+
+```go
+val := cfg.Section("").Key("key name").Validate(func(in string) string {
+	if len(in) == 0 {
+		return "default"
+	}
+	return in
+})
+```
+
+获取其它类型的值:
+
+```go
+// 布尔值的规则:
+// true 当值为:1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On
+// false 当值为:0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off
+v, err = cfg.Section("").Key("BOOL").Bool()
+v, err = cfg.Section("").Key("FLOAT64").Float64()
+v, err = cfg.Section("").Key("INT").Int()
+v, err = cfg.Section("").Key("INT64").Int64()
+v, err = cfg.Section("").Key("UINT").Uint()
+v, err = cfg.Section("").Key("UINT64").Uint64()
+v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339)
+v, err = cfg.Section("").Key("TIME").Time() // RFC3339
+
+v = cfg.Section("").Key("BOOL").MustBool()
+v = cfg.Section("").Key("FLOAT64").MustFloat64()
+v = cfg.Section("").Key("INT").MustInt()
+v = cfg.Section("").Key("INT64").MustInt64()
+v = cfg.Section("").Key("UINT").MustUint()
+v = cfg.Section("").Key("UINT64").MustUint64()
+v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339)
+v = cfg.Section("").Key("TIME").MustTime() // RFC3339
+
+// 由 Must 开头的方法名允许接收一个相同类型的参数来作为默认值,
+// 当键不存在或者转换失败时,则会直接返回该默认值。
+// 但是,MustString 方法必须传递一个默认值。
+
+v = cfg.Section("").Key("String").MustString("default")
+v = cfg.Section("").Key("BOOL").MustBool(true)
+v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25)
+v = cfg.Section("").Key("INT").MustInt(10)
+v = cfg.Section("").Key("INT64").MustInt64(99)
+v = cfg.Section("").Key("UINT").MustUint(3)
+v = cfg.Section("").Key("UINT64").MustUint64(6)
+v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now())
+v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339
+```
+
+如果我的值有好多行怎么办?
+
+```ini
+[advance]
+ADDRESS = """404 road,
+NotFound, State, 5000
+Earth"""
+```
+
+嗯哼?小 case!
+
+```go
+cfg.Section("advance").Key("ADDRESS").String()
+
+/* --- start ---
+404 road,
+NotFound, State, 5000
+Earth
+------  end  --- */
+```
+
+赞爆了!那要是我属于一行的内容写不下想要写到第二行怎么办?
+
+```ini
+[advance]
+two_lines = how about \
+	continuation lines?
+lots_of_lines = 1 \
+	2 \
+	3 \
+	4
+```
+
+简直是小菜一碟!
+
+```go
+cfg.Section("advance").Key("two_lines").String() // how about continuation lines?
+cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4
+```
+
+需要注意的是,值两侧的单引号会被自动剔除:
+
+```ini
+foo = "some value" // foo: some value
+bar = 'some value' // bar: some value
+```
+
+这就是全部了?哈哈,当然不是。
+
+#### 操作键值的辅助方法
+
+获取键值时设定候选值:
+
+```go
+v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"})
+v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75})
+v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30})
+v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30})
+v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9})
+v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9})
+v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3})
+v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339
+```
+
+如果获取到的值不是候选值的任意一个,则会返回默认值,而默认值不需要是候选值中的一员。
+
+验证获取的值是否在指定范围内:
+
+```go
+vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2)
+vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20)
+vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20)
+vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9)
+vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9)
+vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime)
+vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339
+```
+
+自动分割键值为切片(slice):
+
+```go
+vals = cfg.Section("").Key("STRINGS").Strings(",")
+vals = cfg.Section("").Key("FLOAT64S").Float64s(",")
+vals = cfg.Section("").Key("INTS").Ints(",")
+vals = cfg.Section("").Key("INT64S").Int64s(",")
+vals = cfg.Section("").Key("UINTS").Uints(",")
+vals = cfg.Section("").Key("UINT64S").Uint64s(",")
+vals = cfg.Section("").Key("TIMES").Times(",")
+```
+
+### 保存配置
+
+终于到了这个时刻,是时候保存一下配置了。
+
+比较原始的做法是输出配置到某个文件:
+
+```go
+// ...
+err = cfg.SaveTo("my.ini")
+err = cfg.SaveToIndent("my.ini", "\t")
+```
+
+另一个比较高级的做法是写入到任何实现 `io.Writer` 接口的对象中:
+
+```go
+// ...
+cfg.WriteTo(writer)
+cfg.WriteToIndent(writer, "\t")
+```
+
+### 高级用法
+
+#### 递归读取键值
+
+在获取所有键值的过程中,特殊语法 `%(<name>)s` 会被应用,其中 `<name>` 可以是相同分区或者默认分区下的键名。字符串 `%(<name>)s` 会被相应的键值所替代,如果指定的键不存在,则会用空字符串替代。您可以最多使用 99 层的递归嵌套。
+
+```ini
+NAME = ini
+
+[author]
+NAME = Unknwon
+GITHUB = https://github.com/%(NAME)s
+
+[package]
+FULL_NAME = github.com/go-ini/%(NAME)s
+```
+
+```go
+cfg.Section("author").Key("GITHUB").String()		// https://github.com/Unknwon
+cfg.Section("package").Key("FULL_NAME").String()	// github.com/go-ini/ini
+```
+
+#### 读取父子分区
+
+您可以在分区名称中使用 `.` 来表示两个或多个分区之间的父子关系。如果某个键在子分区中不存在,则会去它的父分区中再次寻找,直到没有父分区为止。
+
+```ini
+NAME = ini
+VERSION = v1
+IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s
+
+[package]
+CLONE_URL = https://%(IMPORT_PATH)s
+
+[package.sub]
+```
+
+```go
+cfg.Section("package.sub").Key("CLONE_URL").String()	// https://gopkg.in/ini.v1
+```
+
+#### 读取自增键名
+
+如果数据源中的键名为 `-`,则认为该键使用了自增键名的特殊语法。计数器从 1 开始,并且分区之间是相互独立的。
+
+```ini
+[features]
+-: Support read/write comments of keys and sections
+-: Support auto-increment of key names
+-: Support load multiple files to overwrite key values
+```
+
+```go
+cfg.Section("features").KeyStrings()	// []{"#1", "#2", "#3"}
+```
+
+### 映射到结构
+
+想要使用更加面向对象的方式玩转 INI 吗?好主意。
+
+```ini
+Name = Unknwon
+age = 21
+Male = true
+Born = 1993-01-01T20:17:05Z
+
+[Note]
+Content = Hi is a good man!
+Cities = HangZhou, Boston
+```
+
+```go
+type Note struct {
+	Content string
+	Cities  []string
+}
+
+type Person struct {
+	Name string
+	Age  int `ini:"age"`
+	Male bool
+	Born time.Time
+	Note
+	Created time.Time `ini:"-"`
+}
+
+func main() {
+	cfg, err := ini.Load("path/to/ini")
+	// ...
+	p := new(Person)
+	err = cfg.MapTo(p)
+	// ...
+
+	// 一切竟可以如此的简单。
+	err = ini.MapTo(p, "path/to/ini")
+	// ...
+
+	// 嗯哼?只需要映射一个分区吗?
+	n := new(Note)
+	err = cfg.Section("Note").MapTo(n)
+	// ...
+}
+```
+
+结构的字段怎么设置默认值呢?很简单,只要在映射之前对指定字段进行赋值就可以了。如果键未找到或者类型错误,该值不会发生改变。
+
+```go
+// ...
+p := &Person{
+	Name: "Joe",
+}
+// ...
+```
+
+这样玩 INI 真的好酷啊!然而,如果不能还给我原来的配置文件,有什么卵用?
+
+### 从结构反射
+
+可是,我有说不能吗?
+
+```go
+type Embeded struct {
+	Dates  []time.Time `delim:"|"`
+	Places []string
+	None   []int
+}
+
+type Author struct {
+	Name      string `ini:"NAME"`
+	Male      bool
+	Age       int
+	GPA       float64
+	NeverMind string `ini:"-"`
+	*Embeded
+}
+
+func main() {
+	a := &Author{"Unknwon", true, 21, 2.8, "",
+		&Embeded{
+			[]time.Time{time.Now(), time.Now()},
+			[]string{"HangZhou", "Boston"},
+			[]int{},
+		}}
+	cfg := ini.Empty()
+	err = ini.ReflectFrom(cfg, a)
+	// ...
+}
+```
+
+瞧瞧,奇迹发生了。
+
+```ini
+NAME = Unknwon
+Male = true
+Age = 21
+GPA = 2.8
+
+[Embeded]
+Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
+Places = HangZhou,Boston
+None =
+```
+
+#### 名称映射器(Name Mapper)
+
+为了节省您的时间并简化代码,本库支持类型为 [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) 的名称映射器,该映射器负责结构字段名与分区名和键名之间的映射。
+
+目前有 2 款内置的映射器:
+
+- `AllCapsUnderscore`:该映射器将字段名转换至格式 `ALL_CAPS_UNDERSCORE` 后再去匹配分区名和键名。
+- `TitleUnderscore`:该映射器将字段名转换至格式 `title_underscore` 后再去匹配分区名和键名。
+
+使用方法:
+
+```go
+type Info struct{
+	PackageName string
+}
+
+func main() {
+	err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("packag_name=ini"))
+	// ...
+
+	cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
+	// ...
+	info := new(Info)
+	cfg.NameMapper = ini.AllCapsUnderscore
+	err = cfg.MapTo(info)
+	// ...
+}
+```
+
+使用函数 `ini.ReflectFromWithMapper` 时也可应用相同的规则。
+
+#### 映射/反射的其它说明
+
+任何嵌入的结构都会被默认认作一个不同的分区,并且不会自动产生所谓的父子分区关联:
+
+```go
+type Child struct {
+	Age string
+}
+
+type Parent struct {
+	Name string
+	Child
+}
+
+type Config struct {
+	City string
+	Parent
+}
+```
+
+示例配置文件:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+
+[Child]
+Age = 21
+```
+
+很好,但是,我就是要嵌入结构也在同一个分区。好吧,你爹是李刚!
+
+```go
+type Child struct {
+	Age string
+}
+
+type Parent struct {
+	Name string
+	Child `ini:"Parent"`
+}
+
+type Config struct {
+	City string
+	Parent
+}
+```
+
+示例配置文件:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+Age = 21
+```
+
+## 获取帮助
+
+- [API 文档](https://gowalker.org/gopkg.in/ini.v1)
+- [创建工单](https://github.com/go-ini/ini/issues/new)
+
+## 常见问题
+
+### 字段 `BlockMode` 是什么?
+
+默认情况下,本库会在您进行读写操作时采用锁机制来确保数据时间。但在某些情况下,您非常确定只进行读操作。此时,您可以通过设置 `cfg.BlockMode = false` 来将读操作提升大约 **50-70%** 的性能。
+
+### 为什么要写另一个 INI 解析库?
+
+许多人都在使用我的 [goconfig](https://github.com/Unknwon/goconfig) 来完成对 INI 文件的操作,但我希望使用更加 Go 风格的代码。并且当您设置 `cfg.BlockMode = false` 时,会有大约 **10-30%** 的性能提升。
+
+为了做出这些改变,我必须对 API 进行破坏,所以新开一个仓库是最安全的做法。除此之外,本库直接使用 `gopkg.in` 来进行版本化发布。(其实真相是导入路径更短了)

+ 1226 - 0
vendor/src/github.com/go-ini/ini/ini.go

@@ -0,0 +1,1226 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+// Package ini provides INI file read and write functionality in Go.
+package ini
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"regexp"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+)
+
+const (
+	DEFAULT_SECTION = "DEFAULT"
+	// Maximum allowed depth when recursively substituting variable names.
+	_DEPTH_VALUES = 99
+
+	_VERSION = "1.6.0"
+)
+
+func Version() string {
+	return _VERSION
+}
+
+var (
+	LineBreak = "\n"
+
+	// Variable regexp pattern: %(variable)s
+	varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`)
+
+	// Write spaces around "=" to look better.
+	PrettyFormat = true
+)
+
+func init() {
+	if runtime.GOOS == "windows" {
+		LineBreak = "\r\n"
+	}
+}
+
+func inSlice(str string, s []string) bool {
+	for _, v := range s {
+		if str == v {
+			return true
+		}
+	}
+	return false
+}
+
+// dataSource is an interface that returns file content.
+type dataSource interface {
+	ReadCloser() (io.ReadCloser, error)
+}
+
+type sourceFile struct {
+	name string
+}
+
+func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) {
+	return os.Open(s.name)
+}
+
+type bytesReadCloser struct {
+	reader io.Reader
+}
+
+func (rc *bytesReadCloser) Read(p []byte) (n int, err error) {
+	return rc.reader.Read(p)
+}
+
+func (rc *bytesReadCloser) Close() error {
+	return nil
+}
+
+type sourceData struct {
+	data []byte
+}
+
+func (s *sourceData) ReadCloser() (io.ReadCloser, error) {
+	return &bytesReadCloser{bytes.NewReader(s.data)}, nil
+}
+
+//  ____  __.
+// |    |/ _|____ ___.__.
+// |      <_/ __ <   |  |
+// |    |  \  ___/\___  |
+// |____|__ \___  > ____|
+//         \/   \/\/
+
+// Key represents a key under a section.
+type Key struct {
+	s          *Section
+	Comment    string
+	name       string
+	value      string
+	isAutoIncr bool
+}
+
+// Name returns name of key.
+func (k *Key) Name() string {
+	return k.name
+}
+
+// Value returns raw value of key for performance purpose.
+func (k *Key) Value() string {
+	return k.value
+}
+
+// String returns string representation of value.
+func (k *Key) String() string {
+	val := k.value
+	if strings.Index(val, "%") == -1 {
+		return val
+	}
+
+	for i := 0; i < _DEPTH_VALUES; i++ {
+		vr := varPattern.FindString(val)
+		if len(vr) == 0 {
+			break
+		}
+
+		// Take off leading '%(' and trailing ')s'.
+		noption := strings.TrimLeft(vr, "%(")
+		noption = strings.TrimRight(noption, ")s")
+
+		// Search in the same section.
+		nk, err := k.s.GetKey(noption)
+		if err != nil {
+			// Search again in default section.
+			nk, _ = k.s.f.Section("").GetKey(noption)
+		}
+
+		// Substitute by new value and take off leading '%(' and trailing ')s'.
+		val = strings.Replace(val, vr, nk.value, -1)
+	}
+	return val
+}
+
+// Validate accepts a validate function which can
+// return a modified result as the key value.
+func (k *Key) Validate(fn func(string) string) string {
+	return fn(k.String())
+}
+
+// parseBool returns the boolean value represented by the string.
+//
+// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On,
+// 0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off.
+// Any other value returns an error.
+func parseBool(str string) (value bool, err error) {
+	switch str {
+	case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "ON", "on", "On":
+		return true, nil
+	case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "OFF", "off", "Off":
+		return false, nil
+	}
+	return false, fmt.Errorf("parsing \"%s\": invalid syntax", str)
+}
+
+// Bool returns bool type value.
+func (k *Key) Bool() (bool, error) {
+	return parseBool(k.String())
+}
+
+// Float64 returns float64 type value.
+func (k *Key) Float64() (float64, error) {
+	return strconv.ParseFloat(k.String(), 64)
+}
+
+// Int returns int type value.
+func (k *Key) Int() (int, error) {
+	return strconv.Atoi(k.String())
+}
+
+// Int64 returns int64 type value.
+func (k *Key) Int64() (int64, error) {
+	return strconv.ParseInt(k.String(), 10, 64)
+}
+
+// Uint returns uint type valued.
+func (k *Key) Uint() (uint, error) {
+	u, e := strconv.ParseUint(k.String(), 10, 64)
+	return uint(u), e
+}
+
+// Uint64 returns uint64 type value.
+func (k *Key) Uint64() (uint64, error) {
+	return strconv.ParseUint(k.String(), 10, 64)
+}
+
+// Duration returns time.Duration type value.
+func (k *Key) Duration() (time.Duration, error) {
+	return time.ParseDuration(k.String())
+}
+
+// TimeFormat parses with given format and returns time.Time type value.
+func (k *Key) TimeFormat(format string) (time.Time, error) {
+	return time.Parse(format, k.String())
+}
+
+// Time parses with RFC3339 format and returns time.Time type value.
+func (k *Key) Time() (time.Time, error) {
+	return k.TimeFormat(time.RFC3339)
+}
+
+// MustString returns default value if key value is empty.
+func (k *Key) MustString(defaultVal string) string {
+	val := k.String()
+	if len(val) == 0 {
+		return defaultVal
+	}
+	return val
+}
+
+// MustBool always returns value without error,
+// it returns false if error occurs.
+func (k *Key) MustBool(defaultVal ...bool) bool {
+	val, err := k.Bool()
+	if len(defaultVal) > 0 && err != nil {
+		return defaultVal[0]
+	}
+	return val
+}
+
+// MustFloat64 always returns value without error,
+// it returns 0.0 if error occurs.
+func (k *Key) MustFloat64(defaultVal ...float64) float64 {
+	val, err := k.Float64()
+	if len(defaultVal) > 0 && err != nil {
+		return defaultVal[0]
+	}
+	return val
+}
+
+// MustInt always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustInt(defaultVal ...int) int {
+	val, err := k.Int()
+	if len(defaultVal) > 0 && err != nil {
+		return defaultVal[0]
+	}
+	return val
+}
+
+// MustInt64 always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustInt64(defaultVal ...int64) int64 {
+	val, err := k.Int64()
+	if len(defaultVal) > 0 && err != nil {
+		return defaultVal[0]
+	}
+	return val
+}
+
+// MustUint always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustUint(defaultVal ...uint) uint {
+	val, err := k.Uint()
+	if len(defaultVal) > 0 && err != nil {
+		return defaultVal[0]
+	}
+	return val
+}
+
+// MustUint64 always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustUint64(defaultVal ...uint64) uint64 {
+	val, err := k.Uint64()
+	if len(defaultVal) > 0 && err != nil {
+		return defaultVal[0]
+	}
+	return val
+}
+
+// MustDuration always returns value without error,
+// it returns zero value if error occurs.
+func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration {
+	val, err := k.Duration()
+	if len(defaultVal) > 0 && err != nil {
+		return defaultVal[0]
+	}
+	return val
+}
+
+// MustTimeFormat always parses with given format and returns value without error,
+// it returns zero value if error occurs.
+func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time {
+	val, err := k.TimeFormat(format)
+	if len(defaultVal) > 0 && err != nil {
+		return defaultVal[0]
+	}
+	return val
+}
+
+// MustTime always parses with RFC3339 format and returns value without error,
+// it returns zero value if error occurs.
+func (k *Key) MustTime(defaultVal ...time.Time) time.Time {
+	return k.MustTimeFormat(time.RFC3339, defaultVal...)
+}
+
+// In always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) In(defaultVal string, candidates []string) string {
+	val := k.String()
+	for _, cand := range candidates {
+		if val == cand {
+			return val
+		}
+	}
+	return defaultVal
+}
+
+// InFloat64 always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 {
+	val := k.MustFloat64()
+	for _, cand := range candidates {
+		if val == cand {
+			return val
+		}
+	}
+	return defaultVal
+}
+
+// InInt always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InInt(defaultVal int, candidates []int) int {
+	val := k.MustInt()
+	for _, cand := range candidates {
+		if val == cand {
+			return val
+		}
+	}
+	return defaultVal
+}
+
+// InInt64 always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 {
+	val := k.MustInt64()
+	for _, cand := range candidates {
+		if val == cand {
+			return val
+		}
+	}
+	return defaultVal
+}
+
+// InUint always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InUint(defaultVal uint, candidates []uint) uint {
+	val := k.MustUint()
+	for _, cand := range candidates {
+		if val == cand {
+			return val
+		}
+	}
+	return defaultVal
+}
+
+// InUint64 always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 {
+	val := k.MustUint64()
+	for _, cand := range candidates {
+		if val == cand {
+			return val
+		}
+	}
+	return defaultVal
+}
+
+// InTimeFormat always parses with given format and returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time {
+	val := k.MustTimeFormat(format)
+	for _, cand := range candidates {
+		if val == cand {
+			return val
+		}
+	}
+	return defaultVal
+}
+
+// InTime always parses with RFC3339 format and returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time {
+	return k.InTimeFormat(time.RFC3339, defaultVal, candidates)
+}
+
+// RangeFloat64 checks if value is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 {
+	val := k.MustFloat64()
+	if val < min || val > max {
+		return defaultVal
+	}
+	return val
+}
+
+// RangeInt checks if value is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeInt(defaultVal, min, max int) int {
+	val := k.MustInt()
+	if val < min || val > max {
+		return defaultVal
+	}
+	return val
+}
+
+// RangeInt64 checks if value is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeInt64(defaultVal, min, max int64) int64 {
+	val := k.MustInt64()
+	if val < min || val > max {
+		return defaultVal
+	}
+	return val
+}
+
+// RangeTimeFormat checks if value with given format is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time {
+	val := k.MustTimeFormat(format)
+	if val.Unix() < min.Unix() || val.Unix() > max.Unix() {
+		return defaultVal
+	}
+	return val
+}
+
+// RangeTime checks if value with RFC3339 format is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time {
+	return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max)
+}
+
+// Strings returns list of string devide by given delimiter.
+func (k *Key) Strings(delim string) []string {
+	str := k.String()
+	if len(str) == 0 {
+		return []string{}
+	}
+
+	vals := strings.Split(str, delim)
+	for i := range vals {
+		vals[i] = strings.TrimSpace(vals[i])
+	}
+	return vals
+}
+
// Float64s returns list of float64 divided by given delimiter.
// Elements that fail to parse are silently left as the zero value.
func (k *Key) Float64s(delim string) []float64 {
	strs := k.Strings(delim)
	vals := make([]float64, len(strs))
	for i := range strs {
		vals[i], _ = strconv.ParseFloat(strs[i], 64)
	}
	return vals
}

// Ints returns list of int divided by given delimiter.
// Elements that fail to parse are silently left as the zero value.
func (k *Key) Ints(delim string) []int {
	strs := k.Strings(delim)
	vals := make([]int, len(strs))
	for i := range strs {
		vals[i], _ = strconv.Atoi(strs[i])
	}
	return vals
}

// Int64s returns list of int64 divided by given delimiter.
// Elements that fail to parse are silently left as the zero value.
func (k *Key) Int64s(delim string) []int64 {
	strs := k.Strings(delim)
	vals := make([]int64, len(strs))
	for i := range strs {
		vals[i], _ = strconv.ParseInt(strs[i], 10, 64)
	}
	return vals
}

// Uints returns list of uint divided by given delimiter.
// Elements that fail to parse are silently left as the zero value.
func (k *Key) Uints(delim string) []uint {
	strs := k.Strings(delim)
	vals := make([]uint, len(strs))
	for i := range strs {
		u, _ := strconv.ParseUint(strs[i], 10, 64)
		vals[i] = uint(u)
	}
	return vals
}

// Uint64s returns list of uint64 divided by given delimiter.
// Elements that fail to parse are silently left as the zero value.
func (k *Key) Uint64s(delim string) []uint64 {
	strs := k.Strings(delim)
	vals := make([]uint64, len(strs))
	for i := range strs {
		vals[i], _ = strconv.ParseUint(strs[i], 10, 64)
	}
	return vals
}

// TimesFormat parses with given format and returns list of time.Time divided by given delimiter.
// Elements that fail to parse are silently left as the zero time.
func (k *Key) TimesFormat(format, delim string) []time.Time {
	strs := k.Strings(delim)
	vals := make([]time.Time, len(strs))
	for i := range strs {
		vals[i], _ = time.Parse(format, strs[i])
	}
	return vals
}

// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter.
func (k *Key) Times(delim string) []time.Time {
	return k.TimesFormat(time.RFC3339, delim)
}
+
+// SetValue changes key value.
+func (k *Key) SetValue(v string) {
+	k.value = v
+}
+
+//   _________              __  .__
+//  /   _____/ ____   _____/  |_|__| ____   ____
+//  \_____  \_/ __ \_/ ___\   __\  |/  _ \ /    \
+//  /        \  ___/\  \___|  | |  (  <_> )   |  \
+// /_______  /\___  >\___  >__| |__|\____/|___|  /
+//         \/     \/     \/                    \/
+
// Section represents a config section.
type Section struct {
	f        *File             // owning file (provides lock and BlockMode)
	Comment  string            // comment lines immediately above the section header
	name     string
	keys     map[string]*Key   // name -> key lookup
	keyList  []string          // preserves key insertion order
	keysHash map[string]string // cached name -> raw value map
}

// newSection constructs an empty section bound to file f.
func newSection(f *File, name string) *Section {
	return &Section{f, "", name, make(map[string]*Key), make([]string, 0, 10), make(map[string]string)}
}
+
+// Name returns name of Section.
+func (s *Section) Name() string {
+	return s.name
+}
+
+// NewKey creates a new key to given section.
+func (s *Section) NewKey(name, val string) (*Key, error) {
+	if len(name) == 0 {
+		return nil, errors.New("error creating new key: empty key name")
+	}
+
+	if s.f.BlockMode {
+		s.f.lock.Lock()
+		defer s.f.lock.Unlock()
+	}
+
+	if inSlice(name, s.keyList) {
+		s.keys[name].value = val
+		return s.keys[name], nil
+	}
+
+	s.keyList = append(s.keyList, name)
+	s.keys[name] = &Key{s, "", name, val, false}
+	s.keysHash[name] = val
+	return s.keys[name], nil
+}
+
// GetKey returns a key in the section by the given name. If the key is
// not found, parent sections (dot-separated: "a.b.c" -> "a.b" -> "a")
// are searched before an error is returned.
func (s *Section) GetKey(name string) (*Key, error) {
	// FIXME: change to section level lock?
	if s.f.BlockMode {
		s.f.lock.RLock()
	}
	key := s.keys[name]
	if s.f.BlockMode {
		s.f.lock.RUnlock()
	}

	if key == nil {
		// Check if it is a child-section.
		sname := s.name
		for {
			if i := strings.LastIndex(sname, "."); i > -1 {
				// Strip the last dot-component and delegate the lookup
				// to the nearest existing ancestor section.
				sname = sname[:i]
				sec, err := s.f.GetSection(sname)
				if err != nil {
					continue
				}
				return sec.GetKey(name)
			} else {
				break
			}
		}
		return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name)
	}
	return key, nil
}
+
+// Key assumes named Key exists in section and returns a zero-value when not.
+func (s *Section) Key(name string) *Key {
+	key, err := s.GetKey(name)
+	if err != nil {
+		// It's OK here because the only possible error is empty key name,
+		// but if it's empty, this piece of code won't be executed.
+		key, _ = s.NewKey(name, "")
+		return key
+	}
+	return key
+}
+
+// Keys returns list of keys of section.
+func (s *Section) Keys() []*Key {
+	keys := make([]*Key, len(s.keyList))
+	for i := range s.keyList {
+		keys[i] = s.Key(s.keyList[i])
+	}
+	return keys
+}
+
+// KeyStrings returns list of key names of section.
+func (s *Section) KeyStrings() []string {
+	list := make([]string, len(s.keyList))
+	copy(list, s.keyList)
+	return list
+}
+
+// KeysHash returns keys hash consisting of names and values.
+func (s *Section) KeysHash() map[string]string {
+	if s.f.BlockMode {
+		s.f.lock.RLock()
+		defer s.f.lock.RUnlock()
+	}
+
+	hash := map[string]string{}
+	for key, value := range s.keysHash {
+		hash[key] = value
+	}
+	return hash
+}
+
+// DeleteKey deletes a key from section.
+func (s *Section) DeleteKey(name string) {
+	if s.f.BlockMode {
+		s.f.lock.Lock()
+		defer s.f.lock.Unlock()
+	}
+
+	for i, k := range s.keyList {
+		if k == name {
+			s.keyList = append(s.keyList[:i], s.keyList[i+1:]...)
+			delete(s.keys, name)
+			return
+		}
+	}
+}
+
+// ___________.__.__
+// \_   _____/|__|  |   ____
+//  |    __)  |  |  | _/ __ \
+//  |     \   |  |  |_\  ___/
+//  \___  /   |__|____/\___  >
+//      \/                 \/
+
+// File represents a combination of a or more INI file(s) in memory.
+type File struct {
+	// Should make things safe, but sometimes doesn't matter.
+	BlockMode bool
+	// Make sure data is safe in multiple goroutines.
+	lock sync.RWMutex
+
+	// Allow combination of multiple data sources.
+	dataSources []dataSource
+	// Actual data is stored here.
+	sections map[string]*Section
+
+	// To keep data in order.
+	sectionList []string
+
+	NameMapper
+}
+
+// newFile initializes File object with given data sources.
+func newFile(dataSources []dataSource) *File {
+	return &File{
+		BlockMode:   true,
+		dataSources: dataSources,
+		sections:    make(map[string]*Section),
+		sectionList: make([]string, 0, 10),
+	}
+}
+
+func parseDataSource(source interface{}) (dataSource, error) {
+	switch s := source.(type) {
+	case string:
+		return sourceFile{s}, nil
+	case []byte:
+		return &sourceData{s}, nil
+	default:
+		return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s)
+	}
+}
+
+// Load loads and parses from INI data sources.
+// Arguments can be mixed of file name with string type, or raw data in []byte.
+func Load(source interface{}, others ...interface{}) (_ *File, err error) {
+	sources := make([]dataSource, len(others)+1)
+	sources[0], err = parseDataSource(source)
+	if err != nil {
+		return nil, err
+	}
+	for i := range others {
+		sources[i+1], err = parseDataSource(others[i])
+		if err != nil {
+			return nil, err
+		}
+	}
+	f := newFile(sources)
+	return f, f.Reload()
+}
+
+// Empty returns an empty file object.
+func Empty() *File {
+	// Ignore error here, we sure our data is good.
+	f, _ := Load([]byte(""))
+	return f
+}
+
+// NewSection creates a new section.
+func (f *File) NewSection(name string) (*Section, error) {
+	if len(name) == 0 {
+		return nil, errors.New("error creating new section: empty section name")
+	}
+
+	if f.BlockMode {
+		f.lock.Lock()
+		defer f.lock.Unlock()
+	}
+
+	if inSlice(name, f.sectionList) {
+		return f.sections[name], nil
+	}
+
+	f.sectionList = append(f.sectionList, name)
+	f.sections[name] = newSection(f, name)
+	return f.sections[name], nil
+}
+
+// NewSections creates a list of sections.
+func (f *File) NewSections(names ...string) (err error) {
+	for _, name := range names {
+		if _, err = f.NewSection(name); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
// GetSection returns the section with the given name; an empty name
// refers to the default section.
func (f *File) GetSection(name string) (*Section, error) {
	if len(name) == 0 {
		name = DEFAULT_SECTION
	}

	if f.BlockMode {
		f.lock.RLock()
		defer f.lock.RUnlock()
	}

	sec := f.sections[name]
	if sec == nil {
		return nil, fmt.Errorf("error when getting section: section '%s' not exists", name)
	}
	return sec, nil
}

// Section assumes named section exists and creates an empty one when not.
func (f *File) Section(name string) *Section {
	sec, err := f.GetSection(name)
	if err != nil {
		// Note: It's OK here because the only possible error is empty section name,
		// but if it's empty, this piece of code won't be executed.
		sec, _ = f.NewSection(name)
		return sec
	}
	return sec
}

// Sections returns the list of sections in original load order.
func (f *File) Sections() []*Section {
	sections := make([]*Section, len(f.sectionList))
	for i := range f.sectionList {
		sections[i] = f.Section(f.sectionList[i])
	}
	return sections
}

// SectionStrings returns the list of section names in original load order.
func (f *File) SectionStrings() []string {
	list := make([]string, len(f.sectionList))
	copy(list, f.sectionList)
	return list
}
+
+// DeleteSection deletes a section.
+func (f *File) DeleteSection(name string) {
+	if f.BlockMode {
+		f.lock.Lock()
+		defer f.lock.Unlock()
+	}
+
+	if len(name) == 0 {
+		name = DEFAULT_SECTION
+	}
+
+	for i, s := range f.sectionList {
+		if s == name {
+			f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...)
+			delete(f.sections, name)
+			return
+		}
+	}
+}
+
// cutComment strips everything from the first '#' onwards; the input is
// returned unchanged when no '#' is present.
func cutComment(str string) string {
	if i := strings.Index(str, "#"); i >= 0 {
		return str[:i]
	}
	return str
}
+
// checkMultipleLines consumes lines from buf until the closing value
// quote valQuote is found, accumulating everything before it into val.
// It fails if EOF is reached before the quote is closed.
func checkMultipleLines(buf *bufio.Reader, line, val, valQuote string) (string, error) {
	isEnd := false
	for {
		next, err := buf.ReadString('\n')
		if err != nil {
			if err != io.EOF {
				return "", err
			}
			// EOF: this line is the last chance to find the quote.
			isEnd = true
		}
		pos := strings.LastIndex(next, valQuote)
		if pos > -1 {
			val += next[:pos]
			break
		}
		val += next
		if isEnd {
			return "", fmt.Errorf("error parsing line: missing closing key quote from '%s' to '%s'", line, next)
		}
	}
	return val, nil
}
+
+func checkContinuationLines(buf *bufio.Reader, val string) (string, bool, error) {
+	isEnd := false
+	for {
+		valLen := len(val)
+		if valLen == 0 || val[valLen-1] != '\\' {
+			break
+		}
+		val = val[:valLen-1]
+
+		next, err := buf.ReadString('\n')
+		if err != nil {
+			if err != io.EOF {
+				return "", isEnd, err
+			}
+			isEnd = true
+		}
+
+		next = strings.TrimSpace(next)
+		if len(next) == 0 {
+			break
+		}
+		val += next
+	}
+	return val, isEnd, nil
+}
+
+// parse parses data through an io.Reader.
+func (f *File) parse(reader io.Reader) error {
+	buf := bufio.NewReader(reader)
+
+	// Handle BOM-UTF8.
+	// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding
+	mask, err := buf.Peek(3)
+	if err == nil && len(mask) >= 3 && mask[0] == 239 && mask[1] == 187 && mask[2] == 191 {
+		buf.Read(mask)
+	}
+
+	count := 1
+	comments := ""
+	isEnd := false
+
+	section, err := f.NewSection(DEFAULT_SECTION)
+	if err != nil {
+		return err
+	}
+
+	for {
+		line, err := buf.ReadString('\n')
+		line = strings.TrimSpace(line)
+		length := len(line)
+
+		// Check error and ignore io.EOF just for a moment.
+		if err != nil {
+			if err != io.EOF {
+				return fmt.Errorf("error reading next line: %v", err)
+			}
+			// The last line of file could be an empty line.
+			if length == 0 {
+				break
+			}
+			isEnd = true
+		}
+
+		// Skip empty lines.
+		if length == 0 {
+			continue
+		}
+
+		switch {
+		case line[0] == '#' || line[0] == ';': // Comments.
+			if len(comments) == 0 {
+				comments = line
+			} else {
+				comments += LineBreak + line
+			}
+			continue
+		case line[0] == '[' && line[length-1] == ']': // New sction.
+			section, err = f.NewSection(strings.TrimSpace(line[1 : length-1]))
+			if err != nil {
+				return err
+			}
+
+			if len(comments) > 0 {
+				section.Comment = comments
+				comments = ""
+			}
+			// Reset counter.
+			count = 1
+			continue
+		}
+
+		// Other possibilities.
+		var (
+			i        int
+			keyQuote string
+			kname    string
+			valQuote string
+			val      string
+		)
+
+		// Key name surrounded by quotes.
+		if line[0] == '"' {
+			if length > 6 && line[0:3] == `"""` {
+				keyQuote = `"""`
+			} else {
+				keyQuote = `"`
+			}
+		} else if line[0] == '`' {
+			keyQuote = "`"
+		}
+		if len(keyQuote) > 0 {
+			qLen := len(keyQuote)
+			pos := strings.Index(line[qLen:], keyQuote)
+			if pos == -1 {
+				return fmt.Errorf("error parsing line: missing closing key quote: %s", line)
+			}
+			pos = pos + qLen
+			i = strings.IndexAny(line[pos:], "=:")
+			if i < 0 {
+				return fmt.Errorf("error parsing line: key-value delimiter not found: %s", line)
+			} else if i == pos {
+				return fmt.Errorf("error parsing line: key is empty: %s", line)
+			}
+			i = i + pos
+			kname = line[qLen:pos] // Just keep spaces inside quotes.
+		} else {
+			i = strings.IndexAny(line, "=:")
+			if i < 0 {
+				return fmt.Errorf("error parsing line: key-value delimiter not found: %s", line)
+			} else if i == 0 {
+				return fmt.Errorf("error parsing line: key is empty: %s", line)
+			}
+			kname = strings.TrimSpace(line[0:i])
+		}
+
+		isAutoIncr := false
+		// Auto increment.
+		if kname == "-" {
+			isAutoIncr = true
+			kname = "#" + fmt.Sprint(count)
+			count++
+		}
+
+		lineRight := strings.TrimSpace(line[i+1:])
+		lineRightLength := len(lineRight)
+		firstChar := ""
+		if lineRightLength >= 2 {
+			firstChar = lineRight[0:1]
+		}
+		if firstChar == "`" {
+			valQuote = "`"
+		} else if firstChar == `"` {
+			if lineRightLength >= 3 && lineRight[0:3] == `"""` {
+				valQuote = `"""`
+			} else {
+				valQuote = `"`
+			}
+		} else if firstChar == `'` {
+			valQuote = `'`
+		}
+
+		if len(valQuote) > 0 {
+			qLen := len(valQuote)
+			pos := strings.LastIndex(lineRight[qLen:], valQuote)
+			// For multiple-line value check.
+			if pos == -1 {
+				if valQuote == `"` || valQuote == `'` {
+					return fmt.Errorf("error parsing line: single quote does not allow multiple-line value: %s", line)
+				}
+
+				val = lineRight[qLen:] + "\n"
+				val, err = checkMultipleLines(buf, line, val, valQuote)
+				if err != nil {
+					return err
+				}
+			} else {
+				val = lineRight[qLen : pos+qLen]
+			}
+		} else {
+			val = strings.TrimSpace(cutComment(lineRight))
+			val, isEnd, err = checkContinuationLines(buf, val)
+			if err != nil {
+				return err
+			}
+		}
+
+		k, err := section.NewKey(kname, val)
+		if err != nil {
+			return err
+		}
+		k.isAutoIncr = isAutoIncr
+		if len(comments) > 0 {
+			k.Comment = comments
+			comments = ""
+		}
+
+		if isEnd {
+			break
+		}
+	}
+	return nil
+}
+
+func (f *File) reload(s dataSource) error {
+	r, err := s.ReadCloser()
+	if err != nil {
+		return err
+	}
+	defer r.Close()
+
+	return f.parse(r)
+}
+
+// Reload reloads and parses all data sources.
+func (f *File) Reload() (err error) {
+	for _, s := range f.dataSources {
+		if err = f.reload(s); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Append appends one or more data sources and reloads automatically.
+func (f *File) Append(source interface{}, others ...interface{}) error {
+	ds, err := parseDataSource(source)
+	if err != nil {
+		return err
+	}
+	f.dataSources = append(f.dataSources, ds)
+	for _, s := range others {
+		ds, err = parseDataSource(s)
+		if err != nil {
+			return err
+		}
+		f.dataSources = append(f.dataSources, ds)
+	}
+	return f.Reload()
+}
+
+// WriteToIndent writes file content into io.Writer with given value indention.
+func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) {
+	equalSign := "="
+	if PrettyFormat {
+		equalSign = " = "
+	}
+
+	// Use buffer to make sure target is safe until finish encoding.
+	buf := bytes.NewBuffer(nil)
+	for i, sname := range f.sectionList {
+		sec := f.Section(sname)
+		if len(sec.Comment) > 0 {
+			if sec.Comment[0] != '#' && sec.Comment[0] != ';' {
+				sec.Comment = "; " + sec.Comment
+			}
+			if _, err = buf.WriteString(sec.Comment + LineBreak); err != nil {
+				return 0, err
+			}
+		}
+
+		if i > 0 {
+			if _, err = buf.WriteString("[" + sname + "]" + LineBreak); err != nil {
+				return 0, err
+			}
+		} else {
+			// Write nothing if default section is empty.
+			if len(sec.keyList) == 0 {
+				continue
+			}
+		}
+
+		for _, kname := range sec.keyList {
+			key := sec.Key(kname)
+			if len(key.Comment) > 0 {
+				if len(indent) > 0 && sname != DEFAULT_SECTION {
+					buf.WriteString(indent)
+				}
+				if key.Comment[0] != '#' && key.Comment[0] != ';' {
+					key.Comment = "; " + key.Comment
+				}
+				if _, err = buf.WriteString(key.Comment + LineBreak); err != nil {
+					return 0, err
+				}
+			}
+
+			if len(indent) > 0 && sname != DEFAULT_SECTION {
+				buf.WriteString(indent)
+			}
+
+			switch {
+			case key.isAutoIncr:
+				kname = "-"
+			case strings.Contains(kname, "`") || strings.Contains(kname, `"`):
+				kname = `"""` + kname + `"""`
+			case strings.Contains(kname, `=`) || strings.Contains(kname, `:`):
+				kname = "`" + kname + "`"
+			}
+
+			val := key.value
+			// In case key value contains "\n", "`" or "\"".
+			if strings.Contains(val, "\n") || strings.Contains(val, "`") || strings.Contains(val, `"`) ||
+				strings.Contains(val, "#") {
+				val = `"""` + val + `"""`
+			}
+			if _, err = buf.WriteString(kname + equalSign + val + LineBreak); err != nil {
+				return 0, err
+			}
+		}
+
+		// Put a line between sections.
+		if _, err = buf.WriteString(LineBreak); err != nil {
+			return 0, err
+		}
+	}
+
+	return buf.WriteTo(w)
+}
+
+// WriteTo writes file content into io.Writer.
+func (f *File) WriteTo(w io.Writer) (int64, error) {
+	return f.WriteToIndent(w, "")
+}
+
+// SaveToIndent writes content to file system with given value indention.
+func (f *File) SaveToIndent(filename, indent string) error {
+	// Note: Because we are truncating with os.Create,
+	// 	so it's safer to save to a temporary file location and rename afte done.
+	tmpPath := filename + "." + strconv.Itoa(time.Now().Nanosecond()) + ".tmp"
+	defer os.Remove(tmpPath)
+
+	fw, err := os.Create(tmpPath)
+	if err != nil {
+		return err
+	}
+
+	if _, err = f.WriteToIndent(fw, indent); err != nil {
+		fw.Close()
+		return err
+	}
+	fw.Close()
+
+	// Remove old file and rename the new one.
+	os.Remove(filename)
+	return os.Rename(tmpPath, filename)
+}
+
+// SaveTo writes content to file system.
+func (f *File) SaveTo(filename string) error {
+	return f.SaveToIndent(filename, "")
+}

+ 350 - 0
vendor/src/github.com/go-ini/ini/struct.go

@@ -0,0 +1,350 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"reflect"
+	"time"
+	"unicode"
+)
+
// NameMapper converts a raw struct field name into an INI key/section name.
type NameMapper func(string) string

// Predefined name mappers.
var (
	// AllCapsUnderscore maps CamelCase to ALL_CAPS_UNDERSCORE.
	AllCapsUnderscore NameMapper = func(raw string) string {
		out := make([]rune, 0, len(raw))
		for i, r := range raw {
			if r >= 'A' && r <= 'Z' && i > 0 {
				out = append(out, '_')
			}
			out = append(out, unicode.ToUpper(r))
		}
		return string(out)
	}
	// TitleUnderscore maps CamelCase to title_underscore.
	TitleUnderscore NameMapper = func(raw string) string {
		out := make([]rune, 0, len(raw))
		for i, r := range raw {
			if r >= 'A' && r <= 'Z' {
				if i > 0 {
					out = append(out, '_')
				}
				r += 'a' - 'A'
			}
			out = append(out, r)
		}
		return string(out)
	}
)
+
+func (s *Section) parseFieldName(raw, actual string) string {
+	if len(actual) > 0 {
+		return actual
+	}
+	if s.f.NameMapper != nil {
+		return s.f.NameMapper(raw)
+	}
+	return raw
+}
+
// parseDelim returns the delimiter taken from a `delim` struct tag,
// defaulting to "," when the tag is absent.
func parseDelim(actual string) string {
	if actual == "" {
		return ","
	}
	return actual
}
+
// reflectTime is the reflect.Kind of time.Time, i.e. reflect.Struct;
// the "case reflectTime" branches below therefore match any struct
// field, which is then treated as a time value.
var reflectTime = reflect.TypeOf(time.Now()).Kind()

// setWithProperType sets proper value to field based on its type,
// but it does not return error for failing parsing,
// because we want to use default value that is already assigned to struct.
func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
	switch t.Kind() {
	case reflect.String:
		if len(key.String()) == 0 {
			return nil
		}
		field.SetString(key.String())
	case reflect.Bool:
		boolVal, err := key.Bool()
		if err != nil {
			return nil
		}
		field.SetBool(boolVal)
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		// Try time.Duration first so values like "1h30m" map onto
		// duration-typed fields.
		durationVal, err := key.Duration()
		if err == nil {
			field.Set(reflect.ValueOf(durationVal))
			return nil
		}

		intVal, err := key.Int64()
		if err != nil {
			return nil
		}
		field.SetInt(intVal)
	//	byte is an alias for uint8, so supporting uint8 breaks support for byte
	case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		durationVal, err := key.Duration()
		if err == nil {
			field.Set(reflect.ValueOf(durationVal))
			return nil
		}

		uintVal, err := key.Uint64()
		if err != nil {
			return nil
		}
		field.SetUint(uintVal)

	case reflect.Float64:
		floatVal, err := key.Float64()
		if err != nil {
			return nil
		}
		field.SetFloat(floatVal)
	case reflectTime:
		timeVal, err := key.Time()
		if err != nil {
			return nil
		}
		field.Set(reflect.ValueOf(timeVal))
	case reflect.Slice:
		vals := key.Strings(delim)
		numVals := len(vals)
		if numVals == 0 {
			return nil
		}

		sliceOf := field.Type().Elem().Kind()

		var times []time.Time
		if sliceOf == reflectTime {
			times = key.Times(delim)
		}

		// NOTE(review): only []time.Time and string-kinded elements are
		// handled below; setting the raw string into any other element
		// type would panic in reflect.Value.Set — confirm callers only
		// map string/time slices.
		slice := reflect.MakeSlice(field.Type(), numVals, numVals)
		for i := 0; i < numVals; i++ {
			switch sliceOf {
			case reflectTime:
				slice.Index(i).Set(reflect.ValueOf(times[i]))
			default:
				slice.Index(i).Set(reflect.ValueOf(vals[i]))
			}
		}
		field.Set(slice)
	default:
		return fmt.Errorf("unsupported type '%s'", t)
	}
	return nil
}
+
// mapTo copies this section's keys into the fields of the struct value
// val. Fields tagged `ini:"-"` are skipped; anonymous pointer fields
// and struct fields are mapped recursively from the section of the
// same (mapped) name.
func (s *Section) mapTo(val reflect.Value) error {
	if val.Kind() == reflect.Ptr {
		val = val.Elem()
	}
	typ := val.Type()

	for i := 0; i < typ.NumField(); i++ {
		field := val.Field(i)
		tpField := typ.Field(i)

		tag := tpField.Tag.Get("ini")
		if tag == "-" {
			continue
		}

		fieldName := s.parseFieldName(tpField.Name, tag)
		if len(fieldName) == 0 || !field.CanSet() {
			continue
		}

		isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous
		isStruct := tpField.Type.Kind() == reflect.Struct
		if isAnonymous {
			// Allocate the embedded pointer before descending into it.
			field.Set(reflect.New(tpField.Type.Elem()))
		}

		if isAnonymous || isStruct {
			if sec, err := s.f.GetSection(fieldName); err == nil {
				if err = sec.mapTo(field); err != nil {
					return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
				}
				continue
			}
		}

		// Plain field: copy the key value if the key exists; missing
		// keys leave the field's current (default) value untouched.
		if key, err := s.GetKey(fieldName); err == nil {
			if err = setWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
				return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
			}
		}
	}
	return nil
}
+
+// MapTo maps section to given struct.
+func (s *Section) MapTo(v interface{}) error {
+	typ := reflect.TypeOf(v)
+	val := reflect.ValueOf(v)
+	if typ.Kind() == reflect.Ptr {
+		typ = typ.Elem()
+		val = val.Elem()
+	} else {
+		return errors.New("cannot map to non-pointer struct")
+	}
+
+	return s.mapTo(val)
+}
+
+// MapTo maps file to given struct.
+func (f *File) MapTo(v interface{}) error {
+	return f.Section("").MapTo(v)
+}
+
+// MapTo maps data sources to given struct with name mapper.
+func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
+	cfg, err := Load(source, others...)
+	if err != nil {
+		return err
+	}
+	cfg.NameMapper = mapper
+	return cfg.MapTo(v)
+}
+
+// MapTo maps data sources to given struct.
+func MapTo(v, source interface{}, others ...interface{}) error {
+	return MapToWithMapper(v, nil, source, others...)
+}
+
+// reflectWithProperType does the opposite thing with setWithProperType.
+func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
+	switch t.Kind() {
+	case reflect.String:
+		key.SetValue(field.String())
+	case reflect.Bool,
+		reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
+		reflect.Float64,
+		reflectTime:
+		key.SetValue(fmt.Sprint(field))
+	case reflect.Slice:
+		vals := field.Slice(0, field.Len())
+		if field.Len() == 0 {
+			return nil
+		}
+
+		var buf bytes.Buffer
+		isTime := fmt.Sprint(field.Type()) == "[]time.Time"
+		for i := 0; i < field.Len(); i++ {
+			if isTime {
+				buf.WriteString(vals.Index(i).Interface().(time.Time).Format(time.RFC3339))
+			} else {
+				buf.WriteString(fmt.Sprint(vals.Index(i)))
+			}
+			buf.WriteString(delim)
+		}
+		key.SetValue(buf.String()[:buf.Len()-1])
+	default:
+		return fmt.Errorf("unsupported type '%s'", t)
+	}
+	return nil
+}
+
+func (s *Section) reflectFrom(val reflect.Value) error {
+	if val.Kind() == reflect.Ptr {
+		val = val.Elem()
+	}
+	typ := val.Type()
+
+	for i := 0; i < typ.NumField(); i++ {
+		field := val.Field(i)
+		tpField := typ.Field(i)
+
+		tag := tpField.Tag.Get("ini")
+		if tag == "-" {
+			continue
+		}
+
+		fieldName := s.parseFieldName(tpField.Name, tag)
+		if len(fieldName) == 0 || !field.CanSet() {
+			continue
+		}
+
+		if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) ||
+			(tpField.Type.Kind() == reflect.Struct) {
+			// Note: The only error here is section doesn't exist.
+			sec, err := s.f.GetSection(fieldName)
+			if err != nil {
+				// Note: fieldName can never be empty here, ignore error.
+				sec, _ = s.f.NewSection(fieldName)
+			}
+			if err = sec.reflectFrom(field); err != nil {
+				return fmt.Errorf("error reflecting field(%s): %v", fieldName, err)
+			}
+			continue
+		}
+
+		// Note: Same reason as secion.
+		key, err := s.GetKey(fieldName)
+		if err != nil {
+			key, _ = s.NewKey(fieldName, "")
+		}
+		if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
+			return fmt.Errorf("error reflecting field(%s): %v", fieldName, err)
+		}
+
+	}
+	return nil
+}
+
+// ReflectFrom reflects secion from given struct.
+func (s *Section) ReflectFrom(v interface{}) error {
+	typ := reflect.TypeOf(v)
+	val := reflect.ValueOf(v)
+	if typ.Kind() == reflect.Ptr {
+		typ = typ.Elem()
+		val = val.Elem()
+	} else {
+		return errors.New("cannot reflect from non-pointer struct")
+	}
+
+	return s.reflectFrom(val)
+}
+
+// ReflectFrom reflects file from given struct.
+func (f *File) ReflectFrom(v interface{}) error {
+	return f.Section("").ReflectFrom(v)
+}
+
+// ReflectFrom reflects data sources from given struct with name mapper.
+func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error {
+	cfg.NameMapper = mapper
+	return cfg.ReflectFrom(v)
+}
+
+// ReflectFrom reflects data sources from given struct.
+func ReflectFrom(cfg *File, v interface{}) error {
+	return ReflectFromWithMapper(cfg, v, nil)
+}

+ 4 - 0
vendor/src/github.com/jmespath/go-jmespath/.gitignore

@@ -0,0 +1,4 @@
+jpgo
+jmespath-fuzz.zip
+cpu.out
+go-jmespath.test

+ 9 - 0
vendor/src/github.com/jmespath/go-jmespath/.travis.yml

@@ -0,0 +1,9 @@
+language: go
+
+sudo: false
+
+go:
+  - 1.4
+
+install: go get -v -t ./...
+script: make test

+ 13 - 0
vendor/src/github.com/jmespath/go-jmespath/LICENSE

@@ -0,0 +1,13 @@
+Copyright 2015 James Saryerwinnie
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.

+ 44 - 0
vendor/src/github.com/jmespath/go-jmespath/Makefile

@@ -0,0 +1,44 @@
+
+CMD = jpgo
+
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+	@echo "  test                    to run all the tests"
+	@echo "  build                   to build the library and jp executable"
+	@echo "  generate                to run codegen"
+
+
+generate:
+	go generate ./...
+
+build:
+	rm -f $(CMD)
+	go build ./...
+	rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./...
+	mv cmd/$(CMD)/$(CMD) .
+
+test:
+	go test -v ./...
+
+check:
+	go vet ./...
+	@echo "golint ./..."
+	@lint=`golint ./...`; \
+	lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \
+	echo "$$lint"; \
+	if [ "$$lint" != "" ]; then exit 1; fi
+
+htmlc:
+	go test -coverprofile="/tmp/jpcov"  && go tool cover -html="/tmp/jpcov" && unlink /tmp/jpcov
+
+buildfuzz:
+	go-fuzz-build github.com/jmespath/go-jmespath/fuzz
+
+fuzz: buildfuzz
+	go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/testdata
+
+bench:
+	go test -bench . -cpuprofile cpu.out
+
+pprof-cpu:
+	go tool pprof ./go-jmespath.test ./cpu.out

+ 7 - 0
vendor/src/github.com/jmespath/go-jmespath/README.md

@@ -0,0 +1,7 @@
+# go-jmespath - A JMESPath implementation in Go
+
+[![Build Status](https://img.shields.io/travis/jmespath/go-jmespath.svg)](https://travis-ci.org/jmespath/go-jmespath)
+
+
+
+See http://jmespath.org for more info.

+ 49 - 0
vendor/src/github.com/jmespath/go-jmespath/api.go

@@ -0,0 +1,49 @@
+package jmespath
+
+import "strconv"
+
+// JMESPath is the representation of a compiled JMESPath query. A JMESPath is
+// safe for concurrent use by multiple goroutines.
+type JMESPath struct {
+	ast  ASTNode
+	intr *treeInterpreter
+}
+
+// Compile parses a JMESPath expression and returns, if successful, a JMESPath
+// object that can be used to match against data.
+func Compile(expression string) (*JMESPath, error) {
+	parser := NewParser()
+	ast, err := parser.Parse(expression)
+	if err != nil {
+		return nil, err
+	}
+	jmespath := &JMESPath{ast: ast, intr: newInterpreter()}
+	return jmespath, nil
+}
+
+// MustCompile is like Compile but panics if the expression cannot be parsed.
+// It simplifies safe initialization of global variables holding compiled
+// JMESPaths.
+func MustCompile(expression string) *JMESPath {
+	jmespath, err := Compile(expression)
+	if err != nil {
+		panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error())
+	}
+	return jmespath
+}
+
+// Search evaluates a JMESPath expression against input data and returns the result.
+func (jp *JMESPath) Search(data interface{}) (interface{}, error) {
+	return jp.intr.Execute(jp.ast, data)
+}
+
+// Search evaluates a JMESPath expression against input data and returns the result.
+func Search(expression string, data interface{}) (interface{}, error) {
+	intr := newInterpreter()
+	parser := NewParser()
+	ast, err := parser.Parse(expression)
+	if err != nil {
+		return nil, err
+	}
+	return intr.Execute(ast, data)
+}

+ 16 - 0
vendor/src/github.com/jmespath/go-jmespath/astnodetype_string.go

@@ -0,0 +1,16 @@
+// generated by stringer -type astNodeType; DO NOT EDIT
+
+package jmespath
+
+import "fmt"
+
+const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection"
+
+var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307}
+
+func (i astNodeType) String() string {
+	if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) {
+		return fmt.Sprintf("astNodeType(%d)", i)
+	}
+	return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]]
+}

+ 842 - 0
vendor/src/github.com/jmespath/go-jmespath/functions.go

@@ -0,0 +1,842 @@
+package jmespath
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"unicode/utf8"
+)
+
+type jpFunction func(arguments []interface{}) (interface{}, error)
+
+type jpType string
+
+const (
+	jpUnknown     jpType = "unknown"
+	jpNumber      jpType = "number"
+	jpString      jpType = "string"
+	jpArray       jpType = "array"
+	jpObject      jpType = "object"
+	jpArrayNumber jpType = "array[number]"
+	jpArrayString jpType = "array[string]"
+	jpExpref      jpType = "expref"
+	jpAny         jpType = "any"
+)
+
+type functionEntry struct {
+	name      string
+	arguments []argSpec
+	handler   jpFunction
+	hasExpRef bool
+}
+
+type argSpec struct {
+	types    []jpType
+	variadic bool
+}
+
+type byExprString struct {
+	intr     *treeInterpreter
+	node     ASTNode
+	items    []interface{}
+	hasError bool
+}
+
+func (a *byExprString) Len() int {
+	return len(a.items)
+}
+func (a *byExprString) Swap(i, j int) {
+	a.items[i], a.items[j] = a.items[j], a.items[i]
+}
+func (a *byExprString) Less(i, j int) bool {
+	first, err := a.intr.Execute(a.node, a.items[i])
+	if err != nil {
+		a.hasError = true
+		// Return a dummy value.
+		return true
+	}
+	ith, ok := first.(string)
+	if !ok {
+		a.hasError = true
+		return true
+	}
+	second, err := a.intr.Execute(a.node, a.items[j])
+	if err != nil {
+		a.hasError = true
+		// Return a dummy value.
+		return true
+	}
+	jth, ok := second.(string)
+	if !ok {
+		a.hasError = true
+		return true
+	}
+	return ith < jth
+}
+
+type byExprFloat struct {
+	intr     *treeInterpreter
+	node     ASTNode
+	items    []interface{}
+	hasError bool
+}
+
+func (a *byExprFloat) Len() int {
+	return len(a.items)
+}
+func (a *byExprFloat) Swap(i, j int) {
+	a.items[i], a.items[j] = a.items[j], a.items[i]
+}
+func (a *byExprFloat) Less(i, j int) bool {
+	first, err := a.intr.Execute(a.node, a.items[i])
+	if err != nil {
+		a.hasError = true
+		// Return a dummy value.
+		return true
+	}
+	ith, ok := first.(float64)
+	if !ok {
+		a.hasError = true
+		return true
+	}
+	second, err := a.intr.Execute(a.node, a.items[j])
+	if err != nil {
+		a.hasError = true
+		// Return a dummy value.
+		return true
+	}
+	jth, ok := second.(float64)
+	if !ok {
+		a.hasError = true
+		return true
+	}
+	return ith < jth
+}
+
+type functionCaller struct {
+	functionTable map[string]functionEntry
+}
+
+func newFunctionCaller() *functionCaller {
+	caller := &functionCaller{}
+	caller.functionTable = map[string]functionEntry{
+		"length": {
+			name: "length",
+			arguments: []argSpec{
+				{types: []jpType{jpString, jpArray, jpObject}},
+			},
+			handler: jpfLength,
+		},
+		"starts_with": {
+			name: "starts_with",
+			arguments: []argSpec{
+				{types: []jpType{jpString}},
+				{types: []jpType{jpString}},
+			},
+			handler: jpfStartsWith,
+		},
+		"abs": {
+			name: "abs",
+			arguments: []argSpec{
+				{types: []jpType{jpNumber}},
+			},
+			handler: jpfAbs,
+		},
+		"avg": {
+			name: "avg",
+			arguments: []argSpec{
+				{types: []jpType{jpArrayNumber}},
+			},
+			handler: jpfAvg,
+		},
+		"ceil": {
+			name: "ceil",
+			arguments: []argSpec{
+				{types: []jpType{jpNumber}},
+			},
+			handler: jpfCeil,
+		},
+		"contains": {
+			name: "contains",
+			arguments: []argSpec{
+				{types: []jpType{jpArray, jpString}},
+				{types: []jpType{jpAny}},
+			},
+			handler: jpfContains,
+		},
+		"ends_with": {
+			name: "ends_with",
+			arguments: []argSpec{
+				{types: []jpType{jpString}},
+				{types: []jpType{jpString}},
+			},
+			handler: jpfEndsWith,
+		},
+		"floor": {
+			name: "floor",
+			arguments: []argSpec{
+				{types: []jpType{jpNumber}},
+			},
+			handler: jpfFloor,
+		},
+		"map": {
+			name: "amp",
+			arguments: []argSpec{
+				{types: []jpType{jpExpref}},
+				{types: []jpType{jpArray}},
+			},
+			handler:   jpfMap,
+			hasExpRef: true,
+		},
+		"max": {
+			name: "max",
+			arguments: []argSpec{
+				{types: []jpType{jpArrayNumber, jpArrayString}},
+			},
+			handler: jpfMax,
+		},
+		"merge": {
+			name: "merge",
+			arguments: []argSpec{
+				{types: []jpType{jpObject}, variadic: true},
+			},
+			handler: jpfMerge,
+		},
+		"max_by": {
+			name: "max_by",
+			arguments: []argSpec{
+				{types: []jpType{jpArray}},
+				{types: []jpType{jpExpref}},
+			},
+			handler:   jpfMaxBy,
+			hasExpRef: true,
+		},
+		"sum": {
+			name: "sum",
+			arguments: []argSpec{
+				{types: []jpType{jpArrayNumber}},
+			},
+			handler: jpfSum,
+		},
+		"min": {
+			name: "min",
+			arguments: []argSpec{
+				{types: []jpType{jpArrayNumber, jpArrayString}},
+			},
+			handler: jpfMin,
+		},
+		"min_by": {
+			name: "min_by",
+			arguments: []argSpec{
+				{types: []jpType{jpArray}},
+				{types: []jpType{jpExpref}},
+			},
+			handler:   jpfMinBy,
+			hasExpRef: true,
+		},
+		"type": {
+			name: "type",
+			arguments: []argSpec{
+				{types: []jpType{jpAny}},
+			},
+			handler: jpfType,
+		},
+		"keys": {
+			name: "keys",
+			arguments: []argSpec{
+				{types: []jpType{jpObject}},
+			},
+			handler: jpfKeys,
+		},
+		"values": {
+			name: "values",
+			arguments: []argSpec{
+				{types: []jpType{jpObject}},
+			},
+			handler: jpfValues,
+		},
+		"sort": {
+			name: "sort",
+			arguments: []argSpec{
+				{types: []jpType{jpArrayString, jpArrayNumber}},
+			},
+			handler: jpfSort,
+		},
+		"sort_by": {
+			name: "sort_by",
+			arguments: []argSpec{
+				{types: []jpType{jpArray}},
+				{types: []jpType{jpExpref}},
+			},
+			handler:   jpfSortBy,
+			hasExpRef: true,
+		},
+		"join": {
+			name: "join",
+			arguments: []argSpec{
+				{types: []jpType{jpString}},
+				{types: []jpType{jpArrayString}},
+			},
+			handler: jpfJoin,
+		},
+		"reverse": {
+			name: "reverse",
+			arguments: []argSpec{
+				{types: []jpType{jpArray, jpString}},
+			},
+			handler: jpfReverse,
+		},
+		"to_array": {
+			name: "to_array",
+			arguments: []argSpec{
+				{types: []jpType{jpAny}},
+			},
+			handler: jpfToArray,
+		},
+		"to_string": {
+			name: "to_string",
+			arguments: []argSpec{
+				{types: []jpType{jpAny}},
+			},
+			handler: jpfToString,
+		},
+		"to_number": {
+			name: "to_number",
+			arguments: []argSpec{
+				{types: []jpType{jpAny}},
+			},
+			handler: jpfToNumber,
+		},
+		"not_null": {
+			name: "not_null",
+			arguments: []argSpec{
+				{types: []jpType{jpAny}, variadic: true},
+			},
+			handler: jpfNotNull,
+		},
+	}
+	return caller
+}
+
+func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) {
+	if len(e.arguments) == 0 {
+		return arguments, nil
+	}
+	if !e.arguments[len(e.arguments)-1].variadic {
+		if len(e.arguments) != len(arguments) {
+			return nil, errors.New("incorrect number of args")
+		}
+		for i, spec := range e.arguments {
+			userArg := arguments[i]
+			err := spec.typeCheck(userArg)
+			if err != nil {
+				return nil, err
+			}
+		}
+		return arguments, nil
+	}
+	if len(arguments) < len(e.arguments) {
+		return nil, errors.New("Invalid arity.")
+	}
+	return arguments, nil
+}
+
+func (a *argSpec) typeCheck(arg interface{}) error {
+	for _, t := range a.types {
+		switch t {
+		case jpNumber:
+			if _, ok := arg.(float64); ok {
+				return nil
+			}
+		case jpString:
+			if _, ok := arg.(string); ok {
+				return nil
+			}
+		case jpArray:
+			if isSliceType(arg) {
+				return nil
+			}
+		case jpObject:
+			if _, ok := arg.(map[string]interface{}); ok {
+				return nil
+			}
+		case jpArrayNumber:
+			if _, ok := toArrayNum(arg); ok {
+				return nil
+			}
+		case jpArrayString:
+			if _, ok := toArrayStr(arg); ok {
+				return nil
+			}
+		case jpAny:
+			return nil
+		case jpExpref:
+			if _, ok := arg.(expRef); ok {
+				return nil
+			}
+		}
+	}
+	return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types)
+}
+
+func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) {
+	entry, ok := f.functionTable[name]
+	if !ok {
+		return nil, errors.New("unknown function: " + name)
+	}
+	resolvedArgs, err := entry.resolveArgs(arguments)
+	if err != nil {
+		return nil, err
+	}
+	if entry.hasExpRef {
+		var extra []interface{}
+		extra = append(extra, intr)
+		resolvedArgs = append(extra, resolvedArgs...)
+	}
+	return entry.handler(resolvedArgs)
+}
+
+func jpfAbs(arguments []interface{}) (interface{}, error) {
+	num := arguments[0].(float64)
+	return math.Abs(num), nil
+}
+
+func jpfLength(arguments []interface{}) (interface{}, error) {
+	arg := arguments[0]
+	if c, ok := arg.(string); ok {
+		return float64(utf8.RuneCountInString(c)), nil
+	} else if isSliceType(arg) {
+		v := reflect.ValueOf(arg)
+		return float64(v.Len()), nil
+	} else if c, ok := arg.(map[string]interface{}); ok {
+		return float64(len(c)), nil
+	}
+	return nil, errors.New("could not compute length()")
+}
+
+func jpfStartsWith(arguments []interface{}) (interface{}, error) {
+	search := arguments[0].(string)
+	prefix := arguments[1].(string)
+	return strings.HasPrefix(search, prefix), nil
+}
+
+func jpfAvg(arguments []interface{}) (interface{}, error) {
+	// We've already type checked the value so we can safely use
+	// type assertions.
+	args := arguments[0].([]interface{})
+	length := float64(len(args))
+	numerator := 0.0
+	for _, n := range args {
+		numerator += n.(float64)
+	}
+	return numerator / length, nil
+}
+func jpfCeil(arguments []interface{}) (interface{}, error) {
+	val := arguments[0].(float64)
+	return math.Ceil(val), nil
+}
+func jpfContains(arguments []interface{}) (interface{}, error) {
+	search := arguments[0]
+	el := arguments[1]
+	if searchStr, ok := search.(string); ok {
+		if elStr, ok := el.(string); ok {
+			return strings.Index(searchStr, elStr) != -1, nil
+		}
+		return false, nil
+	}
+	// Otherwise this is a generic contains for []interface{}
+	general := search.([]interface{})
+	for _, item := range general {
+		if item == el {
+			return true, nil
+		}
+	}
+	return false, nil
+}
+func jpfEndsWith(arguments []interface{}) (interface{}, error) {
+	search := arguments[0].(string)
+	suffix := arguments[1].(string)
+	return strings.HasSuffix(search, suffix), nil
+}
+func jpfFloor(arguments []interface{}) (interface{}, error) {
+	val := arguments[0].(float64)
+	return math.Floor(val), nil
+}
+func jpfMap(arguments []interface{}) (interface{}, error) {
+	intr := arguments[0].(*treeInterpreter)
+	exp := arguments[1].(expRef)
+	node := exp.ref
+	arr := arguments[2].([]interface{})
+	mapped := make([]interface{}, 0, len(arr))
+	for _, value := range arr {
+		current, err := intr.Execute(node, value)
+		if err != nil {
+			return nil, err
+		}
+		mapped = append(mapped, current)
+	}
+	return mapped, nil
+}
+func jpfMax(arguments []interface{}) (interface{}, error) {
+	if items, ok := toArrayNum(arguments[0]); ok {
+		if len(items) == 0 {
+			return nil, nil
+		}
+		if len(items) == 1 {
+			return items[0], nil
+		}
+		best := items[0]
+		for _, item := range items[1:] {
+			if item > best {
+				best = item
+			}
+		}
+		return best, nil
+	}
+	// Otherwise we're dealing with a max() of strings.
+	items, _ := toArrayStr(arguments[0])
+	if len(items) == 0 {
+		return nil, nil
+	}
+	if len(items) == 1 {
+		return items[0], nil
+	}
+	best := items[0]
+	for _, item := range items[1:] {
+		if item > best {
+			best = item
+		}
+	}
+	return best, nil
+}
+func jpfMerge(arguments []interface{}) (interface{}, error) {
+	final := make(map[string]interface{})
+	for _, m := range arguments {
+		mapped := m.(map[string]interface{})
+		for key, value := range mapped {
+			final[key] = value
+		}
+	}
+	return final, nil
+}
+func jpfMaxBy(arguments []interface{}) (interface{}, error) {
+	intr := arguments[0].(*treeInterpreter)
+	arr := arguments[1].([]interface{})
+	exp := arguments[2].(expRef)
+	node := exp.ref
+	if len(arr) == 0 {
+		return nil, nil
+	} else if len(arr) == 1 {
+		return arr[0], nil
+	}
+	start, err := intr.Execute(node, arr[0])
+	if err != nil {
+		return nil, err
+	}
+	switch t := start.(type) {
+	case float64:
+		bestVal := t
+		bestItem := arr[0]
+		for _, item := range arr[1:] {
+			result, err := intr.Execute(node, item)
+			if err != nil {
+				return nil, err
+			}
+			current, ok := result.(float64)
+			if !ok {
+				return nil, errors.New("invalid type, must be number")
+			}
+			if current > bestVal {
+				bestVal = current
+				bestItem = item
+			}
+		}
+		return bestItem, nil
+	case string:
+		bestVal := t
+		bestItem := arr[0]
+		for _, item := range arr[1:] {
+			result, err := intr.Execute(node, item)
+			if err != nil {
+				return nil, err
+			}
+			current, ok := result.(string)
+			if !ok {
+				return nil, errors.New("invalid type, must be string")
+			}
+			if current > bestVal {
+				bestVal = current
+				bestItem = item
+			}
+		}
+		return bestItem, nil
+	default:
+		return nil, errors.New("invalid type, must be number of string")
+	}
+}
+func jpfSum(arguments []interface{}) (interface{}, error) {
+	items, _ := toArrayNum(arguments[0])
+	sum := 0.0
+	for _, item := range items {
+		sum += item
+	}
+	return sum, nil
+}
+
+func jpfMin(arguments []interface{}) (interface{}, error) {
+	if items, ok := toArrayNum(arguments[0]); ok {
+		if len(items) == 0 {
+			return nil, nil
+		}
+		if len(items) == 1 {
+			return items[0], nil
+		}
+		best := items[0]
+		for _, item := range items[1:] {
+			if item < best {
+				best = item
+			}
+		}
+		return best, nil
+	}
+	items, _ := toArrayStr(arguments[0])
+	if len(items) == 0 {
+		return nil, nil
+	}
+	if len(items) == 1 {
+		return items[0], nil
+	}
+	best := items[0]
+	for _, item := range items[1:] {
+		if item < best {
+			best = item
+		}
+	}
+	return best, nil
+}
+
+func jpfMinBy(arguments []interface{}) (interface{}, error) {
+	intr := arguments[0].(*treeInterpreter)
+	arr := arguments[1].([]interface{})
+	exp := arguments[2].(expRef)
+	node := exp.ref
+	if len(arr) == 0 {
+		return nil, nil
+	} else if len(arr) == 1 {
+		return arr[0], nil
+	}
+	start, err := intr.Execute(node, arr[0])
+	if err != nil {
+		return nil, err
+	}
+	if t, ok := start.(float64); ok {
+		bestVal := t
+		bestItem := arr[0]
+		for _, item := range arr[1:] {
+			result, err := intr.Execute(node, item)
+			if err != nil {
+				return nil, err
+			}
+			current, ok := result.(float64)
+			if !ok {
+				return nil, errors.New("invalid type, must be number")
+			}
+			if current < bestVal {
+				bestVal = current
+				bestItem = item
+			}
+		}
+		return bestItem, nil
+	} else if t, ok := start.(string); ok {
+		bestVal := t
+		bestItem := arr[0]
+		for _, item := range arr[1:] {
+			result, err := intr.Execute(node, item)
+			if err != nil {
+				return nil, err
+			}
+			current, ok := result.(string)
+			if !ok {
+				return nil, errors.New("invalid type, must be string")
+			}
+			if current < bestVal {
+				bestVal = current
+				bestItem = item
+			}
+		}
+		return bestItem, nil
+	} else {
+		return nil, errors.New("invalid type, must be number of string")
+	}
+}
+func jpfType(arguments []interface{}) (interface{}, error) {
+	arg := arguments[0]
+	if _, ok := arg.(float64); ok {
+		return "number", nil
+	}
+	if _, ok := arg.(string); ok {
+		return "string", nil
+	}
+	if _, ok := arg.([]interface{}); ok {
+		return "array", nil
+	}
+	if _, ok := arg.(map[string]interface{}); ok {
+		return "object", nil
+	}
+	if arg == nil {
+		return "null", nil
+	}
+	if arg == true || arg == false {
+		return "boolean", nil
+	}
+	return nil, errors.New("unknown type")
+}
+func jpfKeys(arguments []interface{}) (interface{}, error) {
+	arg := arguments[0].(map[string]interface{})
+	collected := make([]interface{}, 0, len(arg))
+	for key := range arg {
+		collected = append(collected, key)
+	}
+	return collected, nil
+}
+func jpfValues(arguments []interface{}) (interface{}, error) {
+	arg := arguments[0].(map[string]interface{})
+	collected := make([]interface{}, 0, len(arg))
+	for _, value := range arg {
+		collected = append(collected, value)
+	}
+	return collected, nil
+}
+func jpfSort(arguments []interface{}) (interface{}, error) {
+	if items, ok := toArrayNum(arguments[0]); ok {
+		d := sort.Float64Slice(items)
+		sort.Stable(d)
+		final := make([]interface{}, len(d))
+		for i, val := range d {
+			final[i] = val
+		}
+		return final, nil
+	}
+	// Otherwise we're dealing with sort()'ing strings.
+	items, _ := toArrayStr(arguments[0])
+	d := sort.StringSlice(items)
+	sort.Stable(d)
+	final := make([]interface{}, len(d))
+	for i, val := range d {
+		final[i] = val
+	}
+	return final, nil
+}
+func jpfSortBy(arguments []interface{}) (interface{}, error) {
+	intr := arguments[0].(*treeInterpreter)
+	arr := arguments[1].([]interface{})
+	exp := arguments[2].(expRef)
+	node := exp.ref
+	if len(arr) == 0 {
+		return arr, nil
+	} else if len(arr) == 1 {
+		return arr, nil
+	}
+	start, err := intr.Execute(node, arr[0])
+	if err != nil {
+		return nil, err
+	}
+	if _, ok := start.(float64); ok {
+		sortable := &byExprFloat{intr, node, arr, false}
+		sort.Stable(sortable)
+		if sortable.hasError {
+			return nil, errors.New("error in sort_by comparison")
+		}
+		return arr, nil
+	} else if _, ok := start.(string); ok {
+		sortable := &byExprString{intr, node, arr, false}
+		sort.Stable(sortable)
+		if sortable.hasError {
+			return nil, errors.New("error in sort_by comparison")
+		}
+		return arr, nil
+	} else {
+		return nil, errors.New("invalid type, must be number of string")
+	}
+}
+func jpfJoin(arguments []interface{}) (interface{}, error) {
+	sep := arguments[0].(string)
+	// We can't just do arguments[1].([]string), we have to
+	// manually convert each item to a string.
+	arrayStr := []string{}
+	for _, item := range arguments[1].([]interface{}) {
+		arrayStr = append(arrayStr, item.(string))
+	}
+	return strings.Join(arrayStr, sep), nil
+}
+func jpfReverse(arguments []interface{}) (interface{}, error) {
+	if s, ok := arguments[0].(string); ok {
+		r := []rune(s)
+		for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 {
+			r[i], r[j] = r[j], r[i]
+		}
+		return string(r), nil
+	}
+	items := arguments[0].([]interface{})
+	length := len(items)
+	reversed := make([]interface{}, length)
+	for i, item := range items {
+		reversed[length-(i+1)] = item
+	}
+	return reversed, nil
+}
+func jpfToArray(arguments []interface{}) (interface{}, error) {
+	if _, ok := arguments[0].([]interface{}); ok {
+		return arguments[0], nil
+	}
+	return arguments[:1:1], nil
+}
+func jpfToString(arguments []interface{}) (interface{}, error) {
+	if v, ok := arguments[0].(string); ok {
+		return v, nil
+	}
+	result, err := json.Marshal(arguments[0])
+	if err != nil {
+		return nil, err
+	}
+	return string(result), nil
+}
+func jpfToNumber(arguments []interface{}) (interface{}, error) {
+	arg := arguments[0]
+	if v, ok := arg.(float64); ok {
+		return v, nil
+	}
+	if v, ok := arg.(string); ok {
+		conv, err := strconv.ParseFloat(v, 64)
+		if err != nil {
+			return nil, nil
+		}
+		return conv, nil
+	}
+	if _, ok := arg.([]interface{}); ok {
+		return nil, nil
+	}
+	if _, ok := arg.(map[string]interface{}); ok {
+		return nil, nil
+	}
+	if arg == nil {
+		return nil, nil
+	}
+	if arg == true || arg == false {
+		return nil, nil
+	}
+	return nil, errors.New("unknown type")
+}
+func jpfNotNull(arguments []interface{}) (interface{}, error) {
+	for _, arg := range arguments {
+		if arg != nil {
+			return arg, nil
+		}
+	}
+	return nil, nil
+}

+ 418 - 0
vendor/src/github.com/jmespath/go-jmespath/interpreter.go

@@ -0,0 +1,418 @@
+package jmespath
+
+import (
+	"errors"
+	"reflect"
+	"unicode"
+	"unicode/utf8"
+)
+
+/* This is a tree based interpreter.  It walks the AST and directly
+   interprets the AST to search through a JSON document.
+*/
+
+type treeInterpreter struct {
+	fCall *functionCaller
+}
+
+func newInterpreter() *treeInterpreter {
+	interpreter := treeInterpreter{}
+	interpreter.fCall = newFunctionCaller()
+	return &interpreter
+}
+
+type expRef struct {
+	ref ASTNode
+}
+
+// Execute takes an ASTNode and input data and interprets the AST directly.
+// It will produce the result of applying the JMESPath expression associated
+// with the ASTNode to the input data "value".
+func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) {
+	switch node.nodeType {
+	case ASTComparator:
+		left, err := intr.Execute(node.children[0], value)
+		if err != nil {
+			return nil, err
+		}
+		right, err := intr.Execute(node.children[1], value)
+		if err != nil {
+			return nil, err
+		}
+		switch node.value {
+		case tEQ:
+			return objsEqual(left, right), nil
+		case tNE:
+			return !objsEqual(left, right), nil
+		}
+		leftNum, ok := left.(float64)
+		if !ok {
+			return nil, nil
+		}
+		rightNum, ok := right.(float64)
+		if !ok {
+			return nil, nil
+		}
+		switch node.value {
+		case tGT:
+			return leftNum > rightNum, nil
+		case tGTE:
+			return leftNum >= rightNum, nil
+		case tLT:
+			return leftNum < rightNum, nil
+		case tLTE:
+			return leftNum <= rightNum, nil
+		}
+	case ASTExpRef:
+		return expRef{ref: node.children[0]}, nil
+	case ASTFunctionExpression:
+		resolvedArgs := []interface{}{}
+		for _, arg := range node.children {
+			current, err := intr.Execute(arg, value)
+			if err != nil {
+				return nil, err
+			}
+			resolvedArgs = append(resolvedArgs, current)
+		}
+		return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr)
+	case ASTField:
+		if m, ok := value.(map[string]interface{}); ok {
+			key := node.value.(string)
+			return m[key], nil
+		}
+		return intr.fieldFromStruct(node.value.(string), value)
+	case ASTFilterProjection:
+		left, err := intr.Execute(node.children[0], value)
+		if err != nil {
+			return nil, nil
+		}
+		sliceType, ok := left.([]interface{})
+		if !ok {
+			if isSliceType(left) {
+				return intr.filterProjectionWithReflection(node, left)
+			}
+			return nil, nil
+		}
+		compareNode := node.children[2]
+		collected := []interface{}{}
+		for _, element := range sliceType {
+			result, err := intr.Execute(compareNode, element)
+			if err != nil {
+				return nil, err
+			}
+			if !isFalse(result) {
+				current, err := intr.Execute(node.children[1], element)
+				if err != nil {
+					return nil, err
+				}
+				if current != nil {
+					collected = append(collected, current)
+				}
+			}
+		}
+		return collected, nil
+	case ASTFlatten:
+		left, err := intr.Execute(node.children[0], value)
+		if err != nil {
+			return nil, nil
+		}
+		sliceType, ok := left.([]interface{})
+		if !ok {
+			// If we can't type convert to []interface{}, there's
+			// a chance this could still work via reflection if we're
+			// dealing with user provided types.
+			if isSliceType(left) {
+				return intr.flattenWithReflection(left)
+			}
+			return nil, nil
+		}
+		flattened := []interface{}{}
+		for _, element := range sliceType {
+			if elementSlice, ok := element.([]interface{}); ok {
+				flattened = append(flattened, elementSlice...)
+			} else if isSliceType(element) {
+				reflectFlat := []interface{}{}
+				v := reflect.ValueOf(element)
+				for i := 0; i < v.Len(); i++ {
+					reflectFlat = append(reflectFlat, v.Index(i).Interface())
+				}
+				flattened = append(flattened, reflectFlat...)
+			} else {
+				flattened = append(flattened, element)
+			}
+		}
+		return flattened, nil
+	case ASTIdentity, ASTCurrentNode:
+		return value, nil
+	case ASTIndex:
+		if sliceType, ok := value.([]interface{}); ok {
+			index := node.value.(int)
+			if index < 0 {
+				index += len(sliceType)
+			}
+			if index < len(sliceType) && index >= 0 {
+				return sliceType[index], nil
+			}
+			return nil, nil
+		}
+		// Otherwise try via reflection.
+		rv := reflect.ValueOf(value)
+		if rv.Kind() == reflect.Slice {
+			index := node.value.(int)
+			if index < 0 {
+				index += rv.Len()
+			}
+			if index < rv.Len() && index >= 0 {
+				v := rv.Index(index)
+				return v.Interface(), nil
+			}
+		}
+		return nil, nil
+	case ASTKeyValPair:
+		return intr.Execute(node.children[0], value)
+	case ASTLiteral:
+		return node.value, nil
+	case ASTMultiSelectHash:
+		if value == nil {
+			return nil, nil
+		}
+		collected := make(map[string]interface{})
+		for _, child := range node.children {
+			current, err := intr.Execute(child, value)
+			if err != nil {
+				return nil, err
+			}
+			key := child.value.(string)
+			collected[key] = current
+		}
+		return collected, nil
+	case ASTMultiSelectList:
+		if value == nil {
+			return nil, nil
+		}
+		collected := []interface{}{}
+		for _, child := range node.children {
+			current, err := intr.Execute(child, value)
+			if err != nil {
+				return nil, err
+			}
+			collected = append(collected, current)
+		}
+		return collected, nil
+	case ASTOrExpression:
+		matched, err := intr.Execute(node.children[0], value)
+		if err != nil {
+			return nil, err
+		}
+		if isFalse(matched) {
+			matched, err = intr.Execute(node.children[1], value)
+			if err != nil {
+				return nil, err
+			}
+		}
+		return matched, nil
+	case ASTAndExpression:
+		matched, err := intr.Execute(node.children[0], value)
+		if err != nil {
+			return nil, err
+		}
+		if isFalse(matched) {
+			return matched, nil
+		}
+		return intr.Execute(node.children[1], value)
+	case ASTNotExpression:
+		matched, err := intr.Execute(node.children[0], value)
+		if err != nil {
+			return nil, err
+		}
+		if isFalse(matched) {
+			return true, nil
+		}
+		return false, nil
+	case ASTPipe:
+		result := value
+		var err error
+		for _, child := range node.children {
+			result, err = intr.Execute(child, result)
+			if err != nil {
+				return nil, err
+			}
+		}
+		return result, nil
+	case ASTProjection:
+		left, err := intr.Execute(node.children[0], value)
+		if err != nil {
+			return nil, err
+		}
+		sliceType, ok := left.([]interface{})
+		if !ok {
+			if isSliceType(left) {
+				return intr.projectWithReflection(node, left)
+			}
+			return nil, nil
+		}
+		collected := []interface{}{}
+		var current interface{}
+		for _, element := range sliceType {
+			current, err = intr.Execute(node.children[1], element)
+			if err != nil {
+				return nil, err
+			}
+			if current != nil {
+				collected = append(collected, current)
+			}
+		}
+		return collected, nil
+	case ASTSubexpression, ASTIndexExpression:
+		left, err := intr.Execute(node.children[0], value)
+		if err != nil {
+			return nil, err
+		}
+		return intr.Execute(node.children[1], left)
+	case ASTSlice:
+		sliceType, ok := value.([]interface{})
+		if !ok {
+			if isSliceType(value) {
+				return intr.sliceWithReflection(node, value)
+			}
+			return nil, nil
+		}
+		parts := node.value.([]*int)
+		sliceParams := make([]sliceParam, 3)
+		for i, part := range parts {
+			if part != nil {
+				sliceParams[i].Specified = true
+				sliceParams[i].N = *part
+			}
+		}
+		return slice(sliceType, sliceParams)
+	case ASTValueProjection:
+		left, err := intr.Execute(node.children[0], value)
+		if err != nil {
+			return nil, nil
+		}
+		mapType, ok := left.(map[string]interface{})
+		if !ok {
+			return nil, nil
+		}
+		values := make([]interface{}, len(mapType))
+		for _, value := range mapType {
+			values = append(values, value)
+		}
+		collected := []interface{}{}
+		for _, element := range values {
+			current, err := intr.Execute(node.children[1], element)
+			if err != nil {
+				return nil, err
+			}
+			if current != nil {
+				collected = append(collected, current)
+			}
+		}
+		return collected, nil
+	}
+	return nil, errors.New("Unknown AST node: " + node.nodeType.String())
+}
+
+func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) {
+	rv := reflect.ValueOf(value)
+	first, n := utf8.DecodeRuneInString(key)
+	fieldName := string(unicode.ToUpper(first)) + key[n:]
+	if rv.Kind() == reflect.Struct {
+		v := rv.FieldByName(fieldName)
+		if !v.IsValid() {
+			return nil, nil
+		}
+		return v.Interface(), nil
+	} else if rv.Kind() == reflect.Ptr {
+		// Handle multiple levels of indirection?
+		if rv.IsNil() {
+			return nil, nil
+		}
+		rv = rv.Elem()
+		v := rv.FieldByName(fieldName)
+		if !v.IsValid() {
+			return nil, nil
+		}
+		return v.Interface(), nil
+	}
+	return nil, nil
+}
+
+func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) {
+	v := reflect.ValueOf(value)
+	flattened := []interface{}{}
+	for i := 0; i < v.Len(); i++ {
+		element := v.Index(i).Interface()
+		if reflect.TypeOf(element).Kind() == reflect.Slice {
+			// Then insert the contents of the element
+			// slice into the flattened slice,
+			// i.e flattened = append(flattened, mySlice...)
+			elementV := reflect.ValueOf(element)
+			for j := 0; j < elementV.Len(); j++ {
+				flattened = append(
+					flattened, elementV.Index(j).Interface())
+			}
+		} else {
+			flattened = append(flattened, element)
+		}
+	}
+	return flattened, nil
+}
+
+func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) {
+	v := reflect.ValueOf(value)
+	parts := node.value.([]*int)
+	sliceParams := make([]sliceParam, 3)
+	for i, part := range parts {
+		if part != nil {
+			sliceParams[i].Specified = true
+			sliceParams[i].N = *part
+		}
+	}
+	final := []interface{}{}
+	for i := 0; i < v.Len(); i++ {
+		element := v.Index(i).Interface()
+		final = append(final, element)
+	}
+	return slice(final, sliceParams)
+}
+
+func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) {
+	compareNode := node.children[2]
+	collected := []interface{}{}
+	v := reflect.ValueOf(value)
+	for i := 0; i < v.Len(); i++ {
+		element := v.Index(i).Interface()
+		result, err := intr.Execute(compareNode, element)
+		if err != nil {
+			return nil, err
+		}
+		if !isFalse(result) {
+			current, err := intr.Execute(node.children[1], element)
+			if err != nil {
+				return nil, err
+			}
+			if current != nil {
+				collected = append(collected, current)
+			}
+		}
+	}
+	return collected, nil
+}
+
+func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) {
+	collected := []interface{}{}
+	v := reflect.ValueOf(value)
+	for i := 0; i < v.Len(); i++ {
+		element := v.Index(i).Interface()
+		result, err := intr.Execute(node.children[1], element)
+		if err != nil {
+			return nil, err
+		}
+		if result != nil {
+			collected = append(collected, result)
+		}
+	}
+	return collected, nil
+}

+ 420 - 0
vendor/src/github.com/jmespath/go-jmespath/lexer.go

@@ -0,0 +1,420 @@
+package jmespath
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"strconv"
+	"strings"
+	"unicode/utf8"
+)
+
// token is a single lexical element produced by the Lexer, recording
// its type, raw value, and location within the expression.
type token struct {
	tokenType tokType
	value     string
	position  int
	length    int
}

// tokType identifies the kind of a lexed token; see the constants below.
type tokType int

// eof is the sentinel rune returned by Lexer.next at end of input.
const eof = -1

// Lexer contains information about the expression being tokenized.
type Lexer struct {
	expression string       // The expression provided by the user.
	currentPos int          // The current position in the string.
	lastWidth  int          // The byte width of the current rune; lets back() rewind exactly one rune.
	buf        bytes.Buffer // Internal buffer used for building up values.
}
+
// SyntaxError is the main error used whenever a lexing or parsing error occurs.
type SyntaxError struct {
	msg        string // Error message displayed to user
	Expression string // Expression that generated a SyntaxError
	Offset     int    // The location in the string where the error occurred
}

// Error implements the error interface for SyntaxError.
func (e SyntaxError) Error() string {
	// In the future, it would be good to underline the specific
	// location where the error occurred.
	return "SyntaxError: " + e.msg
}

// HighlightLocation will show where the syntax error occurred.
// It will place a "^" character on a line below the expression
// at the point where the syntax error occurred.
func (e SyntaxError) HighlightLocation() string {
	return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^"
}
+
+//go:generate stringer -type=tokType
+const (
+	tUnknown tokType = iota
+	tStar
+	tDot
+	tFilter
+	tFlatten
+	tLparen
+	tRparen
+	tLbracket
+	tRbracket
+	tLbrace
+	tRbrace
+	tOr
+	tPipe
+	tNumber
+	tUnquotedIdentifier
+	tQuotedIdentifier
+	tComma
+	tColon
+	tLT
+	tLTE
+	tGT
+	tGTE
+	tEQ
+	tNE
+	tJSONLiteral
+	tStringLiteral
+	tCurrent
+	tExpref
+	tAnd
+	tNot
+	tEOF
+)
+
+var basicTokens = map[rune]tokType{
+	'.': tDot,
+	'*': tStar,
+	',': tComma,
+	':': tColon,
+	'{': tLbrace,
+	'}': tRbrace,
+	']': tRbracket, // tLbracket not included because it could be "[]"
+	'(': tLparen,
+	')': tRparen,
+	'@': tCurrent,
+}
+
+// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64.
+// When using this bitmask just be sure to shift the rune down 64 bits
+// before checking against identifierStartBits.
+const identifierStartBits uint64 = 576460745995190270
+
+// Bit mask for [a-zA-Z0-9], 128 bits -> 2 uint64s.
+var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270}
+
+var whiteSpace = map[rune]bool{
+	' ': true, '\t': true, '\n': true, '\r': true,
+}
+
// String implements fmt.Stringer for tokens, for use in debug output.
func (t token) String() string {
	return fmt.Sprintf("Token{%+v, %s, %d, %d}",
		t.tokenType, t.value, t.position, t.length)
}

// NewLexer creates a new JMESPath lexer.
func NewLexer() *Lexer {
	lexer := Lexer{}
	return &lexer
}
+
// next returns the next rune in the expression and advances the
// position, or eof when the input is exhausted. The rune's byte width
// is remembered so back() can undo exactly one call.
func (lexer *Lexer) next() rune {
	if lexer.currentPos >= len(lexer.expression) {
		lexer.lastWidth = 0
		return eof
	}
	r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:])
	lexer.lastWidth = w
	lexer.currentPos += w
	return r
}

// back rewinds the position by the width of the most recently read
// rune. It is only valid once per call to next().
func (lexer *Lexer) back() {
	lexer.currentPos -= lexer.lastWidth
}

// peek returns the next rune without consuming it.
func (lexer *Lexer) peek() rune {
	t := lexer.next()
	lexer.back()
	return t
}
+
// tokenize takes an expression and returns corresponding tokens.
// A trailing tEOF token is always appended on success.
func (lexer *Lexer) tokenize(expression string) ([]token, error) {
	var tokens []token
	lexer.expression = expression
	lexer.currentPos = 0
	lexer.lastWidth = 0
loop:
	for {
		r := lexer.next()
		// Shift r down by 64 so it can be tested against the
		// [a-zA-Z_] identifier-start bit mask.
		if identifierStartBits&(1<<(uint64(r)-64)) > 0 {
			t := lexer.consumeUnquotedIdentifier()
			tokens = append(tokens, t)
		} else if val, ok := basicTokens[r]; ok {
			// Basic single char token.
			t := token{
				tokenType: val,
				value:     string(r),
				position:  lexer.currentPos - lexer.lastWidth,
				length:    1,
			}
			tokens = append(tokens, t)
		} else if r == '-' || (r >= '0' && r <= '9') {
			t := lexer.consumeNumber()
			tokens = append(tokens, t)
		} else if r == '[' {
			// "[" needs lookahead: it may start "[?", "[]" or "[".
			t := lexer.consumeLBracket()
			tokens = append(tokens, t)
		} else if r == '"' {
			t, err := lexer.consumeQuotedIdentifier()
			if err != nil {
				return tokens, err
			}
			tokens = append(tokens, t)
		} else if r == '\'' {
			t, err := lexer.consumeRawStringLiteral()
			if err != nil {
				return tokens, err
			}
			tokens = append(tokens, t)
		} else if r == '`' {
			t, err := lexer.consumeLiteral()
			if err != nil {
				return tokens, err
			}
			tokens = append(tokens, t)
		} else if r == '|' {
			t := lexer.matchOrElse(r, '|', tOr, tPipe)
			tokens = append(tokens, t)
		} else if r == '<' {
			t := lexer.matchOrElse(r, '=', tLTE, tLT)
			tokens = append(tokens, t)
		} else if r == '>' {
			t := lexer.matchOrElse(r, '=', tGTE, tGT)
			tokens = append(tokens, t)
		} else if r == '!' {
			t := lexer.matchOrElse(r, '=', tNE, tNot)
			tokens = append(tokens, t)
		} else if r == '=' {
			t := lexer.matchOrElse(r, '=', tEQ, tUnknown)
			tokens = append(tokens, t)
		} else if r == '&' {
			t := lexer.matchOrElse(r, '&', tAnd, tExpref)
			tokens = append(tokens, t)
		} else if r == eof {
			break loop
		} else if _, ok := whiteSpace[r]; ok {
			// Ignore whitespace
		} else {
			return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r)))
		}
	}
	tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0})
	return tokens, nil
}
+
// Consume characters until the ending rune "r" is reached.
// If the end of the expression is reached before seeing the
// terminating rune "r", then an error is returned.
// If no error occurs then the matching substring is returned.
// The returned string will not include the ending rune.
func (lexer *Lexer) consumeUntil(end rune) (string, error) {
	start := lexer.currentPos
	current := lexer.next()
	for current != end && current != eof {
		if current == '\\' && lexer.peek() != eof {
			// Skip the rune following a backslash so an escaped
			// delimiter does not terminate the scan.
			lexer.next()
		}
		current = lexer.next()
	}
	if lexer.lastWidth == 0 {
		// Then we hit an EOF so we never reached the closing
		// delimiter.
		return "", SyntaxError{
			msg:        "Unclosed delimiter: " + string(end),
			Expression: lexer.expression,
			Offset:     len(lexer.expression),
		}
	}
	return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil
}
+
// consumeLiteral reads a backtick-delimited JSON literal, un-escaping
// any "\`" sequences in the raw text. The payload is parsed as JSON
// later, by the parser.
func (lexer *Lexer) consumeLiteral() (token, error) {
	start := lexer.currentPos
	value, err := lexer.consumeUntil('`')
	if err != nil {
		return token{}, err
	}
	value = strings.Replace(value, "\\`", "`", -1)
	return token{
		tokenType: tJSONLiteral,
		value:     value,
		position:  start,
		length:    len(value),
	}, nil
}
+
// consumeRawStringLiteral reads a single-quoted raw string literal,
// un-escaping any \' sequences via the lexer's internal buffer.
func (lexer *Lexer) consumeRawStringLiteral() (token, error) {
	start := lexer.currentPos
	currentIndex := start
	current := lexer.next()
	for current != '\'' && lexer.peek() != eof {
		if current == '\\' && lexer.peek() == '\'' {
			// Flush the chunk before the escape, write a literal
			// quote, and continue scanning after the escape sequence.
			chunk := lexer.expression[currentIndex : lexer.currentPos-1]
			lexer.buf.WriteString(chunk)
			lexer.buf.WriteString("'")
			lexer.next()
			currentIndex = lexer.currentPos
		}
		current = lexer.next()
	}
	if lexer.lastWidth == 0 {
		// Then we hit an EOF so we never reached the closing
		// delimiter.
		return token{}, SyntaxError{
			msg:        "Unclosed delimiter: '",
			Expression: lexer.expression,
			Offset:     len(lexer.expression),
		}
	}
	if currentIndex < lexer.currentPos {
		// Flush the remaining tail (everything after the last escape).
		lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1])
	}
	value := lexer.buf.String()
	// Reset the buffer so it can be reused again.
	lexer.buf.Reset()
	return token{
		tokenType: tStringLiteral,
		value:     value,
		position:  start,
		length:    len(value),
	}, nil
}
+
// syntaxError builds a SyntaxError pointing at the rune most recently
// consumed by the lexer.
func (lexer *Lexer) syntaxError(msg string) SyntaxError {
	return SyntaxError{
		msg:        msg,
		Expression: lexer.expression,
		Offset:     lexer.currentPos - 1,
	}
}
+
+// Checks for a two char token, otherwise matches a single character
+// token. This is used whenever a two char token overlaps a single
+// char token, e.g. "||" -> tPipe, "|" -> tOr.
+func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token {
+	start := lexer.currentPos - lexer.lastWidth
+	nextRune := lexer.next()
+	var t token
+	if nextRune == second {
+		t = token{
+			tokenType: matchedType,
+			value:     string(first) + string(second),
+			position:  start,
+			length:    2,
+		}
+	} else {
+		lexer.back()
+		t = token{
+			tokenType: singleCharType,
+			value:     string(first),
+			position:  start,
+			length:    1,
+		}
+	}
+	return t
+}
+
+func (lexer *Lexer) consumeLBracket() token {
+	// There's three options here:
+	// 1. A filter expression "[?"
+	// 2. A flatten operator "[]"
+	// 3. A bare rbracket "["
+	start := lexer.currentPos - lexer.lastWidth
+	nextRune := lexer.next()
+	var t token
+	if nextRune == '?' {
+		t = token{
+			tokenType: tFilter,
+			value:     "[?",
+			position:  start,
+			length:    2,
+		}
+	} else if nextRune == ']' {
+		t = token{
+			tokenType: tFlatten,
+			value:     "[]",
+			position:  start,
+			length:    2,
+		}
+	} else {
+		t = token{
+			tokenType: tLbracket,
+			value:     "[",
+			position:  start,
+			length:    1,
+		}
+		lexer.back()
+	}
+	return t
+}
+
+func (lexer *Lexer) consumeQuotedIdentifier() (token, error) {
+	start := lexer.currentPos
+	value, err := lexer.consumeUntil('"')
+	if err != nil {
+		return token{}, err
+	}
+	var decoded string
+	asJSON := []byte("\"" + value + "\"")
+	if err := json.Unmarshal([]byte(asJSON), &decoded); err != nil {
+		return token{}, err
+	}
+	return token{
+		tokenType: tQuotedIdentifier,
+		value:     decoded,
+		position:  start - 1,
+		length:    len(decoded),
+	}, nil
+}
+
+func (lexer *Lexer) consumeUnquotedIdentifier() token {
+	// Consume runes until we reach the end of an unquoted
+	// identifier.
+	start := lexer.currentPos - lexer.lastWidth
+	for {
+		r := lexer.next()
+		if r < 0 || r > 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 {
+			lexer.back()
+			break
+		}
+	}
+	value := lexer.expression[start:lexer.currentPos]
+	return token{
+		tokenType: tUnquotedIdentifier,
+		value:     value,
+		position:  start,
+		length:    lexer.currentPos - start,
+	}
+}
+
+func (lexer *Lexer) consumeNumber() token {
+	// Consume runes until we reach something that's not a number.
+	start := lexer.currentPos - lexer.lastWidth
+	for {
+		r := lexer.next()
+		if r < '0' || r > '9' {
+			lexer.back()
+			break
+		}
+	}
+	value := lexer.expression[start:lexer.currentPos]
+	return token{
+		tokenType: tNumber,
+		value:     value,
+		position:  start,
+		length:    lexer.currentPos - start,
+	}
+}

+ 603 - 0
vendor/src/github.com/jmespath/go-jmespath/parser.go

@@ -0,0 +1,603 @@
+package jmespath
+
+import (
+	"encoding/json"
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+type astNodeType int
+
+//go:generate stringer -type astNodeType
+const (
+	ASTEmpty astNodeType = iota
+	ASTComparator
+	ASTCurrentNode
+	ASTExpRef
+	ASTFunctionExpression
+	ASTField
+	ASTFilterProjection
+	ASTFlatten
+	ASTIdentity
+	ASTIndex
+	ASTIndexExpression
+	ASTKeyValPair
+	ASTLiteral
+	ASTMultiSelectHash
+	ASTMultiSelectList
+	ASTOrExpression
+	ASTAndExpression
+	ASTNotExpression
+	ASTPipe
+	ASTProjection
+	ASTSubexpression
+	ASTSlice
+	ASTValueProjection
+)
+
+// ASTNode represents the abstract syntax tree of a JMESPath expression.
+type ASTNode struct {
+	nodeType astNodeType
+	value    interface{}
+	children []ASTNode
+}
+
+func (node ASTNode) String() string {
+	return node.PrettyPrint(0)
+}
+
+// PrettyPrint will pretty print the parsed AST.
+// The AST is an implementation detail and this pretty print
+// function is provided as a convenience method to help with
+// debugging.  You should not rely on its output as the internal
+// structure of the AST may change at any time.
+func (node ASTNode) PrettyPrint(indent int) string {
+	spaces := strings.Repeat(" ", indent)
+	output := fmt.Sprintf("%s%s {\n", spaces, node.nodeType)
+	nextIndent := indent + 2
+	if node.value != nil {
+		if converted, ok := node.value.(fmt.Stringer); ok {
+			// Account for things like comparator nodes
+			// that are enums with a String() method.
+			output += fmt.Sprintf("%svalue: %s\n", strings.Repeat(" ", nextIndent), converted.String())
+		} else {
+			output += fmt.Sprintf("%svalue: %#v\n", strings.Repeat(" ", nextIndent), node.value)
+		}
+	}
+	lastIndex := len(node.children)
+	if lastIndex > 0 {
+		output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent))
+		childIndent := nextIndent + 2
+		for _, elem := range node.children {
+			output += elem.PrettyPrint(childIndent)
+		}
+	}
+	output += fmt.Sprintf("%s}\n", spaces)
+	return output
+}
+
+var bindingPowers = map[tokType]int{
+	tEOF:                0,
+	tUnquotedIdentifier: 0,
+	tQuotedIdentifier:   0,
+	tRbracket:           0,
+	tRparen:             0,
+	tComma:              0,
+	tRbrace:             0,
+	tNumber:             0,
+	tCurrent:            0,
+	tExpref:             0,
+	tColon:              0,
+	tPipe:               1,
+	tOr:                 2,
+	tAnd:                3,
+	tEQ:                 5,
+	tLT:                 5,
+	tLTE:                5,
+	tGT:                 5,
+	tGTE:                5,
+	tNE:                 5,
+	tFlatten:            9,
+	tStar:               20,
+	tFilter:             21,
+	tDot:                40,
+	tNot:                45,
+	tLbrace:             50,
+	tLbracket:           55,
+	tLparen:             60,
+}
+
// Parser holds state about the current expression being parsed.
type Parser struct {
	expression string  // The raw expression text (kept for error reporting).
	tokens     []token // Token stream produced by the lexer.
	index      int     // Index of the current token in tokens.
}

// NewParser creates a new JMESPath parser.
func NewParser() *Parser {
	p := Parser{}
	return &p
}
+
+// Parse will compile a JMESPath expression.
+func (p *Parser) Parse(expression string) (ASTNode, error) {
+	lexer := NewLexer()
+	p.expression = expression
+	p.index = 0
+	tokens, err := lexer.tokenize(expression)
+	if err != nil {
+		return ASTNode{}, err
+	}
+	p.tokens = tokens
+	parsed, err := p.parseExpression(0)
+	if err != nil {
+		return ASTNode{}, err
+	}
+	if p.current() != tEOF {
+		return ASTNode{}, p.syntaxError(fmt.Sprintf(
+			"Unexpected token at the end of the expresssion: %s", p.current()))
+	}
+	return parsed, nil
+}
+
// parseExpression is the core Pratt (top-down operator precedence)
// loop: parse a prefix expression (nud), then keep folding in infix
// operators (led) while their binding power exceeds bindingPower.
func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) {
	var err error
	leftToken := p.lookaheadToken(0)
	p.advance()
	leftNode, err := p.nud(leftToken)
	if err != nil {
		return ASTNode{}, err
	}
	currentToken := p.current()
	for bindingPower < bindingPowers[currentToken] {
		p.advance()
		leftNode, err = p.led(currentToken, leftNode)
		if err != nil {
			return ASTNode{}, err
		}
		currentToken = p.current()
	}
	return leftNode, nil
}
+
// parseIndexExpression parses the contents of "[...]": either a slice
// (detected by a colon in the first two lookahead tokens) or a single
// numeric index followed by the closing bracket.
func (p *Parser) parseIndexExpression() (ASTNode, error) {
	if p.lookahead(0) == tColon || p.lookahead(1) == tColon {
		return p.parseSliceExpression()
	}
	indexStr := p.lookaheadToken(0).value
	parsedInt, err := strconv.Atoi(indexStr)
	if err != nil {
		return ASTNode{}, err
	}
	indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt}
	p.advance()
	if err := p.match(tRbracket); err != nil {
		return ASTNode{}, err
	}
	return indexNode, nil
}
+
// parseSliceExpression parses a python-style [start:stop:step] slice.
// Unspecified parts stay nil in the resulting []*int value; the
// interpreter fills in their defaults later.
func (p *Parser) parseSliceExpression() (ASTNode, error) {
	parts := []*int{nil, nil, nil}
	index := 0
	current := p.current()
	for current != tRbracket && index < 3 {
		if current == tColon {
			index++
			p.advance()
		} else if current == tNumber {
			parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value)
			if err != nil {
				return ASTNode{}, err
			}
			parts[index] = &parsedInt
			p.advance()
		} else {
			return ASTNode{}, p.syntaxError(
				"Expected tColon or tNumber" + ", received: " + p.current().String())
		}
		current = p.current()
	}
	if err := p.match(tRbracket); err != nil {
		return ASTNode{}, err
	}
	return ASTNode{
		nodeType: ASTSlice,
		value:    parts,
	}, nil
}
+
+func (p *Parser) match(tokenType tokType) error {
+	if p.current() == tokenType {
+		p.advance()
+		return nil
+	}
+	return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String())
+}
+
// led computes the "left denotation" of a token: how an infix/postfix
// token combines the already-parsed left-hand node with what follows.
func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) {
	switch tokenType {
	case tDot:
		// "a.b" is a subexpression; "a.*" is a value projection.
		if p.current() != tStar {
			right, err := p.parseDotRHS(bindingPowers[tDot])
			return ASTNode{
				nodeType: ASTSubexpression,
				children: []ASTNode{node, right},
			}, err
		}
		p.advance()
		right, err := p.parseProjectionRHS(bindingPowers[tDot])
		return ASTNode{
			nodeType: ASTValueProjection,
			children: []ASTNode{node, right},
		}, err
	case tPipe:
		right, err := p.parseExpression(bindingPowers[tPipe])
		return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err
	case tOr:
		right, err := p.parseExpression(bindingPowers[tOr])
		return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err
	case tAnd:
		right, err := p.parseExpression(bindingPowers[tAnd])
		return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err
	case tLparen:
		// Function call: the left node's value is the function name.
		name := node.value
		var args []ASTNode
		for p.current() != tRparen {
			expression, err := p.parseExpression(0)
			if err != nil {
				return ASTNode{}, err
			}
			if p.current() == tComma {
				if err := p.match(tComma); err != nil {
					return ASTNode{}, err
				}
			}
			args = append(args, expression)
		}
		if err := p.match(tRparen); err != nil {
			return ASTNode{}, err
		}
		return ASTNode{
			nodeType: ASTFunctionExpression,
			value:    name,
			children: args,
		}, nil
	case tFilter:
		return p.parseFilter(node)
	case tFlatten:
		left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}}
		right, err := p.parseProjectionRHS(bindingPowers[tFlatten])
		return ASTNode{
			nodeType: ASTProjection,
			children: []ASTNode{left, right},
		}, err
	case tEQ, tNE, tGT, tGTE, tLT, tLTE:
		right, err := p.parseExpression(bindingPowers[tokenType])
		if err != nil {
			return ASTNode{}, err
		}
		return ASTNode{
			nodeType: ASTComparator,
			value:    tokenType,
			children: []ASTNode{node, right},
		}, nil
	case tLbracket:
		tokenType := p.current()
		var right ASTNode
		var err error
		if tokenType == tNumber || tokenType == tColon {
			right, err = p.parseIndexExpression()
			if err != nil {
				return ASTNode{}, err
			}
			return p.projectIfSlice(node, right)
		}
		// Otherwise this is a projection.
		if err := p.match(tStar); err != nil {
			return ASTNode{}, err
		}
		if err := p.match(tRbracket); err != nil {
			return ASTNode{}, err
		}
		right, err = p.parseProjectionRHS(bindingPowers[tStar])
		if err != nil {
			return ASTNode{}, err
		}
		return ASTNode{
			nodeType: ASTProjection,
			children: []ASTNode{node, right},
		}, nil
	}
	return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String())
}
+
+func (p *Parser) nud(token token) (ASTNode, error) {
+	switch token.tokenType {
+	case tJSONLiteral:
+		var parsed interface{}
+		err := json.Unmarshal([]byte(token.value), &parsed)
+		if err != nil {
+			return ASTNode{}, err
+		}
+		return ASTNode{nodeType: ASTLiteral, value: parsed}, nil
+	case tStringLiteral:
+		return ASTNode{nodeType: ASTLiteral, value: token.value}, nil
+	case tUnquotedIdentifier:
+		return ASTNode{
+			nodeType: ASTField,
+			value:    token.value,
+		}, nil
+	case tQuotedIdentifier:
+		node := ASTNode{nodeType: ASTField, value: token.value}
+		if p.current() == tLparen {
+			return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token)
+		}
+		return node, nil
+	case tStar:
+		left := ASTNode{nodeType: ASTIdentity}
+		var right ASTNode
+		var err error
+		if p.current() == tRbracket {
+			right = ASTNode{nodeType: ASTIdentity}
+		} else {
+			right, err = p.parseProjectionRHS(bindingPowers[tStar])
+		}
+		return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, right}}, err
+	case tFilter:
+		return p.parseFilter(ASTNode{nodeType: ASTIdentity})
+	case tLbrace:
+		return p.parseMultiSelectHash()
+	case tFlatten:
+		left := ASTNode{
+			nodeType: ASTFlatten,
+			children: []ASTNode{{nodeType: ASTIdentity}},
+		}
+		right, err := p.parseProjectionRHS(bindingPowers[tFlatten])
+		if err != nil {
+			return ASTNode{}, err
+		}
+		return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil
+	case tLbracket:
+		tokenType := p.current()
+		//var right ASTNode
+		if tokenType == tNumber || tokenType == tColon {
+			right, err := p.parseIndexExpression()
+			if err != nil {
+				return ASTNode{}, nil
+			}
+			return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right)
+		} else if tokenType == tStar && p.lookahead(1) == tRbracket {
+			p.advance()
+			p.advance()
+			right, err := p.parseProjectionRHS(bindingPowers[tStar])
+			if err != nil {
+				return ASTNode{}, err
+			}
+			return ASTNode{
+				nodeType: ASTProjection,
+				children: []ASTNode{{nodeType: ASTIdentity}, right},
+			}, nil
+		} else {
+			return p.parseMultiSelectList()
+		}
+	case tCurrent:
+		return ASTNode{nodeType: ASTCurrentNode}, nil
+	case tExpref:
+		expression, err := p.parseExpression(bindingPowers[tExpref])
+		if err != nil {
+			return ASTNode{}, err
+		}
+		return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil
+	case tNot:
+		expression, err := p.parseExpression(bindingPowers[tNot])
+		if err != nil {
+			return ASTNode{}, err
+		}
+		return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil
+	case tLparen:
+		expression, err := p.parseExpression(0)
+		if err != nil {
+			return ASTNode{}, err
+		}
+		if err := p.match(tRparen); err != nil {
+			return ASTNode{}, err
+		}
+		return expression, nil
+	case tEOF:
+		return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token)
+	}
+
+	return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token)
+}
+
// parseMultiSelectList parses a comma-separated list of expressions
// terminated by a right bracket, e.g. the "[a, b]" form.
func (p *Parser) parseMultiSelectList() (ASTNode, error) {
	var expressions []ASTNode
	for {
		expression, err := p.parseExpression(0)
		if err != nil {
			return ASTNode{}, err
		}
		expressions = append(expressions, expression)
		if p.current() == tRbracket {
			break
		}
		err = p.match(tComma)
		if err != nil {
			return ASTNode{}, err
		}
	}
	err := p.match(tRbracket)
	if err != nil {
		return ASTNode{}, err
	}
	return ASTNode{
		nodeType: ASTMultiSelectList,
		children: expressions,
	}, nil
}
+
+func (p *Parser) parseMultiSelectHash() (ASTNode, error) {
+	var children []ASTNode
+	for {
+		keyToken := p.lookaheadToken(0)
+		if err := p.match(tUnquotedIdentifier); err != nil {
+			if err := p.match(tQuotedIdentifier); err != nil {
+				return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier")
+			}
+		}
+		keyName := keyToken.value
+		err := p.match(tColon)
+		if err != nil {
+			return ASTNode{}, err
+		}
+		value, err := p.parseExpression(0)
+		if err != nil {
+			return ASTNode{}, err
+		}
+		node := ASTNode{
+			nodeType: ASTKeyValPair,
+			value:    keyName,
+			children: []ASTNode{value},
+		}
+		children = append(children, node)
+		if p.current() == tComma {
+			err := p.match(tComma)
+			if err != nil {
+				return ASTNode{}, nil
+			}
+		} else if p.current() == tRbrace {
+			err := p.match(tRbrace)
+			if err != nil {
+				return ASTNode{}, nil
+			}
+			break
+		}
+	}
+	return ASTNode{
+		nodeType: ASTMultiSelectHash,
+		children: children,
+	}, nil
+}
+
// projectIfSlice wraps left/right in an index expression. If the RHS
// is a slice, the result is additionally wrapped in a projection so
// that subsequent expressions apply to every element of the slice.
func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) {
	indexExpr := ASTNode{
		nodeType: ASTIndexExpression,
		children: []ASTNode{left, right},
	}
	if right.nodeType == ASTSlice {
		right, err := p.parseProjectionRHS(bindingPowers[tStar])
		return ASTNode{
			nodeType: ASTProjection,
			children: []ASTNode{indexExpr, right},
		}, err
	}
	return indexExpr, nil
}
// parseFilter parses a "[?condition]" filter projection applied to
// node. The resulting children are {node, projected RHS, condition}.
func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) {
	var right, condition ASTNode
	var err error
	condition, err = p.parseExpression(0)
	if err != nil {
		return ASTNode{}, err
	}
	if err := p.match(tRbracket); err != nil {
		return ASTNode{}, err
	}
	if p.current() == tFlatten {
		right = ASTNode{nodeType: ASTIdentity}
	} else {
		right, err = p.parseProjectionRHS(bindingPowers[tFilter])
		if err != nil {
			return ASTNode{}, err
		}
	}

	return ASTNode{
		nodeType: ASTFilterProjection,
		children: []ASTNode{node, right, condition},
	}, nil
}
+
// parseDotRHS parses what may legally follow a dot: an identifier, a
// star, a multi-select list ("[...]") or a multi-select hash ("{...}").
func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) {
	lookahead := p.current()
	if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) {
		return p.parseExpression(bindingPower)
	} else if lookahead == tLbracket {
		if err := p.match(tLbracket); err != nil {
			return ASTNode{}, err
		}
		return p.parseMultiSelectList()
	} else if lookahead == tLbrace {
		if err := p.match(tLbrace); err != nil {
			return ASTNode{}, err
		}
		return p.parseMultiSelectHash()
	}
	return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace")
}
+
// parseProjectionRHS parses the right-hand side of a projection.
// A low-binding-power token (< 10) ends the projection, yielding an
// identity node; otherwise the RHS continues via bracket, filter, or
// dot forms.
func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) {
	current := p.current()
	if bindingPowers[current] < 10 {
		return ASTNode{nodeType: ASTIdentity}, nil
	} else if current == tLbracket {
		return p.parseExpression(bindingPower)
	} else if current == tFilter {
		return p.parseExpression(bindingPower)
	} else if current == tDot {
		err := p.match(tDot)
		if err != nil {
			return ASTNode{}, err
		}
		return p.parseDotRHS(bindingPower)
	} else {
		// NOTE(review): "Error" is an unhelpful message; consider
		// something like "Expected Lbracket, Filter, or Dot".
		return ASTNode{}, p.syntaxError("Error")
	}
}
+
// lookahead returns the type of the token `number` positions ahead.
func (p *Parser) lookahead(number int) tokType {
	return p.lookaheadToken(number).tokenType
}

// current returns the type of the current (not yet consumed) token.
func (p *Parser) current() tokType {
	return p.lookahead(0)
}

// lookaheadToken returns the token `number` positions ahead. The token
// stream always ends in tEOF, so in-grammar lookups stay in range.
func (p *Parser) lookaheadToken(number int) token {
	return p.tokens[p.index+number]
}

// advance consumes the current token.
func (p *Parser) advance() {
	p.index++
}
+
+func tokensOneOf(elements []tokType, token tokType) bool {
+	for _, elem := range elements {
+		if elem == token {
+			return true
+		}
+	}
+	return false
+}
+
// syntaxError builds a SyntaxError anchored at the current lookahead
// token's position in the original expression.
func (p *Parser) syntaxError(msg string) SyntaxError {
	return SyntaxError{
		msg:        msg,
		Expression: p.expression,
		Offset:     p.lookaheadToken(0).position,
	}
}

// Create a SyntaxError based on the provided token.
// This differs from syntaxError() which creates a SyntaxError
// based on the current lookahead token.
func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError {
	return SyntaxError{
		msg:        msg,
		Expression: p.expression,
		Offset:     t.position,
	}
}

+ 16 - 0
vendor/src/github.com/jmespath/go-jmespath/toktype_string.go

@@ -0,0 +1,16 @@
+// generated by stringer -type=tokType; DO NOT EDIT
+
+package jmespath
+
+import "fmt"
+
+const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF"
+
+var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214}
+
+func (i tokType) String() string {
+	if i < 0 || i >= tokType(len(_tokType_index)-1) {
+		return fmt.Sprintf("tokType(%d)", i)
+	}
+	return _tokType_name[_tokType_index[i]:_tokType_index[i+1]]
+}

+ 185 - 0
vendor/src/github.com/jmespath/go-jmespath/util.go

@@ -0,0 +1,185 @@
+package jmespath
+
+import (
+	"errors"
+	"reflect"
+)
+
// isFalse determines if an object is false based on the JMESPath spec:
// nil, the boolean false, and empty strings/arrays/hashes are false.
// For values outside those types, reflection handles arbitrary slices,
// maps, structs, and pointers.
func isFalse(value interface{}) bool {
	switch v := value.(type) {
	case nil:
		return true
	case bool:
		return v == false
	case string:
		return v == ""
	case []interface{}:
		return len(v) == 0
	case map[string]interface{}:
		return len(v) == 0
	}
	// Fall back to reflection for concrete slice/map/struct/pointer types.
	rv := reflect.ValueOf(value)
	switch rv.Kind() {
	case reflect.Struct:
		// A struct type will never be false, even if all of its
		// fields hold their zero values.
		return false
	case reflect.Slice, reflect.Map:
		return rv.Len() == 0
	case reflect.Ptr:
		if rv.IsNil() {
			return true
		}
		// Dereference the pointer and evaluate the pointee.
		return isFalse(rv.Elem().Interface())
	}
	return false
}
+
// ObjsEqual is a generic object equality check.
// It will take two arbitrary objects and recursively determine
// if they are equal, delegating to reflect.DeepEqual.
func objsEqual(left interface{}, right interface{}) bool {
	return reflect.DeepEqual(left, right)
}
+
// SliceParam refers to a single part of a slice.
// A slice consists of a start, a stop, and a step, similar to
// python slices.
type sliceParam struct {
	N         int  // The numeric value of this slice part.
	Specified bool // Whether the part was given explicitly (vs. defaulted).
}
+
// Slice supports [start:stop:step] style slicing that's supported in JMESPath.
// Bounds are normalized by computeSliceParams before elements are copied.
func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) {
	computed, err := computeSliceParams(len(slice), parts)
	if err != nil {
		return nil, err
	}
	start, stop, step := computed[0], computed[1], computed[2]
	result := []interface{}{}
	if step > 0 {
		for i := start; i < stop; i += step {
			result = append(result, slice[i])
		}
	} else {
		// Negative step walks backwards from start down to (not
		// including) stop.
		for i := start; i > stop; i += step {
			result = append(result, slice[i])
		}
	}
	return result, nil
}
+
+func computeSliceParams(length int, parts []sliceParam) ([]int, error) {
+	var start, stop, step int
+	if !parts[2].Specified {
+		step = 1
+	} else if parts[2].N == 0 {
+		return nil, errors.New("Invalid slice, step cannot be 0")
+	} else {
+		step = parts[2].N
+	}
+	var stepValueNegative bool
+	if step < 0 {
+		stepValueNegative = true
+	} else {
+		stepValueNegative = false
+	}
+
+	if !parts[0].Specified {
+		if stepValueNegative {
+			start = length - 1
+		} else {
+			start = 0
+		}
+	} else {
+		start = capSlice(length, parts[0].N, step)
+	}
+
+	if !parts[1].Specified {
+		if stepValueNegative {
+			stop = -1
+		} else {
+			stop = length
+		}
+	} else {
+		stop = capSlice(length, parts[1].N, step)
+	}
+	return []int{start, stop, step}, nil
+}
+
// capSlice normalizes one slice endpoint for a sequence of the given
// length: negative indexes are translated from the end, and values
// that remain out of range are clamped according to the sign of step.
func capSlice(length int, actual int, step int) int {
	if actual < 0 {
		actual += length
		if actual >= 0 {
			return actual
		}
		// Still out of range after translation; clamp by direction.
		if step < 0 {
			return -1
		}
		return 0
	}
	if actual >= length {
		if step < 0 {
			return length - 1
		}
		return length
	}
	return actual
}
+
// toArrayNum converts an empty-interface value to a []float64.
// It returns (nil, false) when the value is not a []interface{} or
// when any element is not a float64; otherwise (converted, true).
func toArrayNum(data interface{}) ([]float64, bool) {
	d, ok := data.([]interface{})
	if !ok {
		return nil, false
	}
	result := make([]float64, len(d))
	for i, el := range d {
		num, isNum := el.(float64)
		if !isNum {
			return nil, false
		}
		result[i] = num
	}
	return result, true
}
+
// toArrayStr converts an empty-interface value to a []string.
// It returns (nil, false) when the value is not a []interface{} or
// when any element is not a string; otherwise (converted, true).
func toArrayStr(data interface{}) ([]string, bool) {
	d, ok := data.([]interface{})
	if !ok {
		return nil, false
	}
	result := make([]string, len(d))
	for i, el := range d {
		s, isStr := el.(string)
		if !isStr {
			return nil, false
		}
		result[i] = s
	}
	return result, true
}
+
// isSliceType reports whether v's dynamic type is a slice.
// A nil interface is never a slice.
func isSliceType(v interface{}) bool {
	if v == nil {
		return false
	}
	kind := reflect.TypeOf(v).Kind()
	return kind == reflect.Slice
}

+ 0 - 14
vendor/src/github.com/vaughan0/go-ini/LICENSE

@@ -1,14 +0,0 @@
-Copyright (c) 2013 Vaughan Newton
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
-documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
-rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
-persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
-Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
-WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+ 0 - 70
vendor/src/github.com/vaughan0/go-ini/README.md

@@ -1,70 +0,0 @@
-go-ini
-======
-
-INI parsing library for Go (golang).
-
-View the API documentation [here](http://godoc.org/github.com/vaughan0/go-ini).
-
-Usage
------
-
-Parse an INI file:
-
-```go
-import "github.com/vaughan0/go-ini"
-
-file, err := ini.LoadFile("myfile.ini")
-```
-
-Get data from the parsed file:
-
-```go
-name, ok := file.Get("person", "name")
-if !ok {
-  panic("'name' variable missing from 'person' section")
-}
-```
-
-Iterate through values in a section:
-
-```go
-for key, value := range file["mysection"] {
-  fmt.Printf("%s => %s\n", key, value)
-}
-```
-
-Iterate through sections in a file:
-
-```go
-for name, section := range file {
-  fmt.Printf("Section name: %s\n", name)
-}
-```
-
-File Format
------------
-
-INI files are parsed by go-ini line-by-line. Each line may be one of the following:
-
-  * A section definition: [section-name]
-  * A property: key = value
-  * A comment: #blahblah _or_ ;blahblah
-  * Blank. The line will be ignored.
-
-Properties defined before any section headers are placed in the default section, which has
-the empty string as it's key.
-
-Example:
-
-```ini
-# I am a comment
-; So am I!
-
-[apples]
-colour = red or green
-shape = applish
-
-[oranges]
-shape = square
-colour = blue
-```

+ 0 - 123
vendor/src/github.com/vaughan0/go-ini/ini.go

@@ -1,123 +0,0 @@
-// Package ini provides functions for parsing INI configuration files.
-package ini
-
-import (
-	"bufio"
-	"fmt"
-	"io"
-	"os"
-	"regexp"
-	"strings"
-)
-
-var (
-	sectionRegex = regexp.MustCompile(`^\[(.*)\]$`)
-	assignRegex  = regexp.MustCompile(`^([^=]+)=(.*)$`)
-)
-
-// ErrSyntax is returned when there is a syntax error in an INI file.
-type ErrSyntax struct {
-	Line   int
-	Source string // The contents of the erroneous line, without leading or trailing whitespace
-}
-
-func (e ErrSyntax) Error() string {
-	return fmt.Sprintf("invalid INI syntax on line %d: %s", e.Line, e.Source)
-}
-
-// A File represents a parsed INI file.
-type File map[string]Section
-
-// A Section represents a single section of an INI file.
-type Section map[string]string
-
-// Returns a named Section. A Section will be created if one does not already exist for the given name.
-func (f File) Section(name string) Section {
-	section := f[name]
-	if section == nil {
-		section = make(Section)
-		f[name] = section
-	}
-	return section
-}
-
-// Looks up a value for a key in a section and returns that value, along with a boolean result similar to a map lookup.
-func (f File) Get(section, key string) (value string, ok bool) {
-	if s := f[section]; s != nil {
-		value, ok = s[key]
-	}
-	return
-}
-
-// Loads INI data from a reader and stores the data in the File.
-func (f File) Load(in io.Reader) (err error) {
-	bufin, ok := in.(*bufio.Reader)
-	if !ok {
-		bufin = bufio.NewReader(in)
-	}
-	return parseFile(bufin, f)
-}
-
-// Loads INI data from a named file and stores the data in the File.
-func (f File) LoadFile(file string) (err error) {
-	in, err := os.Open(file)
-	if err != nil {
-		return
-	}
-	defer in.Close()
-	return f.Load(in)
-}
-
-func parseFile(in *bufio.Reader, file File) (err error) {
-	section := ""
-	lineNum := 0
-	for done := false; !done; {
-		var line string
-		if line, err = in.ReadString('\n'); err != nil {
-			if err == io.EOF {
-				done = true
-			} else {
-				return
-			}
-		}
-		lineNum++
-		line = strings.TrimSpace(line)
-		if len(line) == 0 {
-			// Skip blank lines
-			continue
-		}
-		if line[0] == ';' || line[0] == '#' {
-			// Skip comments
-			continue
-		}
-
-		if groups := assignRegex.FindStringSubmatch(line); groups != nil {
-			key, val := groups[1], groups[2]
-			key, val = strings.TrimSpace(key), strings.TrimSpace(val)
-			file.Section(section)[key] = val
-		} else if groups := sectionRegex.FindStringSubmatch(line); groups != nil {
-			name := strings.TrimSpace(groups[1])
-			section = name
-			// Create the section if it does not exist
-			file.Section(section)
-		} else {
-			return ErrSyntax{lineNum, line}
-		}
-
-	}
-	return nil
-}
-
-// Loads and returns a File from a reader.
-func Load(in io.Reader) (File, error) {
-	file := make(File)
-	err := file.Load(in)
-	return file, err
-}
-
-// Loads and returns an INI File from a file on disk.
-func LoadFile(filename string) (File, error) {
-	file := make(File)
-	err := file.LoadFile(filename)
-	return file, err
-}

+ 0 - 2
vendor/src/github.com/vaughan0/go-ini/test.ini

@@ -1,2 +0,0 @@
-[default]
-stuff = things

Някои файлове не бяха показани, защото твърде много файлове са промени