Pārlūkot izejas kodu

Vendor AWS Go SDK

Signed-off-by: Samuel Karp <skarp@amazon.com>
Samuel Karp 10 gadi atpakaļ
vecāks
revīzija
85433365cc
41 mainītis faili ar 7369 papildinājumiem un 0 dzēšanām
  1. 4 0
      hack/vendor.sh
  2. 202 0
      vendor/src/github.com/aws/aws-sdk-go/LICENSE.txt
  3. 105 0
      vendor/src/github.com/aws/aws-sdk-go/aws/awserr/error.go
  4. 135 0
      vendor/src/github.com/aws/aws-sdk-go/aws/awserr/types.go
  5. 103 0
      vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
  6. 187 0
      vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
  7. 103 0
      vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
  8. 254 0
      vendor/src/github.com/aws/aws-sdk-go/aws/config.go
  9. 357 0
      vendor/src/github.com/aws/aws-sdk-go/aws/convutil.go
  10. 85 0
      vendor/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
  11. 220 0
      vendor/src/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
  12. 162 0
      vendor/src/github.com/aws/aws-sdk-go/aws/credentials/ec2_role_provider.go
  13. 73 0
      vendor/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
  14. 8 0
      vendor/src/github.com/aws/aws-sdk-go/aws/credentials/example.ini
  15. 135 0
      vendor/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
  16. 44 0
      vendor/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
  17. 157 0
      vendor/src/github.com/aws/aws-sdk-go/aws/handler_functions.go
  18. 85 0
      vendor/src/github.com/aws/aws-sdk-go/aws/handlers.go
  19. 89 0
      vendor/src/github.com/aws/aws-sdk-go/aws/logger.go
  20. 89 0
      vendor/src/github.com/aws/aws-sdk-go/aws/param_validator.go
  21. 312 0
      vendor/src/github.com/aws/aws-sdk-go/aws/request.go
  22. 194 0
      vendor/src/github.com/aws/aws-sdk-go/aws/service.go
  23. 55 0
      vendor/src/github.com/aws/aws-sdk-go/aws/types.go
  24. 8 0
      vendor/src/github.com/aws/aws-sdk-go/aws/version.go
  25. 31 0
      vendor/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints.go
  26. 77 0
      vendor/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints.json
  27. 89 0
      vendor/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints_map.go
  28. 199 0
      vendor/src/github.com/aws/aws-sdk-go/internal/protocol/json/jsonutil/build.go
  29. 214 0
      vendor/src/github.com/aws/aws-sdk-go/internal/protocol/json/jsonutil/unmarshal.go
  30. 98 0
      vendor/src/github.com/aws/aws-sdk-go/internal/protocol/jsonrpc/jsonrpc.go
  31. 212 0
      vendor/src/github.com/aws/aws-sdk-go/internal/protocol/rest/build.go
  32. 45 0
      vendor/src/github.com/aws/aws-sdk-go/internal/protocol/rest/payload.go
  33. 174 0
      vendor/src/github.com/aws/aws-sdk-go/internal/protocol/rest/unmarshal.go
  34. 364 0
      vendor/src/github.com/aws/aws-sdk-go/internal/signer/v4/v4.go
  35. 2293 0
      vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go
  36. 108 0
      vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchlogs/cloudwatchlogsiface/interface.go
  37. 90 0
      vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go
  38. 14 0
      vendor/src/github.com/vaughan0/go-ini/LICENSE
  39. 70 0
      vendor/src/github.com/vaughan0/go-ini/README.md
  40. 123 0
      vendor/src/github.com/vaughan0/go-ini/ini.go
  41. 2 0
      vendor/src/github.com/vaughan0/go-ini/test.ini

+ 4 - 0
hack/vendor.sh

@@ -58,4 +58,8 @@ clone git github.com/tinylib/msgp 75ee40d2601edf122ef667e2a07d600d4c44490c
 # fsnotify
 # fsnotify
 clone git gopkg.in/fsnotify.v1 v1.2.0
 clone git gopkg.in/fsnotify.v1 v1.2.0
 
 
+# awslogs deps
+clone git github.com/aws/aws-sdk-go v0.7.1
+clone git github.com/vaughan0/go-ini a98ad7ee00ec53921f08832bc06ecf7fd600e6a1
+
 clean
 clean

+ 202 - 0
vendor/src/github.com/aws/aws-sdk-go/LICENSE.txt

@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.

+ 105 - 0
vendor/src/github.com/aws/aws-sdk-go/aws/awserr/error.go

@@ -0,0 +1,105 @@
+// Package awserr represents API error interface accessors for the SDK.
+package awserr
+
+// An Error wraps lower level errors with code, message and an original error.
+// The underlying concrete error type may also satisfy other interfaces which
+// can be used to obtain more specific information about the error.
+//
+// Calling Error() or String() will always include the full information about
+// an error based on its underlying type.
+//
+// Example:
+//
+//     output, err := s3manage.Upload(svc, input, opts)
+//     if err != nil {
+//         if awsErr, ok := err.(awserr.Error); ok {
+//             // Get error details
+//             log.Println("Error:", err.Code(), err.Message())
+//
+//             // Prints out full error message, including original error if there was one.
+//             log.Println("Error:", err.Error())
+//
+//             // Get original error
+//             if origErr := err.Err(); origErr != nil {
+//                 // operate on original error.
+//             }
+//         } else {
+//             fmt.Println(err.Error())
+//         }
+//     }
+//
+type Error interface {
+	// Satisfy the generic error interface.
+	error
+
+	// Returns the short phrase depicting the classification of the error.
+	Code() string
+
+	// Returns the error details message.
+	Message() string
+
+	// Returns the original error if one was set.  Nil is returned if not set.
+	OrigErr() error
+}
+
+// New returns an Error object described by the code, message, and origErr.
+//
+// If origErr satisfies the Error interface it will not be wrapped within a new
+// Error object and will instead be returned.
+func New(code, message string, origErr error) Error {
+	if e, ok := origErr.(Error); ok && e != nil {
+		return e
+	}
+	return newBaseError(code, message, origErr)
+}
+
+// A RequestFailure is an interface to extract request failure information from
+// an Error such as the request ID of the failed request returned by a service.
+// RequestFailures may not always have a requestID value if the request failed
+// prior to reaching the service such as a connection error.
+//
+// Example:
+//
+//     output, err := s3manage.Upload(svc, input, opts)
+//     if err != nil {
+//         if reqerr, ok := err.(RequestFailure); ok {
+//             log.Printf("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID())
+//         } else {
+//             log.Printf("Error:", err.Error())
+//         }
+//     }
+//
+// Combined with awserr.Error:
+//
+//    output, err := s3manage.Upload(svc, input, opts)
+//    if err != nil {
+//        if awsErr, ok := err.(awserr.Error); ok {
+//            // Generic AWS Error with Code, Message, and original error (if any)
+//            fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+//
+//            if reqErr, ok := err.(awserr.RequestFailure); ok {
+//                // A service error occurred
+//                fmt.Println(reqErr.StatusCode(), reqErr.RequestID())
+//            }
+//        } else {
+//            fmt.Println(err.Error())
+//        }
+//    }
+//
+type RequestFailure interface {
+	Error
+
+	// The status code of the HTTP response.
+	StatusCode() int
+
+	// The request ID returned by the service for a request failure. This will
+	// be empty if no request ID is available such as the request failed due
+	// to a connection error.
+	RequestID() string
+}
+
+// NewRequestFailure returns a new request error wrapper for the given Error
+// provided.
+func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure {
+	return newRequestError(err, statusCode, reqID)
+}

+ 135 - 0
vendor/src/github.com/aws/aws-sdk-go/aws/awserr/types.go

@@ -0,0 +1,135 @@
+package awserr
+
+import "fmt"
+
+// SprintError returns a string of the formatted error code.
+//
+// Both extra and origErr are optional.  If they are included their lines
+// will be added, but if they are not included their lines will be ignored.
+func SprintError(code, message, extra string, origErr error) string {
+	msg := fmt.Sprintf("%s: %s", code, message)
+	if extra != "" {
+		msg = fmt.Sprintf("%s\n\t%s", msg, extra)
+	}
+	if origErr != nil {
+		msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error())
+	}
+	return msg
+}
+
+// A baseError wraps the code and message which defines an error. It also
+// can be used to wrap an original error object.
+//
+// Should be used as the root for errors satisfying the awserr.Error. Also
+// for any error which does not fit into a specific error wrapper type.
+type baseError struct {
+	// Classification of error
+	code string
+
+	// Detailed information about error
+	message string
+
+	// Optional original error this error is based off of. Allows building
+	// chained errors.
+	origErr error
+}
+
+// newBaseError returns an error object for the code, message, and err.
+//
+// code is a short no whitespace phrase depicting the classification of
+// the error that is being created.
+//
+// message is the free flow string containing detailed information about the error.
+//
+// origErr is the error object which will be nested under the new error to be returned.
+func newBaseError(code, message string, origErr error) *baseError {
+	return &baseError{
+		code:    code,
+		message: message,
+		origErr: origErr,
+	}
+}
+
+// Error returns the string representation of the error.
+//
+// See ErrorWithExtra for formatting.
+//
+// Satisfies the error interface.
+func (b baseError) Error() string {
+	return SprintError(b.code, b.message, "", b.origErr)
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (b baseError) String() string {
+	return b.Error()
+}
+
+// Code returns the short phrase depicting the classification of the error.
+func (b baseError) Code() string {
+	return b.code
+}
+
+// Message returns the error details message.
+func (b baseError) Message() string {
+	return b.message
+}
+
+// OrigErr returns the original error if one was set. Nil is returned if no error
+// was set.
+func (b baseError) OrigErr() error {
+	return b.origErr
+}
+
+// So that the Error interface type can be included as an anonymous field
+// in the requestError struct and not conflict with the error.Error() method.
+type awsError Error
+
+// A requestError wraps a request or service error.
+//
+// Composed of baseError for code, message, and original error.
+type requestError struct {
+	awsError
+	statusCode int
+	requestID  string
+}
+
+// newRequestError returns a wrapped error with additional information for request
+// status code, and service requestID.
+//
+// Should be used to wrap all request which involve service requests. Even if
+// the request failed without a service response, but had an HTTP status code
+// that may be meaningful.
+//
+// Also wraps original errors via the baseError.
+func newRequestError(err Error, statusCode int, requestID string) *requestError {
+	return &requestError{
+		awsError:   err,
+		statusCode: statusCode,
+		requestID:  requestID,
+	}
+}
+
+// Error returns the string representation of the error.
+// Satisfies the error interface.
+func (r requestError) Error() string {
+	extra := fmt.Sprintf("status code: %d, request id: [%s]",
+		r.statusCode, r.requestID)
+	return SprintError(r.Code(), r.Message(), extra, r.OrigErr())
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (r requestError) String() string {
+	return r.Error()
+}
+
+// StatusCode returns the wrapped status code for the error
+func (r requestError) StatusCode() int {
+	return r.statusCode
+}
+
+// RequestID returns the wrapped requestID
+func (r requestError) RequestID() string {
+	return r.requestID
+}

+ 103 - 0
vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go

@@ -0,0 +1,103 @@
+package awsutil
+
+import (
+	"io"
+	"reflect"
+)
+
+// Copy deeply copies a src structure to dst. Useful for copying request and
+// response structures.
+//
+// Can copy between structs of different type, but will only copy fields which
+// are assignable, and exist in both structs. Fields which are not assignable,
+// or do not exist in both structs are ignored.
+func Copy(dst, src interface{}) {
+	dstval := reflect.ValueOf(dst)
+	if !dstval.IsValid() {
+		panic("Copy dst cannot be nil")
+	}
+
+	rcopy(dstval, reflect.ValueOf(src), true)
+}
+
+// CopyOf returns a copy of src while also allocating the memory for dst.
+// src must be a pointer type or this operation will fail.
+func CopyOf(src interface{}) (dst interface{}) {
+	dsti := reflect.New(reflect.TypeOf(src).Elem())
+	dst = dsti.Interface()
+	rcopy(dsti, reflect.ValueOf(src), true)
+	return
+}
+
+// rcopy performs a recursive copy of values from the source to destination.
+//
+// root is used to skip certain aspects of the copy which are not valid
+// for the root node of a object.
+func rcopy(dst, src reflect.Value, root bool) {
+	if !src.IsValid() {
+		return
+	}
+
+	switch src.Kind() {
+	case reflect.Ptr:
+		if _, ok := src.Interface().(io.Reader); ok {
+			if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() {
+				dst.Elem().Set(src)
+			} else if dst.CanSet() {
+				dst.Set(src)
+			}
+		} else {
+			e := src.Type().Elem()
+			if dst.CanSet() && !src.IsNil() {
+				dst.Set(reflect.New(e))
+			}
+			if src.Elem().IsValid() {
+				// Keep the current root state since the depth hasn't changed
+				rcopy(dst.Elem(), src.Elem(), root)
+			}
+		}
+	case reflect.Struct:
+		if !root {
+			dst.Set(reflect.New(src.Type()).Elem())
+		}
+
+		t := dst.Type()
+		for i := 0; i < t.NumField(); i++ {
+			name := t.Field(i).Name
+			srcval := src.FieldByName(name)
+			if srcval.IsValid() {
+				rcopy(dst.FieldByName(name), srcval, false)
+			}
+		}
+	case reflect.Slice:
+		if src.IsNil() {
+			break
+		}
+
+		s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap())
+		dst.Set(s)
+		for i := 0; i < src.Len(); i++ {
+			rcopy(dst.Index(i), src.Index(i), false)
+		}
+	case reflect.Map:
+		if src.IsNil() {
+			break
+		}
+
+		s := reflect.MakeMap(src.Type())
+		dst.Set(s)
+		for _, k := range src.MapKeys() {
+			v := src.MapIndex(k)
+			v2 := reflect.New(v.Type()).Elem()
+			rcopy(v2, v, false)
+			dst.SetMapIndex(k, v2)
+		}
+	default:
+		// Assign the value if possible. If its not assignable, the value would
+		// need to be converted and the impact of that may be unexpected, or is
+		// not compatible with the dst type.
+		if src.Type().AssignableTo(dst.Type()) {
+			dst.Set(src)
+		}
+	}
+}

+ 187 - 0
vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go

@@ -0,0 +1,187 @@
+package awsutil
+
+import (
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`)
+
+// rValuesAtPath returns a slice of values found in value v. The values
+// in v are explored recursively so all nested values are collected.
+func rValuesAtPath(v interface{}, path string, create bool, caseSensitive bool) []reflect.Value {
+	pathparts := strings.Split(path, "||")
+	if len(pathparts) > 1 {
+		for _, pathpart := range pathparts {
+			vals := rValuesAtPath(v, pathpart, create, caseSensitive)
+			if vals != nil && len(vals) > 0 {
+				return vals
+			}
+		}
+		return nil
+	}
+
+	values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))}
+	components := strings.Split(path, ".")
+	for len(values) > 0 && len(components) > 0 {
+		var index *int64
+		var indexStar bool
+		c := strings.TrimSpace(components[0])
+		if c == "" { // no actual component, illegal syntax
+			return nil
+		} else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] {
+			// TODO normalize case for user
+			return nil // don't support unexported fields
+		}
+
+		// parse this component
+		if m := indexRe.FindStringSubmatch(c); m != nil {
+			c = m[1]
+			if m[2] == "" {
+				index = nil
+				indexStar = true
+			} else {
+				i, _ := strconv.ParseInt(m[2], 10, 32)
+				index = &i
+				indexStar = false
+			}
+		}
+
+		nextvals := []reflect.Value{}
+		for _, value := range values {
+			// pull component name out of struct member
+			if value.Kind() != reflect.Struct {
+				continue
+			}
+
+			if c == "*" { // pull all members
+				for i := 0; i < value.NumField(); i++ {
+					if f := reflect.Indirect(value.Field(i)); f.IsValid() {
+						nextvals = append(nextvals, f)
+					}
+				}
+				continue
+			}
+
+			value = value.FieldByNameFunc(func(name string) bool {
+				if c == name {
+					return true
+				} else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) {
+					return true
+				}
+				return false
+			})
+
+			if create && value.Kind() == reflect.Ptr && value.IsNil() {
+				value.Set(reflect.New(value.Type().Elem()))
+				value = value.Elem()
+			} else {
+				value = reflect.Indirect(value)
+			}
+
+			if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
+				if !create && value.IsNil() {
+					value = reflect.ValueOf(nil)
+				}
+			}
+
+			if value.IsValid() {
+				nextvals = append(nextvals, value)
+			}
+		}
+		values = nextvals
+
+		if indexStar || index != nil {
+			nextvals = []reflect.Value{}
+			for _, value := range values {
+				value := reflect.Indirect(value)
+				if value.Kind() != reflect.Slice {
+					continue
+				}
+
+				if indexStar { // grab all indices
+					for i := 0; i < value.Len(); i++ {
+						idx := reflect.Indirect(value.Index(i))
+						if idx.IsValid() {
+							nextvals = append(nextvals, idx)
+						}
+					}
+					continue
+				}
+
+				// pull out index
+				i := int(*index)
+				if i >= value.Len() { // check out of bounds
+					if create {
+						// TODO resize slice
+					} else {
+						continue
+					}
+				} else if i < 0 { // support negative indexing
+					i = value.Len() + i
+				}
+				value = reflect.Indirect(value.Index(i))
+
+				if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
+					if !create && value.IsNil() {
+						value = reflect.ValueOf(nil)
+					}
+				}
+
+				if value.IsValid() {
+					nextvals = append(nextvals, value)
+				}
+			}
+			values = nextvals
+		}
+
+		components = components[1:]
+	}
+	return values
+}
+
+// ValuesAtPath returns a list of objects at the lexical path inside of a structure
+func ValuesAtPath(i interface{}, path string) []interface{} {
+	if rvals := rValuesAtPath(i, path, false, true); rvals != nil {
+		vals := make([]interface{}, len(rvals))
+		for i, rval := range rvals {
+			vals[i] = rval.Interface()
+		}
+		return vals
+	}
+	return nil
+}
+
+// ValuesAtAnyPath returns a list of objects at the case-insensitive lexical
+// path inside of a structure
+func ValuesAtAnyPath(i interface{}, path string) []interface{} {
+	if rvals := rValuesAtPath(i, path, false, false); rvals != nil {
+		vals := make([]interface{}, len(rvals))
+		for i, rval := range rvals {
+			vals[i] = rval.Interface()
+		}
+		return vals
+	}
+	return nil
+}
+
+// SetValueAtPath sets an object at the lexical path inside of a structure
+func SetValueAtPath(i interface{}, path string, v interface{}) {
+	if rvals := rValuesAtPath(i, path, true, true); rvals != nil {
+		for _, rval := range rvals {
+			rval.Set(reflect.ValueOf(v))
+		}
+	}
+}
+
+// SetValueAtAnyPath sets an object at the case insensitive lexical path inside
+// of a structure
+func SetValueAtAnyPath(i interface{}, path string, v interface{}) {
+	if rvals := rValuesAtPath(i, path, true, false); rvals != nil {
+		for _, rval := range rvals {
+			rval.Set(reflect.ValueOf(v))
+		}
+	}
+}

+ 103 - 0
vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go

@@ -0,0 +1,103 @@
+package awsutil
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"reflect"
+	"strings"
+)
+
+// Prettify returns the string representation of a value.
+func Prettify(i interface{}) string {
+	var buf bytes.Buffer
+	prettify(reflect.ValueOf(i), 0, &buf)
+	return buf.String()
+}
+
+// prettify will recursively walk value v to build a textual
+// representation of the value, writing it into buf. indent is the current
+// indentation depth in spaces; nested values are indented two further.
+func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
+	// Dereference any chain of pointers down to the concrete value.
+	for v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+
+	switch v.Kind() {
+	case reflect.Struct:
+		strtype := v.Type().String()
+		if strtype == "time.Time" {
+			// time.Time prints via its own String form, not field-by-field.
+			fmt.Fprintf(buf, "%s", v.Interface())
+			break
+		} else if strings.HasPrefix(strtype, "io.") {
+			// io.* values are opaque streams; avoid dumping their internals.
+			buf.WriteString("<buffer>")
+			break
+		}
+
+		buf.WriteString("{\n")
+
+		// Collect printable field names: exported fields whose value is set.
+		names := []string{}
+		for i := 0; i < v.Type().NumField(); i++ {
+			name := v.Type().Field(i).Name
+			f := v.Field(i)
+			if name[0:1] == strings.ToLower(name[0:1]) {
+				continue // ignore unexported fields
+			}
+			if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() {
+				continue // ignore unset fields
+			}
+			names = append(names, name)
+		}
+
+		for i, n := range names {
+			val := v.FieldByName(n)
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(n + ": ")
+			prettify(val, indent+2, buf)
+
+			if i < len(names)-1 {
+				buf.WriteString(",\n")
+			}
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	case reflect.Slice:
+		// Slices of three or fewer elements print on a single line; longer
+		// slices print one element per line at the deeper indent.
+		nl, id, id2 := "", "", ""
+		if v.Len() > 3 {
+			nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+		}
+		buf.WriteString("[" + nl)
+		for i := 0; i < v.Len(); i++ {
+			buf.WriteString(id2)
+			prettify(v.Index(i), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString("," + nl)
+			}
+		}
+
+		buf.WriteString(nl + id + "]")
+	case reflect.Map:
+		// NOTE(review): map iteration order is randomized by the runtime, so
+		// the printed key order is nondeterministic between calls.
+		buf.WriteString("{\n")
+
+		for i, k := range v.MapKeys() {
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(k.String() + ": ")
+			prettify(v.MapIndex(k), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString(",\n")
+			}
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	default:
+		// Scalars: quote strings, print reader buffers by address, and fall
+		// back to default %v formatting for everything else.
+		format := "%v"
+		switch v.Interface().(type) {
+		case string:
+			format = "%q"
+		case io.ReadSeeker, io.Reader:
+			format = "buffer(%p)"
+		}
+		fmt.Fprintf(buf, format, v.Interface())
+	}
+}

+ 254 - 0
vendor/src/github.com/aws/aws-sdk-go/aws/config.go

@@ -0,0 +1,254 @@
+package aws
+
+import (
+	"net/http"
+	"os"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/credentials"
+)
+
+// DefaultChainCredentials is a Credentials which will find the first available
+// credentials Value from the list of Providers.
+//
+// This should be used in the default case. Once the type of credentials are
+// known switching to the specific Credentials will be more efficient.
+var DefaultChainCredentials = credentials.NewChainCredentials(
+	[]credentials.Provider{
+		&credentials.EnvProvider{},
+		&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
+		&credentials.EC2RoleProvider{ExpiryWindow: 5 * time.Minute},
+	})
+
+// The default number of retries for a service. The value of -1 indicates that
+// the service specific retry default will be used.
+const DefaultRetries = -1
+
+// DefaultConfig is the default all service configuration will be based off of.
+// By default, all clients use this structure for initialization options unless
+// a custom configuration object is passed in.
+//
+// You may modify this global structure to change all default configuration
+// in the SDK. Note that configuration options are copied by value, so any
+// modifications must happen before constructing a client.
+var DefaultConfig = NewConfig().
+	WithCredentials(DefaultChainCredentials).
+	WithRegion(os.Getenv("AWS_REGION")).
+	WithHTTPClient(http.DefaultClient).
+	WithMaxRetries(DefaultRetries).
+	WithLogger(NewDefaultLogger()).
+	WithLogLevel(LogOff)
+
+// A Config provides service configuration for service clients. By default,
+// all clients will use the {DefaultConfig} structure.
+type Config struct {
+	// The credentials object to use when signing requests. Defaults to
+	// {DefaultChainCredentials}.
+	Credentials *credentials.Credentials
+
+	// An optional endpoint URL (hostname only or fully qualified URI)
+	// that overrides the default generated endpoint for a client. Set this
+	// to `""` to use the default generated endpoint.
+	//
+	// @note You must still provide a `Region` value when specifying an
+	//   endpoint for a client.
+	Endpoint *string
+
+	// The region to send requests to. This parameter is required and must
+	// be configured globally or on a per-client basis unless otherwise
+	// noted. A full list of regions is found in the "Regions and Endpoints"
+	// document.
+	//
+	// @see http://docs.aws.amazon.com/general/latest/gr/rande.html
+	//   AWS Regions and Endpoints
+	Region *string
+
+	// Set this to `true` to disable SSL when sending requests. Defaults
+	// to `false`.
+	DisableSSL *bool
+
+	// The HTTP client to use when sending requests. Defaults to
+	// `http.DefaultClient`.
+	HTTPClient *http.Client
+
+	// An integer value representing the logging level. The default log level
+	// is zero (LogOff), which represents no logging. To enable logging set
+	// to a LogLevel Value.
+	LogLevel *LogLevelType
+
+	// The logger writer interface to write logging messages to. Defaults to
+	// standard out.
+	Logger Logger
+
+	// The maximum number of times that a request will be retried for failures.
+	// Defaults to -1, which defers the max retry setting to the service specific
+	// configuration.
+	MaxRetries *int
+
+	// Disables semantic parameter validation, which validates input for missing
+	// required fields and/or other semantic request input errors.
+	DisableParamValidation *bool
+
+	// Disables the computation of request and response checksums, e.g.,
+	// CRC32 checksums in Amazon DynamoDB.
+	DisableComputeChecksums *bool
+
+	// Set this to `true` to force the request to use path-style addressing,
+	// i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client will
+	// use virtual hosted bucket addressing when possible
+	// (`http://BUCKET.s3.amazonaws.com/KEY`).
+	//
+	// @note This configuration option is specific to the Amazon S3 service.
+	// @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
+	//   Amazon S3: Virtual Hosting of Buckets
+	S3ForcePathStyle *bool
+}
+
+// NewConfig returns a new Config pointer that can be chained with builder methods to
+// set multiple configuration values inline without using pointers.
+//
+//     svc := s3.New(aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(10))
+//
+func NewConfig() *Config {
+	return &Config{}
+}
+
+// WithCredentials sets a config Credentials value returning a Config pointer
+// for chaining.
+func (c *Config) WithCredentials(creds *credentials.Credentials) *Config {
+	c.Credentials = creds
+	return c
+}
+
+// WithEndpoint sets a config Endpoint value returning a Config pointer for
+// chaining.
+func (c *Config) WithEndpoint(endpoint string) *Config {
+	c.Endpoint = &endpoint
+	return c
+}
+
+// WithRegion sets a config Region value returning a Config pointer for
+// chaining.
+func (c *Config) WithRegion(region string) *Config {
+	c.Region = &region
+	return c
+}
+
+// WithDisableSSL sets a config DisableSSL value returning a Config pointer
+// for chaining.
+func (c *Config) WithDisableSSL(disable bool) *Config {
+	c.DisableSSL = &disable
+	return c
+}
+
+// WithHTTPClient sets a config HTTPClient value returning a Config pointer
+// for chaining.
+func (c *Config) WithHTTPClient(client *http.Client) *Config {
+	c.HTTPClient = client
+	return c
+}
+
+// WithMaxRetries sets a config MaxRetries value returning a Config pointer
+// for chaining.
+func (c *Config) WithMaxRetries(max int) *Config {
+	c.MaxRetries = &max
+	return c
+}
+
+// WithDisableParamValidation sets a config DisableParamValidation value
+// returning a Config pointer for chaining.
+func (c *Config) WithDisableParamValidation(disable bool) *Config {
+	c.DisableParamValidation = &disable
+	return c
+}
+
+// WithDisableComputeChecksums sets a config DisableComputeChecksums value
+// returning a Config pointer for chaining.
+func (c *Config) WithDisableComputeChecksums(disable bool) *Config {
+	c.DisableComputeChecksums = &disable
+	return c
+}
+
+// WithLogLevel sets a config LogLevel value returning a Config pointer for
+// chaining.
+func (c *Config) WithLogLevel(level LogLevelType) *Config {
+	c.LogLevel = &level
+	return c
+}
+
+// WithLogger sets a config Logger value returning a Config pointer for
+// chaining.
+func (c *Config) WithLogger(logger Logger) *Config {
+	c.Logger = logger
+	return c
+}
+
+// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config
+// pointer for chaining.
+func (c *Config) WithS3ForcePathStyle(force bool) *Config {
+	c.S3ForcePathStyle = &force
+	return c
+}
+
+// Merge returns a new Config with the other Config's attribute values merged into
+// this Config. If the other Config's attribute is nil it will not be merged into
+// the new Config to be returned.
+func (c Config) Merge(other *Config) *Config {
+	if other == nil {
+		return &c
+	}
+
+	dst := c
+
+	if other.Credentials != nil {
+		dst.Credentials = other.Credentials
+	}
+
+	if other.Endpoint != nil {
+		dst.Endpoint = other.Endpoint
+	}
+
+	if other.Region != nil {
+		dst.Region = other.Region
+	}
+
+	if other.DisableSSL != nil {
+		dst.DisableSSL = other.DisableSSL
+	}
+
+	if other.HTTPClient != nil {
+		dst.HTTPClient = other.HTTPClient
+	}
+
+	if other.LogLevel != nil {
+		dst.LogLevel = other.LogLevel
+	}
+
+	if other.Logger != nil {
+		dst.Logger = other.Logger
+	}
+
+	if other.MaxRetries != nil {
+		dst.MaxRetries = other.MaxRetries
+	}
+
+	if other.DisableParamValidation != nil {
+		dst.DisableParamValidation = other.DisableParamValidation
+	}
+
+	if other.DisableComputeChecksums != nil {
+		dst.DisableComputeChecksums = other.DisableComputeChecksums
+	}
+
+	if other.S3ForcePathStyle != nil {
+		dst.S3ForcePathStyle = other.S3ForcePathStyle
+	}
+
+	return &dst
+}
+
+// Copy will return a shallow copy of the Config object.
+func (c Config) Copy() *Config {
+	dst := c
+	return &dst
+}

+ 357 - 0
vendor/src/github.com/aws/aws-sdk-go/aws/convutil.go

@@ -0,0 +1,357 @@
+package aws
+
+import "time"
+
+// String returns a pointer to the string value passed in.
+func String(v string) *string {
+	return &v
+}
+
+// StringValue returns the value of the string pointer passed in or
+// "" if the pointer is nil.
+func StringValue(v *string) string {
+	if v != nil {
+		return *v
+	}
+	return ""
+}
+
+// StringSlice converts a slice of string values into a slice of
+// string pointers
+func StringSlice(src []string) []*string {
+	dst := make([]*string, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// StringValueSlice converts a slice of string pointers into a slice of
+// string values
+func StringValueSlice(src []*string) []string {
+	dst := make([]string, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// StringMap converts a string map of string values into a string
+// map of string pointers
+func StringMap(src map[string]string) map[string]*string {
+	dst := make(map[string]*string)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// StringValueMap converts a string map of string pointers into a string
+// map of string values
+func StringValueMap(src map[string]*string) map[string]string {
+	dst := make(map[string]string)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Bool returns a pointer to the bool value passed in.
+func Bool(v bool) *bool {
+	return &v
+}
+
+// BoolValue returns the value of the bool pointer passed in or
+// false if the pointer is nil.
+func BoolValue(v *bool) bool {
+	if v != nil {
+		return *v
+	}
+	return false
+}
+
+// BoolSlice converts a slice of bool values into a slice of
+// bool pointers
+func BoolSlice(src []bool) []*bool {
+	dst := make([]*bool, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// BoolValueSlice converts a slice of bool pointers into a slice of
+// bool values
+func BoolValueSlice(src []*bool) []bool {
+	dst := make([]bool, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// BoolMap converts a string map of bool values into a string
+// map of bool pointers
+func BoolMap(src map[string]bool) map[string]*bool {
+	dst := make(map[string]*bool)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// BoolValueMap converts a string map of bool pointers into a string
+// map of bool values
+func BoolValueMap(src map[string]*bool) map[string]bool {
+	dst := make(map[string]bool)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Int returns a pointer to the int value passed in.
+func Int(v int) *int {
+	return &v
+}
+
+// IntValue returns the value of the int pointer passed in or
+// 0 if the pointer is nil.
+func IntValue(v *int) int {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// IntSlice converts a slice of int values into a slice of
+// int pointers
+func IntSlice(src []int) []*int {
+	dst := make([]*int, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// IntValueSlice converts a slice of int pointers into a slice of
+// int values
+func IntValueSlice(src []*int) []int {
+	dst := make([]int, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// IntMap converts a string map of int values into a string
+// map of int pointers
+func IntMap(src map[string]int) map[string]*int {
+	dst := make(map[string]*int)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// IntValueMap converts a string map of int pointers into a string
+// map of int values
+func IntValueMap(src map[string]*int) map[string]int {
+	dst := make(map[string]int)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Int64 returns a pointer to the int64 value passed in.
+func Int64(v int64) *int64 {
+	return &v
+}
+
+// Int64Value returns the value of the int64 pointer passed in or
+// 0 if the pointer is nil.
+func Int64Value(v *int64) int64 {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// Int64Slice converts a slice of int64 values into a slice of
+// int64 pointers
+func Int64Slice(src []int64) []*int64 {
+	dst := make([]*int64, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// Int64ValueSlice converts a slice of int64 pointers into a slice of
+// int64 values
+func Int64ValueSlice(src []*int64) []int64 {
+	dst := make([]int64, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// Int64Map converts a string map of int64 values into a string
+// map of int64 pointers
+func Int64Map(src map[string]int64) map[string]*int64 {
+	dst := make(map[string]*int64)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// Int64ValueMap converts a string map of int64 pointers into a string
+// map of int64 values
+func Int64ValueMap(src map[string]*int64) map[string]int64 {
+	dst := make(map[string]int64)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Float64 returns a pointer to the float64 value passed in.
+func Float64(v float64) *float64 {
+	return &v
+}
+
+// Float64Value returns the value of the float64 pointer passed in or
+// 0 if the pointer is nil.
+func Float64Value(v *float64) float64 {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// Float64Slice converts a slice of float64 values into a slice of
+// float64 pointers
+func Float64Slice(src []float64) []*float64 {
+	dst := make([]*float64, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// Float64ValueSlice converts a slice of float64 pointers into a slice of
+// float64 values
+func Float64ValueSlice(src []*float64) []float64 {
+	dst := make([]float64, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// Float64Map converts a string map of float64 values into a string
+// map of float64 pointers
+func Float64Map(src map[string]float64) map[string]*float64 {
+	dst := make(map[string]*float64)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// Float64ValueMap converts a string map of float64 pointers into a string
+// map of float64 values
+func Float64ValueMap(src map[string]*float64) map[string]float64 {
+	dst := make(map[string]float64)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Time returns a pointer to the time.Time value passed in.
+func Time(v time.Time) *time.Time {
+	return &v
+}
+
+// TimeValue returns the value of the time.Time pointer passed in or
+// time.Time{} if the pointer is nil.
+func TimeValue(v *time.Time) time.Time {
+	if v != nil {
+		return *v
+	}
+	return time.Time{}
+}
+
+// TimeSlice converts a slice of time.Time values into a slice of
+// time.Time pointers
+func TimeSlice(src []time.Time) []*time.Time {
+	dst := make([]*time.Time, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// TimeValueSlice converts a slice of time.Time pointers into a slice of
+// time.Time values
+func TimeValueSlice(src []*time.Time) []time.Time {
+	dst := make([]time.Time, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// TimeMap converts a string map of time.Time values into a string
+// map of time.Time pointers
+func TimeMap(src map[string]time.Time) map[string]*time.Time {
+	dst := make(map[string]*time.Time)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// TimeValueMap converts a string map of time.Time pointers into a string
+// map of time.Time values
+func TimeValueMap(src map[string]*time.Time) map[string]time.Time {
+	dst := make(map[string]time.Time)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}

+ 85 - 0
vendor/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go

@@ -0,0 +1,85 @@
+package credentials
+
+import (
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+var (
+	// ErrNoValidProvidersFoundInChain is returned when there are no valid
+	// providers in the ChainProvider.
+	//
+	// @readonly
+	ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders", "no valid providers in chain", nil)
+)
+
+// A ChainProvider will search for a provider which returns credentials
+// and cache that provider until Retrieve is called again.
+//
+// The ChainProvider provides a way of chaining multiple providers together
+// which will pick the first available using priority order of the Providers
+// in the list.
+//
+// If none of the Providers retrieve valid credentials Value, ChainProvider's
+// Retrieve() will return the error ErrNoValidProvidersFoundInChain.
+//
+// If a Provider is found which returns valid credentials Value ChainProvider
+// will cache that Provider for all calls to IsExpired(), until Retrieve is
+// called again.
+//
+// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider.
+// In this example EnvProvider will first check if any credentials are available
+// via the environment variables. If there are none ChainProvider will check
+// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider
+// does not return any credentials ChainProvider will return the error
+// ErrNoValidProvidersFoundInChain
+//
+//     creds := NewChainCredentials(
+//         []Provider{
+//             &EnvProvider{},
+//             &EC2RoleProvider{},
+//         })
+//
+//     // Usage of ChainCredentials with aws.Config
+//     svc := ec2.New(&aws.Config{Credentials: creds})
+//
+type ChainProvider struct {
+	Providers []Provider
+	curr      Provider
+}
+
+// NewChainCredentials returns a pointer to a new Credentials object
+// wrapping a chain of providers.
+func NewChainCredentials(providers []Provider) *Credentials {
+	return NewCredentials(&ChainProvider{
+		Providers: append([]Provider{}, providers...),
+	})
+}
+
+// Retrieve returns the credentials value or error if no provider returned
+// without error.
+//
+// If a provider is found it will be cached and any calls to IsExpired()
+// will return the expired state of the cached provider.
+func (c *ChainProvider) Retrieve() (Value, error) {
+	// Try each provider in priority order; the first that succeeds wins and
+	// is cached so IsExpired() can consult it on later calls.
+	for _, p := range c.Providers {
+		if creds, err := p.Retrieve(); err == nil {
+			c.curr = p
+			return creds, nil
+		}
+	}
+	c.curr = nil
+
+	// TODO better error reporting. maybe report error for each failed retrieve?
+	// NOTE(review): each provider's individual error is discarded above; only
+	// the generic sentinel error below reaches the caller.
+
+	return Value{}, ErrNoValidProvidersFoundInChain
+}
+
+// IsExpired will return the expired state of the currently cached provider
+// if there is one.  If there is no current provider, true will be returned.
+func (c *ChainProvider) IsExpired() bool {
+	if c.curr != nil {
+		return c.curr.IsExpired()
+	}
+
+	return true
+}

+ 220 - 0
vendor/src/github.com/aws/aws-sdk-go/aws/credentials/credentials.go

@@ -0,0 +1,220 @@
+// Package credentials provides credential retrieval and management
+//
+// The Credentials is the primary method of getting access to and managing
+// credentials Values. Using dependency injection retrieval of the credential
+// values is handled by a object which satisfies the Provider interface.
+//
+// By default the Credentials.Get() will cache the successful result of a
+// Provider's Retrieve() until Provider.IsExpired() returns true. At which
+// point Credentials will call Provider's Retrieve() to get new credential Value.
+//
+// The Provider is responsible for determining when credentials Value have expired.
+// It is also important to note that Credentials will always call Retrieve the
+// first time Credentials.Get() is called.
+//
+// Example of using the environment variable credentials.
+//
+//     creds := NewEnvCredentials()
+//
+//     // Retrieve the credentials value
+//     credValue, err := creds.Get()
+//     if err != nil {
+//         // handle error
+//     }
+//
+// Example of forcing credentials to expire and be refreshed on the next Get().
+// This may be helpful to proactively expire credentials and refresh them sooner
+// than they would naturally expire on their own.
+//
+//     creds := NewCredentials(&EC2RoleProvider{})
+//     creds.Expire()
+//     credsValue, err := creds.Get()
+//     // New credentials will be retrieved instead of from cache.
+//
+//
+// Custom Provider
+//
+// Each Provider built into this package also provides a helper method to generate
+// a Credentials pointer setup with the provider. To use a custom Provider just
+// create a type which satisfies the Provider interface and pass it to the
+// NewCredentials method.
+//
+//     type MyProvider struct{}
+//     func (m *MyProvider) Retrieve() (Value, error) {...}
+//     func (m *MyProvider) IsExpired() bool {...}
+//
+//     creds := NewCredentials(&MyProvider{})
+//     credValue, err := creds.Get()
+//
+package credentials
+
+import (
+	"sync"
+	"time"
+)
+
+// Create an empty Credential object that can be used as dummy placeholder
+// credentials for requests that do not need to be signed.
+//
+// This Credentials can be used to configure a service to not sign requests
+// when making service API calls. For example, when accessing public
+// s3 buckets.
+//
+//     svc := s3.New(&aws.Config{Credentials: AnonymousCredentials})
+//     // Access public S3 buckets.
+//
+// @readonly
+var AnonymousCredentials = NewStaticCredentials("", "", "")
+
+// A Value is the AWS credentials value for individual credential fields.
+type Value struct {
+	// AWS Access key ID
+	AccessKeyID string
+
+	// AWS Secret Access Key
+	SecretAccessKey string
+
+	// AWS Session Token
+	SessionToken string
+}
+
+// A Provider is the interface for any component which will provide credentials
+// Value. A provider is required to manage its own Expired state, and what to
+// be expired means.
+//
+// The Provider should not need to implement its own mutexes, because
+// that will be managed by Credentials.
+type Provider interface {
+	// Refresh returns nil if it successfully retrieved the value.
+	// Error is returned if the value was not obtainable, or is empty.
+	Retrieve() (Value, error)
+
+	// IsExpired returns if the credentials are no longer valid, and need
+	// to be retrieved.
+	IsExpired() bool
+}
+
+// An Expiry provides shared expiration logic to be used by credentials
+// providers to implement expiry functionality.
+//
+// The best method to use this struct is as an anonymous field within the
+// provider's struct.
+//
+// Example:
+//     type EC2RoleProvider struct {
+//         Expiry
+//         ...
+//     }
+type Expiry struct {
+	// The date/time when to expire on
+	expiration time.Time
+
+	// If set will be used by IsExpired to determine the current time.
+	// Defaults to time.Now if CurrentTime is not set.  Available for testing
+	// to be able to mock out the current time.
+	CurrentTime func() time.Time
+}
+
+// SetExpiration sets the expiration IsExpired will check when called.
+//
+// If window is greater than 0 the expiration time will be reduced by the
+// window value.
+//
+// Using a window is helpful to trigger credentials to expire sooner than
+// the expiration time given to ensure no requests are made with expired
+// tokens.
+func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
+	e.expiration = expiration
+	if window > 0 {
+		e.expiration = e.expiration.Add(-window)
+	}
+}
+
+// IsExpired returns if the credentials are expired.
+func (e *Expiry) IsExpired() bool {
+	// Lazily default the clock to time.Now. NOTE(review): this writes to the
+	// receiver on first call, so concurrent first calls would race — confirm
+	// callers serialize access (Credentials does, via its mutex).
+	if e.CurrentTime == nil {
+		e.CurrentTime = time.Now
+	}
+	return e.expiration.Before(e.CurrentTime())
+}
+
+// A Credentials provides synchronous safe retrieval of AWS credentials Value.
+// Credentials will cache the credentials value until they expire. Once the value
+// expires the next Get will attempt to retrieve valid credentials.
+//
+// Credentials is safe to use across multiple goroutines and will manage the
+// synchronous state so the Providers do not need to implement their own
+// synchronization.
+//
+// The first Credentials.Get() will always call Provider.Retrieve() to get the
+// first instance of the credentials Value. All calls to Get() after that
+// will return the cached credentials Value until IsExpired() returns true.
+type Credentials struct {
+	creds        Value
+	forceRefresh bool
+	m            sync.Mutex
+
+	provider Provider
+}
+
+// NewCredentials returns a pointer to a new Credentials with the provider set.
+func NewCredentials(provider Provider) *Credentials {
+	return &Credentials{
+		provider:     provider,
+		forceRefresh: true,
+	}
+}
+
+// Get returns the credentials value, or error if the credentials Value failed
+// to be retrieved.
+//
+// Will return the cached credentials Value if it has not expired. If the
+// credentials Value has expired the Provider's Retrieve() will be called
+// to refresh the credentials.
+//
+// If Credentials.Expire() was called the credentials Value will be force
+// expired, and the next call to Get() will cause them to be refreshed.
+func (c *Credentials) Get() (Value, error) {
+	c.m.Lock()
+	defer c.m.Unlock()
+
+	if c.isExpired() {
+		creds, err := c.provider.Retrieve()
+		if err != nil {
+			return Value{}, err
+		}
+		c.creds = creds
+		c.forceRefresh = false
+	}
+
+	return c.creds, nil
+}
+
+// Expire expires the credentials and forces them to be retrieved on the
+// next call to Get().
+//
+// This will override the Provider's expired state, and force Credentials
+// to call the Provider's Retrieve().
+func (c *Credentials) Expire() {
+	c.m.Lock()
+	defer c.m.Unlock()
+
+	c.forceRefresh = true
+}
+
+// IsExpired returns if the credentials are no longer valid, and need
+// to be retrieved.
+//
+// If the Credentials were forced to be expired with Expire() this will
+// reflect that override.
+func (c *Credentials) IsExpired() bool {
+	c.m.Lock()
+	defer c.m.Unlock()
+
+	return c.isExpired()
+}
+
+// isExpired helper method wrapping the definition of expired credentials.
+func (c *Credentials) isExpired() bool {
+	return c.forceRefresh || c.provider.IsExpired()
+}

+ 162 - 0
vendor/src/github.com/aws/aws-sdk-go/aws/credentials/ec2_role_provider.go

@@ -0,0 +1,162 @@
+package credentials
+
+import (
+	"bufio"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+const metadataCredentialsEndpoint = "http://169.254.169.254/latest/meta-data/iam/security-credentials/"
+
+// An EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if
+// those credentials are expired.
+//
+// Example how to configure the EC2RoleProvider with custom http Client, Endpoint
+// or ExpiryWindow
+//
+//     p := &credentials.EC2RoleProvider{
+//         // Pass in a custom timeout to be used when requesting
+//         // IAM EC2 Role credentials.
+//         Client: &http.Client{
+//             Timeout: 10 * time.Second,
+//         },
+//         // Use default EC2 Role metadata endpoint, Alternate endpoints can be
+//         // specified setting Endpoint to something else.
+//         Endpoint: "",
+//         // Do not use early expiry of credentials. If a non zero value is
+//         // specified the credentials will be expired early
+//         ExpiryWindow: 0,
+//     }
+type EC2RoleProvider struct {
+	Expiry
+
+	// Endpoint must be a fully qualified URL
+	Endpoint string
+
+	// HTTP client to use when connecting to EC2 service
+	Client *http.Client
+
+	// ExpiryWindow will allow the credentials to trigger refreshing prior to
+	// the credentials actually expiring. This is beneficial so race conditions
+	// with expiring credentials do not cause request to fail unexpectedly
+	// due to ExpiredTokenException exceptions.
+	//
+	// So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
+	// 10 seconds before the credentials are actually expired.
+	//
+	// If ExpiryWindow is 0 or less it will be ignored.
+	ExpiryWindow time.Duration
+}
+
+// NewEC2RoleCredentials returns a pointer to a new Credentials object
+// wrapping the EC2RoleProvider.
+//
+// Takes a custom http.Client which can be configured for custom handling of
+// things such as timeout.
+//
+// Endpoint is the URL that the EC2RoleProvider will connect to when retrieving
+// role and credentials.
+//
+// Window is the expiry window that will be subtracted from the expiry returned
+// by the role credential request. This is done so that the credentials will
+// expire sooner than their actual lifespan.
+func NewEC2RoleCredentials(client *http.Client, endpoint string, window time.Duration) *Credentials {
+	return NewCredentials(&EC2RoleProvider{
+		Endpoint:     endpoint,
+		Client:       client,
+		ExpiryWindow: window,
+	})
+}
+
+// Retrieve retrieves credentials from the EC2 service.
+// Error will be returned if the request fails, or unable to extract
+// the desired credentials.
+func (m *EC2RoleProvider) Retrieve() (Value, error) {
+	// Lazily fall back to the default HTTP client and the well-known EC2
+	// instance metadata endpoint when the caller configured neither.
+	if m.Client == nil {
+		m.Client = http.DefaultClient
+	}
+	if m.Endpoint == "" {
+		m.Endpoint = metadataCredentialsEndpoint
+	}
+
+	credsList, err := requestCredList(m.Client, m.Endpoint)
+	if err != nil {
+		return Value{}, err
+	}
+
+	if len(credsList) == 0 {
+		return Value{}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil)
+	}
+	// Only the first role name in the listing is used to fetch credentials.
+	credsName := credsList[0]
+
+	roleCreds, err := requestCred(m.Client, m.Endpoint, credsName)
+	if err != nil {
+		return Value{}, err
+	}
+
+	// Record the expiry (reduced by the configured window) so IsExpired()
+	// reports true before the credentials actually lapse.
+	m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow)
+
+	return Value{
+		AccessKeyID:     roleCreds.AccessKeyID,
+		SecretAccessKey: roleCreds.SecretAccessKey,
+		SessionToken:    roleCreds.Token,
+	}, nil
+}
+
+// An ec2RoleCredRespBody provides the shape for deserializing credential
+// request responses.
+type ec2RoleCredRespBody struct {
+	Expiration      time.Time
+	AccessKeyID     string
+	SecretAccessKey string
+	Token           string
+}
+
+// requestCredList requests a list of credentials from the EC2 service.
+// If there are no credentials, or there is an error making or receiving the request
+func requestCredList(client *http.Client, endpoint string) ([]string, error) {
+	resp, err := client.Get(endpoint)
+	if err != nil {
+		return nil, awserr.New("ListEC2Role", "failed to list EC2 Roles", err)
+	}
+	defer resp.Body.Close()
+
+	credsList := []string{}
+	s := bufio.NewScanner(resp.Body)
+	for s.Scan() {
+		credsList = append(credsList, s.Text())
+	}
+
+	if err := s.Err(); err != nil {
+		return nil, awserr.New("ReadEC2Role", "failed to read list of EC2 Roles", err)
+	}
+
+	return credsList, nil
+}
+
+// requestCred requests the credentials for a specific role from the EC2 service.
+//
+// If the credentials cannot be found, or there is an error reading the response
+// an error will be returned.
+func requestCred(client *http.Client, endpoint, credsName string) (*ec2RoleCredRespBody, error) {
+	resp, err := client.Get(endpoint + credsName)
+	if err != nil {
+		return nil, awserr.New("GetEC2RoleCredentials",
+			fmt.Sprintf("failed to get %s EC2 Role credentials", credsName),
+			err)
+	}
+	defer resp.Body.Close()
+
+	respCreds := &ec2RoleCredRespBody{}
+	if err := json.NewDecoder(resp.Body).Decode(respCreds); err != nil {
+		return nil, awserr.New("DecodeEC2RoleCredentials",
+			fmt.Sprintf("failed to decode %s EC2 Role credentials", credsName),
+			err)
+	}
+
+	return respCreds, nil
+}

+ 73 - 0
vendor/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go

@@ -0,0 +1,73 @@
+package credentials
+
+import (
+	"os"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
var (
	// ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be
	// found in the process's environment.
	//
	// @readonly
	ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil)

	// ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key
	// can't be found in the process's environment.
	//
	// @readonly
	ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil)
)

// A EnvProvider retrieves credentials from the environment variables of the
// running process. Environment credentials never expire.
//
// Environment variables used:
//
// * Access Key ID:     AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
type EnvProvider struct {
	// retrieved is set once Retrieve succeeds; IsExpired reports its inverse.
	retrieved bool
}

// NewEnvCredentials returns a pointer to a new Credentials object
// wrapping the environment variable provider.
func NewEnvCredentials() *Credentials {
	return NewCredentials(&EnvProvider{})
}
+
+// Retrieve retrieves the keys from the environment.
+func (e *EnvProvider) Retrieve() (Value, error) {
+	e.retrieved = false
+
+	id := os.Getenv("AWS_ACCESS_KEY_ID")
+	if id == "" {
+		id = os.Getenv("AWS_ACCESS_KEY")
+	}
+
+	secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
+	if secret == "" {
+		secret = os.Getenv("AWS_SECRET_KEY")
+	}
+
+	if id == "" {
+		return Value{}, ErrAccessKeyIDNotFound
+	}
+
+	if secret == "" {
+		return Value{}, ErrSecretAccessKeyNotFound
+	}
+
+	e.retrieved = true
+	return Value{
+		AccessKeyID:     id,
+		SecretAccessKey: secret,
+		SessionToken:    os.Getenv("AWS_SESSION_TOKEN"),
+	}, nil
+}
+
// IsExpired returns if the credentials have been retrieved.
// It is true until the first successful Retrieve; environment
// credentials never expire afterwards.
func (e *EnvProvider) IsExpired() bool {
	return !e.retrieved
}

+ 8 - 0
vendor/src/github.com/aws/aws-sdk-go/aws/credentials/example.ini

@@ -0,0 +1,8 @@
+[default]
+aws_access_key_id = accessKey
+aws_secret_access_key = secret
+aws_session_token = token
+
+[no_token]
+aws_access_key_id = accessKey
+aws_secret_access_key = secret

+ 135 - 0
vendor/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go

@@ -0,0 +1,135 @@
+package credentials
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/vaughan0/go-ini"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
var (
	// ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found.
	//
	// @readonly
	ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil)
)

// A SharedCredentialsProvider retrieves credentials from the current user's home
// directory, and keeps track if those credentials are expired.
//
// Profile ini file example: $HOME/.aws/credentials
type SharedCredentialsProvider struct {
	// Path to the shared credentials file. If empty will default to current user's
	// home directory.
	Filename string

	// AWS Profile to extract credentials from the shared credentials file. If empty
	// will default to environment variable "AWS_PROFILE" or "default" if
	// environment variable is also not set.
	Profile string

	// retrieved states if the credentials have been successfully retrieved.
	retrieved bool
}

// NewSharedCredentials returns a pointer to a new Credentials object
// wrapping the Profile file provider.
//
// Pass empty strings to use the default file location and profile selection
// described on SharedCredentialsProvider.
func NewSharedCredentials(filename, profile string) *Credentials {
	return NewCredentials(&SharedCredentialsProvider{
		Filename: filename,
		Profile:  profile,
	})
}
+
+// Retrieve reads and extracts the shared credentials from the current
+// users home directory.
+func (p *SharedCredentialsProvider) Retrieve() (Value, error) {
+	p.retrieved = false
+
+	filename, err := p.filename()
+	if err != nil {
+		return Value{}, err
+	}
+
+	creds, err := loadProfile(filename, p.profile())
+	if err != nil {
+		return Value{}, err
+	}
+
+	p.retrieved = true
+	return creds, nil
+}
+
+// IsExpired returns if the shared credentials have expired.
+func (p *SharedCredentialsProvider) IsExpired() bool {
+	return !p.retrieved
+}
+
// loadProfile loads from the file pointed to by shared credentials filename for profile.
// The credentials retrieved from the profile will be returned or error. Error will be
// returned if it fails to read from the file, or the data is invalid.
func loadProfile(filename, profile string) (Value, error) {
	config, err := ini.LoadFile(filename)
	if err != nil {
		return Value{}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err)
	}
	iniProfile := config.Section(profile)

	// Both key ID and secret are mandatory for a usable credential set.
	id, ok := iniProfile["aws_access_key_id"]
	if !ok {
		return Value{}, awserr.New("SharedCredsAccessKey",
			fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename),
			nil)
	}

	secret, ok := iniProfile["aws_secret_access_key"]
	if !ok {
		return Value{}, awserr.New("SharedCredsSecret",
			fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename),
			nil)
	}

	// The session token is optional; a missing key yields the empty string.
	token := iniProfile["aws_session_token"]

	return Value{
		AccessKeyID:     id,
		SecretAccessKey: secret,
		SessionToken:    token,
	}, nil
}
+
+// filename returns the filename to use to read AWS shared credentials.
+//
+// Will return an error if the user's home directory path cannot be found.
+func (p *SharedCredentialsProvider) filename() (string, error) {
+	if p.Filename == "" {
+		homeDir := os.Getenv("HOME") // *nix
+		if homeDir == "" {           // Windows
+			homeDir = os.Getenv("USERPROFILE")
+		}
+		if homeDir == "" {
+			return "", ErrSharedCredentialsHomeNotFound
+		}
+
+		p.Filename = filepath.Join(homeDir, ".aws", "credentials")
+	}
+
+	return p.Filename, nil
+}
+
+// profile returns the AWS shared credentials profile.  If empty will read
+// environment variable "AWS_PROFILE". If that is not set profile will
+// return "default".
+func (p *SharedCredentialsProvider) profile() string {
+	if p.Profile == "" {
+		p.Profile = os.Getenv("AWS_PROFILE")
+	}
+	if p.Profile == "" {
+		p.Profile = "default"
+	}
+
+	return p.Profile
+}

+ 44 - 0
vendor/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go

@@ -0,0 +1,44 @@
+package credentials
+
+import (
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
var (
	// ErrStaticCredentialsEmpty is emitted when static credentials are empty.
	//
	// @readonly
	ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil)
)

// A StaticProvider is a set of credentials which are set programmatically,
// and will never expire.
type StaticProvider struct {
	// Value holds the fixed credentials returned by Retrieve.
	Value
}

// NewStaticCredentials returns a pointer to a new Credentials object
// wrapping a static credentials value provider.
// token may be empty when no session token is used.
func NewStaticCredentials(id, secret, token string) *Credentials {
	return NewCredentials(&StaticProvider{Value: Value{
		AccessKeyID:     id,
		SecretAccessKey: secret,
		SessionToken:    token,
	}})
}
+
+// Retrieve returns the credentials or error if the credentials are invalid.
+func (s *StaticProvider) Retrieve() (Value, error) {
+	if s.AccessKeyID == "" || s.SecretAccessKey == "" {
+		return Value{}, ErrStaticCredentialsEmpty
+	}
+
+	return s.Value, nil
+}
+
+// IsExpired returns if the credentials are expired.
+//
+// For StaticProvider, the credentials never expired.
+func (s *StaticProvider) IsExpired() bool {
+	return false
+}

+ 157 - 0
vendor/src/github.com/aws/aws-sdk-go/aws/handler_functions.go

@@ -0,0 +1,157 @@
+package aws
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"regexp"
+	"strconv"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
// sleepDelay pauses for the given duration. Declared as a variable so
// tests can stub out the real time.Sleep.
var sleepDelay = func(delay time.Duration) {
	time.Sleep(delay)
}

// lener is implemented by types which can report their length, letting
// BuildContentLength compute a request body size without seeking.
type lener interface {
	Len() int
}
+
// BuildContentLength builds the content length of a request based on the body,
// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable
// to determine request body length and no "Content-Length" was specified it will panic.
func BuildContentLength(r *Request) {
	// An explicit Content-Length header wins over any body inspection.
	// NOTE(review): a malformed header value makes ParseInt return 0, which
	// is silently used as the length — confirm this is acceptable.
	if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" {
		length, _ := strconv.ParseInt(slength, 10, 64)
		r.HTTPRequest.ContentLength = length
		return
	}

	var length int64
	switch body := r.Body.(type) {
	case nil:
		length = 0
	case lener:
		length = int64(body.Len())
	case io.Seeker:
		// Measure by seeking to the end, then restore the original offset.
		// r.bodyStart is also recorded so retries can rewind the body.
		r.bodyStart, _ = body.Seek(0, 1)
		end, _ := body.Seek(0, 2)
		body.Seek(r.bodyStart, 0) // make sure to seek back to original location
		length = end - r.bodyStart
	default:
		panic("Cannot get length of body, must provide `ContentLength`")
	}

	r.HTTPRequest.ContentLength = length
	r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length))
}
+
// UserAgentHandler is a request handler for injecting User agent into requests.
// The value is built from the SDKName and SDKVersion constants.
func UserAgentHandler(r *Request) {
	r.HTTPRequest.Header.Set("User-Agent", SDKName+"/"+SDKVersion)
}
+
+var reStatusCode = regexp.MustCompile(`^(\d+)`)
+
+// SendHandler is a request handler to send service request using HTTP client.
+func SendHandler(r *Request) {
+	var err error
+	r.HTTPResponse, err = r.Service.Config.HTTPClient.Do(r.HTTPRequest)
+	if err != nil {
+		// Capture the case where url.Error is returned for error processing
+		// response. e.g. 301 without location header comes back as string
+		// error and r.HTTPResponse is nil. Other url redirect errors will
+		// comeback in a similar method.
+		if e, ok := err.(*url.Error); ok {
+			if s := reStatusCode.FindStringSubmatch(e.Error()); s != nil {
+				code, _ := strconv.ParseInt(s[1], 10, 64)
+				r.HTTPResponse = &http.Response{
+					StatusCode: int(code),
+					Status:     http.StatusText(int(code)),
+					Body:       ioutil.NopCloser(bytes.NewReader([]byte{})),
+				}
+				return
+			}
+		}
+		if r.HTTPRequest == nil {
+			// Add a dummy request response object to ensure the HTTPResponse
+			// value is consistent.
+			r.HTTPResponse = &http.Response{
+				StatusCode: int(0),
+				Status:     http.StatusText(int(0)),
+				Body:       ioutil.NopCloser(bytes.NewReader([]byte{})),
+			}
+		}
+		// Catch all other request errors.
+		r.Error = awserr.New("RequestError", "send request failed", err)
+		r.Retryable = Bool(true) // network errors are retryable
+	}
+}
+
// ValidateResponseHandler is a request handler to validate service response.
// Any status code of 0 or >= 300 is treated as a failure; the generic error
// set here is expected to be replaced by a service-specific UnmarshalError
// handler with more detail.
func ValidateResponseHandler(r *Request) {
	if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 {
		// this may be replaced by an UnmarshalError handler
		r.Error = awserr.New("UnknownError", "unknown error", nil)
	}
}
+
// AfterRetryHandler performs final checks to determine if the request should
// be retried and how long to delay.
//
// When a retry will happen this sleeps for the computed delay, expires
// locally-cached credentials on token-expiry errors, increments the retry
// counter, and clears r.Error so the send loop continues.
func AfterRetryHandler(r *Request) {
	// If one of the other handlers already set the retry state
	// we don't want to override it based on the service's state
	if r.Retryable == nil {
		r.Retryable = Bool(r.Service.ShouldRetry(r))
	}

	if r.WillRetry() {
		r.RetryDelay = r.Service.RetryRules(r)
		sleepDelay(r.RetryDelay)

		// when the expired token exception occurs the credentials
		// need to be expired locally so that the next request to
		// get credentials will trigger a credentials refresh.
		if r.Error != nil {
			if err, ok := r.Error.(awserr.Error); ok {
				if isCodeExpiredCreds(err.Code()) {
					r.Config.Credentials.Expire()
				}
			}
		}

		// Clearing r.Error signals the caller's loop to retry the request.
		r.RetryCount++
		r.Error = nil
	}
}
+
var (
	// ErrMissingRegion is an error that is returned if region configuration is
	// not found. See ValidateEndpointHandler.
	//
	// @readonly
	ErrMissingRegion error = awserr.New("MissingRegion", "could not find region configuration", nil)

	// ErrMissingEndpoint is an error that is returned if an endpoint cannot be
	// resolved for a service. See ValidateEndpointHandler.
	//
	// @readonly
	ErrMissingEndpoint error = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil)
)
+
+// ValidateEndpointHandler is a request handler to validate a request had the
+// appropriate Region and Endpoint set. Will set r.Error if the endpoint or
+// region is not valid.
+func ValidateEndpointHandler(r *Request) {
+	if r.Service.SigningRegion == "" && StringValue(r.Service.Config.Region) == "" {
+		r.Error = ErrMissingRegion
+	} else if r.Service.Endpoint == "" {
+		r.Error = ErrMissingEndpoint
+	}
+}

+ 85 - 0
vendor/src/github.com/aws/aws-sdk-go/aws/handlers.go

@@ -0,0 +1,85 @@
+package aws
+
// A Handlers provides a collection of request handlers for various
// stages of handling requests.
type Handlers struct {
	Validate         HandlerList // run before building; parameter validation
	Build            HandlerList // serialize parameters into the HTTP request
	Sign             HandlerList // sign the built request
	Send             HandlerList // perform the HTTP round trip
	ValidateResponse HandlerList // check the response status
	Unmarshal        HandlerList // deserialize a successful response
	UnmarshalMeta    HandlerList // extract response metadata (e.g. request ID)
	UnmarshalError   HandlerList // deserialize an error response
	Retry            HandlerList // decide on retrying
	AfterRetry       HandlerList // post-retry bookkeeping and delay
}

// copy returns a copy of this handler's lists, so a Request can mutate its
// own handlers without affecting the Service's defaults.
func (h *Handlers) copy() Handlers {
	return Handlers{
		Validate:         h.Validate.copy(),
		Build:            h.Build.copy(),
		Sign:             h.Sign.copy(),
		Send:             h.Send.copy(),
		ValidateResponse: h.ValidateResponse.copy(),
		Unmarshal:        h.Unmarshal.copy(),
		UnmarshalError:   h.UnmarshalError.copy(),
		UnmarshalMeta:    h.UnmarshalMeta.copy(),
		Retry:            h.Retry.copy(),
		AfterRetry:       h.AfterRetry.copy(),
	}
}

// Clear removes callback functions for all handlers
func (h *Handlers) Clear() {
	h.Validate.Clear()
	h.Build.Clear()
	h.Send.Clear()
	h.Sign.Clear()
	h.Unmarshal.Clear()
	h.UnmarshalMeta.Clear()
	h.UnmarshalError.Clear()
	h.ValidateResponse.Clear()
	h.Retry.Clear()
	h.AfterRetry.Clear()
}
+
+// A HandlerList manages zero or more handlers in a list.
+type HandlerList struct {
+	list []func(*Request)
+}
+
+// copy creates a copy of the handler list.
+func (l *HandlerList) copy() HandlerList {
+	var n HandlerList
+	n.list = append([]func(*Request){}, l.list...)
+	return n
+}
+
+// Clear clears the handler list.
+func (l *HandlerList) Clear() {
+	l.list = []func(*Request){}
+}
+
+// Len returns the number of handlers in the list.
+func (l *HandlerList) Len() int {
+	return len(l.list)
+}
+
+// PushBack pushes handlers f to the back of the handler list.
+func (l *HandlerList) PushBack(f ...func(*Request)) {
+	l.list = append(l.list, f...)
+}
+
+// PushFront pushes handlers f to the front of the handler list.
+func (l *HandlerList) PushFront(f ...func(*Request)) {
+	l.list = append(f, l.list...)
+}
+
+// Run executes all handlers in the list with a given request object.
+func (l *HandlerList) Run(r *Request) {
+	for _, f := range l.list {
+		f(r)
+	}
+}

+ 89 - 0
vendor/src/github.com/aws/aws-sdk-go/aws/logger.go

@@ -0,0 +1,89 @@
+package aws
+
+import (
+	"log"
+	"os"
+)
+
// A LogLevelType defines the level logging should be performed at. Used to instruct
// the SDK which statements should be logged.
type LogLevelType uint

// LogLevel returns the pointer to a LogLevel. Should be used to workaround
// not being able to take the address of a non-composite literal.
func LogLevel(l LogLevelType) *LogLevelType {
	return &l
}

// Value returns the LogLevel value or the default value LogOff if the LogLevel
// is nil. Safe to use on nil value LogLevelTypes.
func (l *LogLevelType) Value() LogLevelType {
	if l != nil {
		return *l
	}
	return LogOff
}

// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be
// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If
// LogLevel is nil, will default to LogOff comparison.
func (l *LogLevelType) Matches(v LogLevelType) bool {
	// Bitmask test: every bit of v must be set in this level.
	c := l.Value()
	return c&v == v
}

// AtLeast returns true if this LogLevel is at least high enough to satisfies v.
// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default
// to LogOff comparison.
func (l *LogLevelType) AtLeast(v LogLevelType) bool {
	c := l.Value()
	return c >= v
}
+
const (
	// LogOff states that no logging should be performed by the SDK. This is the
	// default state of the SDK, and should be used to disable all logging.
	LogOff LogLevelType = iota * 0x1000

	// LogDebug state that debug output should be logged by the SDK. This should
	// be used to inspect request made and responses received.
	LogDebug
)

// Debug Logging Sub Levels
const (
	// LogDebugWithSigning states that the SDK should log request signing and
	// presigning events. This should be used to log the signing details of
	// requests for debugging. Will also enable LogDebug.
	LogDebugWithSigning LogLevelType = LogDebug | (1 << iota)

	// LogDebugWithHTTPBody states the SDK should log HTTP request and response
	// HTTP bodies in addition to the headers and path. This should be used to
	// see the body content of requests and responses made while using the SDK.
	// Will also enable LogDebug.
	LogDebugWithHTTPBody
)
+
// A Logger is a minimalistic interface for the SDK to log messages to. Should
// be used to provide custom logging writers for the SDK to use.
type Logger interface {
	Log(...interface{})
}

// NewDefaultLogger returns a Logger which will write log messages to stdout,
// and use same formatting runes as the stdlib log.Logger
func NewDefaultLogger() Logger {
	dl := &defaultLogger{}
	dl.logger = log.New(os.Stdout, "", log.LstdFlags)
	return dl
}

// A defaultLogger provides a minimalistic logger satisfying the Logger
// interface, delegating to a stdlib log.Logger.
type defaultLogger struct {
	logger *log.Logger
}

// Log writes the arguments to the underlying logger. See log.Println.
func (l defaultLogger) Log(args ...interface{}) {
	l.logger.Println(args...)
}

+ 89 - 0
vendor/src/github.com/aws/aws-sdk-go/aws/param_validator.go

@@ -0,0 +1,89 @@
+package aws
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
// ValidateParameters is a request handler to validate the input parameters.
// Validating parameters only has meaning if done prior to the request being sent.
//
// All missing-required-parameter findings are aggregated into a single
// "InvalidParameter" error on the request.
func ValidateParameters(r *Request) {
	if r.ParamsFilled() {
		v := validator{errors: []string{}}
		v.validateAny(reflect.ValueOf(r.Params), "")

		if count := len(v.errors); count > 0 {
			format := "%d validation errors:\n- %s"
			msg := fmt.Sprintf(format, count, strings.Join(v.errors, "\n- "))
			r.Error = awserr.New("InvalidParameter", msg, nil)
		}
	}
}
+
+// A validator validates values. Collects validations errors which occurs.
+type validator struct {
+	errors []string
+}
+
+// validateAny will validate any struct, slice or map type. All validations
+// are also performed recursively for nested types.
+func (v *validator) validateAny(value reflect.Value, path string) {
+	value = reflect.Indirect(value)
+	if !value.IsValid() {
+		return
+	}
+
+	switch value.Kind() {
+	case reflect.Struct:
+		v.validateStruct(value, path)
+	case reflect.Slice:
+		for i := 0; i < value.Len(); i++ {
+			v.validateAny(value.Index(i), path+fmt.Sprintf("[%d]", i))
+		}
+	case reflect.Map:
+		for _, n := range value.MapKeys() {
+			v.validateAny(value.MapIndex(n), path+fmt.Sprintf("[%q]", n.String()))
+		}
+	}
+}
+
+// validateStruct will validate the struct value's fields. If the structure has
+// nested types those types will be validated also.
+func (v *validator) validateStruct(value reflect.Value, path string) {
+	prefix := "."
+	if path == "" {
+		prefix = ""
+	}
+
+	for i := 0; i < value.Type().NumField(); i++ {
+		f := value.Type().Field(i)
+		if strings.ToLower(f.Name[0:1]) == f.Name[0:1] {
+			continue
+		}
+		fvalue := value.FieldByName(f.Name)
+
+		notset := false
+		if f.Tag.Get("required") != "" {
+			switch fvalue.Kind() {
+			case reflect.Ptr, reflect.Slice, reflect.Map:
+				if fvalue.IsNil() {
+					notset = true
+				}
+			default:
+				if !fvalue.IsValid() {
+					notset = true
+				}
+			}
+		}
+
+		if notset {
+			msg := "missing required parameter: " + path + prefix + f.Name
+			v.errors = append(v.errors, msg)
+		} else {
+			v.validateAny(fvalue, path+prefix+f.Name)
+		}
+	}
+}

+ 312 - 0
vendor/src/github.com/aws/aws-sdk-go/aws/request.go

@@ -0,0 +1,312 @@
+package aws
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"reflect"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/awsutil"
+)
+
// A Request is the service request to be made.
type Request struct {
	*Service
	Handlers     Handlers       // per-request copy of the service's handlers
	Time         time.Time      // time the request was created
	ExpireTime   time.Duration  // presigned URL validity; 0 for normal requests
	Operation    *Operation     // API operation being invoked
	HTTPRequest  *http.Request  // underlying HTTP request
	HTTPResponse *http.Response // HTTP response, set by the Send handlers
	Body         io.ReadSeeker  // request payload; seekable so retries can rewind
	bodyStart    int64          // offset from beginning of Body that the request body starts
	Params       interface{}    // input parameters to serialize
	Error        error          // first error encountered in the handler chain
	Data         interface{}    // pointer the response payload is unmarshaled into
	RequestID    string         // service-assigned request ID
	RetryCount   uint           // number of retries performed so far
	Retryable    *bool          // tri-state retry decision (nil = undecided)
	RetryDelay   time.Duration  // delay applied before the next retry

	// built guards Build so it only runs the handlers once.
	built bool
}

// An Operation is the service API operation to be made.
type Operation struct {
	Name       string // API operation name
	HTTPMethod string // HTTP method; defaults to POST when empty
	HTTPPath   string // URL path; defaults to "/" when empty
	*Paginator
}

// Paginator keeps track of pagination configuration for an API operation.
type Paginator struct {
	InputTokens     []string // request fields that carry the page tokens
	OutputTokens    []string // response fields that yield next-page tokens
	LimitToken      string   // request field limiting page size
	TruncationToken string   // response field signaling more data remains
}
+
// NewRequest returns a new Request pointer for the service API
// operation and parameters.
//
// Params is any value of input parameters to be the request payload.
// Data is pointer value to an object which the request's response
// payload will be deserialized to.
func NewRequest(service *Service, operation *Operation, params interface{}, data interface{}) *Request {
	// Apply operation defaults: POST and "/".
	method := operation.HTTPMethod
	if method == "" {
		method = "POST"
	}
	p := operation.HTTPPath
	if p == "" {
		p = "/"
	}

	// NOTE(review): errors from http.NewRequest and url.Parse are discarded;
	// assumes service.Endpoint+p always forms a parseable URL — confirm.
	httpReq, _ := http.NewRequest(method, "", nil)
	httpReq.URL, _ = url.Parse(service.Endpoint + p)

	r := &Request{
		Service:     service,
		Handlers:    service.Handlers.copy(),
		Time:        time.Now(),
		ExpireTime:  0,
		Operation:   operation,
		HTTPRequest: httpReq,
		Body:        nil,
		Params:      params,
		Error:       nil,
		Data:        data,
	}
	// Start with an empty (but non-nil, seekable) body.
	r.SetBufferBody([]byte{})

	return r
}
+
// WillRetry returns if the request can be retried: there is an error, the
// request was marked retryable, and the retry budget is not exhausted.
func (r *Request) WillRetry() bool {
	return r.Error != nil && BoolValue(r.Retryable) && r.RetryCount < r.Service.MaxRetries()
}

// ParamsFilled returns if the request's parameters have been populated
// and the parameters are valid. False is returned if no parameters are
// provided or invalid.
//
// NOTE(review): Elem() assumes Params is a pointer (or interface); a
// non-pointer Params would panic here — confirm all callers pass pointers.
func (r *Request) ParamsFilled() bool {
	return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid()
}

// DataFilled returns true if the request's data for response deserialization
// target has been set and is a valid. False is returned if data is not
// set, or is invalid.
func (r *Request) DataFilled() bool {
	return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid()
}

// SetBufferBody will set the request's body bytes that will be sent to
// the service API.
func (r *Request) SetBufferBody(buf []byte) {
	r.SetReaderBody(bytes.NewReader(buf))
}

// SetStringBody sets the body of the request to be backed by a string.
func (r *Request) SetStringBody(s string) {
	r.SetReaderBody(strings.NewReader(s))
}

// SetReaderBody will set the request's body reader.
// The same reader backs both the HTTP request body and r.Body so it can
// be rewound for retries.
func (r *Request) SetReaderBody(reader io.ReadSeeker) {
	r.HTTPRequest.Body = ioutil.NopCloser(reader)
	r.Body = reader
}
+
// Presign returns the request's signed URL, valid for expireTime.
// Error will be returned if the signing fails.
func (r *Request) Presign(expireTime time.Duration) (string, error) {
	r.ExpireTime = expireTime
	r.Sign()
	if r.Error != nil {
		return "", r.Error
	}
	return r.HTTPRequest.URL.String(), nil
}

// Build will build the request's object so it can be signed and sent
// to the service. Build will also validate all the request's parameters.
// Any additional build Handlers set on this request will be run
// in the order they were set.
//
// The request will only be built once. Multiple calls to build will have
// no effect.
//
// If any Validate or Build errors occur the build will stop and the error
// which occurred will be returned.
func (r *Request) Build() error {
	if !r.built {
		r.Error = nil
		r.Handlers.Validate.Run(r)
		if r.Error != nil {
			return r.Error
		}
		r.Handlers.Build.Run(r)
		// Mark built only after the Build handlers ran, so a Validate
		// failure leaves the request rebuildable.
		r.built = true
	}

	return r.Error
}
+
// Sign will sign the request returning error if errors are encountered.
//
// Sign will build the request prior to signing. All Sign Handlers will
// be executed in the order they were set.
func (r *Request) Sign() error {
	r.Build()
	if r.Error != nil {
		return r.Error
	}

	r.Handlers.Sign.Run(r)
	return r.Error
}

// Send will send the request returning error if errors are encountered.
//
// Send will sign the request prior to sending. All Send Handlers will
// be executed in the order they were set.
//
// The loop retries as long as a failed stage's Retry/AfterRetry handlers
// clear r.Error; a non-nil r.Error after those handlers aborts the loop.
func (r *Request) Send() error {
	for {
		r.Sign()
		if r.Error != nil {
			return r.Error
		}

		if BoolValue(r.Retryable) {
			// Re-seek the body back to the original point in for a retry so that
			// send will send the body's contents again in the upcoming request.
			r.Body.Seek(r.bodyStart, 0)
		}
		r.Retryable = nil

		r.Handlers.Send.Run(r)
		if r.Error != nil {
			r.Handlers.Retry.Run(r)
			r.Handlers.AfterRetry.Run(r)
			if r.Error != nil {
				return r.Error
			}
			continue
		}

		// Response received: extract metadata, then validate the status.
		r.Handlers.UnmarshalMeta.Run(r)
		r.Handlers.ValidateResponse.Run(r)
		if r.Error != nil {
			r.Handlers.UnmarshalError.Run(r)
			r.Handlers.Retry.Run(r)
			r.Handlers.AfterRetry.Run(r)
			if r.Error != nil {
				return r.Error
			}
			continue
		}

		r.Handlers.Unmarshal.Run(r)
		if r.Error != nil {
			r.Handlers.Retry.Run(r)
			r.Handlers.AfterRetry.Run(r)
			if r.Error != nil {
				return r.Error
			}
			continue
		}

		break
	}

	return nil
}
+
+// HasNextPage returns true if this request has more pages of data available.
+func (r *Request) HasNextPage() bool {
+	return r.nextPageTokens() != nil
+}
+
+// nextPageTokens returns the tokens to use when asking for the next page of
+// data.
+func (r *Request) nextPageTokens() []interface{} {
+	if r.Operation.Paginator == nil {
+		return nil
+	}
+
+	if r.Operation.TruncationToken != "" {
+		tr := awsutil.ValuesAtAnyPath(r.Data, r.Operation.TruncationToken)
+		if tr == nil || len(tr) == 0 {
+			return nil
+		}
+		switch v := tr[0].(type) {
+		case bool:
+			if v == false {
+				return nil
+			}
+		}
+	}
+
+	found := false
+	tokens := make([]interface{}, len(r.Operation.OutputTokens))
+
+	for i, outtok := range r.Operation.OutputTokens {
+		v := awsutil.ValuesAtAnyPath(r.Data, outtok)
+		if v != nil && len(v) > 0 {
+			found = true
+			tokens[i] = v[0]
+		}
+	}
+
+	if found {
+		return tokens
+	}
+	return nil
+}
+
// NextPage returns a new Request that can be executed to return the next
// page of result data. Call .Send() on this request to execute it.
// Nil is returned when there is no further page.
func (r *Request) NextPage() *Request {
	tokens := r.nextPageTokens()
	if tokens == nil {
		return nil
	}

	// Fresh output container of the same concrete type as r.Data, and a
	// copy of the params with the page tokens injected.
	data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface()
	nr := NewRequest(r.Service, r.Operation, awsutil.CopyOf(r.Params), data)
	for i, intok := range nr.Operation.InputTokens {
		awsutil.SetValueAtAnyPath(nr.Params, intok, tokens[i])
	}
	return nr
}

// EachPage iterates over each page of a paginated request object. The fn
// parameter should be a function with the following sample signature:
//
//   func(page *T, lastPage bool) bool {
//       return true // return false to stop iterating
//   }
//
// Where "T" is the structure type matching the output structure of the given
// operation. For example, a request object generated by
// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput
// as the structure "T". The lastPage value represents whether the page is
// the last page of data or not. The return value of this function should
// return true to keep iterating or false to stop.
func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error {
	for page := r; page != nil; page = page.NextPage() {
		// NOTE(review): fn is invoked before page.Error is checked, so fn
		// may observe partially-populated Data after a failed Send — confirm
		// this ordering is intended.
		page.Send()
		shouldContinue := fn(page.Data, !page.HasNextPage())
		if page.Error != nil || !shouldContinue {
			return page.Error
		}
	}

	return nil
}

+ 194 - 0
vendor/src/github.com/aws/aws-sdk-go/aws/service.go

@@ -0,0 +1,194 @@
+package aws
+
+import (
+	"fmt"
+	"math"
+	"math/rand"
+	"net/http"
+	"net/http/httputil"
+	"regexp"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/internal/endpoints"
+)
+
// A Service implements the base service request and response handling
// used by all services.
type Service struct {
	Config            *Config                      // client configuration
	Handlers          Handlers                     // default handler chains copied into each Request
	ServiceName       string                       // endpoint-resolution name of the service
	APIVersion        string                       // service API version
	Endpoint          string                       // resolved endpoint URL
	SigningName       string                       // name used when signing requests
	SigningRegion     string                       // region used when signing requests
	JSONVersion       string                       // JSON protocol version, when applicable
	TargetPrefix      string                       // X-Amz-Target prefix, when applicable
	RetryRules        func(*Request) time.Duration // computes the delay before a retry
	ShouldRetry       func(*Request) bool          // decides whether a request is retried
	DefaultMaxRetries uint                         // retry count used when Config.MaxRetries is unset
}

// schemeRE detects whether an endpoint already carries a URL scheme.
var schemeRE = regexp.MustCompile("^([^:]+)://")

// NewService will return a pointer to a new Service object initialized.
func NewService(config *Config) *Service {
	svc := &Service{Config: config}
	svc.Initialize()
	return svc
}
+
// Initialize initializes the service: fills config defaults, installs the
// default handler chain, and resolves the endpoint.
func (s *Service) Initialize() {
	if s.Config == nil {
		s.Config = &Config{}
	}
	if s.Config.HTTPClient == nil {
		s.Config.HTTPClient = http.DefaultClient
	}

	if s.RetryRules == nil {
		s.RetryRules = retryRules
	}

	if s.ShouldRetry == nil {
		s.ShouldRetry = shouldRetry
	}

	s.DefaultMaxRetries = 3
	s.Handlers.Validate.PushBack(ValidateEndpointHandler)
	s.Handlers.Build.PushBack(UserAgentHandler)
	// Content length is computed at sign time so it reflects the final body.
	s.Handlers.Sign.PushBack(BuildContentLength)
	s.Handlers.Send.PushBack(SendHandler)
	s.Handlers.AfterRetry.PushBack(AfterRetryHandler)
	s.Handlers.ValidateResponse.PushBack(ValidateResponseHandler)
	s.AddDebugHandlers()
	s.buildEndpoint()

	if !BoolValue(s.Config.DisableParamValidation) {
		s.Handlers.Validate.PushBack(ValidateParameters)
	}
}
+
// buildEndpoint builds the endpoint values the service will use to make requests with.
// An explicitly configured endpoint wins; otherwise the endpoint is resolved
// from the service name and region. A missing scheme is defaulted to https
// (or http when SSL is disabled).
func (s *Service) buildEndpoint() {
	if StringValue(s.Config.Endpoint) != "" {
		s.Endpoint = *s.Config.Endpoint
	} else {
		s.Endpoint, s.SigningRegion =
			endpoints.EndpointForRegion(s.ServiceName, StringValue(s.Config.Region))
	}

	if s.Endpoint != "" && !schemeRE.MatchString(s.Endpoint) {
		scheme := "https"
		if BoolValue(s.Config.DisableSSL) {
			scheme = "http"
		}
		s.Endpoint = scheme + "://" + s.Endpoint
	}
}

// AddDebugHandlers injects debug logging handlers into the service to log request
// debug information. No-op unless the configured log level enables LogDebug.
func (s *Service) AddDebugHandlers() {
	if !s.Config.LogLevel.AtLeast(LogDebug) {
		return
	}

	// Log the signed request just before sending, and the response after.
	s.Handlers.Send.PushFront(logRequest)
	s.Handlers.Send.PushBack(logResponse)
}
+
+const logReqMsg = `DEBUG: Request %s/%s Details:
+---[ REQUEST POST-SIGN ]-----------------------------
+%s
+-----------------------------------------------------`
+
+func logRequest(r *Request) {
+	logBody := r.Config.LogLevel.Matches(LogDebugWithHTTPBody)
+	dumpedBody, _ := httputil.DumpRequestOut(r.HTTPRequest, logBody)
+
+	r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ServiceName, r.Operation.Name, string(dumpedBody)))
+}
+
+const logRespMsg = `DEBUG: Response %s/%s Details:
+---[ RESPONSE ]--------------------------------------
+%s
+-----------------------------------------------------`
+
+func logResponse(r *Request) {
+	var msg = "no reponse data"
+	if r.HTTPResponse != nil {
+		logBody := r.Config.LogLevel.Matches(LogDebugWithHTTPBody)
+		dumpedBody, _ := httputil.DumpResponse(r.HTTPResponse, logBody)
+		msg = string(dumpedBody)
+	} else if r.Error != nil {
+		msg = r.Error.Error()
+	}
+	r.Config.Logger.Log(fmt.Sprintf(logRespMsg, r.ServiceName, r.Operation.Name, msg))
+}
+
// MaxRetries returns the number of maximum retries the service will use to make
// an individual API request. A negative configured value selects the
// service's default.
//
// NOTE(review): a nil Config.MaxRetries yields IntValue == 0, i.e. zero
// retries rather than the default — assumes Config defaults it to a negative
// sentinel elsewhere; confirm.
func (s *Service) MaxRetries() uint {
	if IntValue(s.Config.MaxRetries) < 0 {
		return s.DefaultMaxRetries
	}
	return uint(IntValue(s.Config.MaxRetries))
}

// seededRand is the jitter source for retry backoff delays.
var seededRand = rand.New(rand.NewSource(time.Now().UnixNano()))

// retryRules returns the delay duration before retrying this request again.
// Exponential backoff (2^RetryCount) scaled by a random 30-59ms jitter.
func retryRules(r *Request) time.Duration {

	delay := int(math.Pow(2, float64(r.RetryCount))) * (seededRand.Intn(30) + 30)
	return time.Duration(delay) * time.Millisecond
}
+
// retryableCodes is a collection of service response codes which are retry-able
// without any further action.
var retryableCodes = map[string]struct{}{
	"RequestError":                           {},
	"ProvisionedThroughputExceededException": {},
	"Throttling":                             {},
	"ThrottlingException":                    {},
	"RequestLimitExceeded":                   {},
	"RequestThrottled":                       {},
}

// credsExpiredCodes is a collection of error codes which signify the credentials
// need to be refreshed. Expired tokens require refreshing of credentials, and
// resigning before the request can be retried.
var credsExpiredCodes = map[string]struct{}{
	"ExpiredToken":          {},
	"ExpiredTokenException": {},
	"RequestExpired":        {}, // EC2 Only
}

// isCodeRetryable reports whether the error code may be retried, either as
// a directly retryable code or as an expired-credentials code.
func isCodeRetryable(code string) bool {
	_, retryable := retryableCodes[code]
	return retryable || isCodeExpiredCreds(code)
}

// isCodeExpiredCreds reports whether the error code indicates expired
// credentials.
func isCodeExpiredCreds(code string) bool {
	_, expired := credsExpiredCodes[code]
	return expired
}
+
+// shouldRetry returns if the request should be retried.
+func shouldRetry(r *Request) bool {
+	if r.HTTPResponse.StatusCode >= 500 {
+		return true
+	}
+	if r.Error != nil {
+		if err, ok := r.Error.(awserr.Error); ok {
+			return isCodeRetryable(err.Code())
+		}
+	}
+	return false
+}

+ 55 - 0
vendor/src/github.com/aws/aws-sdk-go/aws/types.go

@@ -0,0 +1,55 @@
+package aws
+
+import (
+	"io"
+)
+
+// ReadSeekCloser wraps a io.Reader returning a ReaderSeakerCloser
+func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
+	return ReaderSeekerCloser{r}
+}
+
+// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and
+// io.Closer interfaces to the underlying object if they are available.
+type ReaderSeekerCloser struct {
+	r io.Reader
+}
+
+// Read reads from the reader up to size of p. The number of bytes read, and
+// error if it occurred will be returned.
+//
+// If the reader is not an io.Reader zero bytes read, and nil error will be returned.
+//
+// Performs the same functionality as io.Reader Read
+func (r ReaderSeekerCloser) Read(p []byte) (int, error) {
+	switch t := r.r.(type) {
+	case io.Reader:
+		return t.Read(p)
+	}
+	return 0, nil
+}
+
+// Seek sets the offset for the next Read to offset, interpreted according to
+// whence: 0 means relative to the origin of the file, 1 means relative to the
+// current offset, and 2 means relative to the end. Seek returns the new offset
+// and an error, if any.
+//
+// If the ReaderSeekerCloser is not an io.Seeker nothing will be done.
+func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) {
+	switch t := r.r.(type) {
+	case io.Seeker:
+		return t.Seek(offset, whence)
+	}
+	return int64(0), nil
+}
+
+// Close closes the ReaderSeekerCloser.
+//
+// If the ReaderSeekerCloser is not an io.Closer nothing will be done.
+func (r ReaderSeekerCloser) Close() error {
+	switch t := r.r.(type) {
+	case io.Closer:
+		return t.Close()
+	}
+	return nil
+}

+ 8 - 0
vendor/src/github.com/aws/aws-sdk-go/aws/version.go

@@ -0,0 +1,8 @@
+// Package aws provides core functionality for making requests to AWS services.
+package aws
+
// SDK identification constants.
const (
	// SDKName is the name of this AWS SDK.
	SDKName = "aws-sdk-go"

	// SDKVersion is the version of this SDK.
	SDKVersion = "0.7.1"
)

+ 31 - 0
vendor/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints.go

@@ -0,0 +1,31 @@
+// Package endpoints validates regional endpoints for services.
+package endpoints
+
+//go:generate go run ../model/cli/gen-endpoints/main.go endpoints.json endpoints_map.go
+//go:generate gofmt -s -w endpoints_map.go
+
+import "strings"
+
+// EndpointForRegion returns an endpoint and its signing region for a service and region.
+// if the service and region pair are not found endpoint and signingRegion will be empty.
+func EndpointForRegion(svcName, region string) (endpoint, signingRegion string) {
+	derivedKeys := []string{
+		region + "/" + svcName,
+		region + "/*",
+		"*/" + svcName,
+		"*/*",
+	}
+
+	for _, key := range derivedKeys {
+		if val, ok := endpointsMap.Endpoints[key]; ok {
+			ep := val.Endpoint
+			ep = strings.Replace(ep, "{region}", region, -1)
+			ep = strings.Replace(ep, "{service}", svcName, -1)
+
+			endpoint = ep
+			signingRegion = val.SigningRegion
+			return
+		}
+	}
+	return
+}

+ 77 - 0
vendor/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints.json

@@ -0,0 +1,77 @@
+{
+  "version": 2,
+  "endpoints": {
+    "*/*": {
+      "endpoint": "{service}.{region}.amazonaws.com"
+    },
+    "cn-north-1/*": {
+      "endpoint": "{service}.{region}.amazonaws.com.cn",
+      "signatureVersion": "v4"
+    },
+    "us-gov-west-1/iam": {
+      "endpoint": "iam.us-gov.amazonaws.com"
+    },
+    "us-gov-west-1/sts": {
+      "endpoint": "sts.us-gov-west-1.amazonaws.com"
+    },
+    "us-gov-west-1/s3": {
+      "endpoint": "s3-{region}.amazonaws.com"
+    },
+    "*/cloudfront": {
+      "endpoint": "cloudfront.amazonaws.com",
+      "signingRegion": "us-east-1"
+    },
+    "*/cloudsearchdomain": {
+      "endpoint": "",
+      "signingRegion": "us-east-1"
+    },
+    "*/iam": {
+      "endpoint": "iam.amazonaws.com",
+      "signingRegion": "us-east-1"
+    },
+    "*/importexport": {
+      "endpoint": "importexport.amazonaws.com",
+      "signingRegion": "us-east-1"
+    },
+    "*/route53": {
+      "endpoint": "route53.amazonaws.com",
+      "signingRegion": "us-east-1"
+    },
+    "*/sts": {
+      "endpoint": "sts.amazonaws.com",
+      "signingRegion": "us-east-1"
+    },
+    "us-east-1/sdb": {
+      "endpoint": "sdb.amazonaws.com",
+      "signingRegion": "us-east-1"
+    },
+    "us-east-1/s3": {
+      "endpoint": "s3.amazonaws.com"
+    },
+    "us-west-1/s3": {
+      "endpoint": "s3-{region}.amazonaws.com"
+    },
+    "us-west-2/s3": {
+      "endpoint": "s3-{region}.amazonaws.com"
+    },
+    "eu-west-1/s3": {
+      "endpoint": "s3-{region}.amazonaws.com"
+    },
+    "ap-southeast-1/s3": {
+      "endpoint": "s3-{region}.amazonaws.com"
+    },
+    "ap-southeast-2/s3": {
+      "endpoint": "s3-{region}.amazonaws.com"
+    },
+    "ap-northeast-1/s3": {
+      "endpoint": "s3-{region}.amazonaws.com"
+    },
+    "sa-east-1/s3": {
+      "endpoint": "s3-{region}.amazonaws.com"
+    },
+    "eu-central-1/s3": {
+      "endpoint": "{service}.{region}.amazonaws.com",
+      "signatureVersion": "v4"
+    }
+  }
+}

+ 89 - 0
vendor/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints_map.go

@@ -0,0 +1,89 @@
+package endpoints
+
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
// endpointStruct mirrors the schema of endpoints.json: a format version plus
// a map keyed by "region/service", where either part may be the wildcard "*".
type endpointStruct struct {
	Version   int
	Endpoints map[string]endpointEntry
}

// endpointEntry is one resolved endpoint. Endpoint may contain {region} and
// {service} placeholders; SigningRegion, when non-empty, overrides the
// request region for signing.
type endpointEntry struct {
	Endpoint      string
	SigningRegion string
}
+
+var endpointsMap = endpointStruct{
+	Version: 2,
+	Endpoints: map[string]endpointEntry{
+		"*/*": {
+			Endpoint: "{service}.{region}.amazonaws.com",
+		},
+		"*/cloudfront": {
+			Endpoint:      "cloudfront.amazonaws.com",
+			SigningRegion: "us-east-1",
+		},
+		"*/cloudsearchdomain": {
+			Endpoint:      "",
+			SigningRegion: "us-east-1",
+		},
+		"*/iam": {
+			Endpoint:      "iam.amazonaws.com",
+			SigningRegion: "us-east-1",
+		},
+		"*/importexport": {
+			Endpoint:      "importexport.amazonaws.com",
+			SigningRegion: "us-east-1",
+		},
+		"*/route53": {
+			Endpoint:      "route53.amazonaws.com",
+			SigningRegion: "us-east-1",
+		},
+		"*/sts": {
+			Endpoint:      "sts.amazonaws.com",
+			SigningRegion: "us-east-1",
+		},
+		"ap-northeast-1/s3": {
+			Endpoint: "s3-{region}.amazonaws.com",
+		},
+		"ap-southeast-1/s3": {
+			Endpoint: "s3-{region}.amazonaws.com",
+		},
+		"ap-southeast-2/s3": {
+			Endpoint: "s3-{region}.amazonaws.com",
+		},
+		"cn-north-1/*": {
+			Endpoint: "{service}.{region}.amazonaws.com.cn",
+		},
+		"eu-central-1/s3": {
+			Endpoint: "{service}.{region}.amazonaws.com",
+		},
+		"eu-west-1/s3": {
+			Endpoint: "s3-{region}.amazonaws.com",
+		},
+		"sa-east-1/s3": {
+			Endpoint: "s3-{region}.amazonaws.com",
+		},
+		"us-east-1/s3": {
+			Endpoint: "s3.amazonaws.com",
+		},
+		"us-east-1/sdb": {
+			Endpoint:      "sdb.amazonaws.com",
+			SigningRegion: "us-east-1",
+		},
+		"us-gov-west-1/iam": {
+			Endpoint: "iam.us-gov.amazonaws.com",
+		},
+		"us-gov-west-1/s3": {
+			Endpoint: "s3-{region}.amazonaws.com",
+		},
+		"us-gov-west-1/sts": {
+			Endpoint: "sts.us-gov-west-1.amazonaws.com",
+		},
+		"us-west-1/s3": {
+			Endpoint: "s3-{region}.amazonaws.com",
+		},
+		"us-west-2/s3": {
+			Endpoint: "s3-{region}.amazonaws.com",
+		},
+	},
+}

+ 199 - 0
vendor/src/github.com/aws/aws-sdk-go/internal/protocol/json/jsonutil/build.go

@@ -0,0 +1,199 @@
+// Package jsonutil provides JSON serialisation of AWS requests and responses.
+package jsonutil
+
+import (
+	"bytes"
+	"encoding/base64"
+	"fmt"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// BuildJSON builds a JSON string for a given object v.
+func BuildJSON(v interface{}) ([]byte, error) {
+	var buf bytes.Buffer
+
+	err := buildAny(reflect.ValueOf(v), &buf, "")
+	return buf.Bytes(), err
+}
+
// buildAny serialises value into buf as JSON, dispatching to the structure,
// list, map, or scalar serialiser based on the value's resolved category.
func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
	value = reflect.Indirect(value)
	if !value.IsValid() {
		return nil
	}

	vtype := value.Type()

	// An explicit "type" struct tag wins; otherwise the category is inferred
	// from the reflect kind. time.Time and []byte are deliberately excluded
	// here so they fall through to the scalar serialiser.
	t := tag.Get("type")
	if t == "" {
		switch vtype.Kind() {
		case reflect.Struct:
			// also it can't be a time object
			if _, ok := value.Interface().(time.Time); !ok {
				t = "structure"
			}
		case reflect.Slice:
			// also it can't be a byte slice
			if _, ok := value.Interface().([]byte); !ok {
				t = "list"
			}
		case reflect.Map:
			t = "map"
		}
	}

	switch t {
	case "structure":
		if field, ok := vtype.FieldByName("SDKShapeTraits"); ok {
			// Shape-level serialisation hints live on the SDKShapeTraits tag.
			tag = field.Tag
		}
		return buildStruct(value, buf, tag)
	case "list":
		return buildList(value, buf, tag)
	case "map":
		return buildMap(value, buf, tag)
	default:
		return buildScalar(value, buf, tag)
	}
}
+
// buildStruct serialises a struct as a JSON object into buf. Unset (nil
// pointer/slice/map), unexported, and non-body ("location"-tagged) fields are
// omitted. The JSON member name comes from the "locationName" tag when
// present, else the Go field name.
func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
	if !value.IsValid() {
		return nil
	}

	buf.WriteString("{")

	// First pass: collect the fields that will actually be emitted, so the
	// comma placement below can rely on an exact count.
	t, fields := value.Type(), []*reflect.StructField{}
	for i := 0; i < t.NumField(); i++ {
		field := t.Field(i)
		member := value.FieldByName(field.Name)
		if (member.Kind() == reflect.Ptr || member.Kind() == reflect.Slice || member.Kind() == reflect.Map) && member.IsNil() {
			continue // ignore unset fields
		}
		if c := field.Name[0:1]; strings.ToLower(c) == c {
			continue // ignore unexported fields
		}
		if field.Tag.Get("location") != "" {
			continue // ignore non-body elements
		}

		fields = append(fields, &field)
	}

	// Second pass: emit each member in declaration order.
	for i, field := range fields {
		member := value.FieldByName(field.Name)

		// figure out what this field is called
		name := field.Name
		if locName := field.Tag.Get("locationName"); locName != "" {
			name = locName
		}

		buf.WriteString(fmt.Sprintf("%q:", name))

		err := buildAny(member, buf, field.Tag)
		if err != nil {
			return err
		}

		if i < len(fields)-1 {
			buf.WriteString(",")
		}
	}

	buf.WriteString("}")

	return nil
}
+
+func buildList(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
+	buf.WriteString("[")
+
+	for i := 0; i < value.Len(); i++ {
+		buildAny(value.Index(i), buf, "")
+
+		if i < value.Len()-1 {
+			buf.WriteString(",")
+		}
+	}
+
+	buf.WriteString("]")
+
+	return nil
+}
+
+func buildMap(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
+	buf.WriteString("{")
+
+	keys := make([]string, value.Len())
+	for i, n := range value.MapKeys() {
+		keys[i] = n.String()
+	}
+	sort.Strings(keys)
+
+	for i, k := range keys {
+		buf.WriteString(fmt.Sprintf("%q:", k))
+		buildAny(value.MapIndex(reflect.ValueOf(k)), buf, "")
+
+		if i < len(keys)-1 {
+			buf.WriteString(",")
+		}
+	}
+
+	buf.WriteString("}")
+
+	return nil
+}
+
// buildScalar writes one scalar value to buf: strings are JSON-escaped,
// []byte is base64-encoded, bool/int64/float64 use their canonical text
// forms, and time.Time is encoded as epoch seconds (UTC). Any other type is
// an unsupported-value error.
func buildScalar(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
	switch converted := value.Interface().(type) {
	case string:
		writeString(converted, buf)
	case []byte:
		// nil byte slices were filtered by the callers; guard anyway.
		if !value.IsNil() {
			buf.WriteString(fmt.Sprintf("%q", base64.StdEncoding.EncodeToString(converted)))
		}
	case bool:
		buf.WriteString(strconv.FormatBool(converted))
	case int64:
		buf.WriteString(strconv.FormatInt(converted, 10))
	case float64:
		buf.WriteString(strconv.FormatFloat(converted, 'f', -1, 64))
	case time.Time:
		buf.WriteString(strconv.FormatInt(converted.UTC().Unix(), 10))
	default:
		return fmt.Errorf("unsupported JSON value %v (%s)", value.Interface(), value.Type())
	}
	return nil
}
+
+func writeString(s string, buf *bytes.Buffer) {
+	buf.WriteByte('"')
+	for _, r := range s {
+		if r == '"' {
+			buf.WriteString(`\"`)
+		} else if r == '\\' {
+			buf.WriteString(`\\`)
+		} else if r == '\b' {
+			buf.WriteString(`\b`)
+		} else if r == '\f' {
+			buf.WriteString(`\f`)
+		} else if r == '\r' {
+			buf.WriteString(`\r`)
+		} else if r == '\t' {
+			buf.WriteString(`\t`)
+		} else if r == '\n' {
+			buf.WriteString(`\n`)
+		} else if r < 32 {
+			fmt.Fprintf(buf, "\\u%0.4x", r)
+		} else {
+			buf.WriteRune(r)
+		}
+	}
+	buf.WriteByte('"')
+}

+ 214 - 0
vendor/src/github.com/aws/aws-sdk-go/internal/protocol/json/jsonutil/unmarshal.go

@@ -0,0 +1,214 @@
+package jsonutil
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"reflect"
+	"strings"
+	"time"
+)
+
+// UnmarshalJSON reads a stream and unmarshals the results in object v.
+func UnmarshalJSON(v interface{}, stream io.Reader) error {
+	var out interface{}
+
+	b, err := ioutil.ReadAll(stream)
+	if err != nil {
+		return err
+	}
+
+	if len(b) == 0 {
+		return nil
+	}
+
+	if err := json.Unmarshal(b, &out); err != nil {
+		return err
+	}
+
+	return unmarshalAny(reflect.ValueOf(v), out, "")
+}
+
// unmarshalAny stores decoded JSON data into value, dispatching to the
// structure, list, map, or scalar decoder based on value's resolved category.
func unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error {
	vtype := value.Type()
	if vtype.Kind() == reflect.Ptr {
		vtype = vtype.Elem() // check kind of actual element type
	}

	// An explicit "type" struct tag wins; otherwise the category is inferred
	// from the reflect kind. *time.Time and []byte are deliberately excluded
	// here so they fall through to the scalar decoder.
	t := tag.Get("type")
	if t == "" {
		switch vtype.Kind() {
		case reflect.Struct:
			// also it can't be a time object
			if _, ok := value.Interface().(*time.Time); !ok {
				t = "structure"
			}
		case reflect.Slice:
			// also it can't be a byte slice
			if _, ok := value.Interface().([]byte); !ok {
				t = "list"
			}
		case reflect.Map:
			t = "map"
		}
	}

	switch t {
	case "structure":
		if field, ok := vtype.FieldByName("SDKShapeTraits"); ok {
			// Shape-level decoding hints live on the SDKShapeTraits tag.
			tag = field.Tag
		}
		return unmarshalStruct(value, data, tag)
	case "list":
		return unmarshalList(value, data, tag)
	case "map":
		return unmarshalMap(value, data, tag)
	default:
		return unmarshalScalar(value, data, tag)
	}
}
+
// unmarshalStruct fills a struct from a decoded JSON object, allocating the
// struct when value is a nil pointer. A "payload" trait redirects the whole
// document into that named member; otherwise members are matched by their
// "locationName" tag or field name. Unexported fields are skipped; a nil
// datum is a no-op.
func unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error {
	if data == nil {
		return nil
	}
	mapData, ok := data.(map[string]interface{})
	if !ok {
		return fmt.Errorf("JSON value is not a structure (%#v)", data)
	}

	t := value.Type()
	if value.Kind() == reflect.Ptr {
		if value.IsNil() { // create the structure if it's nil
			s := reflect.New(value.Type().Elem())
			value.Set(s)
			value = s
		}

		value = value.Elem()
		t = t.Elem()
	}

	// unwrap any payloads
	if payload := tag.Get("payload"); payload != "" {
		field, _ := t.FieldByName(payload)
		return unmarshalAny(value.FieldByName(payload), data, field.Tag)
	}

	for i := 0; i < t.NumField(); i++ {
		field := t.Field(i)
		if c := field.Name[0:1]; strings.ToLower(c) == c {
			continue // ignore unexported fields
		}

		// figure out what this field is called
		name := field.Name
		if locName := field.Tag.Get("locationName"); locName != "" {
			name = locName
		}

		member := value.FieldByName(field.Name)
		err := unmarshalAny(member, mapData[name], field.Tag)
		if err != nil {
			return err
		}
	}
	return nil
}
+
// unmarshalList fills a slice from a decoded JSON array, allocating the slice
// to the array's length when it is nil. A nil datum is a no-op.
// NOTE(review): a pre-allocated slice shorter than the JSON array would make
// value.Index(i) panic — callers appear to always pass nil slices; confirm.
func unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error {
	if data == nil {
		return nil
	}
	listData, ok := data.([]interface{})
	if !ok {
		return fmt.Errorf("JSON value is not a list (%#v)", data)
	}

	if value.IsNil() {
		l := len(listData)
		value.Set(reflect.MakeSlice(value.Type(), l, l))
	}

	for i, c := range listData {
		err := unmarshalAny(value.Index(i), c, "")
		if err != nil {
			return err
		}
	}

	return nil
}
+
+func unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error {
+	if data == nil {
+		return nil
+	}
+	mapData, ok := data.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("JSON value is not a map (%#v)", data)
+	}
+
+	if value.IsNil() {
+		value.Set(reflect.MakeMap(value.Type()))
+	}
+
+	for k, v := range mapData {
+		kvalue := reflect.ValueOf(k)
+		vvalue := reflect.New(value.Type().Elem()).Elem()
+
+		unmarshalAny(vvalue, v, "")
+		value.SetMapIndex(kvalue, vvalue)
+	}
+
+	return nil
+}
+
// unmarshalScalar stores one decoded JSON scalar into value. Strings fill
// *string destinations or are base64-decoded into []byte; JSON numbers
// (float64) fill *int64, *float64 or *time.Time (epoch seconds, UTC); bools
// fill *bool. A nil datum is a no-op; any other combination is an error.
func unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error {
	errf := func() error {
		return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type())
	}

	switch d := data.(type) {
	case nil:
		return nil // nothing to do here
	case string:
		switch value.Interface().(type) {
		case *string:
			value.Set(reflect.ValueOf(&d))
		case []byte:
			b, err := base64.StdEncoding.DecodeString(d)
			if err != nil {
				return err
			}
			value.Set(reflect.ValueOf(b))
		default:
			return errf()
		}
	case float64:
		switch value.Interface().(type) {
		case *int64:
			// JSON has no integer type; truncate the float64 representation.
			di := int64(d)
			value.Set(reflect.ValueOf(&di))
		case *float64:
			value.Set(reflect.ValueOf(&d))
		case *time.Time:
			t := time.Unix(int64(d), 0).UTC()
			value.Set(reflect.ValueOf(&t))
		default:
			return errf()
		}
	case bool:
		switch value.Interface().(type) {
		case *bool:
			value.Set(reflect.ValueOf(&d))
		default:
			return errf()
		}
	default:
		return fmt.Errorf("unsupported JSON value (%v)", data)
	}
	return nil
}

+ 98 - 0
vendor/src/github.com/aws/aws-sdk-go/internal/protocol/jsonrpc/jsonrpc.go

@@ -0,0 +1,98 @@
+// Package jsonrpc provides JSON RPC utilities for serialisation of AWS
+// requests and responses.
+package jsonrpc
+
+//go:generate go run ../../fixtures/protocol/generate.go ../../fixtures/protocol/input/json.json build_test.go
+//go:generate go run ../../fixtures/protocol/generate.go ../../fixtures/protocol/output/json.json unmarshal_test.go
+
+import (
+	"encoding/json"
+	"io/ioutil"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/internal/protocol/json/jsonutil"
+)
+
+var emptyJSON = []byte("{}")
+
// Build builds a JSON payload for a JSON RPC request.
//
// The operation parameters are serialised to JSON ("{}" when none are set).
// The body is attached unless it is the empty payload on a service with no
// target prefix. The X-Amz-Target and versioned Content-Type headers are
// added from the service metadata when configured.
func Build(req *aws.Request) {
	var buf []byte
	var err error
	if req.ParamsFilled() {
		buf, err = jsonutil.BuildJSON(req.Params)
		if err != nil {
			req.Error = awserr.New("SerializationError", "failed encoding JSON RPC request", err)
			return
		}
	} else {
		buf = emptyJSON
	}

	if req.Service.TargetPrefix != "" || string(buf) != "{}" {
		req.SetBufferBody(buf)
	}

	if req.Service.TargetPrefix != "" {
		// Target takes the form "<TargetPrefix>.<OperationName>".
		target := req.Service.TargetPrefix + "." + req.Operation.Name
		req.HTTPRequest.Header.Add("X-Amz-Target", target)
	}
	if req.Service.JSONVersion != "" {
		jsonVersion := req.Service.JSONVersion
		req.HTTPRequest.Header.Add("Content-Type", "application/x-amz-json-"+jsonVersion)
	}
}
+
+// Unmarshal unmarshals a response for a JSON RPC service.
+func Unmarshal(req *aws.Request) {
+	defer req.HTTPResponse.Body.Close()
+	if req.DataFilled() {
+		err := jsonutil.UnmarshalJSON(req.Data, req.HTTPResponse.Body)
+		if err != nil {
+			req.Error = awserr.New("SerializationError", "failed decoding JSON RPC response", err)
+		}
+	}
+	return
+}
+
// UnmarshalMeta unmarshals headers from a response for a JSON RPC service:
// the x-amzn-requestid header is captured into req.RequestID.
func UnmarshalMeta(req *aws.Request) {
	req.RequestID = req.HTTPResponse.Header.Get("x-amzn-requestid")
}
+
// UnmarshalError unmarshals an error response for a JSON RPC service,
// converting the JSON error body into an awserr.RequestFailure on req.Error.
// The response body is always closed.
func UnmarshalError(req *aws.Request) {
	defer req.HTTPResponse.Body.Close()
	bodyBytes, err := ioutil.ReadAll(req.HTTPResponse.Body)
	if err != nil {
		req.Error = awserr.New("SerializationError", "failed reading JSON RPC error response", err)
		return
	}
	if len(bodyBytes) == 0 {
		// Empty body: fall back to the HTTP status line as the message.
		req.Error = awserr.NewRequestFailure(
			awserr.New("SerializationError", req.HTTPResponse.Status, nil),
			req.HTTPResponse.StatusCode,
			"",
		)
		return
	}
	var jsonErr jsonErrorResponse
	if err := json.Unmarshal(bodyBytes, &jsonErr); err != nil {
		req.Error = awserr.New("SerializationError", "failed decoding JSON RPC error response", err)
		return
	}

	// The "__type" field may be namespace-qualified ("ns#ErrorCode"); keep
	// only the code portion after the '#'.
	codes := strings.SplitN(jsonErr.Code, "#", 2)
	req.Error = awserr.NewRequestFailure(
		awserr.New(codes[len(codes)-1], jsonErr.Message, nil),
		req.HTTPResponse.StatusCode,
		"",
	)
}
+
// jsonErrorResponse is the wire shape of a JSON RPC error body. Code arrives
// in the "__type" field and may be namespace-qualified ("ns#Code").
type jsonErrorResponse struct {
	Code    string `json:"__type"`
	Message string `json:"message"`
}

+ 212 - 0
vendor/src/github.com/aws/aws-sdk-go/internal/protocol/rest/build.go

@@ -0,0 +1,212 @@
+// Package rest provides RESTful serialisation of AWS requests and responses.
+package rest
+
+import (
+	"bytes"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"net/url"
+	"path"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
// RFC822 returns an RFC822 formatted timestamp for AWS protocols.
// Note the single-digit day ("2 Jan", no leading zero) and the literal "GMT"
// zone — this layout differs from Go's time.RFC1123.
const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT"
+
// noEscape marks the bytes that may appear unescaped in AWS URL paths.
var noEscape [256]bool

// init marks the unreserved characters (ALPHA / DIGIT / "-" / "." / "_" /
// "~"); AWS expects every other byte to be percent-encoded.
func init() {
	for i := 0; i < len(noEscape); i++ {
		switch {
		case i >= 'A' && i <= 'Z', i >= 'a' && i <= 'z', i >= '0' && i <= '9':
			noEscape[i] = true
		case i == '-', i == '.', i == '_', i == '~':
			noEscape[i] = true
		default:
			noEscape[i] = false
		}
	}
}
+
// Build builds the REST component of a service request: URI path elements,
// query-string values, headers, and (for non-structure payloads) the body.
func Build(r *aws.Request) {
	if r.ParamsFilled() {
		v := reflect.ValueOf(r.Params).Elem()
		buildLocationElements(r, v)
		buildBody(r, v)
	}
}
+
// buildLocationElements binds each exported, non-nil field of the params
// struct to its tagged request location: a prefixed header map ("headers"),
// a single header, a URI path element, or a query-string value. The first
// serialisation failure sets r.Error and stops the walk.
func buildLocationElements(r *aws.Request, v reflect.Value) {
	query := r.HTTPRequest.URL.Query()

	for i := 0; i < v.NumField(); i++ {
		m := v.Field(i)
		// Skip unexported fields (lower-case first letter).
		if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) {
			continue
		}

		if m.IsValid() {
			field := v.Type().Field(i)
			name := field.Tag.Get("locationName")
			if name == "" {
				name = field.Name
			}
			// Dereference pointer fields; nil pointers are simply skipped.
			if m.Kind() == reflect.Ptr {
				m = m.Elem()
			}
			if !m.IsValid() {
				continue
			}

			switch field.Tag.Get("location") {
			case "headers": // header maps
				buildHeaderMap(r, m, field.Tag.Get("locationName"))
			case "header":
				buildHeader(r, m, name)
			case "uri":
				buildURI(r, m, name)
			case "querystring":
				buildQueryString(r, m, name, query)
			}
		}
		if r.Error != nil {
			return
		}
	}

	r.HTTPRequest.URL.RawQuery = query.Encode()
	// Re-render the path through Opaque so the escaped form survives net/http.
	updatePath(r.HTTPRequest.URL, r.HTTPRequest.URL.Path)
}
+
// buildBody sets the request body from the member named by the
// SDKShapeTraits "payload" tag, unless that payload is a structure (handled
// by the protocol serialisers instead). Supported payload kinds are
// io.ReadSeeker, []byte and string; anything else sets a SerializationError.
func buildBody(r *aws.Request, v reflect.Value) {
	if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok {
		if payloadName := field.Tag.Get("payload"); payloadName != "" {
			pfield, _ := v.Type().FieldByName(payloadName)
			if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" {
				payload := reflect.Indirect(v.FieldByName(payloadName))
				if payload.IsValid() && payload.Interface() != nil {
					switch reader := payload.Interface().(type) {
					case io.ReadSeeker:
						r.SetReaderBody(reader)
					case []byte:
						r.SetBufferBody(reader)
					case string:
						r.SetStringBody(reader)
					default:
						r.Error = awserr.New("SerializationError",
							"failed to encode REST request",
							fmt.Errorf("unknown payload type %s", payload.Type()))
					}
				}
			}
		}
	}
}
+
// buildHeader serialises v and adds it as the request header `name`.
func buildHeader(r *aws.Request, v reflect.Value, name string) {
	str, err := convertType(v)
	if err != nil {
		r.Error = awserr.New("SerializationError", "failed to encode REST request", err)
	} else if str != nil {
		r.HTTPRequest.Header.Add(name, *str)
	}
}

// buildHeaderMap adds one request header per map entry, each header name
// being the location-name prefix followed by the map key.
func buildHeaderMap(r *aws.Request, v reflect.Value, prefix string) {
	for _, key := range v.MapKeys() {
		str, err := convertType(v.MapIndex(key))
		if err != nil {
			r.Error = awserr.New("SerializationError", "failed to encode REST request", err)
		} else if str != nil {
			r.HTTPRequest.Header.Add(prefix+key.String(), *str)
		}
	}
}

// buildURI substitutes v into the path placeholders "{name}" (escaped,
// including slashes) and "{name+}" (greedy — slashes left intact).
func buildURI(r *aws.Request, v reflect.Value, name string) {
	value, err := convertType(v)
	if err != nil {
		r.Error = awserr.New("SerializationError", "failed to encode REST request", err)
	} else if value != nil {
		uri := r.HTTPRequest.URL.Path
		uri = strings.Replace(uri, "{"+name+"}", EscapePath(*value, true), -1)
		uri = strings.Replace(uri, "{"+name+"+}", EscapePath(*value, false), -1)
		r.HTTPRequest.URL.Path = uri
	}
}

// buildQueryString serialises v into the query-string parameter `name`.
func buildQueryString(r *aws.Request, v reflect.Value, name string, query url.Values) {
	str, err := convertType(v)
	if err != nil {
		r.Error = awserr.New("SerializationError", "failed to encode REST request", err)
	} else if str != nil {
		query.Set(name, *str)
	}
}
+
+func updatePath(url *url.URL, urlPath string) {
+	scheme, query := url.Scheme, url.RawQuery
+
+	// clean up path
+	urlPath = path.Clean(urlPath)
+
+	// get formatted URL minus scheme so we can build this into Opaque
+	url.Scheme, url.Path, url.RawQuery = "", "", ""
+	s := url.String()
+	url.Scheme = scheme
+	url.RawQuery = query
+
+	// build opaque URI
+	url.Opaque = s + urlPath
+}
+
+// EscapePath escapes part of a URL path in Amazon style
+func EscapePath(path string, encodeSep bool) string {
+	var buf bytes.Buffer
+	for i := 0; i < len(path); i++ {
+		c := path[i]
+		if noEscape[c] || (c == '/' && !encodeSep) {
+			buf.WriteByte(c)
+		} else {
+			buf.WriteByte('%')
+			buf.WriteString(strings.ToUpper(strconv.FormatUint(uint64(c), 16)))
+		}
+	}
+	return buf.String()
+}
+
// convertType renders a scalar parameter as its REST string form: strings
// pass through, []byte is base64-encoded, bool/int64/float64 use strconv,
// and time.Time uses the AWS RFC822 layout in UTC. A nil/invalid value
// yields (nil, nil); unsupported types yield an error.
func convertType(v reflect.Value) (*string, error) {
	v = reflect.Indirect(v)
	if !v.IsValid() {
		return nil, nil
	}

	var str string
	switch value := v.Interface().(type) {
	case string:
		str = value
	case []byte:
		str = base64.StdEncoding.EncodeToString(value)
	case bool:
		str = strconv.FormatBool(value)
	case int64:
		str = strconv.FormatInt(value, 10)
	case float64:
		str = strconv.FormatFloat(value, 'f', -1, 64)
	case time.Time:
		str = value.UTC().Format(RFC822)
	default:
		err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type())
		return nil, err
	}
	return &str, nil
}

+ 45 - 0
vendor/src/github.com/aws/aws-sdk-go/internal/protocol/rest/payload.go

@@ -0,0 +1,45 @@
+package rest
+
+import "reflect"
+
// PayloadMember returns the payload field member of i if there is one, or nil.
// Only structure-typed payloads are returned; non-structure payloads yield nil.
func PayloadMember(i interface{}) interface{} {
	if i == nil {
		return nil
	}

	v := reflect.ValueOf(i).Elem()
	if !v.IsValid() {
		return nil
	}
	if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok {
		if payloadName := field.Tag.Get("payload"); payloadName != "" {
			field, _ := v.Type().FieldByName(payloadName)
			if field.Tag.Get("type") != "structure" {
				return nil
			}

			payload := v.FieldByName(payloadName)
			// NOTE(review): FieldByName on an existing field is always valid,
			// so the left side of this || is always true and the nil-pointer
			// guard on the right never runs — a nil payload pointer comes back
			// as a non-nil interface wrapping a typed nil. Possibly intended
			// to be "IsValid() && ..."; confirm before changing vendored code.
			if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) {
				return payload.Interface()
			}
		}
	}
	return nil
}
+
// PayloadType returns the type of a payload field member of i if there is one, or "".
func PayloadType(i interface{}) string {
	v := reflect.Indirect(reflect.ValueOf(i))
	if !v.IsValid() {
		return ""
	}

	traits, hasTraits := v.Type().FieldByName("SDKShapeTraits")
	if !hasTraits {
		return ""
	}
	payloadName := traits.Tag.Get("payload")
	if payloadName == "" {
		return ""
	}
	if member, ok := v.Type().FieldByName(payloadName); ok {
		return member.Tag.Get("type")
	}
	return ""
}

+ 174 - 0
vendor/src/github.com/aws/aws-sdk-go/internal/protocol/rest/unmarshal.go

@@ -0,0 +1,174 @@
+package rest
+
+import (
+	"encoding/base64"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
// Unmarshal unmarshals the REST component of a response in a REST service:
// the payload body plus any header/status-code bound output members.
func Unmarshal(r *aws.Request) {
	if r.DataFilled() {
		v := reflect.Indirect(reflect.ValueOf(r.Data))
		unmarshalBody(r, v)
		unmarshalLocationElements(r, v)
	}
}
+
// unmarshalBody copies the response body into the output member named by the
// SDKShapeTraits "payload" tag, when that member is a non-structure type.
// []byte and *string destinations are read fully into memory; stream
// destinations (io.ReadSeeker, aws.ReadSeekCloser, io.ReadCloser) receive
// the live response body.
// NOTE(review): streaming destinations hand the open body to the consumer —
// confirm callers close it.
func unmarshalBody(r *aws.Request, v reflect.Value) {
	if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok {
		if payloadName := field.Tag.Get("payload"); payloadName != "" {
			pfield, _ := v.Type().FieldByName(payloadName)
			if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" {
				payload := v.FieldByName(payloadName)
				if payload.IsValid() {
					switch payload.Interface().(type) {
					case []byte:
						b, err := ioutil.ReadAll(r.HTTPResponse.Body)
						if err != nil {
							r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
						} else {
							payload.Set(reflect.ValueOf(b))
						}
					case *string:
						b, err := ioutil.ReadAll(r.HTTPResponse.Body)
						if err != nil {
							r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
						} else {
							str := string(b)
							payload.Set(reflect.ValueOf(&str))
						}
					default:
						switch payload.Type().String() {
						case "io.ReadSeeker":
							// Wrap so the destination also satisfies Seek/Close.
							payload.Set(reflect.ValueOf(aws.ReadSeekCloser(r.HTTPResponse.Body)))
						case "aws.ReadSeekCloser", "io.ReadCloser":
							payload.Set(reflect.ValueOf(r.HTTPResponse.Body))
						default:
							r.Error = awserr.New("SerializationError",
								"failed to decode REST response",
								fmt.Errorf("unknown payload type %s", payload.Type()))
						}
					}
				}
			}
		}
	}
}
+
// unmarshalLocationElements fills exported output members bound by a
// "location" tag: the HTTP status code, a single named header, or a
// prefixed header map. The first decode failure sets r.Error and stops
// the walk.
func unmarshalLocationElements(r *aws.Request, v reflect.Value) {
	for i := 0; i < v.NumField(); i++ {
		m, field := v.Field(i), v.Type().Field(i)
		// Skip unexported fields (lower-case first letter).
		if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) {
			continue
		}

		if m.IsValid() {
			name := field.Tag.Get("locationName")
			if name == "" {
				name = field.Name
			}

			switch field.Tag.Get("location") {
			case "statusCode":
				unmarshalStatusCode(m, r.HTTPResponse.StatusCode)
			case "header":
				err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name))
				if err != nil {
					r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
					break
				}
			case "headers":
				prefix := field.Tag.Get("locationName")
				err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix)
				if err != nil {
					r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
					break
				}
			}
		}
		if r.Error != nil {
			return
		}
	}
}
+
+func unmarshalStatusCode(v reflect.Value, statusCode int) {
+	if !v.IsValid() {
+		return
+	}
+
+	switch v.Interface().(type) {
+	case *int64:
+		s := int64(statusCode)
+		v.Set(reflect.ValueOf(&s))
+	}
+}
+
+func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error {
+	switch r.Interface().(type) {
+	case map[string]*string: // we only support string map value types
+		out := map[string]*string{}
+		for k, v := range headers {
+			k = http.CanonicalHeaderKey(k)
+			if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) {
+				out[k[len(prefix):]] = &v[0]
+			}
+		}
+		r.Set(reflect.ValueOf(out))
+	}
+	return nil
+}
+
+func unmarshalHeader(v reflect.Value, header string) error {
+	if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) {
+		return nil
+	}
+
+	switch v.Interface().(type) {
+	case *string:
+		v.Set(reflect.ValueOf(&header))
+	case []byte:
+		b, err := base64.StdEncoding.DecodeString(header)
+		if err != nil {
+			return err
+		}
+		v.Set(reflect.ValueOf(&b))
+	case *bool:
+		b, err := strconv.ParseBool(header)
+		if err != nil {
+			return err
+		}
+		v.Set(reflect.ValueOf(&b))
+	case *int64:
+		i, err := strconv.ParseInt(header, 10, 64)
+		if err != nil {
+			return err
+		}
+		v.Set(reflect.ValueOf(&i))
+	case *float64:
+		f, err := strconv.ParseFloat(header, 64)
+		if err != nil {
+			return err
+		}
+		v.Set(reflect.ValueOf(&f))
+	case *time.Time:
+		t, err := time.Parse(RFC822, header)
+		if err != nil {
+			return err
+		}
+		v.Set(reflect.ValueOf(&t))
+	default:
+		err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type())
+		return err
+	}
+	return nil
+}

+ 364 - 0
vendor/src/github.com/aws/aws-sdk-go/internal/signer/v4/v4.go

@@ -0,0 +1,364 @@
+// Package v4 implements signing for AWS V4 signer
+package v4
+
+import (
+	"crypto/hmac"
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/internal/protocol/rest"
+)
+
+// Signing constants: the algorithm identifier used in both the Authorization
+// header and presigned query string, plus the ISO 8601 "basic" timestamp
+// formats used for X-Amz-Date (full) and the credential scope (date only).
+const (
+	authHeaderPrefix = "AWS4-HMAC-SHA256"
+	timeFormat       = "20060102T150405Z"
+	shortTimeFormat  = "20060102"
+)
+
+// ignoredHeaders are never included in the canonical (signed) header set.
+var ignoredHeaders = map[string]bool{
+	"Authorization":  true,
+	"Content-Type":   true,
+	"Content-Length": true,
+	"User-Agent":     true,
+}
+
+// signer holds all intermediate state built up while producing a V4 signature
+// for a single request. One signer is constructed per signing attempt and
+// then discarded.
+type signer struct {
+	Request     *http.Request
+	Time        time.Time
+	ExpireTime  time.Duration
+	ServiceName string
+	Region      string
+	CredValues  credentials.Value
+	Credentials *credentials.Credentials
+	Query       url.Values
+	Body        io.ReadSeeker
+	Debug       aws.LogLevelType
+	Logger      aws.Logger
+
+	// presign mode flag and pre-formatted timestamps
+	isPresign          bool
+	formattedTime      string
+	formattedShortTime string
+
+	// intermediate and final signing artifacts, populated by build()
+	signedHeaders    string
+	canonicalHeaders string
+	canonicalString  string
+	credentialString string
+	stringToSign     string
+	signature        string
+	authorization    string
+}
+
+// Sign signs an aws.Request with AWS Signature Version 4 using the service
+// config's Credentials object. Any error encountered is stored on req.Error.
+//
+// Signing is skipped if the credentials is the credentials.AnonymousCredentials
+// object.
+func Sign(req *aws.Request) {
+	// If the request does not need to be signed ignore the signing of the
+	// request if the AnonymousCredentials object is used.
+	if req.Service.Config.Credentials == credentials.AnonymousCredentials {
+		return
+	}
+
+	// Prefer the endpoint-specific signing region; fall back to the
+	// configured client region.
+	region := req.Service.SigningRegion
+	if region == "" {
+		region = aws.StringValue(req.Service.Config.Region)
+	}
+
+	// Prefer the endpoint-specific signing name; fall back to the service's
+	// own name.
+	name := req.Service.SigningName
+	if name == "" {
+		name = req.Service.ServiceName
+	}
+
+	s := signer{
+		Request:     req.HTTPRequest,
+		Time:        req.Time,
+		ExpireTime:  req.ExpireTime,
+		Query:       req.HTTPRequest.URL.Query(),
+		Body:        req.Body,
+		ServiceName: name,
+		Region:      region,
+		Credentials: req.Service.Config.Credentials,
+		Debug:       req.Service.Config.LogLevel.Value(),
+		Logger:      req.Service.Config.Logger,
+	}
+
+	req.Error = s.sign()
+}
+
+// sign performs the actual V4 signing (or presigning when ExpireTime is
+// non-zero): it fetches credentials, injects the security token, builds the
+// signature, and optionally logs the signing details.
+func (v4 *signer) sign() error {
+	// A non-zero expiry means the caller wants a presigned URL rather than
+	// an Authorization header.
+	if v4.ExpireTime != 0 {
+		v4.isPresign = true
+	}
+
+	if v4.isRequestSigned() {
+		if !v4.Credentials.IsExpired() {
+			// If the request is already signed, and the credentials have not
+			// expired yet ignore the signing request.
+			return nil
+		}
+
+		// The credentials have expired for this request. The current signature
+		// is invalid, and the request needs to be re-signed or it will fail.
+		if v4.isPresign {
+			v4.removePresign()
+			// Update the request's query string to ensure the values stay in
+			// sync in the case retrieving the new credentials fails.
+			v4.Request.URL.RawQuery = v4.Query.Encode()
+		}
+	}
+
+	var err error
+	v4.CredValues, err = v4.Credentials.Get()
+	if err != nil {
+		return err
+	}
+
+	// Temporary (STS) credentials carry a session token that must be part of
+	// the request: as a query parameter for presigned URLs, as a header
+	// otherwise.
+	if v4.isPresign {
+		v4.Query.Set("X-Amz-Algorithm", authHeaderPrefix)
+		if v4.CredValues.SessionToken != "" {
+			v4.Query.Set("X-Amz-Security-Token", v4.CredValues.SessionToken)
+		} else {
+			v4.Query.Del("X-Amz-Security-Token")
+		}
+	} else if v4.CredValues.SessionToken != "" {
+		v4.Request.Header.Set("X-Amz-Security-Token", v4.CredValues.SessionToken)
+	}
+
+	v4.build()
+
+	if v4.Debug.Matches(aws.LogDebugWithSigning) {
+		v4.logSigningInfo()
+	}
+
+	return nil
+}
+
+// logSignInfoMsg is the debug template for signing details; the three verbs
+// are the canonical string, the string to sign, and an optional signed-URL
+// suffix (logSignedURLMsg or empty). Fixed typo: "Signiture" -> "Signature".
+const logSignInfoMsg = `DEBUG: Request Signature:
+---[ CANONICAL STRING  ]-----------------------------
+%s
+---[ STRING TO SIGN ]--------------------------------
+%s%s
+-----------------------------------------------------`
+const logSignedURLMsg = `
+---[ SIGNED URL ]------------------------------------
+%s`
+
+// logSigningInfo writes the canonical string and string-to-sign (and, for
+// presigned requests, the final URL) to the configured logger for debugging.
+func (v4 *signer) logSigningInfo() {
+	signedURLMsg := ""
+	if v4.isPresign {
+		signedURLMsg = fmt.Sprintf(logSignedURLMsg, v4.Request.URL.String())
+	}
+	msg := fmt.Sprintf(logSignInfoMsg, v4.canonicalString, v4.stringToSign, signedURLMsg)
+	v4.Logger.Log(msg)
+}
+
+// build runs the signing pipeline in dependency order and attaches the result
+// to the request: the X-Amz-Signature query parameter for presigned URLs, or
+// the Authorization header otherwise.
+func (v4 *signer) build() {
+	v4.buildTime()             // no depends
+	v4.buildCredentialString() // no depends
+	if v4.isPresign {
+		v4.buildQuery() // no depends
+	}
+	v4.buildCanonicalHeaders() // depends on cred string
+	v4.buildCanonicalString()  // depends on canon headers / signed headers
+	v4.buildStringToSign()     // depends on canon string
+	v4.buildSignature()        // depends on string to sign
+
+	if v4.isPresign {
+		v4.Request.URL.RawQuery += "&X-Amz-Signature=" + v4.signature
+	} else {
+		parts := []string{
+			authHeaderPrefix + " Credential=" + v4.CredValues.AccessKeyID + "/" + v4.credentialString,
+			"SignedHeaders=" + v4.signedHeaders,
+			"Signature=" + v4.signature,
+		}
+		v4.Request.Header.Set("Authorization", strings.Join(parts, ", "))
+	}
+}
+
+// buildTime formats the signing timestamps (UTC) and records the request time
+// on the request: X-Amz-Date/X-Amz-Expires query parameters for presigned
+// URLs, the X-Amz-Date header otherwise.
+func (v4 *signer) buildTime() {
+	v4.formattedTime = v4.Time.UTC().Format(timeFormat)
+	v4.formattedShortTime = v4.Time.UTC().Format(shortTimeFormat)
+
+	if v4.isPresign {
+		// Expiry is expressed in whole seconds on the wire.
+		duration := int64(v4.ExpireTime / time.Second)
+		v4.Query.Set("X-Amz-Date", v4.formattedTime)
+		v4.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10))
+	} else {
+		v4.Request.Header.Set("X-Amz-Date", v4.formattedTime)
+	}
+}
+
+// buildCredentialString assembles the credential scope
+// (date/region/service/aws4_request) and, for presigned URLs, publishes it as
+// the X-Amz-Credential query parameter.
+func (v4 *signer) buildCredentialString() {
+	v4.credentialString = strings.Join([]string{
+		v4.formattedShortTime,
+		v4.Region,
+		v4.ServiceName,
+		"aws4_request",
+	}, "/")
+
+	if v4.isPresign {
+		v4.Query.Set("X-Amz-Credential", v4.CredValues.AccessKeyID+"/"+v4.credentialString)
+	}
+}
+
+// buildQuery hoists eligible headers into the query string for presigned
+// URLs. X-Amz-* headers and the ignored set always stay where they are so
+// they remain part of the signed request.
+func (v4 *signer) buildQuery() {
+	// Deleting map keys while ranging is well-defined in Go: removed entries
+	// are simply not revisited by the iterator.
+	for k, h := range v4.Request.Header {
+		if strings.HasPrefix(http.CanonicalHeaderKey(k), "X-Amz-") {
+			continue // never hoist x-amz-* headers, they must be signed
+		}
+		if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
+			continue // never hoist ignored headers
+		}
+
+		v4.Request.Header.Del(k)
+		v4.Query.Del(k)
+		for _, v := range h {
+			v4.Query.Add(k, v)
+		}
+	}
+}
+
+// buildCanonicalHeaders computes the sorted, lowercased signed-headers list
+// and the canonical "name:value" header block required by SigV4. The Host
+// header is always signed.
+func (v4 *signer) buildCanonicalHeaders() {
+	var headers []string
+	headers = append(headers, "host")
+	for k := range v4.Request.Header {
+		if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
+			continue // ignored header
+		}
+		headers = append(headers, strings.ToLower(k))
+	}
+	sort.Strings(headers)
+
+	v4.signedHeaders = strings.Join(headers, ";")
+
+	if v4.isPresign {
+		v4.Query.Set("X-Amz-SignedHeaders", v4.signedHeaders)
+	}
+
+	headerValues := make([]string, len(headers))
+	for i, k := range headers {
+		if k == "host" {
+			// Host is not in the Header map; take it from the URL instead.
+			headerValues[i] = "host:" + v4.Request.URL.Host
+		} else {
+			// Multi-valued headers are joined with commas, per the spec.
+			headerValues[i] = k + ":" +
+				strings.Join(v4.Request.Header[http.CanonicalHeaderKey(k)], ",")
+		}
+	}
+
+	v4.canonicalHeaders = strings.Join(headerValues, "\n")
+}
+
+// buildCanonicalString assembles the SigV4 canonical request: method, URI,
+// query string, canonical headers, signed-headers list, and body digest,
+// newline-separated. It also re-encodes the query string with spaces as %20
+// (SigV4 requires percent-encoding, not '+').
+func (v4 *signer) buildCanonicalString() {
+	v4.Request.URL.RawQuery = strings.Replace(v4.Query.Encode(), "+", "%20", -1)
+	uri := v4.Request.URL.Opaque
+	if uri != "" {
+		// Opaque URLs look like "//host/path..."; drop the scheme/host parts
+		// and keep only the path.
+		uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/")
+	} else {
+		uri = v4.Request.URL.Path
+	}
+	if uri == "" {
+		uri = "/"
+	}
+
+	// S3 is the one service whose object keys must be signed unescaped.
+	if v4.ServiceName != "s3" {
+		uri = rest.EscapePath(uri, false)
+	}
+
+	v4.canonicalString = strings.Join([]string{
+		v4.Request.Method,
+		uri,
+		v4.Request.URL.RawQuery,
+		v4.canonicalHeaders + "\n",
+		v4.signedHeaders,
+		v4.bodyDigest(),
+	}, "\n")
+}
+
+// buildStringToSign produces the final string to sign: the algorithm name,
+// request timestamp, credential scope, and the SHA-256 hex digest of the
+// canonical request.
+func (v4 *signer) buildStringToSign() {
+	v4.stringToSign = strings.Join([]string{
+		authHeaderPrefix,
+		v4.formattedTime,
+		v4.credentialString,
+		hex.EncodeToString(makeSha256([]byte(v4.canonicalString))),
+	}, "\n")
+}
+
+// buildSignature derives the SigV4 signing key through the chained
+// HMAC-SHA256 of date, region, service, and the "aws4_request" terminator,
+// then signs the string-to-sign and stores the hex-encoded result.
+func (v4 *signer) buildSignature() {
+	secret := v4.CredValues.SecretAccessKey
+	date := makeHmac([]byte("AWS4"+secret), []byte(v4.formattedShortTime))
+	region := makeHmac(date, []byte(v4.Region))
+	service := makeHmac(region, []byte(v4.ServiceName))
+	credentials := makeHmac(service, []byte("aws4_request"))
+	signature := makeHmac(credentials, []byte(v4.stringToSign))
+	v4.signature = hex.EncodeToString(signature)
+}
+
+// bodyDigest returns the hex-encoded SHA-256 of the request body, honoring a
+// caller-supplied X-Amz-Content-Sha256 header. S3 presigned URLs use the
+// special UNSIGNED-PAYLOAD marker instead of hashing the body; a nil body
+// hashes as the empty string. The computed digest is also recorded on the
+// request header.
+func (v4 *signer) bodyDigest() string {
+	hash := v4.Request.Header.Get("X-Amz-Content-Sha256")
+	if hash == "" {
+		if v4.isPresign && v4.ServiceName == "s3" {
+			hash = "UNSIGNED-PAYLOAD"
+		} else if v4.Body == nil {
+			hash = hex.EncodeToString(makeSha256([]byte{}))
+		} else {
+			hash = hex.EncodeToString(makeSha256Reader(v4.Body))
+		}
+		v4.Request.Header.Add("X-Amz-Content-Sha256", hash)
+	}
+	return hash
+}
+
+// isRequestSigned returns if the request is currently signed (Authorization
+// header present) or presigned (X-Amz-Signature query parameter present).
+func (v4 *signer) isRequestSigned() bool {
+	if v4.isPresign && v4.Query.Get("X-Amz-Signature") != "" {
+		return true
+	}
+	if v4.Request.Header.Get("Authorization") != "" {
+		return true
+	}
+
+	return false
+}
+
+// removePresign strips all presign-related query parameters so the request
+// can be re-signed with fresh credentials.
+func (v4 *signer) removePresign() {
+	v4.Query.Del("X-Amz-Algorithm")
+	v4.Query.Del("X-Amz-Signature")
+	v4.Query.Del("X-Amz-Security-Token")
+	v4.Query.Del("X-Amz-Date")
+	v4.Query.Del("X-Amz-Expires")
+	v4.Query.Del("X-Amz-Credential")
+	v4.Query.Del("X-Amz-SignedHeaders")
+}
+
+// makeHmac computes HMAC-SHA256 of data keyed with key.
+func makeHmac(key []byte, data []byte) []byte {
+	hash := hmac.New(sha256.New, key)
+	hash.Write(data)
+	return hash.Sum(nil)
+}
+
+// makeSha256 computes the SHA-256 digest of data.
+func makeSha256(data []byte) []byte {
+	hash := sha256.New()
+	hash.Write(data)
+	return hash.Sum(nil)
+}
+
+// makeSha256Reader hashes the remaining contents of reader with SHA-256 and
+// restores the original seek position before returning.
+// NOTE(review): Seek and Copy errors are ignored here; a read failure would
+// silently yield a wrong digest — presumably callers only pass in-memory
+// seekers (bytes.Reader), verify against callers.
+func makeSha256Reader(reader io.ReadSeeker) []byte {
+	hash := sha256.New()
+	start, _ := reader.Seek(0, 1)
+	defer reader.Seek(start, 0)
+
+	io.Copy(hash, reader)
+	return hash.Sum(nil)
+}

+ 2293 - 0
vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go

@@ -0,0 +1,2293 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package cloudwatchlogs provides a client for Amazon CloudWatch Logs.
+package cloudwatchlogs
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awsutil"
+)
+
+// NOTE(review): everything below is machine-generated vendor code (see the
+// "DO NOT EDIT" header above); any hand edits will be lost when the SDK is
+// regenerated or re-vendored.
+const opCreateLogGroup = "CreateLogGroup"
+
+// CreateLogGroupRequest generates a request for the CreateLogGroup operation.
+func (c *CloudWatchLogs) CreateLogGroupRequest(input *CreateLogGroupInput) (req *aws.Request, output *CreateLogGroupOutput) {
+	op := &aws.Operation{
+		Name:       opCreateLogGroup,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateLogGroupInput{}
+	}
+
+	// output is nil when passed to newRequest; the generated pattern assigns
+	// the real output value to req.Data afterwards.
+	req = c.newRequest(op, input, output)
+	output = &CreateLogGroupOutput{}
+	req.Data = output
+	return
+}
+
+// Creates a new log group with the specified name. The name of the log group
+// must be unique within a region for an AWS account. You can create up to 500
+// log groups per account.
+//
+//  You must use the following guidelines when naming a log group:  Log group
+// names can be between 1 and 512 characters long. Allowed characters are a-z,
+// A-Z, 0-9, '_' (underscore), '-' (hyphen), '/' (forward slash), and '.' (period).
+func (c *CloudWatchLogs) CreateLogGroup(input *CreateLogGroupInput) (*CreateLogGroupOutput, error) {
+	req, out := c.CreateLogGroupRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opCreateLogStream = "CreateLogStream"
+
+// CreateLogStreamRequest generates a request for the CreateLogStream operation.
+func (c *CloudWatchLogs) CreateLogStreamRequest(input *CreateLogStreamInput) (req *aws.Request, output *CreateLogStreamOutput) {
+	op := &aws.Operation{
+		Name:       opCreateLogStream,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateLogStreamInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &CreateLogStreamOutput{}
+	req.Data = output
+	return
+}
+
+// Creates a new log stream in the specified log group. The name of the log
+// stream must be unique within the log group. There is no limit on the number
+// of log streams that can exist in a log group.
+//
+//  You must use the following guidelines when naming a log stream:  Log stream
+// names can be between 1 and 512 characters long. The ':' colon character is
+// not allowed.
+func (c *CloudWatchLogs) CreateLogStream(input *CreateLogStreamInput) (*CreateLogStreamOutput, error) {
+	req, out := c.CreateLogStreamRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeleteDestination = "DeleteDestination"
+
+// DeleteDestinationRequest generates a request for the DeleteDestination operation.
+func (c *CloudWatchLogs) DeleteDestinationRequest(input *DeleteDestinationInput) (req *aws.Request, output *DeleteDestinationOutput) {
+	op := &aws.Operation{
+		Name:       opDeleteDestination,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeleteDestinationInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DeleteDestinationOutput{}
+	req.Data = output
+	return
+}
+
+// Deletes the destination with the specified name and eventually disables all
+// the subscription filters that publish to it. This will not delete the physical
+// resource encapsulated by the destination.
+func (c *CloudWatchLogs) DeleteDestination(input *DeleteDestinationInput) (*DeleteDestinationOutput, error) {
+	req, out := c.DeleteDestinationRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeleteLogGroup = "DeleteLogGroup"
+
+// DeleteLogGroupRequest generates a request for the DeleteLogGroup operation.
+func (c *CloudWatchLogs) DeleteLogGroupRequest(input *DeleteLogGroupInput) (req *aws.Request, output *DeleteLogGroupOutput) {
+	op := &aws.Operation{
+		Name:       opDeleteLogGroup,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeleteLogGroupInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DeleteLogGroupOutput{}
+	req.Data = output
+	return
+}
+
+// Deletes the log group with the specified name and permanently deletes all
+// the archived log events associated with it.
+func (c *CloudWatchLogs) DeleteLogGroup(input *DeleteLogGroupInput) (*DeleteLogGroupOutput, error) {
+	req, out := c.DeleteLogGroupRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeleteLogStream = "DeleteLogStream"
+
+// DeleteLogStreamRequest generates a request for the DeleteLogStream operation.
+func (c *CloudWatchLogs) DeleteLogStreamRequest(input *DeleteLogStreamInput) (req *aws.Request, output *DeleteLogStreamOutput) {
+	op := &aws.Operation{
+		Name:       opDeleteLogStream,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeleteLogStreamInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DeleteLogStreamOutput{}
+	req.Data = output
+	return
+}
+
+// Deletes a log stream and permanently deletes all the archived log events
+// associated with it.
+func (c *CloudWatchLogs) DeleteLogStream(input *DeleteLogStreamInput) (*DeleteLogStreamOutput, error) {
+	req, out := c.DeleteLogStreamRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+// NOTE(review): machine-generated vendor code (see the "DO NOT EDIT" file
+// header); do not hand-edit.
+const opDeleteMetricFilter = "DeleteMetricFilter"
+
+// DeleteMetricFilterRequest generates a request for the DeleteMetricFilter operation.
+func (c *CloudWatchLogs) DeleteMetricFilterRequest(input *DeleteMetricFilterInput) (req *aws.Request, output *DeleteMetricFilterOutput) {
+	op := &aws.Operation{
+		Name:       opDeleteMetricFilter,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeleteMetricFilterInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DeleteMetricFilterOutput{}
+	req.Data = output
+	return
+}
+
+// Deletes a metric filter associated with the specified log group.
+func (c *CloudWatchLogs) DeleteMetricFilter(input *DeleteMetricFilterInput) (*DeleteMetricFilterOutput, error) {
+	req, out := c.DeleteMetricFilterRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeleteRetentionPolicy = "DeleteRetentionPolicy"
+
+// DeleteRetentionPolicyRequest generates a request for the DeleteRetentionPolicy operation.
+func (c *CloudWatchLogs) DeleteRetentionPolicyRequest(input *DeleteRetentionPolicyInput) (req *aws.Request, output *DeleteRetentionPolicyOutput) {
+	op := &aws.Operation{
+		Name:       opDeleteRetentionPolicy,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeleteRetentionPolicyInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DeleteRetentionPolicyOutput{}
+	req.Data = output
+	return
+}
+
+// Deletes the retention policy of the specified log group. Log events would
+// not expire if they belong to log groups without a retention policy.
+func (c *CloudWatchLogs) DeleteRetentionPolicy(input *DeleteRetentionPolicyInput) (*DeleteRetentionPolicyOutput, error) {
+	req, out := c.DeleteRetentionPolicyRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeleteSubscriptionFilter = "DeleteSubscriptionFilter"
+
+// DeleteSubscriptionFilterRequest generates a request for the DeleteSubscriptionFilter operation.
+func (c *CloudWatchLogs) DeleteSubscriptionFilterRequest(input *DeleteSubscriptionFilterInput) (req *aws.Request, output *DeleteSubscriptionFilterOutput) {
+	op := &aws.Operation{
+		Name:       opDeleteSubscriptionFilter,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeleteSubscriptionFilterInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DeleteSubscriptionFilterOutput{}
+	req.Data = output
+	return
+}
+
+// Deletes a subscription filter associated with the specified log group.
+func (c *CloudWatchLogs) DeleteSubscriptionFilter(input *DeleteSubscriptionFilterInput) (*DeleteSubscriptionFilterOutput, error) {
+	req, out := c.DeleteSubscriptionFilterRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeDestinations = "DescribeDestinations"
+
+// DescribeDestinationsRequest generates a request for the DescribeDestinations operation.
+func (c *CloudWatchLogs) DescribeDestinationsRequest(input *DescribeDestinationsInput) (req *aws.Request, output *DescribeDestinationsOutput) {
+	op := &aws.Operation{
+		Name:       opDescribeDestinations,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeDestinationsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeDestinationsOutput{}
+	req.Data = output
+	return
+}
+
+// Returns all the destinations that are associated with the AWS account making
+// the request. The list returned in the response is ASCII-sorted by destination
+// name.
+//
+//  By default, this operation returns up to 50 destinations. If there are
+// more destinations to list, the response would contain a nextToken value in
+// the response body. You can also limit the number of destinations returned
+// in the response by specifying the limit parameter in the request.
+func (c *CloudWatchLogs) DescribeDestinations(input *DescribeDestinationsInput) (*DescribeDestinationsOutput, error) {
+	req, out := c.DescribeDestinationsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeLogGroups = "DescribeLogGroups"
+
+// DescribeLogGroupsRequest generates a request for the DescribeLogGroups operation.
+func (c *CloudWatchLogs) DescribeLogGroupsRequest(input *DescribeLogGroupsInput) (req *aws.Request, output *DescribeLogGroupsOutput) {
+	op := &aws.Operation{
+		Name:       opDescribeLogGroups,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+		Paginator: &aws.Paginator{
+			InputTokens:     []string{"nextToken"},
+			OutputTokens:    []string{"nextToken"},
+			LimitToken:      "limit",
+			TruncationToken: "",
+		},
+	}
+
+	if input == nil {
+		input = &DescribeLogGroupsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeLogGroupsOutput{}
+	req.Data = output
+	return
+}
+
+// Returns all the log groups that are associated with the AWS account making
+// the request. The list returned in the response is ASCII-sorted by log group
+// name.
+//
+//  By default, this operation returns up to 50 log groups. If there are more
+// log groups to list, the response would contain a nextToken value in the response
+// body. You can also limit the number of log groups returned in the response
+// by specifying the limit parameter in the request.
+func (c *CloudWatchLogs) DescribeLogGroups(input *DescribeLogGroupsInput) (*DescribeLogGroupsOutput, error) {
+	req, out := c.DescribeLogGroupsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+func (c *CloudWatchLogs) DescribeLogGroupsPages(input *DescribeLogGroupsInput, fn func(p *DescribeLogGroupsOutput, lastPage bool) (shouldContinue bool)) error {
+	page, _ := c.DescribeLogGroupsRequest(input)
+	return page.EachPage(func(p interface{}, lastPage bool) bool {
+		return fn(p.(*DescribeLogGroupsOutput), lastPage)
+	})
+}
+
+// NOTE(review): machine-generated vendor code (see the "DO NOT EDIT" file
+// header); do not hand-edit.
+const opDescribeLogStreams = "DescribeLogStreams"
+
+// DescribeLogStreamsRequest generates a request for the DescribeLogStreams operation.
+func (c *CloudWatchLogs) DescribeLogStreamsRequest(input *DescribeLogStreamsInput) (req *aws.Request, output *DescribeLogStreamsOutput) {
+	op := &aws.Operation{
+		Name:       opDescribeLogStreams,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+		Paginator: &aws.Paginator{
+			InputTokens:     []string{"nextToken"},
+			OutputTokens:    []string{"nextToken"},
+			LimitToken:      "limit",
+			TruncationToken: "",
+		},
+	}
+
+	if input == nil {
+		input = &DescribeLogStreamsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeLogStreamsOutput{}
+	req.Data = output
+	return
+}
+
+// Returns all the log streams that are associated with the specified log group.
+// The list returned in the response is ASCII-sorted by log stream name.
+//
+//  By default, this operation returns up to 50 log streams. If there are more
+// log streams to list, the response would contain a nextToken value in the
+// response body. You can also limit the number of log streams returned in the
+// response by specifying the limit parameter in the request. This operation
+// has a limit of five transactions per second, after which transactions are
+// throttled.
+func (c *CloudWatchLogs) DescribeLogStreams(input *DescribeLogStreamsInput) (*DescribeLogStreamsOutput, error) {
+	req, out := c.DescribeLogStreamsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+func (c *CloudWatchLogs) DescribeLogStreamsPages(input *DescribeLogStreamsInput, fn func(p *DescribeLogStreamsOutput, lastPage bool) (shouldContinue bool)) error {
+	page, _ := c.DescribeLogStreamsRequest(input)
+	return page.EachPage(func(p interface{}, lastPage bool) bool {
+		return fn(p.(*DescribeLogStreamsOutput), lastPage)
+	})
+}
+
+const opDescribeMetricFilters = "DescribeMetricFilters"
+
+// DescribeMetricFiltersRequest generates a request for the DescribeMetricFilters operation.
+func (c *CloudWatchLogs) DescribeMetricFiltersRequest(input *DescribeMetricFiltersInput) (req *aws.Request, output *DescribeMetricFiltersOutput) {
+	op := &aws.Operation{
+		Name:       opDescribeMetricFilters,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+		Paginator: &aws.Paginator{
+			InputTokens:     []string{"nextToken"},
+			OutputTokens:    []string{"nextToken"},
+			LimitToken:      "limit",
+			TruncationToken: "",
+		},
+	}
+
+	if input == nil {
+		input = &DescribeMetricFiltersInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeMetricFiltersOutput{}
+	req.Data = output
+	return
+}
+
+// Returns all the metrics filters associated with the specified log group.
+// The list returned in the response is ASCII-sorted by filter name.
+//
+//  By default, this operation returns up to 50 metric filters. If there are
+// more metric filters to list, the response would contain a nextToken value
+// in the response body. You can also limit the number of metric filters returned
+// in the response by specifying the limit parameter in the request.
+func (c *CloudWatchLogs) DescribeMetricFilters(input *DescribeMetricFiltersInput) (*DescribeMetricFiltersOutput, error) {
+	req, out := c.DescribeMetricFiltersRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+func (c *CloudWatchLogs) DescribeMetricFiltersPages(input *DescribeMetricFiltersInput, fn func(p *DescribeMetricFiltersOutput, lastPage bool) (shouldContinue bool)) error {
+	page, _ := c.DescribeMetricFiltersRequest(input)
+	return page.EachPage(func(p interface{}, lastPage bool) bool {
+		return fn(p.(*DescribeMetricFiltersOutput), lastPage)
+	})
+}
+
+const opDescribeSubscriptionFilters = "DescribeSubscriptionFilters"
+
+// DescribeSubscriptionFiltersRequest generates a request for the DescribeSubscriptionFilters operation.
+func (c *CloudWatchLogs) DescribeSubscriptionFiltersRequest(input *DescribeSubscriptionFiltersInput) (req *aws.Request, output *DescribeSubscriptionFiltersOutput) {
+	op := &aws.Operation{
+		Name:       opDescribeSubscriptionFilters,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeSubscriptionFiltersInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeSubscriptionFiltersOutput{}
+	req.Data = output
+	return
+}
+
+// Returns all the subscription filters associated with the specified log group.
+// The list returned in the response is ASCII-sorted by filter name.
+//
+//  By default, this operation returns up to 50 subscription filters. If there
+// are more subscription filters to list, the response would contain a nextToken
+// value in the response body. You can also limit the number of subscription
+// filters returned in the response by specifying the limit parameter in the
+// request.
+func (c *CloudWatchLogs) DescribeSubscriptionFilters(input *DescribeSubscriptionFiltersInput) (*DescribeSubscriptionFiltersOutput, error) {
+	req, out := c.DescribeSubscriptionFiltersRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opFilterLogEvents = "FilterLogEvents"
+
+// FilterLogEventsRequest generates a request for the FilterLogEvents operation.
+func (c *CloudWatchLogs) FilterLogEventsRequest(input *FilterLogEventsInput) (req *aws.Request, output *FilterLogEventsOutput) {
+	op := &aws.Operation{
+		Name:       opFilterLogEvents,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &FilterLogEventsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &FilterLogEventsOutput{}
+	req.Data = output
+	return
+}
+
+// Retrieves log events, optionally filtered by a filter pattern from the specified
+// log group. You can provide an optional time range to filter the results on
+// the event timestamp. You can limit the streams searched to an explicit list
+// of logStreamNames.
+//
+//  By default, this operation returns as much matching log events as can fit
+// in a response size of 1MB, up to 10,000 log events, or all the events found
+// within a time-bounded scan window. If the response includes a nextToken,
+// then there is more data to search, and the search can be resumed with a new
+// request providing the nextToken. The response will contain a list of searchedLogStreams
+// that contains information about which streams were searched in the request
+// and whether they have been searched completely or require further pagination.
+// The limit parameter in the request. can be used to specify the maximum number
+// of events to return in a page.
+func (c *CloudWatchLogs) FilterLogEvents(input *FilterLogEventsInput) (*FilterLogEventsOutput, error) {
+	req, out := c.FilterLogEventsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opGetLogEvents = "GetLogEvents"
+
+// GetLogEventsRequest generates a request for the GetLogEvents operation.
+func (c *CloudWatchLogs) GetLogEventsRequest(input *GetLogEventsInput) (req *aws.Request, output *GetLogEventsOutput) {
+	op := &aws.Operation{
+		Name:       opGetLogEvents,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+		Paginator: &aws.Paginator{
+			InputTokens:     []string{"nextToken"},
+			OutputTokens:    []string{"nextForwardToken"},
+			LimitToken:      "limit",
+			TruncationToken: "",
+		},
+	}
+
+	if input == nil {
+		input = &GetLogEventsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &GetLogEventsOutput{}
+	req.Data = output
+	return
+}
+
+// Retrieves log events from the specified log stream. You can provide an optional
+// time range to filter the results on the event timestamp.
+//
+//  By default, this operation returns as much log events as can fit in a response
+// size of 1MB, up to 10,000 log events. The response will always include a
+// nextForwardToken and a nextBackwardToken in the response body. You can use
+// any of these tokens in subsequent GetLogEvents requests to paginate through
+// events in either forward or backward direction. You can also limit the number
+// of log events returned in the response by specifying the limit parameter
+// in the request.
+func (c *CloudWatchLogs) GetLogEvents(input *GetLogEventsInput) (*GetLogEventsOutput, error) {
+	req, out := c.GetLogEventsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+func (c *CloudWatchLogs) GetLogEventsPages(input *GetLogEventsInput, fn func(p *GetLogEventsOutput, lastPage bool) (shouldContinue bool)) error {
+	page, _ := c.GetLogEventsRequest(input)
+	return page.EachPage(func(p interface{}, lastPage bool) bool {
+		return fn(p.(*GetLogEventsOutput), lastPage)
+	})
+}
+
+const opPutDestination = "PutDestination"
+
+// PutDestinationRequest generates a request for the PutDestination operation.
+func (c *CloudWatchLogs) PutDestinationRequest(input *PutDestinationInput) (req *aws.Request, output *PutDestinationOutput) {
+	op := &aws.Operation{
+		Name:       opPutDestination,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &PutDestinationInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &PutDestinationOutput{}
+	req.Data = output
+	return
+}
+
+// Creates or updates a Destination. A destination encapsulates a physical resource
+// (such as a Kinesis stream) and allows you to subscribe to a real-time stream
+// of log events of a different account, ingested through PutLogEvents requests.
+// Currently, the only supported physical resource is a Amazon Kinesis stream
+// belonging to the same account as the destination.
+//
+//  A destination controls what is written to its Amazon Kinesis stream through
+// an access policy. By default, PutDestination does not set any access policy
+// with the destination, which means a cross-account user will not be able to
+// call PutSubscriptionFilter against this destination. To enable that, the
+// destination owner must call PutDestinationPolicy after PutDestination.
+func (c *CloudWatchLogs) PutDestination(input *PutDestinationInput) (*PutDestinationOutput, error) {
+	req, out := c.PutDestinationRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opPutDestinationPolicy = "PutDestinationPolicy"
+
+// PutDestinationPolicyRequest generates a request for the PutDestinationPolicy operation.
+func (c *CloudWatchLogs) PutDestinationPolicyRequest(input *PutDestinationPolicyInput) (req *aws.Request, output *PutDestinationPolicyOutput) {
+	op := &aws.Operation{
+		Name:       opPutDestinationPolicy,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &PutDestinationPolicyInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &PutDestinationPolicyOutput{}
+	req.Data = output
+	return
+}
+
+// Creates or updates an access policy associated with an existing Destination.
+// An access policy is an IAM policy document (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies_overview.html)
+// that is used to authorize claims to register a subscription filter against
+// a given destination.
+func (c *CloudWatchLogs) PutDestinationPolicy(input *PutDestinationPolicyInput) (*PutDestinationPolicyOutput, error) {
+	req, out := c.PutDestinationPolicyRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opPutLogEvents = "PutLogEvents"
+
+// PutLogEventsRequest generates a request for the PutLogEvents operation.
+func (c *CloudWatchLogs) PutLogEventsRequest(input *PutLogEventsInput) (req *aws.Request, output *PutLogEventsOutput) {
+	op := &aws.Operation{
+		Name:       opPutLogEvents,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &PutLogEventsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &PutLogEventsOutput{}
+	req.Data = output
+	return
+}
+
+// Uploads a batch of log events to the specified log stream.
+//
+//  Every PutLogEvents request must include the sequenceToken obtained from
+// the response of the previous request. An upload in a newly created log stream
+// does not require a sequenceToken.
+//
+//  The batch of events must satisfy the following constraints:  The maximum
+// batch size is 1,048,576 bytes, and this size is calculated as the sum of
+// all event messages in UTF-8, plus 26 bytes for each log event. None of the
+// log events in the batch can be more than 2 hours in the future. None of the
+// log events in the batch can be older than 14 days or the retention period
+// of the log group. The log events in the batch must be in chronological order
+// by their timestamp. The maximum number of log events in a batch is 10,000.
+func (c *CloudWatchLogs) PutLogEvents(input *PutLogEventsInput) (*PutLogEventsOutput, error) {
+	req, out := c.PutLogEventsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opPutMetricFilter = "PutMetricFilter"
+
+// PutMetricFilterRequest generates a request for the PutMetricFilter operation.
+func (c *CloudWatchLogs) PutMetricFilterRequest(input *PutMetricFilterInput) (req *aws.Request, output *PutMetricFilterOutput) {
+	op := &aws.Operation{
+		Name:       opPutMetricFilter,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &PutMetricFilterInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &PutMetricFilterOutput{}
+	req.Data = output
+	return
+}
+
+// Creates or updates a metric filter and associates it with the specified log
+// group. Metric filters allow you to configure rules to extract metric data
+// from log events ingested through PutLogEvents requests.
+//
+//  The maximum number of metric filters that can be associated with a log
+// group is 100.
+func (c *CloudWatchLogs) PutMetricFilter(input *PutMetricFilterInput) (*PutMetricFilterOutput, error) {
+	req, out := c.PutMetricFilterRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opPutRetentionPolicy = "PutRetentionPolicy"
+
+// PutRetentionPolicyRequest generates a request for the PutRetentionPolicy operation.
+func (c *CloudWatchLogs) PutRetentionPolicyRequest(input *PutRetentionPolicyInput) (req *aws.Request, output *PutRetentionPolicyOutput) {
+	op := &aws.Operation{
+		Name:       opPutRetentionPolicy,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &PutRetentionPolicyInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &PutRetentionPolicyOutput{}
+	req.Data = output
+	return
+}
+
+// Sets the retention of the specified log group. A retention policy allows
+// you to configure the number of days you want to retain log events in the
+// specified log group.
+func (c *CloudWatchLogs) PutRetentionPolicy(input *PutRetentionPolicyInput) (*PutRetentionPolicyOutput, error) {
+	req, out := c.PutRetentionPolicyRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opPutSubscriptionFilter = "PutSubscriptionFilter"
+
+// PutSubscriptionFilterRequest generates a request for the PutSubscriptionFilter operation.
+func (c *CloudWatchLogs) PutSubscriptionFilterRequest(input *PutSubscriptionFilterInput) (req *aws.Request, output *PutSubscriptionFilterOutput) {
+	op := &aws.Operation{
+		Name:       opPutSubscriptionFilter,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &PutSubscriptionFilterInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &PutSubscriptionFilterOutput{}
+	req.Data = output
+	return
+}
+
+// Creates or updates a subscription filter and associates it with the specified
+// log group. Subscription filters allow you to subscribe to a real-time stream
+// of log events ingested through PutLogEvents requests and have them delivered
+// to a specific destination. Currently, the supported destinations are:   An
+// Amazon Kinesis stream belonging to the same account as the subscription filter,
+// for same-account delivery.   A logical destination (used via an ARN of Destination)
+// belonging to a different account, for cross-account delivery.
+//
+//  Currently there can only be one subscription filter associated with a log
+// group.
+func (c *CloudWatchLogs) PutSubscriptionFilter(input *PutSubscriptionFilterInput) (*PutSubscriptionFilterOutput, error) {
+	req, out := c.PutSubscriptionFilterRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opTestMetricFilter = "TestMetricFilter"
+
+// TestMetricFilterRequest generates a request for the TestMetricFilter operation.
+func (c *CloudWatchLogs) TestMetricFilterRequest(input *TestMetricFilterInput) (req *aws.Request, output *TestMetricFilterOutput) {
+	op := &aws.Operation{
+		Name:       opTestMetricFilter,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &TestMetricFilterInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &TestMetricFilterOutput{}
+	req.Data = output
+	return
+}
+
+// Tests the filter pattern of a metric filter against a sample of log event
+// messages. You can use this operation to validate the correctness of a metric
+// filter pattern.
+func (c *CloudWatchLogs) TestMetricFilter(input *TestMetricFilterInput) (*TestMetricFilterOutput, error) {
+	req, out := c.TestMetricFilterRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+type CreateLogGroupInput struct {
+	// The name of the log group to create.
+	LogGroupName *string `locationName:"logGroupName" type:"string" required:"true"`
+
+	metadataCreateLogGroupInput `json:"-" xml:"-"`
+}
+
+type metadataCreateLogGroupInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateLogGroupInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateLogGroupInput) GoString() string {
+	return s.String()
+}
+
+type CreateLogGroupOutput struct {
+	metadataCreateLogGroupOutput `json:"-" xml:"-"`
+}
+
+type metadataCreateLogGroupOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateLogGroupOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateLogGroupOutput) GoString() string {
+	return s.String()
+}
+
+type CreateLogStreamInput struct {
+	// The name of the log group under which the log stream is to be created.
+	LogGroupName *string `locationName:"logGroupName" type:"string" required:"true"`
+
+	// The name of the log stream to create.
+	LogStreamName *string `locationName:"logStreamName" type:"string" required:"true"`
+
+	metadataCreateLogStreamInput `json:"-" xml:"-"`
+}
+
+type metadataCreateLogStreamInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateLogStreamInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateLogStreamInput) GoString() string {
+	return s.String()
+}
+
+type CreateLogStreamOutput struct {
+	metadataCreateLogStreamOutput `json:"-" xml:"-"`
+}
+
+type metadataCreateLogStreamOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateLogStreamOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateLogStreamOutput) GoString() string {
+	return s.String()
+}
+
+type DeleteDestinationInput struct {
+	// The name of destination to delete.
+	DestinationName *string `locationName:"destinationName" type:"string" required:"true"`
+
+	metadataDeleteDestinationInput `json:"-" xml:"-"`
+}
+
+type metadataDeleteDestinationInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteDestinationInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteDestinationInput) GoString() string {
+	return s.String()
+}
+
+type DeleteDestinationOutput struct {
+	metadataDeleteDestinationOutput `json:"-" xml:"-"`
+}
+
+type metadataDeleteDestinationOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteDestinationOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteDestinationOutput) GoString() string {
+	return s.String()
+}
+
+type DeleteLogGroupInput struct {
+	// The name of the log group to delete.
+	LogGroupName *string `locationName:"logGroupName" type:"string" required:"true"`
+
+	metadataDeleteLogGroupInput `json:"-" xml:"-"`
+}
+
+type metadataDeleteLogGroupInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteLogGroupInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteLogGroupInput) GoString() string {
+	return s.String()
+}
+
+type DeleteLogGroupOutput struct {
+	metadataDeleteLogGroupOutput `json:"-" xml:"-"`
+}
+
+type metadataDeleteLogGroupOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteLogGroupOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteLogGroupOutput) GoString() string {
+	return s.String()
+}
+
+type DeleteLogStreamInput struct {
+	// The name of the log group under which the log stream to delete belongs.
+	LogGroupName *string `locationName:"logGroupName" type:"string" required:"true"`
+
+	// The name of the log stream to delete.
+	LogStreamName *string `locationName:"logStreamName" type:"string" required:"true"`
+
+	metadataDeleteLogStreamInput `json:"-" xml:"-"`
+}
+
+type metadataDeleteLogStreamInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteLogStreamInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteLogStreamInput) GoString() string {
+	return s.String()
+}
+
+type DeleteLogStreamOutput struct {
+	metadataDeleteLogStreamOutput `json:"-" xml:"-"`
+}
+
+type metadataDeleteLogStreamOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteLogStreamOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteLogStreamOutput) GoString() string {
+	return s.String()
+}
+
+type DeleteMetricFilterInput struct {
+	// The name of the metric filter to delete.
+	FilterName *string `locationName:"filterName" type:"string" required:"true"`
+
+	// The name of the log group that is associated with the metric filter to delete.
+	LogGroupName *string `locationName:"logGroupName" type:"string" required:"true"`
+
+	metadataDeleteMetricFilterInput `json:"-" xml:"-"`
+}
+
+type metadataDeleteMetricFilterInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteMetricFilterInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteMetricFilterInput) GoString() string {
+	return s.String()
+}
+
+type DeleteMetricFilterOutput struct {
+	metadataDeleteMetricFilterOutput `json:"-" xml:"-"`
+}
+
+type metadataDeleteMetricFilterOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteMetricFilterOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteMetricFilterOutput) GoString() string {
+	return s.String()
+}
+
+type DeleteRetentionPolicyInput struct {
+	// The name of the log group that is associated with the retention policy to
+	// delete.
+	LogGroupName *string `locationName:"logGroupName" type:"string" required:"true"`
+
+	metadataDeleteRetentionPolicyInput `json:"-" xml:"-"`
+}
+
+type metadataDeleteRetentionPolicyInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteRetentionPolicyInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteRetentionPolicyInput) GoString() string {
+	return s.String()
+}
+
+type DeleteRetentionPolicyOutput struct {
+	metadataDeleteRetentionPolicyOutput `json:"-" xml:"-"`
+}
+
+type metadataDeleteRetentionPolicyOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteRetentionPolicyOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteRetentionPolicyOutput) GoString() string {
+	return s.String()
+}
+
+type DeleteSubscriptionFilterInput struct {
+	// The name of the subscription filter to delete.
+	FilterName *string `locationName:"filterName" type:"string" required:"true"`
+
+	// The name of the log group that is associated with the subscription filter
+	// to delete.
+	LogGroupName *string `locationName:"logGroupName" type:"string" required:"true"`
+
+	metadataDeleteSubscriptionFilterInput `json:"-" xml:"-"`
+}
+
+type metadataDeleteSubscriptionFilterInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteSubscriptionFilterInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteSubscriptionFilterInput) GoString() string {
+	return s.String()
+}
+
+type DeleteSubscriptionFilterOutput struct {
+	metadataDeleteSubscriptionFilterOutput `json:"-" xml:"-"`
+}
+
+type metadataDeleteSubscriptionFilterOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteSubscriptionFilterOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteSubscriptionFilterOutput) GoString() string {
+	return s.String()
+}
+
+type DescribeDestinationsInput struct {
+	// Will only return destinations that match the provided destinationNamePrefix.
+	// If you don't specify a value, no prefix is applied.
+	DestinationNamePrefix *string `type:"string"`
+
+	// The maximum number of results to return.
+	Limit *int64 `locationName:"limit" type:"integer"`
+
+	// A string token used for pagination that points to the next page of results.
+	// It must be a value obtained from the response of the previous request. The
+	// token expires after 24 hours.
+	NextToken *string `locationName:"nextToken" type:"string"`
+
+	metadataDescribeDestinationsInput `json:"-" xml:"-"`
+}
+
+type metadataDescribeDestinationsInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s DescribeDestinationsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeDestinationsInput) GoString() string {
+	return s.String()
+}
+
+type DescribeDestinationsOutput struct {
+	Destinations []*Destination `locationName:"destinations" type:"list"`
+
+	// A string token used for pagination that points to the next page of results.
+	// It must be a value obtained from the response of the previous request. The
+	// token expires after 24 hours.
+	NextToken *string `locationName:"nextToken" type:"string"`
+
+	metadataDescribeDestinationsOutput `json:"-" xml:"-"`
+}
+
+type metadataDescribeDestinationsOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s DescribeDestinationsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeDestinationsOutput) GoString() string {
+	return s.String()
+}
+
+type DescribeLogGroupsInput struct {
+	// The maximum number of items returned in the response. If you don't specify
+	// a value, the request would return up to 50 items.
+	Limit *int64 `locationName:"limit" type:"integer"`
+
+	// Will only return log groups that match the provided logGroupNamePrefix. If
+	// you don't specify a value, no prefix filter is applied.
+	LogGroupNamePrefix *string `locationName:"logGroupNamePrefix" type:"string"`
+
+	// A string token used for pagination that points to the next page of results.
+	// It must be a value obtained from the response of the previous DescribeLogGroups
+	// request.
+	NextToken *string `locationName:"nextToken" type:"string"`
+
+	metadataDescribeLogGroupsInput `json:"-" xml:"-"`
+}
+
+type metadataDescribeLogGroupsInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s DescribeLogGroupsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeLogGroupsInput) GoString() string {
+	return s.String()
+}
+
+type DescribeLogGroupsOutput struct {
+	// A list of log groups.
+	LogGroups []*LogGroup `locationName:"logGroups" type:"list"`
+
+	// A string token used for pagination that points to the next page of results.
+	// It must be a value obtained from the response of the previous request. The
+	// token expires after 24 hours.
+	NextToken *string `locationName:"nextToken" type:"string"`
+
+	metadataDescribeLogGroupsOutput `json:"-" xml:"-"`
+}
+
+type metadataDescribeLogGroupsOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s DescribeLogGroupsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeLogGroupsOutput) GoString() string {
+	return s.String()
+}
+
+type DescribeLogStreamsInput struct {
+	// If set to true, results are returned in descending order. If you don't specify
+	// a value or set it to false, results are returned in ascending order.
+	Descending *bool `locationName:"descending" type:"boolean"`
+
+	// The maximum number of items returned in the response. If you don't specify
+	// a value, the request would return up to 50 items.
+	Limit *int64 `locationName:"limit" type:"integer"`
+
+	// The log group name for which log streams are to be listed.
+	LogGroupName *string `locationName:"logGroupName" type:"string" required:"true"`
+
+	// Will only return log streams that match the provided logStreamNamePrefix.
+	// If you don't specify a value, no prefix filter is applied.
+	LogStreamNamePrefix *string `locationName:"logStreamNamePrefix" type:"string"`
+
+	// A string token used for pagination that points to the next page of results.
+	// It must be a value obtained from the response of the previous DescribeLogStreams
+	// request.
+	NextToken *string `locationName:"nextToken" type:"string"`
+
+	// Specifies what to order the returned log streams by. Valid arguments are
+	// 'LogStreamName' or 'LastEventTime'. If you don't specify a value, results
+	// are ordered by LogStreamName. If 'LastEventTime' is chosen, the request cannot
+	// also contain a logStreamNamePrefix.
+	OrderBy *string `locationName:"orderBy" type:"string" enum:"OrderBy"`
+
+	metadataDescribeLogStreamsInput `json:"-" xml:"-"`
+}
+
+type metadataDescribeLogStreamsInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s DescribeLogStreamsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeLogStreamsInput) GoString() string {
+	return s.String()
+}
+
+type DescribeLogStreamsOutput struct {
+	// A list of log streams.
+	LogStreams []*LogStream `locationName:"logStreams" type:"list"`
+
+	// A string token used for pagination that points to the next page of results.
+	// It must be a value obtained from the response of the previous request. The
+	// token expires after 24 hours.
+	NextToken *string `locationName:"nextToken" type:"string"`
+
+	metadataDescribeLogStreamsOutput `json:"-" xml:"-"`
+}
+
+type metadataDescribeLogStreamsOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s DescribeLogStreamsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeLogStreamsOutput) GoString() string {
+	return s.String()
+}
+
+type DescribeMetricFiltersInput struct {
+	// Will only return metric filters that match the provided filterNamePrefix.
+	// If you don't specify a value, no prefix filter is applied.
+	FilterNamePrefix *string `locationName:"filterNamePrefix" type:"string"`
+
+	// The maximum number of items returned in the response. If you don't specify
+	// a value, the request would return up to 50 items.
+	Limit *int64 `locationName:"limit" type:"integer"`
+
+	// The log group name for which metric filters are to be listed.
+	LogGroupName *string `locationName:"logGroupName" type:"string" required:"true"`
+
+	// A string token used for pagination that points to the next page of results.
+	// It must be a value obtained from the response of the previous DescribeMetricFilters
+	// request.
+	NextToken *string `locationName:"nextToken" type:"string"`
+
+	metadataDescribeMetricFiltersInput `json:"-" xml:"-"`
+}
+
+type metadataDescribeMetricFiltersInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s DescribeMetricFiltersInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeMetricFiltersInput) GoString() string {
+	return s.String()
+}
+
+type DescribeMetricFiltersOutput struct {
+	MetricFilters []*MetricFilter `locationName:"metricFilters" type:"list"`
+
+	// A string token used for pagination that points to the next page of results.
+	// It must be a value obtained from the response of the previous request. The
+	// token expires after 24 hours.
+	NextToken *string `locationName:"nextToken" type:"string"`
+
+	metadataDescribeMetricFiltersOutput `json:"-" xml:"-"`
+}
+
+type metadataDescribeMetricFiltersOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s DescribeMetricFiltersOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeMetricFiltersOutput) GoString() string {
+	return s.String()
+}
+
+type DescribeSubscriptionFiltersInput struct {
+	// Will only return subscription filters that match the provided filterNamePrefix.
+	// If you don't specify a value, no prefix filter is applied.
+	FilterNamePrefix *string `locationName:"filterNamePrefix" type:"string"`
+
+	// The maximum number of results to return.
+	Limit *int64 `locationName:"limit" type:"integer"`
+
+	// The log group name for which subscription filters are to be listed.
+	LogGroupName *string `locationName:"logGroupName" type:"string" required:"true"`
+
+	// A string token used for pagination that points to the next page of results.
+	// It must be a value obtained from the response of the previous request. The
+	// token expires after 24 hours.
+	NextToken *string `locationName:"nextToken" type:"string"`
+
+	metadataDescribeSubscriptionFiltersInput `json:"-" xml:"-"`
+}
+
+type metadataDescribeSubscriptionFiltersInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s DescribeSubscriptionFiltersInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeSubscriptionFiltersInput) GoString() string {
+	return s.String()
+}
+
+type DescribeSubscriptionFiltersOutput struct {
+	// A string token used for pagination that points to the next page of results.
+	// It must be a value obtained from the response of the previous request. The
+	// token expires after 24 hours.
+	NextToken *string `locationName:"nextToken" type:"string"`
+
+	SubscriptionFilters []*SubscriptionFilter `locationName:"subscriptionFilters" type:"list"`
+
+	metadataDescribeSubscriptionFiltersOutput `json:"-" xml:"-"`
+}
+
+type metadataDescribeSubscriptionFiltersOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s DescribeSubscriptionFiltersOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeSubscriptionFiltersOutput) GoString() string {
+	return s.String()
+}
+
+type Destination struct {
+	ARN *string `locationName:"arn" type:"string"`
+
+	AccessPolicy *string `locationName:"accessPolicy" type:"string"`
+
+	// A point in time expressed as the number of milliseconds since Jan 1, 1970
+	// 00:00:00 UTC.
+	CreationTime *int64 `locationName:"creationTime" type:"long"`
+
+	DestinationName *string `locationName:"destinationName" type:"string"`
+
+	RoleARN *string `locationName:"roleArn" type:"string"`
+
+	TargetARN *string `locationName:"targetArn" type:"string"`
+
+	metadataDestination `json:"-" xml:"-"`
+}
+
+type metadataDestination struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s Destination) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Destination) GoString() string {
+	return s.String()
+}
+
+type FilterLogEventsInput struct {
+	// A unix timestamp indicating the end time of the range for the request. If
+	// provided, events with a timestamp later than this time will not be returned.
+	EndTime *int64 `locationName:"endTime" type:"long"`
+
+	// A valid CloudWatch Logs filter pattern to use for filtering the response.
+	// If not provided, all the events are matched.
+	FilterPattern *string `locationName:"filterPattern" type:"string"`
+
+	// If provided, the API will make a best effort to provide responses that contain
+	// events from multiple log streams within the log group interleaved in a single
+	// response. If not provided, all the matched log events in the first log stream
+	// will be searched first, then those in the next log stream, etc.
+	Interleaved *bool `locationName:"interleaved" type:"boolean"`
+
+	// The maximum number of events to return in a page of results. Default is 10,000
+	// events.
+	Limit *int64 `locationName:"limit" type:"integer"`
+
+	// The name of the log group to query.
+	LogGroupName *string `locationName:"logGroupName" type:"string" required:"true"`
+
+	// Optional list of log stream names within the specified log group to search.
+	// Defaults to all the log streams in the log group.
+	LogStreamNames []*string `locationName:"logStreamNames" type:"list"`
+
+	// A pagination token obtained from a FilterLogEvents response to continue paginating
+	// the FilterLogEvents results.
+	NextToken *string `locationName:"nextToken" type:"string"`
+
+	// A unix timestamp indicating the start time of the range for the request.
+	// If provided, events with a timestamp prior to this time will not be returned.
+	StartTime *int64 `locationName:"startTime" type:"long"`
+
+	metadataFilterLogEventsInput `json:"-" xml:"-"`
+}
+
+type metadataFilterLogEventsInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s FilterLogEventsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s FilterLogEventsInput) GoString() string {
+	return s.String()
+}
+
+type FilterLogEventsOutput struct {
+	// A list of FilteredLogEvent objects representing the matched events from the
+	// request.
+	Events []*FilteredLogEvent `locationName:"events" type:"list"`
+
+	// A pagination token obtained from a FilterLogEvents response to continue paginating
+	// the FilterLogEvents results.
+	NextToken *string `locationName:"nextToken" type:"string"`
+
+	// A list of SearchedLogStream objects indicating which log streams have been
+	// searched in this request and whether each has been searched completely or
+	// still has more to be paginated.
+	SearchedLogStreams []*SearchedLogStream `locationName:"searchedLogStreams" type:"list"`
+
+	metadataFilterLogEventsOutput `json:"-" xml:"-"`
+}
+
+type metadataFilterLogEventsOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s FilterLogEventsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s FilterLogEventsOutput) GoString() string {
+	return s.String()
+}
+
+// Represents a matched event from a FilterLogEvents request.
+type FilteredLogEvent struct {
+	// A unique identifier for this event.
+	EventID *string `locationName:"eventId" type:"string"`
+
+	// A point in time expressed as the number of milliseconds since Jan 1, 1970
+	// 00:00:00 UTC.
+	IngestionTime *int64 `locationName:"ingestionTime" type:"long"`
+
+	// The name of the log stream this event belongs to.
+	LogStreamName *string `locationName:"logStreamName" type:"string"`
+
+	// The data contained in the log event.
+	Message *string `locationName:"message" type:"string"`
+
+	// A point in time expressed as the number of milliseconds since Jan 1, 1970
+	// 00:00:00 UTC.
+	Timestamp *int64 `locationName:"timestamp" type:"long"`
+
+	metadataFilteredLogEvent `json:"-" xml:"-"`
+}
+
+type metadataFilteredLogEvent struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s FilteredLogEvent) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s FilteredLogEvent) GoString() string {
+	return s.String()
+}
+
+type GetLogEventsInput struct {
+	// A point in time expressed as the number of milliseconds since Jan 1, 1970
+	// 00:00:00 UTC.
+	EndTime *int64 `locationName:"endTime" type:"long"`
+
+	// The maximum number of log events returned in the response. If you don't specify
+	// a value, the request would return as many log events as can fit in a response
+	// size of 1MB, up to 10,000 log events.
+	Limit *int64 `locationName:"limit" type:"integer"`
+
+	// The name of the log group to query.
+	LogGroupName *string `locationName:"logGroupName" type:"string" required:"true"`
+
+	// The name of the log stream to query.
+	LogStreamName *string `locationName:"logStreamName" type:"string" required:"true"`
+
+	// A string token used for pagination that points to the next page of results.
+	// It must be a value obtained from the nextForwardToken or nextBackwardToken
+	// fields in the response of the previous GetLogEvents request.
+	NextToken *string `locationName:"nextToken" type:"string"`
+
+	// If set to true, the earliest log events would be returned first. The default
+	// is false (the latest log events are returned first).
+	StartFromHead *bool `locationName:"startFromHead" type:"boolean"`
+
+	// A point in time expressed as the number of milliseconds since Jan 1, 1970
+	// 00:00:00 UTC.
+	StartTime *int64 `locationName:"startTime" type:"long"`
+
+	metadataGetLogEventsInput `json:"-" xml:"-"`
+}
+
+type metadataGetLogEventsInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetLogEventsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetLogEventsInput) GoString() string {
+	return s.String()
+}
+
+type GetLogEventsOutput struct {
+	Events []*OutputLogEvent `locationName:"events" type:"list"`
+
+	// A string token used for pagination that points to the next page of results.
+	// It must be a value obtained from the response of the previous request. The
+	// token expires after 24 hours.
+	NextBackwardToken *string `locationName:"nextBackwardToken" type:"string"`
+
+	// A string token used for pagination that points to the next page of results.
+	// It must be a value obtained from the response of the previous request. The
+	// token expires after 24 hours.
+	NextForwardToken *string `locationName:"nextForwardToken" type:"string"`
+
+	metadataGetLogEventsOutput `json:"-" xml:"-"`
+}
+
+type metadataGetLogEventsOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetLogEventsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetLogEventsOutput) GoString() string {
+	return s.String()
+}
+
+// A log event is a record of some activity that was recorded by the application
+// or resource being monitored. The log event record that Amazon CloudWatch
+// Logs understands contains two properties: the timestamp of when the event
+// occurred, and the raw event message.
+type InputLogEvent struct {
+	Message *string `locationName:"message" type:"string" required:"true"`
+
+	// A point in time expressed as the number of milliseconds since Jan 1, 1970
+	// 00:00:00 UTC.
+	Timestamp *int64 `locationName:"timestamp" type:"long" required:"true"`
+
+	metadataInputLogEvent `json:"-" xml:"-"`
+}
+
+type metadataInputLogEvent struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s InputLogEvent) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InputLogEvent) GoString() string {
+	return s.String()
+}
+
+type LogGroup struct {
+	ARN *string `locationName:"arn" type:"string"`
+
+	// A point in time expressed as the number of milliseconds since Jan 1, 1970
+	// 00:00:00 UTC.
+	CreationTime *int64 `locationName:"creationTime" type:"long"`
+
+	LogGroupName *string `locationName:"logGroupName" type:"string"`
+
+	// The number of metric filters associated with the log group.
+	MetricFilterCount *int64 `locationName:"metricFilterCount" type:"integer"`
+
+	// Specifies the number of days you want to retain log events in the specified
+	// log group. Possible values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180,
+	// 365, 400, 545, 731, 1827, 3653.
+	RetentionInDays *int64 `locationName:"retentionInDays" type:"integer"`
+
+	StoredBytes *int64 `locationName:"storedBytes" type:"long"`
+
+	metadataLogGroup `json:"-" xml:"-"`
+}
+
+type metadataLogGroup struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s LogGroup) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LogGroup) GoString() string {
+	return s.String()
+}
+
+// A log stream is a sequence of log events from a single emitter of logs.
+type LogStream struct {
+	ARN *string `locationName:"arn" type:"string"`
+
+	// A point in time expressed as the number of milliseconds since Jan 1, 1970
+	// 00:00:00 UTC.
+	CreationTime *int64 `locationName:"creationTime" type:"long"`
+
+	// A point in time expressed as the number of milliseconds since Jan 1, 1970
+	// 00:00:00 UTC.
+	FirstEventTimestamp *int64 `locationName:"firstEventTimestamp" type:"long"`
+
+	// A point in time expressed as the number of milliseconds since Jan 1, 1970
+	// 00:00:00 UTC.
+	LastEventTimestamp *int64 `locationName:"lastEventTimestamp" type:"long"`
+
+	// A point in time expressed as the number of milliseconds since Jan 1, 1970
+	// 00:00:00 UTC.
+	LastIngestionTime *int64 `locationName:"lastIngestionTime" type:"long"`
+
+	LogStreamName *string `locationName:"logStreamName" type:"string"`
+
+	StoredBytes *int64 `locationName:"storedBytes" type:"long"`
+
+	// A string token used for making PutLogEvents requests. A sequenceToken can
+	// only be used once, and PutLogEvents requests must include the sequenceToken
+	// obtained from the response of the previous request.
+	UploadSequenceToken *string `locationName:"uploadSequenceToken" type:"string"`
+
+	metadataLogStream `json:"-" xml:"-"`
+}
+
+type metadataLogStream struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s LogStream) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LogStream) GoString() string {
+	return s.String()
+}
+
+// Metric filters can be used to express how Amazon CloudWatch Logs would extract
+// metric observations from ingested log events and transform them to metric
+// data in a CloudWatch metric.
+type MetricFilter struct {
+	// A point in time expressed as the number of milliseconds since Jan 1, 1970
+	// 00:00:00 UTC.
+	CreationTime *int64 `locationName:"creationTime" type:"long"`
+
+	// A name for a metric or subscription filter.
+	FilterName *string `locationName:"filterName" type:"string"`
+
+	// A symbolic description of how Amazon CloudWatch Logs should interpret the
+	// data in each log event. For example, a log event may contain timestamps,
+	// IP addresses, strings, and so on. You use the filter pattern to specify what
+	// to look for in the log event message.
+	FilterPattern *string `locationName:"filterPattern" type:"string"`
+
+	MetricTransformations []*MetricTransformation `locationName:"metricTransformations" type:"list"`
+
+	metadataMetricFilter `json:"-" xml:"-"`
+}
+
+type metadataMetricFilter struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s MetricFilter) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s MetricFilter) GoString() string {
+	return s.String()
+}
+
+type MetricFilterMatchRecord struct {
+	EventMessage *string `locationName:"eventMessage" type:"string"`
+
+	EventNumber *int64 `locationName:"eventNumber" type:"long"`
+
+	ExtractedValues map[string]*string `locationName:"extractedValues" type:"map"`
+
+	metadataMetricFilterMatchRecord `json:"-" xml:"-"`
+}
+
+type metadataMetricFilterMatchRecord struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s MetricFilterMatchRecord) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s MetricFilterMatchRecord) GoString() string {
+	return s.String()
+}
+
+type MetricTransformation struct {
+	// The name of the CloudWatch metric to which the monitored log information
+	// should be published. For example, you may publish to a metric called ErrorCount.
+	MetricName *string `locationName:"metricName" type:"string" required:"true"`
+
+	// The destination namespace of the new CloudWatch metric.
+	MetricNamespace *string `locationName:"metricNamespace" type:"string" required:"true"`
+
+	// What to publish to the metric. For example, if you're counting the occurrences
+	// of a particular term like "Error", the value will be "1" for each occurrence.
+	// If you're counting the bytes transferred the published value will be the
+	// value in the log event.
+	MetricValue *string `locationName:"metricValue" type:"string" required:"true"`
+
+	metadataMetricTransformation `json:"-" xml:"-"`
+}
+
+type metadataMetricTransformation struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s MetricTransformation) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s MetricTransformation) GoString() string {
+	return s.String()
+}
+
+type OutputLogEvent struct {
+	// A point in time expressed as the number of milliseconds since Jan 1, 1970
+	// 00:00:00 UTC.
+	IngestionTime *int64 `locationName:"ingestionTime" type:"long"`
+
+	Message *string `locationName:"message" type:"string"`
+
+	// A point in time expressed as the number of milliseconds since Jan 1, 1970
+	// 00:00:00 UTC.
+	Timestamp *int64 `locationName:"timestamp" type:"long"`
+
+	metadataOutputLogEvent `json:"-" xml:"-"`
+}
+
+type metadataOutputLogEvent struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s OutputLogEvent) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s OutputLogEvent) GoString() string {
+	return s.String()
+}
+
+type PutDestinationInput struct {
+	// A name for the destination.
+	DestinationName *string `locationName:"destinationName" type:"string" required:"true"`
+
+	// The ARN of an IAM role that grants Amazon CloudWatch Logs permissions to
+	// do Amazon Kinesis PutRecord requests on the destination stream.
+	RoleARN *string `locationName:"roleArn" type:"string" required:"true"`
+
+	// The ARN of an Amazon Kinesis stream to deliver matching log events to.
+	TargetARN *string `locationName:"targetArn" type:"string" required:"true"`
+
+	metadataPutDestinationInput `json:"-" xml:"-"`
+}
+
+type metadataPutDestinationInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutDestinationInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutDestinationInput) GoString() string {
+	return s.String()
+}
+
+type PutDestinationOutput struct {
+	Destination *Destination `locationName:"destination" type:"structure"`
+
+	metadataPutDestinationOutput `json:"-" xml:"-"`
+}
+
+type metadataPutDestinationOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutDestinationOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutDestinationOutput) GoString() string {
+	return s.String()
+}
+
+type PutDestinationPolicyInput struct {
+	// An IAM policy document that authorizes cross-account users to deliver their
+	// log events to associated destination.
+	AccessPolicy *string `locationName:"accessPolicy" type:"string" required:"true"`
+
+	// A name for an existing destination.
+	DestinationName *string `locationName:"destinationName" type:"string" required:"true"`
+
+	metadataPutDestinationPolicyInput `json:"-" xml:"-"`
+}
+
+type metadataPutDestinationPolicyInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutDestinationPolicyInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutDestinationPolicyInput) GoString() string {
+	return s.String()
+}
+
+type PutDestinationPolicyOutput struct {
+	metadataPutDestinationPolicyOutput `json:"-" xml:"-"`
+}
+
+type metadataPutDestinationPolicyOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutDestinationPolicyOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutDestinationPolicyOutput) GoString() string {
+	return s.String()
+}
+
+type PutLogEventsInput struct {
+	// A list of log events belonging to a log stream.
+	LogEvents []*InputLogEvent `locationName:"logEvents" type:"list" required:"true"`
+
+	// The name of the log group to put log events to.
+	LogGroupName *string `locationName:"logGroupName" type:"string" required:"true"`
+
+	// The name of the log stream to put log events to.
+	LogStreamName *string `locationName:"logStreamName" type:"string" required:"true"`
+
+	// A string token that must be obtained from the response of the previous PutLogEvents
+	// request.
+	SequenceToken *string `locationName:"sequenceToken" type:"string"`
+
+	metadataPutLogEventsInput `json:"-" xml:"-"`
+}
+
+type metadataPutLogEventsInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutLogEventsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutLogEventsInput) GoString() string {
+	return s.String()
+}
+
+type PutLogEventsOutput struct {
+	// A string token used for making PutLogEvents requests. A sequenceToken can
+	// only be used once, and PutLogEvents requests must include the sequenceToken
+	// obtained from the response of the previous request.
+	NextSequenceToken *string `locationName:"nextSequenceToken" type:"string"`
+
+	RejectedLogEventsInfo *RejectedLogEventsInfo `locationName:"rejectedLogEventsInfo" type:"structure"`
+
+	metadataPutLogEventsOutput `json:"-" xml:"-"`
+}
+
+type metadataPutLogEventsOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutLogEventsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutLogEventsOutput) GoString() string {
+	return s.String()
+}
+
+type PutMetricFilterInput struct {
+	// A name for the metric filter.
+	FilterName *string `locationName:"filterName" type:"string" required:"true"`
+
+	// A valid CloudWatch Logs filter pattern for extracting metric data out of
+	// ingested log events.
+	FilterPattern *string `locationName:"filterPattern" type:"string" required:"true"`
+
+	// The name of the log group to associate the metric filter with.
+	LogGroupName *string `locationName:"logGroupName" type:"string" required:"true"`
+
+	// A collection of information needed to define how metric data gets emitted.
+	MetricTransformations []*MetricTransformation `locationName:"metricTransformations" type:"list" required:"true"`
+
+	metadataPutMetricFilterInput `json:"-" xml:"-"`
+}
+
+type metadataPutMetricFilterInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutMetricFilterInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutMetricFilterInput) GoString() string {
+	return s.String()
+}
+
+type PutMetricFilterOutput struct {
+	metadataPutMetricFilterOutput `json:"-" xml:"-"`
+}
+
+type metadataPutMetricFilterOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutMetricFilterOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutMetricFilterOutput) GoString() string {
+	return s.String()
+}
+
+type PutRetentionPolicyInput struct {
+	// The name of the log group to associate the retention policy with.
+	LogGroupName *string `locationName:"logGroupName" type:"string" required:"true"`
+
+	// Specifies the number of days you want to retain log events in the specified
+	// log group. Possible values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180,
+	// 365, 400, 545, 731, 1827, 3653.
+	RetentionInDays *int64 `locationName:"retentionInDays" type:"integer" required:"true"`
+
+	metadataPutRetentionPolicyInput `json:"-" xml:"-"`
+}
+
+type metadataPutRetentionPolicyInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutRetentionPolicyInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutRetentionPolicyInput) GoString() string {
+	return s.String()
+}
+
+type PutRetentionPolicyOutput struct {
+	metadataPutRetentionPolicyOutput `json:"-" xml:"-"`
+}
+
+type metadataPutRetentionPolicyOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutRetentionPolicyOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutRetentionPolicyOutput) GoString() string {
+	return s.String()
+}
+
+type PutSubscriptionFilterInput struct {
+	// The ARN of the destination to deliver matching log events to. Currently,
+	// the supported destinations are:   An Amazon Kinesis stream belonging to the
+	// same account as the subscription filter, for same-account delivery.   A logical
+	// destination (used via an ARN of Destination) belonging to a different account,
+	// for cross-account delivery.
+	DestinationARN *string `locationName:"destinationArn" type:"string" required:"true"`
+
+	// A name for the subscription filter.
+	FilterName *string `locationName:"filterName" type:"string" required:"true"`
+
+	// A valid CloudWatch Logs filter pattern for subscribing to a filtered stream
+	// of log events.
+	FilterPattern *string `locationName:"filterPattern" type:"string" required:"true"`
+
+	// The name of the log group to associate the subscription filter with.
+	LogGroupName *string `locationName:"logGroupName" type:"string" required:"true"`
+
+	// The ARN of an IAM role that grants Amazon CloudWatch Logs permissions to
+	// deliver ingested log events to the destination stream. You don't need to
+	// provide the ARN when you are working with a logical destination (used via
+	// an ARN of Destination) for cross-account delivery.
+	RoleARN *string `locationName:"roleArn" type:"string"`
+
+	metadataPutSubscriptionFilterInput `json:"-" xml:"-"`
+}
+
+type metadataPutSubscriptionFilterInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutSubscriptionFilterInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutSubscriptionFilterInput) GoString() string {
+	return s.String()
+}
+
+type PutSubscriptionFilterOutput struct {
+	metadataPutSubscriptionFilterOutput `json:"-" xml:"-"`
+}
+
+type metadataPutSubscriptionFilterOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutSubscriptionFilterOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutSubscriptionFilterOutput) GoString() string {
+	return s.String()
+}
+
+type RejectedLogEventsInfo struct {
+	ExpiredLogEventEndIndex *int64 `locationName:"expiredLogEventEndIndex" type:"integer"`
+
+	TooNewLogEventStartIndex *int64 `locationName:"tooNewLogEventStartIndex" type:"integer"`
+
+	TooOldLogEventEndIndex *int64 `locationName:"tooOldLogEventEndIndex" type:"integer"`
+
+	metadataRejectedLogEventsInfo `json:"-" xml:"-"`
+}
+
+type metadataRejectedLogEventsInfo struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s RejectedLogEventsInfo) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RejectedLogEventsInfo) GoString() string {
+	return s.String()
+}
+
+// An object indicating the search status of a log stream in a FilterLogEvents
+// request.
+type SearchedLogStream struct {
+	// The name of the log stream.
+	LogStreamName *string `locationName:"logStreamName" type:"string"`
+
+	// Indicates whether all the events in this log stream were searched or more
+	// data exists to search by paginating further.
+	SearchedCompletely *bool `locationName:"searchedCompletely" type:"boolean"`
+
+	metadataSearchedLogStream `json:"-" xml:"-"`
+}
+
+type metadataSearchedLogStream struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s SearchedLogStream) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SearchedLogStream) GoString() string {
+	return s.String()
+}
+
+type SubscriptionFilter struct {
+	// A point in time expressed as the number of milliseconds since Jan 1, 1970
+	// 00:00:00 UTC.
+	CreationTime *int64 `locationName:"creationTime" type:"long"`
+
+	DestinationARN *string `locationName:"destinationArn" type:"string"`
+
+	// A name for a metric or subscription filter.
+	FilterName *string `locationName:"filterName" type:"string"`
+
+	// A symbolic description of how Amazon CloudWatch Logs should interpret the
+	// data in each log event. For example, a log event may contain timestamps,
+	// IP addresses, strings, and so on. You use the filter pattern to specify what
+	// to look for in the log event message.
+	FilterPattern *string `locationName:"filterPattern" type:"string"`
+
+	LogGroupName *string `locationName:"logGroupName" type:"string"`
+
+	RoleARN *string `locationName:"roleArn" type:"string"`
+
+	metadataSubscriptionFilter `json:"-" xml:"-"`
+}
+
+type metadataSubscriptionFilter struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s SubscriptionFilter) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SubscriptionFilter) GoString() string {
+	return s.String()
+}
+
+type TestMetricFilterInput struct {
+	// A symbolic description of how Amazon CloudWatch Logs should interpret the
+	// data in each log event. For example, a log event may contain timestamps,
+	// IP addresses, strings, and so on. You use the filter pattern to specify what
+	// to look for in the log event message.
+	FilterPattern *string `locationName:"filterPattern" type:"string" required:"true"`
+
+	// A list of log event messages to test.
+	LogEventMessages []*string `locationName:"logEventMessages" type:"list" required:"true"`
+
+	metadataTestMetricFilterInput `json:"-" xml:"-"`
+}
+
+type metadataTestMetricFilterInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s TestMetricFilterInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TestMetricFilterInput) GoString() string {
+	return s.String()
+}
+
+type TestMetricFilterOutput struct {
+	Matches []*MetricFilterMatchRecord `locationName:"matches" type:"list"`
+
+	metadataTestMetricFilterOutput `json:"-" xml:"-"`
+}
+
+type metadataTestMetricFilterOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s TestMetricFilterOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TestMetricFilterOutput) GoString() string {
+	return s.String()
+}
+
+const (
+	// @enum OrderBy
+	OrderByLogStreamName = "LogStreamName"
+	// @enum OrderBy
+	OrderByLastEventTime = "LastEventTime"
+)

+ 108 - 0
vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchlogs/cloudwatchlogsiface/interface.go

@@ -0,0 +1,108 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package cloudwatchlogsiface provides an interface for the Amazon CloudWatch Logs.
+package cloudwatchlogsiface
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
+)
+
+// CloudWatchLogsAPI is the interface type for cloudwatchlogs.CloudWatchLogs.
+type CloudWatchLogsAPI interface {
+	CreateLogGroupRequest(*cloudwatchlogs.CreateLogGroupInput) (*aws.Request, *cloudwatchlogs.CreateLogGroupOutput)
+
+	CreateLogGroup(*cloudwatchlogs.CreateLogGroupInput) (*cloudwatchlogs.CreateLogGroupOutput, error)
+
+	CreateLogStreamRequest(*cloudwatchlogs.CreateLogStreamInput) (*aws.Request, *cloudwatchlogs.CreateLogStreamOutput)
+
+	CreateLogStream(*cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error)
+
+	DeleteDestinationRequest(*cloudwatchlogs.DeleteDestinationInput) (*aws.Request, *cloudwatchlogs.DeleteDestinationOutput)
+
+	DeleteDestination(*cloudwatchlogs.DeleteDestinationInput) (*cloudwatchlogs.DeleteDestinationOutput, error)
+
+	DeleteLogGroupRequest(*cloudwatchlogs.DeleteLogGroupInput) (*aws.Request, *cloudwatchlogs.DeleteLogGroupOutput)
+
+	DeleteLogGroup(*cloudwatchlogs.DeleteLogGroupInput) (*cloudwatchlogs.DeleteLogGroupOutput, error)
+
+	DeleteLogStreamRequest(*cloudwatchlogs.DeleteLogStreamInput) (*aws.Request, *cloudwatchlogs.DeleteLogStreamOutput)
+
+	DeleteLogStream(*cloudwatchlogs.DeleteLogStreamInput) (*cloudwatchlogs.DeleteLogStreamOutput, error)
+
+	DeleteMetricFilterRequest(*cloudwatchlogs.DeleteMetricFilterInput) (*aws.Request, *cloudwatchlogs.DeleteMetricFilterOutput)
+
+	DeleteMetricFilter(*cloudwatchlogs.DeleteMetricFilterInput) (*cloudwatchlogs.DeleteMetricFilterOutput, error)
+
+	DeleteRetentionPolicyRequest(*cloudwatchlogs.DeleteRetentionPolicyInput) (*aws.Request, *cloudwatchlogs.DeleteRetentionPolicyOutput)
+
+	DeleteRetentionPolicy(*cloudwatchlogs.DeleteRetentionPolicyInput) (*cloudwatchlogs.DeleteRetentionPolicyOutput, error)
+
+	DeleteSubscriptionFilterRequest(*cloudwatchlogs.DeleteSubscriptionFilterInput) (*aws.Request, *cloudwatchlogs.DeleteSubscriptionFilterOutput)
+
+	DeleteSubscriptionFilter(*cloudwatchlogs.DeleteSubscriptionFilterInput) (*cloudwatchlogs.DeleteSubscriptionFilterOutput, error)
+
+	DescribeDestinationsRequest(*cloudwatchlogs.DescribeDestinationsInput) (*aws.Request, *cloudwatchlogs.DescribeDestinationsOutput)
+
+	DescribeDestinations(*cloudwatchlogs.DescribeDestinationsInput) (*cloudwatchlogs.DescribeDestinationsOutput, error)
+
+	DescribeLogGroupsRequest(*cloudwatchlogs.DescribeLogGroupsInput) (*aws.Request, *cloudwatchlogs.DescribeLogGroupsOutput)
+
+	DescribeLogGroups(*cloudwatchlogs.DescribeLogGroupsInput) (*cloudwatchlogs.DescribeLogGroupsOutput, error)
+
+	DescribeLogGroupsPages(*cloudwatchlogs.DescribeLogGroupsInput, func(*cloudwatchlogs.DescribeLogGroupsOutput, bool) bool) error
+
+	DescribeLogStreamsRequest(*cloudwatchlogs.DescribeLogStreamsInput) (*aws.Request, *cloudwatchlogs.DescribeLogStreamsOutput)
+
+	DescribeLogStreams(*cloudwatchlogs.DescribeLogStreamsInput) (*cloudwatchlogs.DescribeLogStreamsOutput, error)
+
+	DescribeLogStreamsPages(*cloudwatchlogs.DescribeLogStreamsInput, func(*cloudwatchlogs.DescribeLogStreamsOutput, bool) bool) error
+
+	DescribeMetricFiltersRequest(*cloudwatchlogs.DescribeMetricFiltersInput) (*aws.Request, *cloudwatchlogs.DescribeMetricFiltersOutput)
+
+	DescribeMetricFilters(*cloudwatchlogs.DescribeMetricFiltersInput) (*cloudwatchlogs.DescribeMetricFiltersOutput, error)
+
+	DescribeMetricFiltersPages(*cloudwatchlogs.DescribeMetricFiltersInput, func(*cloudwatchlogs.DescribeMetricFiltersOutput, bool) bool) error
+
+	DescribeSubscriptionFiltersRequest(*cloudwatchlogs.DescribeSubscriptionFiltersInput) (*aws.Request, *cloudwatchlogs.DescribeSubscriptionFiltersOutput)
+
+	DescribeSubscriptionFilters(*cloudwatchlogs.DescribeSubscriptionFiltersInput) (*cloudwatchlogs.DescribeSubscriptionFiltersOutput, error)
+
+	FilterLogEventsRequest(*cloudwatchlogs.FilterLogEventsInput) (*aws.Request, *cloudwatchlogs.FilterLogEventsOutput)
+
+	FilterLogEvents(*cloudwatchlogs.FilterLogEventsInput) (*cloudwatchlogs.FilterLogEventsOutput, error)
+
+	GetLogEventsRequest(*cloudwatchlogs.GetLogEventsInput) (*aws.Request, *cloudwatchlogs.GetLogEventsOutput)
+
+	GetLogEvents(*cloudwatchlogs.GetLogEventsInput) (*cloudwatchlogs.GetLogEventsOutput, error)
+
+	GetLogEventsPages(*cloudwatchlogs.GetLogEventsInput, func(*cloudwatchlogs.GetLogEventsOutput, bool) bool) error
+
+	PutDestinationRequest(*cloudwatchlogs.PutDestinationInput) (*aws.Request, *cloudwatchlogs.PutDestinationOutput)
+
+	PutDestination(*cloudwatchlogs.PutDestinationInput) (*cloudwatchlogs.PutDestinationOutput, error)
+
+	PutDestinationPolicyRequest(*cloudwatchlogs.PutDestinationPolicyInput) (*aws.Request, *cloudwatchlogs.PutDestinationPolicyOutput)
+
+	PutDestinationPolicy(*cloudwatchlogs.PutDestinationPolicyInput) (*cloudwatchlogs.PutDestinationPolicyOutput, error)
+
+	PutLogEventsRequest(*cloudwatchlogs.PutLogEventsInput) (*aws.Request, *cloudwatchlogs.PutLogEventsOutput)
+
+	PutLogEvents(*cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error)
+
+	PutMetricFilterRequest(*cloudwatchlogs.PutMetricFilterInput) (*aws.Request, *cloudwatchlogs.PutMetricFilterOutput)
+
+	PutMetricFilter(*cloudwatchlogs.PutMetricFilterInput) (*cloudwatchlogs.PutMetricFilterOutput, error)
+
+	PutRetentionPolicyRequest(*cloudwatchlogs.PutRetentionPolicyInput) (*aws.Request, *cloudwatchlogs.PutRetentionPolicyOutput)
+
+	PutRetentionPolicy(*cloudwatchlogs.PutRetentionPolicyInput) (*cloudwatchlogs.PutRetentionPolicyOutput, error)
+
+	PutSubscriptionFilterRequest(*cloudwatchlogs.PutSubscriptionFilterInput) (*aws.Request, *cloudwatchlogs.PutSubscriptionFilterOutput)
+
+	PutSubscriptionFilter(*cloudwatchlogs.PutSubscriptionFilterInput) (*cloudwatchlogs.PutSubscriptionFilterOutput, error)
+
+	TestMetricFilterRequest(*cloudwatchlogs.TestMetricFilterInput) (*aws.Request, *cloudwatchlogs.TestMetricFilterOutput)
+
+	TestMetricFilter(*cloudwatchlogs.TestMetricFilterInput) (*cloudwatchlogs.TestMetricFilterOutput, error)
+}

+ 90 - 0
vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go

@@ -0,0 +1,90 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package cloudwatchlogs
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/internal/protocol/jsonrpc"
+	"github.com/aws/aws-sdk-go/internal/signer/v4"
+)
+
+// This is the Amazon CloudWatch Logs API Reference. Amazon CloudWatch Logs
+// enables you to monitor, store, and access your system, application, and custom
+// log files. This guide provides detailed information about Amazon CloudWatch
+// Logs actions, data types, parameters, and errors. For detailed information
+// about Amazon CloudWatch Logs features and their associated API calls, go
+// to the Amazon CloudWatch Developer Guide (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide).
+//
+// Use the following links to get started using the Amazon CloudWatch Logs
+// API Reference:
+//
+//   Actions (http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_Operations.html):
+// An alphabetical list of all Amazon CloudWatch Logs actions.  Data Types (http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_Types.html):
+// An alphabetical list of all Amazon CloudWatch Logs data types.  Common Parameters
+// (http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/CommonParameters.html):
+// Parameters that all Query actions can use.  Common Errors (http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/CommonErrors.html):
+// Client and server errors that all actions can return.  Regions and Endpoints
+// (http://docs.aws.amazon.com/general/latest/gr/index.html?rande.html): Itemized
+// regions and endpoints for all AWS products.  In addition to using the Amazon
+// CloudWatch Logs API, you can also use the following SDKs and third-party
+// libraries to access Amazon CloudWatch Logs programmatically.
+//
+//  AWS SDK for Java Documentation (http://aws.amazon.com/documentation/sdkforjava/)
+// AWS SDK for .NET Documentation (http://aws.amazon.com/documentation/sdkfornet/)
+// AWS SDK for PHP Documentation (http://aws.amazon.com/documentation/sdkforphp/)
+// AWS SDK for Ruby Documentation (http://aws.amazon.com/documentation/sdkforruby/)
+//  Developers in the AWS developer community also provide their own libraries,
+// which you can find at the following AWS developer centers:
+//
+//  AWS Java Developer Center (http://aws.amazon.com/java/) AWS PHP Developer
+// Center (http://aws.amazon.com/php/) AWS Python Developer Center (http://aws.amazon.com/python/)
+// AWS Ruby Developer Center (http://aws.amazon.com/ruby/) AWS Windows and .NET
+// Developer Center (http://aws.amazon.com/net/)
+type CloudWatchLogs struct {
+	*aws.Service
+}
+
+// Used for custom service initialization logic
+var initService func(*aws.Service)
+
+// Used for custom request initialization logic
+var initRequest func(*aws.Request)
+
+// New returns a new CloudWatchLogs client.
+func New(config *aws.Config) *CloudWatchLogs {
+	service := &aws.Service{
+		Config:       aws.DefaultConfig.Merge(config),
+		ServiceName:  "logs",
+		APIVersion:   "2014-03-28",
+		JSONVersion:  "1.1",
+		TargetPrefix: "Logs_20140328",
+	}
+	service.Initialize()
+
+	// Handlers
+	service.Handlers.Sign.PushBack(v4.Sign)
+	service.Handlers.Build.PushBack(jsonrpc.Build)
+	service.Handlers.Unmarshal.PushBack(jsonrpc.Unmarshal)
+	service.Handlers.UnmarshalMeta.PushBack(jsonrpc.UnmarshalMeta)
+	service.Handlers.UnmarshalError.PushBack(jsonrpc.UnmarshalError)
+
+	// Run custom service initialization if present
+	if initService != nil {
+		initService(service)
+	}
+
+	return &CloudWatchLogs{service}
+}
+
+// newRequest creates a new request for a CloudWatchLogs operation and runs any
+// custom request initialization.
+func (c *CloudWatchLogs) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+	req := aws.NewRequest(c.Service, op, params, data)
+
+	// Run custom request initialization if present
+	if initRequest != nil {
+		initRequest(req)
+	}
+
+	return req
+}

+ 14 - 0
vendor/src/github.com/vaughan0/go-ini/LICENSE

@@ -0,0 +1,14 @@
+Copyright (c) 2013 Vaughan Newton
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
+rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
+persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
+Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+ 70 - 0
vendor/src/github.com/vaughan0/go-ini/README.md

@@ -0,0 +1,70 @@
+go-ini
+======
+
+INI parsing library for Go (golang).
+
+View the API documentation [here](http://godoc.org/github.com/vaughan0/go-ini).
+
+Usage
+-----
+
+Parse an INI file:
+
+```go
+import "github.com/vaughan0/go-ini"
+
+file, err := ini.LoadFile("myfile.ini")
+```
+
+Get data from the parsed file:
+
+```go
+name, ok := file.Get("person", "name")
+if !ok {
+  panic("'name' variable missing from 'person' section")
+}
+```
+
+Iterate through values in a section:
+
+```go
+for key, value := range file["mysection"] {
+  fmt.Printf("%s => %s\n", key, value)
+}
+```
+
+Iterate through sections in a file:
+
+```go
+for name, section := range file {
+  fmt.Printf("Section name: %s\n", name)
+}
+```
+
+File Format
+-----------
+
+INI files are parsed by go-ini line-by-line. Each line may be one of the following:
+
+  * A section definition: [section-name]
+  * A property: key = value
+  * A comment: #blahblah _or_ ;blahblah
+  * Blank. The line will be ignored.
+
+Properties defined before any section headers are placed in the default section, which has
+the empty string as its key.
+
+Example:
+
+```ini
+# I am a comment
+; So am I!
+
+[apples]
+colour = red or green
+shape = applish
+
+[oranges]
+shape = square
+colour = blue
+```

+ 123 - 0
vendor/src/github.com/vaughan0/go-ini/ini.go

@@ -0,0 +1,123 @@
+// Package ini provides functions for parsing INI configuration files.
+package ini
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"os"
+	"regexp"
+	"strings"
+)
+
+var (
+	// sectionRegex matches a full-line section header, e.g. "[name]",
+	// capturing everything between the brackets.
+	sectionRegex = regexp.MustCompile(`^\[(.*)\]$`)
+	// assignRegex matches a "key=value" assignment, splitting on the
+	// first '=' (the key may not contain '=').
+	assignRegex  = regexp.MustCompile(`^([^=]+)=(.*)$`)
+)
+
+// ErrSyntax is returned when there is a syntax error in an INI file.
+type ErrSyntax struct {
+	Line   int    // 1-based line number where the error occurred
+	Source string // The contents of the erroneous line, without leading or trailing whitespace
+}
+
+// Error implements the error interface, describing the offending line.
+func (e ErrSyntax) Error() string {
+	return fmt.Sprintf("invalid INI syntax on line %d: %s", e.Line, e.Source)
+}
+
+// A File represents a parsed INI file, mapping section names to their
+// contents. Keys appearing before any section header live under the
+// empty-string section name.
+type File map[string]Section
+
+// A Section represents a single section of an INI file, mapping keys to
+// their string values.
+type Section map[string]string
+
+// Section returns the named Section. A Section will be created and stored in
+// the File if one does not already exist for the given name.
+func (f File) Section(name string) Section {
+	section := f[name]
+	if section == nil {
+		section = make(Section)
+		f[name] = section
+	}
+	return section
+}
+
+// Get looks up a value for a key in a section and returns that value, along
+// with a boolean result similar to a map lookup. ok is false when either the
+// section or the key is absent.
+func (f File) Get(section, key string) (value string, ok bool) {
+	if s := f[section]; s != nil {
+		value, ok = s[key]
+	}
+	return
+}
+
+// Load reads INI data from in and stores the parsed data in the File.
+// Existing entries are kept; parsed entries are merged on top of them.
+func (f File) Load(in io.Reader) (err error) {
+	// Reuse the reader if it is already buffered; otherwise wrap it.
+	bufin, ok := in.(*bufio.Reader)
+	if !ok {
+		bufin = bufio.NewReader(in)
+	}
+	return parseFile(bufin, f)
+}
+
+// LoadFile reads INI data from the named file on disk and stores the parsed
+// data in the File. The file is closed before returning.
+func (f File) LoadFile(file string) (err error) {
+	in, err := os.Open(file)
+	if err != nil {
+		return
+	}
+	defer in.Close()
+	return f.Load(in)
+}
+
+// parseFile reads in line-by-line, populating file. The current section name
+// starts as "" (the default section) and is updated each time a [section]
+// header line is seen. It returns an ErrSyntax for any non-blank, non-comment
+// line that is neither an assignment nor a section header.
+func parseFile(in *bufio.Reader, file File) (err error) {
+	section := ""
+	lineNum := 0
+	for done := false; !done; {
+		var line string
+		if line, err = in.ReadString('\n'); err != nil {
+			if err == io.EOF {
+				// ReadString returns any data read before EOF along with
+				// io.EOF, so a final line lacking a trailing newline is
+				// still processed below before the loop exits.
+				done = true
+			} else {
+				return
+			}
+		}
+		lineNum++
+		line = strings.TrimSpace(line)
+		if len(line) == 0 {
+			// Skip blank lines
+			continue
+		}
+		if line[0] == ';' || line[0] == '#' {
+			// Skip comments
+			continue
+		}
+
+		// NOTE(review): assignments are tested before section headers, so a
+		// line such as "[a=b]" parses as key "[a" / value "b]" rather than
+		// as a section. This ordering is preserved as-is in vendored code.
+		if groups := assignRegex.FindStringSubmatch(line); groups != nil {
+			key, val := groups[1], groups[2]
+			key, val = strings.TrimSpace(key), strings.TrimSpace(val)
+			file.Section(section)[key] = val
+		} else if groups := sectionRegex.FindStringSubmatch(line); groups != nil {
+			name := strings.TrimSpace(groups[1])
+			section = name
+			// Create the section if it does not exist
+			file.Section(section)
+		} else {
+			return ErrSyntax{lineNum, line}
+		}
+
+	}
+	return nil
+}
+
+// Load parses INI data from a reader and returns it as a new File. On a
+// parse error, the partially-populated File is returned alongside the error.
+func Load(in io.Reader) (File, error) {
+	file := make(File)
+	err := file.Load(in)
+	return file, err
+}
+
+// LoadFile parses the named INI file on disk and returns it as a new File.
+// On an open or parse error, the (possibly partial) File is returned
+// alongside the error.
+func LoadFile(filename string) (File, error) {
+	file := make(File)
+	err := file.LoadFile(filename)
+	return file, err
+}

+ 2 - 0
vendor/src/github.com/vaughan0/go-ini/test.ini

@@ -0,0 +1,2 @@
+[default]
+stuff = things