Re-run vndr

Now that the CLI is gone, there are lots of packages that need to be
pruned from vendor.
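
For context, the prune is just a re-vendor: vndr reads vendor.conf at the
repository root and rewrites vendor/ to match it. Roughly (a sketch, assuming
vndr is installed on $PATH):

```console
$ go get github.com/LK4D4/vndr
$ cd $GOPATH/src/github.com/docker/docker
$ vndr
```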

Signed-off-by: Brian Goff <cpuguy83@gmail.com>
Brian Goff 2017-05-09 10:47:41 -04:00
parent 7f968435f6
commit 5122abeb44
132 changed files with 0 additions and 36665 deletions


@@ -56,13 +56,8 @@ github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb
github.com/mistifyio/go-zfs 22c9b32c84eb0d0c6f4043b6e90fc94073de92fa
github.com/pborman/uuid v1.0
# get desired notary commit, might also need to be updated in Dockerfile
github.com/docker/notary v0.4.2
google.golang.org/grpc v1.0.4
github.com/miekg/pkcs11 df8ae6ca730422dba20c768ff38ef7d79077a59f
github.com/docker/go v1.5.1-1-1-gbaf439e
github.com/agl/ed25519 d2b94fd789ea21d12fac1a4443dd3a3f79cda72c
# When updating, also update RUNC_COMMIT in hack/dockerfile/binaries-commits accordingly
github.com/opencontainers/runc b6b70e53451794e8333e9b602cc096b47a20bd0f
@@ -134,16 +129,9 @@ github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f
github.com/spf13/cobra v1.5.1 https://github.com/dnephin/cobra.git
github.com/spf13/pflag 9ff6c6923cfffbcd502984b8e0c80539a94968b7
github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
github.com/flynn-archive/go-shlex 3f9db97f856818214da2e1057f8ad84803971cff
github.com/Nvveen/Gotty a8b993ba6abdb0e0c12b0125c603323a71c7790c https://github.com/ijc25/Gotty
# metrics
github.com/docker/go-metrics 8fd5772bf1584597834c6f7961a530f06cbfbb87
# composefile
github.com/mitchellh/mapstructure f3009df150dadf309fdee4a54ed65c124afad715
github.com/xeipuuv/gojsonpointer e0fe6f68307607d540ed8eac07a342c33fa1b54a
github.com/xeipuuv/gojsonreference e02fc20de94c78484cd5ffb007f8af96be030a45
github.com/xeipuuv/gojsonschema 93e72a773fade158921402d6a24c819b48aba29d
gopkg.in/yaml.v2 4c78c975fe7c825c6d1466c42be594d1d6f3aba6
github.com/opencontainers/selinux v1.0.0-rc1


@@ -1,27 +0,0 @@
Copyright (c) 2012 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@@ -1,125 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package ed25519 implements the Ed25519 signature algorithm. See
// http://ed25519.cr.yp.to/.
package ed25519
// This code is a port of the public domain, "ref10" implementation of ed25519
// from SUPERCOP.
import (
"crypto/sha512"
"crypto/subtle"
"io"
"github.com/agl/ed25519/edwards25519"
)
const (
PublicKeySize = 32
PrivateKeySize = 64
SignatureSize = 64
)
// GenerateKey generates a public/private key pair using randomness from rand.
func GenerateKey(rand io.Reader) (publicKey *[PublicKeySize]byte, privateKey *[PrivateKeySize]byte, err error) {
privateKey = new([64]byte)
publicKey = new([32]byte)
_, err = io.ReadFull(rand, privateKey[:32])
if err != nil {
return nil, nil, err
}
h := sha512.New()
h.Write(privateKey[:32])
digest := h.Sum(nil)
digest[0] &= 248
digest[31] &= 127
digest[31] |= 64
var A edwards25519.ExtendedGroupElement
var hBytes [32]byte
copy(hBytes[:], digest)
edwards25519.GeScalarMultBase(&A, &hBytes)
A.ToBytes(publicKey)
copy(privateKey[32:], publicKey[:])
return
}
// Sign signs the message with privateKey and returns a signature.
func Sign(privateKey *[PrivateKeySize]byte, message []byte) *[SignatureSize]byte {
h := sha512.New()
h.Write(privateKey[:32])
var digest1, messageDigest, hramDigest [64]byte
var expandedSecretKey [32]byte
h.Sum(digest1[:0])
copy(expandedSecretKey[:], digest1[:])
expandedSecretKey[0] &= 248
expandedSecretKey[31] &= 63
expandedSecretKey[31] |= 64
h.Reset()
h.Write(digest1[32:])
h.Write(message)
h.Sum(messageDigest[:0])
var messageDigestReduced [32]byte
edwards25519.ScReduce(&messageDigestReduced, &messageDigest)
var R edwards25519.ExtendedGroupElement
edwards25519.GeScalarMultBase(&R, &messageDigestReduced)
var encodedR [32]byte
R.ToBytes(&encodedR)
h.Reset()
h.Write(encodedR[:])
h.Write(privateKey[32:])
h.Write(message)
h.Sum(hramDigest[:0])
var hramDigestReduced [32]byte
edwards25519.ScReduce(&hramDigestReduced, &hramDigest)
var s [32]byte
edwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced)
signature := new([64]byte)
copy(signature[:], encodedR[:])
copy(signature[32:], s[:])
return signature
}
// Verify returns true iff sig is a valid signature of message by publicKey.
func Verify(publicKey *[PublicKeySize]byte, message []byte, sig *[SignatureSize]byte) bool {
if sig[63]&224 != 0 {
return false
}
var A edwards25519.ExtendedGroupElement
if !A.FromBytes(publicKey) {
return false
}
h := sha512.New()
h.Write(sig[:32])
h.Write(publicKey[:])
h.Write(message)
var digest [64]byte
h.Sum(digest[:0])
var hReduced [32]byte
edwards25519.ScReduce(&hReduced, &digest)
var R edwards25519.ProjectiveGroupElement
var b [32]byte
copy(b[:], sig[32:])
edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &b)
var checkR [32]byte
R.ToBytes(&checkR)
return subtle.ConstantTimeCompare(sig[:32], checkR[:]) == 1
}
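
Taken together, the three functions round-trip like this (a minimal sketch
against the signatures above, using crypto/rand as the entropy source):

```go
package main

import (
	"crypto/rand"
	"fmt"

	"github.com/agl/ed25519"
)

func main() {
	// Per the signatures above: *[32]byte public key, *[64]byte private key.
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}

	msg := []byte("hello world")
	sig := ed25519.Sign(priv, msg) // *[64]byte signature

	fmt.Println(ed25519.Verify(pub, msg, sig)) // true
	msg[0] ^= 1
	fmt.Println(ed25519.Verify(pub, msg, sig)) // false
}
```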

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -1,87 +0,0 @@
package client
import (
"bytes"
"encoding/json"
"fmt"
"strings"
"github.com/docker/docker-credential-helpers/credentials"
)
// Store uses an external program to save credentials.
func Store(program ProgramFunc, credentials *credentials.Credentials) error {
cmd := program("store")
buffer := new(bytes.Buffer)
if err := json.NewEncoder(buffer).Encode(credentials); err != nil {
return err
}
cmd.Input(buffer)
out, err := cmd.Output()
if err != nil {
t := strings.TrimSpace(string(out))
return fmt.Errorf("error storing credentials - err: %v, out: `%s`", err, t)
}
return nil
}
// Get executes an external program to get the credentials from a native store.
func Get(program ProgramFunc, serverURL string) (*credentials.Credentials, error) {
cmd := program("get")
cmd.Input(strings.NewReader(serverURL))
out, err := cmd.Output()
if err != nil {
t := strings.TrimSpace(string(out))
if credentials.IsErrCredentialsNotFoundMessage(t) {
return nil, credentials.NewErrCredentialsNotFound()
}
return nil, fmt.Errorf("error getting credentials - err: %v, out: `%s`", err, t)
}
resp := &credentials.Credentials{
ServerURL: serverURL,
}
if err := json.NewDecoder(bytes.NewReader(out)).Decode(resp); err != nil {
return nil, err
}
return resp, nil
}
// Erase executes a program to remove the server credentials from the native store.
func Erase(program ProgramFunc, serverURL string) error {
cmd := program("erase")
cmd.Input(strings.NewReader(serverURL))
out, err := cmd.Output()
if err != nil {
t := strings.TrimSpace(string(out))
return fmt.Errorf("error erasing credentials - err: %v, out: `%s`", err, t)
}
return nil
}
// List executes a program to list server credentials in the native store.
func List(program ProgramFunc) (map[string]string, error) {
cmd := program("list")
cmd.Input(strings.NewReader("unused"))
out, err := cmd.Output()
if err != nil {
t := strings.TrimSpace(string(out))
return nil, fmt.Errorf("error listing credentials - err: %v, out: `%s`", err, t)
}
var resp map[string]string
if err = json.NewDecoder(bytes.NewReader(out)).Decode(&resp); err != nil {
return nil, err
}
return resp, nil
}


@@ -1,37 +0,0 @@
package client
import (
"io"
"os/exec"
)
// Program is an interface to execute external programs.
type Program interface {
Output() ([]byte, error)
Input(in io.Reader)
}
// ProgramFunc is a type of function that initializes programs based on arguments.
type ProgramFunc func(args ...string) Program
// NewShellProgramFunc creates programs that are executed in a Shell.
func NewShellProgramFunc(name string) ProgramFunc {
return func(args ...string) Program {
return &Shell{cmd: exec.Command(name, args...)}
}
}
// Shell invokes shell commands to talk with a remote credentials helper.
type Shell struct {
cmd *exec.Cmd
}
// Output returns responses from the remote credentials helper.
func (s *Shell) Output() ([]byte, error) {
return s.cmd.Output()
}
// Input sets the input to send to a remote credentials helper.
func (s *Shell) Input(in io.Reader) {
s.cmd.Stdin = in
}
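
A sketch of how the two files compose: NewShellProgramFunc builds the
ProgramFunc that Store/Get/Erase/List shell out through. The helper binary
name below is illustrative; any docker-credential-* helper on $PATH works:

```go
package main

import (
	"fmt"

	"github.com/docker/docker-credential-helpers/client"
	"github.com/docker/docker-credential-helpers/credentials"
)

func main() {
	// Hypothetical helper name; substitute whichever helper is installed.
	p := client.NewShellProgramFunc("docker-credential-secretservice")

	creds := &credentials.Credentials{
		ServerURL: "https://index.docker.io/v1",
		Username:  "someuser",
		Secret:    "s3cr3t",
	}
	if err := client.Store(p, creds); err != nil {
		fmt.Println("store:", err)
		return
	}

	got, err := client.Get(p, creds.ServerURL)
	if err != nil {
		fmt.Println("get:", err)
		return
	}
	fmt.Println(got.Username) // someuser
}
```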


@@ -1,150 +0,0 @@
package credentials
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"io"
"os"
"strings"
)
// Credentials holds the information shared between docker and the credentials store.
type Credentials struct {
ServerURL string
Username string
Secret string
}
// CredsLabel is the label attached to Docker credentials in stores that allow labelling.
// The label makes it possible to filter out non-Docker credentials at lookup/search in the
// macOS keychain, the Windows credential manager, and Linux libsecret. It defaults to
// "Docker Credentials".
var CredsLabel = "Docker Credentials"

// SetCredsLabel overrides the default CredsLabel.
func SetCredsLabel(label string) {
CredsLabel = label
}
// Serve initializes the credentials helper and parses the action argument.
// This function is designed to be called from a command line interface.
// It uses os.Args[1] as the key for the action.
// It uses os.Stdin as input and os.Stdout as output.
// This function terminates the program with os.Exit(1) if there is an error.
func Serve(helper Helper) {
var err error
if len(os.Args) != 2 {
err = fmt.Errorf("Usage: %s <store|get|erase|list>", os.Args[0])
}
if err == nil {
err = HandleCommand(helper, os.Args[1], os.Stdin, os.Stdout)
}
if err != nil {
fmt.Fprintf(os.Stdout, "%v\n", err)
os.Exit(1)
}
}
// HandleCommand uses a helper and a key to run a credential action.
func HandleCommand(helper Helper, key string, in io.Reader, out io.Writer) error {
switch key {
case "store":
return Store(helper, in)
case "get":
return Get(helper, in, out)
case "erase":
return Erase(helper, in)
case "list":
return List(helper, out)
}
return fmt.Errorf("Unknown credential action `%s`", key)
}
// Store uses a helper and an input reader to save credentials.
// The reader must contain the JSON serialization of a Credentials struct.
func Store(helper Helper, reader io.Reader) error {
scanner := bufio.NewScanner(reader)
buffer := new(bytes.Buffer)
for scanner.Scan() {
buffer.Write(scanner.Bytes())
}
if err := scanner.Err(); err != nil && err != io.EOF {
return err
}
var creds Credentials
if err := json.NewDecoder(buffer).Decode(&creds); err != nil {
return err
}
return helper.Add(&creds)
}
// Get retrieves the credentials for a given server url.
// The reader must contain the server URL to search.
// The writer is used to write the JSON serialization of the credentials.
func Get(helper Helper, reader io.Reader, writer io.Writer) error {
scanner := bufio.NewScanner(reader)
buffer := new(bytes.Buffer)
for scanner.Scan() {
buffer.Write(scanner.Bytes())
}
if err := scanner.Err(); err != nil && err != io.EOF {
return err
}
serverURL := strings.TrimSpace(buffer.String())
username, secret, err := helper.Get(serverURL)
if err != nil {
return err
}
resp := Credentials{
Username: username,
Secret: secret,
}
buffer.Reset()
if err := json.NewEncoder(buffer).Encode(resp); err != nil {
return err
}
fmt.Fprint(writer, buffer.String())
return nil
}
// Erase removes credentials from the store.
// The reader must contain the server URL to remove.
func Erase(helper Helper, reader io.Reader) error {
scanner := bufio.NewScanner(reader)
buffer := new(bytes.Buffer)
for scanner.Scan() {
buffer.Write(scanner.Bytes())
}
if err := scanner.Err(); err != nil && err != io.EOF {
return err
}
serverURL := strings.TrimSpace(buffer.String())
return helper.Delete(serverURL)
}
// List returns all the serverURLs of keys in
// the OS store, written to the writer as a JSON map of serverURL to username.
func List(helper Helper, writer io.Writer) error {
accts, err := helper.List()
if err != nil {
return err
}
return json.NewEncoder(writer).Encode(accts)
}


@@ -1,37 +0,0 @@
package credentials
// errCredentialsNotFoundMessage standardizes the not-found error, so every helper returns
// the same message and docker can handle it properly.
const errCredentialsNotFoundMessage = "credentials not found in native keychain"
// errCredentialsNotFound represents an error
// raised when credentials are not in the store.
type errCredentialsNotFound struct{}
// Error returns the standard error message
// for when the credentials are not in the store.
func (errCredentialsNotFound) Error() string {
return errCredentialsNotFoundMessage
}
// NewErrCredentialsNotFound creates a new error
// for when the credentials are not in the store.
func NewErrCredentialsNotFound() error {
return errCredentialsNotFound{}
}
// IsErrCredentialsNotFound returns true if the error
// was caused by not having a set of credentials in a store.
func IsErrCredentialsNotFound(err error) bool {
_, ok := err.(errCredentialsNotFound)
return ok
}
// IsErrCredentialsNotFoundMessage returns true if the error
// was caused by not having a set of credentials in a store.
//
// This function helps to check messages returned by an
// external program via its standard output.
func IsErrCredentialsNotFoundMessage(err string) bool {
return err == errCredentialsNotFoundMessage
}


@@ -1,14 +0,0 @@
package credentials
// Helper is the interface a credentials store helper must implement.
type Helper interface {
// Add appends credentials to the store.
Add(*Credentials) error
// Delete removes credentials from the store.
Delete(serverURL string) error
// Get retrieves credentials from the store.
// It returns username and secret as strings.
Get(serverURL string) (string, string, error)
// List returns the stored serverURLs and their associated usernames.
List() (map[string]string, error)
}
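
A minimal sketch of a helper built on this interface: implement the four
methods and hand the result to credentials.Serve. The in-memory map is purely
illustrative; real helpers talk to an OS keychain:

```go
package main

import "github.com/docker/docker-credential-helpers/credentials"

// memoryHelper is an illustrative, non-persistent Helper implementation.
type memoryHelper struct {
	byURL map[string]*credentials.Credentials
}

func (m *memoryHelper) Add(c *credentials.Credentials) error {
	m.byURL[c.ServerURL] = c
	return nil
}

func (m *memoryHelper) Delete(serverURL string) error {
	delete(m.byURL, serverURL)
	return nil
}

func (m *memoryHelper) Get(serverURL string) (string, string, error) {
	if c, ok := m.byURL[serverURL]; ok {
		return c.Username, c.Secret, nil
	}
	return "", "", credentials.NewErrCredentialsNotFound()
}

func (m *memoryHelper) List() (map[string]string, error) {
	out := make(map[string]string, len(m.byURL))
	for url, c := range m.byURL {
		out[url] = c.Username
	}
	return out, nil
}

func main() {
	// Serve reads the action from os.Args[1] and speaks the
	// store/get/erase/list protocol shown above over stdin/stdout.
	credentials.Serve(&memoryHelper{byURL: map[string]*credentials.Credentials{}})
}
```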

vendor/github.com/docker/go/LICENSE (generated, vendored; 27 deletions)

@@ -1,27 +0,0 @@
Copyright (c) 2012 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@@ -1,16 +0,0 @@
[![Circle CI](https://circleci.com/gh/jfrazelle/go.svg?style=svg)](https://circleci.com/gh/jfrazelle/go)
This is a repository used for building Go packages based on upstream, with
small patches.
So far it is only used for a canonical JSON package.
I hope we do not need to use it for anything else in the future.
**To update:**
```console
$ make update
```
This will nuke the current package, clone upstream and apply the patch.

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -1,143 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import (
"bytes"
"unicode/utf8"
)
const (
caseMask = ^byte(0x20) // Mask to ignore case in ASCII.
kelvin = '\u212a'
smallLongEss = '\u017f'
)
// foldFunc returns one of four different case folding equivalence
// functions, from most general (and slow) to fastest:
//
// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
// 3) asciiEqualFold, no special, but includes non-letters (including _)
// 4) simpleLetterEqualFold, no specials, no non-letters.
//
// The letters S and K are special because they map to 3 runes, not just 2:
// * S maps to s and to U+017F 'ſ' Latin small letter long s
// * k maps to K and to U+212A 'K' Kelvin sign
// See https://play.golang.org/p/tTxjOc0OGo
//
// The returned function is specialized for matching against s and
// should only be given s. It's not curried for performance reasons.
func foldFunc(s []byte) func(s, t []byte) bool {
nonLetter := false
special := false // special letter
for _, b := range s {
if b >= utf8.RuneSelf {
return bytes.EqualFold
}
upper := b & caseMask
if upper < 'A' || upper > 'Z' {
nonLetter = true
} else if upper == 'K' || upper == 'S' {
// See above for why these letters are special.
special = true
}
}
if special {
return equalFoldRight
}
if nonLetter {
return asciiEqualFold
}
return simpleLetterEqualFold
}
// equalFoldRight is a specialization of bytes.EqualFold when s is
// known to be all ASCII (including punctuation), but contains an 's',
// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
// See comments on foldFunc.
func equalFoldRight(s, t []byte) bool {
for _, sb := range s {
if len(t) == 0 {
return false
}
tb := t[0]
if tb < utf8.RuneSelf {
if sb != tb {
sbUpper := sb & caseMask
if 'A' <= sbUpper && sbUpper <= 'Z' {
if sbUpper != tb&caseMask {
return false
}
} else {
return false
}
}
t = t[1:]
continue
}
// sb is ASCII and t is not. t must be either kelvin
// sign or long s; sb must be s, S, k, or K.
tr, size := utf8.DecodeRune(t)
switch sb {
case 's', 'S':
if tr != smallLongEss {
return false
}
case 'k', 'K':
if tr != kelvin {
return false
}
default:
return false
}
t = t[size:]
}
if len(t) > 0 {
return false
}
return true
}
// asciiEqualFold is a specialization of bytes.EqualFold for use when
// s is all ASCII (but may contain non-letters) and contains no
// special-folding letters.
// See comments on foldFunc.
func asciiEqualFold(s, t []byte) bool {
if len(s) != len(t) {
return false
}
for i, sb := range s {
tb := t[i]
if sb == tb {
continue
}
if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
if sb&caseMask != tb&caseMask {
return false
}
} else {
return false
}
}
return true
}
// simpleLetterEqualFold is a specialization of bytes.EqualFold for
// use when s is all ASCII letters (no underscores, etc) and also
// doesn't contain 'k', 'K', 's', or 'S'.
// See comments on foldFunc.
func simpleLetterEqualFold(s, t []byte) bool {
if len(s) != len(t) {
return false
}
for i, b := range s {
if b&caseMask != t[i]&caseMask {
return false
}
}
return true
}
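
The special K and S folds called out above are easy to see with the
general-purpose fallback, bytes.EqualFold (a standalone sketch; foldFunc
itself is unexported):

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	// 'K' (U+212A, Kelvin sign) folds to k/K; 'ſ' (U+017F) folds to s/S.
	fmt.Println(bytes.EqualFold([]byte("\u212aelvin"), []byte("Kelvin"))) // true
	fmt.Println(bytes.EqualFold([]byte("\u017fecret"), []byte("SECRET"))) // true

	// Plain ASCII letters fold under the 0x20 mask, as simpleLetterEqualFold does.
	caseMask := ^byte(0x20)
	fmt.Println('a'&caseMask == 'A') // true
}
```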


@@ -1,137 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import "bytes"
// Compact appends to dst the JSON-encoded src with
// insignificant space characters elided.
func Compact(dst *bytes.Buffer, src []byte) error {
return compact(dst, src, false)
}
func compact(dst *bytes.Buffer, src []byte, escape bool) error {
origLen := dst.Len()
var scan scanner
scan.reset()
start := 0
for i, c := range src {
if escape && (c == '<' || c == '>' || c == '&') {
if start < i {
dst.Write(src[start:i])
}
dst.WriteString(`\u00`)
dst.WriteByte(hex[c>>4])
dst.WriteByte(hex[c&0xF])
start = i + 1
}
// Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
if start < i {
dst.Write(src[start:i])
}
dst.WriteString(`\u202`)
dst.WriteByte(hex[src[i+2]&0xF])
start = i + 3
}
v := scan.step(&scan, int(c))
if v >= scanSkipSpace {
if v == scanError {
break
}
if start < i {
dst.Write(src[start:i])
}
start = i + 1
}
}
if scan.eof() == scanError {
dst.Truncate(origLen)
return scan.err
}
if start < len(src) {
dst.Write(src[start:])
}
return nil
}
func newline(dst *bytes.Buffer, prefix, indent string, depth int) {
dst.WriteByte('\n')
dst.WriteString(prefix)
for i := 0; i < depth; i++ {
dst.WriteString(indent)
}
}
// Indent appends to dst an indented form of the JSON-encoded src.
// Each element in a JSON object or array begins on a new,
// indented line beginning with prefix followed by one or more
// copies of indent according to the indentation nesting.
// The data appended to dst does not begin with the prefix nor
// any indentation, and has no trailing newline, to make it
// easier to embed inside other formatted JSON data.
func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
origLen := dst.Len()
var scan scanner
scan.reset()
needIndent := false
depth := 0
for _, c := range src {
scan.bytes++
v := scan.step(&scan, int(c))
if v == scanSkipSpace {
continue
}
if v == scanError {
break
}
if needIndent && v != scanEndObject && v != scanEndArray {
needIndent = false
depth++
newline(dst, prefix, indent, depth)
}
// Emit semantically uninteresting bytes
// (in particular, punctuation in strings) unmodified.
if v == scanContinue {
dst.WriteByte(c)
continue
}
// Add spacing around real punctuation.
switch c {
case '{', '[':
// delay indent so that empty object and array are formatted as {} and [].
needIndent = true
dst.WriteByte(c)
case ',':
dst.WriteByte(c)
newline(dst, prefix, indent, depth)
case ':':
dst.WriteByte(c)
dst.WriteByte(' ')
case '}', ']':
if needIndent {
// suppress indent in empty object/array
needIndent = false
} else {
depth--
newline(dst, prefix, indent, depth)
}
dst.WriteByte(c)
default:
dst.WriteByte(c)
}
}
if scan.eof() == scanError {
dst.Truncate(origLen)
return scan.err
}
return nil
}
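
A sketch of both entry points (the import path is assumed from the vendor
layout above; the API mirrors the upstream encoding/json this package was
forked from):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/docker/go/canonical/json"
)

func main() {
	src := []byte(`{ "a": [1, 2],   "b": {} }`)

	var dst bytes.Buffer
	if err := json.Compact(&dst, src); err != nil {
		panic(err)
	}
	fmt.Println(dst.String()) // {"a":[1,2],"b":{}}

	dst.Reset()
	if err := json.Indent(&dst, src, "", "  "); err != nil {
		panic(err)
	}
	// Empty objects stay as {} thanks to the delayed-indent logic above.
	fmt.Println(dst.String())
}
```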


@@ -1,630 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
// JSON value parser state machine.
// Just about at the limit of what is reasonable to write by hand.
// Some parts are a bit tedious, but overall it nicely factors out the
// otherwise common code from the multiple scanning functions
// in this package (Compact, Indent, checkValid, nextValue, etc).
//
// This file starts with two simple examples using the scanner
// before diving into the scanner itself.
import "strconv"
// checkValid verifies that data is valid JSON-encoded data.
// scan is passed in for use by checkValid to avoid an allocation.
func checkValid(data []byte, scan *scanner) error {
scan.reset()
for _, c := range data {
scan.bytes++
if scan.step(scan, int(c)) == scanError {
return scan.err
}
}
if scan.eof() == scanError {
return scan.err
}
return nil
}
// nextValue splits data after the next whole JSON value,
// returning that value and the bytes that follow it as separate slices.
// scan is passed in for use by nextValue to avoid an allocation.
func nextValue(data []byte, scan *scanner) (value, rest []byte, err error) {
scan.reset()
for i, c := range data {
v := scan.step(scan, int(c))
if v >= scanEndObject {
switch v {
// probe the scanner with a space to determine whether we will
// get scanEnd on the next character. Otherwise, if the next character
// is not a space, scanEndTop allocates a needless error.
case scanEndObject, scanEndArray:
if scan.step(scan, ' ') == scanEnd {
return data[:i+1], data[i+1:], nil
}
case scanError:
return nil, nil, scan.err
case scanEnd:
return data[0:i], data[i:], nil
}
}
}
if scan.eof() == scanError {
return nil, nil, scan.err
}
return data, nil, nil
}
// A SyntaxError is a description of a JSON syntax error.
type SyntaxError struct {
msg string // description of error
Offset int64 // error occurred after reading Offset bytes
}
func (e *SyntaxError) Error() string { return e.msg }
// A scanner is a JSON scanning state machine.
// Callers call scan.reset() and then pass bytes in one at a time
// by calling scan.step(&scan, c) for each byte.
// The return value, referred to as an opcode, tells the
// caller about significant parsing events like beginning
// and ending literals, objects, and arrays, so that the
// caller can follow along if it wishes.
// The return value scanEnd indicates that a single top-level
// JSON value has been completed, *before* the byte that
// just got passed in. (The indication must be delayed in order
// to recognize the end of numbers: is 123 a whole value or
// the beginning of 12345e+6?).
type scanner struct {
// The step is a func to be called to execute the next transition.
// Also tried using an integer constant and a single func
// with a switch, but using the func directly was 10% faster
// on a 64-bit Mac Mini, and it's nicer to read.
step func(*scanner, int) int
// Reached end of top-level value.
endTop bool
// Stack of what we're in the middle of - array values, object keys, object values.
parseState []int
// Error that happened, if any.
err error
// 1-byte redo (see undo method)
redo bool
redoCode int
redoState func(*scanner, int) int
// total bytes consumed, updated by decoder.Decode
bytes int64
}
// These values are returned by the state transition functions
// assigned to scanner.state and the method scanner.eof.
// They give details about the current state of the scan that
// callers might be interested to know about.
// It is okay to ignore the return value of any particular
// call to scanner.state: if one call returns scanError,
// every subsequent call will return scanError too.
const (
// Continue.
scanContinue = iota // uninteresting byte
scanBeginLiteral // end implied by next result != scanContinue
scanBeginObject // begin object
scanObjectKey // just finished object key (string)
scanObjectValue // just finished non-last object value
scanEndObject // end object (implies scanObjectValue if possible)
scanBeginArray // begin array
scanArrayValue // just finished array value
scanEndArray // end array (implies scanArrayValue if possible)
scanSkipSpace // space byte; can skip; known to be last "continue" result
// Stop.
scanEnd // top-level value ended *before* this byte; known to be first "stop" result
scanError // hit an error, scanner.err.
)
// These values are stored in the parseState stack.
// They give the current state of a composite value
// being scanned. If the parser is inside a nested value
// the parseState describes the nested state, outermost at entry 0.
const (
parseObjectKey = iota // parsing object key (before colon)
parseObjectValue // parsing object value (after colon)
parseArrayValue // parsing array value
)
// reset prepares the scanner for use.
// It must be called before calling s.step.
func (s *scanner) reset() {
s.step = stateBeginValue
s.parseState = s.parseState[0:0]
s.err = nil
s.redo = false
s.endTop = false
}
// eof tells the scanner that the end of input has been reached.
// It returns a scan status just as s.step does.
func (s *scanner) eof() int {
if s.err != nil {
return scanError
}
if s.endTop {
return scanEnd
}
s.step(s, ' ')
if s.endTop {
return scanEnd
}
if s.err == nil {
s.err = &SyntaxError{"unexpected end of JSON input", s.bytes}
}
return scanError
}
// pushParseState pushes a new parse state p onto the parse stack.
func (s *scanner) pushParseState(p int) {
s.parseState = append(s.parseState, p)
}
// popParseState pops a parse state (already obtained) off the stack
// and updates s.step accordingly.
func (s *scanner) popParseState() {
n := len(s.parseState) - 1
s.parseState = s.parseState[0:n]
s.redo = false
if n == 0 {
s.step = stateEndTop
s.endTop = true
} else {
s.step = stateEndValue
}
}
func isSpace(c rune) bool {
return c == ' ' || c == '\t' || c == '\r' || c == '\n'
}
// stateBeginValueOrEmpty is the state after reading `[`.
func stateBeginValueOrEmpty(s *scanner, c int) int {
if c <= ' ' && isSpace(rune(c)) {
return scanSkipSpace
}
if c == ']' {
return stateEndValue(s, c)
}
return stateBeginValue(s, c)
}
// stateBeginValue is the state at the beginning of the input.
func stateBeginValue(s *scanner, c int) int {
if c <= ' ' && isSpace(rune(c)) {
return scanSkipSpace
}
switch c {
case '{':
s.step = stateBeginStringOrEmpty
s.pushParseState(parseObjectKey)
return scanBeginObject
case '[':
s.step = stateBeginValueOrEmpty
s.pushParseState(parseArrayValue)
return scanBeginArray
case '"':
s.step = stateInString
return scanBeginLiteral
case '-':
s.step = stateNeg
return scanBeginLiteral
case '0': // beginning of 0.123
s.step = state0
return scanBeginLiteral
case 't': // beginning of true
s.step = stateT
return scanBeginLiteral
case 'f': // beginning of false
s.step = stateF
return scanBeginLiteral
case 'n': // beginning of null
s.step = stateN
return scanBeginLiteral
}
if '1' <= c && c <= '9' { // beginning of 1234.5
s.step = state1
return scanBeginLiteral
}
return s.error(c, "looking for beginning of value")
}
// stateBeginStringOrEmpty is the state after reading `{`.
func stateBeginStringOrEmpty(s *scanner, c int) int {
if c <= ' ' && isSpace(rune(c)) {
return scanSkipSpace
}
if c == '}' {
n := len(s.parseState)
s.parseState[n-1] = parseObjectValue
return stateEndValue(s, c)
}
return stateBeginString(s, c)
}
// stateBeginString is the state after reading `{"key": value,`.
func stateBeginString(s *scanner, c int) int {
if c <= ' ' && isSpace(rune(c)) {
return scanSkipSpace
}
if c == '"' {
s.step = stateInString
return scanBeginLiteral
}
return s.error(c, "looking for beginning of object key string")
}
// stateEndValue is the state after completing a value,
// such as after reading `{}` or `true` or `["x"`.
func stateEndValue(s *scanner, c int) int {
n := len(s.parseState)
if n == 0 {
// Completed top-level before the current byte.
s.step = stateEndTop
s.endTop = true
return stateEndTop(s, c)
}
if c <= ' ' && isSpace(rune(c)) {
s.step = stateEndValue
return scanSkipSpace
}
ps := s.parseState[n-1]
switch ps {
case parseObjectKey:
if c == ':' {
s.parseState[n-1] = parseObjectValue
s.step = stateBeginValue
return scanObjectKey
}
return s.error(c, "after object key")
case parseObjectValue:
if c == ',' {
s.parseState[n-1] = parseObjectKey
s.step = stateBeginString
return scanObjectValue
}
if c == '}' {
s.popParseState()
return scanEndObject
}
return s.error(c, "after object key:value pair")
case parseArrayValue:
if c == ',' {
s.step = stateBeginValue
return scanArrayValue
}
if c == ']' {
s.popParseState()
return scanEndArray
}
return s.error(c, "after array element")
}
return s.error(c, "")
}
// stateEndTop is the state after finishing the top-level value,
// such as after reading `{}` or `[1,2,3]`.
// Only space characters should be seen now.
func stateEndTop(s *scanner, c int) int {
if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
// Complain about non-space byte on next call.
s.error(c, "after top-level value")
}
return scanEnd
}
// stateInString is the state after reading `"`.
func stateInString(s *scanner, c int) int {
if c == '"' {
s.step = stateEndValue
return scanContinue
}
if c == '\\' {
s.step = stateInStringEsc
return scanContinue
}
if c < 0x20 {
return s.error(c, "in string literal")
}
return scanContinue
}
// stateInStringEsc is the state after reading `"\` during a quoted string.
func stateInStringEsc(s *scanner, c int) int {
switch c {
case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
s.step = stateInString
return scanContinue
}
if c == 'u' {
s.step = stateInStringEscU
return scanContinue
}
return s.error(c, "in string escape code")
}
// stateInStringEscU is the state after reading `"\u` during a quoted string.
func stateInStringEscU(s *scanner, c int) int {
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
s.step = stateInStringEscU1
return scanContinue
}
// numbers
return s.error(c, "in \\u hexadecimal character escape")
}
// stateInStringEscU1 is the state after reading `"\u1` during a quoted string.
func stateInStringEscU1(s *scanner, c int) int {
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
s.step = stateInStringEscU12
return scanContinue
}
// numbers
return s.error(c, "in \\u hexadecimal character escape")
}
// stateInStringEscU12 is the state after reading `"\u12` during a quoted string.
func stateInStringEscU12(s *scanner, c int) int {
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
s.step = stateInStringEscU123
return scanContinue
}
// numbers
return s.error(c, "in \\u hexadecimal character escape")
}
// stateInStringEscU123 is the state after reading `"\u123` during a quoted string.
func stateInStringEscU123(s *scanner, c int) int {
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
s.step = stateInString
return scanContinue
}
// numbers
return s.error(c, "in \\u hexadecimal character escape")
}
// stateNeg is the state after reading `-` during a number.
func stateNeg(s *scanner, c int) int {
if c == '0' {
s.step = state0
return scanContinue
}
if '1' <= c && c <= '9' {
s.step = state1
return scanContinue
}
return s.error(c, "in numeric literal")
}
// state1 is the state after reading a non-zero integer during a number,
// such as after reading `1` or `100` but not `0`.
func state1(s *scanner, c int) int {
if '0' <= c && c <= '9' {
s.step = state1
return scanContinue
}
return state0(s, c)
}
// state0 is the state after reading `0` during a number.
func state0(s *scanner, c int) int {
if c == '.' {
s.step = stateDot
return scanContinue
}
if c == 'e' || c == 'E' {
s.step = stateE
return scanContinue
}
return stateEndValue(s, c)
}
// stateDot is the state after reading the integer and decimal point in a number,
// such as after reading `1.`.
func stateDot(s *scanner, c int) int {
if '0' <= c && c <= '9' {
s.step = stateDot0
return scanContinue
}
return s.error(c, "after decimal point in numeric literal")
}
// stateDot0 is the state after reading the integer, decimal point, and subsequent
// digits of a number, such as after reading `3.14`.
func stateDot0(s *scanner, c int) int {
if '0' <= c && c <= '9' {
s.step = stateDot0
return scanContinue
}
if c == 'e' || c == 'E' {
s.step = stateE
return scanContinue
}
return stateEndValue(s, c)
}
// stateE is the state after reading the mantissa and e in a number,
// such as after reading `314e` or `0.314e`.
func stateE(s *scanner, c int) int {
if c == '+' {
s.step = stateESign
return scanContinue
}
if c == '-' {
s.step = stateESign
return scanContinue
}
return stateESign(s, c)
}
// stateESign is the state after reading the mantissa, e, and sign in a number,
// such as after reading `314e-` or `0.314e+`.
func stateESign(s *scanner, c int) int {
if '0' <= c && c <= '9' {
s.step = stateE0
return scanContinue
}
return s.error(c, "in exponent of numeric literal")
}
// stateE0 is the state after reading the mantissa, e, optional sign,
// and at least one digit of the exponent in a number,
// such as after reading `314e-2` or `0.314e+1` or `3.14e0`.
func stateE0(s *scanner, c int) int {
if '0' <= c && c <= '9' {
s.step = stateE0
return scanContinue
}
return stateEndValue(s, c)
}
// stateT is the state after reading `t`.
func stateT(s *scanner, c int) int {
if c == 'r' {
s.step = stateTr
return scanContinue
}
return s.error(c, "in literal true (expecting 'r')")
}
// stateTr is the state after reading `tr`.
func stateTr(s *scanner, c int) int {
if c == 'u' {
s.step = stateTru
return scanContinue
}
return s.error(c, "in literal true (expecting 'u')")
}
// stateTru is the state after reading `tru`.
func stateTru(s *scanner, c int) int {
if c == 'e' {
s.step = stateEndValue
return scanContinue
}
return s.error(c, "in literal true (expecting 'e')")
}
// stateF is the state after reading `f`.
func stateF(s *scanner, c int) int {
if c == 'a' {
s.step = stateFa
return scanContinue
}
return s.error(c, "in literal false (expecting 'a')")
}
// stateFa is the state after reading `fa`.
func stateFa(s *scanner, c int) int {
if c == 'l' {
s.step = stateFal
return scanContinue
}
return s.error(c, "in literal false (expecting 'l')")
}
// stateFal is the state after reading `fal`.
func stateFal(s *scanner, c int) int {
if c == 's' {
s.step = stateFals
return scanContinue
}
return s.error(c, "in literal false (expecting 's')")
}
// stateFals is the state after reading `fals`.
func stateFals(s *scanner, c int) int {
if c == 'e' {
s.step = stateEndValue
return scanContinue
}
return s.error(c, "in literal false (expecting 'e')")
}
// stateN is the state after reading `n`.
func stateN(s *scanner, c int) int {
if c == 'u' {
s.step = stateNu
return scanContinue
}
return s.error(c, "in literal null (expecting 'u')")
}
// stateNu is the state after reading `nu`.
func stateNu(s *scanner, c int) int {
if c == 'l' {
s.step = stateNul
return scanContinue
}
return s.error(c, "in literal null (expecting 'l')")
}
// stateNul is the state after reading `nul`.
func stateNul(s *scanner, c int) int {
if c == 'l' {
s.step = stateEndValue
return scanContinue
}
return s.error(c, "in literal null (expecting 'l')")
}
// stateError is the state after reaching a syntax error,
// such as after reading `[1}` or `5.1.2`.
func stateError(s *scanner, c int) int {
return scanError
}
// error records an error and switches to the error state.
func (s *scanner) error(c int, context string) int {
s.step = stateError
s.err = &SyntaxError{"invalid character " + quoteChar(c) + " " + context, s.bytes}
return scanError
}
// quoteChar formats c as a quoted character literal
func quoteChar(c int) string {
// special cases - different from quoted strings
if c == '\'' {
return `'\''`
}
if c == '"' {
return `'"'`
}
// use quoted string with different quotation marks
s := strconv.Quote(string(c))
return "'" + s[1:len(s)-1] + "'"
}
// undo causes the scanner to return scanCode from the next state transition.
// This gives callers a simple 1-byte undo mechanism.
func (s *scanner) undo(scanCode int) {
if s.redo {
panic("json: invalid use of scanner")
}
s.redoCode = scanCode
s.redoState = s.step
s.step = stateRedo
s.redo = true
}
// stateRedo helps implement the scanner's 1-byte undo.
func stateRedo(s *scanner, c int) int {
s.redo = false
s.step = s.redoState
return s.redoCode
}
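
The step-as-function pattern the scanner comment describes can be shown in
miniature (an illustrative toy, not part of this package: a scanner that
accepts only digit strings):

```go
package main

import "fmt"

// toyScanner mirrors the shape above: the current state is a func,
// and each input byte drives one transition.
type toyScanner struct {
	step func(*toyScanner, byte) bool
	err  error
}

func stateDigits(s *toyScanner, c byte) bool {
	if c >= '0' && c <= '9' {
		return true // stay in stateDigits
	}
	s.err = fmt.Errorf("invalid character %q", c)
	s.step = stateError
	return false
}

func stateError(s *toyScanner, c byte) bool { return false }

func valid(data []byte) error {
	s := &toyScanner{step: stateDigits}
	for _, c := range data {
		if !s.step(s, c) {
			return s.err
		}
	}
	return nil
}

func main() {
	fmt.Println(valid([]byte("12345"))) // <nil>
	fmt.Println(valid([]byte("12a45"))) // invalid character 'a'
}
```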


@@ -1,487 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import (
"bytes"
"errors"
"io"
)
// A Decoder reads and decodes JSON objects from an input stream.
type Decoder struct {
r io.Reader
buf []byte
d decodeState
scanp int // start of unread data in buf
scan scanner
err error
tokenState int
tokenStack []int
}
// NewDecoder returns a new decoder that reads from r.
//
// The decoder introduces its own buffering and may
// read data from r beyond the JSON values requested.
func NewDecoder(r io.Reader) *Decoder {
return &Decoder{r: r}
}
// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
// Number instead of as a float64.
func (dec *Decoder) UseNumber() { dec.d.useNumber = true }
// Decode reads the next JSON-encoded value from its
// input and stores it in the value pointed to by v.
//
// See the documentation for Unmarshal for details about
// the conversion of JSON into a Go value.
func (dec *Decoder) Decode(v interface{}) error {
if dec.err != nil {
return dec.err
}
if err := dec.tokenPrepareForDecode(); err != nil {
return err
}
if !dec.tokenValueAllowed() {
return &SyntaxError{msg: "not at beginning of value"}
}
// Read whole value into buffer.
n, err := dec.readValue()
if err != nil {
return err
}
dec.d.init(dec.buf[dec.scanp : dec.scanp+n])
dec.scanp += n
// Don't save err from unmarshal into dec.err:
// the connection is still usable since we read a complete JSON
// object from it before the error happened.
err = dec.d.unmarshal(v)
// fixup token streaming state
dec.tokenValueEnd()
return err
}
// Buffered returns a reader of the data remaining in the Decoder's
// buffer. The reader is valid until the next call to Decode.
func (dec *Decoder) Buffered() io.Reader {
return bytes.NewReader(dec.buf[dec.scanp:])
}
// readValue reads a JSON value into dec.buf.
// It returns the length of the encoding.
func (dec *Decoder) readValue() (int, error) {
dec.scan.reset()
scanp := dec.scanp
var err error
Input:
for {
// Look in the buffer for a new value.
for i, c := range dec.buf[scanp:] {
dec.scan.bytes++
v := dec.scan.step(&dec.scan, int(c))
if v == scanEnd {
scanp += i
break Input
}
// scanEnd is delayed one byte.
// We might block trying to get that byte from src,
// so instead invent a space byte.
if (v == scanEndObject || v == scanEndArray) && dec.scan.step(&dec.scan, ' ') == scanEnd {
scanp += i + 1
break Input
}
if v == scanError {
dec.err = dec.scan.err
return 0, dec.scan.err
}
}
scanp = len(dec.buf)
// Did the last read have an error?
// Delayed until now to allow buffer scan.
if err != nil {
if err == io.EOF {
if dec.scan.step(&dec.scan, ' ') == scanEnd {
break Input
}
if nonSpace(dec.buf) {
err = io.ErrUnexpectedEOF
}
}
dec.err = err
return 0, err
}
n := scanp - dec.scanp
err = dec.refill()
scanp = dec.scanp + n
}
return scanp - dec.scanp, nil
}
func (dec *Decoder) refill() error {
// Make room to read more into the buffer.
// First slide down data already consumed.
if dec.scanp > 0 {
n := copy(dec.buf, dec.buf[dec.scanp:])
dec.buf = dec.buf[:n]
dec.scanp = 0
}
// Grow buffer if not large enough.
const minRead = 512
if cap(dec.buf)-len(dec.buf) < minRead {
newBuf := make([]byte, len(dec.buf), 2*cap(dec.buf)+minRead)
copy(newBuf, dec.buf)
dec.buf = newBuf
}
// Read. Delay error for next iteration (after scan).
n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)])
dec.buf = dec.buf[0 : len(dec.buf)+n]
return err
}
func nonSpace(b []byte) bool {
for _, c := range b {
if !isSpace(rune(c)) {
return true
}
}
return false
}
// An Encoder writes JSON objects to an output stream.
type Encoder struct {
w io.Writer
err error
canonical bool
}
// NewEncoder returns a new encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
return &Encoder{w: w}
}
// Canonical causes the encoder to switch to Canonical JSON mode.
// Read more at: http://wiki.laptop.org/go/Canonical_JSON
func (enc *Encoder) Canonical() { enc.canonical = true }
// Encode writes the JSON encoding of v to the stream,
// followed by a newline character.
//
// See the documentation for Marshal for details about the
// conversion of Go values to JSON.
func (enc *Encoder) Encode(v interface{}) error {
if enc.err != nil {
return enc.err
}
e := newEncodeState(enc.canonical)
err := e.marshal(v)
if err != nil {
return err
}
if !enc.canonical {
// Terminate each value with a newline.
// This makes the output look a little nicer
// when debugging, and some kind of space
// is required if the encoded value was a number,
// so that the reader knows there aren't more
// digits coming.
e.WriteByte('\n')
}
if _, err = enc.w.Write(e.Bytes()); err != nil {
enc.err = err
}
encodeStatePool.Put(e)
return err
}
// RawMessage is a raw encoded JSON object.
// It implements Marshaler and Unmarshaler and can
// be used to delay JSON decoding or precompute a JSON encoding.
type RawMessage []byte
// MarshalJSON returns *m as the JSON encoding of m.
func (m *RawMessage) MarshalJSON() ([]byte, error) {
return *m, nil
}
// UnmarshalJSON sets *m to a copy of data.
func (m *RawMessage) UnmarshalJSON(data []byte) error {
if m == nil {
return errors.New("json.RawMessage: UnmarshalJSON on nil pointer")
}
*m = append((*m)[0:0], data...)
return nil
}
var _ Marshaler = (*RawMessage)(nil)
var _ Unmarshaler = (*RawMessage)(nil)
// A Token holds a value of one of these types:
//
// Delim, for the four JSON delimiters [ ] { }
// bool, for JSON booleans
// float64, for JSON numbers
// Number, for JSON numbers
// string, for JSON string literals
// nil, for JSON null
//
type Token interface{}
const (
tokenTopValue = iota
tokenArrayStart
tokenArrayValue
tokenArrayComma
tokenObjectStart
tokenObjectKey
tokenObjectColon
tokenObjectValue
tokenObjectComma
)
// advance tokenstate from a separator state to a value state
func (dec *Decoder) tokenPrepareForDecode() error {
// Note: Not calling peek before switch, to avoid
// putting peek into the standard Decode path.
// peek is only called when using the Token API.
switch dec.tokenState {
case tokenArrayComma:
c, err := dec.peek()
if err != nil {
return err
}
if c != ',' {
return &SyntaxError{"expected comma after array element", 0}
}
dec.scanp++
dec.tokenState = tokenArrayValue
case tokenObjectColon:
c, err := dec.peek()
if err != nil {
return err
}
if c != ':' {
return &SyntaxError{"expected colon after object key", 0}
}
dec.scanp++
dec.tokenState = tokenObjectValue
}
return nil
}
func (dec *Decoder) tokenValueAllowed() bool {
switch dec.tokenState {
case tokenTopValue, tokenArrayStart, tokenArrayValue, tokenObjectValue:
return true
}
return false
}
func (dec *Decoder) tokenValueEnd() {
switch dec.tokenState {
case tokenArrayStart, tokenArrayValue:
dec.tokenState = tokenArrayComma
case tokenObjectValue:
dec.tokenState = tokenObjectComma
}
}
// A Delim is a JSON array or object delimiter, one of [ ] { or }.
type Delim rune
func (d Delim) String() string {
return string(d)
}
// Token returns the next JSON token in the input stream.
// At the end of the input stream, Token returns nil, io.EOF.
//
// Token guarantees that the delimiters [ ] { } it returns are
// properly nested and matched: if Token encounters an unexpected
// delimiter in the input, it will return an error.
//
// The input stream consists of basic JSON values—bool, string,
// number, and null—along with delimiters [ ] { } of type Delim
// to mark the start and end of arrays and objects.
// Commas and colons are elided.
func (dec *Decoder) Token() (Token, error) {
for {
c, err := dec.peek()
if err != nil {
return nil, err
}
switch c {
case '[':
if !dec.tokenValueAllowed() {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenStack = append(dec.tokenStack, dec.tokenState)
dec.tokenState = tokenArrayStart
return Delim('['), nil
case ']':
if dec.tokenState != tokenArrayStart && dec.tokenState != tokenArrayComma {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
dec.tokenValueEnd()
return Delim(']'), nil
case '{':
if !dec.tokenValueAllowed() {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenStack = append(dec.tokenStack, dec.tokenState)
dec.tokenState = tokenObjectStart
return Delim('{'), nil
case '}':
if dec.tokenState != tokenObjectStart && dec.tokenState != tokenObjectComma {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
dec.tokenValueEnd()
return Delim('}'), nil
case ':':
if dec.tokenState != tokenObjectColon {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenState = tokenObjectValue
continue
case ',':
if dec.tokenState == tokenArrayComma {
dec.scanp++
dec.tokenState = tokenArrayValue
continue
}
if dec.tokenState == tokenObjectComma {
dec.scanp++
dec.tokenState = tokenObjectKey
continue
}
return dec.tokenError(c)
case '"':
if dec.tokenState == tokenObjectStart || dec.tokenState == tokenObjectKey {
var x string
old := dec.tokenState
dec.tokenState = tokenTopValue
err := dec.Decode(&x)
dec.tokenState = old
if err != nil {
clearOffset(err)
return nil, err
}
dec.tokenState = tokenObjectColon
return x, nil
}
fallthrough
default:
if !dec.tokenValueAllowed() {
return dec.tokenError(c)
}
var x interface{}
if err := dec.Decode(&x); err != nil {
clearOffset(err)
return nil, err
}
return x, nil
}
}
}
func clearOffset(err error) {
if s, ok := err.(*SyntaxError); ok {
s.Offset = 0
}
}
func (dec *Decoder) tokenError(c byte) (Token, error) {
var context string
switch dec.tokenState {
case tokenTopValue:
context = " looking for beginning of value"
case tokenArrayStart, tokenArrayValue, tokenObjectValue:
context = " looking for beginning of value"
case tokenArrayComma:
context = " after array element"
case tokenObjectKey:
context = " looking for beginning of object key string"
case tokenObjectColon:
context = " after object key"
case tokenObjectComma:
context = " after object key:value pair"
}
return nil, &SyntaxError{"invalid character " + quoteChar(int(c)) + " " + context, 0}
}
// More reports whether there is another element in the
// current array or object being parsed.
func (dec *Decoder) More() bool {
c, err := dec.peek()
return err == nil && c != ']' && c != '}'
}
func (dec *Decoder) peek() (byte, error) {
var err error
for {
for i := dec.scanp; i < len(dec.buf); i++ {
c := dec.buf[i]
if isSpace(rune(c)) {
continue
}
dec.scanp = i
return c, nil
}
// buffer has been scanned, now report any error
if err != nil {
return 0, err
}
err = dec.refill()
}
}
/*
TODO
// EncodeToken writes the given JSON token to the stream.
// It returns an error if the delimiters [ ] { } are not properly used.
//
// EncodeToken does not call Flush, because usually it is part of
// a larger operation such as Encode, and those will call Flush when finished.
// Callers that create an Encoder and then invoke EncodeToken directly,
// without using Encode, need to call Flush when finished to ensure that
// the JSON is written to the underlying writer.
func (e *Encoder) EncodeToken(t Token) error {
...
}
*/
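
A sketch of the Token streaming API documented above, walking a small
document (same assumed import path as before; behavior mirrors the upstream
encoding/json Decoder):

```go
package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/docker/go/canonical/json"
)

func main() {
	dec := json.NewDecoder(strings.NewReader(`{"xs": [1, true, "s"]}`))
	for {
		tok, err := dec.Token()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		// Delimiters arrive as json.Delim; commas and colons are elided.
		fmt.Printf("%T %v\n", tok, tok)
	}
}
```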


@@ -1,44 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import (
"strings"
)
// tagOptions is the string following a comma in a struct field's "json"
// tag, or the empty string. It does not include the leading comma.
type tagOptions string
// parseTag splits a struct field's json tag into its name and
// comma-separated options.
func parseTag(tag string) (string, tagOptions) {
if idx := strings.Index(tag, ","); idx != -1 {
return tag[:idx], tagOptions(tag[idx+1:])
}
return tag, tagOptions("")
}
// Contains reports whether a comma-separated list of options
// contains a particular substr flag. substr must be surrounded by a
// string boundary or commas.
func (o tagOptions) Contains(optionName string) bool {
if len(o) == 0 {
return false
}
s := string(o)
for s != "" {
var next string
i := strings.Index(s, ",")
if i >= 0 {
s, next = s[:i], s[i+1:]
}
if s == optionName {
return true
}
s = next
}
return false
}
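
What the two helpers do, restated as a runnable sketch with the standard
library (parseTag and tagOptions are unexported, so this mirrors rather than
calls them):

```go
package main

import (
	"fmt"
	"reflect"
	"strings"
)

type user struct {
	Name string `json:"name,omitempty"`
}

func main() {
	tag := reflect.TypeOf(user{}).Field(0).Tag.Get("json") // "name,omitempty"

	// parseTag: split off the name at the first comma.
	name, opts := tag, ""
	if i := strings.Index(tag, ","); i != -1 {
		name, opts = tag[:i], tag[i+1:]
	}
	fmt.Println(name) // name

	// tagOptions.Contains: scan the remaining comma-separated options.
	for _, o := range strings.Split(opts, ",") {
		fmt.Println(o == "omitempty") // true
	}
}
```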


@@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2015 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@ -1,100 +0,0 @@
# Notary
[![Circle CI](https://circleci.com/gh/docker/notary/tree/master.svg?style=shield)](https://circleci.com/gh/docker/notary/tree/master) [![CodeCov](https://codecov.io/github/docker/notary/coverage.svg?branch=master)](https://codecov.io/github/docker/notary) [![GoReportCard](https://goreportcard.com/badge/docker/notary)](https://goreportcard.com/report/github.com/docker/notary)
The Notary project comprises a [server](cmd/notary-server) and a [client](cmd/notary) for running and interacting
with trusted collections. Please see the [service architecture](docs/service_architecture.md) documentation
for more information.
Notary aims to make the internet more secure by making it easy for people to
publish and verify content. We often rely on TLS to secure our communications
with a web server, an approach that is inherently flawed: any compromise of the server
enables malicious content to be substituted for the legitimate content.
With Notary, publishers can sign their content offline using keys kept highly
secure. Once the publisher is ready to make the content available, they can
push their signed trusted collection to a Notary Server.
Consumers, having acquired the publisher's public key through a secure channel,
can then communicate with any notary server or (insecure) mirror, relying
only on the publisher's key to determine the validity and integrity of the
received content.
## Goals
Notary is based on [The Update Framework](https://www.theupdateframework.com/), a secure general design for the problem of software distribution and updates. By using TUF, notary achieves a number of key advantages:
* **Survivable Key Compromise**: Content publishers must manage keys in order to sign their content. Signing keys may be compromised or lost so systems must be designed in order to be flexible and recoverable in the case of key compromise. TUF's notion of key roles is utilized to separate responsibilities across a hierarchy of keys such that loss of any particular key (except the root role) by itself is not fatal to the security of the system.
* **Freshness Guarantees**: Replay attacks are a common problem in designing secure systems, where previously valid payloads are replayed to trick another system. The same problem exists in software update systems, where old signed content can be presented as the most recent. Notary makes use of timestamping on publish so that consumers can know that they are receiving the most up-to-date content. This is particularly important when dealing with software updates, where old vulnerable versions could be used to attack users.
* **Configurable Trust Thresholds**: Oftentimes there are a large number of publishers that are allowed to publish a particular piece of content. For example, open source projects where there are a number of core maintainers. Trust thresholds can be used so that content consumers require a configurable number of signatures on a piece of content in order to trust it. Using thresholds increases security so that loss of individual signing keys doesn't allow publishing of malicious content.
* **Signing Delegation**: To allow for flexible publishing of trusted collections, a content publisher can delegate part of their collection to another signer. This delegation is represented as signed metadata so that a consumer of the content can verify both the content and the delegation.
* **Use of Existing Distribution**: Notary's trust guarantees are not tied at all to particular distribution channels from which content is delivered. Therefore, trust can be added to any existing content delivery mechanism.
* **Untrusted Mirrors and Transport**: All of the notary metadata can be mirrored and distributed via arbitrary channels.
## Security
Please see our [service architecture docs](docs/service_architecture.md#threat-model) for more information about our threat model, which details the varying survivability and severities for key compromise as well as mitigations.
Our last security audit was on July 31, 2015 by NCC ([results](docs/resources/ncc_docker_notary_audit_2015_07_31.pdf)).
Any security vulnerabilities can be reported to security@docker.com.
# Getting started with the Notary CLI
Please get the Notary Client CLI binary from [the official releases page](https://github.com/docker/notary/releases), or [build one yourself](#building-notary).
The version of the Notary server and signer should be greater than or equal to the Notary CLI's version to ensure feature compatibility (e.g. for CLI version 0.2, use server/signer version >= 0.2); all official releases are associated with GitHub tags.
To use the Notary CLI with Docker hub images, please have a look at our
[getting started docs](docs/getting_started.md).
For more advanced usage, please see the
[advanced usage docs](docs/advanced_usage.md).
To use the CLI against a local Notary server rather than against Docker Hub:
1. Please ensure that you have [docker and docker-compose](http://docs.docker.com/compose/install/) installed.
1. `git clone https://github.com/docker/notary.git` and from the cloned repository path,
start up a local Notary server and signer and copy the config file and testing certs to your
local notary config directory:
```sh
$ docker-compose build
$ docker-compose up -d
$ mkdir -p ~/.notary && cp cmd/notary/config.json cmd/notary/root-ca.crt ~/.notary
```
1. Add `127.0.0.1 notary-server` to your `/etc/hosts`, or if using docker-machine,
add `$(docker-machine ip) notary-server`.
You can run through the examples in the
[getting started docs](docs/getting_started.md) and
[advanced usage docs](docs/advanced_usage.md), but
without the `-s` (server URL) argument to the `notary` command, since the server
URL is already specified in the configuration file you copied.
You can also leave off the `-d ~/.docker/trust` argument if you do not care
to use `notary` with Docker images.
## Building Notary
Prerequisites:
- Go >= 1.7
- [godep](https://github.com/tools/godep) installed
- libtool development headers installed
- Ubuntu: `apt-get install libltdl-dev`
- CentOS/RedHat: `yum install libtool-ltdl-devel`
- Mac OS ([Homebrew](http://brew.sh/)): `brew install libtool`
Run `make binaries`, which creates the Notary Client CLI binary at `bin/notary`.
Note that `make binaries` assumes a standard Go directory structure, in which
Notary is checked out to the `src` directory in your `GOPATH`. For example:
```
$GOPATH/
src/
github.com/
docker/
notary/
```

View file

@ -1,102 +0,0 @@
package changelist
import (
"github.com/docker/notary/tuf/data"
)
// Scopes for TUFChanges are simply the TUF roles.
// Unfortunately because of targets delegations, we can only
// cover the base roles.
const (
ScopeRoot = "root"
ScopeTargets = "targets"
ScopeSnapshot = "snapshot"
ScopeTimestamp = "timestamp"
)
// Types for TUFChanges are namespaced by the Role they
// are relevant for. The Root and Targets roles are the
// only ones for which user action can cause a change, as
// all changes in Snapshot and Timestamp are programmatically
generated based on Root and Targets changes.
const (
TypeRootRole = "role"
TypeTargetsTarget = "target"
TypeTargetsDelegation = "delegation"
TypeWitness = "witness"
)
// TUFChange represents a change to a TUF repo
type TUFChange struct {
// Abbreviated because Go doesn't permit a field and method of the same name
Actn string `json:"action"`
Role string `json:"role"`
ChangeType string `json:"type"`
ChangePath string `json:"path"`
Data []byte `json:"data"`
}
// TUFRootData represents a modification of the keys associated
// with a role that appears in the root.json
type TUFRootData struct {
Keys data.KeyList `json:"keys"`
RoleName string `json:"role"`
}
// NewTUFChange initializes a TUFChange object
func NewTUFChange(action string, role, changeType, changePath string, content []byte) *TUFChange {
return &TUFChange{
Actn: action,
Role: role,
ChangeType: changeType,
ChangePath: changePath,
Data: content,
}
}
// Action returns c.Actn
func (c TUFChange) Action() string {
return c.Actn
}
// Scope returns c.Role
func (c TUFChange) Scope() string {
return c.Role
}
// Type returns c.ChangeType
func (c TUFChange) Type() string {
return c.ChangeType
}
// Path returns c.ChangePath
func (c TUFChange) Path() string {
return c.ChangePath
}
// Content returns c.Data
func (c TUFChange) Content() []byte {
return c.Data
}
// TUFDelegation represents a modification to a target delegation,
// including creating a new delegation. This format is used to avoid
// unexpected race conditions between humans modifying the same delegation
type TUFDelegation struct {
NewName string `json:"new_name,omitempty"`
NewThreshold int `json:"threshold,omitempty"`
AddKeys data.KeyList `json:"add_keys,omitempty"`
RemoveKeys []string `json:"remove_keys,omitempty"`
AddPaths []string `json:"add_paths,omitempty"`
RemovePaths []string `json:"remove_paths,omitempty"`
ClearAllPaths bool `json:"clear_paths,omitempty"`
}
// ToNewRole creates a fresh role object from the TUFDelegation data
func (td TUFDelegation) ToNewRole(scope string) (*data.Role, error) {
name := scope
if td.NewName != "" {
name = td.NewName
}
return data.NewRole(name, td.NewThreshold, td.AddKeys.IDs(), td.AddPaths)
}
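As a quick illustration of how these types compose, here is a hedged sketch (the role name and path are made up) that serializes a TUFDelegation and wraps it in a TUFChange, using only the constructors defined above:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/notary/client/changelist"
)

func main() {
	// Stage a path addition for a hypothetical delegation role.
	td := changelist.TUFDelegation{AddPaths: []string{"library/"}}
	tdJSON, err := json.Marshal(&td)
	if err != nil {
		panic(err)
	}
	c := changelist.NewTUFChange(
		changelist.ActionUpdate,
		"targets/releases", // scope: the delegation role
		changelist.TypeTargetsDelegation,
		"", // delegations carry no path
		tdJSON,
	)
	fmt.Println(c.Action(), c.Scope(), c.Type())
}
```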

View file

@ -1,77 +0,0 @@
package changelist
// memChangeList implements a simple in memory change list.
type memChangelist struct {
changes []Change
}
// NewMemChangelist instantiates a new in-memory changelist
func NewMemChangelist() Changelist {
return &memChangelist{}
}
// List returns a list of Changes
func (cl memChangelist) List() []Change {
return cl.changes
}
// Add adds a change to the in-memory change list
func (cl *memChangelist) Add(c Change) error {
cl.changes = append(cl.changes, c)
return nil
}
// Remove deletes the changes found at the given indices
func (cl *memChangelist) Remove(idxs []int) error {
remove := make(map[int]struct{})
for _, i := range idxs {
remove[i] = struct{}{}
}
var keep []Change
for i, c := range cl.changes {
if _, ok := remove[i]; ok {
continue
}
keep = append(keep, c)
}
cl.changes = keep
return nil
}
// Clear empties the in-memory changelist; the archive argument is ignored.
func (cl *memChangelist) Clear(archive string) error {
// setting the slice to nil empties it; appending later re-initializes it.
cl.changes = nil
return nil
}
// Close is a no-op in this in-memory change-list
func (cl *memChangelist) Close() error {
return nil
}
func (cl *memChangelist) NewIterator() (ChangeIterator, error) {
return &MemChangeListIterator{index: 0, collection: cl.changes}, nil
}
// MemChangeListIterator is a concrete instance of ChangeIterator
type MemChangeListIterator struct {
index int
collection []Change // Same type as memChangelist.changes
}
// Next returns the next Change
func (m *MemChangeListIterator) Next() (item Change, err error) {
if m.index >= len(m.collection) {
return nil, IteratorBoundsError(m.index)
}
item = m.collection[m.index]
m.index++
return item, err
}
// HasNext indicates whether the iterator is exhausted
func (m *MemChangeListIterator) HasNext() bool {
return m.index < len(m.collection)
}
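A short sketch of the round trip through this in-memory implementation, assuming the changelist package is importable as vendored above; the target name is hypothetical:

```go
package main

import (
	"fmt"

	"github.com/docker/notary/client/changelist"
)

func main() {
	cl := changelist.NewMemChangelist()
	_ = cl.Add(changelist.NewTUFChange(
		changelist.ActionCreate, changelist.ScopeTargets,
		changelist.TypeTargetsTarget, "alpine", nil))

	it, _ := cl.NewIterator() // never errors for the in-memory list
	for it.HasNext() {
		c, err := it.Next()
		if err != nil {
			break
		}
		fmt.Println(c.Action(), c.Scope(), c.Path())
	}
}
```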

View file

@ -1,197 +0,0 @@
package changelist
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"sort"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution/uuid"
"path/filepath"
)
// FileChangelist stores all the changes as files
type FileChangelist struct {
dir string
}
// NewFileChangelist is a convenience method for returning FileChangeLists
func NewFileChangelist(dir string) (*FileChangelist, error) {
logrus.Debug("Making dir path: ", dir)
err := os.MkdirAll(dir, 0700)
if err != nil {
return nil, err
}
return &FileChangelist{dir: dir}, nil
}
// getFileNames reads a directory listing, filtering out subdirectories
func getFileNames(dirName string) ([]os.FileInfo, error) {
var dirListing, fileInfos []os.FileInfo
dir, err := os.Open(dirName)
if err != nil {
return fileInfos, err
}
defer dir.Close()
dirListing, err = dir.Readdir(0)
if err != nil {
return fileInfos, err
}
for _, f := range dirListing {
if f.IsDir() {
continue
}
fileInfos = append(fileInfos, f)
}
sort.Sort(fileChanges(fileInfos))
return fileInfos, nil
}
// unmarshalFile reads a JSON-formatted file from disk and converts it to a TUFChange struct
func unmarshalFile(dirname string, f os.FileInfo) (*TUFChange, error) {
c := &TUFChange{}
raw, err := ioutil.ReadFile(filepath.Join(dirname, f.Name()))
if err != nil {
return c, err
}
err = json.Unmarshal(raw, c)
if err != nil {
return c, err
}
return c, nil
}
// List returns a list of sorted changes
func (cl FileChangelist) List() []Change {
var changes []Change
fileInfos, err := getFileNames(cl.dir)
if err != nil {
return changes
}
for _, f := range fileInfos {
c, err := unmarshalFile(cl.dir, f)
if err != nil {
logrus.Warn(err.Error())
continue
}
changes = append(changes, c)
}
return changes
}
// Add adds a change to the file change list
func (cl FileChangelist) Add(c Change) error {
cJSON, err := json.Marshal(c)
if err != nil {
return err
}
filename := fmt.Sprintf("%020d_%s.change", time.Now().UnixNano(), uuid.Generate())
return ioutil.WriteFile(filepath.Join(cl.dir, filename), cJSON, 0644)
}
// Remove deletes the changes found at the given indices
func (cl FileChangelist) Remove(idxs []int) error {
fileInfos, err := getFileNames(cl.dir)
if err != nil {
return err
}
remove := make(map[int]struct{})
for _, i := range idxs {
remove[i] = struct{}{}
}
for i, c := range fileInfos {
if _, ok := remove[i]; ok {
file := filepath.Join(cl.dir, c.Name())
if err := os.Remove(file); err != nil {
logrus.Errorf("could not remove change %d: %s", i, err.Error())
}
}
}
return nil
}
// Clear clears the change list
// N.B. archiving not currently implemented
func (cl FileChangelist) Clear(archive string) error {
dir, err := os.Open(cl.dir)
if err != nil {
return err
}
defer dir.Close()
files, err := dir.Readdir(0)
if err != nil {
return err
}
for _, f := range files {
os.Remove(filepath.Join(cl.dir, f.Name()))
}
return nil
}
// Close is a no-op
func (cl FileChangelist) Close() error {
// Nothing to do here
return nil
}
// NewIterator creates an iterator from FileChangelist
func (cl FileChangelist) NewIterator() (ChangeIterator, error) {
fileInfos, err := getFileNames(cl.dir)
if err != nil {
return &FileChangeListIterator{}, err
}
return &FileChangeListIterator{dirname: cl.dir, collection: fileInfos}, nil
}
// IteratorBoundsError is an Error type used by Next()
type IteratorBoundsError int
// Error implements the Error interface
func (e IteratorBoundsError) Error() string {
return fmt.Sprintf("Iterator index (%d) out of bounds", e)
}
// FileChangeListIterator is a concrete instance of ChangeIterator
type FileChangeListIterator struct {
index int
dirname string
collection []os.FileInfo
}
// Next returns the next Change in the FileChangeList
func (m *FileChangeListIterator) Next() (item Change, err error) {
if m.index >= len(m.collection) {
return nil, IteratorBoundsError(m.index)
}
f := m.collection[m.index]
m.index++
item, err = unmarshalFile(m.dirname, f)
return
}
// HasNext indicates whether the iterator is exhausted
func (m *FileChangeListIterator) HasNext() bool {
return m.index < len(m.collection)
}
type fileChanges []os.FileInfo
// Len returns the length of a file change list
func (cs fileChanges) Len() int {
return len(cs)
}
// Less compares the names of two different file changes
func (cs fileChanges) Less(i, j int) bool {
return cs[i].Name() < cs[j].Name()
}
// Swap swaps the position of two file changes
func (cs fileChanges) Swap(i, j int) {
tmp := cs[i]
cs[i] = cs[j]
cs[j] = tmp
}
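The same round trip against this disk-backed variant, again a sketch with a hypothetical target name; each change lands as a timestamped `.change` file, so `List` returns entries in creation order:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/docker/notary/client/changelist"
)

func main() {
	dir, err := ioutil.TempDir("", "changelist")
	if err != nil {
		log.Fatal(err)
	}
	cl, err := changelist.NewFileChangelist(dir)
	if err != nil {
		log.Fatal(err)
	}
	defer cl.Close()

	if err := cl.Add(changelist.NewTUFChange(
		changelist.ActionCreate, changelist.ScopeTargets,
		changelist.TypeTargetsTarget, "busybox", nil)); err != nil {
		log.Fatal(err)
	}
	for _, c := range cl.List() {
		fmt.Println(c.Path()) // "busybox"
	}
}
```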

View file

@ -1,73 +0,0 @@
package changelist
// Changelist is the interface for all TUF change lists
type Changelist interface {
// List returns the ordered list of changes
// currently stored
List() []Change
// Add appends the provided change to
// the list of changes
Add(Change) error
// Clear empties the current change list.
// Archive may be provided as a directory path
// to save a copy of the changelist in that location
Clear(archive string) error
// Remove deletes the changes corresponding with the indices given
Remove(idxs []int) error
// Close synchronizes any pending writes to the underlying
// storage and closes the file/connection
Close() error
// NewIterator returns an iterator for walking through the list
// of changes currently stored
NewIterator() (ChangeIterator, error)
}
const (
// ActionCreate represents a Create action
ActionCreate = "create"
// ActionUpdate represents an Update action
ActionUpdate = "update"
// ActionDelete represents a Delete action
ActionDelete = "delete"
)
// Change is the interface for a TUF Change
type Change interface {
// "create","update", or "delete"
Action() string
// Where the change should be made.
// For TUF this will be the role
Scope() string
// The content type being affected.
// For TUF this will be "target", or "delegation".
// If the type is "delegation", the Scope will be
// used to determine if a root role is being updated
// or a target delegation.
Type() string
// Path indicates the entry within a role to be affected by the
// change. For targets, this is simply the target's path,
// for delegations it's the delegated role name.
Path() string
// Serialized content that the interpreter of a changelist
// can use to apply the change.
// For TUF this will be the serialized JSON that needs
// to be inserted or merged. In the case of a "delete"
// action, it will be nil.
Content() []byte
}
// ChangeIterator is the interface for iterating across collections of
// TUF Change items
type ChangeIterator interface {
Next() (Change, error)
HasNext() bool
}
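Since both implementations above satisfy this interface, a consumer can be written against it alone. A minimal sketch:

```go
package main

import (
	"fmt"

	"github.com/docker/notary/client/changelist"
)

// drain walks any Changelist implementation through its iterator and
// closes it when done.
func drain(cl changelist.Changelist) error {
	it, err := cl.NewIterator()
	if err != nil {
		return err
	}
	for it.HasNext() {
		c, err := it.Next()
		if err != nil {
			return err
		}
		fmt.Printf("%s %s %s\n", c.Action(), c.Scope(), c.Path())
	}
	return cl.Close()
}

func main() {
	_ = drain(changelist.NewMemChangelist())
}
```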

File diff suppressed because it is too large

View file

@ -1,299 +0,0 @@
package client
import (
"encoding/json"
"fmt"
"path/filepath"
"github.com/Sirupsen/logrus"
"github.com/docker/notary"
"github.com/docker/notary/client/changelist"
store "github.com/docker/notary/storage"
"github.com/docker/notary/tuf/data"
"github.com/docker/notary/tuf/utils"
)
// AddDelegation creates changelist entries to add provided delegation public keys and paths.
// This method composes AddDelegationRoleAndKeys and AddDelegationPaths (each creates one changelist entry if called).
func (r *NotaryRepository) AddDelegation(name string, delegationKeys []data.PublicKey, paths []string) error {
if len(delegationKeys) > 0 {
err := r.AddDelegationRoleAndKeys(name, delegationKeys)
if err != nil {
return err
}
}
if len(paths) > 0 {
err := r.AddDelegationPaths(name, paths)
if err != nil {
return err
}
}
return nil
}
// AddDelegationRoleAndKeys creates a changelist entry to add provided delegation public keys.
// This method is the simplest way to create a new delegation, because the delegation must have at least
// one key upon creation to be valid since we will reject the changelist while validating the threshold.
func (r *NotaryRepository) AddDelegationRoleAndKeys(name string, delegationKeys []data.PublicKey) error {
if !data.IsDelegation(name) {
return data.ErrInvalidRole{Role: name, Reason: "invalid delegation role name"}
}
cl, err := changelist.NewFileChangelist(filepath.Join(r.tufRepoPath, "changelist"))
if err != nil {
return err
}
defer cl.Close()
logrus.Debugf(`Adding delegation "%s" with threshold %d, and %d keys\n`,
name, notary.MinThreshold, len(delegationKeys))
// Defaulting to threshold of 1, since we don't allow for larger thresholds at the moment.
tdJSON, err := json.Marshal(&changelist.TUFDelegation{
NewThreshold: notary.MinThreshold,
AddKeys: data.KeyList(delegationKeys),
})
if err != nil {
return err
}
template := newCreateDelegationChange(name, tdJSON)
return addChange(cl, template, name)
}
// AddDelegationPaths creates a changelist entry to add provided paths to an existing delegation.
// This method cannot create a new delegation itself because the role must meet the key threshold upon creation.
func (r *NotaryRepository) AddDelegationPaths(name string, paths []string) error {
if !data.IsDelegation(name) {
return data.ErrInvalidRole{Role: name, Reason: "invalid delegation role name"}
}
cl, err := changelist.NewFileChangelist(filepath.Join(r.tufRepoPath, "changelist"))
if err != nil {
return err
}
defer cl.Close()
logrus.Debugf(`Adding %s paths to delegation %s\n`, paths, name)
tdJSON, err := json.Marshal(&changelist.TUFDelegation{
AddPaths: paths,
})
if err != nil {
return err
}
template := newCreateDelegationChange(name, tdJSON)
return addChange(cl, template, name)
}
// RemoveDelegationKeysAndPaths creates changelist entries to remove provided delegation key IDs and paths.
// This method composes RemoveDelegationPaths and RemoveDelegationKeys (each creates one changelist entry if called).
func (r *NotaryRepository) RemoveDelegationKeysAndPaths(name string, keyIDs, paths []string) error {
if len(paths) > 0 {
err := r.RemoveDelegationPaths(name, paths)
if err != nil {
return err
}
}
if len(keyIDs) > 0 {
err := r.RemoveDelegationKeys(name, keyIDs)
if err != nil {
return err
}
}
return nil
}
// RemoveDelegationRole creates a changelist to remove all paths and keys from a role, and delete the role in its entirety.
func (r *NotaryRepository) RemoveDelegationRole(name string) error {
if !data.IsDelegation(name) {
return data.ErrInvalidRole{Role: name, Reason: "invalid delegation role name"}
}
cl, err := changelist.NewFileChangelist(filepath.Join(r.tufRepoPath, "changelist"))
if err != nil {
return err
}
defer cl.Close()
logrus.Debugf(`Removing delegation "%s"\n`, name)
template := newDeleteDelegationChange(name, nil)
return addChange(cl, template, name)
}
// RemoveDelegationPaths creates a changelist entry to remove provided paths from an existing delegation.
func (r *NotaryRepository) RemoveDelegationPaths(name string, paths []string) error {
if !data.IsDelegation(name) {
return data.ErrInvalidRole{Role: name, Reason: "invalid delegation role name"}
}
cl, err := changelist.NewFileChangelist(filepath.Join(r.tufRepoPath, "changelist"))
if err != nil {
return err
}
defer cl.Close()
logrus.Debugf(`Removing %s paths from delegation "%s"\n`, paths, name)
tdJSON, err := json.Marshal(&changelist.TUFDelegation{
RemovePaths: paths,
})
if err != nil {
return err
}
template := newUpdateDelegationChange(name, tdJSON)
return addChange(cl, template, name)
}
// RemoveDelegationKeys creates a changelist entry to remove provided keys from an existing delegation.
// When this changelist is applied, if the specified keys are the only keys left in the role,
// the role itself will be deleted in its entirety.
// It can also delete a key from all delegations under a parent using a name
// with a wildcard at the end.
func (r *NotaryRepository) RemoveDelegationKeys(name string, keyIDs []string) error {
if !data.IsDelegation(name) && !data.IsWildDelegation(name) {
return data.ErrInvalidRole{Role: name, Reason: "invalid delegation role name"}
}
cl, err := changelist.NewFileChangelist(filepath.Join(r.tufRepoPath, "changelist"))
if err != nil {
return err
}
defer cl.Close()
logrus.Debugf(`Removing %s keys from delegation "%s"\n`, keyIDs, name)
tdJSON, err := json.Marshal(&changelist.TUFDelegation{
RemoveKeys: keyIDs,
})
if err != nil {
return err
}
template := newUpdateDelegationChange(name, tdJSON)
return addChange(cl, template, name)
}
// ClearDelegationPaths creates a changelist entry to remove all paths from an existing delegation.
func (r *NotaryRepository) ClearDelegationPaths(name string) error {
if !data.IsDelegation(name) {
return data.ErrInvalidRole{Role: name, Reason: "invalid delegation role name"}
}
cl, err := changelist.NewFileChangelist(filepath.Join(r.tufRepoPath, "changelist"))
if err != nil {
return err
}
defer cl.Close()
logrus.Debugf(`Removing all paths from delegation "%s"\n`, name)
tdJSON, err := json.Marshal(&changelist.TUFDelegation{
ClearAllPaths: true,
})
if err != nil {
return err
}
template := newUpdateDelegationChange(name, tdJSON)
return addChange(cl, template, name)
}
func newUpdateDelegationChange(name string, content []byte) *changelist.TUFChange {
return changelist.NewTUFChange(
changelist.ActionUpdate,
name,
changelist.TypeTargetsDelegation,
"", // no path for delegations
content,
)
}
func newCreateDelegationChange(name string, content []byte) *changelist.TUFChange {
return changelist.NewTUFChange(
changelist.ActionCreate,
name,
changelist.TypeTargetsDelegation,
"", // no path for delegations
content,
)
}
func newDeleteDelegationChange(name string, content []byte) *changelist.TUFChange {
return changelist.NewTUFChange(
changelist.ActionDelete,
name,
changelist.TypeTargetsDelegation,
"", // no path for delegations
content,
)
}
// GetDelegationRoles returns the keys and roles of the repository's delegations
// Also converts key IDs to canonical key IDs to keep consistent with signing prompts
func (r *NotaryRepository) GetDelegationRoles() ([]data.Role, error) {
// Update state of the repo to latest
if err := r.Update(false); err != nil {
return nil, err
}
// All top level delegations (ex: targets/level1) are stored exclusively in targets.json
_, ok := r.tufRepo.Targets[data.CanonicalTargetsRole]
if !ok {
return nil, store.ErrMetaNotFound{Resource: data.CanonicalTargetsRole}
}
// make a copy for traversing nested delegations
allDelegations := []data.Role{}
// Define a visitor function to populate the delegations list and translate their key IDs to canonical IDs
delegationCanonicalListVisitor := func(tgt *data.SignedTargets, validRole data.DelegationRole) interface{} {
// For the return list, update with a copy that includes canonicalKeyIDs
// These aren't validated by the validRole
canonicalDelegations, err := translateDelegationsToCanonicalIDs(tgt.Signed.Delegations)
if err != nil {
return err
}
allDelegations = append(allDelegations, canonicalDelegations...)
return nil
}
err := r.tufRepo.WalkTargets("", "", delegationCanonicalListVisitor)
if err != nil {
return nil, err
}
return allDelegations, nil
}
func translateDelegationsToCanonicalIDs(delegationInfo data.Delegations) ([]data.Role, error) {
canonicalDelegations := make([]data.Role, len(delegationInfo.Roles))
// Do a copy by value to ensure local delegation metadata is untouched
for idx, origRole := range delegationInfo.Roles {
canonicalDelegations[idx] = *origRole
}
delegationKeys := delegationInfo.Keys
for i, delegation := range canonicalDelegations {
canonicalKeyIDs := []string{}
for _, keyID := range delegation.KeyIDs {
pubKey, ok := delegationKeys[keyID]
if !ok {
return []data.Role{}, fmt.Errorf("Could not translate canonical key IDs for %s", delegation.Name)
}
canonicalKeyID, err := utils.CanonicalKeyID(pubKey)
if err != nil {
return []data.Role{}, fmt.Errorf("Could not translate canonical key IDs for %s: %v", delegation.Name, err)
}
canonicalKeyIDs = append(canonicalKeyIDs, canonicalKeyID)
}
canonicalDelegations[i].KeyIDs = canonicalKeyIDs
}
return canonicalDelegations, nil
}
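A hedged usage sketch: `repo` is a *NotaryRepository obtained elsewhere (see the factory further down), `pubKey` is a key the delegate controls, and the role name and path are hypothetical; nothing touches the server until publish:

```go
package example

import (
	"github.com/docker/notary/client"
	"github.com/docker/notary/tuf/data"
)

// stageReleasesDelegation stages a hypothetical "targets/releases" delegation
// on an existing repository: one changelist entry for the key, one for the
// path, both applied on the next publish.
func stageReleasesDelegation(repo *client.NotaryRepository, pubKey data.PublicKey) error {
	return repo.AddDelegation(
		"targets/releases",
		[]data.PublicKey{pubKey},
		[]string{"library/"},
	)
}
```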

View file

@ -1,278 +0,0 @@
package client
import (
"encoding/json"
"fmt"
"net/http"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/notary/client/changelist"
store "github.com/docker/notary/storage"
"github.com/docker/notary/tuf"
"github.com/docker/notary/tuf/data"
"github.com/docker/notary/tuf/utils"
)
// getRemoteStore initializes a remote HTTPStore from the config settings
func getRemoteStore(baseURL, gun string, rt http.RoundTripper) (store.RemoteStore, error) {
s, err := store.NewHTTPStore(
baseURL+"/v2/"+gun+"/_trust/tuf/",
"",
"json",
"key",
rt,
)
if err != nil {
return store.OfflineStore{}, err
}
return s, err
}
func applyChangelist(repo *tuf.Repo, invalid *tuf.Repo, cl changelist.Changelist) error {
it, err := cl.NewIterator()
if err != nil {
return err
}
index := 0
for it.HasNext() {
c, err := it.Next()
if err != nil {
return err
}
isDel := data.IsDelegation(c.Scope()) || data.IsWildDelegation(c.Scope())
switch {
case c.Scope() == changelist.ScopeTargets || isDel:
err = applyTargetsChange(repo, invalid, c)
case c.Scope() == changelist.ScopeRoot:
err = applyRootChange(repo, c)
default:
return fmt.Errorf("scope not supported: %s", c.Scope())
}
if err != nil {
logrus.Debugf("error attempting to apply change #%d: %s, on scope: %s path: %s type: %s", index, c.Action(), c.Scope(), c.Path(), c.Type())
return err
}
index++
}
logrus.Debugf("applied %d change(s)", index)
return nil
}
func applyTargetsChange(repo *tuf.Repo, invalid *tuf.Repo, c changelist.Change) error {
switch c.Type() {
case changelist.TypeTargetsTarget:
return changeTargetMeta(repo, c)
case changelist.TypeTargetsDelegation:
return changeTargetsDelegation(repo, c)
case changelist.TypeWitness:
return witnessTargets(repo, invalid, c.Scope())
default:
return fmt.Errorf("only target meta and delegations changes supported")
}
}
func changeTargetsDelegation(repo *tuf.Repo, c changelist.Change) error {
switch c.Action() {
case changelist.ActionCreate:
td := changelist.TUFDelegation{}
err := json.Unmarshal(c.Content(), &td)
if err != nil {
return err
}
// Try to create brand new role or update one
// First add the keys, then the paths. We can only add keys and paths in this scenario
err = repo.UpdateDelegationKeys(c.Scope(), td.AddKeys, []string{}, td.NewThreshold)
if err != nil {
return err
}
return repo.UpdateDelegationPaths(c.Scope(), td.AddPaths, []string{}, false)
case changelist.ActionUpdate:
td := changelist.TUFDelegation{}
err := json.Unmarshal(c.Content(), &td)
if err != nil {
return err
}
if data.IsWildDelegation(c.Scope()) {
return repo.PurgeDelegationKeys(c.Scope(), td.RemoveKeys)
}
delgRole, err := repo.GetDelegationRole(c.Scope())
if err != nil {
return err
}
// We need to translate the keys from canonical ID to TUF ID for compatibility
canonicalToTUFID := make(map[string]string)
for tufID, pubKey := range delgRole.Keys {
canonicalID, err := utils.CanonicalKeyID(pubKey)
if err != nil {
return err
}
canonicalToTUFID[canonicalID] = tufID
}
removeTUFKeyIDs := []string{}
for _, canonID := range td.RemoveKeys {
removeTUFKeyIDs = append(removeTUFKeyIDs, canonicalToTUFID[canonID])
}
err = repo.UpdateDelegationKeys(c.Scope(), td.AddKeys, removeTUFKeyIDs, td.NewThreshold)
if err != nil {
return err
}
return repo.UpdateDelegationPaths(c.Scope(), td.AddPaths, td.RemovePaths, td.ClearAllPaths)
case changelist.ActionDelete:
return repo.DeleteDelegation(c.Scope())
default:
return fmt.Errorf("unsupported action against delegations: %s", c.Action())
}
}
func changeTargetMeta(repo *tuf.Repo, c changelist.Change) error {
var err error
switch c.Action() {
case changelist.ActionCreate:
logrus.Debug("changelist add: ", c.Path())
meta := &data.FileMeta{}
err = json.Unmarshal(c.Content(), meta)
if err != nil {
return err
}
files := data.Files{c.Path(): *meta}
// Attempt to add the target to this role
if _, err = repo.AddTargets(c.Scope(), files); err != nil {
logrus.Errorf("couldn't add target to %s: %s", c.Scope(), err.Error())
}
case changelist.ActionDelete:
logrus.Debug("changelist remove: ", c.Path())
// Attempt to remove the target from this role
if err = repo.RemoveTargets(c.Scope(), c.Path()); err != nil {
logrus.Errorf("couldn't remove target from %s: %s", c.Scope(), err.Error())
}
default:
err = fmt.Errorf("action not yet supported: %s", c.Action())
}
return err
}
func applyRootChange(repo *tuf.Repo, c changelist.Change) error {
var err error
switch c.Type() {
case changelist.TypeRootRole:
err = applyRootRoleChange(repo, c)
default:
err = fmt.Errorf("type of root change not yet supported: %s", c.Type())
}
return err // might be nil
}
func applyRootRoleChange(repo *tuf.Repo, c changelist.Change) error {
switch c.Action() {
case changelist.ActionCreate:
// replaces all keys for a role
d := &changelist.TUFRootData{}
err := json.Unmarshal(c.Content(), d)
if err != nil {
return err
}
err = repo.ReplaceBaseKeys(d.RoleName, d.Keys...)
if err != nil {
return err
}
default:
return fmt.Errorf("action not yet supported for root: %s", c.Action())
}
return nil
}
func nearExpiry(r data.SignedCommon) bool {
plus6mo := time.Now().AddDate(0, 6, 0)
return r.Expires.Before(plus6mo)
}
func warnRolesNearExpiry(r *tuf.Repo) {
//get every role and its respective signed common and call nearExpiry on it
//Root check
if nearExpiry(r.Root.Signed.SignedCommon) {
logrus.Warn("root is nearing expiry, you should re-sign the role metadata")
}
//Targets and delegations check
for role, signedTOrD := range r.Targets {
//signedTOrD is of type *data.SignedTargets
if nearExpiry(signedTOrD.Signed.SignedCommon) {
logrus.Warn(role, " metadata is nearing expiry, you should re-sign the role metadata")
}
}
//Snapshot check
if nearExpiry(r.Snapshot.Signed.SignedCommon) {
logrus.Warn("snapshot is nearing expiry, you should re-sign the role metadata")
}
//do not need to worry about Timestamp, notary signer will re-sign with the timestamp key
}
// getRemoteKey fetches a public key from a remote store, given a gun and role
func getRemoteKey(url, gun, role string, rt http.RoundTripper) (data.PublicKey, error) {
remote, err := getRemoteStore(url, gun, rt)
if err != nil {
return nil, err
}
rawPubKey, err := remote.GetKey(role)
if err != nil {
return nil, err
}
pubKey, err := data.UnmarshalPublicKey(rawPubKey)
if err != nil {
return nil, err
}
return pubKey, nil
}
// rotateRemoteKey rotates a private key in a remote store and returns the public key component
func rotateRemoteKey(url, gun, role string, rt http.RoundTripper) (data.PublicKey, error) {
remote, err := getRemoteStore(url, gun, rt)
if err != nil {
return nil, err
}
rawPubKey, err := remote.RotateKey(role)
if err != nil {
return nil, err
}
pubKey, err := data.UnmarshalPublicKey(rawPubKey)
if err != nil {
return nil, err
}
return pubKey, nil
}
// serializeCanonicalRole signs and serializes the metadata for a canonical role in a TUF repo to JSON
func serializeCanonicalRole(tufRepo *tuf.Repo, role string) (out []byte, err error) {
var s *data.Signed
switch {
case role == data.CanonicalRootRole:
s, err = tufRepo.SignRoot(data.DefaultExpires(role))
case role == data.CanonicalSnapshotRole:
s, err = tufRepo.SignSnapshot(data.DefaultExpires(role))
case tufRepo.Targets[role] != nil:
s, err = tufRepo.SignTargets(
role, data.DefaultExpires(data.CanonicalTargetsRole))
default:
err = fmt.Errorf("%s not supported role to sign on the client", role)
}
if err != nil {
return
}
return json.Marshal(s)
}

View file

@ -1,29 +0,0 @@
// +build !pkcs11
package client
import (
"fmt"
"net/http"
"github.com/docker/notary"
"github.com/docker/notary/trustmanager"
"github.com/docker/notary/trustpinning"
)
// NewNotaryRepository is a helper method that returns a new notary repository.
// It takes the base directory under which all the trust files will be stored
// (this normally defaults to "~/.notary", or "~/.docker/trust" when enabling
// Docker Content Trust).
func NewNotaryRepository(baseDir, gun, baseURL string, rt http.RoundTripper,
retriever notary.PassRetriever, trustPinning trustpinning.TrustPinConfig) (
*NotaryRepository, error) {
fileKeyStore, err := trustmanager.NewKeyFileStore(baseDir, retriever)
if err != nil {
return nil, fmt.Errorf("failed to create private key store in directory: %s", baseDir)
}
return repositoryFromKeystores(baseDir, gun, baseURL, rt,
[]trustmanager.KeyStore{fileKeyStore}, trustPinning)
}
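A minimal sketch of calling this factory; the GUN, directory, and hard-coded passphrase retriever are all illustrative, not a recommended setup:

```go
package example

import (
	"net/http"

	"github.com/docker/notary/client"
	"github.com/docker/notary/trustpinning"
)

// newExampleRepo builds a repository handle for a hypothetical GUN with a
// hard-coded passphrase retriever: fine for a sketch, never for real use.
func newExampleRepo(rt http.RoundTripper) (*client.NotaryRepository, error) {
	retriever := func(keyName, alias string, createNew bool, attempts int) (string, bool, error) {
		return "password123", false, nil
	}
	return client.NewNotaryRepository(
		"/tmp/trust-example",       // base trust directory
		"docker.io/library/alpine", // GUN
		"https://notary.docker.io", // server URL
		rt,
		retriever,
		trustpinning.TrustPinConfig{},
	)
}
```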

View file

@ -1,34 +0,0 @@
// +build pkcs11
package client
import (
"fmt"
"net/http"
"github.com/docker/notary"
"github.com/docker/notary/trustmanager"
"github.com/docker/notary/trustmanager/yubikey"
"github.com/docker/notary/trustpinning"
)
// NewNotaryRepository is a helper method that returns a new notary repository.
// It takes the base directory under which all the trust files will be stored
// (usually ~/.docker/trust/).
func NewNotaryRepository(baseDir, gun, baseURL string, rt http.RoundTripper,
retriever notary.PassRetriever, trustPinning trustpinning.TrustPinConfig) (
*NotaryRepository, error) {
fileKeyStore, err := trustmanager.NewKeyFileStore(baseDir, retriever)
if err != nil {
return nil, fmt.Errorf("failed to create private key store in directory: %s", baseDir)
}
keyStores := []trustmanager.KeyStore{fileKeyStore}
yubiKeyStore, _ := yubikey.NewYubiStore(fileKeyStore, retriever)
if yubiKeyStore != nil {
keyStores = []trustmanager.KeyStore{yubiKeyStore, fileKeyStore}
}
return repositoryFromKeystores(baseDir, gun, baseURL, rt, keyStores, trustPinning)
}

View file

@ -1,239 +0,0 @@
package client
import (
"encoding/json"
"github.com/Sirupsen/logrus"
"github.com/docker/notary"
store "github.com/docker/notary/storage"
"github.com/docker/notary/tuf"
"github.com/docker/notary/tuf/data"
"github.com/docker/notary/tuf/signed"
)
// TUFClient is a usability wrapper around a raw TUF repo
type TUFClient struct {
remote store.RemoteStore
cache store.MetadataStore
oldBuilder tuf.RepoBuilder
newBuilder tuf.RepoBuilder
}
// NewTUFClient initializes a TUFClient with the given repo, remote source of content, and cache
func NewTUFClient(oldBuilder, newBuilder tuf.RepoBuilder, remote store.RemoteStore, cache store.MetadataStore) *TUFClient {
return &TUFClient{
oldBuilder: oldBuilder,
newBuilder: newBuilder,
remote: remote,
cache: cache,
}
}
// Update performs an update to the TUF repo as defined by the TUF spec
func (c *TUFClient) Update() (*tuf.Repo, *tuf.Repo, error) {
// 1. Get timestamp
// a. If timestamp error (verification, expired, etc...) download new root and return to 1.
// 2. Check if local snapshot is up to date
// a. If out of date, get updated snapshot
// i. If snapshot error, download new root and return to 1.
// 3. Check if root correct against snapshot
// a. If incorrect, download new root and return to 1.
// 4. Iteratively download and search targets and delegations to find target meta
logrus.Debug("updating TUF client")
err := c.update()
if err != nil {
logrus.Debug("Error occurred. Root will be downloaded and another update attempted")
logrus.Debug("Resetting the TUF builder...")
c.newBuilder = c.newBuilder.BootstrapNewBuilder()
if err := c.downloadRoot(); err != nil {
logrus.Debug("Client Update (Root):", err)
return nil, nil, err
}
// If we error again, we now have the latest root and just want to fail
// out as there's no expectation the problem can be resolved automatically
logrus.Debug("retrying TUF client update")
if err := c.update(); err != nil {
return nil, nil, err
}
}
return c.newBuilder.Finish()
}
func (c *TUFClient) update() error {
if err := c.downloadTimestamp(); err != nil {
logrus.Debugf("Client Update (Timestamp): %s", err.Error())
return err
}
if err := c.downloadSnapshot(); err != nil {
logrus.Debugf("Client Update (Snapshot): %s", err.Error())
return err
}
// will always need top level targets at a minimum
if err := c.downloadTargets(); err != nil {
logrus.Debugf("Client Update (Targets): %s", err.Error())
return err
}
return nil
}
// downloadRoot is responsible for downloading the root.json
func (c *TUFClient) downloadRoot() error {
role := data.CanonicalRootRole
consistentInfo := c.newBuilder.GetConsistentInfo(role)
// We can't read an exact size for the root metadata without risking getting stuck in the TUF update cycle
// since it's possible that downloading timestamp/snapshot metadata may fail due to a signature mismatch
if !consistentInfo.ChecksumKnown() {
logrus.Debugf("Loading root with no expected checksum")
// get the cached root, if it exists, just for version checking
cachedRoot, _ := c.cache.GetSized(role, -1)
// prefer to download a new root
_, remoteErr := c.tryLoadRemote(consistentInfo, cachedRoot)
return remoteErr
}
_, err := c.tryLoadCacheThenRemote(consistentInfo)
return err
}
// downloadTimestamp is responsible for downloading the timestamp.json
// Timestamps are special in that we ALWAYS attempt to download and only
// use cache if the download fails (and the cache is still valid).
func (c *TUFClient) downloadTimestamp() error {
logrus.Debug("Loading timestamp...")
role := data.CanonicalTimestampRole
consistentInfo := c.newBuilder.GetConsistentInfo(role)
// always get the remote timestamp, since it supersedes the local one
cachedTS, cachedErr := c.cache.GetSized(role, notary.MaxTimestampSize)
_, remoteErr := c.tryLoadRemote(consistentInfo, cachedTS)
// Check that there was no remote error, or that it was only a network problem;
// if there was a validation error, we should error out so we can download a new root or fail the update
switch remoteErr.(type) {
case nil:
return nil
case store.ErrMetaNotFound, store.ErrServerUnavailable, store.ErrOffline, store.NetworkError:
break
default:
return remoteErr
}
// since it was a network error: get the cached timestamp, if it exists
if cachedErr != nil {
logrus.Debug("no cached or remote timestamp available")
return remoteErr
}
logrus.Warn("Error while downloading remote metadata, using cached timestamp - this might not be the latest version available remotely")
err := c.newBuilder.Load(role, cachedTS, 1, false)
if err == nil {
logrus.Debug("successfully verified cached timestamp")
}
return err
}
// downloadSnapshot is responsible for downloading the snapshot.json
func (c *TUFClient) downloadSnapshot() error {
logrus.Debug("Loading snapshot...")
role := data.CanonicalSnapshotRole
consistentInfo := c.newBuilder.GetConsistentInfo(role)
_, err := c.tryLoadCacheThenRemote(consistentInfo)
return err
}
// downloadTargets downloads all targets and delegated targets for the repository.
// It uses a pre-order tree traversal as it's necessary to download parents first
// to obtain the keys to validate children.
func (c *TUFClient) downloadTargets() error {
toDownload := []data.DelegationRole{{
BaseRole: data.BaseRole{Name: data.CanonicalTargetsRole},
Paths: []string{""},
}}
for len(toDownload) > 0 {
role := toDownload[0]
toDownload = toDownload[1:]
consistentInfo := c.newBuilder.GetConsistentInfo(role.Name)
if !consistentInfo.ChecksumKnown() {
logrus.Debugf("skipping %s because there is no checksum for it", role.Name)
continue
}
children, err := c.getTargetsFile(role, consistentInfo)
switch err.(type) {
case signed.ErrExpired, signed.ErrRoleThreshold:
if role.Name == data.CanonicalTargetsRole {
return err
}
logrus.Warnf("Error getting %s: %s", role.Name, err)
break
case nil:
toDownload = append(children, toDownload...)
default:
return err
}
}
return nil
}
func (c TUFClient) getTargetsFile(role data.DelegationRole, ci tuf.ConsistentInfo) ([]data.DelegationRole, error) {
logrus.Debugf("Loading %s...", role.Name)
tgs := &data.SignedTargets{}
raw, err := c.tryLoadCacheThenRemote(ci)
if err != nil {
return nil, err
}
// we know it unmarshals because if `tryLoadCacheThenRemote` didn't fail, then
// the raw has already been loaded into the builder
json.Unmarshal(raw, tgs)
return tgs.GetValidDelegations(role), nil
}
func (c *TUFClient) tryLoadCacheThenRemote(consistentInfo tuf.ConsistentInfo) ([]byte, error) {
cachedTS, err := c.cache.GetSized(consistentInfo.RoleName, consistentInfo.Length())
if err != nil {
logrus.Debugf("no %s in cache, must download", consistentInfo.RoleName)
return c.tryLoadRemote(consistentInfo, nil)
}
if err = c.newBuilder.Load(consistentInfo.RoleName, cachedTS, 1, false); err == nil {
logrus.Debugf("successfully verified cached %s", consistentInfo.RoleName)
return cachedTS, nil
}
logrus.Debugf("cached %s is invalid (must download): %s", consistentInfo.RoleName, err)
return c.tryLoadRemote(consistentInfo, cachedTS)
}
func (c *TUFClient) tryLoadRemote(consistentInfo tuf.ConsistentInfo, old []byte) ([]byte, error) {
consistentName := consistentInfo.ConsistentName()
raw, err := c.remote.GetSized(consistentName, consistentInfo.Length())
if err != nil {
logrus.Debugf("error downloading %s: %s", consistentName, err)
return old, err
}
// try to load the old data into the old builder - only use it to validate
// versions if it loads successfully. If it errors, then the loaded version
// will be 1
c.oldBuilder.Load(consistentInfo.RoleName, old, 1, true)
minVersion := c.oldBuilder.GetLoadedVersion(consistentInfo.RoleName)
if err := c.newBuilder.Load(consistentInfo.RoleName, raw, minVersion, false); err != nil {
logrus.Debugf("downloaded %s is invalid: %s", consistentName, err)
return raw, err
}
logrus.Debugf("successfully verified downloaded %s", consistentName)
if err := c.cache.Set(consistentInfo.RoleName, raw); err != nil {
logrus.Debugf("Unable to write %s to cache: %s", consistentInfo.RoleName, err)
}
return raw, nil
}
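Tying the download methods together, a sketch of driving one update cycle; the builders and stores are assumed to be constructed elsewhere (e.g. via tuf.NewRepoBuilder and the storage package):

```go
package example

import (
	"github.com/docker/notary/client"
	store "github.com/docker/notary/storage"
	"github.com/docker/notary/tuf"
)

// runUpdate drives one TUF update cycle using pre-built builders and stores.
func runUpdate(oldB, newB tuf.RepoBuilder, remote store.RemoteStore, cache store.MetadataStore) (*tuf.Repo, *tuf.Repo, error) {
	c := client.NewTUFClient(oldB, newB, remote, cache)
	// On success: a fully validated repo, plus any recognized-but-invalid
	// roles that may later be repaired by witnessing.
	return c.Update()
}
```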

View file

@ -1,69 +0,0 @@
package client
import (
"path/filepath"
"github.com/docker/notary/client/changelist"
"github.com/docker/notary/tuf"
"github.com/docker/notary/tuf/data"
)
// Witness creates change objects to witness (i.e. re-sign) the given
// roles on the next publish. One change is created per role
func (r *NotaryRepository) Witness(roles ...string) ([]string, error) {
cl, err := changelist.NewFileChangelist(filepath.Join(r.tufRepoPath, "changelist"))
if err != nil {
return nil, err
}
defer cl.Close()
successful := make([]string, 0, len(roles))
for _, role := range roles {
// scope is role
c := changelist.NewTUFChange(
changelist.ActionUpdate,
role,
changelist.TypeWitness,
"",
nil,
)
err = cl.Add(c)
if err != nil {
break
}
successful = append(successful, role)
}
return successful, err
}
func witnessTargets(repo *tuf.Repo, invalid *tuf.Repo, role string) error {
if r, ok := repo.Targets[role]; ok {
// role is already valid, mark for re-signing/updating
r.Dirty = true
return nil
}
if roleObj, err := repo.GetDelegationRole(role); err == nil && invalid != nil {
// A role with a threshold > len(keys) is technically invalid, but we let it build in the builder because
// we want to be able to download the role (which may still have targets on it), add more keys, and then
// witness the role, thus bringing it back to valid. However, if no keys have been added before witnessing,
// then it is still an invalid role, and can't be witnessed because nothing can bring it back to valid.
if roleObj.Threshold > len(roleObj.Keys) {
return data.ErrInvalidRole{
Role: role,
Reason: "role does not specify enough valid signing keys to meet its required threshold",
}
}
if r, ok := invalid.Targets[role]; ok {
// role is recognized but invalid, move to valid data and mark for re-signing
repo.Targets[role] = r
r.Dirty = true
return nil
}
}
// role isn't recognized, even as invalid
return data.ErrInvalidRole{
Role: role,
Reason: "this role is not known",
}
}
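A short sketch of witnessing; the delegation role names are hypothetical and `repo` is a *NotaryRepository as before:

```go
package example

import (
	"log"

	"github.com/docker/notary/client"
)

// witnessReleases marks hypothetical delegation roles for re-signing on the
// next publish.
func witnessReleases(repo *client.NotaryRepository) {
	witnessed, err := repo.Witness("targets/releases", "targets/qa")
	if err != nil {
		log.Printf("witnessed only %v before failing: %v", witnessed, err)
	}
}
```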

View file

@ -1,70 +0,0 @@
package notary
import "time"
// application wide constants
const (
// MaxDownloadSize is the maximum size we'll download for metadata if no limit is given
MaxDownloadSize int64 = 100 << 20
// MaxTimestampSize is the maximum size of timestamp metadata - 1MiB.
MaxTimestampSize int64 = 1 << 20
// MinRSABitSize is the minimum bit size for RSA keys allowed in notary
MinRSABitSize = 2048
// MinThreshold is the minimum signing threshold for roles; currently we do not support a higher threshold
MinThreshold = 1
// PrivKeyPerms are the file permissions to use when writing private keys to disk
PrivKeyPerms = 0700
// PubCertPerms are the file permissions to use when writing public certificates to disk
PubCertPerms = 0755
// Sha256HexSize is how big a Sha256 hex is in number of characters
Sha256HexSize = 64
// Sha512HexSize is how big a Sha512 hex is in number of characters
Sha512HexSize = 128
// SHA256 is the name of SHA256 hash algorithm
SHA256 = "sha256"
// SHA512 is the name of SHA512 hash algorithm
SHA512 = "sha512"
// TrustedCertsDir is the directory, under the notary repo base directory, where trusted certs are stored
TrustedCertsDir = "trusted_certificates"
// PrivDir is the directory, under the notary repo base directory, where private keys are stored
PrivDir = "private"
// RootKeysSubdir is the subdirectory under PrivDir where root private keys are stored
RootKeysSubdir = "root_keys"
// NonRootKeysSubdir is the subdirectory under PrivDir where non-root private keys are stored
NonRootKeysSubdir = "tuf_keys"
// KeyExtension is the file extension to use for private key files
KeyExtension = "key"
// Day is a duration of one day
Day = 24 * time.Hour
Year = 365 * Day
// NotaryRootExpiry is the duration representing the expiry time of the Root role
NotaryRootExpiry = 10 * Year
NotaryTargetsExpiry = 3 * Year
NotarySnapshotExpiry = 3 * Year
NotaryTimestampExpiry = 14 * Day
ConsistentMetadataCacheMaxAge = 30 * Day
CurrentMetadataCacheMaxAge = 5 * time.Minute
// CacheMaxAgeLimit is the generally recommended maximum age for Cache-Control headers
// (one year, in seconds, since one year is forever in terms of internet
// content)
CacheMaxAgeLimit = 1 * Year
MySQLBackend = "mysql"
MemoryBackend = "memory"
SQLiteBackend = "sqlite3"
RethinkDBBackend = "rethinkdb"
DefaultImportRole = "delegation"
)
// NotaryDefaultExpiries is the construct used to configure the default expiry times of
// the various role files.
var NotaryDefaultExpiries = map[string]time.Duration{
"root": NotaryRootExpiry,
"targets": NotaryTargetsExpiry,
"snapshot": NotarySnapshotExpiry,
"timestamp": NotaryTimestampExpiry,
}

View file

@ -1,16 +0,0 @@
// +build !windows
package notary
import (
"os"
"syscall"
)
// NotarySupportedSignals contains the signals we would like to capture:
// - SIGUSR1, indicates an increment of the log level.
// - SIGUSR2, indicates a decrement of the log level.
var NotarySupportedSignals = []os.Signal{
syscall.SIGUSR1,
syscall.SIGUSR2,
}
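A minimal sketch of consuming these signals, assuming a Unix build (on Windows the slice is empty, and calling `signal.Notify` with no signals would relay everything); the log-level adjustment itself is stubbed:

```go
package main

import (
	"log"
	"os"
	"os/signal"

	"github.com/docker/notary"
)

func main() {
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, notary.NotarySupportedSignals...)
	for s := range sigs {
		// SIGUSR1 would raise the log level, SIGUSR2 lower it; the actual
		// adjustment is left as a stub in this sketch.
		log.Printf("received %v: adjust log level here", s)
	}
}
```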

View file

@ -1,8 +0,0 @@
// +build windows
package notary
import "os"
// NotarySupportedSignals does not contain any signals, because SIGUSR1/2 are not supported on Windows
var NotarySupportedSignals = []os.Signal{}

View file

@ -1,41 +0,0 @@
package cryptoservice
import (
"crypto"
"crypto/rand"
"crypto/x509"
"fmt"
"time"
"github.com/docker/notary/tuf/data"
"github.com/docker/notary/tuf/utils"
)
// GenerateCertificate generates an X509 Certificate from a template, given a GUN and validity interval
func GenerateCertificate(rootKey data.PrivateKey, gun string, startTime, endTime time.Time) (*x509.Certificate, error) {
signer := rootKey.CryptoSigner()
if signer == nil {
return nil, fmt.Errorf("key type not supported for Certificate generation: %s\n", rootKey.Algorithm())
}
return generateCertificate(signer, gun, startTime, endTime)
}
func generateCertificate(signer crypto.Signer, gun string, startTime, endTime time.Time) (*x509.Certificate, error) {
template, err := utils.NewCertificate(gun, startTime, endTime)
if err != nil {
return nil, fmt.Errorf("failed to create the certificate template for: %s (%v)", gun, err)
}
derBytes, err := x509.CreateCertificate(rand.Reader, template, template, signer.Public(), signer)
if err != nil {
return nil, fmt.Errorf("failed to create the certificate for: %s (%v)", gun, err)
}
cert, err := x509.ParseCertificate(derBytes)
if err != nil {
return nil, fmt.Errorf("failed to parse the certificate for key: %s (%v)", gun, err)
}
return cert, nil
}
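A hedged usage sketch: `privKey` is assumed to be a data.PrivateKey whose algorithm has x509 signing support (ECDSA or RSA; an ed25519 key yields a nil CryptoSigner here), and the GUN is made up:

```go
package example

import (
	"crypto/x509"
	"time"

	"github.com/docker/notary/cryptoservice"
	"github.com/docker/notary/tuf/data"
)

// exampleCert issues a certificate for a hypothetical GUN.
func exampleCert(privKey data.PrivateKey) (*x509.Certificate, error) {
	start := time.Now()
	return cryptoservice.GenerateCertificate(
		privKey,
		"docker.io/library/alpine", // GUN, used as the certificate subject
		start,
		start.AddDate(10, 0, 0), // ten-year validity, mirroring the root expiry above
	)
}
```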

View file

@ -1,181 +0,0 @@
package cryptoservice
import (
"crypto/rand"
"fmt"
"crypto/x509"
"encoding/pem"
"errors"
"github.com/Sirupsen/logrus"
"github.com/docker/notary"
"github.com/docker/notary/trustmanager"
"github.com/docker/notary/tuf/data"
"github.com/docker/notary/tuf/utils"
)
var (
// ErrNoValidPrivateKey is returned if a key being imported doesn't
// look like a private key
ErrNoValidPrivateKey = errors.New("no valid private key found")
// ErrRootKeyNotEncrypted is returned if a root key being imported is
// unencrypted
ErrRootKeyNotEncrypted = errors.New("only encrypted root keys may be imported")
)
// CryptoService implements Sign and Create, holding a specific GUN and keystore to
// operate on
type CryptoService struct {
keyStores []trustmanager.KeyStore
}
// NewCryptoService returns an instance of CryptoService
func NewCryptoService(keyStores ...trustmanager.KeyStore) *CryptoService {
return &CryptoService{keyStores: keyStores}
}
// Create is used to generate keys for targets, snapshots and timestamps
func (cs *CryptoService) Create(role, gun, algorithm string) (data.PublicKey, error) {
var privKey data.PrivateKey
var err error
switch algorithm {
case data.RSAKey:
privKey, err = utils.GenerateRSAKey(rand.Reader, notary.MinRSABitSize)
if err != nil {
return nil, fmt.Errorf("failed to generate RSA key: %v", err)
}
case data.ECDSAKey:
privKey, err = utils.GenerateECDSAKey(rand.Reader)
if err != nil {
return nil, fmt.Errorf("failed to generate EC key: %v", err)
}
case data.ED25519Key:
privKey, err = utils.GenerateED25519Key(rand.Reader)
if err != nil {
return nil, fmt.Errorf("failed to generate ED25519 key: %v", err)
}
default:
return nil, fmt.Errorf("private key type not supported for key generation: %s", algorithm)
}
logrus.Debugf("generated new %s key for role: %s and keyID: %s", algorithm, role, privKey.ID())
// Store the private key into our keystore
for _, ks := range cs.keyStores {
err = ks.AddKey(trustmanager.KeyInfo{Role: role, Gun: gun}, privKey)
if err == nil {
return data.PublicKeyFromPrivate(privKey), nil
}
}
if err != nil {
return nil, fmt.Errorf("failed to add key to filestore: %v", err)
}
return nil, fmt.Errorf("keystores would not accept new private keys for unknown reasons")
}
// GetPrivateKey returns a private key and role if present by ID.
func (cs *CryptoService) GetPrivateKey(keyID string) (k data.PrivateKey, role string, err error) {
for _, ks := range cs.keyStores {
if k, role, err = ks.GetKey(keyID); err == nil {
return
}
switch err.(type) {
case trustmanager.ErrPasswordInvalid, trustmanager.ErrAttemptsExceeded:
return
default:
continue
}
}
return // returns whatever the final values were
}
// GetKey returns a key by ID
func (cs *CryptoService) GetKey(keyID string) data.PublicKey {
privKey, _, err := cs.GetPrivateKey(keyID)
if err != nil {
return nil
}
return data.PublicKeyFromPrivate(privKey)
}
// GetKeyInfo returns role and GUN info of a key by ID
func (cs *CryptoService) GetKeyInfo(keyID string) (trustmanager.KeyInfo, error) {
for _, store := range cs.keyStores {
if info, err := store.GetKeyInfo(keyID); err == nil {
return info, nil
}
}
return trustmanager.KeyInfo{}, fmt.Errorf("Could not find info for keyID %s", keyID)
}
// RemoveKey deletes a key by ID
func (cs *CryptoService) RemoveKey(keyID string) (err error) {
for _, ks := range cs.keyStores {
ks.RemoveKey(keyID)
}
return // returns whatever the final values were
}
// AddKey adds a private key to a specified role.
// The GUN is inferred from the cryptoservice itself for non-root roles
func (cs *CryptoService) AddKey(role, gun string, key data.PrivateKey) (err error) {
// First check if this key already exists in any of our keystores
for _, ks := range cs.keyStores {
if keyInfo, err := ks.GetKeyInfo(key.ID()); err == nil {
if keyInfo.Role != role {
return fmt.Errorf("key with same ID already exists for role: %s", keyInfo.Role)
}
logrus.Debugf("key with same ID %s and role %s already exists", key.ID(), keyInfo.Role)
return nil
}
}
// If the key didn't exist in any of our keystores, add and return on the first successful keystore
for _, ks := range cs.keyStores {
// Try to add to this keystore, return if successful
if err = ks.AddKey(trustmanager.KeyInfo{Role: role, Gun: gun}, key); err == nil {
return nil
}
}
return // returns whatever the final values were
}
// ListKeys returns a list of key IDs valid for the given role
func (cs *CryptoService) ListKeys(role string) []string {
var res []string
for _, ks := range cs.keyStores {
for k, r := range ks.ListKeys() {
if r.Role == role {
res = append(res, k)
}
}
}
return res
}
// ListAllKeys returns a map of key IDs to role
func (cs *CryptoService) ListAllKeys() map[string]string {
res := make(map[string]string)
for _, ks := range cs.keyStores {
for k, r := range ks.ListKeys() {
res[k] = r.Role // keys are content addressed so don't care about overwrites
}
}
return res
}
// CheckRootKeyIsEncrypted makes sure the root key is encrypted. We have
// internal assumptions that depend on this.
func CheckRootKeyIsEncrypted(pemBytes []byte) error {
block, _ := pem.Decode(pemBytes)
if block == nil {
return ErrNoValidPrivateKey
}
if !x509.IsEncryptedPEMBlock(block) {
return ErrRootKeyNotEncrypted
}
return nil
}

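Tying the pieces together, a sketch of creating and listing keys through CryptoService (the GUN and passphrase are made up; ConstantRetriever and NewKeyMemoryStore are defined elsewhere in this vendored tree):

package main

import (
	"fmt"
	"log"

	"github.com/docker/notary/cryptoservice"
	"github.com/docker/notary/passphrase"
	"github.com/docker/notary/trustmanager"
	"github.com/docker/notary/tuf/data"
)

func main() {
	ks := trustmanager.NewKeyMemoryStore(passphrase.ConstantRetriever("testpass"))
	cs := cryptoservice.NewCryptoService(ks)
	pub, err := cs.Create("targets", "example.com/app", data.ECDSAKey)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created key", pub.ID())
	for id, role := range cs.ListAllKeys() {
		fmt.Println(id, "->", role)
	}
}
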
View file

@ -1,7 +0,0 @@
package notary
// PassRetriever is a callback function that should retrieve a passphrase
// for a given named key. If it should be treated as a new passphrase (e.g. with
// confirmation), createNew will be true. attempts is passed in so that implementers
// can decide how many chances to give to a human, for example.
type PassRetriever func(keyName, alias string, createNew bool, attempts int) (passphrase string, giveup bool, err error)

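Any function with this signature satisfies the type; for example, a non-interactive retriever that reads from an environment variable (the variable name here is hypothetical):

package main

import (
	"errors"
	"fmt"
	"os"

	"github.com/docker/notary"
)

// envRetriever never prompts: it returns the passphrase from the
// (made-up) MY_KEY_PASSPHRASE variable, or gives up immediately.
func envRetriever() notary.PassRetriever {
	return func(keyName, alias string, createNew bool, attempts int) (string, bool, error) {
		if pass := os.Getenv("MY_KEY_PASSPHRASE"); pass != "" {
			return pass, false, nil
		}
		return "", true, errors.New("no passphrase in environment")
	}
}

func main() {
	pass, giveup, err := envRetriever()("somekey", "targets", false, 0)
	fmt.Println(pass, giveup, err)
}
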
View file

@ -1,205 +0,0 @@
// Package passphrase is a utility function for managing passphrase
// for TUF and Notary keys.
package passphrase
import (
"bufio"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"github.com/docker/docker/pkg/term"
"github.com/docker/notary"
)
const (
idBytesToDisplay = 7
tufRootAlias = "root"
tufTargetsAlias = "targets"
tufSnapshotAlias = "snapshot"
tufRootKeyGenerationWarning = `You are about to create a new root signing key passphrase. This passphrase
will be used to protect the most sensitive key in your signing system. Please
choose a long, complex passphrase and be careful to keep the password and the
key file itself secure and backed up. It is highly recommended that you use a
password manager to generate the passphrase and keep it safe. There will be no
way to recover this key. You can find the key in your config directory.`
)
var (
// ErrTooShort is returned if the passphrase entered for a new key is
// below the minimum length
ErrTooShort = errors.New("Passphrase too short")
// ErrDontMatch is returned if the two entered passphrases don't match.
ErrDontMatch = errors.New("The entered passphrases do not match")
// ErrTooManyAttempts is returned if the maximum number of passphrase
// entry attempts is reached.
ErrTooManyAttempts = errors.New("Too many attempts")
// ErrNoInput is returned if we do not have a valid input method for passphrases
ErrNoInput = errors.New("Please either use environment variables or STDIN with a terminal to provide key passphrases")
)
// PromptRetriever returns a new Retriever which will provide a prompt on stdin
// and stdout to retrieve a passphrase. stdin is checked to confirm it is a
// terminal; if not, the PromptRetriever errors when attempting to retrieve a passphrase.
// Upon successful passphrase retrievals, the passphrase will be cached such that
// subsequent prompts will produce the same passphrase.
func PromptRetriever() notary.PassRetriever {
if !term.IsTerminal(os.Stdin.Fd()) {
return func(string, string, bool, int) (string, bool, error) {
return "", false, ErrNoInput
}
}
return PromptRetrieverWithInOut(os.Stdin, os.Stdout, nil)
}
type boundRetriever struct {
in io.Reader
out io.Writer
aliasMap map[string]string
passphraseCache map[string]string
}
func (br *boundRetriever) getPassphrase(keyName, alias string, createNew bool, numAttempts int) (string, bool, error) {
if numAttempts == 0 {
if alias == tufRootAlias && createNew {
fmt.Fprintln(br.out, tufRootKeyGenerationWarning)
}
if pass, ok := br.passphraseCache[alias]; ok {
return pass, false, nil
}
} else if !createNew { // per `if`, numAttempts > 0 if we're at this `else`
if numAttempts > 3 {
return "", true, ErrTooManyAttempts
}
fmt.Fprintln(br.out, "Passphrase incorrect. Please retry.")
}
// passphrase not cached and we're not aborting, get passphrase from user!
return br.requestPassphrase(keyName, alias, createNew, numAttempts)
}
func (br *boundRetriever) requestPassphrase(keyName, alias string, createNew bool, numAttempts int) (string, bool, error) {
// Figure out if we should display a different string for this alias
displayAlias := alias
if val, ok := br.aliasMap[alias]; ok {
displayAlias = val
}
// If typing on the terminal, we do not want the terminal to echo the
// password that is typed (so it doesn't display)
if term.IsTerminal(os.Stdin.Fd()) {
state, err := term.SaveState(os.Stdin.Fd())
if err != nil {
return "", false, err
}
term.DisableEcho(os.Stdin.Fd(), state)
defer term.RestoreTerminal(os.Stdin.Fd(), state)
}
indexOfLastSeparator := strings.LastIndex(keyName, string(filepath.Separator))
if indexOfLastSeparator == -1 {
indexOfLastSeparator = 0
}
var shortName string
if len(keyName) > indexOfLastSeparator+idBytesToDisplay {
if indexOfLastSeparator > 0 {
keyNamePrefix := keyName[:indexOfLastSeparator]
keyNameID := keyName[indexOfLastSeparator+1 : indexOfLastSeparator+idBytesToDisplay+1]
shortName = keyNameID + " (" + keyNamePrefix + ")"
} else {
shortName = keyName[indexOfLastSeparator : indexOfLastSeparator+idBytesToDisplay]
}
}
withID := fmt.Sprintf(" with ID %s", shortName)
if shortName == "" {
withID = ""
}
switch {
case createNew:
fmt.Fprintf(br.out, "Enter passphrase for new %s key%s: ", displayAlias, withID)
case displayAlias == "yubikey":
fmt.Fprintf(br.out, "Enter the %s for the attached Yubikey: ", keyName)
default:
fmt.Fprintf(br.out, "Enter passphrase for %s key%s: ", displayAlias, withID)
}
stdin := bufio.NewReader(br.in)
passphrase, err := stdin.ReadBytes('\n')
fmt.Fprintln(br.out)
if err != nil {
return "", false, err
}
retPass := strings.TrimSpace(string(passphrase))
if createNew {
err = br.verifyAndConfirmPassword(stdin, retPass, displayAlias, withID)
if err != nil {
return "", false, err
}
}
br.cachePassword(alias, retPass)
return retPass, false, nil
}
func (br *boundRetriever) verifyAndConfirmPassword(stdin *bufio.Reader, retPass, displayAlias, withID string) error {
if len(retPass) < 8 {
fmt.Fprintln(br.out, "Passphrase is too short. Please use a password manager to generate and store a good random passphrase.")
return ErrTooShort
}
fmt.Fprintf(br.out, "Repeat passphrase for new %s key%s: ", displayAlias, withID)
confirmation, err := stdin.ReadBytes('\n')
fmt.Fprintln(br.out)
if err != nil {
return err
}
confirmationStr := strings.TrimSpace(string(confirmation))
if retPass != confirmationStr {
fmt.Fprintln(br.out, "Passphrases do not match. Please retry.")
return ErrDontMatch
}
return nil
}
func (br *boundRetriever) cachePassword(alias, retPass string) {
br.passphraseCache[alias] = retPass
}
// PromptRetrieverWithInOut returns a new Retriever which will provide a
// prompt using the given in and out readers. The passphrase will be cached
// such that subsequent prompts will produce the same passphrase.
// aliasMap can be used to specify display names for TUF key aliases. If aliasMap
// is nil, a sensible default will be used.
func PromptRetrieverWithInOut(in io.Reader, out io.Writer, aliasMap map[string]string) notary.PassRetriever {
bound := &boundRetriever{
in: in,
out: out,
aliasMap: aliasMap,
passphraseCache: make(map[string]string),
}
return bound.getPassphrase
}
// ConstantRetriever returns a new Retriever which will return a constant string
// as a passphrase.
func ConstantRetriever(constantPassphrase string) notary.PassRetriever {
return func(k, a string, c bool, n int) (string, bool, error) {
return constantPassphrase, false, nil
}
}

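Because PromptRetrieverWithInOut accepts arbitrary readers and writers, it can be exercised without a terminal; a sketch simulating a user entering a new root passphrase twice:

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/docker/notary/passphrase"
)

func main() {
	// Two identical lines: the passphrase and its confirmation.
	in := bytes.NewBufferString("correct horse battery\ncorrect horse battery\n")
	out := &bytes.Buffer{}
	ret := passphrase.PromptRetrieverWithInOut(in, out, nil)
	pass, giveup, err := ret("abcdef1234567890", "root", true, 0)
	if err != nil || giveup {
		log.Fatal(err)
	}
	fmt.Println("accepted passphrase:", pass)
}
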
View file

@ -1,22 +0,0 @@
package storage
import (
"errors"
"fmt"
)
var (
// ErrPathOutsideStore indicates that the returned path would be
// outside the store
ErrPathOutsideStore = errors.New("path outside file store")
)
// ErrMetaNotFound indicates we did not find a particular piece
// of metadata in the store
type ErrMetaNotFound struct {
Resource string
}
func (err ErrMetaNotFound) Error() string {
return fmt.Sprintf("%s trust data unavailable. Has a notary repository been initialized?", err.Resource)
}

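Callers typically distinguish "not found" from other failures by a type assertion on this error; a small sketch:

package main

import (
	"fmt"

	store "github.com/docker/notary/storage"
)

func main() {
	var err error = store.ErrMetaNotFound{Resource: "root"}
	if _, ok := err.(store.ErrMetaNotFound); ok {
		fmt.Println("metadata missing:", err)
	}
}
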
View file

@ -1,222 +0,0 @@
package storage
import (
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/docker/notary"
)
// NewFilesystemStore creates a new store in a directory tree
func NewFilesystemStore(baseDir, subDir, extension string) (*FilesystemStore, error) {
baseDir = filepath.Join(baseDir, subDir)
return NewFileStore(baseDir, extension, notary.PrivKeyPerms)
}
// NewFileStore creates a fully configurable file store
func NewFileStore(baseDir, fileExt string, perms os.FileMode) (*FilesystemStore, error) {
baseDir = filepath.Clean(baseDir)
if err := createDirectory(baseDir, perms); err != nil {
return nil, err
}
if !strings.HasPrefix(fileExt, ".") {
fileExt = "." + fileExt
}
return &FilesystemStore{
baseDir: baseDir,
ext: fileExt,
perms: perms,
}, nil
}
// NewSimpleFileStore is a convenience wrapper to create a world readable,
// owner writeable filestore
func NewSimpleFileStore(baseDir, fileExt string) (*FilesystemStore, error) {
return NewFileStore(baseDir, fileExt, notary.PubCertPerms)
}
// NewPrivateKeyFileStorage initializes a new filestore for private keys, appending
// the notary.PrivDir to the baseDir.
func NewPrivateKeyFileStorage(baseDir, fileExt string) (*FilesystemStore, error) {
baseDir = filepath.Join(baseDir, notary.PrivDir)
return NewFileStore(baseDir, fileExt, notary.PrivKeyPerms)
}
// NewPrivateSimpleFileStore is a wrapper to create an owner readable/writeable
// _only_ filestore
func NewPrivateSimpleFileStore(baseDir, fileExt string) (*FilesystemStore, error) {
return NewFileStore(baseDir, fileExt, notary.PrivKeyPerms)
}
// FilesystemStore is a store in a locally accessible directory
type FilesystemStore struct {
baseDir string
ext string
perms os.FileMode
}
func (f *FilesystemStore) getPath(name string) (string, error) {
fileName := fmt.Sprintf("%s%s", name, f.ext)
fullPath := filepath.Join(f.baseDir, fileName)
if !strings.HasPrefix(fullPath, f.baseDir) {
return "", ErrPathOutsideStore
}
return fullPath, nil
}
// GetSized returns the meta for the given name (a role) up to size bytes.
// If size is "NoSizeLimit", this corresponds to "infinite", but we cut off at a
// predefined threshold "notary.MaxDownloadSize". If the file is larger than size
// we return ErrMaliciousServer for consistency with the HTTPStore.
func (f *FilesystemStore) GetSized(name string, size int64) ([]byte, error) {
p, err := f.getPath(name)
if err != nil {
return nil, err
}
file, err := os.OpenFile(p, os.O_RDONLY, f.perms)
if err != nil {
if os.IsNotExist(err) {
err = ErrMetaNotFound{Resource: name}
}
return nil, err
}
defer file.Close()
if size == NoSizeLimit {
size = notary.MaxDownloadSize
}
stat, err := file.Stat()
if err != nil {
return nil, err
}
if stat.Size() > size {
return nil, ErrMaliciousServer{}
}
l := io.LimitReader(file, size)
return ioutil.ReadAll(l)
}
// Get returns the meta for the given name.
func (f *FilesystemStore) Get(name string) ([]byte, error) {
p, err := f.getPath(name)
if err != nil {
return nil, err
}
meta, err := ioutil.ReadFile(p)
if err != nil {
if os.IsNotExist(err) {
err = ErrMetaNotFound{Resource: name}
}
return nil, err
}
return meta, nil
}
// SetMulti sets the metadata for multiple roles in one operation
func (f *FilesystemStore) SetMulti(metas map[string][]byte) error {
for role, blob := range metas {
err := f.Set(role, blob)
if err != nil {
return err
}
}
return nil
}
// Set sets the meta for a single role
func (f *FilesystemStore) Set(name string, meta []byte) error {
fp, err := f.getPath(name)
if err != nil {
return err
}
// Ensures the parent directories of the file we are about to write exist
err = os.MkdirAll(filepath.Dir(fp), f.perms)
if err != nil {
return err
}
// if something already exists, just delete it and re-write it
os.RemoveAll(fp)
// Write the file to disk
if err = ioutil.WriteFile(fp, meta, f.perms); err != nil {
return err
}
return nil
}
// RemoveAll clears the existing filestore by removing its base directory
func (f *FilesystemStore) RemoveAll() error {
return os.RemoveAll(f.baseDir)
}
// Remove removes the metadata for a single role - if the metadata doesn't
// exist, no error is returned
func (f *FilesystemStore) Remove(name string) error {
p, err := f.getPath(name)
if err != nil {
return err
}
return os.RemoveAll(p) // RemoveAll succeeds if path doesn't exist
}
// Location returns a human readable name for the storage location
func (f FilesystemStore) Location() string {
return f.baseDir
}
// ListFiles returns a list of all the filenames that can be used with Get*
// to retrieve content from this filestore
func (f FilesystemStore) ListFiles() []string {
files := make([]string, 0, 0)
filepath.Walk(f.baseDir, func(fp string, fi os.FileInfo, err error) error {
// If there are errors, ignore this particular file
if err != nil {
return nil
}
// Ignore if it is a directory
if fi.IsDir() {
return nil
}
// If this is a symlink, ignore it
if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
return nil
}
// Only allow matches that end with our certificate extension (e.g. *.crt)
matched, _ := filepath.Match("*"+f.ext, fi.Name())
if matched {
// Find the relative path for this file relative to the base path.
fp, err = filepath.Rel(f.baseDir, fp)
if err != nil {
return err
}
trimmed := strings.TrimSuffix(fp, f.ext)
files = append(files, trimmed)
}
return nil
})
return files
}
// createDirectory receives a string of the path to a directory.
// It does not support passing files, so the caller has to remove
// the filename by doing filepath.Dir(full_path_to_file)
func createDirectory(dir string, perms os.FileMode) error {
// This prevents someone passing /path/to/dir and 'dir' not being created
// If two '//' exist, MkdirAll deals with it correctly
dir = dir + "/"
return os.MkdirAll(dir, perms)
}

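A quick sketch of round-tripping metadata through the filesystem store, using the world-readable convenience constructor and a throwaway directory:

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"

	store "github.com/docker/notary/storage"
)

func main() {
	dir, err := ioutil.TempDir("", "filestore-example")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)

	fs, err := store.NewSimpleFileStore(dir, "json")
	if err != nil {
		log.Fatal(err)
	}
	if err := fs.Set("targets", []byte(`{"signed":{}}`)); err != nil {
		log.Fatal(err)
	}
	blob, err := fs.Get("targets")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("files=%v blob=%s\n", fs.ListFiles(), blob)
}
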
View file

@ -1,339 +0,0 @@
// A Store that can fetch and set metadata on a remote server.
// Some API constraints:
// - Response bodies for error codes should be unmarshallable as:
// {"errors": [{..., "detail": <serialized validation error>}]}
// else validation error details, etc. will be unparsable. The errors
// should have a github.com/docker/notary/tuf/validation/SerializableError
// in the Details field.
// If writing your own server, please have a look at
// github.com/docker/distribution/registry/api/errcode
package storage
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"mime/multipart"
"net/http"
"net/url"
"path"
"github.com/Sirupsen/logrus"
"github.com/docker/notary"
"github.com/docker/notary/tuf/validation"
)
// ErrServerUnavailable indicates an error from the server. code allows us to
// populate the http error we received
type ErrServerUnavailable struct {
code int
}
// NetworkError represents any kind of network error when attempting to make a request
type NetworkError struct {
Wrapped error
}
func (n NetworkError) Error() string {
return n.Wrapped.Error()
}
func (err ErrServerUnavailable) Error() string {
if err.code == 401 {
return fmt.Sprintf("you are not authorized to perform this operation: server returned 401.")
}
return fmt.Sprintf("unable to reach trust server at this time: %d.", err.code)
}
// ErrMaliciousServer indicates the server returned a response that is highly suspected
// of being malicious. i.e. it attempted to send us more data than the known size of a
// particular role metadata.
type ErrMaliciousServer struct{}
func (err ErrMaliciousServer) Error() string {
return "trust server returned a bad response."
}
// ErrInvalidOperation indicates that the server returned a 400 response and
// propagate any body we received.
type ErrInvalidOperation struct {
msg string
}
func (err ErrInvalidOperation) Error() string {
if err.msg != "" {
return fmt.Sprintf("trust server rejected operation: %s", err.msg)
}
return "trust server rejected operation."
}
// HTTPStore manages pulling and pushing metadata from and to a remote
// service over HTTP. It assumes the URL structure of the remote service
// maps identically to the structure of the TUF repo:
// <baseURL>/<metaPrefix>/(root|targets|snapshot|timestamp).json
// <baseURL>/<targetsPrefix>/foo.sh
//
// If consistent snapshots are disabled, it is advised that caching is not
// enabled. Simply set a cachePath (and ensure it's writable) to enable
// caching.
type HTTPStore struct {
baseURL url.URL
metaPrefix string
metaExtension string
keyExtension string
roundTrip http.RoundTripper
}
// NewHTTPStore initializes a new store against a URL and a number of configuration options
func NewHTTPStore(baseURL, metaPrefix, metaExtension, keyExtension string, roundTrip http.RoundTripper) (RemoteStore, error) {
base, err := url.Parse(baseURL)
if err != nil {
return nil, err
}
if !base.IsAbs() {
return nil, errors.New("HTTPStore requires an absolute baseURL")
}
if roundTrip == nil {
return &OfflineStore{}, nil
}
return &HTTPStore{
baseURL: *base,
metaPrefix: metaPrefix,
metaExtension: metaExtension,
keyExtension: keyExtension,
roundTrip: roundTrip,
}, nil
}
func tryUnmarshalError(resp *http.Response, defaultError error) error {
bodyBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
return defaultError
}
var parsedErrors struct {
Errors []struct {
Detail validation.SerializableError `json:"detail"`
} `json:"errors"`
}
if err := json.Unmarshal(bodyBytes, &parsedErrors); err != nil {
return defaultError
}
if len(parsedErrors.Errors) != 1 {
return defaultError
}
err = parsedErrors.Errors[0].Detail.Error
if err == nil {
return defaultError
}
return err
}
func translateStatusToError(resp *http.Response, resource string) error {
switch resp.StatusCode {
case http.StatusOK:
return nil
case http.StatusNotFound:
return ErrMetaNotFound{Resource: resource}
case http.StatusBadRequest:
return tryUnmarshalError(resp, ErrInvalidOperation{})
default:
return ErrServerUnavailable{code: resp.StatusCode}
}
}
// GetSized downloads the named meta file with the given size. A short body
// is acceptable because in the case of timestamp.json, the size is a cap,
// not an exact length.
// If size is "NoSizeLimit", this corresponds to "infinite," but we cut off at a
// predefined threshold "notary.MaxDownloadSize".
func (s HTTPStore) GetSized(name string, size int64) ([]byte, error) {
url, err := s.buildMetaURL(name)
if err != nil {
return nil, err
}
req, err := http.NewRequest("GET", url.String(), nil)
if err != nil {
return nil, err
}
resp, err := s.roundTrip.RoundTrip(req)
if err != nil {
return nil, NetworkError{Wrapped: err}
}
defer resp.Body.Close()
if err := translateStatusToError(resp, name); err != nil {
logrus.Debugf("received HTTP status %d when requesting %s.", resp.StatusCode, name)
return nil, err
}
if size == NoSizeLimit {
size = notary.MaxDownloadSize
}
if resp.ContentLength > size {
return nil, ErrMaliciousServer{}
}
logrus.Debugf("%d when retrieving metadata for %s", resp.StatusCode, name)
b := io.LimitReader(resp.Body, size)
body, err := ioutil.ReadAll(b)
if err != nil {
return nil, err
}
return body, nil
}
// Set sends a single piece of metadata to the TUF server
func (s HTTPStore) Set(name string, blob []byte) error {
return s.SetMulti(map[string][]byte{name: blob})
}
// Remove always fails, because we should never be able to delete metadata
// remotely
func (s HTTPStore) Remove(name string) error {
return ErrInvalidOperation{msg: "cannot delete individual metadata files"}
}
// NewMultiPartMetaRequest builds a request with the provided metadata updates
// in multipart form
func NewMultiPartMetaRequest(url string, metas map[string][]byte) (*http.Request, error) {
body := &bytes.Buffer{}
writer := multipart.NewWriter(body)
for role, blob := range metas {
part, err := writer.CreateFormFile("files", role)
if err != nil {
return nil, err
}
_, err = io.Copy(part, bytes.NewBuffer(blob))
if err != nil {
return nil, err
}
}
err := writer.Close()
if err != nil {
return nil, err
}
req, err := http.NewRequest("POST", url, body)
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", writer.FormDataContentType())
return req, nil
}
// SetMulti does a single batch upload of multiple pieces of TUF metadata.
// This should be preferred for updating a remote server as it enables the server
// to remain consistent, either accepting or rejecting the complete update.
func (s HTTPStore) SetMulti(metas map[string][]byte) error {
url, err := s.buildMetaURL("")
if err != nil {
return err
}
req, err := NewMultiPartMetaRequest(url.String(), metas)
if err != nil {
return err
}
resp, err := s.roundTrip.RoundTrip(req)
if err != nil {
return NetworkError{Wrapped: err}
}
defer resp.Body.Close()
// if this 404s, something is pretty wrong
return translateStatusToError(resp, "POST metadata endpoint")
}
// RemoveAll will attempt to delete all TUF metadata for a GUN
func (s HTTPStore) RemoveAll() error {
url, err := s.buildMetaURL("")
if err != nil {
return err
}
req, err := http.NewRequest("DELETE", url.String(), nil)
if err != nil {
return err
}
resp, err := s.roundTrip.RoundTrip(req)
if err != nil {
return NetworkError{Wrapped: err}
}
defer resp.Body.Close()
return translateStatusToError(resp, "DELETE metadata for GUN endpoint")
}
func (s HTTPStore) buildMetaURL(name string) (*url.URL, error) {
var filename string
if name != "" {
filename = fmt.Sprintf("%s.%s", name, s.metaExtension)
}
uri := path.Join(s.metaPrefix, filename)
return s.buildURL(uri)
}
func (s HTTPStore) buildKeyURL(name string) (*url.URL, error) {
filename := fmt.Sprintf("%s.%s", name, s.keyExtension)
uri := path.Join(s.metaPrefix, filename)
return s.buildURL(uri)
}
func (s HTTPStore) buildURL(uri string) (*url.URL, error) {
sub, err := url.Parse(uri)
if err != nil {
return nil, err
}
return s.baseURL.ResolveReference(sub), nil
}
// GetKey retrieves a public key from the remote server
func (s HTTPStore) GetKey(role string) ([]byte, error) {
url, err := s.buildKeyURL(role)
if err != nil {
return nil, err
}
req, err := http.NewRequest("GET", url.String(), nil)
if err != nil {
return nil, err
}
resp, err := s.roundTrip.RoundTrip(req)
if err != nil {
return nil, NetworkError{Wrapped: err}
}
defer resp.Body.Close()
if err := translateStatusToError(resp, role+" key"); err != nil {
return nil, err
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
return body, nil
}
// RotateKey rotates a private key and returns the public component from the remote server
func (s HTTPStore) RotateKey(role string) ([]byte, error) {
url, err := s.buildKeyURL(role)
if err != nil {
return nil, err
}
req, err := http.NewRequest("POST", url.String(), nil)
if err != nil {
return nil, err
}
resp, err := s.roundTrip.RoundTrip(req)
if err != nil {
return nil, NetworkError{Wrapped: err}
}
defer resp.Body.Close()
if err := translateStatusToError(resp, role+" key"); err != nil {
return nil, err
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
return body, nil
}
// Location returns a human readable name for the storage location
func (s HTTPStore) Location() string {
return s.baseURL.String()
}

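A usage sketch for the HTTP store; the server URL and GUN below are made up, and the prefix/extension arguments mirror the TUF URL layout described above:

package main

import (
	"fmt"
	"log"
	"net/http"

	store "github.com/docker/notary/storage"
)

func main() {
	remote, err := store.NewHTTPStore(
		"https://notary.example.com/v2/example.com/app/_trust/tuf/",
		"", "json", "key", http.DefaultTransport,
	)
	if err != nil {
		log.Fatal(err)
	}
	// Fetch root metadata, capped at notary.MaxDownloadSize.
	blob, err := remote.GetSized("root", store.NoSizeLimit)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("fetched %d bytes of root metadata\n", len(blob))
}
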
View file

@ -1,34 +0,0 @@
package storage
// NoSizeLimit is represented as -1 for arguments to GetSized
const NoSizeLimit int64 = -1
// MetadataStore must be implemented by anything that intends to interact
// with a store of TUF files
type MetadataStore interface {
GetSized(name string, size int64) ([]byte, error)
Set(name string, blob []byte) error
SetMulti(map[string][]byte) error
RemoveAll() error
Remove(name string) error
}
// PublicKeyStore must be implemented by a key service
type PublicKeyStore interface {
GetKey(role string) ([]byte, error)
RotateKey(role string) ([]byte, error)
}
// RemoteStore is similar to LocalStore with the added expectation that it should
// provide a way to download targets once located
type RemoteStore interface {
MetadataStore
PublicKeyStore
}
// Bootstrapper is a thing that can set itself up
type Bootstrapper interface {
// Bootstrap instructs a configured Bootstrapper to perform
// its setup operations.
Bootstrap() error
}

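The concrete stores in this package satisfy these interfaces, which can be pinned with the usual compile-time assertions (a sketch):

package main

import (
	store "github.com/docker/notary/storage"
)

// Compile-time checks against the interfaces defined above.
var (
	_ store.RemoteStore   = &store.HTTPStore{}
	_ store.MetadataStore = &store.MemoryStore{}
	_ store.MetadataStore = store.OfflineStore{}
)

func main() {}
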
View file

@ -1,124 +0,0 @@
package storage
import (
"crypto/sha256"
"github.com/docker/notary"
"github.com/docker/notary/tuf/utils"
)
// NewMemoryStore returns a MetadataStore that operates entirely in memory.
// Very useful for testing
func NewMemoryStore(initial map[string][]byte) *MemoryStore {
var consistent = make(map[string][]byte)
if initial == nil {
initial = make(map[string][]byte)
} else {
// add all seed meta to consistent
for name, data := range initial {
checksum := sha256.Sum256(data)
path := utils.ConsistentName(name, checksum[:])
consistent[path] = data
}
}
return &MemoryStore{
data: initial,
consistent: consistent,
}
}
// MemoryStore implements a mock RemoteStore entirely in memory.
// For testing purposes only.
type MemoryStore struct {
data map[string][]byte
consistent map[string][]byte
}
// GetSized returns up to size bytes of data referenced by name.
// If size is "NoSizeLimit", this corresponds to "infinite," but we cut off at a
// predefined threshold "notary.MaxDownloadSize", as we will always know the
// size for everything but a timestamp and sometimes a root,
// neither of which should be exceptionally large
func (m MemoryStore) GetSized(name string, size int64) ([]byte, error) {
d, ok := m.data[name]
if ok {
if size == NoSizeLimit {
size = notary.MaxDownloadSize
}
if int64(len(d)) < size {
return d, nil
}
return d[:size], nil
}
d, ok = m.consistent[name]
if ok {
if int64(len(d)) < size {
return d, nil
}
return d[:size], nil
}
return nil, ErrMetaNotFound{Resource: name}
}
// Get returns the data associated with name
func (m MemoryStore) Get(name string) ([]byte, error) {
if d, ok := m.data[name]; ok {
return d, nil
}
if d, ok := m.consistent[name]; ok {
return d, nil
}
return nil, ErrMetaNotFound{Resource: name}
}
// Set sets the metadata value for the given name
func (m *MemoryStore) Set(name string, meta []byte) error {
m.data[name] = meta
checksum := sha256.Sum256(meta)
path := utils.ConsistentName(name, checksum[:])
m.consistent[path] = meta
return nil
}
// SetMulti sets multiple pieces of metadata for multiple names
// in a single operation.
func (m *MemoryStore) SetMulti(metas map[string][]byte) error {
for role, blob := range metas {
m.Set(role, blob)
}
return nil
}
// Remove removes the metadata for a single role - if the metadata doesn't
// exist, no error is returned
func (m *MemoryStore) Remove(name string) error {
if meta, ok := m.data[name]; ok {
checksum := sha256.Sum256(meta)
path := utils.ConsistentName(name, checksum[:])
delete(m.data, name)
delete(m.consistent, path)
}
return nil
}
// RemoveAll clears the existing memory store by setting this store as new empty one
func (m *MemoryStore) RemoveAll() error {
*m = *NewMemoryStore(nil)
return nil
}
// Location provides a human readable name for the storage location
func (m MemoryStore) Location() string {
return "memory"
}
// ListFiles returns a list of all files. The names returned should be
// usable with Get directly, with no modification.
func (m *MemoryStore) ListFiles() []string {
names := make([]string, 0, len(m.data))
for n := range m.data {
names = append(names, n)
}
return names
}

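A short sketch of the in-memory store, which follows the same Set/GetSized contract as the other stores:

package main

import (
	"fmt"
	"log"

	store "github.com/docker/notary/storage"
)

func main() {
	m := store.NewMemoryStore(nil)
	if err := m.Set("timestamp", []byte(`{"signed":{"version":1}}`)); err != nil {
		log.Fatal(err)
	}
	blob, err := m.GetSized("timestamp", store.NoSizeLimit)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", blob)
}
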
View file

@ -1,54 +0,0 @@
package storage
// ErrOffline is used to indicate we are operating offline
type ErrOffline struct{}
func (e ErrOffline) Error() string {
return "client is offline"
}
var err = ErrOffline{}
// OfflineStore is to be used as a placeholder for a nil store. It simply
// returns ErrOffline for every operation
type OfflineStore struct{}
// GetSized returns ErrOffline
func (es OfflineStore) GetSized(name string, size int64) ([]byte, error) {
return nil, err
}
// Set returns ErrOffline
func (es OfflineStore) Set(name string, blob []byte) error {
return err
}
// SetMulti returns ErrOffline
func (es OfflineStore) SetMulti(map[string][]byte) error {
return err
}
// Remove returns ErrOffline
func (es OfflineStore) Remove(name string) error {
return err
}
// GetKey returns ErrOffline
func (es OfflineStore) GetKey(role string) ([]byte, error) {
return nil, err
}
// RotateKey returns ErrOffline
func (es OfflineStore) RotateKey(role string) ([]byte, error) {
return nil, err
}
// RemoveAll returns ErrOffline
func (es OfflineStore) RemoveAll() error {
return err
}
// Location returns a human readable name for the storage location
func (es OfflineStore) Location() string {
return "offline"
}

View file

@ -1,82 +0,0 @@
package trustmanager
import (
"fmt"
"github.com/docker/notary/tuf/data"
)
// Storage defines the bare bones primitives (no hierarchy)
type Storage interface {
// Set writes a file to the specified location, returning an error if this
// is not possible (reasons may include permissions errors). The path is cleaned
// before being made absolute against the store's base dir.
Set(fileName string, data []byte) error
// Remove deletes a file from the store relative to the store's base directory.
// The path is cleaned before being made absolute to ensure no path traversal
// outside the base directory is possible.
Remove(fileName string) error
// Get returns the file content found at fileName relative to the base directory
// of the file store. The path is cleaned before being made absolute to ensure
// path traversal outside the store is not possible. If the file is not found
// an error to that effect is returned.
Get(fileName string) ([]byte, error)
// ListFiles returns a list of paths relative to the base directory of the
// filestore. Any of these paths must be retrievable via the
// Storage.Get method.
ListFiles() []string
// Location returns a human readable name indicating where the implementer
// is storing keys
Location() string
}
// ErrAttemptsExceeded is returned when too many attempts have been made to decrypt a key
type ErrAttemptsExceeded struct{}
// ErrAttemptsExceeded is returned when too many attempts have been made to decrypt a key
func (err ErrAttemptsExceeded) Error() string {
return "maximum number of passphrase attempts exceeded"
}
// ErrPasswordInvalid is returned when signing fails. It could also mean the signing
// key file was corrupted, but we have no way to distinguish.
type ErrPasswordInvalid struct{}
// ErrPasswordInvalid is returned when signing fails. It could also mean the signing
// key file was corrupted, but we have no way to distinguish.
func (err ErrPasswordInvalid) Error() string {
return "password invalid, operation has failed."
}
// ErrKeyNotFound is returned when the keystore fails to retrieve a specific key.
type ErrKeyNotFound struct {
KeyID string
}
// ErrKeyNotFound is returned when the keystore fails to retrieve a specific key.
func (err ErrKeyNotFound) Error() string {
return fmt.Sprintf("signing key not found: %s", err.KeyID)
}
// KeyStore is a generic interface for private key storage
type KeyStore interface {
// AddKey adds a key to the KeyStore, and if the key already exists,
// succeeds. Otherwise, returns an error if it cannot add.
AddKey(keyInfo KeyInfo, privKey data.PrivateKey) error
// Should fail with ErrKeyNotFound if the keystore is operating normally
// and knows that it does not store the requested key.
GetKey(keyID string) (data.PrivateKey, string, error)
GetKeyInfo(keyID string) (KeyInfo, error)
ListKeys() map[string]KeyInfo
RemoveKey(keyID string) error
Name() string
}
type cachedKey struct {
alias string
key data.PrivateKey
}

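The storage.MemoryStore deleted above satisfies this Storage interface, which is exactly what NewKeyMemoryStore relies on; a compile-time sketch:

package main

import (
	store "github.com/docker/notary/storage"
	"github.com/docker/notary/trustmanager"
)

// Compile-time check that the in-memory metadata store can back a keystore.
var _ trustmanager.Storage = store.NewMemoryStore(nil)

func main() {}
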
View file

@ -1,325 +0,0 @@
package trustmanager
import (
"encoding/pem"
"fmt"
"path/filepath"
"strings"
"sync"
"github.com/Sirupsen/logrus"
"github.com/docker/notary"
store "github.com/docker/notary/storage"
"github.com/docker/notary/tuf/data"
"github.com/docker/notary/tuf/utils"
)
type keyInfoMap map[string]KeyInfo
// KeyInfo stores the role, path, and gun for a corresponding private key ID
// It is assumed that each private key ID is unique
type KeyInfo struct {
Gun string
Role string
}
// GenericKeyStore is a wrapper for Storage instances that provides
// translation between the []byte form and Public/PrivateKey objects
type GenericKeyStore struct {
store Storage
sync.Mutex
notary.PassRetriever
cachedKeys map[string]*cachedKey
keyInfoMap
}
// NewKeyFileStore returns a new KeyFileStore creating a private directory to
// hold the keys.
func NewKeyFileStore(baseDir string, p notary.PassRetriever) (*GenericKeyStore, error) {
fileStore, err := store.NewPrivateKeyFileStorage(baseDir, notary.KeyExtension)
if err != nil {
return nil, err
}
return NewGenericKeyStore(fileStore, p), nil
}
// NewKeyMemoryStore returns a new KeyMemoryStore which holds keys in memory
func NewKeyMemoryStore(p notary.PassRetriever) *GenericKeyStore {
memStore := store.NewMemoryStore(nil)
return NewGenericKeyStore(memStore, p)
}
// NewGenericKeyStore creates a GenericKeyStore wrapping the provided
// Storage instance, using the PassRetriever to enc/decrypt keys
func NewGenericKeyStore(s Storage, p notary.PassRetriever) *GenericKeyStore {
ks := GenericKeyStore{
store: s,
PassRetriever: p,
cachedKeys: make(map[string]*cachedKey),
keyInfoMap: make(keyInfoMap),
}
ks.loadKeyInfo()
return &ks
}
func generateKeyInfoMap(s Storage) map[string]KeyInfo {
keyInfoMap := make(map[string]KeyInfo)
for _, keyPath := range s.ListFiles() {
d, err := s.Get(keyPath)
if err != nil {
logrus.Error(err)
continue
}
keyID, keyInfo, err := KeyInfoFromPEM(d, keyPath)
if err != nil {
logrus.Error(err)
continue
}
keyInfoMap[keyID] = keyInfo
}
return keyInfoMap
}
// Attempts to infer the keyID, role, and GUN from the specified key path.
// Note that non-root roles can only be inferred if this is a legacy style filename: KEYID_ROLE.key
func inferKeyInfoFromKeyPath(keyPath string) (string, string, string) {
var keyID, role, gun string
keyID = filepath.Base(keyPath)
underscoreIndex := strings.LastIndex(keyID, "_")
// This is the legacy KEYID_ROLE filename
// The keyID is the first part of the keyname
// The keyRole is the second part of the keyname
// in a key named abcde_root, abcde is the keyID and root is the KeyAlias
if underscoreIndex != -1 {
role = keyID[underscoreIndex+1:]
keyID = keyID[:underscoreIndex]
}
if filepath.HasPrefix(keyPath, notary.RootKeysSubdir+"/") {
return keyID, data.CanonicalRootRole, ""
}
keyPath = strings.TrimPrefix(keyPath, notary.NonRootKeysSubdir+"/")
gun = getGunFromFullID(keyPath)
return keyID, role, gun
}
func getGunFromFullID(fullKeyID string) string {
keyGun := filepath.Dir(fullKeyID)
// If the gun is empty, Dir will return .
if keyGun == "." {
keyGun = ""
}
return keyGun
}
func (s *GenericKeyStore) loadKeyInfo() {
s.keyInfoMap = generateKeyInfoMap(s.store)
}
// GetKeyInfo returns the corresponding gun and role key info for a keyID
func (s *GenericKeyStore) GetKeyInfo(keyID string) (KeyInfo, error) {
if info, ok := s.keyInfoMap[keyID]; ok {
return info, nil
}
return KeyInfo{}, fmt.Errorf("Could not find info for keyID %s", keyID)
}
// AddKey stores the contents of a PEM-encoded private key as a PEM block
func (s *GenericKeyStore) AddKey(keyInfo KeyInfo, privKey data.PrivateKey) error {
var (
chosenPassphrase string
giveup bool
err error
pemPrivKey []byte
)
s.Lock()
defer s.Unlock()
if keyInfo.Role == data.CanonicalRootRole || data.IsDelegation(keyInfo.Role) || !data.ValidRole(keyInfo.Role) {
keyInfo.Gun = ""
}
name := filepath.Join(keyInfo.Gun, privKey.ID())
for attempts := 0; ; attempts++ {
chosenPassphrase, giveup, err = s.PassRetriever(name, keyInfo.Role, true, attempts)
if err == nil {
break
}
if giveup || attempts > 10 {
return ErrAttemptsExceeded{}
}
}
if chosenPassphrase != "" {
pemPrivKey, err = utils.EncryptPrivateKey(privKey, keyInfo.Role, keyInfo.Gun, chosenPassphrase)
} else {
pemPrivKey, err = utils.KeyToPEM(privKey, keyInfo.Role)
}
if err != nil {
return err
}
s.cachedKeys[name] = &cachedKey{alias: keyInfo.Role, key: privKey}
err = s.store.Set(filepath.Join(getSubdir(keyInfo.Role), name), pemPrivKey)
if err != nil {
return err
}
s.keyInfoMap[privKey.ID()] = keyInfo
return nil
}
// GetKey returns the PrivateKey given a KeyID
func (s *GenericKeyStore) GetKey(name string) (data.PrivateKey, string, error) {
s.Lock()
defer s.Unlock()
cachedKeyEntry, ok := s.cachedKeys[name]
if ok {
return cachedKeyEntry.key, cachedKeyEntry.alias, nil
}
keyBytes, _, keyAlias, err := getKey(s.store, name)
if err != nil {
return nil, "", err
}
// See if the key is encrypted. If it's encrypted, we'll fail to parse the private key
privKey, err := utils.ParsePEMPrivateKey(keyBytes, "")
if err != nil {
privKey, _, err = GetPasswdDecryptBytes(s.PassRetriever, keyBytes, name, string(keyAlias))
if err != nil {
return nil, "", err
}
}
s.cachedKeys[name] = &cachedKey{alias: keyAlias, key: privKey}
return privKey, keyAlias, nil
}
// ListKeys returns a list of unique PublicKeys present on the KeyFileStore, by returning a copy of the keyInfoMap
func (s *GenericKeyStore) ListKeys() map[string]KeyInfo {
return copyKeyInfoMap(s.keyInfoMap)
}
// RemoveKey removes the key from the keyfilestore
func (s *GenericKeyStore) RemoveKey(keyID string) error {
s.Lock()
defer s.Unlock()
_, filename, _, err := getKey(s.store, keyID)
switch err.(type) {
case ErrKeyNotFound, nil:
break
default:
return err
}
delete(s.cachedKeys, keyID)
err = s.store.Remove(filename) // removing a file that doesn't exist doesn't fail
if err != nil {
return err
}
// Remove this key from our keyInfo map if we removed from our filesystem
delete(s.keyInfoMap, filepath.Base(keyID))
return nil
}
// Name returns a user friendly name for the location this store
// keeps its data
func (s *GenericKeyStore) Name() string {
return s.store.Location()
}
// copyKeyInfoMap returns a deep copy of the passed-in keyInfoMap
func copyKeyInfoMap(keyInfoMap map[string]KeyInfo) map[string]KeyInfo {
copyMap := make(map[string]KeyInfo)
for keyID, keyInfo := range keyInfoMap {
copyMap[keyID] = KeyInfo{Role: keyInfo.Role, Gun: keyInfo.Gun}
}
return copyMap
}
// KeyInfoFromPEM attempts to get a keyID and KeyInfo from the filename and PEM bytes of a key
func KeyInfoFromPEM(pemBytes []byte, filename string) (string, KeyInfo, error) {
keyID, role, gun := inferKeyInfoFromKeyPath(filename)
if role == "" {
block, _ := pem.Decode(pemBytes)
if block == nil {
return "", KeyInfo{}, fmt.Errorf("could not decode PEM block for key %s", filename)
}
if keyRole, ok := block.Headers["role"]; ok {
role = keyRole
}
}
return keyID, KeyInfo{Gun: gun, Role: role}, nil
}
// getKey finds the key and role for the given keyID. It attempts to
// look both in the newer format PEM headers, and also in the legacy filename
// format. It returns: the key bytes, the filename it was found under, the role,
// and an error
func getKey(s Storage, keyID string) ([]byte, string, string, error) {
name := strings.TrimSpace(strings.TrimSuffix(filepath.Base(keyID), filepath.Ext(keyID)))
for _, file := range s.ListFiles() {
filename := filepath.Base(file)
if strings.HasPrefix(filename, name) {
d, err := s.Get(file)
if err != nil {
return nil, "", "", err
}
block, _ := pem.Decode(d)
if block != nil {
if role, ok := block.Headers["role"]; ok {
return d, file, role, nil
}
}
role := strings.TrimPrefix(filename, name+"_")
return d, file, role, nil
}
}
return nil, "", "", ErrKeyNotFound{KeyID: keyID}
}
// Assumes 2 subdirectories, 1 containing root keys and 1 containing TUF keys
func getSubdir(alias string) string {
if alias == data.CanonicalRootRole {
return notary.RootKeysSubdir
}
return notary.NonRootKeysSubdir
}
// GetPasswdDecryptBytes gets the password to decrypt the given pem bytes.
// Returns the password and private key
func GetPasswdDecryptBytes(passphraseRetriever notary.PassRetriever, pemBytes []byte, name, alias string) (data.PrivateKey, string, error) {
var (
passwd string
privKey data.PrivateKey
)
for attempts := 0; ; attempts++ {
var (
giveup bool
err error
)
if attempts > 10 {
return nil, "", ErrAttemptsExceeded{}
}
passwd, giveup, err = passphraseRetriever(name, alias, false, attempts)
// Check if the passphrase retriever got an error or if it is telling us to give up
if giveup || err != nil {
return nil, "", ErrPasswordInvalid{}
}
// Try to convert PEM encoded bytes back to a PrivateKey using the passphrase
privKey, err = utils.ParsePEMPrivateKey(pemBytes, passwd)
if err == nil {
// We managed to parse the PrivateKey. We've succeeded!
break
}
}
return privKey, passwd, nil
}

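Putting the keystore through a full add/retrieve cycle (a sketch; the GUN and passphrase are made up, and the key is encrypted at rest because the retriever returns a non-empty passphrase):

package main

import (
	"crypto/rand"
	"fmt"
	"log"

	"github.com/docker/notary/passphrase"
	"github.com/docker/notary/trustmanager"
	"github.com/docker/notary/tuf/utils"
)

func main() {
	ks := trustmanager.NewKeyMemoryStore(passphrase.ConstantRetriever("testpass"))
	priv, err := utils.GenerateECDSAKey(rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	info := trustmanager.KeyInfo{Role: "targets", Gun: "example.com/app"}
	if err := ks.AddKey(info, priv); err != nil {
		log.Fatal(err)
	}
	got, role, err := ks.GetKey(priv.ID())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("retrieved", got.ID(), "for role", role)
}
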
View file

@ -1,57 +0,0 @@
// +build pkcs11
package yubikey
import (
"encoding/pem"
"errors"
"github.com/docker/notary"
"github.com/docker/notary/trustmanager"
"github.com/docker/notary/tuf/utils"
)
// YubiImport is a wrapper around the YubiStore that allows us to import private
// keys to the yubikey
type YubiImport struct {
dest *YubiStore
passRetriever notary.PassRetriever
}
// NewImporter returns a wrapper for the YubiStore provided that enables importing
// keys via the simple Set(string, []byte) interface
func NewImporter(ys *YubiStore, ret notary.PassRetriever) *YubiImport {
return &YubiImport{
dest: ys,
passRetriever: ret,
}
}
// Set determines if we are allowed to set the given key on the Yubikey and
// calls through to YubiStore.AddKey if it's valid
func (s *YubiImport) Set(name string, bytes []byte) error {
block, _ := pem.Decode(bytes)
if block == nil {
return errors.New("invalid PEM data, could not parse")
}
role, ok := block.Headers["role"]
if !ok {
return errors.New("no role found for key")
}
ki := trustmanager.KeyInfo{
// GUN is ignored by YubiStore
Role: role,
}
privKey, err := utils.ParsePEMPrivateKey(bytes, "")
if err != nil {
privKey, _, err = trustmanager.GetPasswdDecryptBytes(
s.passRetriever,
bytes,
name,
ki.Role,
)
if err != nil {
return err
}
}
return s.dest.AddKey(ki, privKey)
}

View file

@ -1,9 +0,0 @@
// go list ./... and go test ./... will not pick up this package without this
// file, because go {list,test} ./... does not honor build tags.
// e.g. "go list -tags pkcs11 ./..." will not list this package if all the
// files in it have a build tag.
// See https://github.com/golang/go/issues/11246
package yubikey

View file

@ -1,9 +0,0 @@
// +build pkcs11,darwin
package yubikey
var possiblePkcs11Libs = []string{
"/usr/local/lib/libykcs11.dylib",
"/usr/local/docker/lib/libykcs11.dylib",
"/usr/local/docker-experimental/lib/libykcs11.dylib",
}

View file

@ -1,40 +0,0 @@
// +build pkcs11
// an interface around the pkcs11 library, so that things can be mocked out
// for testing
package yubikey
import "github.com/miekg/pkcs11"
// pkcs11LibLoader abstracts loading a PKCS#11 module so it can be mocked out for testing
type pkcs11LibLoader func(module string) IPKCS11Ctx
func defaultLoader(module string) IPKCS11Ctx {
return pkcs11.New(module)
}
// IPKCS11Ctx is an interface for wrapping the parts of
// github.com/miekg/pkcs11.Ctx that yubikeystore requires
type IPKCS11Ctx interface {
Destroy()
Initialize() error
Finalize() error
GetSlotList(tokenPresent bool) ([]uint, error)
OpenSession(slotID uint, flags uint) (pkcs11.SessionHandle, error)
CloseSession(sh pkcs11.SessionHandle) error
Login(sh pkcs11.SessionHandle, userType uint, pin string) error
Logout(sh pkcs11.SessionHandle) error
CreateObject(sh pkcs11.SessionHandle, temp []*pkcs11.Attribute) (
pkcs11.ObjectHandle, error)
DestroyObject(sh pkcs11.SessionHandle, oh pkcs11.ObjectHandle) error
GetAttributeValue(sh pkcs11.SessionHandle, o pkcs11.ObjectHandle,
a []*pkcs11.Attribute) ([]*pkcs11.Attribute, error)
FindObjectsInit(sh pkcs11.SessionHandle, temp []*pkcs11.Attribute) error
FindObjects(sh pkcs11.SessionHandle, max int) (
[]pkcs11.ObjectHandle, bool, error)
FindObjectsFinal(sh pkcs11.SessionHandle) error
SignInit(sh pkcs11.SessionHandle, m []*pkcs11.Mechanism,
o pkcs11.ObjectHandle) error
Sign(sh pkcs11.SessionHandle, message []byte) ([]byte, error)
}

View file

@ -1,10 +0,0 @@
// +build pkcs11,linux
package yubikey
var possiblePkcs11Libs = []string{
"/usr/lib/libykcs11.so",
"/usr/lib64/libykcs11.so",
"/usr/lib/x86_64-linux-gnu/libykcs11.so",
"/usr/local/lib/libykcs11.so",
}

View file

@ -1,914 +0,0 @@
// +build pkcs11
package yubikey
import (
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/sha256"
"crypto/x509"
"errors"
"fmt"
"io"
"math/big"
"os"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/notary"
"github.com/docker/notary/trustmanager"
"github.com/docker/notary/tuf/data"
"github.com/docker/notary/tuf/signed"
"github.com/docker/notary/tuf/utils"
"github.com/miekg/pkcs11"
)
const (
// UserPin is the user pin of a yubikey (in PIV parlance, the PIN)
UserPin = "123456"
// SOUserPin is the "Security Officer" user pin - this is the PIV management
// (MGM) key, which is different than the admin pin of the Yubikey PGP interface
// (which in PIV parlance is the PUK, and defaults to 12345678)
SOUserPin = "010203040506070801020304050607080102030405060708"
numSlots = 4 // number of slots in the yubikey
// KeymodeNone means that no touch or PIN is required to sign with the yubikey
KeymodeNone = 0
// KeymodeTouch means that only touch is required to sign with the yubikey
KeymodeTouch = 1
// KeymodePinOnce means that the pin entry is required once the first time to sign with the yubikey
KeymodePinOnce = 2
// KeymodePinAlways means that pin entry is required every time to sign with the yubikey
KeymodePinAlways = 4
// the key size, when importing a key into yubikey, MUST be 32 bytes
ecdsaPrivateKeySize = 32
sigAttempts = 5
)
// what key mode to use when generating keys
var (
yubikeyKeymode = KeymodeTouch | KeymodePinOnce
// order in which to prefer token locations on the yubikey.
// corresponds to: 9c, 9e, 9d, 9a
slotIDs = []int{2, 1, 3, 0}
)
// SetYubikeyKeyMode - sets the mode when generating yubikey keys.
// This is to be used for testing. It does nothing if not building with tag
// pkcs11.
func SetYubikeyKeyMode(keyMode int) error {
// technically 7 (1 | 2 | 4) is valid, but KeymodePinOnce +
// KeymodePinAlways don't really make sense together
if keyMode < 0 || keyMode > 5 {
return errors.New("Invalid key mode")
}
yubikeyKeymode = keyMode
return nil
}
// SetTouchToSignUI - allows configurable UX for notifying a user that they
// need to touch the yubikey to sign. The callback may be used to provide a
// mechanism for updating a GUI (such as removing a modal) after the touch
// has been made
func SetTouchToSignUI(notifier func(), callback func()) {
touchToSignUI = notifier
if callback != nil {
touchDoneCallback = callback
}
}
var touchToSignUI = func() {
fmt.Println("Please touch the attached Yubikey to perform signing.")
}
var touchDoneCallback = func() {
// noop
}
var pkcs11Lib string
func init() {
for _, loc := range possiblePkcs11Libs {
_, err := os.Stat(loc)
if err == nil {
p := pkcs11.New(loc)
if p != nil {
pkcs11Lib = loc
return
}
}
}
}
// ErrBackupFailed is returned when a YubiStore fails to back up a key that
// is added
type ErrBackupFailed struct {
err string
}
func (err ErrBackupFailed) Error() string {
return fmt.Sprintf("Failed to backup private key to: %s", err.err)
}
// An error indicating that the HSM is not present (as opposed to failing),
// i.e. that we can confidently claim that the key is not stored in the HSM
// without notifying the user about a missing or failing HSM.
type errHSMNotPresent struct {
err string
}
func (err errHSMNotPresent) Error() string {
return err.err
}
type yubiSlot struct {
role string
slotID []byte
}
// YubiPrivateKey represents a private key inside of a yubikey
type YubiPrivateKey struct {
data.ECDSAPublicKey
passRetriever notary.PassRetriever
slot []byte
libLoader pkcs11LibLoader
}
// yubikeySigner wraps a YubiPrivateKey and implements the crypto.Signer interface
type yubikeySigner struct {
YubiPrivateKey
}
// NewYubiPrivateKey returns a YubiPrivateKey, which implements the data.PrivateKey
// interface except that the private material is inaccessible
func NewYubiPrivateKey(slot []byte, pubKey data.ECDSAPublicKey,
passRetriever notary.PassRetriever) *YubiPrivateKey {
return &YubiPrivateKey{
ECDSAPublicKey: pubKey,
passRetriever: passRetriever,
slot: slot,
libLoader: defaultLoader,
}
}
// Public is a required method of the crypto.Signer interface
func (ys *yubikeySigner) Public() crypto.PublicKey {
publicKey, err := x509.ParsePKIXPublicKey(ys.YubiPrivateKey.Public())
if err != nil {
return nil
}
return publicKey
}
func (y *YubiPrivateKey) setLibLoader(loader pkcs11LibLoader) {
y.libLoader = loader
}
// CryptoSigner returns a crypto.Signer that wraps the YubiPrivateKey. Needed for
// Certificate generation only
func (y *YubiPrivateKey) CryptoSigner() crypto.Signer {
return &yubikeySigner{YubiPrivateKey: *y}
}
// Private is not implemented in hardware keys
func (y *YubiPrivateKey) Private() []byte {
// We cannot return the private material from a Yubikey
// TODO(david): We probably want to return an error here
return nil
}
// SignatureAlgorithm returns which algorithm this key uses to sign - currently
// hardcoded to ECDSA
func (y YubiPrivateKey) SignatureAlgorithm() data.SigAlgorithm {
return data.ECDSASignature
}
// Sign is a required method of the crypto.Signer interface and the data.PrivateKey
// interface
func (y *YubiPrivateKey) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) ([]byte, error) {
ctx, session, err := SetupHSMEnv(pkcs11Lib, y.libLoader)
if err != nil {
return nil, err
}
defer cleanup(ctx, session)
v := signed.Verifiers[data.ECDSASignature]
for i := 0; i < sigAttempts; i++ {
sig, err := sign(ctx, session, y.slot, y.passRetriever, msg)
if err != nil {
return nil, fmt.Errorf("failed to sign using Yubikey: %v", err)
}
if err := v.Verify(&y.ECDSAPublicKey, sig, msg); err == nil {
return sig, nil
}
}
return nil, errors.New("Failed to generate signature on Yubikey.")
}
// If a byte array is less than the number of bytes specified by
// ecdsaPrivateKeySize, left-zero-pad the byte array until
// it is the required size.
func ensurePrivateKeySize(payload []byte) []byte {
final := payload
if len(payload) < ecdsaPrivateKeySize {
final = make([]byte, ecdsaPrivateKeySize)
copy(final[ecdsaPrivateKeySize-len(payload):], payload)
}
return final
}
// addECDSAKey adds a key to the yubikey
func addECDSAKey(
ctx IPKCS11Ctx,
session pkcs11.SessionHandle,
privKey data.PrivateKey,
pkcs11KeyID []byte,
passRetriever notary.PassRetriever,
role string,
) error {
logrus.Debugf("Attempting to add key to yubikey with ID: %s", privKey.ID())
err := login(ctx, session, passRetriever, pkcs11.CKU_SO, SOUserPin)
if err != nil {
return err
}
defer ctx.Logout(session)
// Create an ecdsa.PrivateKey out of the private key bytes
ecdsaPrivKey, err := x509.ParseECPrivateKey(privKey.Private())
if err != nil {
return err
}
ecdsaPrivKeyD := ensurePrivateKeySize(ecdsaPrivKey.D.Bytes())
// Hard-coded policy: the generated certificate expires in 10 years.
startTime := time.Now()
template, err := utils.NewCertificate(role, startTime, startTime.AddDate(10, 0, 0))
if err != nil {
return fmt.Errorf("failed to create the certificate template: %v", err)
}
certBytes, err := x509.CreateCertificate(rand.Reader, template, template, ecdsaPrivKey.Public(), ecdsaPrivKey)
if err != nil {
return fmt.Errorf("failed to create the certificate: %v", err)
}
certTemplate := []*pkcs11.Attribute{
pkcs11.NewAttribute(pkcs11.CKA_CLASS, pkcs11.CKO_CERTIFICATE),
pkcs11.NewAttribute(pkcs11.CKA_VALUE, certBytes),
pkcs11.NewAttribute(pkcs11.CKA_ID, pkcs11KeyID),
}
privateKeyTemplate := []*pkcs11.Attribute{
pkcs11.NewAttribute(pkcs11.CKA_CLASS, pkcs11.CKO_PRIVATE_KEY),
pkcs11.NewAttribute(pkcs11.CKA_KEY_TYPE, pkcs11.CKK_ECDSA),
pkcs11.NewAttribute(pkcs11.CKA_ID, pkcs11KeyID),
pkcs11.NewAttribute(pkcs11.CKA_EC_PARAMS, []byte{0x06, 0x08, 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x01, 0x07}),
pkcs11.NewAttribute(pkcs11.CKA_VALUE, ecdsaPrivKeyD),
pkcs11.NewAttribute(pkcs11.CKA_VENDOR_DEFINED, yubikeyKeymode),
}
_, err = ctx.CreateObject(session, certTemplate)
if err != nil {
return fmt.Errorf("error importing: %v", err)
}
_, err = ctx.CreateObject(session, privateKeyTemplate)
if err != nil {
return fmt.Errorf("error importing: %v", err)
}
return nil
}
func getECDSAKey(ctx IPKCS11Ctx, session pkcs11.SessionHandle, pkcs11KeyID []byte) (*data.ECDSAPublicKey, string, error) {
findTemplate := []*pkcs11.Attribute{
pkcs11.NewAttribute(pkcs11.CKA_TOKEN, true),
pkcs11.NewAttribute(pkcs11.CKA_ID, pkcs11KeyID),
pkcs11.NewAttribute(pkcs11.CKA_CLASS, pkcs11.CKO_PUBLIC_KEY),
}
attrTemplate := []*pkcs11.Attribute{
pkcs11.NewAttribute(pkcs11.CKA_KEY_TYPE, []byte{0}),
pkcs11.NewAttribute(pkcs11.CKA_EC_POINT, []byte{0}),
pkcs11.NewAttribute(pkcs11.CKA_EC_PARAMS, []byte{0}),
}
if err := ctx.FindObjectsInit(session, findTemplate); err != nil {
logrus.Debugf("Failed to init: %s", err.Error())
return nil, "", err
}
obj, _, err := ctx.FindObjects(session, 1)
if err != nil {
logrus.Debugf("Failed to find objects: %v", err)
return nil, "", err
}
if err := ctx.FindObjectsFinal(session); err != nil {
logrus.Debugf("Failed to finalize: %s", err.Error())
return nil, "", err
}
if len(obj) != 1 {
logrus.Debugf("should have found one object")
return nil, "", errors.New("no matching keys found inside of yubikey")
}
// Retrieve the public-key material to be able to create a new ECDSAKey
attr, err := ctx.GetAttributeValue(session, obj[0], attrTemplate)
if err != nil {
logrus.Debugf("Failed to get Attribute for: %v", obj[0])
return nil, "", err
}
// Iterate through all the attributes of this key and save the CKA_EC_POINT value, avoiding ordering-specific issues.
var rawPubKey []byte
for _, a := range attr {
if a.Type == pkcs11.CKA_EC_POINT {
rawPubKey = a.Value
}
}
ecdsaPubKey := ecdsa.PublicKey{Curve: elliptic.P256(), X: new(big.Int).SetBytes(rawPubKey[3:35]), Y: new(big.Int).SetBytes(rawPubKey[35:])}
pubBytes, err := x509.MarshalPKIXPublicKey(&ecdsaPubKey)
if err != nil {
logrus.Debugf("Failed to Marshal public key")
return nil, "", err
}
return data.NewECDSAPublicKey(pubBytes), data.CanonicalRootRole, nil
}
// sign returns a signature over the given payload
func sign(ctx IPKCS11Ctx, session pkcs11.SessionHandle, pkcs11KeyID []byte, passRetriever notary.PassRetriever, payload []byte) ([]byte, error) {
err := login(ctx, session, passRetriever, pkcs11.CKU_USER, UserPin)
if err != nil {
return nil, fmt.Errorf("error logging in: %v", err)
}
defer ctx.Logout(session)
// Define the ECDSA Private key template
class := pkcs11.CKO_PRIVATE_KEY
privateKeyTemplate := []*pkcs11.Attribute{
pkcs11.NewAttribute(pkcs11.CKA_CLASS, class),
pkcs11.NewAttribute(pkcs11.CKA_KEY_TYPE, pkcs11.CKK_ECDSA),
pkcs11.NewAttribute(pkcs11.CKA_ID, pkcs11KeyID),
}
if err := ctx.FindObjectsInit(session, privateKeyTemplate); err != nil {
logrus.Debugf("Failed to init find objects: %s", err.Error())
return nil, err
}
obj, _, err := ctx.FindObjects(session, 1)
if err != nil {
logrus.Debugf("Failed to find objects: %v", err)
return nil, err
}
if err = ctx.FindObjectsFinal(session); err != nil {
logrus.Debugf("Failed to finalize find objects: %s", err.Error())
return nil, err
}
if len(obj) != 1 {
return nil, errors.New("expected to find exactly one private key object in yubikey")
}
var sig []byte
err = ctx.SignInit(
session, []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_ECDSA, nil)}, obj[0])
if err != nil {
return nil, err
}
// Get the SHA256 of the payload
digest := sha256.Sum256(payload)
if (yubikeyKeymode & KeymodeTouch) > 0 {
touchToSignUI()
defer touchDoneCallback()
}
// a call to Sign, whether or not Sign fails, will clear the SignInit
sig, err = ctx.Sign(session, digest[:])
if err != nil {
logrus.Debugf("Error while signing: %s", err)
return nil, err
}
if sig == nil {
return nil, errors.New("Failed to create signature")
}
return sig[:], nil
}
func yubiRemoveKey(ctx IPKCS11Ctx, session pkcs11.SessionHandle, pkcs11KeyID []byte, passRetriever notary.PassRetriever, keyID string) error {
err := login(ctx, session, passRetriever, pkcs11.CKU_SO, SOUserPin)
if err != nil {
return err
}
defer ctx.Logout(session)
template := []*pkcs11.Attribute{
pkcs11.NewAttribute(pkcs11.CKA_TOKEN, true),
pkcs11.NewAttribute(pkcs11.CKA_ID, pkcs11KeyID),
//pkcs11.NewAttribute(pkcs11.CKA_CLASS, pkcs11.CKO_PRIVATE_KEY),
pkcs11.NewAttribute(pkcs11.CKA_CLASS, pkcs11.CKO_CERTIFICATE),
}
if err := ctx.FindObjectsInit(session, template); err != nil {
logrus.Debugf("Failed to init find objects: %s", err.Error())
return err
}
obj, b, err := ctx.FindObjects(session, 1)
if err != nil {
logrus.Debugf("Failed to find objects: %s %v", err.Error(), b)
return err
}
if err := ctx.FindObjectsFinal(session); err != nil {
logrus.Debugf("Failed to finalize find objects: %s", err.Error())
return err
}
if len(obj) != 1 {
logrus.Debugf("should have found exactly one object")
return errors.New("no matching certificate found inside of yubikey")
}
// Delete the certificate
err = ctx.DestroyObject(session, obj[0])
if err != nil {
logrus.Debugf("Failed to delete cert")
return err
}
return nil
}
func yubiListKeys(ctx IPKCS11Ctx, session pkcs11.SessionHandle) (keys map[string]yubiSlot, err error) {
keys = make(map[string]yubiSlot)
findTemplate := []*pkcs11.Attribute{
pkcs11.NewAttribute(pkcs11.CKA_TOKEN, true),
//pkcs11.NewAttribute(pkcs11.CKA_ID, pkcs11KeyID),
pkcs11.NewAttribute(pkcs11.CKA_CLASS, pkcs11.CKO_CERTIFICATE),
}
attrTemplate := []*pkcs11.Attribute{
pkcs11.NewAttribute(pkcs11.CKA_ID, []byte{0}),
pkcs11.NewAttribute(pkcs11.CKA_VALUE, []byte{0}),
}
if err = ctx.FindObjectsInit(session, findTemplate); err != nil {
logrus.Debugf("Failed to init: %s", err.Error())
return
}
objs, b, err := ctx.FindObjects(session, numSlots)
for err == nil {
var o []pkcs11.ObjectHandle
o, b, err = ctx.FindObjects(session, numSlots)
if err != nil {
continue
}
if len(o) == 0 {
break
}
objs = append(objs, o...)
}
if err != nil {
logrus.Debugf("Failed to find: %s %v", err.Error(), b)
if len(objs) == 0 {
return nil, err
}
}
if err = ctx.FindObjectsFinal(session); err != nil {
logrus.Debugf("Failed to finalize: %s", err.Error())
return
}
if len(objs) == 0 {
return nil, errors.New("No keys found in yubikey.")
}
logrus.Debugf("Found %d objects matching list filters", len(objs))
for _, obj := range objs {
var (
cert *x509.Certificate
slot []byte
)
// Retrieve the slot ID and certificate so we can rebuild an ECDSA public key
attr, err := ctx.GetAttributeValue(session, obj, attrTemplate)
if err != nil {
logrus.Debugf("Failed to get Attribute for: %v", obj)
continue
}
// Iterate through all the attributes of this object and save CKA_ID and CKA_VALUE. Iterating removes ordering-specific issues.
for _, a := range attr {
if a.Type == pkcs11.CKA_ID {
slot = a.Value
}
if a.Type == pkcs11.CKA_VALUE {
cert, err = x509.ParseCertificate(a.Value)
if err != nil {
continue
}
if !data.ValidRole(cert.Subject.CommonName) {
continue
}
}
}
// we found nothing
if cert == nil {
continue
}
var ecdsaPubKey *ecdsa.PublicKey
switch cert.PublicKeyAlgorithm {
case x509.ECDSA:
ecdsaPubKey = cert.PublicKey.(*ecdsa.PublicKey)
default:
logrus.Infof("Unsupported x509 PublicKeyAlgorithm: %d", cert.PublicKeyAlgorithm)
continue
}
pubBytes, err := x509.MarshalPKIXPublicKey(ecdsaPubKey)
if err != nil {
logrus.Debugf("Failed to Marshal public key")
continue
}
keys[data.NewECDSAPublicKey(pubBytes).ID()] = yubiSlot{
role: cert.Subject.CommonName,
slotID: slot,
}
}
return
}
func getNextEmptySlot(ctx IPKCS11Ctx, session pkcs11.SessionHandle) ([]byte, error) {
findTemplate := []*pkcs11.Attribute{
pkcs11.NewAttribute(pkcs11.CKA_TOKEN, true),
}
attrTemplate := []*pkcs11.Attribute{
pkcs11.NewAttribute(pkcs11.CKA_ID, []byte{0}),
}
if err := ctx.FindObjectsInit(session, findTemplate); err != nil {
logrus.Debugf("Failed to init: %s", err.Error())
return nil, err
}
objs, b, err := ctx.FindObjects(session, numSlots)
// if there are more objects than `numSlots`, get all of them until
// there are no more to get
for err == nil {
var o []pkcs11.ObjectHandle
o, b, err = ctx.FindObjects(session, numSlots)
if err != nil {
continue
}
if len(o) == 0 {
break
}
objs = append(objs, o...)
}
taken := make(map[int]bool)
if err != nil {
logrus.Debugf("Failed to find: %s %v", err.Error(), b)
return nil, err
}
if err = ctx.FindObjectsFinal(session); err != nil {
logrus.Debugf("Failed to finalize: %s\n", err.Error())
return nil, err
}
for _, obj := range objs {
// Retrieve the slot ID
attr, err := ctx.GetAttributeValue(session, obj, attrTemplate)
if err != nil {
continue
}
// Iterate through attributes. If an ID attr was found, mark it as taken
for _, a := range attr {
if a.Type == pkcs11.CKA_ID {
if len(a.Value) < 1 {
continue
}
// a byte will always be capable of representing all slot IDs
// for the Yubikeys
slotNum := int(a.Value[0])
if slotNum >= numSlots {
// defensive
continue
}
taken[slotNum] = true
}
}
}
// iterate the token locations in our preferred order and use the first
// available one. Otherwise exit the loop and return an error.
for _, loc := range slotIDs {
if !taken[loc] {
return []byte{byte(loc)}, nil
}
}
return nil, errors.New("Yubikey has no available slots.")
}
// YubiStore is a KeyStore for private keys inside a Yubikey
type YubiStore struct {
passRetriever notary.PassRetriever
keys map[string]yubiSlot
backupStore trustmanager.KeyStore
libLoader pkcs11LibLoader
}
// NewYubiStore returns a YubiStore, given a backup key store to write any
// generated keys to (usually a KeyFileStore)
func NewYubiStore(backupStore trustmanager.KeyStore, passphraseRetriever notary.PassRetriever) (
*YubiStore, error) {
s := &YubiStore{
passRetriever: passphraseRetriever,
keys: make(map[string]yubiSlot),
backupStore: backupStore,
libLoader: defaultLoader,
}
s.ListKeys() // populate keys field
return s, nil
}
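// exampleOpenStore is an illustrative sketch (not part of the original file):
// build a YubiStore on top of a caller-supplied backup KeyStore and
// passphrase retriever, then enumerate the keys currently on the token.
func exampleOpenStore(backup trustmanager.KeyStore, retriever notary.PassRetriever) {
	store, _ := NewYubiStore(backup, retriever)
	for keyID, info := range store.ListKeys() {
		logrus.Debugf("yubikey holds key %s for role %s", keyID, info.Role)
	}
}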
// Name returns a user friendly name for the location this store
// keeps its data
func (s YubiStore) Name() string {
return "yubikey"
}
func (s *YubiStore) setLibLoader(loader pkcs11LibLoader) {
s.libLoader = loader
}
// ListKeys returns a list of keys in the yubikey store
func (s *YubiStore) ListKeys() map[string]trustmanager.KeyInfo {
if len(s.keys) > 0 {
return buildKeyMap(s.keys)
}
ctx, session, err := SetupHSMEnv(pkcs11Lib, s.libLoader)
if err != nil {
logrus.Debugf("No yubikey found, using alternative key storage: %s", err.Error())
return nil
}
defer cleanup(ctx, session)
keys, err := yubiListKeys(ctx, session)
if err != nil {
logrus.Debugf("Failed to list key from the yubikey: %s", err.Error())
return nil
}
s.keys = keys
return buildKeyMap(keys)
}
// AddKey puts a key inside the Yubikey, as well as writing it to the backup store
func (s *YubiStore) AddKey(keyInfo trustmanager.KeyInfo, privKey data.PrivateKey) error {
added, err := s.addKey(privKey.ID(), keyInfo.Role, privKey)
if err != nil {
return err
}
if added && s.backupStore != nil {
err = s.backupStore.AddKey(keyInfo, privKey)
if err != nil {
defer s.RemoveKey(privKey.ID())
return ErrBackupFailed{err: err.Error()}
}
}
return nil
}
// Only add if we haven't seen the key already. Return whether the key was
// added.
func (s *YubiStore) addKey(keyID, role string, privKey data.PrivateKey) (
bool, error) {
// We only allow adding root keys for now
if role != data.CanonicalRootRole {
return false, fmt.Errorf(
"yubikey only supports storing root keys, got %s for key: %s", role, keyID)
}
ctx, session, err := SetupHSMEnv(pkcs11Lib, s.libLoader)
if err != nil {
logrus.Debugf("No yubikey found, using alternative key storage: %s", err.Error())
return false, err
}
defer cleanup(ctx, session)
if k, ok := s.keys[keyID]; ok {
if k.role == role {
// already have the key and it's associated with the correct role
return false, nil
}
}
slot, err := getNextEmptySlot(ctx, session)
if err != nil {
logrus.Debugf("Failed to get an empty yubikey slot: %s", err.Error())
return false, err
}
logrus.Debugf("Attempting to store key using yubikey slot %v", slot)
err = addECDSAKey(
ctx, session, privKey, slot, s.passRetriever, role)
if err == nil {
s.keys[privKey.ID()] = yubiSlot{
role: role,
slotID: slot,
}
return true, nil
}
logrus.Debugf("Failed to add key to yubikey: %v", err)
return false, err
}
// GetKey retrieves a key from the Yubikey only (it does not look inside the
// backup store)
func (s *YubiStore) GetKey(keyID string) (data.PrivateKey, string, error) {
ctx, session, err := SetupHSMEnv(pkcs11Lib, s.libLoader)
if err != nil {
logrus.Debugf("No yubikey found, using alternative key storage: %s", err.Error())
if _, ok := err.(errHSMNotPresent); ok {
err = trustmanager.ErrKeyNotFound{KeyID: keyID}
}
return nil, "", err
}
defer cleanup(ctx, session)
key, ok := s.keys[keyID]
if !ok {
return nil, "", trustmanager.ErrKeyNotFound{KeyID: keyID}
}
pubKey, alias, err := getECDSAKey(ctx, session, key.slotID)
if err != nil {
logrus.Debugf("Failed to get key from slot %s: %s", key.slotID, err.Error())
return nil, "", err
}
// Check to see if we're returning the intended keyID
if pubKey.ID() != keyID {
return nil, "", fmt.Errorf("expected root key: %s, but found: %s", keyID, pubKey.ID())
}
privKey := NewYubiPrivateKey(key.slotID, *pubKey, s.passRetriever)
if privKey == nil {
return nil, "", errors.New("could not initialize new YubiPrivateKey")
}
return privKey, alias, err
}
// RemoveKey deletes a key from the Yubikey only (it does not remove it from the
// backup store)
func (s *YubiStore) RemoveKey(keyID string) error {
ctx, session, err := SetupHSMEnv(pkcs11Lib, s.libLoader)
if err != nil {
logrus.Debugf("No yubikey found, using alternative key storage: %s", err.Error())
return nil
}
defer cleanup(ctx, session)
key, ok := s.keys[keyID]
if !ok {
return errors.New("Key not present in yubikey")
}
err = yubiRemoveKey(ctx, session, key.slotID, s.passRetriever, keyID)
if err == nil {
delete(s.keys, keyID)
} else {
logrus.Debugf("Failed to remove from the yubikey KeyID %s: %v", keyID, err)
}
return err
}
// GetKeyInfo is not yet implemented
func (s *YubiStore) GetKeyInfo(keyID string) (trustmanager.KeyInfo, error) {
return trustmanager.KeyInfo{}, fmt.Errorf("Not yet implemented")
}
func cleanup(ctx IPKCS11Ctx, session pkcs11.SessionHandle) {
err := ctx.CloseSession(session)
if err != nil {
logrus.Debugf("Error closing session: %s", err.Error())
}
finalizeAndDestroy(ctx)
}
func finalizeAndDestroy(ctx IPKCS11Ctx) {
err := ctx.Finalize()
if err != nil {
logrus.Debugf("Error finalizing: %s", err.Error())
}
ctx.Destroy()
}
// SetupHSMEnv is a method that depends on the existence of a PKCS11 library
// and an HSM: it loads and initializes the library, then opens a read/write
// session with the first available slot
func SetupHSMEnv(libraryPath string, libLoader pkcs11LibLoader) (
IPKCS11Ctx, pkcs11.SessionHandle, error) {
if libraryPath == "" {
return nil, 0, errHSMNotPresent{err: "no library found"}
}
p := libLoader(libraryPath)
if p == nil {
return nil, 0, fmt.Errorf("failed to load library %s", libraryPath)
}
if err := p.Initialize(); err != nil {
defer finalizeAndDestroy(p)
return nil, 0, fmt.Errorf("found library %s, but initialize error %s", libraryPath, err.Error())
}
slots, err := p.GetSlotList(true)
if err != nil {
defer finalizeAndDestroy(p)
return nil, 0, fmt.Errorf(
"loaded library %s, but failed to list HSM slots %s", libraryPath, err)
}
// Check to see if we got any slots from the HSM.
if len(slots) < 1 {
defer finalizeAndDestroy(p)
return nil, 0, fmt.Errorf(
"loaded library %s, but no HSM slots found", libraryPath)
}
// CKF_SERIAL_SESSION: TRUE if cryptographic functions are performed in serial with the application; FALSE if the functions may be performed in parallel with the application.
// CKF_RW_SESSION: TRUE if the session is read/write; FALSE if the session is read-only
session, err := p.OpenSession(slots[0], pkcs11.CKF_SERIAL_SESSION|pkcs11.CKF_RW_SESSION)
if err != nil {
defer cleanup(p, session)
return nil, 0, fmt.Errorf(
"loaded library %s, but failed to start session with HSM %s",
libraryPath, err)
}
logrus.Debugf("Initialized PKCS11 library %s and started HSM session", libraryPath)
return p, session, nil
}
// IsAccessible returns true if a Yubikey can be accessed
func IsAccessible() bool {
if pkcs11Lib == "" {
return false
}
ctx, session, err := SetupHSMEnv(pkcs11Lib, defaultLoader)
if err != nil {
return false
}
defer cleanup(ctx, session)
return true
}
func login(ctx IPKCS11Ctx, session pkcs11.SessionHandle, passRetriever notary.PassRetriever, userFlag uint, defaultPassw string) error {
// try default password
err := ctx.Login(session, userFlag, defaultPassw)
if err == nil {
return nil
}
// default failed, ask user for password
for attempts := 0; ; attempts++ {
var (
giveup bool
err error
user string
)
if userFlag == pkcs11.CKU_SO {
user = "SO Pin"
} else {
user = "User Pin"
}
passwd, giveup, err := passRetriever(user, "yubikey", false, attempts)
// Check if the passphrase retriever got an error or if it is telling us to give up
if giveup || err != nil {
return trustmanager.ErrPasswordInvalid{}
}
if attempts > 2 {
return trustmanager.ErrAttemptsExceeded{}
}
// attempt to login. Loop if failed
err = ctx.Login(session, userFlag, passwd)
if err == nil {
return nil
}
}
}
func buildKeyMap(keys map[string]yubiSlot) map[string]trustmanager.KeyInfo {
res := make(map[string]trustmanager.KeyInfo)
for k, v := range keys {
res[k] = trustmanager.KeyInfo{Role: v.role, Gun: ""}
}
return res
}

View file

@ -1,291 +0,0 @@
package trustpinning
import (
"crypto/x509"
"errors"
"fmt"
"strings"
"github.com/Sirupsen/logrus"
"github.com/docker/notary/tuf/data"
"github.com/docker/notary/tuf/signed"
"github.com/docker/notary/tuf/utils"
)
// ErrValidationFail is returned when there are no valid trusted certificates
// being served inside of the roots.json
type ErrValidationFail struct {
Reason string
}
// ErrValidationFail is returned when there are no valid trusted certificates
// being served inside of the roots.json
func (err ErrValidationFail) Error() string {
return fmt.Sprintf("could not validate the path to a trusted root: %s", err.Reason)
}
// ErrRootRotationFail is returned when we fail to do a full root key rotation
// by failing either to add the new root certificate or to delete the old ones
type ErrRootRotationFail struct {
Reason string
}
// ErrRootRotationFail is returned when we fail to do a full root key rotation
// by failing either to add the new root certificate or to delete the old ones
func (err ErrRootRotationFail) Error() string {
return fmt.Sprintf("could not rotate trust to a new trusted root: %s", err.Reason)
}
func prettyFormatCertIDs(certs map[string]*x509.Certificate) string {
ids := make([]string, 0, len(certs))
for id := range certs {
ids = append(ids, id)
}
return strings.Join(ids, ", ")
}
/*
ValidateRoot receives a new root, validates its correctness and attempts to
do root key rotation if needed.
First we check if we have any trusted certificates for a particular GUN in
a previous root, if we have one. If the previous root is not nil and we find
certificates for this GUN, we've already seen this repository before, and
have a list of trusted certificates for it. In this case, we use this list of
certificates to attempt to validate this root file.
If the previous validation succeeds, we check the integrity of the root by
making sure that it is validated by itself. This means that we will attempt to
validate the root data with the certificates that are included in the root keys
themselves.
However, if we do not have any current trusted certificates for this GUN, we
check if there are any pinned certificates specified in the trust_pinning section
of the notary client config. If this section specifies a Certs section with this
GUN, we attempt to validate that the certificates present in the downloaded root
file match the pinned ID.
If the Certs section is empty for this GUN, we check if the trust_pinning
section of the config specifies a CA section for this GUN. If so, we check
that the specified CA is valid and has signed a certificate included in the downloaded
root file. The specified CA can be a prefix for this GUN.
If neither the Certs nor the CA configs match this GUN, we fall back to the TOFU
section in the config: if true, we trust certificates specified in the root for
this GUN. If we later see a different certificate for that GUN, we return
an ErrValidationFail error.
Note that since we only allow trust data to be downloaded over an HTTPS channel,
we are using the current public PKI to validate the first download of the certificate,
adding an extra layer of security over the normal (SSH-style) trust model.
We shall call this: TOFUS.
Validation failure at any step will result in an ErrValidationFail error.
*/
func ValidateRoot(prevRoot *data.SignedRoot, root *data.Signed, gun string, trustPinning TrustPinConfig) (*data.SignedRoot, error) {
logrus.Debugf("entered ValidateRoot with dns: %s", gun)
signedRoot, err := data.RootFromSigned(root)
if err != nil {
return nil, err
}
rootRole, err := signedRoot.BuildBaseRole(data.CanonicalRootRole)
if err != nil {
return nil, err
}
// Retrieve all the leaf and intermediate certificates in root for which the CN matches the GUN
allLeafCerts, allIntCerts := parseAllCerts(signedRoot)
certsFromRoot, err := validRootLeafCerts(allLeafCerts, gun, true)
validIntCerts := validRootIntCerts(allIntCerts)
if err != nil {
logrus.Debugf("error retrieving valid leaf certificates for: %s, %v", gun, err)
return nil, &ErrValidationFail{Reason: "unable to retrieve valid leaf certificates"}
}
logrus.Debugf("found %d leaf certs, of which %d are valid leaf certs for %s", len(allLeafCerts), len(certsFromRoot), gun)
// If we have a previous root, let's try to use it to validate that this new root is valid.
havePrevRoot := prevRoot != nil
if havePrevRoot {
// Retrieve all the trusted certificates from our previous root
// Note that we do not validate expiries here since our originally trusted root might have expired certs
allTrustedLeafCerts, allTrustedIntCerts := parseAllCerts(prevRoot)
trustedLeafCerts, err := validRootLeafCerts(allTrustedLeafCerts, gun, false)
if err != nil {
return nil, &ErrValidationFail{Reason: "could not retrieve trusted certs from previous root role data"}
}
// Use the certificates we found in the previous root for the GUN to verify its signatures
// This could potentially be an empty set, in which case we will fail to verify
logrus.Debugf("found %d valid root leaf certificates for %s: %s", len(trustedLeafCerts), gun,
prettyFormatCertIDs(trustedLeafCerts))
// Extract the previous root's threshold for signature verification
prevRootRoleData, ok := prevRoot.Signed.Roles[data.CanonicalRootRole]
if !ok {
return nil, &ErrValidationFail{Reason: "could not retrieve previous root role data"}
}
err = signed.VerifySignatures(
root, data.BaseRole{Keys: utils.CertsToKeys(trustedLeafCerts, allTrustedIntCerts), Threshold: prevRootRoleData.Threshold})
if err != nil {
logrus.Debugf("failed to verify TUF data for: %s, %v", gun, err)
return nil, &ErrRootRotationFail{Reason: "failed to validate data with current trusted certificates"}
}
// Clear the IsValid marks we could have received from VerifySignatures
for i := range root.Signatures {
root.Signatures[i].IsValid = false
}
}
// Regardless of having a previous root or not, confirm that the new root validates against the trust pinning
logrus.Debugf("checking root against trust_pinning config", gun)
trustPinCheckFunc, err := NewTrustPinChecker(trustPinning, gun, !havePrevRoot)
if err != nil {
return nil, &ErrValidationFail{Reason: err.Error()}
}
validPinnedCerts := map[string]*x509.Certificate{}
for id, cert := range certsFromRoot {
logrus.Debugf("checking trust-pinning for cert: %s", id)
if ok := trustPinCheckFunc(cert, validIntCerts[id]); !ok {
logrus.Debugf("trust-pinning check failed for cert: %s", id)
continue
}
validPinnedCerts[id] = cert
}
if len(validPinnedCerts) == 0 {
return nil, &ErrValidationFail{Reason: "unable to match any certificates to trust_pinning config"}
}
certsFromRoot = validPinnedCerts
// Validate the integrity of the new root (does it have valid signatures)
// Note that certsFromRoot is guaranteed to be unchanged only if we had prior cert data for this GUN or enabled TOFUS
// If we attempted to pin a certain certificate or CA, certsFromRoot could have been pruned accordingly
err = signed.VerifySignatures(root, data.BaseRole{
Keys: utils.CertsToKeys(certsFromRoot, validIntCerts), Threshold: rootRole.Threshold})
if err != nil {
logrus.Debugf("failed to verify TUF data for: %s, %v", gun, err)
return nil, &ErrValidationFail{Reason: "failed to validate integrity of roots"}
}
logrus.Debugf("root validation succeeded for %s", gun)
// Call RootFromSigned to make sure we pick up on the IsValid markings from VerifySignatures
return data.RootFromSigned(root)
}
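// exampleValidateRootTOFU is an illustrative sketch (not upstream code) of a
// first-contact ValidateRoot call: with no previous root and an empty
// TrustPinConfig, trust is bootstrapped TOFU-style from the certificates
// embedded in the downloaded root itself. root is assumed to hold the parsed
// signed root metadata fetched over HTTPS.
func exampleValidateRootTOFU(root *data.Signed, gun string) (*data.SignedRoot, error) {
	return ValidateRoot(nil, root, gun, TrustPinConfig{})
}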
// validRootLeafCerts returns a list of non-sha1 certificates found in root
// whose Common-Names match the provided GUN (and, if checkExpiry is true,
// that are also non-expired). Note that this
// "validity" alone does not imply any measure of trust.
func validRootLeafCerts(allLeafCerts map[string]*x509.Certificate, gun string, checkExpiry bool) (map[string]*x509.Certificate, error) {
validLeafCerts := make(map[string]*x509.Certificate)
// Go through every leaf certificate and check that the CN matches the gun
for id, cert := range allLeafCerts {
// Validate that this leaf certificate has a CN that matches the exact gun
if cert.Subject.CommonName != gun {
logrus.Debugf("error leaf certificate CN: %s doesn't match the given GUN: %s",
cert.Subject.CommonName, gun)
continue
}
// Make sure the certificate is not expired if checkExpiry is true
// and warn if it hasn't expired yet but is within 6 months of expiry
if err := utils.ValidateCertificate(cert, checkExpiry); err != nil {
logrus.Debugf("%s is invalid: %s", id, err.Error())
continue
}
validLeafCerts[id] = cert
}
if len(validLeafCerts) < 1 {
logrus.Debugf("didn't find any valid leaf certificates for %s", gun)
return nil, errors.New("no valid leaf certificates found in any of the root keys")
}
logrus.Debugf("found %d valid leaf certificates for %s: %s", len(validLeafCerts), gun,
prettyFormatCertIDs(validLeafCerts))
return validLeafCerts, nil
}
// validRootIntCerts filters the passed in structure of intermediate certificates to only include non-expired, non-sha1 certificates
// Note that this "validity" alone does not imply any measure of trust.
func validRootIntCerts(allIntCerts map[string][]*x509.Certificate) map[string][]*x509.Certificate {
validIntCerts := make(map[string][]*x509.Certificate)
// Go through every leaf cert ID, and build its valid intermediate certificate list
for leafID, intCertList := range allIntCerts {
for _, intCert := range intCertList {
if err := utils.ValidateCertificate(intCert, true); err != nil {
continue
}
validIntCerts[leafID] = append(validIntCerts[leafID], intCert)
}
}
return validIntCerts
}
// parseAllCerts returns two maps, one with all of the leafCertificates and one
// with all the intermediate certificates found in signedRoot
func parseAllCerts(signedRoot *data.SignedRoot) (map[string]*x509.Certificate, map[string][]*x509.Certificate) {
if signedRoot == nil {
return nil, nil
}
leafCerts := make(map[string]*x509.Certificate)
intCerts := make(map[string][]*x509.Certificate)
// Before we loop through all available root keys, make sure at least one exists
rootRoles, ok := signedRoot.Signed.Roles[data.CanonicalRootRole]
if !ok {
logrus.Debugf("tried to parse certificates from invalid root signed data")
return nil, nil
}
logrus.Debugf("found the following root keys: %v", rootRoles.KeyIDs)
// Iterate over every keyID for the root role inside of roots.json
for _, keyID := range rootRoles.KeyIDs {
// check that the key exists in the signed root keys map
key, ok := signedRoot.Signed.Keys[keyID]
if !ok {
logrus.Debugf("error while getting data for keyID: %s", keyID)
continue
}
// Decode all the x509 certificates that were bundled with this
// specific root key
decodedCerts, err := utils.LoadCertBundleFromPEM(key.Public())
if err != nil {
logrus.Debugf("error while parsing root certificate with keyID: %s, %v", keyID, err)
continue
}
// Get all non-CA certificates in the decoded certificates
leafCertList := utils.GetLeafCerts(decodedCerts)
// If we got no leaf certificates or we got more than one, fail
if len(leafCertList) != 1 {
logrus.Debugf("invalid chain due to leaf certificate missing or too many leaf certificates for keyID: %s", keyID)
continue
}
// If we found a leaf certificate, assert that the cert bundle started with a leaf
if decodedCerts[0].IsCA {
logrus.Debugf("invalid chain due to leaf certificate not being first certificate for keyID: %s", keyID)
continue
}
// Get the ID of the leaf certificate
leafCert := leafCertList[0]
// Store the leaf cert in the map
leafCerts[key.ID()] = leafCert
// Get all the remainder certificates marked as a CA to be used as intermediates
intermediateCerts := utils.GetIntermediateCerts(decodedCerts)
intCerts[key.ID()] = intermediateCerts
}
return leafCerts, intCerts
}

View file

@ -1,121 +0,0 @@
package trustpinning
import (
"crypto/x509"
"fmt"
"github.com/Sirupsen/logrus"
"github.com/docker/notary/tuf/utils"
"strings"
)
// TrustPinConfig represents the configuration under the trust_pinning section of the config file
// This struct represents the preferred way to bootstrap trust for this repository
type TrustPinConfig struct {
CA map[string]string
Certs map[string][]string
DisableTOFU bool
}
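// An illustrative configuration (paths and cert IDs below are hypothetical)
// showing the three pinning modes side by side: explicit certificate IDs for
// one GUN, a CA bundle pinned to a GUN prefix, and TOFU disabled so that any
// GUN matching neither entry fails closed:
//
//	TrustPinConfig{
//		Certs:       map[string][]string{"docker.io/library/alpine": {"abc123..."}},
//		CA:          map[string]string{"docker.io/library": "/certs/library-ca.pem"},
//		DisableTOFU: true,
//	}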
type trustPinChecker struct {
gun string
config TrustPinConfig
pinnedCAPool *x509.CertPool
pinnedCertIDs []string
}
// CertChecker is a function type that will be used to check leaf certs against pinned trust
type CertChecker func(leafCert *x509.Certificate, intCerts []*x509.Certificate) bool
// NewTrustPinChecker returns a new certChecker function from a TrustPinConfig for a GUN
func NewTrustPinChecker(trustPinConfig TrustPinConfig, gun string, firstBootstrap bool) (CertChecker, error) {
t := trustPinChecker{gun: gun, config: trustPinConfig}
// Determine the mode, and if it's even valid
if pinnedCerts, ok := trustPinConfig.Certs[gun]; ok {
logrus.Debugf("trust-pinning using Cert IDs")
t.pinnedCertIDs = pinnedCerts
return t.certsCheck, nil
}
if caFilepath, err := getPinnedCAFilepathByPrefix(gun, trustPinConfig); err == nil {
logrus.Debugf("trust-pinning using root CA bundle at: %s", caFilepath)
// Try to add the CA certs from its bundle file to our certificate store,
// and use it to validate certs in the root.json later
caCerts, err := utils.LoadCertBundleFromFile(caFilepath)
if err != nil {
return nil, fmt.Errorf("could not load root cert from CA path")
}
// Now only consider certificates that are direct children from this CA cert chain
caRootPool := x509.NewCertPool()
for _, caCert := range caCerts {
if err = utils.ValidateCertificate(caCert, true); err != nil {
logrus.Debugf("ignoring root CA certificate with CN %s in bundle: %s", caCert.Subject.CommonName, err)
continue
}
caRootPool.AddCert(caCert)
}
// If we didn't have any valid CA certs, error out
if len(caRootPool.Subjects()) == 0 {
return nil, fmt.Errorf("invalid CA certs provided")
}
t.pinnedCAPool = caRootPool
return t.caCheck, nil
}
// If TOFUs is disabled and we don't have any previous trusted root data for this GUN, we error out
if trustPinConfig.DisableTOFU && firstBootstrap {
return nil, fmt.Errorf("invalid trust pinning specified")
}
return t.tofusCheck, nil
}
func (t trustPinChecker) certsCheck(leafCert *x509.Certificate, intCerts []*x509.Certificate) bool {
// reconstruct the leaf + intermediate cert chain, which is bundled as {leaf, intermediates...},
// in order to get the matching id in the root file
key, err := utils.CertBundleToKey(leafCert, intCerts)
if err != nil {
logrus.Debug("error creating cert bundle: ", err.Error())
return false
}
return utils.StrSliceContains(t.pinnedCertIDs, key.ID())
}
func (t trustPinChecker) caCheck(leafCert *x509.Certificate, intCerts []*x509.Certificate) bool {
// Use intermediate certificates included in the root TUF metadata for our validation
caIntPool := x509.NewCertPool()
for _, intCert := range intCerts {
caIntPool.AddCert(intCert)
}
// Attempt to find a valid certificate chain from the leaf cert to CA root
// Use this certificate if such a valid chain exists (possibly using intermediates)
var err error
if _, err = leafCert.Verify(x509.VerifyOptions{Roots: t.pinnedCAPool, Intermediates: caIntPool}); err == nil {
return true
}
logrus.Debugf("unable to find a valid certificate chain from leaf cert to CA root: %s", err)
return false
}
func (t trustPinChecker) tofusCheck(leafCert *x509.Certificate, intCerts []*x509.Certificate) bool {
return true
}
// getPinnedCAFilepathByPrefix returns the CA filepath corresponding to the most
// specific (longest) entry in the map that is still a prefix of the provided
// gun. It returns an error if no entry matches this GUN as a prefix.
func getPinnedCAFilepathByPrefix(gun string, t TrustPinConfig) (string, error) {
specificGUN := ""
specificCAFilepath := ""
foundCA := false
for gunPrefix, caFilepath := range t.CA {
if strings.HasPrefix(gun, gunPrefix) && len(gunPrefix) >= len(specificGUN) {
specificGUN = gunPrefix
specificCAFilepath = caFilepath
foundCA = true
}
}
if !foundCA {
return "", fmt.Errorf("could not find pinned CA for GUN: %s\n", gun)
}
return specificCAFilepath, nil
}
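// exampleLongestPrefixMatch is an illustrative helper (not upstream code)
// showing the longest-prefix behaviour documented above: with CA entries for
// both "docker.io" and "docker.io/library", the GUN
// "docker.io/library/alpine" resolves to the more specific bundle.
func exampleLongestPrefixMatch() {
	config := TrustPinConfig{CA: map[string]string{
		"docker.io":         "/certs/docker-root-ca.pem",
		"docker.io/library": "/certs/library-ca.pem",
	}}
	path, err := getPinnedCAFilepathByPrefix("docker.io/library/alpine", config)
	fmt.Println(path, err) // prints "/certs/library-ca.pem <nil>"
}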

View file

@ -1,30 +0,0 @@
Copyright (c) 2015, Docker Inc.
Copyright (c) 2014-2015 Prime Directive, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Prime Directive, Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View file

@ -1,6 +0,0 @@
## Credits
This implementation was originally forked from [flynn/go-tuf](https://github.com/flynn/go-tuf).
It retains the same 3-Clause BSD license present on the original flynn
implementation.

View file

@ -1,710 +0,0 @@
package tuf
import (
"fmt"
"github.com/docker/go/canonical/json"
"github.com/docker/notary"
"github.com/docker/notary/trustpinning"
"github.com/docker/notary/tuf/data"
"github.com/docker/notary/tuf/signed"
"github.com/docker/notary/tuf/utils"
)
// ErrBuildDone is returned when any functions are called on RepoBuilder, and it
// is already finished building
var ErrBuildDone = fmt.Errorf(
"the builder has finished building and cannot accept any more input or produce any more output")
// ErrInvalidBuilderInput is returned when RepoBuilder.Load is called
// with the wrong type of metadata for the state that it's in
type ErrInvalidBuilderInput struct{ msg string }
func (e ErrInvalidBuilderInput) Error() string {
return e.msg
}
// ConsistentInfo is the consistent name and size of a role, or just the name
// of the role and a -1 if no file metadata for the role is known
type ConsistentInfo struct {
RoleName string
fileMeta data.FileMeta
}
// ChecksumKnown determines whether or not we know enough to provide a size and
// consistent name
func (c ConsistentInfo) ChecksumKnown() bool {
// empty hash, no size : this is the zero value
return len(c.fileMeta.Hashes) > 0 || c.fileMeta.Length != 0
}
// ConsistentName returns the consistent name (rolename.sha256) for the role
// given this consistent information
func (c ConsistentInfo) ConsistentName() string {
return utils.ConsistentName(c.RoleName, c.fileMeta.Hashes[notary.SHA256])
}
// Length returns the expected length of the role as per this consistent
// information - if no checksum information is known, the size is -1.
func (c ConsistentInfo) Length() int64 {
if c.ChecksumKnown() {
return c.fileMeta.Length
}
return -1
}
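// For instance (digest below is hypothetical), a snapshot role whose
// timestamp-recorded SHA-256 checksum is deadbeef... would yield roughly:
//
//	info.ConsistentName() // "snapshot.deadbeef..."
//	info.Length()         // the length recorded in the timestamp metadata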
// RepoBuilder is an interface for an object which builds a tuf.Repo
type RepoBuilder interface {
Load(roleName string, content []byte, minVersion int, allowExpired bool) error
GenerateSnapshot(prev *data.SignedSnapshot) ([]byte, int, error)
GenerateTimestamp(prev *data.SignedTimestamp) ([]byte, int, error)
Finish() (*Repo, *Repo, error)
BootstrapNewBuilder() RepoBuilder
BootstrapNewBuilderWithNewTrustpin(trustpin trustpinning.TrustPinConfig) RepoBuilder
// informative functions
IsLoaded(roleName string) bool
GetLoadedVersion(roleName string) int
GetConsistentInfo(roleName string) ConsistentInfo
}
// finishedBuilder refuses any more input or output
type finishedBuilder struct{}
func (f finishedBuilder) Load(roleName string, content []byte, minVersion int, allowExpired bool) error {
return ErrBuildDone
}
func (f finishedBuilder) GenerateSnapshot(prev *data.SignedSnapshot) ([]byte, int, error) {
return nil, 0, ErrBuildDone
}
func (f finishedBuilder) GenerateTimestamp(prev *data.SignedTimestamp) ([]byte, int, error) {
return nil, 0, ErrBuildDone
}
func (f finishedBuilder) Finish() (*Repo, *Repo, error) { return nil, nil, ErrBuildDone }
func (f finishedBuilder) BootstrapNewBuilder() RepoBuilder { return f }
func (f finishedBuilder) BootstrapNewBuilderWithNewTrustpin(trustpin trustpinning.TrustPinConfig) RepoBuilder {
return f
}
func (f finishedBuilder) IsLoaded(roleName string) bool { return false }
func (f finishedBuilder) GetLoadedVersion(roleName string) int { return 0 }
func (f finishedBuilder) GetConsistentInfo(roleName string) ConsistentInfo {
return ConsistentInfo{RoleName: roleName}
}
// NewRepoBuilder is the only way to get a pre-built RepoBuilder
func NewRepoBuilder(gun string, cs signed.CryptoService, trustpin trustpinning.TrustPinConfig) RepoBuilder {
return NewBuilderFromRepo(gun, NewRepo(cs), trustpin)
}
// NewBuilderFromRepo allows us to bootstrap a builder given existing repo data.
// YOU PROBABLY SHOULDN'T BE USING THIS OUTSIDE OF TESTING CODE!!!
func NewBuilderFromRepo(gun string, repo *Repo, trustpin trustpinning.TrustPinConfig) RepoBuilder {
return &repoBuilderWrapper{
RepoBuilder: &repoBuilder{
repo: repo,
invalidRoles: NewRepo(nil),
gun: gun,
trustpin: trustpin,
loadedNotChecksummed: make(map[string][]byte),
},
}
}
// repoBuilderWrapper embeds a repoBuilder, but once Finish is called, swaps
// the embed out with a finishedBuilder
type repoBuilderWrapper struct {
RepoBuilder
}
func (rbw *repoBuilderWrapper) Finish() (*Repo, *Repo, error) {
switch rbw.RepoBuilder.(type) {
case finishedBuilder:
return rbw.RepoBuilder.Finish()
default:
old := rbw.RepoBuilder
rbw.RepoBuilder = finishedBuilder{}
return old.Finish()
}
}
// repoBuilder actually builds a tuf.Repo
type repoBuilder struct {
repo *Repo
invalidRoles *Repo
// needed for root trust pinning verification
gun string
trustpin trustpinning.TrustPinConfig
// in case we load root and/or targets before snapshot and timestamp
// (or snapshot and not timestamp), so we know what to verify when the
// data with checksums comes in
loadedNotChecksummed map[string][]byte
// bootstrapped values to validate a new root
prevRoot *data.SignedRoot
bootstrappedRootChecksum *data.FileMeta
// for bootstrapping the next builder
nextRootChecksum *data.FileMeta
}
func (rb *repoBuilder) Finish() (*Repo, *Repo, error) {
return rb.repo, rb.invalidRoles, nil
}
func (rb *repoBuilder) BootstrapNewBuilder() RepoBuilder {
return &repoBuilderWrapper{RepoBuilder: &repoBuilder{
repo: NewRepo(rb.repo.cryptoService),
invalidRoles: NewRepo(nil),
gun: rb.gun,
loadedNotChecksummed: make(map[string][]byte),
trustpin: rb.trustpin,
prevRoot: rb.repo.Root,
bootstrappedRootChecksum: rb.nextRootChecksum,
}}
}
func (rb *repoBuilder) BootstrapNewBuilderWithNewTrustpin(trustpin trustpinning.TrustPinConfig) RepoBuilder {
return &repoBuilderWrapper{RepoBuilder: &repoBuilder{
repo: NewRepo(rb.repo.cryptoService),
gun: rb.gun,
loadedNotChecksummed: make(map[string][]byte),
trustpin: trustpin,
prevRoot: rb.repo.Root,
bootstrappedRootChecksum: rb.nextRootChecksum,
}}
}
// IsLoaded returns whether a particular role has already been loaded
func (rb *repoBuilder) IsLoaded(roleName string) bool {
switch roleName {
case data.CanonicalRootRole:
return rb.repo.Root != nil
case data.CanonicalSnapshotRole:
return rb.repo.Snapshot != nil
case data.CanonicalTimestampRole:
return rb.repo.Timestamp != nil
default:
return rb.repo.Targets[roleName] != nil
}
}
// GetLoadedVersion returns the metadata version, if it is loaded, or 1 (the
// minimum valid version number) otherwise
func (rb *repoBuilder) GetLoadedVersion(roleName string) int {
switch {
case roleName == data.CanonicalRootRole && rb.repo.Root != nil:
return rb.repo.Root.Signed.Version
case roleName == data.CanonicalSnapshotRole && rb.repo.Snapshot != nil:
return rb.repo.Snapshot.Signed.Version
case roleName == data.CanonicalTimestampRole && rb.repo.Timestamp != nil:
return rb.repo.Timestamp.Signed.Version
default:
if tgts, ok := rb.repo.Targets[roleName]; ok {
return tgts.Signed.Version
}
}
return 1
}
// GetConsistentInfo returns the consistent name and size of a role, if it is known,
// otherwise just the rolename and a -1 for size (both of which are inside a
// ConsistentInfo object)
func (rb *repoBuilder) GetConsistentInfo(roleName string) ConsistentInfo {
info := ConsistentInfo{RoleName: roleName} // starts out with unknown filemeta
switch roleName {
case data.CanonicalTimestampRole:
// we do not want to get a consistent timestamp, but we do want to
// limit its size
info.fileMeta.Length = notary.MaxTimestampSize
case data.CanonicalSnapshotRole:
if rb.repo.Timestamp != nil {
info.fileMeta = rb.repo.Timestamp.Signed.Meta[roleName]
}
case data.CanonicalRootRole:
switch {
case rb.bootstrappedRootChecksum != nil:
info.fileMeta = *rb.bootstrappedRootChecksum
case rb.repo.Snapshot != nil:
info.fileMeta = rb.repo.Snapshot.Signed.Meta[roleName]
}
default:
if rb.repo.Snapshot != nil {
info.fileMeta = rb.repo.Snapshot.Signed.Meta[roleName]
}
}
return info
}
func (rb *repoBuilder) Load(roleName string, content []byte, minVersion int, allowExpired bool) error {
if !data.ValidRole(roleName) {
return ErrInvalidBuilderInput{msg: fmt.Sprintf("%s is an invalid role", roleName)}
}
if rb.IsLoaded(roleName) {
return ErrInvalidBuilderInput{msg: fmt.Sprintf("%s has already been loaded", roleName)}
}
var err error
switch roleName {
case data.CanonicalRootRole:
break
case data.CanonicalTimestampRole, data.CanonicalSnapshotRole, data.CanonicalTargetsRole:
err = rb.checkPrereqsLoaded([]string{data.CanonicalRootRole})
default: // delegations
err = rb.checkPrereqsLoaded([]string{data.CanonicalRootRole, data.CanonicalTargetsRole})
}
if err != nil {
return err
}
switch roleName {
case data.CanonicalRootRole:
return rb.loadRoot(content, minVersion, allowExpired)
case data.CanonicalSnapshotRole:
return rb.loadSnapshot(content, minVersion, allowExpired)
case data.CanonicalTimestampRole:
return rb.loadTimestamp(content, minVersion, allowExpired)
case data.CanonicalTargetsRole:
return rb.loadTargets(content, minVersion, allowExpired)
default:
return rb.loadDelegation(roleName, content, minVersion, allowExpired)
}
}
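// exampleLoadOrder is an illustrative sketch (not upstream code) of the load
// order that the prerequisite checks below enforce: root must come first,
// timestamp/snapshot/targets each need root, and delegations additionally
// need targets. The metadata byte slices are assumed to come from a remote
// store or cache.
func exampleLoadOrder(rb RepoBuilder, rootJSON, snapJSON, targetsJSON []byte) error {
	for _, step := range []struct {
		role    string
		content []byte
	}{
		{data.CanonicalRootRole, rootJSON},
		{data.CanonicalSnapshotRole, snapJSON},
		{data.CanonicalTargetsRole, targetsJSON},
	} {
		if err := rb.Load(step.role, step.content, 1, false); err != nil {
			return err
		}
	}
	return nil
}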
func (rb *repoBuilder) checkPrereqsLoaded(prereqRoles []string) error {
for _, req := range prereqRoles {
if !rb.IsLoaded(req) {
return ErrInvalidBuilderInput{msg: fmt.Sprintf("%s must be loaded first", req)}
}
}
return nil
}
// GenerateSnapshot generates a new snapshot given a previous (optional) snapshot
// We can't just load the previous snapshot, because it may have been signed by a different
// snapshot key (maybe from a previous root version). Note that we need the root role and
// targets role to be loaded, because we need to generate metadata for both (and we need
// the root to be loaded so we can get the snapshot role to sign with)
func (rb *repoBuilder) GenerateSnapshot(prev *data.SignedSnapshot) ([]byte, int, error) {
switch {
case rb.repo.cryptoService == nil:
return nil, 0, ErrInvalidBuilderInput{msg: "cannot generate snapshot without a cryptoservice"}
case rb.IsLoaded(data.CanonicalSnapshotRole):
return nil, 0, ErrInvalidBuilderInput{msg: "snapshot has already been loaded"}
case rb.IsLoaded(data.CanonicalTimestampRole):
return nil, 0, ErrInvalidBuilderInput{msg: "cannot generate snapshot if timestamp has already been loaded"}
}
if err := rb.checkPrereqsLoaded([]string{data.CanonicalRootRole}); err != nil {
return nil, 0, err
}
// If there is no previous snapshot, we need to generate one, and so the targets must
// have already been loaded. Otherwise, so long as the previous snapshot structure is
// valid (it has a targets meta), we're good.
switch prev {
case nil:
if err := rb.checkPrereqsLoaded([]string{data.CanonicalTargetsRole}); err != nil {
return nil, 0, err
}
if err := rb.repo.InitSnapshot(); err != nil {
rb.repo.Snapshot = nil
return nil, 0, err
}
default:
if err := data.IsValidSnapshotStructure(prev.Signed); err != nil {
return nil, 0, err
}
rb.repo.Snapshot = prev
}
sgnd, err := rb.repo.SignSnapshot(data.DefaultExpires(data.CanonicalSnapshotRole))
if err != nil {
rb.repo.Snapshot = nil
return nil, 0, err
}
sgndJSON, err := json.Marshal(sgnd)
if err != nil {
rb.repo.Snapshot = nil
return nil, 0, err
}
// loadedNotChecksummed should currently contain the root awaiting checksumming,
// since it has to have been loaded. Since the snapshot was generated using
// the root and targets data (there may not be any) that have been loaded,
// remove all of them from rb.loadedNotChecksummed
for tgtName := range rb.repo.Targets {
delete(rb.loadedNotChecksummed, tgtName)
}
delete(rb.loadedNotChecksummed, data.CanonicalRootRole)
// The timestamp can't have been loaded yet, so we want to cache the snapshot
// bytes so we can validate the checksum when a timestamp gets generated or
// loaded later.
rb.loadedNotChecksummed[data.CanonicalSnapshotRole] = sgndJSON
return sgndJSON, rb.repo.Snapshot.Signed.Version, nil
}
// GenerateTimestamp generates a new timestamp given a previous (optional) timestamp
// We can't just load the previous timestamp, because it may have been signed by a different
// timestamp key (maybe from a previous root version)
func (rb *repoBuilder) GenerateTimestamp(prev *data.SignedTimestamp) ([]byte, int, error) {
switch {
case rb.repo.cryptoService == nil:
return nil, 0, ErrInvalidBuilderInput{msg: "cannot generate timestamp without a cryptoservice"}
case rb.IsLoaded(data.CanonicalTimestampRole):
return nil, 0, ErrInvalidBuilderInput{msg: "timestamp has already been loaded"}
}
// SignTimestamp always serializes the loaded snapshot and signs in the data, so we must always
// have the snapshot loaded first
if err := rb.checkPrereqsLoaded([]string{data.CanonicalRootRole, data.CanonicalSnapshotRole}); err != nil {
return nil, 0, err
}
switch prev {
case nil:
if err := rb.repo.InitTimestamp(); err != nil {
rb.repo.Timestamp = nil
return nil, 0, err
}
default:
if err := data.IsValidTimestampStructure(prev.Signed); err != nil {
return nil, 0, err
}
rb.repo.Timestamp = prev
}
sgnd, err := rb.repo.SignTimestamp(data.DefaultExpires(data.CanonicalTimestampRole))
if err != nil {
rb.repo.Timestamp = nil
return nil, 0, err
}
sgndJSON, err := json.Marshal(sgnd)
if err != nil {
rb.repo.Timestamp = nil
return nil, 0, err
}
// The snapshot should have been loaded (and not checksummed, since a timestamp
// cannot have been loaded), so it is awaiting checksumming. Since this
// timestamp was generated using the snapshot awaiting checksumming, we can
// remove it from rb.loadedNotChecksummed. There should be no other items
// awaiting checksumming now since loading/generating a snapshot should have
// cleared out everything else in `loadedNotChecksummed`.
delete(rb.loadedNotChecksummed, data.CanonicalSnapshotRole)
return sgndJSON, rb.repo.Timestamp.Signed.Version, nil
}
// loadRoot loads a root if one has not been loaded
func (rb *repoBuilder) loadRoot(content []byte, minVersion int, allowExpired bool) error {
roleName := data.CanonicalRootRole
signedObj, err := rb.bytesToSigned(content, data.CanonicalRootRole)
if err != nil {
return err
}
// ValidateRoot validates against the previous root's role, as well as validates that the root
// itself is self-consistent with its own signatures and thresholds.
// This assumes that ValidateRoot calls data.RootFromSigned, which validates
// the metadata, rather than just unmarshalling signedObject into a SignedRoot object itself.
signedRoot, err := trustpinning.ValidateRoot(rb.prevRoot, signedObj, rb.gun, rb.trustpin)
if err != nil {
return err
}
if err := signed.VerifyVersion(&(signedRoot.Signed.SignedCommon), minVersion); err != nil {
return err
}
if !allowExpired { // check must go at the end because all other validation should pass
if err := signed.VerifyExpiry(&(signedRoot.Signed.SignedCommon), roleName); err != nil {
return err
}
}
rootRole, err := signedRoot.BuildBaseRole(data.CanonicalRootRole)
if err != nil { // this should never happen since the root has been validated
return err
}
rb.repo.Root = signedRoot
rb.repo.originalRootRole = rootRole
return nil
}
func (rb *repoBuilder) loadTimestamp(content []byte, minVersion int, allowExpired bool) error {
roleName := data.CanonicalTimestampRole
timestampRole, err := rb.repo.Root.BuildBaseRole(roleName)
if err != nil { // this should never happen, since it's already been validated
return err
}
signedObj, err := rb.bytesToSignedAndValidateSigs(timestampRole, content)
if err != nil {
return err
}
signedTimestamp, err := data.TimestampFromSigned(signedObj)
if err != nil {
return err
}
if err := signed.VerifyVersion(&(signedTimestamp.Signed.SignedCommon), minVersion); err != nil {
return err
}
if !allowExpired { // check must go at the end because all other validation should pass
if err := signed.VerifyExpiry(&(signedTimestamp.Signed.SignedCommon), roleName); err != nil {
return err
}
}
if err := rb.validateChecksumsFromTimestamp(signedTimestamp); err != nil {
return err
}
rb.repo.Timestamp = signedTimestamp
return nil
}
func (rb *repoBuilder) loadSnapshot(content []byte, minVersion int, allowExpired bool) error {
roleName := data.CanonicalSnapshotRole
snapshotRole, err := rb.repo.Root.BuildBaseRole(roleName)
if err != nil { // this should never happen, since it's already been validated
return err
}
signedObj, err := rb.bytesToSignedAndValidateSigs(snapshotRole, content)
if err != nil {
return err
}
signedSnapshot, err := data.SnapshotFromSigned(signedObj)
if err != nil {
return err
}
if err := signed.VerifyVersion(&(signedSnapshot.Signed.SignedCommon), minVersion); err != nil {
return err
}
if !allowExpired { // check must go at the end because all other validation should pass
if err := signed.VerifyExpiry(&(signedSnapshot.Signed.SignedCommon), roleName); err != nil {
return err
}
}
// at this point, the only thing left to validate is existing checksums - we can use
// this snapshot to bootstrap the next builder if needed - and we don't need to do
// the 2-value assignment since we've already validated the signedSnapshot, which MUST
// have root metadata
rootMeta := signedSnapshot.Signed.Meta[data.CanonicalRootRole]
rb.nextRootChecksum = &rootMeta
if err := rb.validateChecksumsFromSnapshot(signedSnapshot); err != nil {
return err
}
rb.repo.Snapshot = signedSnapshot
return nil
}
func (rb *repoBuilder) loadTargets(content []byte, minVersion int, allowExpired bool) error {
roleName := data.CanonicalTargetsRole
targetsRole, err := rb.repo.Root.BuildBaseRole(roleName)
if err != nil { // this should never happen, since it's already been validated
return err
}
signedObj, err := rb.bytesToSignedAndValidateSigs(targetsRole, content)
if err != nil {
return err
}
signedTargets, err := data.TargetsFromSigned(signedObj, roleName)
if err != nil {
return err
}
if err := signed.VerifyVersion(&(signedTargets.Signed.SignedCommon), minVersion); err != nil {
return err
}
if !allowExpired { // check must go at the end because all other validation should pass
if err := signed.VerifyExpiry(&(signedTargets.Signed.SignedCommon), roleName); err != nil {
return err
}
}
signedTargets.Signatures = signedObj.Signatures
rb.repo.Targets[roleName] = signedTargets
return nil
}
func (rb *repoBuilder) loadDelegation(roleName string, content []byte, minVersion int, allowExpired bool) error {
delegationRole, err := rb.repo.GetDelegationRole(roleName)
if err != nil {
return err
}
// bytesToSigned checks checksum
signedObj, err := rb.bytesToSigned(content, roleName)
if err != nil {
return err
}
signedTargets, err := data.TargetsFromSigned(signedObj, roleName)
if err != nil {
return err
}
if err := signed.VerifyVersion(&(signedTargets.Signed.SignedCommon), minVersion); err != nil {
// don't capture in invalidRoles because the role we received is a rollback
return err
}
// verify signature
if err := signed.VerifySignatures(signedObj, delegationRole.BaseRole); err != nil {
rb.invalidRoles.Targets[roleName] = signedTargets
return err
}
if !allowExpired { // check must go at the end because all other validation should pass
if err := signed.VerifyExpiry(&(signedTargets.Signed.SignedCommon), roleName); err != nil {
rb.invalidRoles.Targets[roleName] = signedTargets
return err
}
}
signedTargets.Signatures = signedObj.Signatures
rb.repo.Targets[roleName] = signedTargets
return nil
}
func (rb *repoBuilder) validateChecksumsFromTimestamp(ts *data.SignedTimestamp) error {
sn, ok := rb.loadedNotChecksummed[data.CanonicalSnapshotRole]
if ok {
// by this point, the SignedTimestamp has been validated so it must have a snapshot hash
snMeta := ts.Signed.Meta[data.CanonicalSnapshotRole].Hashes
if err := data.CheckHashes(sn, data.CanonicalSnapshotRole, snMeta); err != nil {
return err
}
delete(rb.loadedNotChecksummed, data.CanonicalSnapshotRole)
}
return nil
}
func (rb *repoBuilder) validateChecksumsFromSnapshot(sn *data.SignedSnapshot) error {
var goodRoles []string
for roleName, loadedBytes := range rb.loadedNotChecksummed {
switch roleName {
case data.CanonicalSnapshotRole, data.CanonicalTimestampRole:
break
default:
if err := data.CheckHashes(loadedBytes, roleName, sn.Signed.Meta[roleName].Hashes); err != nil {
return err
}
goodRoles = append(goodRoles, roleName)
}
}
for _, roleName := range goodRoles {
delete(rb.loadedNotChecksummed, roleName)
}
return nil
}
func (rb *repoBuilder) validateChecksumFor(content []byte, roleName string) error {
// validate the bootstrap checksum for root, if provided
if roleName == data.CanonicalRootRole && rb.bootstrappedRootChecksum != nil {
if err := data.CheckHashes(content, roleName, rb.bootstrappedRootChecksum.Hashes); err != nil {
return err
}
}
// but we also want to cache the root content, so that when the snapshot is
// loaded it is validated (to make sure everything in the repo is self-consistent)
checksums := rb.getChecksumsFor(roleName)
if checksums != nil { // as opposed to empty, in which case hash check should fail
if err := data.CheckHashes(content, roleName, *checksums); err != nil {
return err
}
} else if roleName != data.CanonicalTimestampRole {
// timestamp is the only role which does not need to be checksummed, but
// for everything else, cache the contents in the list of roles that have
// not been checksummed by the snapshot/timestamp yet
rb.loadedNotChecksummed[roleName] = content
}
return nil
}
// Checksums the given bytes and, if they validate, converts them to a data.Signed object.
// If the checksums are nil (as opposed to empty), adds the bytes to the list of roles that
// haven't been checksummed (unless it's a timestamp, which has no checksum reference).
func (rb *repoBuilder) bytesToSigned(content []byte, roleName string) (*data.Signed, error) {
if err := rb.validateChecksumFor(content, roleName); err != nil {
return nil, err
}
// unmarshal to signed
signedObj := &data.Signed{}
if err := json.Unmarshal(content, signedObj); err != nil {
return nil, err
}
return signedObj, nil
}
func (rb *repoBuilder) bytesToSignedAndValidateSigs(role data.BaseRole, content []byte) (*data.Signed, error) {
signedObj, err := rb.bytesToSigned(content, role.Name)
if err != nil {
return nil, err
}
// verify signature
if err := signed.VerifySignatures(signedObj, role); err != nil {
return nil, err
}
return signedObj, nil
}
// If the checksum reference (the loaded timestamp for the snapshot role, and
// the loaded snapshot for every other role except timestamp and snapshot) is nil,
// then return nil for the checksums, meaning that the checksum is not yet
// available. If the checksum reference *is* loaded, then always returns the
// Hashes object for the given role - if it doesn't exist, returns an empty Hash
// object (against which any checksum validation would fail).
func (rb *repoBuilder) getChecksumsFor(role string) *data.Hashes {
var hashes data.Hashes
switch role {
case data.CanonicalTimestampRole:
return nil
case data.CanonicalSnapshotRole:
if rb.repo.Timestamp == nil {
return nil
}
hashes = rb.repo.Timestamp.Signed.Meta[data.CanonicalSnapshotRole].Hashes
default:
if rb.repo.Snapshot == nil {
return nil
}
hashes = rb.repo.Snapshot.Signed.Meta[role].Hashes
}
return &hashes
}

View file

@ -1,53 +0,0 @@
package data
import "fmt"
// ErrInvalidMetadata is the error to be returned when metadata is invalid
type ErrInvalidMetadata struct {
role string
msg string
}
func (e ErrInvalidMetadata) Error() string {
return fmt.Sprintf("%s type metadata invalid: %s", e.role, e.msg)
}
// ErrMissingMeta - couldn't find the FileMeta object for the given Role, or
// the FileMeta object contained no supported checksums
type ErrMissingMeta struct {
Role string
}
func (e ErrMissingMeta) Error() string {
return fmt.Sprintf("no checksums for supported algorithms were provided for %s", e.Role)
}
// ErrInvalidChecksum is the error to be returned when checksum is invalid
type ErrInvalidChecksum struct {
alg string
}
func (e ErrInvalidChecksum) Error() string {
return fmt.Sprintf("%s checksum invalid", e.alg)
}
// ErrMismatchedChecksum is the error to be returned when checksum is mismatched
type ErrMismatchedChecksum struct {
alg string
name string
expected string
}
func (e ErrMismatchedChecksum) Error() string {
return fmt.Sprintf("%s checksum for %s did not match: expected %s", e.alg, e.name,
e.expected)
}
// ErrCertExpired is the error to be returned when a certificate has expired
type ErrCertExpired struct {
CN string
}
func (e ErrCertExpired) Error() string {
return fmt.Sprintf("certificate with CN %s is expired", e.CN)
}

@ -1,528 +0,0 @@
package data
import (
"crypto"
"crypto/ecdsa"
"crypto/rsa"
"crypto/sha256"
"crypto/x509"
"encoding/asn1"
"encoding/hex"
"errors"
"io"
"math/big"
"github.com/Sirupsen/logrus"
"github.com/agl/ed25519"
"github.com/docker/go/canonical/json"
)
// PublicKey is the necessary interface for public keys
type PublicKey interface {
ID() string
Algorithm() string
Public() []byte
}
// PrivateKey adds the ability to access the private key
type PrivateKey interface {
PublicKey
Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) (signature []byte, err error)
Private() []byte
CryptoSigner() crypto.Signer
SignatureAlgorithm() SigAlgorithm
}
// KeyPair holds the public and private key bytes
type KeyPair struct {
Public []byte `json:"public"`
Private []byte `json:"private"`
}
// Keys represents a map of key ID to PublicKey object. It's necessary
// to allow us to unmarshal into an interface via the json.Unmarshaller
// interface
type Keys map[string]PublicKey
// UnmarshalJSON implements the json.Unmarshaller interface
func (ks *Keys) UnmarshalJSON(data []byte) error {
parsed := make(map[string]TUFKey)
err := json.Unmarshal(data, &parsed)
if err != nil {
return err
}
final := make(map[string]PublicKey)
for k, tk := range parsed {
final[k] = typedPublicKey(tk)
}
*ks = final
return nil
}
// KeyList represents a list of keys
type KeyList []PublicKey
// UnmarshalJSON implements the json.Unmarshaller interface
func (ks *KeyList) UnmarshalJSON(data []byte) error {
parsed := make([]TUFKey, 0, 1)
err := json.Unmarshal(data, &parsed)
if err != nil {
return err
}
final := make([]PublicKey, 0, len(parsed))
for _, tk := range parsed {
final = append(final, typedPublicKey(tk))
}
*ks = final
return nil
}
// IDs generates a list of the hex encoded key IDs in the KeyList
func (ks KeyList) IDs() []string {
keyIDs := make([]string, 0, len(ks))
for _, k := range ks {
keyIDs = append(keyIDs, k.ID())
}
return keyIDs
}
func typedPublicKey(tk TUFKey) PublicKey {
switch tk.Algorithm() {
case ECDSAKey:
return &ECDSAPublicKey{TUFKey: tk}
case ECDSAx509Key:
return &ECDSAx509PublicKey{TUFKey: tk}
case RSAKey:
return &RSAPublicKey{TUFKey: tk}
case RSAx509Key:
return &RSAx509PublicKey{TUFKey: tk}
case ED25519Key:
return &ED25519PublicKey{TUFKey: tk}
}
return &UnknownPublicKey{TUFKey: tk}
}
func typedPrivateKey(tk TUFKey) (PrivateKey, error) {
private := tk.Value.Private
tk.Value.Private = nil
switch tk.Algorithm() {
case ECDSAKey:
return NewECDSAPrivateKey(
&ECDSAPublicKey{
TUFKey: tk,
},
private,
)
case ECDSAx509Key:
return NewECDSAPrivateKey(
&ECDSAx509PublicKey{
TUFKey: tk,
},
private,
)
case RSAKey:
return NewRSAPrivateKey(
&RSAPublicKey{
TUFKey: tk,
},
private,
)
case RSAx509Key:
return NewRSAPrivateKey(
&RSAx509PublicKey{
TUFKey: tk,
},
private,
)
case ED25519Key:
return NewED25519PrivateKey(
ED25519PublicKey{
TUFKey: tk,
},
private,
)
}
return &UnknownPrivateKey{
TUFKey: tk,
privateKey: privateKey{private: private},
}, nil
}
// NewPublicKey creates a new, correctly typed PublicKey, using the
// UnknownPublicKey catchall for unsupported ciphers
func NewPublicKey(alg string, public []byte) PublicKey {
tk := TUFKey{
Type: alg,
Value: KeyPair{
Public: public,
},
}
return typedPublicKey(tk)
}
// NewPrivateKey creates a new, correctly typed PrivateKey, using the
// UnknownPrivateKey catchall for unsupported ciphers
func NewPrivateKey(pubKey PublicKey, private []byte) (PrivateKey, error) {
tk := TUFKey{
Type: pubKey.Algorithm(),
Value: KeyPair{
Public: pubKey.Public(),
Private: private, // typedPrivateKey moves this value
},
}
return typedPrivateKey(tk)
}
// UnmarshalPublicKey is used to parse individual public keys in JSON
func UnmarshalPublicKey(data []byte) (PublicKey, error) {
var parsed TUFKey
err := json.Unmarshal(data, &parsed)
if err != nil {
return nil, err
}
return typedPublicKey(parsed), nil
}
// UnmarshalPrivateKey is used to parse individual private keys in JSON
func UnmarshalPrivateKey(data []byte) (PrivateKey, error) {
var parsed TUFKey
err := json.Unmarshal(data, &parsed)
if err != nil {
return nil, err
}
return typedPrivateKey(parsed)
}
// TUFKey is the structure used for both public and private keys in TUF.
// Normally it would make sense to use different structures for public and
// private keys, but that would change the key ID algorithm (since the canonical
// JSON would be different). This structure should normally be accessed through
// the PublicKey or PrivateKey interfaces.
type TUFKey struct {
id string
Type string `json:"keytype"`
Value KeyPair `json:"keyval"`
}
// Algorithm returns the algorithm of the key
func (k TUFKey) Algorithm() string {
return k.Type
}
// ID generates the ID of the key if necessary, caching it for subsequent calls
func (k *TUFKey) ID() string {
if k.id == "" {
pubK := TUFKey{
Type: k.Algorithm(),
Value: KeyPair{
Public: k.Public(),
Private: nil,
},
}
data, err := json.MarshalCanonical(&pubK)
if err != nil {
logrus.Error("Error generating key ID:", err)
}
digest := sha256.Sum256(data)
k.id = hex.EncodeToString(digest[:])
}
return k.id
}
// Public returns the public bytes
func (k TUFKey) Public() []byte {
return k.Value.Public
}
// Public key types
// ECDSAPublicKey represents an ECDSA key using a raw serialization
// of the public key
type ECDSAPublicKey struct {
TUFKey
}
// ECDSAx509PublicKey represents an ECDSA key using an x509 cert
// as the serialized format of the public key
type ECDSAx509PublicKey struct {
TUFKey
}
// RSAPublicKey represents an RSA key using a raw serialization
// of the public key
type RSAPublicKey struct {
TUFKey
}
// RSAx509PublicKey represents an RSA key using an x509 cert
// as the serialized format of the public key
type RSAx509PublicKey struct {
TUFKey
}
// ED25519PublicKey represents an ED25519 key using a raw serialization
// of the public key
type ED25519PublicKey struct {
TUFKey
}
// UnknownPublicKey is a catchall for key types that are not supported
type UnknownPublicKey struct {
TUFKey
}
// NewECDSAPublicKey initializes a new public key with the ECDSAKey type
func NewECDSAPublicKey(public []byte) *ECDSAPublicKey {
return &ECDSAPublicKey{
TUFKey: TUFKey{
Type: ECDSAKey,
Value: KeyPair{
Public: public,
Private: nil,
},
},
}
}
// NewECDSAx509PublicKey initializes a new public key with the ECDSAx509Key type
func NewECDSAx509PublicKey(public []byte) *ECDSAx509PublicKey {
return &ECDSAx509PublicKey{
TUFKey: TUFKey{
Type: ECDSAx509Key,
Value: KeyPair{
Public: public,
Private: nil,
},
},
}
}
// NewRSAPublicKey initializes a new public key with the RSA type
func NewRSAPublicKey(public []byte) *RSAPublicKey {
return &RSAPublicKey{
TUFKey: TUFKey{
Type: RSAKey,
Value: KeyPair{
Public: public,
Private: nil,
},
},
}
}
// NewRSAx509PublicKey initializes a new public key with the RSAx509Key type
func NewRSAx509PublicKey(public []byte) *RSAx509PublicKey {
return &RSAx509PublicKey{
TUFKey: TUFKey{
Type: RSAx509Key,
Value: KeyPair{
Public: public,
Private: nil,
},
},
}
}
// NewED25519PublicKey initializes a new public key with the ED25519Key type
func NewED25519PublicKey(public []byte) *ED25519PublicKey {
return &ED25519PublicKey{
TUFKey: TUFKey{
Type: ED25519Key,
Value: KeyPair{
Public: public,
Private: nil,
},
},
}
}
// Private key types
type privateKey struct {
private []byte
}
type signer struct {
signer crypto.Signer
}
// ECDSAPrivateKey represents a private ECDSA key
type ECDSAPrivateKey struct {
PublicKey
privateKey
signer
}
// RSAPrivateKey represents a private RSA key
type RSAPrivateKey struct {
PublicKey
privateKey
signer
}
// ED25519PrivateKey represents a private ED25519 key
type ED25519PrivateKey struct {
ED25519PublicKey
privateKey
}
// UnknownPrivateKey is a catchall for unsupported key types
type UnknownPrivateKey struct {
TUFKey
privateKey
}
// NewECDSAPrivateKey initializes a new ECDSA private key
func NewECDSAPrivateKey(public PublicKey, private []byte) (*ECDSAPrivateKey, error) {
switch public.(type) {
case *ECDSAPublicKey, *ECDSAx509PublicKey:
default:
return nil, errors.New("Invalid public key type provided to NewECDSAPrivateKey")
}
ecdsaPrivKey, err := x509.ParseECPrivateKey(private)
if err != nil {
return nil, err
}
return &ECDSAPrivateKey{
PublicKey: public,
privateKey: privateKey{private: private},
signer: signer{signer: ecdsaPrivKey},
}, nil
}
// NewRSAPrivateKey initializes a new RSA private key
func NewRSAPrivateKey(public PublicKey, private []byte) (*RSAPrivateKey, error) {
switch public.(type) {
case *RSAPublicKey, *RSAx509PublicKey:
default:
return nil, errors.New("Invalid public key type provided to NewRSAPrivateKey")
}
rsaPrivKey, err := x509.ParsePKCS1PrivateKey(private)
if err != nil {
return nil, err
}
return &RSAPrivateKey{
PublicKey: public,
privateKey: privateKey{private: private},
signer: signer{signer: rsaPrivKey},
}, nil
}
// NewED25519PrivateKey initializes a new ED25519 private key
func NewED25519PrivateKey(public ED25519PublicKey, private []byte) (*ED25519PrivateKey, error) {
return &ED25519PrivateKey{
ED25519PublicKey: public,
privateKey: privateKey{private: private},
}, nil
}
// Private returns the serialized private bytes of the key
func (k privateKey) Private() []byte {
return k.private
}
// CryptoSigner returns the underlying crypto.Signer for use cases where we need the default
// signature or public key functionality (like when we generate certificates)
func (s signer) CryptoSigner() crypto.Signer {
return s.signer
}
// CryptoSigner returns nil for an ED25519PrivateKey, which signs directly via Sign rather than through a crypto.Signer
func (k ED25519PrivateKey) CryptoSigner() crypto.Signer {
return nil
}
// CryptoSigner returns nil for an UnknownPrivateKey, since no crypto.Signer is available for unsupported key types
func (k UnknownPrivateKey) CryptoSigner() crypto.Signer {
return nil
}
type ecdsaSig struct {
R *big.Int
S *big.Int
}
// Sign creates an ecdsa signature
func (k ECDSAPrivateKey) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) (signature []byte, err error) {
ecdsaPrivKey, ok := k.CryptoSigner().(*ecdsa.PrivateKey)
if !ok {
return nil, errors.New("Signer was based on the wrong key type")
}
hashed := sha256.Sum256(msg)
sigASN1, err := ecdsaPrivKey.Sign(rand, hashed[:], opts)
if err != nil {
return nil, err
}
sig := ecdsaSig{}
_, err = asn1.Unmarshal(sigASN1, &sig)
if err != nil {
return nil, err
}
rBytes, sBytes := sig.R.Bytes(), sig.S.Bytes()
octetLength := (ecdsaPrivKey.Params().BitSize + 7) >> 3
// MUST include leading zeros in the output
rBuf := make([]byte, octetLength-len(rBytes), octetLength)
sBuf := make([]byte, octetLength-len(sBytes), octetLength)
rBuf = append(rBuf, rBytes...)
sBuf = append(sBuf, sBytes...)
return append(rBuf, sBuf...), nil
}
// Sign creates an rsa signature
func (k RSAPrivateKey) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) (signature []byte, err error) {
hashed := sha256.Sum256(msg)
if opts == nil {
opts = &rsa.PSSOptions{
SaltLength: rsa.PSSSaltLengthEqualsHash,
Hash: crypto.SHA256,
}
}
return k.CryptoSigner().Sign(rand, hashed[:], opts)
}
// Sign creates an ed25519 signature
func (k ED25519PrivateKey) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) (signature []byte, err error) {
priv := [ed25519.PrivateKeySize]byte{}
copy(priv[:], k.private[ed25519.PublicKeySize:])
return ed25519.Sign(&priv, msg)[:], nil
}
// Sign on an UnknownPrivateKey raises an error because the client does not
// know how to sign with this key type.
func (k UnknownPrivateKey) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) (signature []byte, err error) {
return nil, errors.New("Unknown key type, cannot sign.")
}
// SignatureAlgorithm returns the SigAlgorithm for a ECDSAPrivateKey
func (k ECDSAPrivateKey) SignatureAlgorithm() SigAlgorithm {
return ECDSASignature
}
// SignatureAlgorithm returns the SigAlgorithm for a RSAPrivateKey
func (k RSAPrivateKey) SignatureAlgorithm() SigAlgorithm {
return RSAPSSSignature
}
// SignatureAlgorithm returns the SigAlgorithm for a ED25519PrivateKey
func (k ED25519PrivateKey) SignatureAlgorithm() SigAlgorithm {
return EDDSASignature
}
// SignatureAlgorithm returns the SigAlgorithm for an UnknownPrivateKey
func (k UnknownPrivateKey) SignatureAlgorithm() SigAlgorithm {
return ""
}
// PublicKeyFromPrivate returns a new TUFKey based on a private key, with
// the private key bytes guaranteed to be nil.
func PublicKeyFromPrivate(pk PrivateKey) PublicKey {
return typedPublicKey(TUFKey{
Type: pk.Algorithm(),
Value: KeyPair{
Public: pk.Public(),
Private: nil,
},
})
}
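
A usage sketch (not part of the vendored file; the raw key bytes are placeholders, and it assumes this package is imported as github.com/docker/notary/tuf/data): wrapping raw bytes in a typed key, deriving the key ID, and round-tripping through JSON.

package main

import (
	"fmt"

	"github.com/docker/go/canonical/json"
	"github.com/docker/notary/tuf/data"
)

func main() {
	// Wrap placeholder bytes as an ed25519 public key (not a real key).
	pub := data.NewED25519PublicKey([]byte("stand-in-public-key-bytes"))
	fmt.Println(pub.Algorithm()) // "ed25519"
	fmt.Println(pub.ID())        // sha256 of the canonical JSON of the public portion

	// Round-trip through JSON: the key comes back correctly typed with the same ID.
	raw, err := json.Marshal(pub)
	if err != nil {
		panic(err)
	}
	parsed, err := data.UnmarshalPublicKey(raw)
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed.ID() == pub.ID()) // true
}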

@ -1,338 +0,0 @@
package data
import (
"fmt"
"path"
"regexp"
"strings"
"github.com/Sirupsen/logrus"
)
// Canonical base role names
const (
CanonicalRootRole = "root"
CanonicalTargetsRole = "targets"
CanonicalSnapshotRole = "snapshot"
CanonicalTimestampRole = "timestamp"
)
// BaseRoles is an easy to iterate list of the top level
// roles.
var BaseRoles = []string{
CanonicalRootRole,
CanonicalTargetsRole,
CanonicalSnapshotRole,
CanonicalTimestampRole,
}
// Regex for validating delegation names
var delegationRegexp = regexp.MustCompile("^[-a-z0-9_/]+$")
// ErrNoSuchRole indicates the role doesn't exist
type ErrNoSuchRole struct {
Role string
}
func (e ErrNoSuchRole) Error() string {
return fmt.Sprintf("role does not exist: %s", e.Role)
}
// ErrInvalidRole represents an error regarding a role. Typically
// something like a role for which some of the public keys were
// not found in the TUF repo.
type ErrInvalidRole struct {
Role string
Reason string
}
func (e ErrInvalidRole) Error() string {
if e.Reason != "" {
return fmt.Sprintf("tuf: invalid role %s. %s", e.Role, e.Reason)
}
return fmt.Sprintf("tuf: invalid role %s.", e.Role)
}
// ValidRole only determines whether the name is semantically
// correct. For delegated target roles, it does NOT check
// that the appropriate parent roles exist.
func ValidRole(name string) bool {
if IsDelegation(name) {
return true
}
for _, v := range BaseRoles {
if name == v {
return true
}
}
return false
}
// IsDelegation checks if the role name is a well-formed delegation under targets/ (as opposed to a base role)
func IsDelegation(role string) bool {
targetsBase := CanonicalTargetsRole + "/"
whitelistedChars := delegationRegexp.MatchString(role)
// Limit size of full role string to 255 chars for db column size limit
correctLength := len(role) < 256
// Removes ., .., extra slashes, and trailing slash
isClean := path.Clean(role) == role
return strings.HasPrefix(role, targetsBase) &&
whitelistedChars &&
correctLength &&
isClean
}
// IsBaseRole checks if the role is a base role
func IsBaseRole(role string) bool {
for _, baseRole := range BaseRoles {
if role == baseRole {
return true
}
}
return false
}
// IsWildDelegation determines if a role represents a valid wildcard delegation
// path, i.e. targets/*, targets/foo/*.
// The wildcard may only appear as the final part of the delegation and must
// be a whole segment, i.e. targets/foo* is not a valid wildcard delegation.
func IsWildDelegation(role string) bool {
if path.Clean(role) != role {
return false
}
base := path.Dir(role)
if !(IsDelegation(base) || base == CanonicalTargetsRole) {
return false
}
return role[len(role)-2:] == "/*"
}
// BaseRole is an internal representation of a root/targets/snapshot/timestamp role, with its public keys included
type BaseRole struct {
Keys map[string]PublicKey
Name string
Threshold int
}
// NewBaseRole creates a new BaseRole object with the provided parameters
func NewBaseRole(name string, threshold int, keys ...PublicKey) BaseRole {
r := BaseRole{
Name: name,
Threshold: threshold,
Keys: make(map[string]PublicKey),
}
for _, k := range keys {
r.Keys[k.ID()] = k
}
return r
}
// ListKeys retrieves the public keys valid for this role
func (b BaseRole) ListKeys() KeyList {
return listKeys(b.Keys)
}
// ListKeyIDs retrieves the list of key IDs valid for this role
func (b BaseRole) ListKeyIDs() []string {
return listKeyIDs(b.Keys)
}
// Equals returns whether this BaseRole equals another BaseRole
func (b BaseRole) Equals(o BaseRole) bool {
if b.Threshold != o.Threshold || b.Name != o.Name || len(b.Keys) != len(o.Keys) {
return false
}
for keyID, key := range b.Keys {
oKey, ok := o.Keys[keyID]
if !ok || key.ID() != oKey.ID() {
return false
}
}
return true
}
// DelegationRole is an internal representation of a delegation role, with its public keys included
type DelegationRole struct {
BaseRole
Paths []string
}
func listKeys(keyMap map[string]PublicKey) KeyList {
keys := KeyList{}
for _, key := range keyMap {
keys = append(keys, key)
}
return keys
}
func listKeyIDs(keyMap map[string]PublicKey) []string {
keyIDs := []string{}
for id := range keyMap {
keyIDs = append(keyIDs, id)
}
return keyIDs
}
// Restrict restricts the paths and path hash prefixes for the passed in delegation role,
// returning a copy of the role with validated paths as if it was a direct child
func (d DelegationRole) Restrict(child DelegationRole) (DelegationRole, error) {
if !d.IsParentOf(child) {
return DelegationRole{}, fmt.Errorf("%s is not a parent of %s", d.Name, child.Name)
}
return DelegationRole{
BaseRole: BaseRole{
Keys: child.Keys,
Name: child.Name,
Threshold: child.Threshold,
},
Paths: RestrictDelegationPathPrefixes(d.Paths, child.Paths),
}, nil
}
// IsParentOf returns whether the passed in delegation role is the direct child of this role,
// determined by delegation name.
// Ex: targets/a is a direct parent of targets/a/b, but targets/a is not a direct parent of targets/a/b/c
func (d DelegationRole) IsParentOf(child DelegationRole) bool {
return path.Dir(child.Name) == d.Name
}
// CheckPaths checks if a given path is valid for the role
func (d DelegationRole) CheckPaths(path string) bool {
return checkPaths(path, d.Paths)
}
func checkPaths(path string, permitted []string) bool {
for _, p := range permitted {
if strings.HasPrefix(path, p) {
return true
}
}
return false
}
// RestrictDelegationPathPrefixes returns the list of valid delegationPaths that are prefixed by parentPaths
func RestrictDelegationPathPrefixes(parentPaths, delegationPaths []string) []string {
validPaths := []string{}
if len(delegationPaths) == 0 {
return validPaths
}
// Validate each individual delegation path
for _, delgPath := range delegationPaths {
isPrefixed := false
for _, parentPath := range parentPaths {
if strings.HasPrefix(delgPath, parentPath) {
isPrefixed = true
break
}
}
// Keep the delegation path only if it was prefixed by at least one parent path
if isPrefixed {
validPaths = append(validPaths, delgPath)
}
}
return validPaths
}
// RootRole is a cut-down role as it appears in root.json.
// Eventually it should only be used immediately before and after serialization/deserialization.
type RootRole struct {
KeyIDs []string `json:"keyids"`
Threshold int `json:"threshold"`
}
// Role is a more verbose role as it appears in targets delegations.
// Eventually it should only be used immediately before and after serialization/deserialization.
type Role struct {
RootRole
Name string `json:"name"`
Paths []string `json:"paths,omitempty"`
}
// NewRole creates a new Role object from the given parameters
func NewRole(name string, threshold int, keyIDs, paths []string) (*Role, error) {
if IsDelegation(name) {
if len(paths) == 0 {
logrus.Debugf("role %s with no Paths will never be able to publish content until one or more are added", name)
}
}
if threshold < 1 {
return nil, ErrInvalidRole{Role: name}
}
if !ValidRole(name) {
return nil, ErrInvalidRole{Role: name}
}
return &Role{
RootRole: RootRole{
KeyIDs: keyIDs,
Threshold: threshold,
},
Name: name,
Paths: paths,
}, nil
}
// CheckPaths checks if a given path is valid for the role
func (r Role) CheckPaths(path string) bool {
return checkPaths(path, r.Paths)
}
// AddKeys merges the ids into the current list of role key ids
func (r *Role) AddKeys(ids []string) {
r.KeyIDs = mergeStrSlices(r.KeyIDs, ids)
}
// AddPaths merges the paths into the current list of role paths
func (r *Role) AddPaths(paths []string) error {
if len(paths) == 0 {
return nil
}
r.Paths = mergeStrSlices(r.Paths, paths)
return nil
}
// RemoveKeys removes the ids from the current list of key ids
func (r *Role) RemoveKeys(ids []string) {
r.KeyIDs = subtractStrSlices(r.KeyIDs, ids)
}
// RemovePaths removes the paths from the current list of role paths
func (r *Role) RemovePaths(paths []string) {
r.Paths = subtractStrSlices(r.Paths, paths)
}
func mergeStrSlices(orig, new []string) []string {
have := make(map[string]bool)
for _, e := range orig {
have[e] = true
}
merged := make([]string, len(orig), len(orig)+len(new))
copy(merged, orig)
for _, e := range new {
if !have[e] {
merged = append(merged, e)
}
}
return merged
}
func subtractStrSlices(orig, remove []string) []string {
kill := make(map[string]bool)
for _, e := range remove {
kill[e] = true
}
var keep []string
for _, e := range orig {
if !kill[e] {
keep = append(keep, e)
}
}
return keep
}
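
A brief sketch of how the role-name helpers behave (illustrative only; the role name, key ID, and path are made up):

package main

import (
	"fmt"

	"github.com/docker/notary/tuf/data"
)

func main() {
	fmt.Println(data.IsDelegation("targets/releases"))  // true: well-formed name under targets/
	fmt.Println(data.IsDelegation("targets"))           // false: base role, not a delegation
	fmt.Println(data.IsWildDelegation("targets/foo/*")) // true: wildcard is a whole segment
	fmt.Println(data.IsWildDelegation("targets/foo*"))  // false: partial-segment wildcard

	// NewRole validates the name and threshold; the key ID and path are placeholders.
	role, err := data.NewRole("targets/releases", 1, []string{"abc123"}, []string{"releases/"})
	if err != nil {
		panic(err)
	}
	fmt.Println(role.CheckPaths("releases/app-1.0")) // true: prefix match against role paths
}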

@ -1,171 +0,0 @@
package data
import (
"fmt"
"github.com/docker/go/canonical/json"
)
// SignedRoot is a fully unpacked root.json
type SignedRoot struct {
Signatures []Signature
Signed Root
Dirty bool
}
// Root is the Signed component of a root.json
type Root struct {
SignedCommon
Keys Keys `json:"keys"`
Roles map[string]*RootRole `json:"roles"`
ConsistentSnapshot bool `json:"consistent_snapshot"`
}
// isValidRootStructure returns an error, or nil, depending on whether the content of the struct
// is valid for root metadata. This does not check signatures or expiry, just that
// the metadata content is valid.
func isValidRootStructure(r Root) error {
expectedType := TUFTypes[CanonicalRootRole]
if r.Type != expectedType {
return ErrInvalidMetadata{
role: CanonicalRootRole, msg: fmt.Sprintf("expected type %s, not %s", expectedType, r.Type)}
}
if r.Version < 1 {
return ErrInvalidMetadata{
role: CanonicalRootRole, msg: "version cannot be less than 1"}
}
// all the base roles MUST appear in the root.json - other roles are allowed,
// but other than the mirror role (not currently supported) are out of spec
for _, roleName := range BaseRoles {
roleObj, ok := r.Roles[roleName]
if !ok || roleObj == nil {
return ErrInvalidMetadata{
role: CanonicalRootRole, msg: fmt.Sprintf("missing %s role specification", roleName)}
}
if err := isValidRootRoleStructure(CanonicalRootRole, roleName, *roleObj, r.Keys); err != nil {
return err
}
}
return nil
}
func isValidRootRoleStructure(metaContainingRole, rootRoleName string, r RootRole, validKeys Keys) error {
if r.Threshold < 1 {
return ErrInvalidMetadata{
role: metaContainingRole,
msg: fmt.Sprintf("invalid threshold specified for %s: %v ", rootRoleName, r.Threshold),
}
}
for _, keyID := range r.KeyIDs {
if _, ok := validKeys[keyID]; !ok {
return ErrInvalidMetadata{
role: metaContainingRole,
msg: fmt.Sprintf("key ID %s specified in %s without corresponding key", keyID, rootRoleName),
}
}
}
return nil
}
// NewRoot initializes a new SignedRoot with a set of keys, roles, and the consistent flag
func NewRoot(keys map[string]PublicKey, roles map[string]*RootRole, consistent bool) (*SignedRoot, error) {
signedRoot := &SignedRoot{
Signatures: make([]Signature, 0),
Signed: Root{
SignedCommon: SignedCommon{
Type: TUFTypes[CanonicalRootRole],
Version: 0,
Expires: DefaultExpires(CanonicalRootRole),
},
Keys: keys,
Roles: roles,
ConsistentSnapshot: consistent,
},
Dirty: true,
}
return signedRoot, nil
}
// BuildBaseRole returns a copy of a BaseRole using the information in this SignedRoot for the specified role name.
// Will error for invalid role name or key metadata within this SignedRoot
func (r SignedRoot) BuildBaseRole(roleName string) (BaseRole, error) {
roleData, ok := r.Signed.Roles[roleName]
if !ok {
return BaseRole{}, ErrInvalidRole{Role: roleName, Reason: "role not found in root file"}
}
// Get all public keys for the base role from TUF metadata
keyIDs := roleData.KeyIDs
pubKeys := make(map[string]PublicKey)
for _, keyID := range keyIDs {
pubKey, ok := r.Signed.Keys[keyID]
if !ok {
return BaseRole{}, ErrInvalidRole{
Role: roleName,
Reason: fmt.Sprintf("key with ID %s was not found in root metadata", keyID),
}
}
pubKeys[keyID] = pubKey
}
return BaseRole{
Name: roleName,
Keys: pubKeys,
Threshold: roleData.Threshold,
}, nil
}
// ToSigned partially serializes a SignedRoot for further signing
func (r SignedRoot) ToSigned() (*Signed, error) {
s, err := defaultSerializer.MarshalCanonical(r.Signed)
if err != nil {
return nil, err
}
// cast into a json.RawMessage
signed := json.RawMessage{}
err = signed.UnmarshalJSON(s)
if err != nil {
return nil, err
}
sigs := make([]Signature, len(r.Signatures))
copy(sigs, r.Signatures)
return &Signed{
Signatures: sigs,
Signed: &signed,
}, nil
}
// MarshalJSON returns the serialized form of SignedRoot as bytes
func (r SignedRoot) MarshalJSON() ([]byte, error) {
signed, err := r.ToSigned()
if err != nil {
return nil, err
}
return defaultSerializer.Marshal(signed)
}
// RootFromSigned fully unpacks a Signed object into a SignedRoot and ensures
// that it is a valid SignedRoot
func RootFromSigned(s *Signed) (*SignedRoot, error) {
r := Root{}
if s.Signed == nil {
return nil, ErrInvalidMetadata{
role: CanonicalRootRole,
msg: "root file contained an empty payload",
}
}
if err := defaultSerializer.Unmarshal(*s.Signed, &r); err != nil {
return nil, err
}
if err := isValidRootStructure(r); err != nil {
return nil, err
}
sigs := make([]Signature, len(s.Signatures))
copy(sigs, s.Signatures)
return &SignedRoot{
Signatures: sigs,
Signed: r,
}, nil
}
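
An illustrative sketch of how NewRoot and BuildBaseRole fit together, with a single placeholder key shared across all four base roles (a real root would use distinct keys per role):

package main

import (
	"fmt"

	"github.com/docker/notary/tuf/data"
)

func main() {
	// One placeholder key standing in for real per-role keys.
	key := data.NewED25519PublicKey([]byte("stand-in-public-key-bytes"))
	keys := map[string]data.PublicKey{key.ID(): key}

	roles := make(map[string]*data.RootRole)
	for _, name := range data.BaseRoles {
		roles[name] = &data.RootRole{KeyIDs: []string{key.ID()}, Threshold: 1}
	}

	root, err := data.NewRoot(keys, roles, false)
	if err != nil {
		panic(err)
	}

	// BuildBaseRole resolves a role's key IDs against the key map in the root.
	snapshotRole, err := root.BuildBaseRole(data.CanonicalSnapshotRole)
	if err != nil {
		panic(err)
	}
	fmt.Println(snapshotRole.Name, snapshotRole.Threshold, snapshotRole.ListKeyIDs())
}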

@ -1,36 +0,0 @@
package data
import "github.com/docker/go/canonical/json"
// serializer is an interface that can marshal and unmarshal TUF data. This
// is expected to be a canonical JSON marshaller
type serializer interface {
MarshalCanonical(from interface{}) ([]byte, error)
Marshal(from interface{}) ([]byte, error)
Unmarshal(from []byte, to interface{}) error
}
// CanonicalJSON marshals to and from canonical JSON
type canonicalJSON struct{}
// MarshalCanonical returns the canonical JSON form of a thing
func (c canonicalJSON) MarshalCanonical(from interface{}) ([]byte, error) {
return json.MarshalCanonical(from)
}
// Marshal returns the regular non-canonical JSON form of a thing
func (c canonicalJSON) Marshal(from interface{}) ([]byte, error) {
return json.Marshal(from)
}
// Unmarshal unmarshals some JSON bytes
func (c canonicalJSON) Unmarshal(from []byte, to interface{}) error {
return json.Unmarshal(from, to)
}
// defaultSerializer is a canonical JSON serializer
var defaultSerializer serializer = canonicalJSON{}
func setDefaultSerializer(s serializer) {
defaultSerializer = s
}
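
The point of the canonical form is determinism: the same structure always serializes to the same bytes, so checksums and signatures are stable. A sketch of the difference, assuming (as canonical JSON requires) that the canonical encoder sorts object keys while the regular encoder keeps struct declaration order; the struct is hypothetical:

package main

import (
	"fmt"

	"github.com/docker/go/canonical/json"
)

type example struct {
	B int `json:"b"`
	A int `json:"a"`
}

func main() {
	v := example{A: 1, B: 2}

	regular, _ := json.Marshal(v)            // declaration order: {"b":2,"a":1}
	canonical, _ := json.MarshalCanonical(v) // sorted keys: {"a":1,"b":2}

	fmt.Println(string(regular))
	fmt.Println(string(canonical))
}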

@ -1,169 +0,0 @@
package data
import (
"bytes"
"fmt"
"github.com/Sirupsen/logrus"
"github.com/docker/go/canonical/json"
"github.com/docker/notary"
)
// SignedSnapshot is a fully unpacked snapshot.json
type SignedSnapshot struct {
Signatures []Signature
Signed Snapshot
Dirty bool
}
// Snapshot is the Signed component of a snapshot.json
type Snapshot struct {
SignedCommon
Meta Files `json:"meta"`
}
// IsValidSnapshotStructure returns an error, or nil, depending on whether the content of the
// struct is valid for snapshot metadata. This does not check signatures or expiry, just that
// the metadata content is valid.
func IsValidSnapshotStructure(s Snapshot) error {
expectedType := TUFTypes[CanonicalSnapshotRole]
if s.Type != expectedType {
return ErrInvalidMetadata{
role: CanonicalSnapshotRole, msg: fmt.Sprintf("expected type %s, not %s", expectedType, s.Type)}
}
if s.Version < 1 {
return ErrInvalidMetadata{
role: CanonicalSnapshotRole, msg: "version cannot be less than one"}
}
for _, role := range []string{CanonicalRootRole, CanonicalTargetsRole} {
// Meta is a map of FileMeta, so if the role isn't in the map it returns
// an empty FileMeta, which has an empty map, and you can check on keys
// from an empty map.
//
// For now sha256 is required and sha512 is not.
if _, ok := s.Meta[role].Hashes[notary.SHA256]; !ok {
return ErrInvalidMetadata{
role: CanonicalSnapshotRole,
msg: fmt.Sprintf("missing %s sha256 checksum information", role),
}
}
if err := CheckValidHashStructures(s.Meta[role].Hashes); err != nil {
return ErrInvalidMetadata{
role: CanonicalSnapshotRole,
msg: fmt.Sprintf("invalid %s checksum information, %v", role, err),
}
}
}
return nil
}
// NewSnapshot initializes a SignedSnapshot with a given top level root
// and targets objects
func NewSnapshot(root *Signed, targets *Signed) (*SignedSnapshot, error) {
logrus.Debug("generating new snapshot...")
targetsJSON, err := json.Marshal(targets)
if err != nil {
logrus.Debug("Error Marshalling Targets")
return nil, err
}
rootJSON, err := json.Marshal(root)
if err != nil {
logrus.Debug("Error Marshalling Root")
return nil, err
}
rootMeta, err := NewFileMeta(bytes.NewReader(rootJSON), NotaryDefaultHashes...)
if err != nil {
return nil, err
}
targetsMeta, err := NewFileMeta(bytes.NewReader(targetsJSON), NotaryDefaultHashes...)
if err != nil {
return nil, err
}
return &SignedSnapshot{
Signatures: make([]Signature, 0),
Signed: Snapshot{
SignedCommon: SignedCommon{
Type: TUFTypes[CanonicalSnapshotRole],
Version: 0,
Expires: DefaultExpires(CanonicalSnapshotRole),
},
Meta: Files{
CanonicalRootRole: rootMeta,
CanonicalTargetsRole: targetsMeta,
},
},
}, nil
}
// ToSigned partially serializes a SignedSnapshot for further signing
func (sp *SignedSnapshot) ToSigned() (*Signed, error) {
s, err := defaultSerializer.MarshalCanonical(sp.Signed)
if err != nil {
return nil, err
}
signed := json.RawMessage{}
err = signed.UnmarshalJSON(s)
if err != nil {
return nil, err
}
sigs := make([]Signature, len(sp.Signatures))
copy(sigs, sp.Signatures)
return &Signed{
Signatures: sigs,
Signed: &signed,
}, nil
}
// AddMeta updates a role in the snapshot with new meta
func (sp *SignedSnapshot) AddMeta(role string, meta FileMeta) {
sp.Signed.Meta[role] = meta
sp.Dirty = true
}
// GetMeta gets the metadata for a particular role, returning an error if it's
// not found
func (sp *SignedSnapshot) GetMeta(role string) (*FileMeta, error) {
if meta, ok := sp.Signed.Meta[role]; ok {
if _, ok := meta.Hashes["sha256"]; ok {
return &meta, nil
}
}
return nil, ErrMissingMeta{Role: role}
}
// DeleteMeta removes a role from the snapshot. If the role doesn't
// exist in the snapshot, it's a noop.
func (sp *SignedSnapshot) DeleteMeta(role string) {
if _, ok := sp.Signed.Meta[role]; ok {
delete(sp.Signed.Meta, role)
sp.Dirty = true
}
}
// MarshalJSON returns the serialized form of SignedSnapshot as bytes
func (sp *SignedSnapshot) MarshalJSON() ([]byte, error) {
signed, err := sp.ToSigned()
if err != nil {
return nil, err
}
return defaultSerializer.Marshal(signed)
}
// SnapshotFromSigned fully unpacks a Signed object into a SignedSnapshot
func SnapshotFromSigned(s *Signed) (*SignedSnapshot, error) {
sp := Snapshot{}
if err := defaultSerializer.Unmarshal(*s.Signed, &sp); err != nil {
return nil, err
}
if err := IsValidSnapshotStructure(sp); err != nil {
return nil, err
}
sigs := make([]Signature, len(s.Signatures))
copy(sigs, s.Signatures)
return &SignedSnapshot{
Signatures: sigs,
Signed: sp,
}, nil
}
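
A sketch of the snapshot flow with empty metadata for brevity (a real repository would populate keys, roles, and targets first): NewSnapshot hashes the serialized root and targets so later loads can be validated against the snapshot.

package main

import (
	"fmt"

	"github.com/docker/notary/tuf/data"
)

func main() {
	root, err := data.NewRoot(map[string]data.PublicKey{}, map[string]*data.RootRole{}, false)
	if err != nil {
		panic(err)
	}
	rootSigned, err := root.ToSigned()
	if err != nil {
		panic(err)
	}
	targetsSigned, err := data.NewTargets().ToSigned()
	if err != nil {
		panic(err)
	}

	snapshot, err := data.NewSnapshot(rootSigned, targetsSigned)
	if err != nil {
		panic(err)
	}

	// The snapshot now pins the length and sha256/sha512 hashes of both files.
	meta, _ := snapshot.GetMeta(data.CanonicalRootRole)
	fmt.Println(meta.Length, len(meta.Hashes))
}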

@ -1,201 +0,0 @@
package data
import (
"errors"
"fmt"
"path"
"github.com/docker/go/canonical/json"
)
// SignedTargets is a fully unpacked targets.json, or target delegation
// json file
type SignedTargets struct {
Signatures []Signature
Signed Targets
Dirty bool
}
// Targets is the Signed component of a targets.json or delegation json file
type Targets struct {
SignedCommon
Targets Files `json:"targets"`
Delegations Delegations `json:"delegations,omitempty"`
}
// isValidTargetsStructure returns an error, or nil, depending on whether the content of the struct
// is valid for targets metadata. This does not check signatures or expiry, just that
// the metadata content is valid.
func isValidTargetsStructure(t Targets, roleName string) error {
if roleName != CanonicalTargetsRole && !IsDelegation(roleName) {
return ErrInvalidRole{Role: roleName}
}
// even if it's a delegated role, the metadata type is "Targets"
expectedType := TUFTypes[CanonicalTargetsRole]
if t.Type != expectedType {
return ErrInvalidMetadata{
role: roleName, msg: fmt.Sprintf("expected type %s, not %s", expectedType, t.Type)}
}
if t.Version < 1 {
return ErrInvalidMetadata{role: roleName, msg: "version cannot be less than one"}
}
for _, roleObj := range t.Delegations.Roles {
if !IsDelegation(roleObj.Name) || path.Dir(roleObj.Name) != roleName {
return ErrInvalidMetadata{
role: roleName, msg: fmt.Sprintf("delegation role %s invalid", roleObj.Name)}
}
if err := isValidRootRoleStructure(roleName, roleObj.Name, roleObj.RootRole, t.Delegations.Keys); err != nil {
return err
}
}
return nil
}
// NewTargets initializes a new empty SignedTargets object
func NewTargets() *SignedTargets {
return &SignedTargets{
Signatures: make([]Signature, 0),
Signed: Targets{
SignedCommon: SignedCommon{
Type: TUFTypes["targets"],
Version: 0,
Expires: DefaultExpires("targets"),
},
Targets: make(Files),
Delegations: *NewDelegations(),
},
Dirty: true,
}
}
// GetMeta attempts to find the targets entry for the path. It
// returns nil if the target is not found.
func (t SignedTargets) GetMeta(path string) *FileMeta {
for p, meta := range t.Signed.Targets {
if p == path {
return &meta
}
}
return nil
}
// GetValidDelegations filters the delegation roles specified in the signed targets, returning
// only roles that are direct children, with their paths restricted accordingly
func (t SignedTargets) GetValidDelegations(parent DelegationRole) []DelegationRole {
roles := t.buildDelegationRoles()
result := []DelegationRole{}
for _, r := range roles {
validRole, err := parent.Restrict(r)
if err != nil {
continue
}
result = append(result, validRole)
}
return result
}
// BuildDelegationRole returns a copy of a DelegationRole using the information in this SignedTargets for the specified role name.
// Will error for invalid role name or key metadata within this SignedTargets. Path data is not validated.
func (t *SignedTargets) BuildDelegationRole(roleName string) (DelegationRole, error) {
for _, role := range t.Signed.Delegations.Roles {
if role.Name == roleName {
pubKeys := make(map[string]PublicKey)
for _, keyID := range role.KeyIDs {
pubKey, ok := t.Signed.Delegations.Keys[keyID]
if !ok {
// Couldn't retrieve all keys, so stop walking and return invalid role
return DelegationRole{}, ErrInvalidRole{
Role: roleName,
Reason: "role lists unknown key " + keyID + " as a signing key",
}
}
pubKeys[keyID] = pubKey
}
return DelegationRole{
BaseRole: BaseRole{
Name: role.Name,
Keys: pubKeys,
Threshold: role.Threshold,
},
Paths: role.Paths,
}, nil
}
}
return DelegationRole{}, ErrNoSuchRole{Role: roleName}
}
// buildDelegationRoles is a helper that creates DelegationRole structures from all delegations in a SignedTargets.
// These delegations are read directly from the SignedTargets and are not modified or validated.
func (t SignedTargets) buildDelegationRoles() []DelegationRole {
var roles []DelegationRole
for _, roleData := range t.Signed.Delegations.Roles {
delgRole, err := t.BuildDelegationRole(roleData.Name)
if err != nil {
continue
}
roles = append(roles, delgRole)
}
return roles
}
// AddTarget adds or updates the meta for the given path
func (t *SignedTargets) AddTarget(path string, meta FileMeta) {
t.Signed.Targets[path] = meta
t.Dirty = true
}
// AddDelegation will add a new delegated role with the given keys,
// ensuring the keys either already exist, or are added to the map
// of delegation keys
func (t *SignedTargets) AddDelegation(role *Role, keys []*PublicKey) error {
return errors.New("Not Implemented")
}
// ToSigned partially serializes a SignedTargets for further signing
func (t *SignedTargets) ToSigned() (*Signed, error) {
s, err := defaultSerializer.MarshalCanonical(t.Signed)
if err != nil {
return nil, err
}
signed := json.RawMessage{}
err = signed.UnmarshalJSON(s)
if err != nil {
return nil, err
}
sigs := make([]Signature, len(t.Signatures))
copy(sigs, t.Signatures)
return &Signed{
Signatures: sigs,
Signed: &signed,
}, nil
}
// MarshalJSON returns the serialized form of SignedTargets as bytes
func (t *SignedTargets) MarshalJSON() ([]byte, error) {
signed, err := t.ToSigned()
if err != nil {
return nil, err
}
return defaultSerializer.Marshal(signed)
}
// TargetsFromSigned fully unpacks a Signed object into a SignedTargets, given
// a role name (so it can validate the SignedTargets object)
func TargetsFromSigned(s *Signed, roleName string) (*SignedTargets, error) {
t := Targets{}
if err := defaultSerializer.Unmarshal(*s.Signed, &t); err != nil {
return nil, err
}
if err := isValidTargetsStructure(t, roleName); err != nil {
return nil, err
}
sigs := make([]Signature, len(s.Signatures))
copy(sigs, s.Signatures)
return &SignedTargets{
Signatures: sigs,
Signed: t,
}, nil
}
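
An illustrative sketch of adding a target entry and reading it back (the path and payload are placeholders):

package main

import (
	"bytes"
	"fmt"

	"github.com/docker/notary/tuf/data"
)

func main() {
	t := data.NewTargets()

	// Hash a placeholder payload the way published target content is hashed.
	payload := []byte("placeholder target content")
	meta, err := data.NewFileMeta(bytes.NewReader(payload), data.NotaryDefaultHashes...)
	if err != nil {
		panic(err)
	}

	t.AddTarget("app/v1.0", meta)

	if got := t.GetMeta("app/v1.0"); got != nil {
		fmt.Println(got.Length) // length of the placeholder payload
	}
	fmt.Println(t.Dirty) // true: the targets file needs re-signing
}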

@ -1,136 +0,0 @@
package data
import (
"bytes"
"fmt"
"github.com/docker/go/canonical/json"
"github.com/docker/notary"
)
// SignedTimestamp is a fully unpacked timestamp.json
type SignedTimestamp struct {
Signatures []Signature
Signed Timestamp
Dirty bool
}
// Timestamp is the Signed component of a timestamp.json
type Timestamp struct {
SignedCommon
Meta Files `json:"meta"`
}
// IsValidTimestampStructure returns an error, or nil, depending on whether the content of the struct
// is valid for timestamp metadata. This does not check signatures or expiry, just that
// the metadata content is valid.
func IsValidTimestampStructure(t Timestamp) error {
expectedType := TUFTypes[CanonicalTimestampRole]
if t.Type != expectedType {
return ErrInvalidMetadata{
role: CanonicalTimestampRole, msg: fmt.Sprintf("expected type %s, not %s", expectedType, t.Type)}
}
if t.Version < 1 {
return ErrInvalidMetadata{
role: CanonicalTimestampRole, msg: "version cannot be less than one"}
}
// Meta is a map of FileMeta, so if the role isn't in the map it returns
// an empty FileMeta, which has an empty map, and you can check on keys
// from an empty map.
//
// For now sha256 is required and sha512 is not.
if _, ok := t.Meta[CanonicalSnapshotRole].Hashes[notary.SHA256]; !ok {
return ErrInvalidMetadata{
role: CanonicalTimestampRole, msg: "missing snapshot sha256 checksum information"}
}
if err := CheckValidHashStructures(t.Meta[CanonicalSnapshotRole].Hashes); err != nil {
return ErrInvalidMetadata{
role: CanonicalTimestampRole, msg: fmt.Sprintf("invalid snapshot checksum information, %v", err)}
}
return nil
}
// NewTimestamp initializes a timestamp with an existing snapshot
func NewTimestamp(snapshot *Signed) (*SignedTimestamp, error) {
snapshotJSON, err := json.Marshal(snapshot)
if err != nil {
return nil, err
}
snapshotMeta, err := NewFileMeta(bytes.NewReader(snapshotJSON), NotaryDefaultHashes...)
if err != nil {
return nil, err
}
return &SignedTimestamp{
Signatures: make([]Signature, 0),
Signed: Timestamp{
SignedCommon: SignedCommon{
Type: TUFTypes[CanonicalTimestampRole],
Version: 0,
Expires: DefaultExpires(CanonicalTimestampRole),
},
Meta: Files{
CanonicalSnapshotRole: snapshotMeta,
},
},
}, nil
}
// ToSigned partially serializes a SignedTimestamp such that it can
// be signed
func (ts *SignedTimestamp) ToSigned() (*Signed, error) {
s, err := defaultSerializer.MarshalCanonical(ts.Signed)
if err != nil {
return nil, err
}
signed := json.RawMessage{}
err = signed.UnmarshalJSON(s)
if err != nil {
return nil, err
}
sigs := make([]Signature, len(ts.Signatures))
copy(sigs, ts.Signatures)
return &Signed{
Signatures: sigs,
Signed: &signed,
}, nil
}
// GetSnapshot gets the expected snapshot metadata hashes from the timestamp metadata,
// or an error if they don't exist
func (ts *SignedTimestamp) GetSnapshot() (*FileMeta, error) {
snapshotExpected, ok := ts.Signed.Meta[CanonicalSnapshotRole]
if !ok {
return nil, ErrMissingMeta{Role: CanonicalSnapshotRole}
}
return &snapshotExpected, nil
}
// MarshalJSON returns the serialized form of SignedTimestamp as bytes
func (ts *SignedTimestamp) MarshalJSON() ([]byte, error) {
signed, err := ts.ToSigned()
if err != nil {
return nil, err
}
return defaultSerializer.Marshal(signed)
}
// TimestampFromSigned parses a Signed object into a fully unpacked
// SignedTimestamp
func TimestampFromSigned(s *Signed) (*SignedTimestamp, error) {
ts := Timestamp{}
if err := defaultSerializer.Unmarshal(*s.Signed, &ts); err != nil {
return nil, err
}
if err := IsValidTimestampStructure(ts); err != nil {
return nil, err
}
sigs := make([]Signature, len(s.Signatures))
copy(sigs, s.Signatures)
return &SignedTimestamp{
Signatures: sigs,
Signed: ts,
}, nil
}
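
Continuing the same pattern, a sketch (again with empty metadata for brevity) of how the timestamp pins the snapshot:

package main

import (
	"fmt"

	"github.com/docker/notary/tuf/data"
)

func main() {
	root, _ := data.NewRoot(map[string]data.PublicKey{}, map[string]*data.RootRole{}, false)
	rootSigned, _ := root.ToSigned()
	targetsSigned, _ := data.NewTargets().ToSigned()

	snapshot, err := data.NewSnapshot(rootSigned, targetsSigned)
	if err != nil {
		panic(err)
	}
	snapshotSigned, err := snapshot.ToSigned()
	if err != nil {
		panic(err)
	}

	// The timestamp records the length and hashes of the serialized snapshot.
	ts, err := data.NewTimestamp(snapshotSigned)
	if err != nil {
		panic(err)
	}
	snapMeta, _ := ts.GetSnapshot()
	fmt.Println(snapMeta.Length, len(snapMeta.Hashes))
}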

@ -1,311 +0,0 @@
package data
import (
"crypto/sha256"
"crypto/sha512"
"crypto/subtle"
"encoding/hex"
"fmt"
"hash"
"io"
"io/ioutil"
"strings"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/go/canonical/json"
"github.com/docker/notary"
)
// SigAlgorithm for types of signatures
type SigAlgorithm string
func (k SigAlgorithm) String() string {
return string(k)
}
const defaultHashAlgorithm = "sha256"
// Signature types
const (
EDDSASignature SigAlgorithm = "eddsa"
RSAPSSSignature SigAlgorithm = "rsapss"
RSAPKCS1v15Signature SigAlgorithm = "rsapkcs1v15"
ECDSASignature SigAlgorithm = "ecdsa"
PyCryptoSignature SigAlgorithm = "pycrypto-pkcs#1 pss"
)
// Key types
const (
ED25519Key = "ed25519"
RSAKey = "rsa"
RSAx509Key = "rsa-x509"
ECDSAKey = "ecdsa"
ECDSAx509Key = "ecdsa-x509"
)
// TUFTypes is the set of metadata types
var TUFTypes = map[string]string{
CanonicalRootRole: "Root",
CanonicalTargetsRole: "Targets",
CanonicalSnapshotRole: "Snapshot",
CanonicalTimestampRole: "Timestamp",
}
// SetTUFTypes allows one to override some or all of the default
// type names in TUF.
func SetTUFTypes(ts map[string]string) {
for k, v := range ts {
TUFTypes[k] = v
}
}
// ValidTUFType checks if the given type is valid for the role
func ValidTUFType(typ, role string) bool {
if ValidRole(role) {
// All targets delegation roles must use
// the same type as the targets role.
if role == "" {
// role is unknown and does not map to
// a type
return false
}
if strings.HasPrefix(role, CanonicalTargetsRole+"/") {
role = CanonicalTargetsRole
}
}
// Most people will just use the defaults, so do this fast map check
// first. Still compare explicitly, in case a key and value in the
// map somehow differ.
if v, ok := TUFTypes[role]; ok {
return typ == v
}
return false
}
// Signed is the high level, partially deserialized metadata object
// used to verify signatures before fully unpacking, or to add signatures
// before fully packing
type Signed struct {
Signed *json.RawMessage `json:"signed"`
Signatures []Signature `json:"signatures"`
}
// SignedCommon contains the fields common to the Signed component of all
// TUF metadata files
type SignedCommon struct {
Type string `json:"_type"`
Expires time.Time `json:"expires"`
Version int `json:"version"`
}
// SignedMeta is used in server validation where we only need signatures
// and common fields
type SignedMeta struct {
Signed SignedCommon `json:"signed"`
Signatures []Signature `json:"signatures"`
}
// Signature is a signature on a piece of metadata
type Signature struct {
KeyID string `json:"keyid"`
Method SigAlgorithm `json:"method"`
Signature []byte `json:"sig"`
IsValid bool `json:"-"`
}
// Files is the map of paths to file meta contained in targets and delegations
// metadata files
type Files map[string]FileMeta
// Hashes is the map of hash type to digest created for each metadata
// and target file
type Hashes map[string][]byte
// NotaryDefaultHashes contains the default supported hash algorithms.
var NotaryDefaultHashes = []string{notary.SHA256, notary.SHA512}
// FileMeta contains the size and hashes for a metadata or target file. Custom
// data can be optionally added.
type FileMeta struct {
Length int64 `json:"length"`
Hashes Hashes `json:"hashes"`
Custom *json.RawMessage `json:"custom,omitempty"`
}
// CheckHashes verifies all the checksums specified by the "hashes" of the payload.
func CheckHashes(payload []byte, name string, hashes Hashes) error {
cnt := 0
// k, v indicate the hash algorithm and the corresponding value
for k, v := range hashes {
switch k {
case notary.SHA256:
checksum := sha256.Sum256(payload)
if subtle.ConstantTimeCompare(checksum[:], v) == 0 {
return ErrMismatchedChecksum{alg: notary.SHA256, name: name, expected: hex.EncodeToString(v)}
}
cnt++
case notary.SHA512:
checksum := sha512.Sum512(payload)
if subtle.ConstantTimeCompare(checksum[:], v) == 0 {
return ErrMismatchedChecksum{alg: notary.SHA512, name: name, expected: hex.EncodeToString(v)}
}
cnt++
}
}
if cnt == 0 {
return ErrMissingMeta{Role: name}
}
return nil
}
// CompareMultiHashes verifies that the two Hashes passed in could represent the same data.
// This means the two maps must share at least one hash algorithm, and the hashes they share must not conflict.
// Note that we check the intersection of map keys, which adds support for non-default hash algorithms in notary.
func CompareMultiHashes(hashes1, hashes2 Hashes) error {
// First check if the two hash structures are valid
if err := CheckValidHashStructures(hashes1); err != nil {
return err
}
if err := CheckValidHashStructures(hashes2); err != nil {
return err
}
// Check if they have at least one matching hash, and no conflicts
cnt := 0
for hashAlg, hash1 := range hashes1 {
hash2, ok := hashes2[hashAlg]
if !ok {
continue
}
if subtle.ConstantTimeCompare(hash1[:], hash2[:]) == 0 {
return fmt.Errorf("mismatched %s checksum", hashAlg)
}
// If we reached here, we had a match
cnt++
}
if cnt == 0 {
return fmt.Errorf("at least one matching hash needed")
}
return nil
}
// CheckValidHashStructures returns an error, or nil, depending on whether
// the content of the hashes is valid or not.
func CheckValidHashStructures(hashes Hashes) error {
cnt := 0
for k, v := range hashes {
switch k {
case notary.SHA256:
if len(v) != sha256.Size {
return ErrInvalidChecksum{alg: notary.SHA256}
}
cnt++
case notary.SHA512:
if len(v) != sha512.Size {
return ErrInvalidChecksum{alg: notary.SHA512}
}
cnt++
}
}
if cnt == 0 {
return fmt.Errorf("at least one supported hash needed")
}
return nil
}
// NewFileMeta generates a FileMeta object from the reader, using the
// hash algorithms provided
func NewFileMeta(r io.Reader, hashAlgorithms ...string) (FileMeta, error) {
if len(hashAlgorithms) == 0 {
hashAlgorithms = []string{defaultHashAlgorithm}
}
hashes := make(map[string]hash.Hash, len(hashAlgorithms))
for _, hashAlgorithm := range hashAlgorithms {
var h hash.Hash
switch hashAlgorithm {
case notary.SHA256:
h = sha256.New()
case notary.SHA512:
h = sha512.New()
default:
return FileMeta{}, fmt.Errorf("Unknown hash algorithm: %s", hashAlgorithm)
}
hashes[hashAlgorithm] = h
r = io.TeeReader(r, h)
}
n, err := io.Copy(ioutil.Discard, r)
if err != nil {
return FileMeta{}, err
}
m := FileMeta{Length: n, Hashes: make(Hashes, len(hashes))}
for hashAlgorithm, h := range hashes {
m.Hashes[hashAlgorithm] = h.Sum(nil)
}
return m, nil
}
// Delegations holds a tier of targets delegations
type Delegations struct {
Keys Keys `json:"keys"`
Roles []*Role `json:"roles"`
}
// NewDelegations initializes an empty Delegations object
func NewDelegations() *Delegations {
return &Delegations{
Keys: make(map[string]PublicKey),
Roles: make([]*Role, 0),
}
}
// These values are recommended TUF expiry times.
var defaultExpiryTimes = map[string]time.Duration{
CanonicalRootRole: notary.Year,
CanonicalTargetsRole: 90 * notary.Day,
CanonicalSnapshotRole: 7 * notary.Day,
CanonicalTimestampRole: notary.Day,
}
// SetDefaultExpiryTimes allows one to change the default expiries.
func SetDefaultExpiryTimes(times map[string]time.Duration) {
for key, value := range times {
if _, ok := defaultExpiryTimes[key]; !ok {
logrus.Errorf("Attempted to set default expiry for an unknown role: %s", key)
continue
}
defaultExpiryTimes[key] = value
}
}
// DefaultExpires gets the default expiry time for the given role
func DefaultExpires(role string) time.Time {
if d, ok := defaultExpiryTimes[role]; ok {
return time.Now().Add(d)
}
var t time.Time
return t.UTC().Round(time.Second)
}
type unmarshalledSignature Signature
// UnmarshalJSON does a custom unmarshalling of the signature JSON
func (s *Signature) UnmarshalJSON(data []byte) error {
uSignature := unmarshalledSignature{}
err := json.Unmarshal(data, &uSignature)
if err != nil {
return err
}
uSignature.Method = SigAlgorithm(strings.ToLower(string(uSignature.Method)))
*s = Signature(uSignature)
return nil
}
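
A sketch of the checksum helpers in isolation (the payload and name are placeholders):

package main

import (
	"bytes"
	"fmt"

	"github.com/docker/notary/tuf/data"
)

func main() {
	payload := []byte(`{"hello":"world"}`)

	meta, err := data.NewFileMeta(bytes.NewReader(payload), data.NotaryDefaultHashes...)
	if err != nil {
		panic(err)
	}

	// nil: every supported hash in meta matches the payload.
	fmt.Println(data.CheckHashes(payload, "example", meta.Hashes))

	// ErrMismatchedChecksum: the payload has been tampered with.
	fmt.Println(data.CheckHashes([]byte(`{"hello":"tampered"}`), "example", meta.Hashes))
}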

@ -1,111 +0,0 @@
package signed
import (
"crypto/rand"
"errors"
"github.com/docker/notary/trustmanager"
"github.com/docker/notary/tuf/data"
"github.com/docker/notary/tuf/utils"
)
type edCryptoKey struct {
role string
privKey data.PrivateKey
}
// Ed25519 implements a simple in memory cryptosystem for ED25519 keys
type Ed25519 struct {
keys map[string]edCryptoKey
}
// NewEd25519 initializes a new empty Ed25519 CryptoService that operates
// entirely in memory
func NewEd25519() *Ed25519 {
return &Ed25519{
make(map[string]edCryptoKey),
}
}
// AddKey allows you to add a private key
func (e *Ed25519) AddKey(role, gun string, k data.PrivateKey) error {
e.addKey(role, k)
return nil
}
// addKey allows you to add a private key
func (e *Ed25519) addKey(role string, k data.PrivateKey) {
e.keys[k.ID()] = edCryptoKey{
role: role,
privKey: k,
}
}
// RemoveKey deletes a key from the signer
func (e *Ed25519) RemoveKey(keyID string) error {
delete(e.keys, keyID)
return nil
}
// ListKeys returns the list of keys IDs for the role
func (e *Ed25519) ListKeys(role string) []string {
keyIDs := make([]string, 0, len(e.keys))
for id, edCryptoKey := range e.keys {
if edCryptoKey.role == role {
keyIDs = append(keyIDs, id)
}
}
return keyIDs
}
// ListAllKeys returns the map of keys IDs to role
func (e *Ed25519) ListAllKeys() map[string]string {
keys := make(map[string]string)
for id, edKey := range e.keys {
keys[id] = edKey.role
}
return keys
}
// Create generates a new key and returns the public part
func (e *Ed25519) Create(role, gun, algorithm string) (data.PublicKey, error) {
if algorithm != data.ED25519Key {
return nil, errors.New("only ED25519 supported by this cryptoservice")
}
private, err := utils.GenerateED25519Key(rand.Reader)
if err != nil {
return nil, err
}
e.addKey(role, private)
return data.PublicKeyFromPrivate(private), nil
}
// PublicKeys returns a map of public keys for the ids provided, when those IDs are found
// in the store.
func (e *Ed25519) PublicKeys(keyIDs ...string) (map[string]data.PublicKey, error) {
k := make(map[string]data.PublicKey)
for _, keyID := range keyIDs {
if edKey, ok := e.keys[keyID]; ok {
k[keyID] = data.PublicKeyFromPrivate(edKey.privKey)
}
}
return k, nil
}
// GetKey returns a single public key based on the ID
func (e *Ed25519) GetKey(keyID string) data.PublicKey {
if privKey, _, err := e.GetPrivateKey(keyID); err == nil {
return data.PublicKeyFromPrivate(privKey)
}
return nil
}
// GetPrivateKey returns a single private key and role if present, based on the ID
func (e *Ed25519) GetPrivateKey(keyID string) (data.PrivateKey, string, error) {
if k, ok := e.keys[keyID]; ok {
return k.privKey, k.role, nil
}
return nil, "", trustmanager.ErrKeyNotFound{KeyID: keyID}
}
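
A sketch of the in-memory cryptoservice (the GUN string is a placeholder; it assumes this package is imported as github.com/docker/notary/tuf/signed):

package main

import (
	"fmt"

	"github.com/docker/notary/tuf/data"
	"github.com/docker/notary/tuf/signed"
)

func main() {
	svc := signed.NewEd25519()

	// Create generates a key in memory and returns only the public part.
	pub, err := svc.Create(data.CanonicalTimestampRole, "example.com/repo", data.ED25519Key)
	if err != nil {
		panic(err)
	}

	fmt.Println(svc.ListKeys(data.CanonicalTimestampRole)) // [<ID of pub>]

	priv, role, err := svc.GetPrivateKey(pub.ID())
	if err != nil {
		panic(err)
	}
	fmt.Println(role, priv.ID() == pub.ID()) // timestamp true
}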

@ -1,96 +0,0 @@
package signed
import (
"fmt"
"strings"
)
// ErrInsufficientSignatures - cannot create enough signatures on a piece of
// metadata
type ErrInsufficientSignatures struct {
FoundKeys int
NeededKeys int
MissingKeyIDs []string
}
func (e ErrInsufficientSignatures) Error() string {
candidates := ""
if len(e.MissingKeyIDs) > 0 {
candidates = fmt.Sprintf(" (%s)", strings.Join(e.MissingKeyIDs, ", "))
}
if e.FoundKeys == 0 {
return fmt.Sprintf("signing keys not available: need %d keys from %d possible keys%s",
e.NeededKeys, len(e.MissingKeyIDs), candidates)
}
return fmt.Sprintf("not enough signing keys: found %d of %d needed keys - %d other possible keys%s",
e.FoundKeys, e.NeededKeys, len(e.MissingKeyIDs), candidates)
}
// ErrExpired indicates a piece of metadata has expired
type ErrExpired struct {
Role string
Expired string
}
func (e ErrExpired) Error() string {
return fmt.Sprintf("%s expired at %v", e.Role, e.Expired)
}
// ErrLowVersion indicates the piece of metadata has a version number lower than
// a version number we've already seen for this role
type ErrLowVersion struct {
Actual int
Current int
}
func (e ErrLowVersion) Error() string {
return fmt.Sprintf("version %d is lower than current version %d", e.Actual, e.Current)
}
// ErrRoleThreshold indicates we did not validate enough signatures to meet the threshold
type ErrRoleThreshold struct {
Msg string
}
func (e ErrRoleThreshold) Error() string {
if e.Msg == "" {
return "valid signatures did not meet threshold"
}
return e.Msg
}
// ErrInvalidKeyType indicates the types for the key and signature it's associated with are
// mismatched. Probably a sign of malicious behaviour
type ErrInvalidKeyType struct{}
func (e ErrInvalidKeyType) Error() string {
return "key type is not valid for signature"
}
// ErrInvalidKeyID indicates the specified key ID was incorrect for its associated data
type ErrInvalidKeyID struct{}
func (e ErrInvalidKeyID) Error() string {
return "key ID is not valid for key content"
}
// ErrInvalidKeyLength indicates that while we may support the cipher, the provided
// key length is not specifically supported, e.g. we support RSA, but not 1024-bit keys
type ErrInvalidKeyLength struct {
msg string
}
func (e ErrInvalidKeyLength) Error() string {
return fmt.Sprintf("key length is not supported: %s", e.msg)
}
// ErrNoKeys indicates no signing keys were found when trying to sign
type ErrNoKeys struct {
KeyIDs []string
}
func (e ErrNoKeys) Error() string {
return fmt.Sprintf("could not find necessary signing keys, at least one of these keys must be available: %s",
strings.Join(e.KeyIDs, ", "))
}

@ -1,49 +0,0 @@
package signed
import (
"github.com/docker/notary/tuf/data"
)
// KeyService provides management of keys locally. It will never
// accept or provide private keys. Communication between the KeyService
// and a SigningService happens behind the Create function.
type KeyService interface {
// Create issues a new key pair and is responsible for loading
// the private key into the appropriate signing service.
Create(role, gun, algorithm string) (data.PublicKey, error)
// AddKey adds a private key to the specified role and gun
AddKey(role, gun string, key data.PrivateKey) error
// GetKey retrieves the public key if present, otherwise it returns nil
GetKey(keyID string) data.PublicKey
// GetPrivateKey retrieves the private key and role if present and retrievable,
// otherwise it returns nil and an error
GetPrivateKey(keyID string) (data.PrivateKey, string, error)
// RemoveKey deletes the specified key, and returns an error only if the key
// removal fails. If the key doesn't exist, no error should be returned.
RemoveKey(keyID string) error
// ListKeys returns a list of key IDs for the role, or an empty list or
// nil if there are no keys.
ListKeys(role string) []string
// ListAllKeys returns a map of all available signing key IDs to role, or
// an empty map or nil if there are no keys.
ListAllKeys() map[string]string
}
// CryptoService is deprecated and all instances of its use should be
// replaced with KeyService
type CryptoService interface {
KeyService
}
// Verifier defines an interface for verifying signatures. An implementer
// of this interface should verify signatures for one and only one
// signing scheme.
type Verifier interface {
Verify(key data.PublicKey, sig []byte, msg []byte) error
}
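
To illustrate the Verifier contract, a minimal (deliberately useless) implementation sketch; a real verifier must actually check sig over msg with the given key, as the concrete verifiers for each signing scheme do:

package main

import (
	"errors"

	"github.com/docker/notary/tuf/data"
	"github.com/docker/notary/tuf/signed"
)

// rejectAllVerifier satisfies signed.Verifier but trusts nothing.
type rejectAllVerifier struct{}

var _ signed.Verifier = rejectAllVerifier{}

func (rejectAllVerifier) Verify(key data.PublicKey, sig, msg []byte) error {
	return errors.New("rejectAllVerifier refuses all signatures")
}

func main() {}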

@ -1,113 +0,0 @@
package signed
// The Sign function is a choke point for all code paths that do signing.
// We use this fact to do key ID translation. There are 2 types of key ID:
// - Scoped: the key ID based purely on the data that appears in the TUF
// files. This may be wrapped by a certificate that scopes the
// key to be used in a specific context.
// - Canonical: the key ID based purely on the public key bytes. This is
// used by keystores to easily identify keys that may be reused
// in many scoped locations.
// Currently these types only differ in the context of Root Keys in Notary
// for which the root key is wrapped using an x509 certificate.
import (
"crypto/rand"
"github.com/Sirupsen/logrus"
"github.com/docker/notary/trustmanager"
"github.com/docker/notary/tuf/data"
"github.com/docker/notary/tuf/utils"
)
// Sign takes a data.Signed and a cryptoservice containing private keys,
// and calculates and adds at least minSignatures signatures from signingKeys
// to the data.Signed. It will also clean up any signatures that were not
// produced by either a signingKey or an otherWhitelistedKey.
// Note that in most cases, otherWhitelistedKeys should probably be nil. They
// are for keys you don't want to sign with, but whose existing signatures you
// also don't want to remove - for instance, when you want to call Sign
// multiple times with different sets of signing keys without removing
// signatures produced by a previous call to Sign.
func Sign(service CryptoService, s *data.Signed, signingKeys []data.PublicKey,
minSignatures int, otherWhitelistedKeys []data.PublicKey) error {
logrus.Debugf("sign called with %d/%d required keys", minSignatures, len(signingKeys))
signatures := make([]data.Signature, 0, len(s.Signatures)+1)
signingKeyIDs := make(map[string]struct{})
tufIDs := make(map[string]data.PublicKey)
privKeys := make(map[string]data.PrivateKey)
// Get all the private key objects related to the public keys
missingKeyIDs := []string{}
for _, key := range signingKeys {
canonicalID, err := utils.CanonicalKeyID(key)
tufIDs[key.ID()] = key
if err != nil {
return err
}
k, _, err := service.GetPrivateKey(canonicalID)
if err != nil {
if _, ok := err.(trustmanager.ErrKeyNotFound); ok {
missingKeyIDs = append(missingKeyIDs, canonicalID)
continue
}
return err
}
privKeys[key.ID()] = k
}
// include the list of otherWhitelistedKeys
for _, key := range otherWhitelistedKeys {
if _, ok := tufIDs[key.ID()]; !ok {
tufIDs[key.ID()] = key
}
}
// Check to ensure we have enough signing keys
if len(privKeys) < minSignatures {
return ErrInsufficientSignatures{FoundKeys: len(privKeys),
NeededKeys: minSignatures, MissingKeyIDs: missingKeyIDs}
}
emptyStruct := struct{}{}
// Do signing and generate list of signatures
for keyID, pk := range privKeys {
sig, err := pk.Sign(rand.Reader, *s.Signed, nil)
if err != nil {
logrus.Debugf("Failed to sign with key: %s. Reason: %v", keyID, err)
return err
}
signingKeyIDs[keyID] = emptyStruct
signatures = append(signatures, data.Signature{
KeyID: keyID,
Method: pk.SignatureAlgorithm(),
Signature: sig[:],
})
}
for _, sig := range s.Signatures {
if _, ok := signingKeyIDs[sig.KeyID]; ok {
// key is in the set of key IDs for which a signature has been created
continue
}
var (
k data.PublicKey
ok bool
)
if k, ok = tufIDs[sig.KeyID]; !ok {
// key is no longer a valid signing key
continue
}
if err := VerifySignature(*s.Signed, &sig, k); err != nil {
// signature is no longer valid
continue
}
// keep any signatures that still represent valid keys and are
// themselves valid
signatures = append(signatures, sig)
}
s.Signatures = signatures
return nil
}
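A minimal sketch of a caller, assuming cs wraps a keystore holding the private halves of pubKeys (for example, Notary's cryptoservice package) and that a single signature is required; the function name is illustrative.
func signOnce(cs CryptoService, s *data.Signed, pubKeys []data.PublicKey) error {
// Require one valid signature and keep no extra whitelisted keys.
return Sign(cs, s, pubKeys, 1, nil)
}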

View file

@ -1,283 +0,0 @@
package signed
import (
"crypto"
"crypto/ecdsa"
"crypto/rsa"
"crypto/sha256"
"crypto/x509"
"encoding/pem"
"fmt"
"math/big"
"reflect"
"github.com/Sirupsen/logrus"
"github.com/agl/ed25519"
"github.com/docker/notary/tuf/data"
)
const (
minRSAKeySizeBit = 2048 // 2048 bits = 256 bytes
minRSAKeySizeByte = minRSAKeySizeBit / 8
)
// Verifiers serves as a map of all verifiers available on the system and
// can be injected into a verificationService. For testing and configuration
// purposes, it will not be used by default.
var Verifiers = map[data.SigAlgorithm]Verifier{
data.RSAPSSSignature: RSAPSSVerifier{},
data.RSAPKCS1v15Signature: RSAPKCS1v15Verifier{},
data.PyCryptoSignature: RSAPyCryptoVerifier{},
data.ECDSASignature: ECDSAVerifier{},
data.EDDSASignature: Ed25519Verifier{},
}
// RegisterVerifier provides a convenience function for init() functions
// to register additional verifiers or replace existing ones.
func RegisterVerifier(algorithm data.SigAlgorithm, v Verifier) {
curr, ok := Verifiers[algorithm]
if ok {
typOld := reflect.TypeOf(curr)
typNew := reflect.TypeOf(v)
logrus.Debugf(
"replacing already loaded verifier %s:%s with %s:%s",
typOld.PkgPath(), typOld.Name(),
typNew.PkgPath(), typNew.Name(),
)
} else {
logrus.Debug("adding verifier for: ", algorithm)
}
Verifiers[algorithm] = v
}
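As a sketch of how an init() function might use this hook, the following registers a verifier for a made-up "noop" algorithm; the type and algorithm name are illustrative only, and the code is assumed to live in this package.
type noopVerifier struct{}
// Verify accepts every signature unconditionally; useful only in tests.
func (v noopVerifier) Verify(key data.PublicKey, sig []byte, msg []byte) error {
return nil
}
func init() {
// Runs at package load time, replacing any existing entry for the name.
RegisterVerifier(data.SigAlgorithm("noop"), noopVerifier{})
}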
// Ed25519Verifier is used to verify Ed25519 signatures
type Ed25519Verifier struct{}
// Verify checks that an ed25519 signature is valid
func (v Ed25519Verifier) Verify(key data.PublicKey, sig []byte, msg []byte) error {
if key.Algorithm() != data.ED25519Key {
return ErrInvalidKeyType{}
}
var sigBytes [ed25519.SignatureSize]byte
if len(sig) != ed25519.SignatureSize {
logrus.Debugf("signature length is incorrect, must be %d, was %d.", ed25519.SignatureSize, len(sig))
return ErrInvalid
}
copy(sigBytes[:], sig)
var keyBytes [ed25519.PublicKeySize]byte
pub := key.Public()
if len(pub) != ed25519.PublicKeySize {
logrus.Errorf("public key is incorrect size, must be %d, was %d.", ed25519.PublicKeySize, len(pub))
return ErrInvalidKeyLength{msg: fmt.Sprintf("ed25519 public key must be %d bytes.", ed25519.PublicKeySize)}
}
n := copy(keyBytes[:], key.Public())
if n < ed25519.PublicKeySize {
logrus.Errorf("failed to copy the key, must have %d bytes, copied %d bytes.", ed25519.PublicKeySize, n)
return ErrInvalid
}
if !ed25519.Verify(&keyBytes, msg, &sigBytes) {
logrus.Debugf("failed ed25519 verification")
return ErrInvalid
}
return nil
}
func verifyPSS(key interface{}, digest, sig []byte) error {
rsaPub, ok := key.(*rsa.PublicKey)
if !ok {
logrus.Debugf("value was not an RSA public key")
return ErrInvalid
}
if rsaPub.N.BitLen() < minRSAKeySizeBit {
logrus.Debugf("RSA keys less than 2048 bits are not acceptable, provided key has length %d.", rsaPub.N.BitLen())
return ErrInvalidKeyLength{msg: fmt.Sprintf("RSA key must be at least %d bits.", minRSAKeySizeBit)}
}
if len(sig) < minRSAKeySizeByte {
logrus.Debugf("RSA keys less than 2048 bits are not acceptable, provided signature has length %d.", len(sig))
return ErrInvalid
}
opts := rsa.PSSOptions{SaltLength: sha256.Size, Hash: crypto.SHA256}
if err := rsa.VerifyPSS(rsaPub, crypto.SHA256, digest[:], sig, &opts); err != nil {
logrus.Debugf("failed RSAPSS verification: %s", err)
return ErrInvalid
}
return nil
}
func getRSAPubKey(key data.PublicKey) (crypto.PublicKey, error) {
algorithm := key.Algorithm()
var pubKey crypto.PublicKey
switch algorithm {
case data.RSAx509Key:
pemCert, _ := pem.Decode([]byte(key.Public()))
if pemCert == nil {
logrus.Debugf("failed to decode PEM-encoded x509 certificate")
return nil, ErrInvalid
}
cert, err := x509.ParseCertificate(pemCert.Bytes)
if err != nil {
logrus.Debugf("failed to parse x509 certificate: %s\n", err)
return nil, ErrInvalid
}
pubKey = cert.PublicKey
case data.RSAKey:
var err error
pubKey, err = x509.ParsePKIXPublicKey(key.Public())
if err != nil {
logrus.Debugf("failed to parse public key: %s\n", err)
return nil, ErrInvalid
}
default:
// only accept RSA keys
logrus.Debugf("invalid key type for RSAPSS verifier: %s", algorithm)
return nil, ErrInvalidKeyType{}
}
return pubKey, nil
}
// RSAPSSVerifier checks RSASSA-PSS signatures
type RSAPSSVerifier struct{}
// Verify does the actual check.
func (v RSAPSSVerifier) Verify(key data.PublicKey, sig []byte, msg []byte) error {
// will return err if keytype is not a recognized RSA type
pubKey, err := getRSAPubKey(key)
if err != nil {
return err
}
digest := sha256.Sum256(msg)
return verifyPSS(pubKey, digest[:], sig)
}
// RSAPKCS1v15Verifier checks RSA PKCS1v15 signatures
type RSAPKCS1v15Verifier struct{}
// Verify does the actual verification
func (v RSAPKCS1v15Verifier) Verify(key data.PublicKey, sig []byte, msg []byte) error {
// will return err if keytype is not a recognized RSA type
pubKey, err := getRSAPubKey(key)
if err != nil {
return err
}
digest := sha256.Sum256(msg)
rsaPub, ok := pubKey.(*rsa.PublicKey)
if !ok {
logrus.Debugf("value was not an RSA public key")
return ErrInvalid
}
if rsaPub.N.BitLen() < minRSAKeySizeBit {
logrus.Debugf("RSA keys less than 2048 bits are not acceptable, provided key has length %d.", rsaPub.N.BitLen())
return ErrInvalidKeyLength{msg: fmt.Sprintf("RSA key must be at least %d bits.", minRSAKeySizeBit)}
}
if len(sig) < minRSAKeySizeByte {
logrus.Debugf("RSA keys less than 2048 bits are not acceptable, provided signature has length %d.", len(sig))
return ErrInvalid
}
if err = rsa.VerifyPKCS1v15(rsaPub, crypto.SHA256, digest[:], sig); err != nil {
logrus.Errorf("Failed verification: %s", err.Error())
return ErrInvalid
}
return nil
}
// RSAPyCryptoVerifier checks RSASSA-PSS signatures
type RSAPyCryptoVerifier struct{}
// Verify does the actual check.
// N.B. We have not been able to make this work in a way that is compatible
// with PyCrypto.
func (v RSAPyCryptoVerifier) Verify(key data.PublicKey, sig []byte, msg []byte) error {
digest := sha256.Sum256(msg)
if key.Algorithm() != data.RSAKey {
return ErrInvalidKeyType{}
}
k, _ := pem.Decode([]byte(key.Public()))
if k == nil {
logrus.Debugf("failed to decode PEM-encoded x509 certificate")
return ErrInvalid
}
pub, err := x509.ParsePKIXPublicKey(k.Bytes)
if err != nil {
logrus.Debugf("failed to parse public key: %s\n", err)
return ErrInvalid
}
return verifyPSS(pub, digest[:], sig)
}
// ECDSAVerifier checks ECDSA signatures, decoding the keyType appropriately
type ECDSAVerifier struct{}
// Verify does the actual check.
func (v ECDSAVerifier) Verify(key data.PublicKey, sig []byte, msg []byte) error {
algorithm := key.Algorithm()
var pubKey crypto.PublicKey
switch algorithm {
case data.ECDSAx509Key:
pemCert, _ := pem.Decode([]byte(key.Public()))
if pemCert == nil {
logrus.Debugf("failed to decode PEM-encoded x509 certificate for keyID: %s", key.ID())
logrus.Debugf("certificate bytes: %s", string(key.Public()))
return ErrInvalid
}
cert, err := x509.ParseCertificate(pemCert.Bytes)
if err != nil {
logrus.Debugf("failed to parse x509 certificate: %s\n", err)
return ErrInvalid
}
pubKey = cert.PublicKey
case data.ECDSAKey:
var err error
pubKey, err = x509.ParsePKIXPublicKey(key.Public())
if err != nil {
logrus.Debugf("Failed to parse private key for keyID: %s, %s\n", key.ID(), err)
return ErrInvalid
}
default:
// only accept ECDSA keys.
logrus.Debugf("invalid key type for ECDSA verifier: %s", algorithm)
return ErrInvalidKeyType{}
}
ecdsaPubKey, ok := pubKey.(*ecdsa.PublicKey)
if !ok {
logrus.Debugf("value isn't an ECDSA public key")
return ErrInvalid
}
sigLength := len(sig)
expectedOctetLength := 2 * ((ecdsaPubKey.Params().BitSize + 7) >> 3)
if sigLength != expectedOctetLength {
logrus.Debugf("signature had an unexpected length")
return ErrInvalid
}
rBytes, sBytes := sig[:sigLength/2], sig[sigLength/2:]
r := new(big.Int).SetBytes(rBytes)
s := new(big.Int).SetBytes(sBytes)
digest := sha256.Sum256(msg)
if !ecdsa.Verify(ecdsaPubKey, digest[:], r, s) {
logrus.Debugf("failed ECDSA signature validation")
return ErrInvalid
}
return nil
}
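The raw r||s layout this verifier expects can be produced from the standard library, as in this standalone sketch (assuming Go 1.15+ for big.Int.FillBytes; the message and choice of P-256 are illustrative):
package main

import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/sha256"
"fmt"
)

func main() {
priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
if err != nil {
panic(err)
}
digest := sha256.Sum256([]byte("hello tuf"))
r, s, err := ecdsa.Sign(rand.Reader, priv, digest[:])
if err != nil {
panic(err)
}
// Mirror expectedOctetLength above: left-pad each integer to the
// curve's byte length, then concatenate r and s.
octets := (priv.Params().BitSize + 7) / 8
sig := make([]byte, 2*octets)
r.FillBytes(sig[:octets])
s.FillBytes(sig[octets:])
fmt.Printf("signature is %d bytes\n", len(sig)) // 64 for P-256
}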

View file

@ -1,112 +0,0 @@
package signed
import (
"errors"
"fmt"
"strings"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/go/canonical/json"
"github.com/docker/notary/tuf/data"
)
// Various basic signing errors
var (
ErrMissingKey = errors.New("tuf: missing key")
ErrNoSignatures = errors.New("tuf: data has no signatures")
ErrInvalid = errors.New("tuf: signature verification failed")
ErrWrongMethod = errors.New("tuf: invalid signature type")
ErrUnknownRole = errors.New("tuf: unknown role")
ErrWrongType = errors.New("tuf: meta file has wrong type")
)
// IsExpired checks whether the given time is before the present time
func IsExpired(t time.Time) bool {
return t.Before(time.Now())
}
// VerifyExpiry returns ErrExpired if the metadata is expired
func VerifyExpiry(s *data.SignedCommon, role string) error {
if IsExpired(s.Expires) {
logrus.Errorf("Metadata for %s expired", role)
return ErrExpired{Role: role, Expired: s.Expires.Format("Mon Jan 2 15:04:05 MST 2006")}
}
return nil
}
// VerifyVersion returns ErrLowVersion if the metadata version is lower than the min version
func VerifyVersion(s *data.SignedCommon, minVersion int) error {
if s.Version < minVersion {
return ErrLowVersion{Actual: s.Version, Current: minVersion}
}
return nil
}
// VerifySignatures checks that we have sufficient valid signatures for the given role
func VerifySignatures(s *data.Signed, roleData data.BaseRole) error {
if len(s.Signatures) == 0 {
return ErrNoSignatures
}
if roleData.Threshold < 1 {
return ErrRoleThreshold{}
}
logrus.Debugf("%s role has key IDs: %s", roleData.Name, strings.Join(roleData.ListKeyIDs(), ","))
// remarshal the signed part so we can verify the signature, since the signature has
// to be of a canonically marshalled signed object
var decoded map[string]interface{}
if err := json.Unmarshal(*s.Signed, &decoded); err != nil {
return err
}
msg, err := json.MarshalCanonical(decoded)
if err != nil {
return err
}
valid := make(map[string]struct{})
for i := range s.Signatures {
sig := &(s.Signatures[i])
logrus.Debug("verifying signature for key ID: ", sig.KeyID)
key, ok := roleData.Keys[sig.KeyID]
if !ok {
logrus.Debugf("continuing b/c keyid lookup was nil: %s\n", sig.KeyID)
continue
}
// Check that the signature key ID actually matches the content ID of the key
if key.ID() != sig.KeyID {
return ErrInvalidKeyID{}
}
if err := VerifySignature(msg, sig, key); err != nil {
logrus.Debugf("continuing b/c %s", err.Error())
continue
}
valid[sig.KeyID] = struct{}{}
}
if len(valid) < roleData.Threshold {
return ErrRoleThreshold{
Msg: fmt.Sprintf("valid signatures did not meet threshold for %s", roleData.Name),
}
}
return nil
}
// VerifySignature checks a single signature and public key against a payload
// If the signature is verified, the signature's is valid field will actually
// be mutated to be equal to the boolean true
func VerifySignature(msg []byte, sig *data.Signature, pk data.PublicKey) error {
// method lookup is consistent because UnmarshalJSON lower-cases it for us.
method := sig.Method
verifier, ok := Verifiers[method]
if !ok {
return fmt.Errorf("signing method is not supported: %s\n", sig.Method)
}
if err := verifier.Verify(pk, sig.Signature, msg); err != nil {
return fmt.Errorf("signature was invalid\n")
}
sig.IsValid = true
return nil
}

File diff suppressed because it is too large

View file

@ -1,31 +0,0 @@
package utils
import (
"strings"
)
// RoleList is a list of roles
type RoleList []string
// Len returns the length of the list
func (r RoleList) Len() int {
return len(r)
}
// Less returns true if the item at i should be sorted
// before the item at j. It's an unstable partial ordering
// based on the number of segments, separated by "/", in
// the role name
func (r RoleList) Less(i, j int) bool {
segsI := strings.Split(r[i], "/")
segsJ := strings.Split(r[j], "/")
if len(segsI) == len(segsJ) {
return r[i] < r[j]
}
return len(segsI) < len(segsJ)
}
// Swap the items at 2 locations in the list
func (r RoleList) Swap(i, j int) {
r[i], r[j] = r[j], r[i]
}
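A short example of the resulting order, written as if it were a testable example alongside RoleList (assuming fmt and sort are imported):
func ExampleRoleList() {
roles := RoleList{"targets/a/b", "targets", "targets/b", "targets/a"}
sort.Sort(roles)
fmt.Println(roles)
// Output: [targets targets/a targets/b targets/a/b]
}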

View file

@ -1,85 +0,0 @@
package utils
import (
"fmt"
"sync"
)
// ErrEmptyStack is used when an action that requires some
// content is invoked and the stack is empty
type ErrEmptyStack struct {
action string
}
func (err ErrEmptyStack) Error() string {
return fmt.Sprintf("attempted to %s with empty stack", err.action)
}
// ErrBadTypeCast is used by PopX functions when the item
// cannot be typed to X
type ErrBadTypeCast struct{}
func (err ErrBadTypeCast) Error() string {
return "attempted to do a typed pop and item was not of type"
}
// Stack is a simple type agnostic stack implementation
type Stack struct {
s []interface{}
l sync.Mutex
}
// NewStack creates a new stack
func NewStack() *Stack {
s := &Stack{
s: make([]interface{}, 0),
}
return s
}
// Push adds an item to the top of the stack.
func (s *Stack) Push(item interface{}) {
s.l.Lock()
defer s.l.Unlock()
s.s = append(s.s, item)
}
// Pop removes and returns the top item on the stack, or returns
// ErrEmptyStack if the stack has no content
func (s *Stack) Pop() (interface{}, error) {
s.l.Lock()
defer s.l.Unlock()
l := len(s.s)
if l > 0 {
item := s.s[l-1]
s.s = s.s[:l-1]
return item, nil
}
return nil, ErrEmptyStack{action: "Pop"}
}
// PopString attempts to cast the top item on the stack to the string type.
// If this succeeds, it removes and returns the top item. If the item
// is not of the string type, ErrBadTypeCast is returned. If the stack
// is empty, ErrEmptyStack is returned
func (s *Stack) PopString() (string, error) {
s.l.Lock()
defer s.l.Unlock()
l := len(s.s)
if l > 0 {
item := s.s[l-1]
if item, ok := item.(string); ok {
s.s = s.s[:l-1]
return item, nil
}
return "", ErrBadTypeCast{}
}
return "", ErrEmptyStack{action: "PopString"}
}
// Empty returns true if the stack is empty
func (s *Stack) Empty() bool {
s.l.Lock()
defer s.l.Unlock()
return len(s.s) == 0
}
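A usage sketch, written as a testable example next to the type; the pushed values are illustrative:
func ExampleStack() {
s := NewStack()
s.Push("world")
s.Push("hello")
for !s.Empty() {
word, err := s.PopString()
if err != nil {
panic(err)
}
fmt.Println(word)
}
// Output:
// hello
// world
}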

View file

@ -1,152 +0,0 @@
package utils
import (
"crypto/sha256"
"crypto/sha512"
"crypto/tls"
"encoding/hex"
"fmt"
"io"
"net/http"
"net/url"
"os"
"strings"
"github.com/docker/notary/tuf/data"
)
// Download does a simple download from a URL
func Download(url url.URL) (*http.Response, error) {
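// Note: InsecureSkipVerify disables TLS certificate verification for this
// client (and for Upload below), so these helpers are unsafe outside tests.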
tr := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}
client := &http.Client{Transport: tr}
return client.Get(url.String())
}
// Upload does a simple JSON upload to a URL
func Upload(url string, body io.Reader) (*http.Response, error) {
tr := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}
client := &http.Client{Transport: tr}
return client.Post(url, "application/json", body)
}
// StrSliceContains checks if the given string appears in the slice
func StrSliceContains(ss []string, s string) bool {
for _, v := range ss {
if v == s {
return true
}
}
return false
}
// StrSliceRemove removes the given string from the slice, returning a new slice
func StrSliceRemove(ss []string, s string) []string {
res := []string{}
for _, v := range ss {
if v != s {
res = append(res, v)
}
}
return res
}
// StrSliceContainsI checks if the given string appears in the slice
// in a case insensitive manner
func StrSliceContainsI(ss []string, s string) bool {
s = strings.ToLower(s)
for _, v := range ss {
v = strings.ToLower(v)
if v == s {
return true
}
}
return false
}
// FileExists returns true if a file (or dir) exists at the given path,
// false otherwise
func FileExists(path string) bool {
_, err := os.Stat(path)
return err == nil
}
// NoopCloser is a simple Reader wrapper that does nothing when Close is
// called
type NoopCloser struct {
io.Reader
}
// Close does nothing for a NoopCloser
func (nc *NoopCloser) Close() error {
return nil
}
// DoHash returns the digest of d using the hashing algorithm named
// in alg
func DoHash(alg string, d []byte) []byte {
switch alg {
case "sha256":
digest := sha256.Sum256(d)
return digest[:]
case "sha512":
digest := sha512.Sum512(d)
return digest[:]
}
return nil
}
// UnusedDelegationKeys prunes a list of keys, returning those that are no
// longer in use for a given targets file
func UnusedDelegationKeys(t data.SignedTargets) []string {
// compare ids to all still active key ids in all active roles
// with the targets file
found := make(map[string]bool)
for _, r := range t.Signed.Delegations.Roles {
for _, id := range r.KeyIDs {
found[id] = true
}
}
var discard []string
for id := range t.Signed.Delegations.Keys {
if !found[id] {
discard = append(discard, id)
}
}
return discard
}
// RemoveUnusedKeys determines which keys in the slice of IDs are no longer
// used in the given targets file and removes them from the delegated keys
// map
func RemoveUnusedKeys(t *data.SignedTargets) {
unusedIDs := UnusedDelegationKeys(*t)
for _, id := range unusedIDs {
delete(t.Signed.Delegations.Keys, id)
}
}
// FindRoleIndex returns the index of the role named <name> or -1 if no
// matching role is found.
func FindRoleIndex(rs []*data.Role, name string) int {
for i, r := range rs {
if r.Name == name {
return i
}
}
return -1
}
// ConsistentName generates the appropriate HTTP URL path for the role,
// based on whether the repo is marked as consistent. The RemoteStore
// is responsible for adding file extensions.
func ConsistentName(role string, hashSha256 []byte) string {
if len(hashSha256) > 0 {
hash := hex.EncodeToString(hashSha256)
return fmt.Sprintf("%s.%s", role, hash)
}
return role
}
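A short sketch using only imports already present in this file; a consistent name embeds the hex digest, while an empty hash passes the role through unchanged:
func consistentNameExample() {
digest := sha256.Sum256([]byte("snapshot metadata"))
fmt.Println(ConsistentName("snapshot", digest[:])) // snapshot.<64 hex chars>
fmt.Println(ConsistentName("snapshot", nil))       // snapshot
}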

View file

@ -1,551 +0,0 @@
package utils
import (
"bytes"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"errors"
"fmt"
"io"
"io/ioutil"
"math/big"
"time"
"github.com/Sirupsen/logrus"
"github.com/agl/ed25519"
"github.com/docker/notary"
"github.com/docker/notary/tuf/data"
)
// CanonicalKeyID returns the ID of the public bytes version of a TUF key.
// On regular RSA/ECDSA TUF keys, this is just the key ID. On X509 RSA/ECDSA
// TUF keys, this is the key ID of the public key part of the key in the leaf cert
func CanonicalKeyID(k data.PublicKey) (string, error) {
switch k.Algorithm() {
case data.ECDSAx509Key, data.RSAx509Key:
return X509PublicKeyID(k)
default:
return k.ID(), nil
}
}
// LoadCertFromPEM returns the first certificate found in a bunch of bytes or error
// if nothing is found. Taken from https://golang.org/src/crypto/x509/cert_pool.go#L85.
func LoadCertFromPEM(pemBytes []byte) (*x509.Certificate, error) {
for len(pemBytes) > 0 {
var block *pem.Block
block, pemBytes = pem.Decode(pemBytes)
if block == nil {
return nil, errors.New("no certificates found in PEM data")
}
if block.Type != "CERTIFICATE" || len(block.Headers) != 0 {
continue
}
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
continue
}
return cert, nil
}
return nil, errors.New("no certificates found in PEM data")
}
// X509PublicKeyID returns a public key ID as a string, given a
// data.PublicKey that contains an X509 Certificate
func X509PublicKeyID(certPubKey data.PublicKey) (string, error) {
// Note that this only loads the first certificate from the public key
cert, err := LoadCertFromPEM(certPubKey.Public())
if err != nil {
return "", err
}
pubKeyBytes, err := x509.MarshalPKIXPublicKey(cert.PublicKey)
if err != nil {
return "", err
}
var key data.PublicKey
switch certPubKey.Algorithm() {
case data.ECDSAx509Key:
key = data.NewECDSAPublicKey(pubKeyBytes)
case data.RSAx509Key:
key = data.NewRSAPublicKey(pubKeyBytes)
}
return key.ID(), nil
}
// ParsePEMPrivateKey returns a data.PrivateKey from a PEM encoded private key. It
// supports RSA (PKCS#1), ECDSA, and ED25519 keys, and attempts to decrypt using the passphrase, if encrypted.
func ParsePEMPrivateKey(pemBytes []byte, passphrase string) (data.PrivateKey, error) {
block, _ := pem.Decode(pemBytes)
if block == nil {
return nil, errors.New("no valid private key found")
}
var privKeyBytes []byte
var err error
if x509.IsEncryptedPEMBlock(block) {
privKeyBytes, err = x509.DecryptPEMBlock(block, []byte(passphrase))
if err != nil {
return nil, errors.New("could not decrypt private key")
}
} else {
privKeyBytes = block.Bytes
}
switch block.Type {
case "RSA PRIVATE KEY":
rsaPrivKey, err := x509.ParsePKCS1PrivateKey(privKeyBytes)
if err != nil {
return nil, fmt.Errorf("could not parse DER encoded key: %v", err)
}
tufRSAPrivateKey, err := RSAToPrivateKey(rsaPrivKey)
if err != nil {
return nil, fmt.Errorf("could not convert rsa.PrivateKey to data.PrivateKey: %v", err)
}
return tufRSAPrivateKey, nil
case "EC PRIVATE KEY":
ecdsaPrivKey, err := x509.ParseECPrivateKey(privKeyBytes)
if err != nil {
return nil, fmt.Errorf("could not parse DER encoded private key: %v", err)
}
tufECDSAPrivateKey, err := ECDSAToPrivateKey(ecdsaPrivKey)
if err != nil {
return nil, fmt.Errorf("could not convert ecdsa.PrivateKey to data.PrivateKey: %v", err)
}
return tufECDSAPrivateKey, nil
case "ED25519 PRIVATE KEY":
// We serialize ED25519 keys by concatenating the private key
// to the public key and encoding with PEM. See the
// ED25519ToPrivateKey function.
tufED25519PrivateKey, err := ED25519ToPrivateKey(privKeyBytes)
if err != nil {
return nil, fmt.Errorf("could not convert ed25519 key to data.PrivateKey: %v", err)
}
return tufED25519PrivateKey, nil
default:
return nil, fmt.Errorf("unsupported key type %q", block.Type)
}
}
// CertToPEM is a utility function that returns a PEM encoded x509 Certificate
func CertToPEM(cert *x509.Certificate) []byte {
pemCert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})
return pemCert
}
// CertChainToPEM is a utility function that returns a PEM encoded chain of x509 Certificates, in the order they are passed
func CertChainToPEM(certChain []*x509.Certificate) ([]byte, error) {
var pemBytes bytes.Buffer
for _, cert := range certChain {
if err := pem.Encode(&pemBytes, &pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}); err != nil {
return nil, err
}
}
return pemBytes.Bytes(), nil
}
// LoadCertFromFile loads the first certificate from the file provided. The
// data is expected to be PEM encoded and contain one or more certificates
// with PEM type "CERTIFICATE"
func LoadCertFromFile(filename string) (*x509.Certificate, error) {
certs, err := LoadCertBundleFromFile(filename)
if err != nil {
return nil, err
}
return certs[0], nil
}
// LoadCertBundleFromFile loads certificates from the file provided. The
// data is expected to be PEM encoded and contain one or more certificates
// with PEM type "CERTIFICATE"
func LoadCertBundleFromFile(filename string) ([]*x509.Certificate, error) {
b, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
return LoadCertBundleFromPEM(b)
}
// LoadCertBundleFromPEM loads certificates from the []byte provided. The
// data is expected to be PEM encoded and contain one or more certificates
// with PEM type "CERTIFICATE"
func LoadCertBundleFromPEM(pemBytes []byte) ([]*x509.Certificate, error) {
certificates := []*x509.Certificate{}
var block *pem.Block
block, pemBytes = pem.Decode(pemBytes)
for ; block != nil; block, pemBytes = pem.Decode(pemBytes) {
if block.Type == "CERTIFICATE" {
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
return nil, err
}
certificates = append(certificates, cert)
} else {
return nil, fmt.Errorf("invalid pem block type: %s", block.Type)
}
}
if len(certificates) == 0 {
return nil, fmt.Errorf("no valid certificates found")
}
return certificates, nil
}
// GetLeafCerts parses a list of x509 Certificates and returns all of them
// that aren't CA
func GetLeafCerts(certs []*x509.Certificate) []*x509.Certificate {
var leafCerts []*x509.Certificate
for _, cert := range certs {
if cert.IsCA {
continue
}
leafCerts = append(leafCerts, cert)
}
return leafCerts
}
// GetIntermediateCerts parses a list of x509 Certificates and returns all of the
// ones marked as a CA, to be used as intermediates
func GetIntermediateCerts(certs []*x509.Certificate) []*x509.Certificate {
var intCerts []*x509.Certificate
for _, cert := range certs {
if cert.IsCA {
intCerts = append(intCerts, cert)
}
}
return intCerts
}
// ParsePEMPublicKey returns a data.PublicKey from a PEM encoded public key or certificate.
func ParsePEMPublicKey(pubKeyBytes []byte) (data.PublicKey, error) {
pemBlock, _ := pem.Decode(pubKeyBytes)
if pemBlock == nil {
return nil, errors.New("no valid public key found")
}
switch pemBlock.Type {
case "CERTIFICATE":
cert, err := x509.ParseCertificate(pemBlock.Bytes)
if err != nil {
return nil, fmt.Errorf("could not parse provided certificate: %v", err)
}
err = ValidateCertificate(cert, true)
if err != nil {
return nil, fmt.Errorf("invalid certificate: %v", err)
}
return CertToKey(cert), nil
default:
return nil, fmt.Errorf("unsupported PEM block type %q, expected certificate", pemBlock.Type)
}
}
// ValidateCertificate returns an error if the certificate is not valid for notary
// Currently this only ensures that the public key has a large enough modulus if RSA,
// that a non-SHA1 signature algorithm is used, and, optionally, that the certificate has not expired
func ValidateCertificate(c *x509.Certificate, checkExpiry bool) error {
if (c.NotBefore).After(c.NotAfter) {
return fmt.Errorf("certificate validity window is invalid")
}
// Can't have SHA1 sig algorithm
if c.SignatureAlgorithm == x509.SHA1WithRSA || c.SignatureAlgorithm == x509.DSAWithSHA1 || c.SignatureAlgorithm == x509.ECDSAWithSHA1 {
return fmt.Errorf("certificate with CN %s uses invalid SHA1 signature algorithm", c.Subject.CommonName)
}
// If we have an RSA key, make sure it's long enough
if c.PublicKeyAlgorithm == x509.RSA {
rsaKey, ok := c.PublicKey.(*rsa.PublicKey)
if !ok {
return fmt.Errorf("unable to parse RSA public key")
}
if rsaKey.N.BitLen() < notary.MinRSABitSize {
return fmt.Errorf("RSA bit length is too short")
}
}
if checkExpiry {
now := time.Now()
tomorrow := now.AddDate(0, 0, 1)
// Give one day leeway on creation "before" time, check "after" against today
if (tomorrow).Before(c.NotBefore) || now.After(c.NotAfter) {
return data.ErrCertExpired{CN: c.Subject.CommonName}
}
// If this certificate is expiring within 6 months, put out a warning
if (c.NotAfter).Before(time.Now().AddDate(0, 6, 0)) {
logrus.Warnf("certificate with CN %s is near expiry", c.Subject.CommonName)
}
}
return nil
}
// GenerateRSAKey generates an RSA private key and returns a TUF PrivateKey
func GenerateRSAKey(random io.Reader, bits int) (data.PrivateKey, error) {
rsaPrivKey, err := rsa.GenerateKey(random, bits)
if err != nil {
return nil, fmt.Errorf("could not generate private key: %v", err)
}
tufPrivKey, err := RSAToPrivateKey(rsaPrivKey)
if err != nil {
return nil, err
}
logrus.Debugf("generated RSA key with keyID: %s", tufPrivKey.ID())
return tufPrivKey, nil
}
// RSAToPrivateKey converts an rsa.Private key to a TUF data.PrivateKey type
func RSAToPrivateKey(rsaPrivKey *rsa.PrivateKey) (data.PrivateKey, error) {
// Get a DER-encoded representation of the PublicKey
rsaPubBytes, err := x509.MarshalPKIXPublicKey(&rsaPrivKey.PublicKey)
if err != nil {
return nil, fmt.Errorf("failed to marshal public key: %v", err)
}
// Get a DER-encoded representation of the PrivateKey
rsaPrivBytes := x509.MarshalPKCS1PrivateKey(rsaPrivKey)
pubKey := data.NewRSAPublicKey(rsaPubBytes)
return data.NewRSAPrivateKey(pubKey, rsaPrivBytes)
}
// GenerateECDSAKey generates an ECDSA Private key and returns a TUF PrivateKey
func GenerateECDSAKey(random io.Reader) (data.PrivateKey, error) {
ecdsaPrivKey, err := ecdsa.GenerateKey(elliptic.P256(), random)
if err != nil {
return nil, err
}
tufPrivKey, err := ECDSAToPrivateKey(ecdsaPrivKey)
if err != nil {
return nil, err
}
logrus.Debugf("generated ECDSA key with keyID: %s", tufPrivKey.ID())
return tufPrivKey, nil
}
// GenerateED25519Key generates an ED25519 private key and returns a TUF
// PrivateKey. The serialization format we use is just the public key bytes
// followed by the private key bytes
func GenerateED25519Key(random io.Reader) (data.PrivateKey, error) {
pub, priv, err := ed25519.GenerateKey(random)
if err != nil {
return nil, err
}
var serialized [ed25519.PublicKeySize + ed25519.PrivateKeySize]byte
copy(serialized[:], pub[:])
copy(serialized[ed25519.PublicKeySize:], priv[:])
tufPrivKey, err := ED25519ToPrivateKey(serialized[:])
if err != nil {
return nil, err
}
logrus.Debugf("generated ED25519 key with keyID: %s", tufPrivKey.ID())
return tufPrivKey, nil
}
// ECDSAToPrivateKey converts an ecdsa.Private key to a TUF data.PrivateKey type
func ECDSAToPrivateKey(ecdsaPrivKey *ecdsa.PrivateKey) (data.PrivateKey, error) {
// Get a DER-encoded representation of the PublicKey
ecdsaPubBytes, err := x509.MarshalPKIXPublicKey(&ecdsaPrivKey.PublicKey)
if err != nil {
return nil, fmt.Errorf("failed to marshal public key: %v", err)
}
// Get a DER-encoded representation of the PrivateKey
ecdsaPrivKeyBytes, err := x509.MarshalECPrivateKey(ecdsaPrivKey)
if err != nil {
return nil, fmt.Errorf("failed to marshal private key: %v", err)
}
pubKey := data.NewECDSAPublicKey(ecdsaPubBytes)
return data.NewECDSAPrivateKey(pubKey, ecdsaPrivKeyBytes)
}
// ED25519ToPrivateKey converts a serialized ED25519 key to a TUF
// data.PrivateKey type
func ED25519ToPrivateKey(privKeyBytes []byte) (data.PrivateKey, error) {
if len(privKeyBytes) != ed25519.PublicKeySize+ed25519.PrivateKeySize {
return nil, errors.New("malformed ed25519 private key")
}
pubKey := data.NewED25519PublicKey(privKeyBytes[:ed25519.PublicKeySize])
return data.NewED25519PrivateKey(*pubKey, privKeyBytes)
}
func blockType(k data.PrivateKey) (string, error) {
switch k.Algorithm() {
case data.RSAKey, data.RSAx509Key:
return "RSA PRIVATE KEY", nil
case data.ECDSAKey, data.ECDSAx509Key:
return "EC PRIVATE KEY", nil
case data.ED25519Key:
return "ED25519 PRIVATE KEY", nil
default:
return "", fmt.Errorf("algorithm %s not supported", k.Algorithm())
}
}
// KeyToPEM returns a PEM encoded key from a Private Key
func KeyToPEM(privKey data.PrivateKey, role string) ([]byte, error) {
bt, err := blockType(privKey)
if err != nil {
return nil, err
}
headers := map[string]string{}
if role != "" {
headers = map[string]string{
"role": role,
}
}
block := &pem.Block{
Type: bt,
Headers: headers,
Bytes: privKey.Private(),
}
return pem.EncodeToMemory(block), nil
}
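A round-trip sketch through the helpers above, assuming it runs inside this package; the role name is illustrative:
func pemRoundTrip() (data.PrivateKey, error) {
key, err := GenerateECDSAKey(rand.Reader)
if err != nil {
return nil, err
}
pemBytes, err := KeyToPEM(key, "targets")
if err != nil {
return nil, err
}
// The block is unencrypted, so the passphrase argument is ignored.
return ParsePEMPrivateKey(pemBytes, "")
}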
// EncryptPrivateKey returns an encrypted PEM key given a PrivateKey
// and a passphrase
func EncryptPrivateKey(key data.PrivateKey, role, gun, passphrase string) ([]byte, error) {
bt, err := blockType(key)
if err != nil {
return nil, err
}
password := []byte(passphrase)
cipherType := x509.PEMCipherAES256
encryptedPEMBlock, err := x509.EncryptPEMBlock(rand.Reader,
bt,
key.Private(),
password,
cipherType)
if err != nil {
return nil, err
}
if encryptedPEMBlock.Headers == nil {
return nil, fmt.Errorf("unable to encrypt key - invalid PEM file produced")
}
encryptedPEMBlock.Headers["role"] = role
if gun != "" {
encryptedPEMBlock.Headers["gun"] = gun
}
return pem.EncodeToMemory(encryptedPEMBlock), nil
}
// ReadRoleFromPEM returns the value from the role PEM header, if it exists
func ReadRoleFromPEM(pemBytes []byte) string {
pemBlock, _ := pem.Decode(pemBytes)
if pemBlock == nil || pemBlock.Headers == nil {
return ""
}
role, ok := pemBlock.Headers["role"]
if !ok {
return ""
}
return role
}
// CertToKey transforms a single input certificate into its corresponding
// PublicKey
func CertToKey(cert *x509.Certificate) data.PublicKey {
block := pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}
pemdata := pem.EncodeToMemory(&block)
switch cert.PublicKeyAlgorithm {
case x509.RSA:
return data.NewRSAx509PublicKey(pemdata)
case x509.ECDSA:
return data.NewECDSAx509PublicKey(pemdata)
default:
logrus.Debugf("Unknown key type parsed from certificate: %v", cert.PublicKeyAlgorithm)
return nil
}
}
// CertsToKeys transforms each of the input certificate chains into its corresponding
// PublicKey
func CertsToKeys(leafCerts map[string]*x509.Certificate, intCerts map[string][]*x509.Certificate) map[string]data.PublicKey {
keys := make(map[string]data.PublicKey)
for id, leafCert := range leafCerts {
if key, err := CertBundleToKey(leafCert, intCerts[id]); err == nil {
keys[key.ID()] = key
}
}
return keys
}
// CertBundleToKey creates a TUF key from a leaf cert and a list of
// intermediates
func CertBundleToKey(leafCert *x509.Certificate, intCerts []*x509.Certificate) (data.PublicKey, error) {
certBundle := []*x509.Certificate{leafCert}
certBundle = append(certBundle, intCerts...)
certChainPEM, err := CertChainToPEM(certBundle)
if err != nil {
return nil, err
}
var newKey data.PublicKey
// Use the leaf cert's public key algorithm for typing
switch leafCert.PublicKeyAlgorithm {
case x509.RSA:
newKey = data.NewRSAx509PublicKey(certChainPEM)
case x509.ECDSA:
newKey = data.NewECDSAx509PublicKey(certChainPEM)
default:
logrus.Debugf("Unknown key type parsed from certificate: %v", leafCert.PublicKeyAlgorithm)
return nil, x509.ErrUnsupportedAlgorithm
}
return newKey, nil
}
// NewCertificate returns an X509 Certificate following a template, given a GUN and validity interval.
func NewCertificate(gun string, startTime, endTime time.Time) (*x509.Certificate, error) {
serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
if err != nil {
return nil, fmt.Errorf("failed to generate new certificate: %v", err)
}
return &x509.Certificate{
SerialNumber: serialNumber,
Subject: pkix.Name{
CommonName: gun,
},
NotBefore: startTime,
NotAfter: endTime,
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageCodeSigning},
BasicConstraintsValid: true,
}, nil
}
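A sketch of self-signing the returned template with a fresh ECDSA key, using only imports already present in this file; the GUN and ten-year validity window are illustrative:
func selfSignedCertExample() (*x509.Certificate, error) {
privKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
if err != nil {
return nil, err
}
startTime := time.Now()
template, err := NewCertificate("docker.io/library/hello-world", startTime, startTime.AddDate(10, 0, 0))
if err != nil {
return nil, err
}
// The template serves as both subject and issuer, making it self-signed.
derBytes, err := x509.CreateCertificate(rand.Reader, template, template, &privKey.PublicKey, privKey)
if err != nil {
return nil, err
}
return x509.ParseCertificate(derBytes)
}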

View file

@ -1,126 +0,0 @@
package validation
import (
"encoding/json"
"fmt"
)
// VALIDATION ERRORS
// ErrValidation represents a general validation error
type ErrValidation struct {
Msg string
}
func (err ErrValidation) Error() string {
return fmt.Sprintf("An error occurred during validation: %s", err.Msg)
}
// ErrBadHierarchy represents missing metadata: currently, a missing
// snapshot. When delegations are implemented it will also
// represent a missing delegation parent
type ErrBadHierarchy struct {
Missing string
Msg string
}
func (err ErrBadHierarchy) Error() string {
return fmt.Sprintf("Metadata hierarchy is incomplete: %s", err.Msg)
}
// ErrBadRoot represents a failure validating the root
type ErrBadRoot struct {
Msg string
}
func (err ErrBadRoot) Error() string {
return fmt.Sprintf("The root metadata is invalid: %s", err.Msg)
}
// ErrBadTargets represents a failure to validate a targets (incl delegations)
type ErrBadTargets struct {
Msg string
}
func (err ErrBadTargets) Error() string {
return fmt.Sprintf("The targets metadata is invalid: %s", err.Msg)
}
// ErrBadSnapshot represents a failure to validate the snapshot
type ErrBadSnapshot struct {
Msg string
}
func (err ErrBadSnapshot) Error() string {
return fmt.Sprintf("The snapshot metadata is invalid: %s", err.Msg)
}
// END VALIDATION ERRORS
// SerializableError is a struct that can be used to serialize an error as JSON
type SerializableError struct {
Name string
Error error
}
// UnmarshalJSON attempts to unmarshal the error into the right type
func (s *SerializableError) UnmarshalJSON(text []byte) (err error) {
var x struct{ Name string }
err = json.Unmarshal(text, &x)
if err != nil {
return
}
var theError error
switch x.Name {
case "ErrValidation":
var e struct{ Error ErrValidation }
err = json.Unmarshal(text, &e)
theError = e.Error
case "ErrBadHierarchy":
var e struct{ Error ErrBadHierarchy }
err = json.Unmarshal(text, &e)
theError = e.Error
case "ErrBadRoot":
var e struct{ Error ErrBadRoot }
err = json.Unmarshal(text, &e)
theError = e.Error
case "ErrBadTargets":
var e struct{ Error ErrBadTargets }
err = json.Unmarshal(text, &e)
theError = e.Error
case "ErrBadSnapshot":
var e struct{ Error ErrBadSnapshot }
err = json.Unmarshal(text, &e)
theError = e.Error
default:
err = fmt.Errorf("do not know how to unmarshal %s", x.Name)
return
}
if err != nil {
return
}
s.Name = x.Name
s.Error = theError
return nil
}
// NewSerializableError wraps one of the above errors so it can be serialized to JSON
func NewSerializableError(err error) (*SerializableError, error) {
// make sure it's one of our errors
var name string
switch err.(type) {
case ErrValidation:
name = "ErrValidation"
case ErrBadHierarchy:
name = "ErrBadHierarchy"
case ErrBadRoot:
name = "ErrBadRoot"
case ErrBadTargets:
name = "ErrBadTargets"
case ErrBadSnapshot:
name = "ErrBadSnapshot"
default:
return nil, fmt.Errorf("does not support serializing non-validation errors")
}
return &SerializableError{Name: name, Error: err}, nil
}
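A round-trip sketch, assuming it runs inside this package; the message is illustrative:
func serializableRoundTrip() error {
serializable, err := NewSerializableError(ErrBadRoot{Msg: "root is not signed by enough keys"})
if err != nil {
return err
}
raw, err := json.Marshal(serializable)
if err != nil {
return err
}
// UnmarshalJSON above restores the concrete error type from the Name field.
var decoded SerializableError
if err := json.Unmarshal(raw, &decoded); err != nil {
return err
}
fmt.Println(decoded.Name, decoded.Error)
return nil
}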

View file

@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@ -1,2 +0,0 @@
go-shlex is a simple lexer for go that supports shell-style quoting,
commenting, and escaping.

View file

@ -1,457 +0,0 @@
/*
Copyright 2012 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package shlex
/*
Package shlex implements a simple lexer which splits input into tokens using
shell-style rules for quoting and commenting.
*/
import (
"bufio"
"errors"
"fmt"
"io"
"strings"
)
/*
A TokenType is a top-level token; a word, space, comment, unknown.
*/
type TokenType int
/*
A RuneTokenType is the type of a UTF-8 character; a character, quote, space, escape.
*/
type RuneTokenType int
type lexerState int
type Token struct {
tokenType TokenType
value string
}
/*
Two tokens are equal if both their types and values are equal. A nil token can
never equal another token.
*/
func (a *Token) Equal(b *Token) bool {
if a == nil || b == nil {
return false
}
if a.tokenType != b.tokenType {
return false
}
return a.value == b.value
}
const (
RUNE_CHAR string = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789._-,/@$*()+=><:;&^%~|!?[]{}"
RUNE_SPACE string = " \t\r\n"
RUNE_ESCAPING_QUOTE string = "\""
RUNE_NONESCAPING_QUOTE string = "'"
RUNE_ESCAPE = "\\"
RUNE_COMMENT = "#"
RUNETOKEN_UNKNOWN RuneTokenType = 0
RUNETOKEN_CHAR RuneTokenType = 1
RUNETOKEN_SPACE RuneTokenType = 2
RUNETOKEN_ESCAPING_QUOTE RuneTokenType = 3
RUNETOKEN_NONESCAPING_QUOTE RuneTokenType = 4
RUNETOKEN_ESCAPE RuneTokenType = 5
RUNETOKEN_COMMENT RuneTokenType = 6
RUNETOKEN_EOF RuneTokenType = 7
TOKEN_UNKNOWN TokenType = 0
TOKEN_WORD TokenType = 1
TOKEN_SPACE TokenType = 2
TOKEN_COMMENT TokenType = 3
STATE_START lexerState = 0
STATE_INWORD lexerState = 1
STATE_ESCAPING lexerState = 2
STATE_ESCAPING_QUOTED lexerState = 3
STATE_QUOTED_ESCAPING lexerState = 4
STATE_QUOTED lexerState = 5
STATE_COMMENT lexerState = 6
INITIAL_TOKEN_CAPACITY int = 100
)
/*
A type for classifying characters. This allows for different sorts of
classifiers - those accepting extended non-ascii chars, or strict posix
compatibility, for example.
*/
type TokenClassifier struct {
typeMap map[int32]RuneTokenType
}
func addRuneClass(typeMap *map[int32]RuneTokenType, runes string, tokenType RuneTokenType) {
for _, rune := range runes {
(*typeMap)[int32(rune)] = tokenType
}
}
/*
Create a new classifier for basic ASCII characters.
*/
func NewDefaultClassifier() *TokenClassifier {
typeMap := map[int32]RuneTokenType{}
addRuneClass(&typeMap, RUNE_CHAR, RUNETOKEN_CHAR)
addRuneClass(&typeMap, RUNE_SPACE, RUNETOKEN_SPACE)
addRuneClass(&typeMap, RUNE_ESCAPING_QUOTE, RUNETOKEN_ESCAPING_QUOTE)
addRuneClass(&typeMap, RUNE_NONESCAPING_QUOTE, RUNETOKEN_NONESCAPING_QUOTE)
addRuneClass(&typeMap, RUNE_ESCAPE, RUNETOKEN_ESCAPE)
addRuneClass(&typeMap, RUNE_COMMENT, RUNETOKEN_COMMENT)
return &TokenClassifier{
typeMap: typeMap}
}
func (classifier *TokenClassifier) ClassifyRune(rune int32) RuneTokenType {
return classifier.typeMap[rune]
}
/*
A type for turning an input stream into a sequence of strings. Whitespace and
comments are skipped.
*/
type Lexer struct {
tokenizer *Tokenizer
}
/*
Create a new lexer.
*/
func NewLexer(r io.Reader) (*Lexer, error) {
tokenizer, err := NewTokenizer(r)
if err != nil {
return nil, err
}
lexer := &Lexer{tokenizer: tokenizer}
return lexer, nil
}
/*
Return the next word, and an error value. If there are no more words, the error
will be io.EOF.
*/
func (l *Lexer) NextWord() (string, error) {
var token *Token
var err error
for {
token, err = l.tokenizer.NextToken()
if err != nil {
return "", err
}
switch token.tokenType {
case TOKEN_WORD:
{
return token.value, nil
}
case TOKEN_COMMENT:
{
// skip comments
}
default:
{
panic(fmt.Sprintf("Unknown token type: %v", token.tokenType))
}
}
}
return "", io.EOF
}
/*
A type for turning an input stream into a sequence of typed tokens.
*/
type Tokenizer struct {
input *bufio.Reader
classifier *TokenClassifier
}
/*
Create a new tokenizer.
*/
func NewTokenizer(r io.Reader) (*Tokenizer, error) {
input := bufio.NewReader(r)
classifier := NewDefaultClassifier()
tokenizer := &Tokenizer{
input: input,
classifier: classifier}
return tokenizer, nil
}
/*
Scan the stream for the next token.
This uses an internal state machine. It returns an error if it encounters
a rune which it does not know how to handle.
*/
func (t *Tokenizer) scanStream() (*Token, error) {
state := STATE_START
var tokenType TokenType
value := make([]int32, 0, INITIAL_TOKEN_CAPACITY)
var (
nextRune int32
nextRuneType RuneTokenType
err error
)
SCAN:
for {
nextRune, _, err = t.input.ReadRune()
nextRuneType = t.classifier.ClassifyRune(nextRune)
if err != nil {
if err == io.EOF {
nextRuneType = RUNETOKEN_EOF
err = nil
} else {
return nil, err
}
}
switch state {
case STATE_START: // no runes read yet
{
switch nextRuneType {
case RUNETOKEN_EOF:
{
return nil, io.EOF
}
case RUNETOKEN_CHAR:
{
tokenType = TOKEN_WORD
value = append(value, nextRune)
state = STATE_INWORD
}
case RUNETOKEN_SPACE:
{
}
case RUNETOKEN_ESCAPING_QUOTE:
{
tokenType = TOKEN_WORD
state = STATE_QUOTED_ESCAPING
}
case RUNETOKEN_NONESCAPING_QUOTE:
{
tokenType = TOKEN_WORD
state = STATE_QUOTED
}
case RUNETOKEN_ESCAPE:
{
tokenType = TOKEN_WORD
state = STATE_ESCAPING
}
case RUNETOKEN_COMMENT:
{
tokenType = TOKEN_COMMENT
state = STATE_COMMENT
}
default:
{
return nil, errors.New(fmt.Sprintf("Unknown rune: %v", nextRune))
}
}
}
case STATE_INWORD: // in a regular word
{
switch nextRuneType {
case RUNETOKEN_EOF:
{
break SCAN
}
case RUNETOKEN_CHAR, RUNETOKEN_COMMENT:
{
value = append(value, nextRune)
}
case RUNETOKEN_SPACE:
{
t.input.UnreadRune()
break SCAN
}
case RUNETOKEN_ESCAPING_QUOTE:
{
state = STATE_QUOTED_ESCAPING
}
case RUNETOKEN_NONESCAPING_QUOTE:
{
state = STATE_QUOTED
}
case RUNETOKEN_ESCAPE:
{
state = STATE_ESCAPING
}
default:
{
return nil, errors.New(fmt.Sprintf("Uknown rune: %v", nextRune))
}
}
}
case STATE_ESCAPING: // the next rune after an escape character
{
switch nextRuneType {
case RUNETOKEN_EOF:
{
err = errors.New("EOF found after escape character")
break SCAN
}
case RUNETOKEN_CHAR, RUNETOKEN_SPACE, RUNETOKEN_ESCAPING_QUOTE, RUNETOKEN_NONESCAPING_QUOTE, RUNETOKEN_ESCAPE, RUNETOKEN_COMMENT:
{
state = STATE_INWORD
value = append(value, nextRune)
}
default:
{
return nil, errors.New(fmt.Sprintf("Uknown rune: %v", nextRune))
}
}
}
case STATE_ESCAPING_QUOTED: // the next rune after an escape character, in double quotes
{
switch nextRuneType {
case RUNETOKEN_EOF:
{
err = errors.New("EOF found after escape character")
break SCAN
}
case RUNETOKEN_CHAR, RUNETOKEN_SPACE, RUNETOKEN_ESCAPING_QUOTE, RUNETOKEN_NONESCAPING_QUOTE, RUNETOKEN_ESCAPE, RUNETOKEN_COMMENT:
{
state = STATE_QUOTED_ESCAPING
value = append(value, nextRune)
}
default:
{
return nil, errors.New(fmt.Sprintf("Uknown rune: %v", nextRune))
}
}
}
case STATE_QUOTED_ESCAPING: // in escaping double quotes
{
switch nextRuneType {
case RUNETOKEN_EOF:
{
err = errors.New("EOF found when expecting closing quote.")
break SCAN
}
case RUNETOKEN_CHAR, RUNETOKEN_UNKNOWN, RUNETOKEN_SPACE, RUNETOKEN_NONESCAPING_QUOTE, RUNETOKEN_COMMENT:
{
value = append(value, nextRune)
}
case RUNETOKEN_ESCAPING_QUOTE:
{
state = STATE_INWORD
}
case RUNETOKEN_ESCAPE:
{
state = STATE_ESCAPING_QUOTED
}
default:
{
return nil, errors.New(fmt.Sprintf("Uknown rune: %v", nextRune))
}
}
}
case STATE_QUOTED: // in non-escaping single quotes
{
switch nextRuneType {
case RUNETOKEN_EOF:
{
err = errors.New("EOF found when expecting closing quote.")
break SCAN
}
case RUNETOKEN_CHAR, RUNETOKEN_UNKNOWN, RUNETOKEN_SPACE, RUNETOKEN_ESCAPING_QUOTE, RUNETOKEN_ESCAPE, RUNETOKEN_COMMENT:
{
value = append(value, nextRune)
}
case RUNETOKEN_NONESCAPING_QUOTE:
{
state = STATE_INWORD
}
default:
{
return nil, errors.New(fmt.Sprintf("Uknown rune: %v", nextRune))
}
}
}
case STATE_COMMENT:
{
switch nextRuneType {
case RUNETOKEN_EOF:
{
break SCAN
}
case RUNETOKEN_CHAR, RUNETOKEN_UNKNOWN, RUNETOKEN_ESCAPING_QUOTE, RUNETOKEN_ESCAPE, RUNETOKEN_COMMENT, RUNETOKEN_NONESCAPING_QUOTE:
{
value = append(value, nextRune)
}
case RUNETOKEN_SPACE:
{
if nextRune == '\n' {
state = STATE_START
break SCAN
} else {
value = append(value, nextRune)
}
}
default:
{
return nil, errors.New(fmt.Sprintf("Uknown rune: %v", nextRune))
}
}
}
default:
{
panic(fmt.Sprintf("Unexpected state: %v", state))
}
}
}
token := &Token{
tokenType: tokenType,
value: string(value)}
return token, err
}
/*
NextToken returns the next token in the stream, and an error value. If there
are no more tokens available, the error value is io.EOF.
*/
func (t *Tokenizer) NextToken() (*Token, error) {
return t.scanStream()
}
/*
Split splits a string into a slice of strings, based upon shell-style rules
for quoting, escaping, and spaces.
*/
func Split(s string) ([]string, error) {
l, err := NewLexer(strings.NewReader(s))
if err != nil {
return nil, err
}
subStrings := []string{}
for {
word, err := l.NextWord()
if err != nil {
if err == io.EOF {
return subStrings, nil
}
return subStrings, err
}
subStrings = append(subStrings, word)
}
}
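A rough usage sketch of the splitting rules above (not part of the vendored file), assuming the package is imported as shlex from its upstream path github.com/flynn-archive/go-shlex:

```go
package main

import (
	"fmt"

	shlex "github.com/flynn-archive/go-shlex"
)

func main() {
	// Quotes group words and a backslash escapes the following rune,
	// so this input yields three words, not five.
	words, err := shlex.Split(`one "two three" four\ five`)
	if err != nil {
		panic(err)
	}
	fmt.Println(words) // [one two three four five]
}
```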

View file

@@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2013 Mitchell Hashimoto
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View file

@@ -1,46 +0,0 @@
# mapstructure
mapstructure is a Go library for decoding generic map values to structures
and vice versa, while providing helpful error handling.
This library is most useful when decoding values from some data stream (JSON,
Gob, etc.) where you don't _quite_ know the structure of the underlying data
until you read a part of it. You can therefore read a `map[string]interface{}`
and use this library to decode it into the proper underlying native Go
structure.
## Installation
Standard `go get`:
```
$ go get github.com/mitchellh/mapstructure
```
## Usage & Example
For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure).
The `Decode` function has examples associated with it there.
## But Why?!
Go offers fantastic standard libraries for decoding formats such as JSON.
The standard method is to have a struct pre-created, and populate that struct
from the bytes of the encoded format. This is great, but the problem is if
you have configuration or an encoding that changes slightly depending on
specific fields. For example, consider this JSON:
```json
{
"type": "person",
"name": "Mitchell"
}
```
Perhaps we can't populate a specific structure without first reading
the "type" field from the JSON. We could always do two passes over the
decoding of the JSON (reading the "type" first, and the rest later).
However, it is much simpler to just decode this into a `map[string]interface{}`
structure, read the "type" key, then use something like this library
to decode it into the proper structure.
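To make the two-pass idea concrete, here is a minimal sketch (not from the vendored sources) that decodes the JSON above into a generic map, reads "type", and then uses Decode from the file below; the Person struct is a hypothetical target:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type Person struct {
	Name string
}

func main() {
	raw := []byte(`{"type": "person", "name": "Mitchell"}`)

	// First pass: decode into a generic map so we can inspect "type".
	var m map[string]interface{}
	if err := json.Unmarshal(raw, &m); err != nil {
		panic(err)
	}

	// Second pass: once the type is known, decode the map into the
	// concrete structure. Field matching is case-insensitive by default.
	if m["type"] == "person" {
		var p Person
		if err := mapstructure.Decode(m, &p); err != nil {
			panic(err)
		}
		fmt.Println(p.Name) // Mitchell
	}
}
```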

View file

@@ -1,154 +0,0 @@
package mapstructure
import (
"errors"
"reflect"
"strconv"
"strings"
"time"
)
// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns
// it into the proper DecodeHookFunc type, such as DecodeHookFuncType.
func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc {
// Create variables here so we can reference them with the reflect pkg
var f1 DecodeHookFuncType
var f2 DecodeHookFuncKind
// Fill in the variables into this interface and the rest is done
// automatically using the reflect package.
potential := []interface{}{f1, f2}
v := reflect.ValueOf(h)
vt := v.Type()
for _, raw := range potential {
pt := reflect.ValueOf(raw).Type()
if vt.ConvertibleTo(pt) {
return v.Convert(pt).Interface()
}
}
return nil
}
// DecodeHookExec executes the given decode hook. Use this rather than calling
// the hook directly, since it degrades gracefully to the older, backwards
// compatible DecodeHookFunc that took reflect.Kind instead of reflect.Type.
func DecodeHookExec(
raw DecodeHookFunc,
from reflect.Type, to reflect.Type,
data interface{}) (interface{}, error) {
// Build our arguments that reflect expects
argVals := make([]reflect.Value, 3)
argVals[0] = reflect.ValueOf(from)
argVals[1] = reflect.ValueOf(to)
argVals[2] = reflect.ValueOf(data)
switch f := typedDecodeHook(raw).(type) {
case DecodeHookFuncType:
return f(from, to, data)
case DecodeHookFuncKind:
return f(from.Kind(), to.Kind(), data)
default:
return nil, errors.New("invalid decode hook signature")
}
}
// ComposeDecodeHookFunc creates a single DecodeHookFunc that
// automatically composes multiple DecodeHookFuncs.
//
// The composed funcs are called in order, with the result of the
// previous transformation.
func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
return func(
f reflect.Type,
t reflect.Type,
data interface{}) (interface{}, error) {
var err error
for _, f1 := range fs {
data, err = DecodeHookExec(f1, f, t, data)
if err != nil {
return nil, err
}
// Update the from type to match the newly transformed data
f = nil
if val := reflect.ValueOf(data); val.IsValid() {
f = val.Type()
}
}
return data, nil
}
}
// StringToSliceHookFunc returns a DecodeHookFunc that converts
// string to []string by splitting on the given sep.
func StringToSliceHookFunc(sep string) DecodeHookFunc {
return func(
f reflect.Kind,
t reflect.Kind,
data interface{}) (interface{}, error) {
if f != reflect.String || t != reflect.Slice {
return data, nil
}
raw := data.(string)
if raw == "" {
return []string{}, nil
}
return strings.Split(raw, sep), nil
}
}
// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts
// strings to time.Duration.
func StringToTimeDurationHookFunc() DecodeHookFunc {
return func(
f reflect.Type,
t reflect.Type,
data interface{}) (interface{}, error) {
if f.Kind() != reflect.String {
return data, nil
}
if t != reflect.TypeOf(time.Duration(5)) {
return data, nil
}
// Convert it by parsing
return time.ParseDuration(data.(string))
}
}
func WeaklyTypedHook(
f reflect.Kind,
t reflect.Kind,
data interface{}) (interface{}, error) {
dataVal := reflect.ValueOf(data)
switch t {
case reflect.String:
switch f {
case reflect.Bool:
if dataVal.Bool() {
return "1", nil
} else {
return "0", nil
}
case reflect.Float32:
return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil
case reflect.Int:
return strconv.FormatInt(dataVal.Int(), 10), nil
case reflect.Slice:
dataType := dataVal.Type()
elemKind := dataType.Elem().Kind()
if elemKind == reflect.Uint8 {
return string(dataVal.Interface().([]uint8)), nil
}
case reflect.Uint:
return strconv.FormatUint(dataVal.Uint(), 10), nil
}
}
return data, nil
}
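A hedged sketch of how these hooks compose in practice, using only the functions defined above plus DecoderConfig and NewDecoder from the main file; the Config struct and input map are illustrative:

```go
package main

import (
	"fmt"
	"time"

	"github.com/mitchellh/mapstructure"
)

type Config struct {
	Timeout time.Duration
	Tags    []string
}

func main() {
	input := map[string]interface{}{
		"timeout": "1m30s",
		"tags":    "a,b,c",
	}

	var c Config
	decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		// Hooks run in order; each sees the output of the previous one.
		DecodeHook: mapstructure.ComposeDecodeHookFunc(
			mapstructure.StringToTimeDurationHookFunc(),
			mapstructure.StringToSliceHookFunc(","),
		),
		Result: &c,
	})
	if err != nil {
		panic(err)
	}
	if err := decoder.Decode(input); err != nil {
		panic(err)
	}
	fmt.Println(c.Timeout, c.Tags) // 1m30s [a b c]
}
```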

View file

@@ -1,50 +0,0 @@
package mapstructure
import (
"errors"
"fmt"
"sort"
"strings"
)
// Error implements the error interface and can represent multiple
// errors that occur in the course of a single decode.
type Error struct {
Errors []string
}
func (e *Error) Error() string {
points := make([]string, len(e.Errors))
for i, err := range e.Errors {
points[i] = fmt.Sprintf("* %s", err)
}
sort.Strings(points)
return fmt.Sprintf(
"%d error(s) decoding:\n\n%s",
len(e.Errors), strings.Join(points, "\n"))
}
// WrappedErrors implements the errwrap.Wrapper interface to make this
// return value more useful with the errwrap and go-multierror libraries.
func (e *Error) WrappedErrors() []error {
if e == nil {
return nil
}
result := make([]error, len(e.Errors))
for i, e := range e.Errors {
result[i] = errors.New(e)
}
return result
}
func appendErrors(errors []string, err error) []string {
switch e := err.(type) {
case *Error:
return append(errors, e.Errors...)
default:
return append(errors, e.Error())
}
}

View file

@@ -1,782 +0,0 @@
// The mapstructure package exposes functionality to convert an
// arbitrary map[string]interface{} into a native Go structure.
//
// The Go structure can be arbitrarily complex, containing slices,
// other structs, etc. and the decoder will properly decode nested
// maps and so on into the proper structures in the native Go struct.
// See the examples to see what the decoder is capable of.
package mapstructure
import (
"encoding/json"
"errors"
"fmt"
"reflect"
"sort"
"strconv"
"strings"
)
// DecodeHookFunc is the callback function that can be used for
// data transformations. See "DecodeHook" in the DecoderConfig
// struct.
//
// The type should be DecodeHookFuncType or DecodeHookFuncKind.
// Either is accepted. Types are a superset of Kinds (a Type can report its
// Kind) and are generally richer to work with, but Kinds are simpler if
// that is all you need.
//
// The reason DecodeHookFunc is multi-typed is for backwards compatibility:
// we started with Kinds and then realized Types were the better solution,
// but have a promise to not break backwards compat so we now support
// both.
type DecodeHookFunc interface{}
type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error)
type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error)
// DecoderConfig is the configuration that is used to create a new decoder
// and allows customization of various aspects of decoding.
type DecoderConfig struct {
// DecodeHook, if set, will be called before any decoding and any
// type conversion (if WeaklyTypedInput is on). This lets you modify
// the values before they're set down onto the resulting struct.
//
// If an error is returned, the entire decode will fail with that
// error.
DecodeHook DecodeHookFunc
// If ErrorUnused is true, then it is an error for there to exist
// keys in the original map that were unused in the decoding process
// (extra keys).
ErrorUnused bool
// ZeroFields, if set to true, will zero fields before writing them.
// For example, a map will be emptied before decoded values are put in
// it. If this is false, a map will be merged.
ZeroFields bool
// If WeaklyTypedInput is true, the decoder will make the following
// "weak" conversions:
//
// - bools to string (true = "1", false = "0")
// - numbers to string (base 10)
// - bools to int/uint (true = 1, false = 0)
// - strings to int/uint (base implied by prefix)
// - int to bool (true if value != 0)
// - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F,
// FALSE, false, False. Anything else is an error)
// - empty array = empty map and vice versa
// - negative numbers to overflowed uint values (base 10)
// - slice of maps to a merged map
//
WeaklyTypedInput bool
// Metadata is the struct that will contain extra metadata about
// the decoding. If this is nil, then no metadata will be tracked.
Metadata *Metadata
// Result is a pointer to the struct that will contain the decoded
// value.
Result interface{}
// The tag name that mapstructure reads for field names. This
// defaults to "mapstructure"
TagName string
}
// A Decoder takes a raw interface value and turns it into structured
// data, keeping track of rich error information along the way in case
// anything goes wrong. Unlike the basic top-level Decode method, you can
// more finely control how the Decoder behaves using the DecoderConfig
// structure. The top-level Decode method is just a convenience that sets
// up the most basic Decoder.
type Decoder struct {
config *DecoderConfig
}
// Metadata contains information about decoding a structure that
// is tedious or difficult to get otherwise.
type Metadata struct {
// Keys are the keys of the structure which were successfully decoded
Keys []string
// Unused is a slice of keys that were found in the raw value but
// weren't decoded since there was no matching field in the result interface
Unused []string
}
// Decode takes a map and uses reflection to convert it into the
// given Go native structure. rawVal must be a pointer to a struct.
func Decode(m interface{}, rawVal interface{}) error {
config := &DecoderConfig{
Metadata: nil,
Result: rawVal,
}
decoder, err := NewDecoder(config)
if err != nil {
return err
}
return decoder.Decode(m)
}
// WeakDecode is the same as Decode but is shorthand to enable
// WeaklyTypedInput. See DecoderConfig for more info.
func WeakDecode(input, output interface{}) error {
config := &DecoderConfig{
Metadata: nil,
Result: output,
WeaklyTypedInput: true,
}
decoder, err := NewDecoder(config)
if err != nil {
return err
}
return decoder.Decode(input)
}
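A small illustrative sketch of WeakDecode, exercising a few of the weak conversions listed on DecoderConfig above; the Flags struct and values are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type Flags struct {
	Port    int
	Verbose bool
	Name    string
}

func main() {
	// Every value below has the "wrong" type; WeakDecode applies the
	// weak conversions documented on DecoderConfig.WeaklyTypedInput.
	input := map[string]interface{}{
		"port":    "8080", // string -> int
		"verbose": 1,      // int -> bool
		"name":    false,  // bool -> string ("0")
	}

	var f Flags
	if err := mapstructure.WeakDecode(input, &f); err != nil {
		panic(err)
	}
	fmt.Println(f.Port, f.Verbose, f.Name) // 8080 true 0
}
```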
// NewDecoder returns a new decoder for the given configuration. Once
// a decoder has been returned, the same configuration must not be used
// again.
func NewDecoder(config *DecoderConfig) (*Decoder, error) {
val := reflect.ValueOf(config.Result)
if val.Kind() != reflect.Ptr {
return nil, errors.New("result must be a pointer")
}
val = val.Elem()
if !val.CanAddr() {
return nil, errors.New("result must be addressable (a pointer)")
}
if config.Metadata != nil {
if config.Metadata.Keys == nil {
config.Metadata.Keys = make([]string, 0)
}
if config.Metadata.Unused == nil {
config.Metadata.Unused = make([]string, 0)
}
}
if config.TagName == "" {
config.TagName = "mapstructure"
}
result := &Decoder{
config: config,
}
return result, nil
}
// Decode decodes the given raw interface to the target pointer specified
// by the configuration.
func (d *Decoder) Decode(raw interface{}) error {
return d.decode("", raw, reflect.ValueOf(d.config.Result).Elem())
}
// Decodes an unknown data type into a specific reflection value.
func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error {
if data == nil {
// If the data is nil, then we don't set anything.
return nil
}
dataVal := reflect.ValueOf(data)
if !dataVal.IsValid() {
// If the data value is invalid, then we just set the value
// to be the zero value.
val.Set(reflect.Zero(val.Type()))
return nil
}
if d.config.DecodeHook != nil {
// We have a DecodeHook, so let's pre-process the data.
var err error
data, err = DecodeHookExec(
d.config.DecodeHook,
dataVal.Type(), val.Type(), data)
if err != nil {
return err
}
}
var err error
dataKind := getKind(val)
switch dataKind {
case reflect.Bool:
err = d.decodeBool(name, data, val)
case reflect.Interface:
err = d.decodeBasic(name, data, val)
case reflect.String:
err = d.decodeString(name, data, val)
case reflect.Int:
err = d.decodeInt(name, data, val)
case reflect.Uint:
err = d.decodeUint(name, data, val)
case reflect.Float32:
err = d.decodeFloat(name, data, val)
case reflect.Struct:
err = d.decodeStruct(name, data, val)
case reflect.Map:
err = d.decodeMap(name, data, val)
case reflect.Ptr:
err = d.decodePtr(name, data, val)
case reflect.Slice:
err = d.decodeSlice(name, data, val)
default:
// If we reached this point then we weren't able to decode it
return fmt.Errorf("%s: unsupported type: %s", name, dataKind)
}
// If we reached here, then we successfully decoded SOMETHING, so
// mark the key as used if we're tracking metadata.
if d.config.Metadata != nil && name != "" {
d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
}
return err
}
// This decodes a basic type (bool, int, string, etc.) and sets the
// value to "data" of that type.
func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error {
dataVal := reflect.ValueOf(data)
if !dataVal.IsValid() {
dataVal = reflect.Zero(val.Type())
}
dataValType := dataVal.Type()
if !dataValType.AssignableTo(val.Type()) {
return fmt.Errorf(
"'%s' expected type '%s', got '%s'",
name, val.Type(), dataValType)
}
val.Set(dataVal)
return nil
}
func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error {
dataVal := reflect.ValueOf(data)
dataKind := getKind(dataVal)
converted := true
switch {
case dataKind == reflect.String:
val.SetString(dataVal.String())
case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
if dataVal.Bool() {
val.SetString("1")
} else {
val.SetString("0")
}
case dataKind == reflect.Int && d.config.WeaklyTypedInput:
val.SetString(strconv.FormatInt(dataVal.Int(), 10))
case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
val.SetString(strconv.FormatUint(dataVal.Uint(), 10))
case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64))
case dataKind == reflect.Slice && d.config.WeaklyTypedInput:
dataType := dataVal.Type()
elemKind := dataType.Elem().Kind()
switch {
case elemKind == reflect.Uint8:
val.SetString(string(dataVal.Interface().([]uint8)))
default:
converted = false
}
default:
converted = false
}
if !converted {
return fmt.Errorf(
"'%s' expected type '%s', got unconvertible type '%s'",
name, val.Type(), dataVal.Type())
}
return nil
}
func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error {
dataVal := reflect.ValueOf(data)
dataKind := getKind(dataVal)
dataType := dataVal.Type()
switch {
case dataKind == reflect.Int:
val.SetInt(dataVal.Int())
case dataKind == reflect.Uint:
val.SetInt(int64(dataVal.Uint()))
case dataKind == reflect.Float32:
val.SetInt(int64(dataVal.Float()))
case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
if dataVal.Bool() {
val.SetInt(1)
} else {
val.SetInt(0)
}
case dataKind == reflect.String && d.config.WeaklyTypedInput:
i, err := strconv.ParseInt(dataVal.String(), 0, val.Type().Bits())
if err == nil {
val.SetInt(i)
} else {
return fmt.Errorf("cannot parse '%s' as int: %s", name, err)
}
case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
jn := data.(json.Number)
i, err := jn.Int64()
if err != nil {
return fmt.Errorf(
"error decoding json.Number into %s: %s", name, err)
}
val.SetInt(i)
default:
return fmt.Errorf(
"'%s' expected type '%s', got unconvertible type '%s'",
name, val.Type(), dataVal.Type())
}
return nil
}
func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error {
dataVal := reflect.ValueOf(data)
dataKind := getKind(dataVal)
switch {
case dataKind == reflect.Int:
i := dataVal.Int()
if i < 0 && !d.config.WeaklyTypedInput {
return fmt.Errorf("cannot parse '%s', %d overflows uint",
name, i)
}
val.SetUint(uint64(i))
case dataKind == reflect.Uint:
val.SetUint(dataVal.Uint())
case dataKind == reflect.Float32:
f := dataVal.Float()
if f < 0 && !d.config.WeaklyTypedInput {
return fmt.Errorf("cannot parse '%s', %f overflows uint",
name, f)
}
val.SetUint(uint64(f))
case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
if dataVal.Bool() {
val.SetUint(1)
} else {
val.SetUint(0)
}
case dataKind == reflect.String && d.config.WeaklyTypedInput:
i, err := strconv.ParseUint(dataVal.String(), 0, val.Type().Bits())
if err == nil {
val.SetUint(i)
} else {
return fmt.Errorf("cannot parse '%s' as uint: %s", name, err)
}
default:
return fmt.Errorf(
"'%s' expected type '%s', got unconvertible type '%s'",
name, val.Type(), dataVal.Type())
}
return nil
}
func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error {
dataVal := reflect.ValueOf(data)
dataKind := getKind(dataVal)
switch {
case dataKind == reflect.Bool:
val.SetBool(dataVal.Bool())
case dataKind == reflect.Int && d.config.WeaklyTypedInput:
val.SetBool(dataVal.Int() != 0)
case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
val.SetBool(dataVal.Uint() != 0)
case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
val.SetBool(dataVal.Float() != 0)
case dataKind == reflect.String && d.config.WeaklyTypedInput:
b, err := strconv.ParseBool(dataVal.String())
if err == nil {
val.SetBool(b)
} else if dataVal.String() == "" {
val.SetBool(false)
} else {
return fmt.Errorf("cannot parse '%s' as bool: %s", name, err)
}
default:
return fmt.Errorf(
"'%s' expected type '%s', got unconvertible type '%s'",
name, val.Type(), dataVal.Type())
}
return nil
}
func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error {
dataVal := reflect.ValueOf(data)
dataKind := getKind(dataVal)
dataType := dataVal.Type()
switch {
case dataKind == reflect.Int:
val.SetFloat(float64(dataVal.Int()))
case dataKind == reflect.Uint:
val.SetFloat(float64(dataVal.Uint()))
case dataKind == reflect.Float32:
val.SetFloat(float64(dataVal.Float()))
case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
if dataVal.Bool() {
val.SetFloat(1)
} else {
val.SetFloat(0)
}
case dataKind == reflect.String && d.config.WeaklyTypedInput:
f, err := strconv.ParseFloat(dataVal.String(), val.Type().Bits())
if err == nil {
val.SetFloat(f)
} else {
return fmt.Errorf("cannot parse '%s' as float: %s", name, err)
}
case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
jn := data.(json.Number)
i, err := jn.Float64()
if err != nil {
return fmt.Errorf(
"error decoding json.Number into %s: %s", name, err)
}
val.SetFloat(i)
default:
return fmt.Errorf(
"'%s' expected type '%s', got unconvertible type '%s'",
name, val.Type(), dataVal.Type())
}
return nil
}
func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error {
valType := val.Type()
valKeyType := valType.Key()
valElemType := valType.Elem()
// By default we overwrite keys in the current map
valMap := val
// If the map is nil or we're purposely zeroing fields, make a new map
if valMap.IsNil() || d.config.ZeroFields {
// Make a new map to hold our result
mapType := reflect.MapOf(valKeyType, valElemType)
valMap = reflect.MakeMap(mapType)
}
// Check input type
dataVal := reflect.Indirect(reflect.ValueOf(data))
if dataVal.Kind() != reflect.Map {
// In weak mode, we accept a slice of maps as an input...
if d.config.WeaklyTypedInput {
switch dataVal.Kind() {
case reflect.Array, reflect.Slice:
// Special case for BC reasons (covered by tests)
if dataVal.Len() == 0 {
val.Set(valMap)
return nil
}
for i := 0; i < dataVal.Len(); i++ {
err := d.decode(
fmt.Sprintf("%s[%d]", name, i),
dataVal.Index(i).Interface(), val)
if err != nil {
return err
}
}
return nil
}
}
return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind())
}
// Accumulate errors
errors := make([]string, 0)
for _, k := range dataVal.MapKeys() {
fieldName := fmt.Sprintf("%s[%s]", name, k)
// First decode the key into the proper type
currentKey := reflect.Indirect(reflect.New(valKeyType))
if err := d.decode(fieldName, k.Interface(), currentKey); err != nil {
errors = appendErrors(errors, err)
continue
}
// Next decode the data into the proper type
v := dataVal.MapIndex(k).Interface()
currentVal := reflect.Indirect(reflect.New(valElemType))
if err := d.decode(fieldName, v, currentVal); err != nil {
errors = appendErrors(errors, err)
continue
}
valMap.SetMapIndex(currentKey, currentVal)
}
// Set the built up map to the value
val.Set(valMap)
// If we had errors, return those
if len(errors) > 0 {
return &Error{errors}
}
return nil
}
func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error {
// Create an element of the concrete (non pointer) type and decode
// into that. Then set the value of the pointer to this type.
valType := val.Type()
valElemType := valType.Elem()
realVal := reflect.New(valElemType)
if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil {
return err
}
val.Set(realVal)
return nil
}
func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error {
dataVal := reflect.Indirect(reflect.ValueOf(data))
dataValKind := dataVal.Kind()
valType := val.Type()
valElemType := valType.Elem()
sliceType := reflect.SliceOf(valElemType)
// Check input type
if dataValKind != reflect.Array && dataValKind != reflect.Slice {
// Accept empty map instead of array/slice in weakly typed mode
if d.config.WeaklyTypedInput && dataVal.Kind() == reflect.Map && dataVal.Len() == 0 {
val.Set(reflect.MakeSlice(sliceType, 0, 0))
return nil
} else {
return fmt.Errorf(
"'%s': source data must be an array or slice, got %s", name, dataValKind)
}
}
// Make a new slice to hold our result, same size as the original data.
valSlice := reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len())
// Accumulate any errors
errors := make([]string, 0)
for i := 0; i < dataVal.Len(); i++ {
currentData := dataVal.Index(i).Interface()
currentField := valSlice.Index(i)
fieldName := fmt.Sprintf("%s[%d]", name, i)
if err := d.decode(fieldName, currentData, currentField); err != nil {
errors = appendErrors(errors, err)
}
}
// Finally, set the value to the slice we built up
val.Set(valSlice)
// If there were errors, we return those
if len(errors) > 0 {
return &Error{errors}
}
return nil
}
func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error {
dataVal := reflect.Indirect(reflect.ValueOf(data))
// If the type of the value to write to and the data match directly,
// then we just set it directly instead of recursing into the structure.
if dataVal.Type() == val.Type() {
val.Set(dataVal)
return nil
}
dataValKind := dataVal.Kind()
if dataValKind != reflect.Map {
return fmt.Errorf("'%s' expected a map, got '%s'", name, dataValKind)
}
dataValType := dataVal.Type()
if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface {
return fmt.Errorf(
"'%s' needs a map with string keys, has '%s' keys",
name, dataValType.Key().Kind())
}
dataValKeys := make(map[reflect.Value]struct{})
dataValKeysUnused := make(map[interface{}]struct{})
for _, dataValKey := range dataVal.MapKeys() {
dataValKeys[dataValKey] = struct{}{}
dataValKeysUnused[dataValKey.Interface()] = struct{}{}
}
errors := make([]string, 0)
// This slice will keep track of all the structs we'll be decoding.
// There can be more than one struct if there are embedded structs
// that are squashed.
structs := make([]reflect.Value, 1, 5)
structs[0] = val
// Compile the list of all the fields that we're going to be decoding
// from all the structs.
fields := make(map[*reflect.StructField]reflect.Value)
for len(structs) > 0 {
structVal := structs[0]
structs = structs[1:]
structType := structVal.Type()
for i := 0; i < structType.NumField(); i++ {
fieldType := structType.Field(i)
fieldKind := fieldType.Type.Kind()
// If "squash" is specified in the tag, we squash the field down.
squash := false
tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",")
for _, tag := range tagParts[1:] {
if tag == "squash" {
squash = true
break
}
}
if squash {
if fieldKind != reflect.Struct {
errors = appendErrors(errors,
fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldKind))
} else {
structs = append(structs, structVal.FieldByName(fieldType.Name)) // structVal, not val: the squashed field belongs to the struct currently being walked
}
continue
}
// Normal struct field, store it away
fields[&fieldType] = structVal.Field(i)
}
}
for fieldType, field := range fields {
fieldName := fieldType.Name
tagValue := fieldType.Tag.Get(d.config.TagName)
tagValue = strings.SplitN(tagValue, ",", 2)[0]
if tagValue != "" {
fieldName = tagValue
}
rawMapKey := reflect.ValueOf(fieldName)
rawMapVal := dataVal.MapIndex(rawMapKey)
if !rawMapVal.IsValid() {
// Do a slower search by iterating over each key and
// doing case-insensitive search.
for dataValKey := range dataValKeys {
mK, ok := dataValKey.Interface().(string)
if !ok {
// Not a string key
continue
}
if strings.EqualFold(mK, fieldName) {
rawMapKey = dataValKey
rawMapVal = dataVal.MapIndex(dataValKey)
break
}
}
if !rawMapVal.IsValid() {
// There was no matching key in the map for the value in
// the struct. Just ignore.
continue
}
}
// Delete the key we're using from the unused map so we stop tracking
delete(dataValKeysUnused, rawMapKey.Interface())
if !field.IsValid() {
// This should never happen
panic("field is not valid")
}
// If we can't set the field, then it is unexported or something,
// and we just continue onwards.
if !field.CanSet() {
continue
}
// If the name is empty string, then we're at the root, and we
// don't dot-join the fields.
if name != "" {
fieldName = fmt.Sprintf("%s.%s", name, fieldName)
}
if err := d.decode(fieldName, rawMapVal.Interface(), field); err != nil {
errors = appendErrors(errors, err)
}
}
if d.config.ErrorUnused && len(dataValKeysUnused) > 0 {
keys := make([]string, 0, len(dataValKeysUnused))
for rawKey := range dataValKeysUnused {
keys = append(keys, rawKey.(string))
}
sort.Strings(keys)
err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", "))
errors = appendErrors(errors, err)
}
if len(errors) > 0 {
return &Error{errors}
}
// Add the unused keys to the list of unused keys if we're tracking metadata
if d.config.Metadata != nil {
for rawKey := range dataValKeysUnused {
key := rawKey.(string)
if name != "" {
key = fmt.Sprintf("%s.%s", name, key)
}
d.config.Metadata.Unused = append(d.config.Metadata.Unused, key)
}
}
return nil
}
func getKind(val reflect.Value) reflect.Kind {
kind := val.Kind()
switch {
case kind >= reflect.Int && kind <= reflect.Int64:
return reflect.Int
case kind >= reflect.Uint && kind <= reflect.Uint64:
return reflect.Uint
case kind >= reflect.Float32 && kind <= reflect.Float64:
return reflect.Float32
default:
return kind
}
}

View file

@@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2015 xeipuuv
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@@ -1,8 +0,0 @@
# gojsonpointer
An implementation of JSON Pointer - Go language
## References
http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07
### Note
Section 4 ("Evaluation") of the reference above, starting with 'If the currently referenced value is a JSON array, the reference token MUST contain either...', is not implemented.
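A brief usage sketch (not part of the vendored package) showing NewJsonPointer and Get from the file below; the document literal is illustrative:

```go
package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonpointer"
)

func main() {
	document := map[string]interface{}{
		"services": []interface{}{
			map[string]interface{}{"name": "web"},
		},
	}

	// "/services/0/name" walks: map key -> array index -> map key.
	pointer, err := gojsonpointer.NewJsonPointer("/services/0/name")
	if err != nil {
		panic(err)
	}
	value, kind, err := pointer.Get(document)
	if err != nil {
		panic(err)
	}
	fmt.Println(value, kind) // web string
}
```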

View file

@@ -1,217 +0,0 @@
// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// author xeipuuv
// author-github https://github.com/xeipuuv
// author-mail xeipuuv@gmail.com
//
// repository-name gojsonpointer
// repository-desc An implementation of JSON Pointer - Go language
//
// description Main and unique file.
//
// created 25-02-2013
package gojsonpointer
import (
"errors"
"fmt"
"reflect"
"strconv"
"strings"
)
const (
const_empty_pointer = ``
const_pointer_separator = `/`
const_invalid_start = `JSON pointer must be empty or start with a "` + const_pointer_separator + `"`
)
type implStruct struct {
mode string // "SET" or "GET"
inDocument interface{}
setInValue interface{}
getOutNode interface{}
getOutKind reflect.Kind
outError error
}
func NewJsonPointer(jsonPointerString string) (JsonPointer, error) {
var p JsonPointer
err := p.parse(jsonPointerString)
return p, err
}
type JsonPointer struct {
referenceTokens []string
}
// "Constructor", parses the given string JSON pointer
func (p *JsonPointer) parse(jsonPointerString string) error {
var err error
if jsonPointerString != const_empty_pointer {
if !strings.HasPrefix(jsonPointerString, const_pointer_separator) {
err = errors.New(const_invalid_start)
} else {
referenceTokens := strings.Split(jsonPointerString, const_pointer_separator)
for _, referenceToken := range referenceTokens[1:] {
p.referenceTokens = append(p.referenceTokens, referenceToken)
}
}
}
return err
}
// Uses the pointer to retrieve a value from a JSON document
func (p *JsonPointer) Get(document interface{}) (interface{}, reflect.Kind, error) {
is := &implStruct{mode: "GET", inDocument: document}
p.implementation(is)
return is.getOutNode, is.getOutKind, is.outError
}
// Uses the pointer to update a value from a JSON document
func (p *JsonPointer) Set(document interface{}, value interface{}) (interface{}, error) {
is := &implStruct{mode: "SET", inDocument: document, setInValue: value}
p.implementation(is)
return document, is.outError
}
// Both Get and Set functions use the same implementation to avoid code duplication
func (p *JsonPointer) implementation(i *implStruct) {
kind := reflect.Invalid
// Full document when empty
if len(p.referenceTokens) == 0 {
i.getOutNode = i.inDocument
i.getOutKind = kind
i.outError = nil
return
}
node := i.inDocument
for ti, token := range p.referenceTokens {
decodedToken := decodeReferenceToken(token)
isLastToken := ti == len(p.referenceTokens)-1
rValue := reflect.ValueOf(node)
kind = rValue.Kind()
switch kind {
case reflect.Map:
m := node.(map[string]interface{})
if _, ok := m[decodedToken]; ok {
node = m[decodedToken]
if isLastToken && i.mode == "SET" {
m[decodedToken] = i.setInValue
}
} else {
i.outError = errors.New(fmt.Sprintf("Object has no key '%s'", token))
i.getOutKind = kind
i.getOutNode = nil
return
}
case reflect.Slice:
s := node.([]interface{})
tokenIndex, err := strconv.Atoi(token)
if err != nil {
i.outError = errors.New(fmt.Sprintf("Invalid array index '%s'", token))
i.getOutKind = kind
i.getOutNode = nil
return
}
sLength := len(s)
if tokenIndex < 0 || tokenIndex >= sLength {
i.outError = errors.New(fmt.Sprintf("Out of bound array[0,%d] index '%d'", sLength, tokenIndex))
i.getOutKind = kind
i.getOutNode = nil
return
}
node = s[tokenIndex]
if isLastToken && i.mode == "SET" {
s[tokenIndex] = i.setInValue
}
default:
i.outError = errors.New(fmt.Sprintf("Invalid token reference '%s'", token))
i.getOutKind = kind
i.getOutNode = nil
return
}
}
rValue := reflect.ValueOf(node)
kind = rValue.Kind()
i.getOutNode = node
i.getOutKind = kind
i.outError = nil
}
// Pointer to string representation function
func (p *JsonPointer) String() string {
if len(p.referenceTokens) == 0 {
return const_empty_pointer
}
pointerString := const_pointer_separator + strings.Join(p.referenceTokens, const_pointer_separator)
return pointerString
}
// Specific JSON pointer encoding here
// ~0 => ~
// ~1 => /
// ... and vice versa
const (
const_encoded_reference_token_0 = `~0`
const_encoded_reference_token_1 = `~1`
const_decoded_reference_token_0 = `~`
const_decoded_reference_token_1 = `/`
)
func decodeReferenceToken(token string) string {
step1 := strings.Replace(token, const_encoded_reference_token_1, const_decoded_reference_token_1, -1)
step2 := strings.Replace(step1, const_encoded_reference_token_0, const_decoded_reference_token_0, -1)
return step2
}
func encodeReferenceToken(token string) string {
step1 := strings.Replace(token, const_decoded_reference_token_1, const_encoded_reference_token_1, -1)
step2 := strings.Replace(step1, const_decoded_reference_token_0, const_encoded_reference_token_0, -1)
return step2
}
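As a short illustrative fragment (assuming the same gojsonpointer import and an fmt import as in the sketch above), the escaping rules in action:

```go
// A key containing "/" is escaped as "~1" in the pointer, and "~" as "~0";
// the parser decodes each reference token before lookup.
doc := map[string]interface{}{
	"a/b": map[string]interface{}{"m~n": 1},
}
p, _ := gojsonpointer.NewJsonPointer("/a~1b/m~0n")
v, _, _ := p.Get(doc)
fmt.Println(v) // 1
```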

View file

@@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2015 xeipuuv
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@@ -1,10 +0,0 @@
# gojsonreference
An implementation of JSON Reference - Go language
## Dependencies
https://github.com/xeipuuv/gojsonpointer
## References
http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07
http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03
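A minimal sketch (not from the vendored sources) resolving a relative child reference against a parent via Inherits from the file below; the URLs are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonreference"
)

func main() {
	parent, err := gojsonreference.NewJsonReference("http://example.com/schemas/root.json")
	if err != nil {
		panic(err)
	}
	child, err := gojsonreference.NewJsonReference("child.json#/definitions/item")
	if err != nil {
		panic(err)
	}

	// Resolve the relative child reference against the parent URL.
	resolved, err := parent.Inherits(child)
	if err != nil {
		panic(err)
	}
	fmt.Println(resolved.String())              // http://example.com/schemas/child.json#/definitions/item
	fmt.Println(resolved.GetPointer().String()) // /definitions/item
}
```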

View file

@@ -1,141 +0,0 @@
// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// author xeipuuv
// author-github https://github.com/xeipuuv
// author-mail xeipuuv@gmail.com
//
// repository-name gojsonreference
// repository-desc An implementation of JSON Reference - Go language
//
// description Main and unique file.
//
// created 26-02-2013
package gojsonreference
import (
"errors"
"github.com/xeipuuv/gojsonpointer"
"net/url"
"path/filepath"
"runtime"
"strings"
)
const (
const_fragment_char = `#`
)
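// NewJsonReference parses the given string and returns the resulting JsonReference.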
func NewJsonReference(jsonReferenceString string) (JsonReference, error) {
var r JsonReference
err := r.parse(jsonReferenceString)
return r, err
}
type JsonReference struct {
referenceUrl *url.URL
referencePointer gojsonpointer.JsonPointer
HasFullUrl bool
HasUrlPathOnly bool
HasFragmentOnly bool
HasFileScheme bool
HasFullFilePath bool
}
func (r *JsonReference) GetUrl() *url.URL {
return r.referenceUrl
}
func (r *JsonReference) GetPointer() *gojsonpointer.JsonPointer {
return &r.referencePointer
}
func (r *JsonReference) String() string {
if r.referenceUrl != nil {
return r.referenceUrl.String()
}
if r.HasFragmentOnly {
return const_fragment_char + r.referencePointer.String()
}
return r.referencePointer.String()
}
func (r *JsonReference) IsCanonical() bool {
return (r.HasFileScheme && r.HasFullFilePath) || (!r.HasFileScheme && r.HasFullUrl)
}
// "Constructor", parses the given string JSON reference
func (r *JsonReference) parse(jsonReferenceString string) (err error) {
r.referenceUrl, err = url.Parse(jsonReferenceString)
if err != nil {
return
}
refUrl := r.referenceUrl
if refUrl.Scheme != "" && refUrl.Host != "" {
r.HasFullUrl = true
} else {
if refUrl.Path != "" {
r.HasUrlPathOnly = true
} else if refUrl.RawQuery == "" && refUrl.Fragment != "" {
r.HasFragmentOnly = true
}
}
r.HasFileScheme = refUrl.Scheme == "file"
if runtime.GOOS == "windows" {
// on Windows, a file URL may have an extra leading slash, and if it
// doesn't then its first component will be treated as the host by the
// Go runtime
if refUrl.Host == "" && strings.HasPrefix(refUrl.Path, "/") {
r.HasFullFilePath = filepath.IsAbs(refUrl.Path[1:])
} else {
r.HasFullFilePath = filepath.IsAbs(refUrl.Host + refUrl.Path)
}
} else {
r.HasFullFilePath = filepath.IsAbs(refUrl.Path)
}
// An invalid json-pointer error just means the URL has no json-pointer fragment; ignore it.
r.referencePointer, _ = gojsonpointer.NewJsonPointer(refUrl.Fragment)
return
}
// Inherits creates a new reference from a parent and a child.
// If the child cannot inherit from the parent, an error is returned.
func (r *JsonReference) Inherits(child JsonReference) (*JsonReference, error) {
childUrl := child.GetUrl()
parentUrl := r.GetUrl()
if childUrl == nil {
return nil, errors.New("childUrl is nil!")
}
if parentUrl == nil {
return nil, errors.New("parentUrl is nil!")
}
ref, err := NewJsonReference(parentUrl.ResolveReference(childUrl).String())
if err != nil {
return nil, err
}
return &ref, err
}

View file

@@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2015 xeipuuv
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@@ -1,236 +0,0 @@
[![Build Status](https://travis-ci.org/xeipuuv/gojsonschema.svg)](https://travis-ci.org/xeipuuv/gojsonschema)
# gojsonschema
## Description
An implementation of JSON Schema for the Go language, based on IETF's draft v4.
References:
* http://json-schema.org
* http://json-schema.org/latest/json-schema-core.html
* http://json-schema.org/latest/json-schema-validation.html
## Installation
```
go get github.com/xeipuuv/gojsonschema
```
Dependencies:
* [github.com/xeipuuv/gojsonpointer](https://github.com/xeipuuv/gojsonpointer)
* [github.com/xeipuuv/gojsonreference](https://github.com/xeipuuv/gojsonreference)
* [github.com/stretchr/testify/assert](https://github.com/stretchr/testify#assert-package)
## Usage
### Example
```go
package main
import (
"fmt"
"github.com/xeipuuv/gojsonschema"
)
func main() {
schemaLoader := gojsonschema.NewReferenceLoader("file:///home/me/schema.json")
documentLoader := gojsonschema.NewReferenceLoader("file:///home/me/document.json")
result, err := gojsonschema.Validate(schemaLoader, documentLoader)
if err != nil {
panic(err.Error())
}
if result.Valid() {
fmt.Printf("The document is valid\n")
} else {
fmt.Printf("The document is not valid. see errors :\n")
for _, desc := range result.Errors() {
fmt.Printf("- %s\n", desc)
}
}
}
```
#### Loaders
There are various ways to load your JSON data.
To load your schemas and documents,
first declare an appropriate loader:
* Web / HTTP, using a reference:
```go
loader := gojsonschema.NewReferenceLoader("http://www.some_host.com/schema.json")
```
* Local file, using a reference:
```go
loader := gojsonschema.NewReferenceLoader("file:///home/me/schema.json")
```
References use URI syntax: the scheme prefix (file://) and a full path to the file are required.
* JSON strings:
```go
loader := gojsonschema.NewStringLoader(`{"type": "string"}`)
```
* Custom Go types:
```go
m := map[string]interface{}{"type": "string"}
loader := gojsonschema.NewGoLoader(m)
```
And with structs:
```go
type Root struct {
Users []User `json:"users"`
}
type User struct {
Name string `json:"name"`
}
...
data := Root{}
data.Users = append(data.Users, User{"John"})
data.Users = append(data.Users, User{"Sophia"})
data.Users = append(data.Users, User{"Bill"})
loader := gojsonschema.NewGoLoader(data)
```
#### Validation
Once the loaders are set, validation is easy:
```go
result, err := gojsonschema.Validate(schemaLoader, documentLoader)
```
Alternatively, you might want to load a schema only once and reuse it for multiple validations:
```go
schema, err := gojsonschema.NewSchema(schemaLoader)
...
result1, err := schema.Validate(documentLoader1)
...
result2, err := schema.Validate(documentLoader2)
...
// etc ...
```
To check the result:
```go
if result.Valid() {
fmt.Printf("The document is valid\n")
} else {
fmt.Printf("The document is not valid. see errors :\n")
for _, err := range result.Errors() {
// Err implements the ResultError interface
fmt.Printf("- %s\n", err)
}
}
```
## Working with Errors
The library reports string error codes, which you can customize by implementing your own gojsonschema.locale and setting it:
```go
gojsonschema.Locale = YourCustomLocale{}
```
However, each error contains additional contextual information.
**err.Type()**: *string* Returns the "type" of error that occurred. Note that you can also type-check against the concrete error types (see the sketch after this list).
Note: An error of type RequiredError has an err.Type() return value of "required"
"required": RequiredError
"invalid_type": InvalidTypeError
"number_any_of": NumberAnyOfError
"number_one_of": NumberOneOfError
"number_all_of": NumberAllOfError
"number_not": NumberNotError
"missing_dependency": MissingDependencyError
"internal": InternalError
"enum": EnumError
"array_no_additional_items": ArrayNoAdditionalItemsError
"array_min_items": ArrayMinItemsError
"array_max_items": ArrayMaxItemsError
"unique": ItemsMustBeUniqueError
"array_min_properties": ArrayMinPropertiesError
"array_max_properties": ArrayMaxPropertiesError
"additional_property_not_allowed": AdditionalPropertyNotAllowedError
"invalid_property_pattern": InvalidPropertyPatternError
"string_gte": StringLengthGTEError
"string_lte": StringLengthLTEError
"pattern": DoesNotMatchPatternError
"multiple_of": MultipleOfError
"number_gte": NumberGTEError
"number_gt": NumberGTError
"number_lte": NumberLTEError
"number_lt": NumberLTError
**err.Value()**: *interface{}* Returns the value being validated
**err.Context()**: *gojsonschema.jsonContext* Returns the context. This has a String() method that will print something like this: (root).firstName
**err.Field()**: *string* Returns the fieldname in the format firstName, or for embedded properties, person.firstName. This returns the same as the String() method on *err.Context()* but removes the (root). prefix.
**err.Description()**: *string* The error description. This is based on the locale you are using. See the beginning of this section for overwriting the locale with a custom implementation.
**err.Details()**: *gojsonschema.ErrorDetails* Returns a map[string]interface{} of additional error details specific to the error. For example, GTE errors will have a "min" value, LTE will have a "max" value. See errors.go for a full description of all the error details. Every error always contains a "field" key that holds the value of *err.Field()*
Note that in most cases err.Details() is used to generate replacement strings for your locales rather than being used directly, e.g.:
```
%field% must be greater than or equal to %min%
```
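As promised above, here is a minimal sketch of both approaches: type-checking against a concrete error type, and switching on the string code to read the details map (the `number_gte` handling and the printed messages are illustrative):
```go
result, err := gojsonschema.Validate(schemaLoader, documentLoader)
if err != nil {
	panic(err.Error())
}
for _, e := range result.Errors() {
	// Type-check against a concrete error type...
	if _, ok := e.(*gojsonschema.RequiredError); ok {
		fmt.Printf("missing required property at %s\n", e.Field())
	}
	// ...or switch on the string code and read the details map.
	if e.Type() == "number_gte" {
		fmt.Printf("%s must be >= %v\n", e.Field(), e.Details()["min"])
	}
}
```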
## Formats
JSON Schema allows for an optional "format" property to validate strings against well-known formats. gojsonschema ships with all of the formats defined in the spec, which you can use like this:
```json
{"type": "string", "format": "email"}
```
Available formats: date-time, hostname, email, ipv4, ipv6, uri, and uuid.
For repetitive or more complex formats, you can create custom format checkers and add them to gojsonschema like this:
```go
// Define the format checker
type RoleFormatChecker struct{}
// Ensure it meets the gojsonschema.FormatChecker interface
func (f RoleFormatChecker) IsFormat(input string) bool {
return strings.HasPrefix(input, "ROLE_")
}
// Add it to the library
gojsonschema.FormatCheckers.Add("role", RoleFormatChecker{})
```
Now use it in your JSON schema:
```json
{"type": "string", "format": "role"}
```
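Putting the pieces together, an end-to-end check of the custom format might look like the following sketch (the `RoleFormatChecker` type is the one defined above; the document value is illustrative):
```go
gojsonschema.FormatCheckers.Add("role", RoleFormatChecker{})

schemaLoader := gojsonschema.NewStringLoader(`{"type": "string", "format": "role"}`)
documentLoader := gojsonschema.NewStringLoader(`"ROLE_ADMIN"`)

result, err := gojsonschema.Validate(schemaLoader, documentLoader)
if err != nil {
	panic(err.Error())
}
fmt.Println(result.Valid()) // true: "ROLE_ADMIN" carries the ROLE_ prefix
```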
## Uses
gojsonschema is tested against the following test suite:
https://github.com/json-schema/JSON-Schema-Test-Suite

View file

@@ -1,242 +0,0 @@
package gojsonschema
import (
"fmt"
"strings"
)
type (
// RequiredError. ErrorDetails: property string
RequiredError struct {
ResultErrorFields
}
// InvalidTypeError. ErrorDetails: expected, given
InvalidTypeError struct {
ResultErrorFields
}
// NumberAnyOfError. ErrorDetails: -
NumberAnyOfError struct {
ResultErrorFields
}
// NumberOneOfError. ErrorDetails: -
NumberOneOfError struct {
ResultErrorFields
}
// NumberAllOfError. ErrorDetails: -
NumberAllOfError struct {
ResultErrorFields
}
// NumberNotError. ErrorDetails: -
NumberNotError struct {
ResultErrorFields
}
// MissingDependencyError. ErrorDetails: dependency
MissingDependencyError struct {
ResultErrorFields
}
// InternalError. ErrorDetails: error
InternalError struct {
ResultErrorFields
}
// EnumError. ErrorDetails: allowed
EnumError struct {
ResultErrorFields
}
// ArrayNoAdditionalItemsError. ErrorDetails: -
ArrayNoAdditionalItemsError struct {
ResultErrorFields
}
// ArrayMinItemsError. ErrorDetails: min
ArrayMinItemsError struct {
ResultErrorFields
}
// ArrayMaxItemsError. ErrorDetails: max
ArrayMaxItemsError struct {
ResultErrorFields
}
// ItemsMustBeUniqueError. ErrorDetails: type
ItemsMustBeUniqueError struct {
ResultErrorFields
}
// ArrayMinPropertiesError. ErrorDetails: min
ArrayMinPropertiesError struct {
ResultErrorFields
}
// ArrayMaxPropertiesError. ErrorDetails: max
ArrayMaxPropertiesError struct {
ResultErrorFields
}
// AdditionalPropertyNotAllowedError. ErrorDetails: property
AdditionalPropertyNotAllowedError struct {
ResultErrorFields
}
// InvalidPropertyPatternError. ErrorDetails: property, pattern
InvalidPropertyPatternError struct {
ResultErrorFields
}
// StringLengthGTEError. ErrorDetails: min
StringLengthGTEError struct {
ResultErrorFields
}
// StringLengthLTEError. ErrorDetails: max
StringLengthLTEError struct {
ResultErrorFields
}
// DoesNotMatchPatternError. ErrorDetails: pattern
DoesNotMatchPatternError struct {
ResultErrorFields
}
// DoesNotMatchFormatError. ErrorDetails: format
DoesNotMatchFormatError struct {
ResultErrorFields
}
// MultipleOfError. ErrorDetails: multiple
MultipleOfError struct {
ResultErrorFields
}
// NumberGTEError. ErrorDetails: min
NumberGTEError struct {
ResultErrorFields
}
// NumberGTError. ErrorDetails: min
NumberGTError struct {
ResultErrorFields
}
// NumberLTEError. ErrorDetails: max
NumberLTEError struct {
ResultErrorFields
}
// NumberLTError. ErrorDetails: max
NumberLTError struct {
ResultErrorFields
}
)
// newError takes a ResultError type and sets the type, context, description, details, value, and field
func newError(err ResultError, context *jsonContext, value interface{}, locale locale, details ErrorDetails) {
var t string
var d string
switch err.(type) {
case *RequiredError:
t = "required"
d = locale.Required()
case *InvalidTypeError:
t = "invalid_type"
d = locale.InvalidType()
case *NumberAnyOfError:
t = "number_any_of"
d = locale.NumberAnyOf()
case *NumberOneOfError:
t = "number_one_of"
d = locale.NumberOneOf()
case *NumberAllOfError:
t = "number_all_of"
d = locale.NumberAllOf()
case *NumberNotError:
t = "number_not"
d = locale.NumberNot()
case *MissingDependencyError:
t = "missing_dependency"
d = locale.MissingDependency()
case *InternalError:
t = "internal"
d = locale.Internal()
case *EnumError:
t = "enum"
d = locale.Enum()
case *ArrayNoAdditionalItemsError:
t = "array_no_additional_items"
d = locale.ArrayNoAdditionalItems()
case *ArrayMinItemsError:
t = "array_min_items"
d = locale.ArrayMinItems()
case *ArrayMaxItemsError:
t = "array_max_items"
d = locale.ArrayMaxItems()
case *ItemsMustBeUniqueError:
t = "unique"
d = locale.Unique()
case *ArrayMinPropertiesError:
t = "array_min_properties"
d = locale.ArrayMinProperties()
case *ArrayMaxPropertiesError:
t = "array_max_properties"
d = locale.ArrayMaxProperties()
case *AdditionalPropertyNotAllowedError:
t = "additional_property_not_allowed"
d = locale.AdditionalPropertyNotAllowed()
case *InvalidPropertyPatternError:
t = "invalid_property_pattern"
d = locale.InvalidPropertyPattern()
case *StringLengthGTEError:
t = "string_gte"
d = locale.StringGTE()
case *StringLengthLTEError:
t = "string_lte"
d = locale.StringLTE()
case *DoesNotMatchPatternError:
t = "pattern"
d = locale.DoesNotMatchPattern()
case *DoesNotMatchFormatError:
t = "format"
d = locale.DoesNotMatchFormat()
case *MultipleOfError:
t = "multiple_of"
d = locale.MultipleOf()
case *NumberGTEError:
t = "number_gte"
d = locale.NumberGTE()
case *NumberGTError:
t = "number_gt"
d = locale.NumberGT()
case *NumberLTEError:
t = "number_lte"
d = locale.NumberLTE()
case *NumberLTError:
t = "number_lt"
d = locale.NumberLT()
}
err.SetType(t)
err.SetContext(context)
err.SetValue(value)
err.SetDetails(details)
details["field"] = err.Field()
err.SetDescription(formatErrorDescription(d, details))
}
// formatErrorDescription takes a string in this format: %field% is required
// and converts it to a string with replacements. The fields come from
// the ErrorDetails struct and vary for each type of error.
func formatErrorDescription(s string, details ErrorDetails) string {
for name, val := range details {
s = strings.Replace(s, "%"+strings.ToLower(name)+"%", fmt.Sprintf("%v", val), -1)
}
return s
}

View file

@@ -1,178 +0,0 @@
package gojsonschema
import (
"net"
"net/url"
"reflect"
"regexp"
"strings"
"time"
)
type (
// FormatChecker is the interface all formatters added to FormatCheckerChain must implement
FormatChecker interface {
IsFormat(input string) bool
}
// FormatCheckerChain holds the formatters
FormatCheckerChain struct {
formatters map[string]FormatChecker
}
// EmailFormatChecker verifies email address formats
EmailFormatChecker struct{}
// IPV4FormatChecker verifies IP addresses in the ipv4 format
IPV4FormatChecker struct{}
// IPV6FormatChecker verifies IP addresses in the ipv6 format
IPV6FormatChecker struct{}
// DateTimeFormatChecker verifies date/time formats per RFC3339 5.6
//
// Valid formats:
// Partial Time: HH:MM:SS
// Full Date: YYYY-MM-DD
// Full Time: HH:MM:SS±HH:MM (or Z)
// Date Time: YYYY-MM-DDTHH:MM:SS±HH:MM (or Z)
//
// Where
// YYYY = 4DIGIT year
// MM = 2DIGIT month ; 01-12
// DD = 2DIGIT day-month ; 01-28, 01-29, 01-30, 01-31 based on month/year
// HH = 2DIGIT hour ; 00-23
// MM = 2DIGIT minute ; 00-59
// SS = 2DIGIT second ; 00-58, 00-60 based on leap second rules
// T = Literal
// Z = Literal
//
// Note: Nanoseconds are also supported in all formats
//
// http://tools.ietf.org/html/rfc3339#section-5.6
DateTimeFormatChecker struct{}
// URIFormatChecker validates that a URI has a valid scheme per RFC3986
URIFormatChecker struct{}
// HostnameFormatChecker validates a hostname is in the correct format
HostnameFormatChecker struct{}
// UUIDFormatChecker validates a UUID is in the correct format
UUIDFormatChecker struct{}
)
var (
// FormatCheckers holds the valid format checkers, and is a public variable
// so library users can add custom format checkers
FormatCheckers = FormatCheckerChain{
formatters: map[string]FormatChecker{
"date-time": DateTimeFormatChecker{},
"hostname": HostnameFormatChecker{},
"email": EmailFormatChecker{},
"ipv4": IPV4FormatChecker{},
"ipv6": IPV6FormatChecker{},
"uri": URIFormatChecker{},
"uuid": UUIDFormatChecker{},
},
}
// Regex credit: https://github.com/asaskevich/govalidator
rxEmail = regexp.MustCompile("^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$")
// Regex credit: https://www.socketloop.com/tutorials/golang-validate-hostname
rxHostname = regexp.MustCompile(`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$`)
rxUUID = regexp.MustCompile("^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$")
)
// Add adds a FormatChecker to the FormatCheckerChain
// The name used will be the value used for the format key in your json schema
func (c *FormatCheckerChain) Add(name string, f FormatChecker) *FormatCheckerChain {
c.formatters[name] = f
return c
}
// Remove deletes a FormatChecker from the FormatCheckerChain (if it exists)
func (c *FormatCheckerChain) Remove(name string) *FormatCheckerChain {
delete(c.formatters, name)
return c
}
// Has checks to see if the FormatCheckerChain holds a FormatChecker with the given name
func (c *FormatCheckerChain) Has(name string) bool {
_, ok := c.formatters[name]
return ok
}
// IsFormat will check an input against a FormatChecker with the given name
// to see if it is the correct format
func (c *FormatCheckerChain) IsFormat(name string, input interface{}) bool {
f, ok := c.formatters[name]
if !ok {
return false
}
if !isKind(input, reflect.String) {
return false
}
inputString := input.(string)
return f.IsFormat(inputString)
}
func (f EmailFormatChecker) IsFormat(input string) bool {
return rxEmail.MatchString(input)
}
// Credit: https://github.com/asaskevich/govalidator
func (f IPV4FormatChecker) IsFormat(input string) bool {
ip := net.ParseIP(input)
return ip != nil && strings.Contains(input, ".")
}
// Credit: https://github.com/asaskevich/govalidator
func (f IPV6FormatChecker) IsFormat(input string) bool {
ip := net.ParseIP(input)
return ip != nil && strings.Contains(input, ":")
}
func (f DateTimeFormatChecker) IsFormat(input string) bool {
formats := []string{
"15:04:05",
"15:04:05Z07:00",
"2006-01-02",
time.RFC3339,
time.RFC3339Nano,
}
for _, format := range formats {
if _, err := time.Parse(format, input); err == nil {
return true
}
}
return false
}
func (f URIFormatChecker) IsFormat(input string) bool {
u, err := url.Parse(input)
if err != nil || u.Scheme == "" {
return false
}
return true
}
func (f HostnameFormatChecker) IsFormat(input string) bool {
return rxHostname.MatchString(input) && len(input) < 256
}
func (f UUIDFormatChecker) IsFormat(input string) bool {
return rxUUID.MatchString(input)
}

View file

@@ -1,37 +0,0 @@
// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// author xeipuuv
// author-github https://github.com/xeipuuv
// author-mail xeipuuv@gmail.com
//
// repository-name gojsonschema
// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
//
// description Very simple log wrapper.
// Used for debugging/testing purposes.
//
// created 01-01-2015
package gojsonschema
import (
"log"
)
const internalLogEnabled = false
func internalLog(format string, v ...interface{}) {
log.Printf(format, v...)
}

View file

@@ -1,72 +0,0 @@
// Copyright 2013 MongoDB, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// author tolsen
// author-github https://github.com/tolsen
//
// repository-name gojsonschema
// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
//
// description Implements a persistent (immutable w/ shared structure) singly-linked list of strings for the purpose of storing a json context
//
// created 04-09-2013
package gojsonschema
import "bytes"
// jsonContext implements a persistent linked-list of strings
type jsonContext struct {
head string
tail *jsonContext
}
func newJsonContext(head string, tail *jsonContext) *jsonContext {
return &jsonContext{head, tail}
}
// String displays the context in reverse.
// This plays well with the data structure's persistent nature with
// Cons and a json document's tree structure.
func (c *jsonContext) String(del ...string) string {
byteArr := make([]byte, 0, c.stringLen())
buf := bytes.NewBuffer(byteArr)
c.writeStringToBuffer(buf, del)
return buf.String()
}
func (c *jsonContext) stringLen() int {
length := 0
if c.tail != nil {
length = c.tail.stringLen() + 1 // add 1 for "."
}
length += len(c.head)
return length
}
func (c *jsonContext) writeStringToBuffer(buf *bytes.Buffer, del []string) {
if c.tail != nil {
c.tail.writeStringToBuffer(buf, del)
if len(del) > 0 {
buf.WriteString(del[0])
} else {
buf.WriteString(".")
}
}
buf.WriteString(c.head)
}

View file

@@ -1,286 +0,0 @@
// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// author xeipuuv
// author-github https://github.com/xeipuuv
// author-mail xeipuuv@gmail.com
//
// repository-name gojsonschema
// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
//
// description Different strategies to load JSON files.
// Includes References (file and HTTP), JSON strings and Go types.
//
// created 01-02-2015
package gojsonschema
import (
"bytes"
"encoding/json"
"errors"
"io"
"io/ioutil"
"net/http"
"path/filepath"
"runtime"
"strings"
"github.com/xeipuuv/gojsonreference"
)
// JSONLoader is the interface that all JSON document loaders implement.
type JSONLoader interface {
jsonSource() interface{}
loadJSON() (interface{}, error)
loadSchema() (*Schema, error)
}
// jsonReferenceLoader loads JSON documents from file and HTTP references.
type jsonReferenceLoader struct {
source string
}
func (l *jsonReferenceLoader) jsonSource() interface{} {
return l.source
}
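// NewReferenceLoader returns a loader that reads a JSON document from a file or HTTP reference.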
func NewReferenceLoader(source string) *jsonReferenceLoader {
return &jsonReferenceLoader{source: source}
}
func (l *jsonReferenceLoader) loadJSON() (interface{}, error) {
var err error
reference, err := gojsonreference.NewJsonReference(l.jsonSource().(string))
if err != nil {
return nil, err
}
refToUrl := reference
refToUrl.GetUrl().Fragment = ""
var document interface{}
if reference.HasFileScheme {
filename := strings.Replace(refToUrl.String(), "file://", "", -1)
if runtime.GOOS == "windows" {
// on Windows, a file URL may have an extra leading slash, use slashes
// instead of backslashes, and have spaces escaped
if strings.HasPrefix(filename, "/") {
filename = filename[1:]
}
filename = filepath.FromSlash(filename)
filename = strings.Replace(filename, "%20", " ", -1)
}
document, err = l.loadFromFile(filename)
if err != nil {
return nil, err
}
} else {
document, err = l.loadFromHTTP(refToUrl.String())
if err != nil {
return nil, err
}
}
return document, nil
}
func (l *jsonReferenceLoader) loadSchema() (*Schema, error) {
var err error
d := Schema{}
d.pool = newSchemaPool()
d.referencePool = newSchemaReferencePool()
d.documentReference, err = gojsonreference.NewJsonReference(l.jsonSource().(string))
if err != nil {
return nil, err
}
spd, err := d.pool.GetDocument(d.documentReference)
if err != nil {
return nil, err
}
err = d.parse(spd.Document)
if err != nil {
return nil, err
}
return &d, nil
}
func (l *jsonReferenceLoader) loadFromHTTP(address string) (interface{}, error) {
resp, err := http.Get(address)
if err != nil {
return nil, err
}
// must return HTTP Status 200 OK
if resp.StatusCode != http.StatusOK {
return nil, errors.New(formatErrorDescription(Locale.httpBadStatus(), ErrorDetails{"status": resp.Status}))
}
bodyBuff, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
return decodeJsonUsingNumber(bytes.NewReader(bodyBuff))
}
func (l *jsonReferenceLoader) loadFromFile(path string) (interface{}, error) {
bodyBuff, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
return decodeJsonUsingNumber(bytes.NewReader(bodyBuff))
}
// jsonStringLoader loads a JSON document from a raw string.
type jsonStringLoader struct {
source string
}
func (l *jsonStringLoader) jsonSource() interface{} {
return l.source
}
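// NewStringLoader returns a loader that reads a JSON document from a raw string.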
func NewStringLoader(source string) *jsonStringLoader {
return &jsonStringLoader{source: source}
}
func (l *jsonStringLoader) loadJSON() (interface{}, error) {
return decodeJsonUsingNumber(strings.NewReader(l.jsonSource().(string)))
}
func (l *jsonStringLoader) loadSchema() (*Schema, error) {
var err error
document, err := l.loadJSON()
if err != nil {
return nil, err
}
d := Schema{}
d.pool = newSchemaPool()
d.referencePool = newSchemaReferencePool()
d.documentReference, err = gojsonreference.NewJsonReference("#")
if err != nil {
return nil, err
}
d.pool.SetStandaloneDocument(document)
err = d.parse(document)
if err != nil {
return nil, err
}
return &d, nil
}
// jsonGoLoader loads JSON documents from in-memory Go values (maps, interface{}, structs, ...).
type jsonGoLoader struct {
source interface{}
}
func (l *jsonGoLoader) jsonSource() interface{} {
return l.source
}
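// NewGoLoader returns a loader that first marshals an arbitrary Go value to JSON.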
func NewGoLoader(source interface{}) *jsonGoLoader {
return &jsonGoLoader{source: source}
}
func (l *jsonGoLoader) loadJSON() (interface{}, error) {
// marshal to canonical JSON first to avoid type mismatches
jsonBytes, err := json.Marshal(l.jsonSource())
if err != nil {
return nil, err
}
return decodeJsonUsingNumber(bytes.NewReader(jsonBytes))
}
func (l *jsonGoLoader) loadSchema() (*Schema, error) {
var err error
document, err := l.loadJSON()
if err != nil {
return nil, err
}
d := Schema{}
d.pool = newSchemaPool()
d.referencePool = newSchemaReferencePool()
d.documentReference, err = gojsonreference.NewJsonReference("#")
if err != nil {
return nil, err
}
d.pool.SetStandaloneDocument(document)
err = d.parse(document)
if err != nil {
return nil, err
}
return &d, nil
}
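// decodeJsonUsingNumber decodes a JSON document, preserving numbers as json.Number rather than float64.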
func decodeJsonUsingNumber(r io.Reader) (interface{}, error) {
var document interface{}
decoder := json.NewDecoder(r)
decoder.UseNumber()
err := decoder.Decode(&document)
if err != nil {
return nil, err
}
return document, nil
}

Some files were not shown because too many files have changed in this diff.