Vendor notary

Signed-off-by: Derek McGowan <derek@mcgstyle.net> (github: dmcgowan)
Derek McGowan 2015-07-23 03:04:01 -07:00
parent 48250832a3
commit f5a4a8da15
58 changed files with 11825 additions and 0 deletions

View file

@@ -38,6 +38,11 @@ clone git github.com/hashicorp/consul v0.5.2
clone git github.com/docker/distribution cd8ff553b6b1911be23dfeabb73e33108bcbf147
clone git github.com/vbatts/tar-split v0.9.4
clone git github.com/docker/notary 77bced079e83d80f40c1f0a544b1a8a3b97fb052
clone git github.com/endophage/gotuf 374908abc8af7e953a2813c5c2b3944ab625ca68
clone git github.com/tent/canonical-json-go 96e4ba3a7613a1216cbd1badca4efe382adea337
clone git github.com/agl/ed25519 d2b94fd789ea21d12fac1a4443dd3a3f79cda72c
clone git github.com/opencontainers/runc v0.0.2 # libcontainer
# libcontainer deps (see src/github.com/docker/libcontainer/update-vendor.sh)
clone git github.com/coreos/go-systemd v2

View file

@@ -0,0 +1,27 @@
Copyright (c) 2012 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View file

@@ -0,0 +1,125 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package ed25519 implements the Ed25519 signature algorithm. See
// http://ed25519.cr.yp.to/.
package ed25519
// This code is a port of the public domain, "ref10" implementation of ed25519
// from SUPERCOP.
import (
"crypto/sha512"
"crypto/subtle"
"io"
"github.com/agl/ed25519/edwards25519"
)
const (
PublicKeySize = 32
PrivateKeySize = 64
SignatureSize = 64
)
// GenerateKey generates a public/private key pair using randomness from rand.
func GenerateKey(rand io.Reader) (publicKey *[PublicKeySize]byte, privateKey *[PrivateKeySize]byte, err error) {
privateKey = new([64]byte)
publicKey = new([32]byte)
_, err = io.ReadFull(rand, privateKey[:32])
if err != nil {
return nil, nil, err
}
h := sha512.New()
h.Write(privateKey[:32])
digest := h.Sum(nil)
digest[0] &= 248
digest[31] &= 127
digest[31] |= 64
var A edwards25519.ExtendedGroupElement
var hBytes [32]byte
copy(hBytes[:], digest)
edwards25519.GeScalarMultBase(&A, &hBytes)
A.ToBytes(publicKey)
copy(privateKey[32:], publicKey[:])
return
}
// Sign signs the message with privateKey and returns a signature.
func Sign(privateKey *[PrivateKeySize]byte, message []byte) *[SignatureSize]byte {
h := sha512.New()
h.Write(privateKey[:32])
var digest1, messageDigest, hramDigest [64]byte
var expandedSecretKey [32]byte
h.Sum(digest1[:0])
copy(expandedSecretKey[:], digest1[:])
expandedSecretKey[0] &= 248
expandedSecretKey[31] &= 63
expandedSecretKey[31] |= 64
h.Reset()
h.Write(digest1[32:])
h.Write(message)
h.Sum(messageDigest[:0])
var messageDigestReduced [32]byte
edwards25519.ScReduce(&messageDigestReduced, &messageDigest)
var R edwards25519.ExtendedGroupElement
edwards25519.GeScalarMultBase(&R, &messageDigestReduced)
var encodedR [32]byte
R.ToBytes(&encodedR)
h.Reset()
h.Write(encodedR[:])
h.Write(privateKey[32:])
h.Write(message)
h.Sum(hramDigest[:0])
var hramDigestReduced [32]byte
edwards25519.ScReduce(&hramDigestReduced, &hramDigest)
var s [32]byte
edwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced)
signature := new([64]byte)
copy(signature[:], encodedR[:])
copy(signature[32:], s[:])
return signature
}
// Verify returns true iff sig is a valid signature of message by publicKey.
func Verify(publicKey *[PublicKeySize]byte, message []byte, sig *[SignatureSize]byte) bool {
if sig[63]&224 != 0 {
return false
}
var A edwards25519.ExtendedGroupElement
if !A.FromBytes(publicKey) {
return false
}
h := sha512.New()
h.Write(sig[:32])
h.Write(publicKey[:])
h.Write(message)
var digest [64]byte
h.Sum(digest[:0])
var hReduced [32]byte
edwards25519.ScReduce(&hReduced, &digest)
var R edwards25519.ProjectiveGroupElement
var b [32]byte
copy(b[:], sig[32:])
edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &b)
var checkR [32]byte
R.ToBytes(&checkR)
return subtle.ConstantTimeCompare(sig[:32], checkR[:]) == 1
}
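// Illustrative usage (not part of this file): a sign/verify round-trip using
// only the exported API above; rand.Reader here is crypto/rand's.
//
//	pub, priv, err := ed25519.GenerateKey(rand.Reader)
//	if err != nil {
//		log.Fatal(err)
//	}
//	msg := []byte("hello notary")
//	sig := ed25519.Sign(priv, msg)
//	fmt.Println(ed25519.Verify(pub, msg, sig)) // true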

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@@ -0,0 +1,47 @@
package changelist
// TufChange represents a change to a TUF repo
type TufChange struct {
// Abbreviated because Go doesn't permit a field and method of the same name
Actn int `json:"action"`
Role string `json:"role"`
ChangeType string `json:"type"`
ChangePath string `json:"path"`
Data []byte `json:"data"`
}
// NewTufChange initializes a tufChange object
func NewTufChange(action int, role, changeType, changePath string, content []byte) *TufChange {
return &TufChange{
Actn: action,
Role: role,
ChangeType: changeType,
ChangePath: changePath,
Data: content,
}
}
// Action returns c.Actn
func (c TufChange) Action() int {
return c.Actn
}
// Scope returns c.Role
func (c TufChange) Scope() string {
return c.Role
}
// Type returns c.ChangeType
func (c TufChange) Type() string {
return c.ChangeType
}
// Path returns c.ChangePath
func (c TufChange) Path() string {
return c.ChangePath
}
// Content returns c.Data
func (c TufChange) Content() []byte {
return c.Data
}
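// Illustrative sketch (not part of this file): the JSON tags on TufChange give
// changes a stable serialized form, which is what FileChangelist persists.
//
//	c := NewTufChange(ActionCreate, "targets", "target", "foo.tar", []byte(`{}`))
//	raw, _ := json.Marshal(c)
//	// raw: {"action":0,"role":"targets","type":"target","path":"foo.tar","data":"e30="}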

View file

@@ -0,0 +1,29 @@
package changelist
// memChangelist implements a simple in-memory change list.
type memChangelist struct {
changes []Change
}
// List returns a list of Changes
func (cl memChangelist) List() []Change {
return cl.changes
}
// Add adds a change to the in-memory change list
func (cl *memChangelist) Add(c Change) error {
cl.changes = append(cl.changes, c)
return nil
}
// Clear empties the in-memory change list.
func (cl *memChangelist) Clear(archive string) error {
// appending to a nil list initializes it.
cl.changes = nil
return nil
}
// Close is a no-op in this in-memory change-list
func (cl *memChangelist) Close() error {
return nil
}

View file

@@ -0,0 +1,114 @@
package changelist
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path"
"sort"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution/uuid"
)
// FileChangelist stores all the changes as files
type FileChangelist struct {
dir string
}
// NewFileChangelist is a convenience method for returning FileChangelists
func NewFileChangelist(dir string) (*FileChangelist, error) {
logrus.Debug("Making dir path: ", dir)
err := os.MkdirAll(dir, 0700)
if err != nil {
return nil, err
}
return &FileChangelist{dir: dir}, nil
}
// List returns a list of sorted changes
func (cl FileChangelist) List() []Change {
var changes []Change
dir, err := os.Open(cl.dir)
if err != nil {
return changes
}
defer dir.Close()
fileInfos, err := dir.Readdir(0)
if err != nil {
return changes
}
sort.Sort(fileChanges(fileInfos))
for _, f := range fileInfos {
if f.IsDir() {
continue
}
raw, err := ioutil.ReadFile(path.Join(cl.dir, f.Name()))
if err != nil {
logrus.Warn(err.Error())
continue
}
c := &TufChange{}
err = json.Unmarshal(raw, c)
if err != nil {
logrus.Warn(err.Error())
continue
}
changes = append(changes, c)
}
return changes
}
// Add adds a change to the file change list
func (cl FileChangelist) Add(c Change) error {
cJSON, err := json.Marshal(c)
if err != nil {
return err
}
filename := fmt.Sprintf("%020d_%s.change", time.Now().UnixNano(), uuid.Generate())
return ioutil.WriteFile(path.Join(cl.dir, filename), cJSON, 0644)
}
// Clear clears the change list
func (cl FileChangelist) Clear(archive string) error {
dir, err := os.Open(cl.dir)
if err != nil {
return err
}
defer dir.Close()
files, err := dir.Readdir(0)
if err != nil {
return err
}
for _, f := range files {
os.Remove(path.Join(cl.dir, f.Name()))
}
return nil
}
// Close is a no-op
func (cl FileChangelist) Close() error {
// Nothing to do here
return nil
}
type fileChanges []os.FileInfo
// Len returns the length of a file change list
func (cs fileChanges) Len() int {
return len(cs)
}
// Less compares the names of two different file changes
func (cs fileChanges) Less(i, j int) bool {
return cs[i].Name() < cs[j].Name()
}
// Swap swaps the position of two file changes
func (cs fileChanges) Swap(i, j int) {
cs[i], cs[j] = cs[j], cs[i]
}
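// Illustrative note (not part of this file): the zero-padded "%020d_%s.change"
// names written by Add make fileChanges' lexicographic sort equal creation
// order, so List replays changes chronologically.
//
//	cl, _ := NewFileChangelist("/tmp/example-changelist") // hypothetical path
//	cl.Add(NewTufChange(ActionCreate, "targets", "target", "first", nil))
//	cl.Add(NewTufChange(ActionCreate, "targets", "target", "second", nil))
//	for _, c := range cl.List() {
//		fmt.Println(c.Path()) // "first", then "second"
//	}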

View file

@@ -0,0 +1,59 @@
package changelist
// Changelist is the interface for all TUF change lists
type Changelist interface {
// List returns the ordered list of changes
// currently stored
List() []Change
// Add appends the provided change to
// the list of changes
Add(Change) error
// Clear empties the current change list.
// Archive may be provided as a directory path
// to save a copy of the changelist in that location
Clear(archive string) error
// Close synchronizes any pending writes to the underlying
// storage and closes the file/connection
Close() error
}
const (
// ActionCreate represents a Create action
ActionCreate = iota
// ActionUpdate represents an Update action
ActionUpdate
// ActionDelete represents a Delete action
ActionDelete
)
// Change is the interface for a TUF Change
type Change interface {
// "create","update", or "delete"
Action() int
// Where the change should be made.
// For TUF this will be the role
Scope() string
// The content type being affected.
// For TUF this will be "target", or "delegation".
// If the type is "delegation", the Scope will be
// used to determine if a root role is being updated
// or a target delegation.
Type() string
// Path indicates the entry within a role to be affected by the
// change. For targets, this is simply the target's path,
// for delegations it's the delegated role name.
Path() string
// Serialized content that the interpreter of a changelist
// can use to apply the change.
// For TUF this will be the serialized JSON that needs
// to be inserted or merged. In the case of a "delete"
// action, it will be nil.
Content() []byte
}
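// Illustrative sketch (not part of this file): a consumer that interprets a
// Changelist purely through the Change interface, much as applyChangelist does
// in the client package.
//
//	func dump(cl Changelist) {
//		for _, c := range cl.List() {
//			fmt.Printf("action=%d scope=%s type=%s path=%s (%d bytes)\n",
//				c.Action(), c.Scope(), c.Type(), c.Path(), len(c.Content()))
//		}
//	}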

View file

@@ -0,0 +1,569 @@
package client
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"github.com/Sirupsen/logrus"
"github.com/docker/notary/client/changelist"
"github.com/docker/notary/cryptoservice"
"github.com/docker/notary/keystoremanager"
"github.com/docker/notary/pkg/passphrase"
"github.com/docker/notary/trustmanager"
"github.com/endophage/gotuf"
tufclient "github.com/endophage/gotuf/client"
"github.com/endophage/gotuf/data"
tuferrors "github.com/endophage/gotuf/errors"
"github.com/endophage/gotuf/keys"
"github.com/endophage/gotuf/signed"
"github.com/endophage/gotuf/store"
)
const maxSize = 5 << 20
func init() {
data.SetDefaultExpiryTimes(
map[string]int{
"root": 3650,
"targets": 1095,
"snapshot": 1095,
},
)
}
// ErrRepoNotInitialized is returned when trying to publish on an uninitialized
// notary repository
type ErrRepoNotInitialized struct{}
// Error returns the string representation of ErrRepoNotInitialized
func (err *ErrRepoNotInitialized) Error() string {
return "Repository has not been initialized"
}
// ErrExpired is returned when the metadata for a role has expired
type ErrExpired struct {
signed.ErrExpired
}
const (
tufDir = "tuf"
)
// ErrRepositoryNotExist is returned when trying to perform an action on a repository
// that doesn't exist.
var ErrRepositoryNotExist = errors.New("repository does not exist")
// NotaryRepository stores all the information needed to operate on a notary
// repository.
type NotaryRepository struct {
baseDir string
gun string
baseURL string
tufRepoPath string
fileStore store.MetadataStore
cryptoService signed.CryptoService
tufRepo *tuf.TufRepo
roundTrip http.RoundTripper
KeyStoreManager *keystoremanager.KeyStoreManager
}
// Target represents a simplified version of the data TUF operates on, so external
// applications don't have to depend on TUF data types.
type Target struct {
Name string
Hashes data.Hashes
Length int64
}
// NewTarget is a helper method that returns a Target
func NewTarget(targetName string, targetPath string) (*Target, error) {
b, err := ioutil.ReadFile(targetPath)
if err != nil {
return nil, err
}
meta, err := data.NewFileMeta(bytes.NewBuffer(b))
if err != nil {
return nil, err
}
return &Target{Name: targetName, Hashes: meta.Hashes, Length: meta.Length}, nil
}
// NewNotaryRepository is a helper method that returns a new notary repository.
// It takes the base directory under where all the trust files will be stored
// (usually ~/.docker/trust/).
func NewNotaryRepository(baseDir, gun, baseURL string, rt http.RoundTripper,
passphraseRetriever passphrase.Retriever) (*NotaryRepository, error) {
keyStoreManager, err := keystoremanager.NewKeyStoreManager(baseDir, passphraseRetriever)
if err != nil {
return nil, err
}
cryptoService := cryptoservice.NewCryptoService(gun, keyStoreManager.NonRootKeyStore())
nRepo := &NotaryRepository{
gun: gun,
baseDir: baseDir,
baseURL: baseURL,
tufRepoPath: filepath.Join(baseDir, tufDir, filepath.FromSlash(gun)),
cryptoService: cryptoService,
roundTrip: rt,
KeyStoreManager: keyStoreManager,
}
fileStore, err := store.NewFilesystemStore(
nRepo.tufRepoPath,
"metadata",
"json",
"",
)
if err != nil {
return nil, err
}
nRepo.fileStore = fileStore
return nRepo, nil
}
// Initialize creates a new repository, using the key in uCryptoService as the
// root key for the TUF repository.
func (r *NotaryRepository) Initialize(uCryptoService *cryptoservice.UnlockedCryptoService) error {
rootCert, err := uCryptoService.GenerateCertificate(r.gun)
if err != nil {
return err
}
r.KeyStoreManager.AddTrustedCert(rootCert)
// The root key gets stored in the TUF metadata X509 encoded, linking
// the tuf root.json to our X509 PKI.
// If the key is RSA, we store it as type RSAx509, if it is ECDSA we store it
// as ECDSAx509 to allow the gotuf verifiers to correctly decode the
// key on verification of signatures.
var algorithmType data.KeyAlgorithm
algorithm := uCryptoService.PrivKey.Algorithm()
switch algorithm {
case data.RSAKey:
algorithmType = data.RSAx509Key
case data.ECDSAKey:
algorithmType = data.ECDSAx509Key
default:
return fmt.Errorf("invalid format for root key: %s", algorithm)
}
// Generate a x509Key using the rootCert as the public key
rootKey := data.NewPublicKey(algorithmType, trustmanager.CertToPEM(rootCert))
// Creates a symlink between the certificate ID and the real public key it
// is associated with. This is used to be able to retrieve the root private key
// associated with a particular certificate
logrus.Debugf("Linking %s to %s.", rootKey.ID(), uCryptoService.ID())
err = r.KeyStoreManager.RootKeyStore().Link(uCryptoService.ID()+"_root", rootKey.ID()+"_root")
if err != nil {
return err
}
// All the timestamp keys are generated by the remote server.
remote, err := getRemoteStore(r.baseURL, r.gun, r.roundTrip)
if err != nil {
return err
}
rawTSKey, err := remote.GetKey("timestamp")
if err != nil {
return err
}
parsedKey := &data.TUFKey{}
err = json.Unmarshal(rawTSKey, parsedKey)
if err != nil {
return err
}
// Turn the JSON timestamp key from the remote server into a TUFKey
timestampKey := data.NewPublicKey(parsedKey.Algorithm(), parsedKey.Public())
logrus.Debugf("got remote %s timestamp key with keyID: %s", parsedKey.Algorithm(), timestampKey.ID())
// This is currently hardcoding the targets and snapshots keys to ECDSA
// Targets and snapshot keys are always generated locally.
targetsKey, err := r.cryptoService.Create("targets", data.ECDSAKey)
if err != nil {
return err
}
snapshotKey, err := r.cryptoService.Create("snapshot", data.ECDSAKey)
if err != nil {
return err
}
kdb := keys.NewDB()
kdb.AddKey(rootKey)
kdb.AddKey(targetsKey)
kdb.AddKey(snapshotKey)
kdb.AddKey(timestampKey)
err = initRoles(kdb, rootKey, targetsKey, snapshotKey, timestampKey)
if err != nil {
return err
}
r.tufRepo = tuf.NewTufRepo(kdb, r.cryptoService)
err = r.tufRepo.InitRoot(false)
if err != nil {
logrus.Debug("Error on InitRoot: ", err.Error())
switch err.(type) {
case tuferrors.ErrInsufficientSignatures, trustmanager.ErrPasswordInvalid:
default:
return err
}
}
err = r.tufRepo.InitTargets()
if err != nil {
logrus.Debug("Error on InitTargets: ", err.Error())
return err
}
err = r.tufRepo.InitSnapshot()
if err != nil {
logrus.Debug("Error on InitSnapshot: ", err.Error())
return err
}
return r.saveMetadata(uCryptoService.CryptoService)
}
// AddTarget adds a new target to the repository, forcing a timestamp check from TUF
func (r *NotaryRepository) AddTarget(target *Target) error {
cl, err := changelist.NewFileChangelist(filepath.Join(r.tufRepoPath, "changelist"))
if err != nil {
return err
}
logrus.Debugf("Adding target \"%s\" with sha256 \"%x\" and size %d bytes.\n", target.Name, target.Hashes["sha256"], target.Length)
meta := data.FileMeta{Length: target.Length, Hashes: target.Hashes}
metaJSON, err := json.Marshal(meta)
if err != nil {
return err
}
c := changelist.NewTufChange(changelist.ActionCreate, "targets", "target", target.Name, metaJSON)
err = cl.Add(c)
if err != nil {
return err
}
return cl.Close()
}
// ListTargets lists all targets for the current repository
func (r *NotaryRepository) ListTargets() ([]*Target, error) {
c, err := r.bootstrapClient()
if err != nil {
return nil, err
}
err = c.Update()
if err != nil {
if err, ok := err.(signed.ErrExpired); ok {
return nil, ErrExpired{err}
}
return nil, err
}
var targetList []*Target
for name, meta := range r.tufRepo.Targets["targets"].Signed.Targets {
target := &Target{Name: name, Hashes: meta.Hashes, Length: meta.Length}
targetList = append(targetList, target)
}
return targetList, nil
}
// GetTargetByName returns a target given a name
func (r *NotaryRepository) GetTargetByName(name string) (*Target, error) {
c, err := r.bootstrapClient()
if err != nil {
return nil, err
}
err = c.Update()
if err != nil {
if err, ok := err.(signed.ErrExpired); ok {
return nil, ErrExpired{err}
}
return nil, err
}
meta, err := c.TargetMeta(name)
if meta == nil {
return nil, fmt.Errorf("No trust data for %s", name)
} else if err != nil {
return nil, err
}
return &Target{Name: name, Hashes: meta.Hashes, Length: meta.Length}, nil
}
// Publish pushes the local changes in signed material to the remote notary-server.
// Conceptually it performs an operation similar to a `git rebase`
func (r *NotaryRepository) Publish() error {
var updateRoot bool
var root *data.Signed
// attempt to initialize the repo from the remote store
c, err := r.bootstrapClient()
if err != nil {
if _, ok := err.(store.ErrMetaNotFound); ok {
// if the remote store returns a 404 (translated into ErrMetaNotFound),
// the repo hasn't been initialized yet. Attempt to load it from disk.
err := r.bootstrapRepo()
if err != nil {
// Repo hasn't been initialized; it must be initialized before
// it can be published. Return an error and let caller determine
// what it wants to do.
logrus.Debug(err.Error())
logrus.Debug("Repository not initialized during Publish")
return &ErrRepoNotInitialized{}
}
// We had local data but the server doesn't know about the repo yet,
// ensure we will push the initial root file
root, err = r.tufRepo.Root.ToSigned()
if err != nil {
return err
}
updateRoot = true
} else {
// The remote store returned an error other than 404. We're
// unable to determine if the repo has been initialized or not.
logrus.Error("Could not publish Repository: ", err.Error())
return err
}
} else {
// If we were successfully able to bootstrap the client (which only pulls
// root.json), update the rest of the TUF metadata in preparation for
// applying the changelist.
err = c.Update()
if err != nil {
if err, ok := err.(signed.ErrExpired); ok {
return ErrExpired{err}
}
return err
}
}
// load the changelist for this repo
changelistDir := filepath.Join(r.tufRepoPath, "changelist")
cl, err := changelist.NewFileChangelist(changelistDir)
if err != nil {
logrus.Debug("Error initializing changelist")
return err
}
// apply the changelist to the repo
err = applyChangelist(r.tufRepo, cl)
if err != nil {
logrus.Debug("Error applying changelist")
return err
}
// check if our root file is nearing expiry. Resign if it is.
if nearExpiry(r.tufRepo.Root) || r.tufRepo.Root.Dirty {
rootKeyID := r.tufRepo.Root.Signed.Roles["root"].KeyIDs[0]
rootCryptoService, err := r.KeyStoreManager.GetRootCryptoService(rootKeyID)
if err != nil {
return err
}
root, err = r.tufRepo.SignRoot(data.DefaultExpires("root"), rootCryptoService.CryptoService)
if err != nil {
return err
}
updateRoot = true
}
// we will always resign targets and snapshots
targets, err := r.tufRepo.SignTargets("targets", data.DefaultExpires("targets"), nil)
if err != nil {
return err
}
snapshot, err := r.tufRepo.SignSnapshot(data.DefaultExpires("snapshot"), nil)
if err != nil {
return err
}
remote, err := getRemoteStore(r.baseURL, r.gun, r.roundTrip)
if err != nil {
return err
}
// ensure we can marshal all the json before sending anything to remote
targetsJSON, err := json.Marshal(targets)
if err != nil {
return err
}
snapshotJSON, err := json.Marshal(snapshot)
if err != nil {
return err
}
update := make(map[string][]byte)
// if we need to update the root, marshal it and push the update to remote
if updateRoot {
rootJSON, err := json.Marshal(root)
if err != nil {
return err
}
update["root"] = rootJSON
}
update["targets"] = targetsJSON
update["snapshot"] = snapshotJSON
err = remote.SetMultiMeta(update)
if err != nil {
return err
}
err = cl.Clear("")
if err != nil {
// This is not a critical problem when only a single host is pushing
// but will cause weird behaviour if changelist cleanup is failing
// and there are multiple hosts writing to the repo.
logrus.Warn("Unable to clear changelist. You may want to manually delete the folder ", changelistDir)
}
return nil
}
func (r *NotaryRepository) bootstrapRepo() error {
kdb := keys.NewDB()
tufRepo := tuf.NewTufRepo(kdb, r.cryptoService)
logrus.Debugf("Loading trusted collection.")
rootJSON, err := r.fileStore.GetMeta("root", 0)
if err != nil {
return err
}
root := &data.Signed{}
err = json.Unmarshal(rootJSON, root)
if err != nil {
return err
}
tufRepo.SetRoot(root)
targetsJSON, err := r.fileStore.GetMeta("targets", 0)
if err != nil {
return err
}
targets := &data.Signed{}
err = json.Unmarshal(targetsJSON, targets)
if err != nil {
return err
}
tufRepo.SetTargets("targets", targets)
snapshotJSON, err := r.fileStore.GetMeta("snapshot", 0)
if err != nil {
return err
}
snapshot := &data.Signed{}
err = json.Unmarshal(snapshotJSON, snapshot)
if err != nil {
return err
}
tufRepo.SetSnapshot(snapshot)
r.tufRepo = tufRepo
return nil
}
func (r *NotaryRepository) saveMetadata(rootCryptoService signed.CryptoService) error {
logrus.Debugf("Saving changes to Trusted Collection.")
signedRoot, err := r.tufRepo.SignRoot(data.DefaultExpires("root"), rootCryptoService)
if err != nil {
return err
}
rootJSON, err := json.Marshal(signedRoot)
if err != nil {
return err
}
targetsToSave := make(map[string][]byte)
for t := range r.tufRepo.Targets {
signedTargets, err := r.tufRepo.SignTargets(t, data.DefaultExpires("targets"), nil)
if err != nil {
return err
}
targetsJSON, err := json.Marshal(signedTargets)
if err != nil {
return err
}
targetsToSave[t] = targetsJSON
}
signedSnapshot, err := r.tufRepo.SignSnapshot(data.DefaultExpires("snapshot"), nil)
if err != nil {
return err
}
snapshotJSON, err := json.Marshal(signedSnapshot)
if err != nil {
return err
}
err = r.fileStore.SetMeta("root", rootJSON)
if err != nil {
return err
}
for role, blob := range targetsToSave {
parentDir := filepath.Dir(role)
os.MkdirAll(parentDir, 0755)
r.fileStore.SetMeta(role, blob)
}
return r.fileStore.SetMeta("snapshot", snapshotJSON)
}
func (r *NotaryRepository) bootstrapClient() (*tufclient.Client, error) {
var rootJSON []byte
remote, err := getRemoteStore(r.baseURL, r.gun, r.roundTrip)
if err == nil {
// if the remote store was successfully set up, try to get root from remote
rootJSON, err = remote.GetMeta("root", maxSize)
}
// if the remote store couldn't be set up, or we failed to get a root from it,
// load the root from cache (offline operation)
if err != nil {
if err, ok := err.(store.ErrMetaNotFound); ok {
// if the error was MetaNotFound then we successfully contacted
// the store and it doesn't know about the repo.
return nil, err
}
rootJSON, err = r.fileStore.GetMeta("root", maxSize)
if err != nil {
// if cache didn't return a root, we cannot proceed
return nil, store.ErrMetaNotFound{}
}
}
root := &data.Signed{}
err = json.Unmarshal(rootJSON, root)
if err != nil {
return nil, err
}
err = r.KeyStoreManager.ValidateRoot(root, r.gun)
if err != nil {
return nil, err
}
kdb := keys.NewDB()
r.tufRepo = tuf.NewTufRepo(kdb, r.cryptoService)
err = r.tufRepo.SetRoot(root)
if err != nil {
return nil, err
}
return tufclient.NewClient(
r.tufRepo,
remote,
kdb,
r.fileStore,
), nil
}
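// Illustrative end-to-end sketch (not part of this file), assuming the
// repository was already initialized; the base directory, GUN, server URL, and
// retriever are placeholders for the example.
//
//	repo, err := NewNotaryRepository("/home/user/.docker/trust",
//		"docker.io/library/alpine", "https://notary.example.com",
//		http.DefaultTransport, retriever)
//	if err != nil { ... }
//	target, err := NewTarget("latest", "./alpine-latest.tar") // hashes + length computed from the file
//	if err != nil { ... }
//	if err := repo.AddTarget(target); err != nil { ... } // queued in the changelist
//	if err := repo.Publish(); err != nil { ... }         // signs and pushes to the server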

View file

@@ -0,0 +1,96 @@
package client
import (
"encoding/json"
"net/http"
"time"
"github.com/docker/notary/client/changelist"
"github.com/endophage/gotuf"
"github.com/endophage/gotuf/data"
"github.com/endophage/gotuf/keys"
"github.com/endophage/gotuf/store"
)
// Use this to initialize remote HTTPStores from the config settings
func getRemoteStore(baseURL, gun string, rt http.RoundTripper) (store.RemoteStore, error) {
return store.NewHTTPStore(
baseURL+"/v2/"+gun+"/_trust/tuf/",
"",
"json",
"",
"key",
rt,
)
}
func applyChangelist(repo *tuf.TufRepo, cl changelist.Changelist) error {
changes := cl.List()
var err error
for _, c := range changes {
if c.Scope() == "targets" {
applyTargetsChange(repo, c)
}
if err != nil {
return err
}
}
return nil
}
func applyTargetsChange(repo *tuf.TufRepo, c changelist.Change) error {
var err error
meta := &data.FileMeta{}
err = json.Unmarshal(c.Content(), meta)
if err != nil {
return err
}
if c.Action() == changelist.ActionCreate {
files := data.Files{c.Path(): *meta}
_, err = repo.AddTargets("targets", files)
} else if c.Action() == changelist.ActionDelete {
err = repo.RemoveTargets("targets", c.Path())
}
if err != nil {
return err
}
return nil
}
func nearExpiry(r *data.SignedRoot) bool {
plus6mo := time.Now().AddDate(0, 6, 0)
return r.Signed.Expires.Before(plus6mo)
}
func initRoles(kdb *keys.KeyDB, rootKey, targetsKey, snapshotKey, timestampKey data.PublicKey) error {
rootRole, err := data.NewRole("root", 1, []string{rootKey.ID()}, nil, nil)
if err != nil {
return err
}
targetsRole, err := data.NewRole("targets", 1, []string{targetsKey.ID()}, nil, nil)
if err != nil {
return err
}
snapshotRole, err := data.NewRole("snapshot", 1, []string{snapshotKey.ID()}, nil, nil)
if err != nil {
return err
}
timestampRole, err := data.NewRole("timestamp", 1, []string{timestampKey.ID()}, nil, nil)
if err != nil {
return err
}
if err := kdb.AddRole(rootRole); err != nil {
return err
}
if err := kdb.AddRole(targetsRole); err != nil {
return err
}
if err := kdb.AddRole(snapshotRole); err != nil {
return err
}
if err := kdb.AddRole(timestampRole); err != nil {
return err
}
return nil
}

View file

@@ -0,0 +1,199 @@
package cryptoservice
import (
"crypto"
"crypto/ecdsa"
"crypto/rand"
"crypto/rsa"
"crypto/sha256"
"crypto/x509"
"fmt"
"path/filepath"
"github.com/Sirupsen/logrus"
"github.com/agl/ed25519"
"github.com/docker/notary/trustmanager"
"github.com/endophage/gotuf/data"
)
const (
rsaKeySize = 2048 // Used for snapshots and targets keys
)
// CryptoService implements Sign and Create, holding a specific GUN and keystore to
// operate on
type CryptoService struct {
gun string
keyStore trustmanager.KeyStore
}
// NewCryptoService returns an instance of CryptoService
func NewCryptoService(gun string, keyStore trustmanager.KeyStore) *CryptoService {
return &CryptoService{gun: gun, keyStore: keyStore}
}
// Create is used to generate keys for targets, snapshots and timestamps
func (ccs *CryptoService) Create(role string, algorithm data.KeyAlgorithm) (data.PublicKey, error) {
var privKey data.PrivateKey
var err error
switch algorithm {
case data.RSAKey:
privKey, err = trustmanager.GenerateRSAKey(rand.Reader, rsaKeySize)
if err != nil {
return nil, fmt.Errorf("failed to generate RSA key: %v", err)
}
case data.ECDSAKey:
privKey, err = trustmanager.GenerateECDSAKey(rand.Reader)
if err != nil {
return nil, fmt.Errorf("failed to generate EC key: %v", err)
}
case data.ED25519Key:
privKey, err = trustmanager.GenerateED25519Key(rand.Reader)
if err != nil {
return nil, fmt.Errorf("failed to generate ED25519 key: %v", err)
}
default:
return nil, fmt.Errorf("private key type not supported for key generation: %s", algorithm)
}
logrus.Debugf("generated new %s key for role: %s and keyID: %s", algorithm, role, privKey.ID())
// Store the private key into our keystore with the name being: /GUN/ID.key with an alias of role
err = ccs.keyStore.AddKey(filepath.Join(ccs.gun, privKey.ID()), role, privKey)
if err != nil {
return nil, fmt.Errorf("failed to add key to filestore: %v", err)
}
return data.PublicKeyFromPrivate(privKey), nil
}
// GetKey returns a key by ID
func (ccs *CryptoService) GetKey(keyID string) data.PublicKey {
key, _, err := ccs.keyStore.GetKey(keyID)
if err != nil {
return nil
}
return data.PublicKeyFromPrivate(key)
}
// RemoveKey deletes a key by ID
func (ccs *CryptoService) RemoveKey(keyID string) error {
return ccs.keyStore.RemoveKey(keyID)
}
// Sign returns the signatures for the payload with a set of keyIDs. It ignores
// signing errors and expects the caller to validate whether the number of
// returned signatures is adequate.
func (ccs *CryptoService) Sign(keyIDs []string, payload []byte) ([]data.Signature, error) {
signatures := make([]data.Signature, 0, len(keyIDs))
for _, keyid := range keyIDs {
// ccs.gun will be empty if this is the root key
keyName := filepath.Join(ccs.gun, keyid)
var privKey data.PrivateKey
var err error
privKey, _, err = ccs.keyStore.GetKey(keyName)
if err != nil {
logrus.Debugf("error attempting to retrieve key ID: %s, %v", keyid, err)
return nil, err
}
algorithm := privKey.Algorithm()
var sigAlgorithm data.SigAlgorithm
var sig []byte
switch algorithm {
case data.RSAKey:
sig, err = rsaSign(privKey, payload)
sigAlgorithm = data.RSAPSSSignature
case data.ECDSAKey:
sig, err = ecdsaSign(privKey, payload)
sigAlgorithm = data.ECDSASignature
case data.ED25519Key:
// ED25519 does not operate on a SHA256 hash
sig, err = ed25519Sign(privKey, payload)
sigAlgorithm = data.EDDSASignature
}
if err != nil {
logrus.Debugf("ignoring error attempting to %s sign with keyID: %s, %v", algorithm, keyid, err)
return nil, err
}
logrus.Debugf("appending %s signature with Key ID: %s", algorithm, keyid)
// Append signatures to result array
signatures = append(signatures, data.Signature{
KeyID: keyid,
Method: sigAlgorithm,
Signature: sig[:],
})
}
return signatures, nil
}
func rsaSign(privKey data.PrivateKey, message []byte) ([]byte, error) {
if privKey.Algorithm() != data.RSAKey {
return nil, fmt.Errorf("private key type not supported: %s", privKey.Algorithm())
}
hashed := sha256.Sum256(message)
// Create an rsa.PrivateKey out of the private key bytes
rsaPrivKey, err := x509.ParsePKCS1PrivateKey(privKey.Private())
if err != nil {
return nil, err
}
// Use the RSA key to RSASSA-PSS sign the data
sig, err := rsa.SignPSS(rand.Reader, rsaPrivKey, crypto.SHA256, hashed[:], &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash})
if err != nil {
return nil, err
}
return sig, nil
}
func ecdsaSign(privKey data.PrivateKey, message []byte) ([]byte, error) {
if privKey.Algorithm() != data.ECDSAKey {
return nil, fmt.Errorf("private key type not supported: %s", privKey.Algorithm())
}
hashed := sha256.Sum256(message)
// Create an ecdsa.PrivateKey out of the private key bytes
ecdsaPrivKey, err := x509.ParseECPrivateKey(privKey.Private())
if err != nil {
return nil, err
}
// Use the ECDSA key to sign the data
r, s, err := ecdsa.Sign(rand.Reader, ecdsaPrivKey, hashed[:])
if err != nil {
return nil, err
}
rBytes, sBytes := r.Bytes(), s.Bytes()
octetLength := (ecdsaPrivKey.Params().BitSize + 7) >> 3
// MUST include leading zeros in the output
rBuf := make([]byte, octetLength-len(rBytes), octetLength)
sBuf := make([]byte, octetLength-len(sBytes), octetLength)
rBuf = append(rBuf, rBytes...)
sBuf = append(sBuf, sBytes...)
return append(rBuf, sBuf...), nil
}
func ed25519Sign(privKey data.PrivateKey, message []byte) ([]byte, error) {
if privKey.Algorithm() != data.ED25519Key {
return nil, fmt.Errorf("private key type not supported: %s", privKey.Algorithm())
}
priv := [ed25519.PrivateKeySize]byte{}
copy(priv[:], privKey.Private()[ed25519.PublicKeySize:])
sig := ed25519.Sign(&priv, message)
return sig[:], nil
}
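// Illustrative sketch (not part of this file): verifying the raw r||s encoding
// produced by ecdsaSign above, assuming pub is the matching *ecdsa.PublicKey.
//
//	octetLength := (pub.Params().BitSize + 7) >> 3
//	r := new(big.Int).SetBytes(sig[:octetLength]) // math/big
//	s := new(big.Int).SetBytes(sig[octetLength:])
//	hashed := sha256.Sum256(message)
//	ok := ecdsa.Verify(pub, hashed[:], r, s)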

View file

@@ -0,0 +1,83 @@
package cryptoservice
import (
"crypto"
"crypto/ecdsa"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"fmt"
"github.com/docker/notary/trustmanager"
"github.com/endophage/gotuf/data"
"github.com/endophage/gotuf/signed"
)
// UnlockedCryptoService encapsulates a private key and a cryptoservice that
// uses that private key, providing convenience methods for generation of
// certificates.
type UnlockedCryptoService struct {
PrivKey data.PrivateKey
CryptoService signed.CryptoService
}
// NewUnlockedCryptoService creates an UnlockedCryptoService instance
func NewUnlockedCryptoService(privKey data.PrivateKey, cryptoService signed.CryptoService) *UnlockedCryptoService {
return &UnlockedCryptoService{
PrivKey: privKey,
CryptoService: cryptoService,
}
}
// ID gets a consistent ID based on the PrivateKey bytes and algorithm type
func (ucs *UnlockedCryptoService) ID() string {
return ucs.PublicKey().ID()
}
// PublicKey returns the public key associated with the private key
func (ucs *UnlockedCryptoService) PublicKey() data.PublicKey {
return data.PublicKeyFromPrivate(ucs.PrivKey)
}
// GenerateCertificate generates an X509 Certificate from a template, given a GUN
func (ucs *UnlockedCryptoService) GenerateCertificate(gun string) (*x509.Certificate, error) {
algorithm := ucs.PrivKey.Algorithm()
var publicKey crypto.PublicKey
var privateKey crypto.PrivateKey
var err error
switch algorithm {
case data.RSAKey:
var rsaPrivateKey *rsa.PrivateKey
rsaPrivateKey, err = x509.ParsePKCS1PrivateKey(ucs.PrivKey.Private())
privateKey = rsaPrivateKey
publicKey = rsaPrivateKey.Public()
case data.ECDSAKey:
var ecdsaPrivateKey *ecdsa.PrivateKey
ecdsaPrivateKey, err = x509.ParseECPrivateKey(ucs.PrivKey.Private())
privateKey = ecdsaPrivateKey
publicKey = ecdsaPrivateKey.Public()
default:
return nil, fmt.Errorf("only RSA or ECDSA keys are currently supported. Found: %s", algorithm)
}
if err != nil {
return nil, fmt.Errorf("failed to parse root key: %s (%v)", gun, err)
}
template, err := trustmanager.NewCertificate(gun)
if err != nil {
return nil, fmt.Errorf("failed to create the certificate template for: %s (%v)", gun, err)
}
derBytes, err := x509.CreateCertificate(rand.Reader, template, template, publicKey, privateKey)
if err != nil {
return nil, fmt.Errorf("failed to create the certificate for: %s (%v)", gun, err)
}
// Encode the new certificate into PEM
cert, err := x509.ParseCertificate(derBytes)
if err != nil {
return nil, fmt.Errorf("failed to parse the certificate for key: %s (%v)", gun, err)
}
return cert, nil
}
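// Illustrative sketch (not part of this file): minting a certificate for a GUN
// from a freshly generated ECDSA key. The GUN is a placeholder, and nil is
// passed for the CryptoService since GenerateCertificate doesn't use it.
//
//	privKey, err := trustmanager.GenerateECDSAKey(rand.Reader)
//	if err != nil { ... }
//	ucs := NewUnlockedCryptoService(privKey, nil)
//	cert, err := ucs.GenerateCertificate("docker.io/library/alpine")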

View file

@@ -0,0 +1,304 @@
package keystoremanager
import (
"archive/zip"
"crypto/x509"
"encoding/pem"
"errors"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/Sirupsen/logrus"
"github.com/docker/notary/pkg/passphrase"
"github.com/docker/notary/trustmanager"
)
var (
// ErrNoValidPrivateKey is returned if a key being imported doesn't
// look like a private key
ErrNoValidPrivateKey = errors.New("no valid private key found")
// ErrRootKeyNotEncrypted is returned if a root key being imported is
// unencrypted
ErrRootKeyNotEncrypted = errors.New("only encrypted root keys may be imported")
// ErrNoKeysFoundForGUN is returned if no keys are found for the
// specified GUN during export
ErrNoKeysFoundForGUN = errors.New("no keys found for specified GUN")
)
// ExportRootKey exports the specified root key to an io.Writer in PEM format.
// The key's existing encryption is preserved.
func (km *KeyStoreManager) ExportRootKey(dest io.Writer, keyID string) error {
pemBytes, err := km.rootKeyStore.Get(keyID + "_root")
if err != nil {
return err
}
_, err = dest.Write(pemBytes)
return err
}
// checkRootKeyIsEncrypted makes sure the root key is encrypted. We have
// internal assumptions that depend on this.
func checkRootKeyIsEncrypted(pemBytes []byte) error {
block, _ := pem.Decode(pemBytes)
if block == nil {
return ErrNoValidPrivateKey
}
if !x509.IsEncryptedPEMBlock(block) {
return ErrRootKeyNotEncrypted
}
return nil
}
// ImportRootKey imports a root key in PEM format from an io.Reader.
// The key's existing encryption is preserved. The keyID parameter is
// necessary because otherwise we'd need the passphrase to decrypt the key
// in order to compute the ID.
func (km *KeyStoreManager) ImportRootKey(source io.Reader, keyID string) error {
pemBytes, err := ioutil.ReadAll(source)
if err != nil {
return err
}
if err = checkRootKeyIsEncrypted(pemBytes); err != nil {
return err
}
if err = km.rootKeyStore.Add(keyID+"_root", pemBytes); err != nil {
return err
}
return nil
}
func moveKeys(oldKeyStore, newKeyStore *trustmanager.KeyFileStore) error {
// List all files but no symlinks
for _, f := range oldKeyStore.ListKeys() {
pemBytes, alias, err := oldKeyStore.GetKey(f)
if err != nil {
return err
}
err = newKeyStore.AddKey(f, alias, pemBytes)
if err != nil {
return err
}
}
return nil
}
func addKeysToArchive(zipWriter *zip.Writer, newKeyStore *trustmanager.KeyFileStore, subDir string) error {
// List all files but no symlinks
for _, relKeyPath := range newKeyStore.ListFiles(false) {
fullKeyPath := filepath.Join(newKeyStore.BaseDir(), relKeyPath)
fi, err := os.Stat(fullKeyPath)
if err != nil {
return err
}
infoHeader, err := zip.FileInfoHeader(fi)
if err != nil {
return err
}
infoHeader.Name = filepath.Join(subDir, relKeyPath)
zipFileEntryWriter, err := zipWriter.CreateHeader(infoHeader)
if err != nil {
return err
}
fileContents, err := ioutil.ReadFile(fullKeyPath)
if err != nil {
return err
}
if _, err = zipFileEntryWriter.Write(fileContents); err != nil {
return err
}
}
return nil
}
// ExportAllKeys exports all keys to an io.Writer in zip format.
// newPassphraseRetriever will be used to obtain passphrases to use to encrypt the existing keys.
func (km *KeyStoreManager) ExportAllKeys(dest io.Writer, newPassphraseRetriever passphrase.Retriever) error {
tempBaseDir, err := ioutil.TempDir("", "notary-key-export-")
if err != nil {
return err
}
defer os.RemoveAll(tempBaseDir)
privNonRootKeysSubdir := filepath.Join(privDir, nonRootKeysSubdir)
privRootKeysSubdir := filepath.Join(privDir, rootKeysSubdir)
// Create temporary keystores to use as a staging area
tempNonRootKeysPath := filepath.Join(tempBaseDir, privNonRootKeysSubdir)
tempNonRootKeyStore, err := trustmanager.NewKeyFileStore(tempNonRootKeysPath, newPassphraseRetriever)
if err != nil {
return err
}
tempRootKeysPath := filepath.Join(tempBaseDir, privRootKeysSubdir)
tempRootKeyStore, err := trustmanager.NewKeyFileStore(tempRootKeysPath, newPassphraseRetriever)
if err != nil {
return err
}
if err := moveKeys(km.rootKeyStore, tempRootKeyStore); err != nil {
return err
}
if err := moveKeys(km.nonRootKeyStore, tempNonRootKeyStore); err != nil {
return err
}
zipWriter := zip.NewWriter(dest)
if err := addKeysToArchive(zipWriter, tempRootKeyStore, privRootKeysSubdir); err != nil {
return err
}
if err := addKeysToArchive(zipWriter, tempNonRootKeyStore, privNonRootKeysSubdir); err != nil {
return err
}
zipWriter.Close()
return nil
}
// ImportKeysZip imports keys from a zip file provided as an io.ReaderAt. The
// keys in the root_keys directory are left encrypted, but the other keys are
// decrypted with the specified passphrase.
func (km *KeyStoreManager) ImportKeysZip(zipReader zip.Reader) error {
// Temporarily store the keys in maps, so we can bail early if there's
// an error (for example, wrong passphrase), without leaving the key
// store in an inconsistent state
newRootKeys := make(map[string][]byte)
newNonRootKeys := make(map[string][]byte)
// Note that using / as a separator is okay here - the zip package
// guarantees that the separator will be /
rootKeysPrefix := privDir + "/" + rootKeysSubdir + "/"
nonRootKeysPrefix := privDir + "/" + nonRootKeysSubdir + "/"
// Iterate through the files in the archive. Don't add the keys to the
// stores yet; they are staged in the maps above so we can bail early on error.
for _, f := range zipReader.File {
fNameTrimmed := strings.TrimSuffix(f.Name, filepath.Ext(f.Name))
rc, err := f.Open()
if err != nil {
return err
}
fileBytes, err := ioutil.ReadAll(rc)
if err != nil {
rc.Close()
return err
}
// Is this in the root_keys directory?
// Note that using / as a separator is okay here - the zip
// package guarantees that the separator will be /
if strings.HasPrefix(fNameTrimmed, rootKeysPrefix) {
if err = checkRootKeyIsEncrypted(fileBytes); err != nil {
rc.Close()
return err
}
// Root keys are preserved without decrypting
keyName := strings.TrimPrefix(fNameTrimmed, rootKeysPrefix)
newRootKeys[keyName] = fileBytes
} else if strings.HasPrefix(fNameTrimmed, nonRootKeysPrefix) {
// Non-root keys are preserved without decrypting
keyName := strings.TrimPrefix(fNameTrimmed, nonRootKeysPrefix)
newNonRootKeys[keyName] = fileBytes
} else {
// This path inside the zip archive doesn't look like a
// root key, non-root key, or alias. To avoid adding a file
// to the filestore that we won't be able to use, skip
// this file in the import.
logrus.Warnf("skipping import of key with a path that doesn't begin with %s or %s: %s", rootKeysPrefix, nonRootKeysPrefix, f.Name)
rc.Close()
continue
}
rc.Close()
}
for keyName, pemBytes := range newRootKeys {
if err := km.rootKeyStore.Add(keyName, pemBytes); err != nil {
return err
}
}
for keyName, pemBytes := range newNonRootKeys {
if err := km.nonRootKeyStore.Add(keyName, pemBytes); err != nil {
return err
}
}
return nil
}
func moveKeysByGUN(oldKeyStore, newKeyStore *trustmanager.KeyFileStore, gun string) error {
// List all files but no symlinks
for _, relKeyPath := range oldKeyStore.ListKeys() {
// Skip keys that aren't associated with this GUN
if !strings.HasPrefix(relKeyPath, filepath.FromSlash(gun)) {
continue
}
privKey, alias, err := oldKeyStore.GetKey(relKeyPath)
if err != nil {
return err
}
err = newKeyStore.AddKey(relKeyPath, alias, privKey)
if err != nil {
return err
}
}
return nil
}
// ExportKeysByGUN exports all keys associated with a specified GUN to an
// io.Writer in zip format. passphraseRetriever is used to select new passphrases to use to
// encrypt the keys.
func (km *KeyStoreManager) ExportKeysByGUN(dest io.Writer, gun string, passphraseRetriever passphrase.Retriever) error {
tempBaseDir, err := ioutil.TempDir("", "notary-key-export-")
if err != nil {
return err
}
defer os.RemoveAll(tempBaseDir)
privNonRootKeysSubdir := filepath.Join(privDir, nonRootKeysSubdir)
// Create temporary keystore to use as a staging area
tempNonRootKeysPath := filepath.Join(tempBaseDir, privNonRootKeysSubdir)
tempNonRootKeyStore, err := trustmanager.NewKeyFileStore(tempNonRootKeysPath, passphraseRetriever)
if err != nil {
return err
}
if err := moveKeysByGUN(km.nonRootKeyStore, tempNonRootKeyStore, gun); err != nil {
return err
}
zipWriter := zip.NewWriter(dest)
if len(tempNonRootKeyStore.ListKeys()) == 0 {
return ErrNoKeysFoundForGUN
}
if err := addKeysToArchive(zipWriter, tempNonRootKeyStore, privNonRootKeysSubdir); err != nil {
return err
}
zipWriter.Close()
return nil
}
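// Illustrative backup/restore sketch (not part of this file); the path and
// retriever are placeholders. zip.OpenReader returns a *zip.ReadCloser whose
// embedded Reader satisfies ImportKeysZip's parameter.
//
//	out, _ := os.Create("/tmp/notary-keys.zip")
//	if err := km.ExportAllKeys(out, retriever); err != nil { ... }
//	out.Close()
//	in, _ := zip.OpenReader("/tmp/notary-keys.zip")
//	defer in.Close()
//	if err := km.ImportKeysZip(in.Reader); err != nil { ... }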

View file

@@ -0,0 +1,426 @@
package keystoremanager
import (
"crypto/rand"
"crypto/x509"
"errors"
"fmt"
"path/filepath"
"strings"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/notary/cryptoservice"
"github.com/docker/notary/pkg/passphrase"
"github.com/docker/notary/trustmanager"
"github.com/endophage/gotuf/data"
"github.com/endophage/gotuf/signed"
)
// KeyStoreManager is an abstraction around the root and non-root key stores,
// and related CA stores
type KeyStoreManager struct {
rootKeyStore *trustmanager.KeyFileStore
nonRootKeyStore *trustmanager.KeyFileStore
trustedCAStore trustmanager.X509Store
trustedCertificateStore trustmanager.X509Store
}
const (
trustDir = "trusted_certificates"
privDir = "private"
rootKeysSubdir = "root_keys"
nonRootKeysSubdir = "tuf_keys"
rsaRootKeySize = 4096 // Used for new root keys
)
// ErrValidationFail is returned when there are no valid trusted certificates
// being served inside of the roots.json
type ErrValidationFail struct {
Reason string
}
// Error returns the string representation of ErrValidationFail
func (err ErrValidationFail) Error() string {
return fmt.Sprintf("could not validate the path to a trusted root: %s", err.Reason)
}
// ErrRootRotationFail is returned when we fail to do a full root key rotation,
// either by failing to add the new root certificates or to delete the old ones
type ErrRootRotationFail struct {
Reason string
}
// Error returns the string representation of ErrRootRotationFail
func (err ErrRootRotationFail) Error() string {
return fmt.Sprintf("could not rotate trust to a new trusted root: %s", err.Reason)
}
// NewKeyStoreManager returns an initialized KeyStoreManager, or an error
// if it fails to create the KeyFileStores or load certificates
func NewKeyStoreManager(baseDir string, passphraseRetriever passphrase.Retriever) (*KeyStoreManager, error) {
nonRootKeysPath := filepath.Join(baseDir, privDir, nonRootKeysSubdir)
nonRootKeyStore, err := trustmanager.NewKeyFileStore(nonRootKeysPath, passphraseRetriever)
if err != nil {
return nil, err
}
// Load the keystore that will hold all of our encrypted Root Private Keys
rootKeysPath := filepath.Join(baseDir, privDir, rootKeysSubdir)
rootKeyStore, err := trustmanager.NewKeyFileStore(rootKeysPath, passphraseRetriever)
if err != nil {
return nil, err
}
trustPath := filepath.Join(baseDir, trustDir)
// Load all CAs that aren't expired and don't use SHA1
trustedCAStore, err := trustmanager.NewX509FilteredFileStore(trustPath, func(cert *x509.Certificate) bool {
return cert.IsCA && cert.BasicConstraintsValid && cert.SubjectKeyId != nil &&
time.Now().Before(cert.NotAfter) &&
cert.SignatureAlgorithm != x509.SHA1WithRSA &&
cert.SignatureAlgorithm != x509.DSAWithSHA1 &&
cert.SignatureAlgorithm != x509.ECDSAWithSHA1
})
if err != nil {
return nil, err
}
// Load all individual (non-CA) certificates that aren't expired and don't use SHA1
trustedCertificateStore, err := trustmanager.NewX509FilteredFileStore(trustPath, func(cert *x509.Certificate) bool {
return !cert.IsCA &&
time.Now().Before(cert.NotAfter) &&
cert.SignatureAlgorithm != x509.SHA1WithRSA &&
cert.SignatureAlgorithm != x509.DSAWithSHA1 &&
cert.SignatureAlgorithm != x509.ECDSAWithSHA1
})
if err != nil {
return nil, err
}
return &KeyStoreManager{
rootKeyStore: rootKeyStore,
nonRootKeyStore: nonRootKeyStore,
trustedCAStore: trustedCAStore,
trustedCertificateStore: trustedCertificateStore,
}, nil
}
// RootKeyStore returns the root key store being managed by this
// KeyStoreManager
func (km *KeyStoreManager) RootKeyStore() *trustmanager.KeyFileStore {
return km.rootKeyStore
}
// NonRootKeyStore returns the non-root key store being managed by this
// KeyStoreManager
func (km *KeyStoreManager) NonRootKeyStore() *trustmanager.KeyFileStore {
return km.nonRootKeyStore
}
// TrustedCertificateStore returns the trusted certificate store being managed
// by this KeyStoreManager
func (km *KeyStoreManager) TrustedCertificateStore() trustmanager.X509Store {
return km.trustedCertificateStore
}
// TrustedCAStore returns the CA store being managed by this KeyStoreManager
func (km *KeyStoreManager) TrustedCAStore() trustmanager.X509Store {
return km.trustedCAStore
}
// AddTrustedCert adds a cert to the trusted certificate store (not the CA
// store)
func (km *KeyStoreManager) AddTrustedCert(cert *x509.Certificate) {
km.trustedCertificateStore.AddCert(cert)
}
// AddTrustedCACert adds a cert to the trusted CA certificate store
func (km *KeyStoreManager) AddTrustedCACert(cert *x509.Certificate) {
km.trustedCAStore.AddCert(cert)
}
// GenRootKey generates a new root key
func (km *KeyStoreManager) GenRootKey(algorithm string) (string, error) {
var err error
var privKey data.PrivateKey
// We don't want external API callers to rely on internal TUF data types, so
// the API here should continue to receive a string algorithm, and ensure
// that it is downcased
switch data.KeyAlgorithm(strings.ToLower(algorithm)) {
case data.RSAKey:
privKey, err = trustmanager.GenerateRSAKey(rand.Reader, rsaRootKeySize)
case data.ECDSAKey:
privKey, err = trustmanager.GenerateECDSAKey(rand.Reader)
default:
return "", fmt.Errorf("only RSA or ECDSA keys are currently supported. Found: %s", algorithm)
}
if err != nil {
return "", fmt.Errorf("failed to generate private key: %v", err)
}
// Store the new key in the root keystore under the "root" alias
if err = km.rootKeyStore.AddKey(privKey.ID(), "root", privKey); err != nil {
return "", err
}
return privKey.ID(), nil
}
// GetRootCryptoService retrieves a root key and a cryptoservice to use with it
// TODO(mccauley): remove this as its no longer needed once we have key caching in the keystores
func (km *KeyStoreManager) GetRootCryptoService(rootKeyID string) (*cryptoservice.UnlockedCryptoService, error) {
privKey, _, err := km.rootKeyStore.GetKey(rootKeyID)
if err != nil {
return nil, fmt.Errorf("could not get decrypted root key with keyID: %s, %v", rootKeyID, err)
}
cryptoService := cryptoservice.NewCryptoService("", km.rootKeyStore)
return cryptoservice.NewUnlockedCryptoService(privKey, cryptoService), nil
}
/*
ValidateRoot receives a new root, validates its correctness and attempts to
do root key rotation if needed.
First we list the current trusted certificates we have for a particular GUN. If
that list is non-empty, it means we've already seen this repository before and
have a list of trusted certificates for it. In this case, we use this list of
certificates to attempt to validate this root file.
If the previous validation succeeds, or in the case where we found no trusted
certificates for this particular GUN, we check the integrity of the root by
making sure that it is validated by itself. This means that we will attempt to
validate the root data with the certificates that are included in the root keys
themselves.
If this last step succeeds, we attempt to do root rotation by ensuring that
we only trust the certificates that are present in the new root.
This mechanism of operation is essentially Trust On First Use (TOFU): if we
have never seen a certificate for a particular CN, we trust it. If later we see
a different certificate for that CN, we return an ErrValidationFail error.
Note that since we only allow trust data to be downloaded over an HTTPS channel,
we are using the current public PKI to validate the first download of the
certificate, adding an extra layer of security over the normal (SSH-style)
trust model. We shall call this: TOFUS.
*/
func (km *KeyStoreManager) ValidateRoot(root *data.Signed, gun string) error {
logrus.Debugf("entered ValidateRoot with dns: %s", gun)
signedRoot, err := data.RootFromSigned(root)
if err != nil {
return err
}
// Retrieve all the leaf certificates in root for which the CN matches the GUN
allValidCerts, err := validRootLeafCerts(signedRoot, gun)
if err != nil {
logrus.Debugf("error retrieving valid leaf certificates for: %s, %v", gun, err)
return &ErrValidationFail{Reason: "unable to retrieve valid leaf certificates"}
}
// Retrieve all the trusted certificates that match this gun
certsForCN, err := km.trustedCertificateStore.GetCertificatesByCN(gun)
if err != nil {
// If the error that we get back is different than ErrNoCertificatesFound
// we couldn't check if there are any certificates with this CN already
// trusted. Let's take the conservative approach and return a failed validation
if _, ok := err.(*trustmanager.ErrNoCertificatesFound); !ok {
logrus.Debugf("error retrieving trusted certificates for: %s, %v", gun, err)
return &ErrValidationFail{Reason: "unable to retrieve trusted certificates"}
}
}
// If we have certificates that match this specific GUN, let's make sure to
// use them first to validate that this new root is valid.
if len(certsForCN) != 0 {
logrus.Debugf("found %d valid root certificates for %s", len(certsForCN), gun)
err = signed.VerifyRoot(root, 0, trustmanager.CertsToKeys(certsForCN))
if err != nil {
logrus.Debugf("failed to verify TUF data for: %s, %v", gun, err)
return &ErrValidationFail{Reason: "failed to validate data with current trusted certificates"}
}
} else {
logrus.Debugf("found no currently valid root certificates for %s", gun)
}
// Validate the integrity of the new root (does it have valid signatures)
err = signed.VerifyRoot(root, 0, trustmanager.CertsToKeys(allValidCerts))
if err != nil {
logrus.Debugf("failed to verify TUF data for: %s, %v", gun, err)
return &ErrValidationFail{Reason: "failed to validate integrity of roots"}
}
// Getting here means A) we had trusted certificates and both the
// old and new validated this root; or B) we had no trusted certificates but
// the new set of certificates has integrity (self-signed)
logrus.Debugf("entering root certificate rotation for: %s", gun)
// Do root certificate rotation: we trust only the certs present in the new root
// First we add all the new certificates (even if they already exist)
for _, cert := range allValidCerts {
err := km.trustedCertificateStore.AddCert(cert)
if err != nil {
// If the error is that the cert already exists, we don't fail the rotation
if _, ok := err.(*trustmanager.ErrCertExists); ok {
logrus.Debugf("ignoring certificate addition to: %s", gun)
continue
}
logrus.Debugf("error adding new trusted certificate for: %s, %v", gun, err)
}
}
// Now we delete old certificates that aren't present in the new root
for certID, cert := range certsToRemove(certsForCN, allValidCerts) {
logrus.Debugf("removing certificate with certID: %s", certID)
err = km.trustedCertificateStore.RemoveCert(cert)
if err != nil {
logrus.Debugf("failed to remove trusted certificate with keyID: %s, %v", certID, err)
return &ErrRootRotationFail{Reason: "failed to rotate root keys"}
}
}
logrus.Debugf("Root validation succeeded for %s", gun)
return nil
}
// validRootLeafCerts returns a list of non-expired, non-SHA1 certificates whose
// Common Names match the provided GUN
func validRootLeafCerts(root *data.SignedRoot, gun string) ([]*x509.Certificate, error) {
// Get a list of all of the leaf certificates present in root
allLeafCerts, _ := parseAllCerts(root)
var validLeafCerts []*x509.Certificate
// Go through every leaf certificate and check that the CN matches the gun
for _, cert := range allLeafCerts {
// Validate that this leaf certificate has a CN that matches the exact gun
if cert.Subject.CommonName != gun {
logrus.Debugf("error leaf certificate CN: %s doesn't match the given GUN: %s", cert.Subject.CommonName)
continue
}
// Make sure the certificate is not expired
if time.Now().After(cert.NotAfter) {
logrus.Debugf("error leaf certificate is expired")
continue
}
// We don't allow root certificates that use SHA1
if cert.SignatureAlgorithm == x509.SHA1WithRSA ||
cert.SignatureAlgorithm == x509.DSAWithSHA1 ||
cert.SignatureAlgorithm == x509.ECDSAWithSHA1 {
logrus.Debugf("error certificate uses deprecated hashing algorithm (SHA1)")
continue
}
validLeafCerts = append(validLeafCerts, cert)
}
if len(validLeafCerts) < 1 {
logrus.Debugf("didn't find any valid leaf certificates for %s", gun)
return nil, errors.New("no valid leaf certificates found in any of the root keys")
}
logrus.Debugf("found %d valid leaf certificates for %s", len(validLeafCerts), gun)
return validLeafCerts, nil
}
// parseAllCerts returns two maps, one with all of the leaf certificates and one
// with all the intermediate certificates found in signedRoot
func parseAllCerts(signedRoot *data.SignedRoot) (map[string]*x509.Certificate, map[string][]*x509.Certificate) {
leafCerts := make(map[string]*x509.Certificate)
intCerts := make(map[string][]*x509.Certificate)
// Before we loop through all root keys available, make sure any exist
rootRoles, ok := signedRoot.Signed.Roles["root"]
if !ok {
logrus.Debugf("tried to parse certificates from invalid root signed data")
return nil, nil
}
logrus.Debugf("found the following root keys: %v", rootRoles.KeyIDs)
// Iterate over every keyID for the root role inside of roots.json
for _, keyID := range rootRoles.KeyIDs {
// check that the key exists in the signed root keys map
key, ok := signedRoot.Signed.Keys[keyID]
if !ok {
logrus.Debugf("error while getting data for keyID: %s", keyID)
continue
}
// Decode all the x509 certificates that were bundled with this
// specific root key
decodedCerts, err := trustmanager.LoadCertBundleFromPEM(key.Public())
if err != nil {
logrus.Debugf("error while parsing root certificate with keyID: %s, %v", keyID, err)
continue
}
// Get all non-CA certificates in the decoded certificates
leafCertList := trustmanager.GetLeafCerts(decodedCerts)
// If we got no leaf certificates or we got more than one, fail
if len(leafCertList) != 1 {
logrus.Debugf("invalid chain due to leaf certificate missing or too many leaf certificates for keyID: %s", keyID)
continue
}
// Get the ID of the leaf certificate
leafCert := leafCertList[0]
leafID, err := trustmanager.FingerprintCert(leafCert)
if err != nil {
logrus.Debugf("error while fingerprinting root certificate with keyID: %s, %v", keyID, err)
continue
}
// Store the leaf cert in the map
leafCerts[leafID] = leafCert
// Get all the remaining certificates marked as CAs to be used as intermediates
intermediateCerts := trustmanager.GetIntermediateCerts(decodedCerts)
intCerts[leafID] = intermediateCerts
}
return leafCerts, intCerts
}
// certsToRemove returns all the certificates from oldCerts that aren't present
// in newCerts
func certsToRemove(oldCerts, newCerts []*x509.Certificate) map[string]*x509.Certificate {
certsToRemove := make(map[string]*x509.Certificate)
// If no newCerts were provided
if len(newCerts) == 0 {
return certsToRemove
}
// Populate a map with all the IDs from newCerts
var newCertMap = make(map[string]struct{})
for _, cert := range newCerts {
certID, err := trustmanager.FingerprintCert(cert)
if err != nil {
logrus.Debugf("error while fingerprinting root certificate with keyID: %s, %v", certID, err)
continue
}
newCertMap[certID] = struct{}{}
}
// Iterate over all the old certificates and check to see if we should remove them
for _, cert := range oldCerts {
certID, err := trustmanager.FingerprintCert(cert)
if err != nil {
logrus.Debugf("error while fingerprinting root certificate with certID: %s, %v", certID, err)
continue
}
if _, ok := newCertMap[certID]; !ok {
certsToRemove[certID] = cert
}
}
return certsToRemove
}

View file

@ -0,0 +1,148 @@
// Package passphrase provides utility functions for managing passphrases
// for TUF and Notary keys.
package passphrase
import (
"bufio"
"errors"
"fmt"
"io"
"os"
"strings"
"path/filepath"
"github.com/docker/docker/pkg/term"
)
// Retriever is a callback function that should retrieve a passphrase
// for a given named key. If it should be treated as a new passphrase (e.g. with
// confirmation), createNew will be true. Attempts is passed in so that implementers
// can decide how many chances to give to a human, for example.
type Retriever func(keyName, alias string, createNew bool, attempts int) (passphrase string, giveup bool, err error)
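// ConstantRetriever is an illustrative sketch and not part of the original
// API: it builds a Retriever that always returns the same passphrase and
// never gives up, which may be useful for tests or non-interactive
// automation. The fixed passphrase is an assumption for demonstration.
func ConstantRetriever(passphrase string) Retriever {
return func(keyName, alias string, createNew bool, attempts int) (string, bool, error) {
return passphrase, false, nil
}
}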
const (
idBytesToDisplay = 5
tufRootAlias = "root"
tufTargetsAlias = "targets"
tufSnapshotAlias = "snapshot"
tufRootKeyGenerationWarning = `You are about to create a new root signing key passphrase. This passphrase will be used to protect
the most sensitive key in your signing system. Please choose a long, complex passphrase and be careful
to keep the passphrase and the key file itself secure and backed up. It is highly recommended that you use
a password manager to generate the passphrase and keep it safe. There will be no way to recover this key.
You can find the key in your config directory.`
)
// PromptRetriever returns a new Retriever which will provide a prompt on stdin
// and stdout to retrieve a passphrase. The passphrase will be cached such that
// subsequent prompts will produce the same passphrase.
func PromptRetriever() Retriever {
return PromptRetrieverWithInOut(os.Stdin, os.Stdout)
}
// PromptRetrieverWithInOut returns a new Retriever which will provide a
// prompt using the given in and out readers. The passphrase will be cached
// such that subsequent prompts will produce the same passphrase.
func PromptRetrieverWithInOut(in io.Reader, out io.Writer) Retriever {
userEnteredTargetsSnapshotsPass := false
targetsSnapshotsPass := ""
userEnteredRootsPass := false
rootsPass := ""
return func(keyName string, alias string, createNew bool, numAttempts int) (string, bool, error) {
if alias == tufRootAlias && createNew && numAttempts == 0 {
fmt.Fprintln(out, tufRootKeyGenerationWarning)
}
if numAttempts > 0 {
if createNew {
fmt.Fprintln(out, "Passphrases do not match. Please retry.")
} else {
fmt.Fprintln(out, "Passphrase incorrect. Please retry.")
}
}
// First, check if we have a password cached for this alias.
if numAttempts == 0 {
if userEnteredTargetsSnapshotsPass && (alias == tufSnapshotAlias || alias == tufTargetsAlias) {
return targetsSnapshotsPass, false, nil
}
if userEnteredRootsPass && (alias == "root") {
return rootsPass, false, nil
}
}
if numAttempts > 3 && !createNew {
return "", true, errors.New("Too many attempts")
}
state, err := term.SaveState(0)
if err != nil {
return "", false, err
}
term.DisableEcho(0, state)
defer term.RestoreTerminal(0, state)
stdin := bufio.NewReader(in)
indexOfLastSeparator := strings.LastIndex(keyName, string(filepath.Separator))
if len(keyName) > indexOfLastSeparator+idBytesToDisplay+1 {
keyName = keyName[:indexOfLastSeparator+idBytesToDisplay+1]
}
if createNew {
fmt.Fprintf(out, "Enter passphrase for new %s key with id %s: ", alias, keyName)
} else {
fmt.Fprintf(out, "Enter key passphrase for %s key with id %s: ", alias, keyName)
}
passphrase, err := stdin.ReadBytes('\n')
fmt.Fprintln(out)
if err != nil {
return "", false, err
}
retPass := strings.TrimSpace(string(passphrase))
if !createNew {
if alias == tufSnapshotAlias || alias == tufTargetsAlias {
userEnteredTargetsSnapshotsPass = true
targetsSnapshotsPass = retPass
}
if alias == tufRootAlias {
userEnteredRootsPass = true
rootsPass = retPass
}
return retPass, false, nil
}
if len(retPass) < 8 {
fmt.Fprintln(out, "Please use a password manager to generate and store a good random passphrase.")
return "", false, errors.New("Passphrase too short")
}
fmt.Fprintf(out, "Repeat passphrase for new %s key with id %s: ", alias, keyName)
confirmation, err := stdin.ReadBytes('\n')
fmt.Fprintln(out)
if err != nil {
return "", false, err
}
confirmationStr := strings.TrimSpace(string(confirmation))
if retPass != confirmationStr {
return "", false, errors.New("The entered passphrases do not match")
}
if alias == tufSnapshotAlias || alias == tufTargetsAlias {
userEnteredTargetsSnapshotsPass = true
targetsSnapshotsPass = retPass
}
if alias == tufRootAlias {
userEnteredRootsPass = true
rootsPass = retPass
}
return retPass, false, nil
}
}

View file

@ -0,0 +1,293 @@
package trustmanager
import (
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"sync"
)
const (
visible os.FileMode = 0755
private os.FileMode = 0700
)
var (
// ErrPathOutsideStore indicates that the returned path would be
// outside the store
ErrPathOutsideStore = errors.New("path outside file store")
)
// LimitedFileStore implements the bare bones primitives (no symlinks or
// hierarchy)
type LimitedFileStore interface {
Add(fileName string, data []byte) error
Remove(fileName string) error
Get(fileName string) ([]byte, error)
ListFiles(symlinks bool) []string
}
// FileStore is the interface for full-featured FileStores
type FileStore interface {
LimitedFileStore
RemoveDir(directoryName string) error
GetPath(fileName string) (string, error)
ListDir(directoryName string, symlinks bool) []string
Link(src, dst string) error
BaseDir() string
}
// SimpleFileStore implements FileStore
type SimpleFileStore struct {
baseDir string
fileExt string
perms os.FileMode
}
// NewSimpleFileStore creates a directory with 755 permissions
func NewSimpleFileStore(baseDir string, fileExt string) (*SimpleFileStore, error) {
baseDir = filepath.Clean(baseDir)
if err := CreateDirectory(baseDir); err != nil {
return nil, err
}
return &SimpleFileStore{
baseDir: baseDir,
fileExt: fileExt,
perms: visible,
}, nil
}
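// exampleSimpleFileStoreUsage is an illustrative sketch, not part of the
// original file: it exercises the Add/Get/ListFiles cycle. The base
// directory and extension are assumptions for demonstration.
func exampleSimpleFileStoreUsage() error {
store, err := NewSimpleFileStore("/tmp/example-store", "crt")
if err != nil {
return err
}
// Written to /tmp/example-store/mycert.crt
if err := store.Add("mycert", []byte("PEM DATA")); err != nil {
return err
}
if _, err := store.Get("mycert"); err != nil {
return err
}
// ListFiles returns paths relative to the base directory; passing true
// includes symlinked files.
for _, f := range store.ListFiles(true) {
fmt.Println(f)
}
return nil
}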
// NewPrivateSimpleFileStore creates a directory with 700 permissions
func NewPrivateSimpleFileStore(baseDir string, fileExt string) (*SimpleFileStore, error) {
if err := CreatePrivateDirectory(baseDir); err != nil {
return nil, err
}
return &SimpleFileStore{
baseDir: baseDir,
fileExt: fileExt,
perms: private,
}, nil
}
// Add writes data to a file with a given name
func (f *SimpleFileStore) Add(name string, data []byte) error {
filePath, err := f.GetPath(name)
if err != nil {
return err
}
createDirectory(filepath.Dir(filePath), f.perms)
return ioutil.WriteFile(filePath, data, f.perms)
}
// Remove removes a file identified by name
func (f *SimpleFileStore) Remove(name string) error {
// Attempt to remove
filePath, err := f.GetPath(name)
if err != nil {
return err
}
return os.Remove(filePath)
}
// RemoveDir removes the directory identified by name
func (f *SimpleFileStore) RemoveDir(name string) error {
dirPath := filepath.Join(f.baseDir, name)
// Check to see if directory exists
fi, err := os.Stat(dirPath)
if err != nil {
return err
}
// Check to see if it is a directory
if !fi.IsDir() {
return fmt.Errorf("directory not found: %s", name)
}
return os.RemoveAll(dirPath)
}
// Get returns the data given a file name
func (f *SimpleFileStore) Get(name string) ([]byte, error) {
filePath, err := f.GetPath(name)
if err != nil {
return nil, err
}
data, err := ioutil.ReadFile(filePath)
if err != nil {
return nil, err
}
return data, nil
}
// GetPath returns the full final path of a file with a given name
func (f *SimpleFileStore) GetPath(name string) (string, error) {
fileName := f.genFileName(name)
fullPath := filepath.Clean(filepath.Join(f.baseDir, fileName))
if !strings.HasPrefix(fullPath, f.baseDir) {
return "", ErrPathOutsideStore
}
return fullPath, nil
}
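// examplePathTraversal is an illustrative sketch, not part of the original
// file: GetPath cleans the joined path and refuses names that would resolve
// outside the store's base directory.
func examplePathTraversal(store *SimpleFileStore) {
if _, err := store.GetPath("../../etc/passwd"); err == ErrPathOutsideStore {
fmt.Println("traversal attempt rejected")
}
}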
// ListFiles lists all the files inside of a store
func (f *SimpleFileStore) ListFiles(symlinks bool) []string {
return f.list(f.baseDir, symlinks)
}
// ListDir lists all the files inside of a directory identified by a name
func (f *SimpleFileStore) ListDir(name string, symlinks bool) []string {
fullPath := filepath.Join(f.baseDir, name)
return f.list(fullPath, symlinks)
}
// list lists all the files in a directory given a full path. Symlinks are skipped unless requested.
func (f *SimpleFileStore) list(path string, symlinks bool) []string {
var files []string
filepath.Walk(path, func(fp string, fi os.FileInfo, err error) error {
// If there are errors, ignore this particular file
if err != nil {
return nil
}
// Ignore if it is a directory
if fi.IsDir() {
return nil
}
// If this is a symlink and symlinks weren't requested, skip it
if !symlinks && fi.Mode()&os.ModeSymlink == os.ModeSymlink {
return nil
}
// Only allow matches that end with our file extension (e.g. *.crt)
matched, _ := filepath.Match("*"+f.fileExt, fi.Name())
if matched {
// Find the relative path for this file relative to the base path.
fp, err = filepath.Rel(path, fp)
if err != nil {
return err
}
files = append(files, fp)
}
return nil
})
return files
}
// genFileName returns the name using the right extension
func (f *SimpleFileStore) genFileName(name string) string {
return fmt.Sprintf("%s.%s", name, f.fileExt)
}
// Link creates a symlink between the ID of the certificate used by a repository
// and the ID of the root key that is being used.
// The symlink is created at the full path of newname and points at the bare
// file name of oldname, so the link itself stays relative.
func (f *SimpleFileStore) Link(oldname, newname string) error {
newnamePath, err := f.GetPath(newname)
if err != nil {
return err
}
return os.Symlink(f.genFileName(oldname), newnamePath)
}
// BaseDir returns the base directory of the filestore
func (f *SimpleFileStore) BaseDir() string {
return f.baseDir
}
// CreateDirectory uses createDirectory to create a chmod 755 Directory
func CreateDirectory(dir string) error {
return createDirectory(dir, visible)
}
// CreatePrivateDirectory uses createDirectory to create a chmod 700 Directory
func CreatePrivateDirectory(dir string) error {
return createDirectory(dir, private)
}
// createDirectory receives a string of the path to a directory.
// It does not support passing files, so the caller has to remove
// the filename by doing filepath.Dir(full_path_to_file)
func createDirectory(dir string, perms os.FileMode) error {
// This prevents someone passing /path/to/dir and 'dir' not being created
// If two '//' exist, MkdirAll deals with it correctly
dir = dir + "/"
return os.MkdirAll(dir, perms)
}
// MemoryFileStore is an implementation of LimitedFileStore that keeps
// the contents in memory.
type MemoryFileStore struct {
sync.Mutex
files map[string][]byte
}
// NewMemoryFileStore creates a MemoryFileStore
func NewMemoryFileStore() *MemoryFileStore {
return &MemoryFileStore{
files: make(map[string][]byte),
}
}
// ErrMemFileNotFound is returned for a nonexistent "file" in the memory file
// store
var ErrMemFileNotFound = errors.New("key not found in memory file store")
// Add writes data to a file with a given name
func (f *MemoryFileStore) Add(name string, data []byte) error {
f.Lock()
defer f.Unlock()
f.files[name] = data
return nil
}
// Remove removes a file identified by name
func (f *MemoryFileStore) Remove(name string) error {
f.Lock()
defer f.Unlock()
if _, present := f.files[name]; !present {
return ErrMemFileNotFound
}
delete(f.files, name)
return nil
}
// Get returns the data given a file name
func (f *MemoryFileStore) Get(name string) ([]byte, error) {
f.Lock()
defer f.Unlock()
fileData, present := f.files[name]
if !present {
return nil, ErrMemFileNotFound
}
return fileData, nil
}
// ListFiles lists all the files inside of a store
func (f *MemoryFileStore) ListFiles(symlinks bool) []string {
var list []string
for name := range f.files {
list = append(list, name)
}
return list
}

View file

@ -0,0 +1,288 @@
package trustmanager
import (
"path/filepath"
"strings"
"sync"
"fmt"
"github.com/docker/notary/pkg/passphrase"
"github.com/endophage/gotuf/data"
)
const (
keyExtension = "key"
)
// ErrAttemptsExceeded is returned when too many attempts have been made to decrypt a key
type ErrAttemptsExceeded struct{}
// ErrAttemptsExceeded is returned when too many attempts have been made to decrypt a key
func (err ErrAttemptsExceeded) Error() string {
return "maximum number of passphrase attempts exceeded"
}
// ErrPasswordInvalid is returned when signing fails. It could also mean the signing
// key file was corrupted, but we have no way to distinguish.
type ErrPasswordInvalid struct{}
// ErrPasswordInvalid is returned when signing fails. It could also mean the signing
// key file was corrupted, but we have no way to distinguish.
func (err ErrPasswordInvalid) Error() string {
return "password invalid, operation has failed."
}
// ErrKeyNotFound is returned when the keystore fails to retrieve a specific key.
type ErrKeyNotFound struct {
KeyID string
}
// ErrKeyNotFound is returned when the keystore fails to retrieve a specific key.
func (err ErrKeyNotFound) Error() string {
return fmt.Sprintf("signing key not found: %s", err.KeyID)
}
// KeyStore is a generic interface for private key storage
type KeyStore interface {
LimitedFileStore
AddKey(name, alias string, privKey data.PrivateKey) error
GetKey(name string) (data.PrivateKey, string, error)
ListKeys() []string
RemoveKey(name string) error
}
type cachedKey struct {
alias string
key data.PrivateKey
}
// PassphraseRetriever is a callback function that should retrieve a passphrase
// for a given named key. If it should be treated as a new passphrase (e.g. with
// confirmation), createNew will be true. Attempts is passed in so that implementers
// can decide how many chances to give to a human, for example.
type PassphraseRetriever func(keyId, alias string, createNew bool, attempts int) (passphrase string, giveup bool, err error)
// KeyFileStore persists and manages private keys on disk
type KeyFileStore struct {
sync.Mutex
SimpleFileStore
passphrase.Retriever
cachedKeys map[string]*cachedKey
}
// KeyMemoryStore manages private keys in memory
type KeyMemoryStore struct {
sync.Mutex
MemoryFileStore
passphrase.Retriever
cachedKeys map[string]*cachedKey
}
// NewKeyFileStore returns a new KeyFileStore creating a private directory to
// hold the keys.
func NewKeyFileStore(baseDir string, passphraseRetriever passphrase.Retriever) (*KeyFileStore, error) {
fileStore, err := NewPrivateSimpleFileStore(baseDir, keyExtension)
if err != nil {
return nil, err
}
cachedKeys := make(map[string]*cachedKey)
return &KeyFileStore{SimpleFileStore: *fileStore,
Retriever: passphraseRetriever,
cachedKeys: cachedKeys}, nil
}
// AddKey stores the contents of a PEM-encoded private key as a PEM block
func (s *KeyFileStore) AddKey(name, alias string, privKey data.PrivateKey) error {
s.Lock()
defer s.Unlock()
return addKey(s, s.Retriever, s.cachedKeys, name, alias, privKey)
}
// GetKey returns the PrivateKey given a KeyID
func (s *KeyFileStore) GetKey(name string) (data.PrivateKey, string, error) {
s.Lock()
defer s.Unlock()
return getKey(s, s.Retriever, s.cachedKeys, name)
}
// ListKeys returns a list of unique key IDs present in the KeyFileStore.
// There might be symlinks associating Certificate IDs to Public Keys, so this
// method only returns the IDs that aren't symlinks
func (s *KeyFileStore) ListKeys() []string {
return listKeys(s)
}
// RemoveKey removes the key from the keyfilestore
func (s *KeyFileStore) RemoveKey(name string) error {
s.Lock()
defer s.Unlock()
return removeKey(s, s.cachedKeys, name)
}
// NewKeyMemoryStore returns a new KeyMemoryStore which holds keys in memory
func NewKeyMemoryStore(passphraseRetriever passphrase.Retriever) *KeyMemoryStore {
memStore := NewMemoryFileStore()
cachedKeys := make(map[string]*cachedKey)
return &KeyMemoryStore{MemoryFileStore: *memStore,
Retriever: passphraseRetriever,
cachedKeys: cachedKeys}
}
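// exampleKeyMemoryStore is an illustrative sketch, not part of the original
// file: it stores and retrieves a private key using a fixed-passphrase
// retriever. The privKey argument is assumed to come from a generator such
// as GenerateECDSAKey, and the passphrase is an assumption for demonstration.
func exampleKeyMemoryStore(privKey data.PrivateKey) error {
retriever := func(keyName, alias string, createNew bool, attempts int) (string, bool, error) {
return "example-passphrase", false, nil
}
store := NewKeyMemoryStore(retriever)
if err := store.AddKey(privKey.ID(), "root", privKey); err != nil {
return err
}
key, alias, err := store.GetKey(privKey.ID())
if err != nil {
return err
}
fmt.Printf("retrieved %s key with alias %s\n", key.Algorithm(), alias)
return nil
}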
// AddKey stores the contents of a PEM-encoded private key as a PEM block
func (s *KeyMemoryStore) AddKey(name, alias string, privKey data.PrivateKey) error {
s.Lock()
defer s.Unlock()
return addKey(s, s.Retriever, s.cachedKeys, name, alias, privKey)
}
// GetKey returns the PrivateKey given a KeyID
func (s *KeyMemoryStore) GetKey(name string) (data.PrivateKey, string, error) {
s.Lock()
defer s.Unlock()
return getKey(s, s.Retriever, s.cachedKeys, name)
}
// ListKeys returns a list of unique key IDs present in the KeyMemoryStore.
// There might be symlinks associating Certificate IDs to Public Keys, so this
// method only returns the IDs that aren't symlinks
func (s *KeyMemoryStore) ListKeys() []string {
return listKeys(s)
}
// RemoveKey removes the key from the keystore
func (s *KeyMemoryStore) RemoveKey(name string) error {
s.Lock()
defer s.Unlock()
return removeKey(s, s.cachedKeys, name)
}
func addKey(s LimitedFileStore, passphraseRetriever passphrase.Retriever, cachedKeys map[string]*cachedKey, name, alias string, privKey data.PrivateKey) error {
pemPrivKey, err := KeyToPEM(privKey)
if err != nil {
return err
}
attempts := 0
passphrase := ""
giveup := false
for {
passphrase, giveup, err = passphraseRetriever(name, alias, true, attempts)
if err != nil {
attempts++
continue
}
if giveup {
return ErrAttemptsExceeded{}
}
if attempts > 10 {
return ErrAttemptsExceeded{}
}
break
}
if passphrase != "" {
pemPrivKey, err = EncryptPrivateKey(privKey, passphrase)
if err != nil {
return err
}
}
cachedKeys[name] = &cachedKey{alias: alias, key: privKey}
return s.Add(name+"_"+alias, pemPrivKey)
}
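// getKeyAlias extracts the alias from a stored key's file name. Keys are
// stored as "<keyID>_<alias>.<extension>"; for example (illustrative), a file
// named "abc123_root.key" resolves to alias "root" for keyID "abc123".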
func getKeyAlias(s LimitedFileStore, keyID string) (string, error) {
files := s.ListFiles(true)
name := strings.TrimSpace(strings.TrimSuffix(filepath.Base(keyID), filepath.Ext(keyID)))
for _, file := range files {
filename := filepath.Base(file)
if strings.HasPrefix(filename, name) {
aliasPlusDotKey := strings.TrimPrefix(filename, name+"_")
retVal := strings.TrimSuffix(aliasPlusDotKey, "."+keyExtension)
return retVal, nil
}
}
return "", &ErrKeyNotFound{KeyID: keyID}
}
// getKey returns the PrivateKey given a KeyID
func getKey(s LimitedFileStore, passphraseRetriever passphrase.Retriever, cachedKeys map[string]*cachedKey, name string) (data.PrivateKey, string, error) {
cachedKeyEntry, ok := cachedKeys[name]
if ok {
return cachedKeyEntry.key, cachedKeyEntry.alias, nil
}
keyAlias, err := getKeyAlias(s, name)
if err != nil {
return nil, "", err
}
keyBytes, err := s.Get(name + "_" + keyAlias)
if err != nil {
return nil, "", err
}
var retErr error
// See if the key is encrypted. If it's encrypted, we'll fail to parse the private key
privKey, err := ParsePEMPrivateKey(keyBytes, "")
if err != nil {
// We need to decrypt the key, let's get a passphrase
for attempts := 0; ; attempts++ {
passphrase, giveup, err := passphraseRetriever(name, keyAlias, false, attempts)
// Check if the passphrase retriever got an error or if it is telling us to give up
if giveup || err != nil {
return nil, "", ErrPasswordInvalid{}
}
if attempts > 10 {
return nil, "", ErrAttemptsExceeded{}
}
// Try to convert PEM encoded bytes back to a PrivateKey using the passphrase
privKey, err = ParsePEMPrivateKey(keyBytes, passphrase)
if err != nil {
retErr = ErrPasswordInvalid{}
} else {
// We managed to parse the PrivateKey. We've succeeded!
retErr = nil
break
}
}
}
if retErr != nil {
return nil, "", retErr
}
cachedKeys[name] = &cachedKey{alias: keyAlias, key: privKey}
return privKey, keyAlias, nil
}
// listKeys returns a list of unique key IDs present in the given store.
// There might be symlinks associating Certificate IDs to Public Keys, so this
// method only returns the IDs that aren't symlinks
func listKeys(s LimitedFileStore) []string {
var keyIDList []string
for _, f := range s.ListFiles(false) {
keyID := strings.TrimSpace(strings.TrimSuffix(f, filepath.Ext(f)))
keyID = keyID[:strings.LastIndex(keyID, "_")]
keyIDList = append(keyIDList, keyID)
}
return keyIDList
}
// removeKey removes the key identified by name from the given store
func removeKey(s LimitedFileStore, cachedKeys map[string]*cachedKey, name string) error {
keyAlias, err := getKeyAlias(s, name)
if err != nil {
return err
}
delete(cachedKeys, name)
return s.Remove(name + "_" + keyAlias)
}

View file

@ -0,0 +1,270 @@
package trustmanager
import (
"crypto/x509"
"errors"
"os"
"path"
"github.com/Sirupsen/logrus"
)
// X509FileStore implements X509Store that persists on disk
type X509FileStore struct {
validate Validator
fileMap map[CertID]string
fingerprintMap map[CertID]*x509.Certificate
nameMap map[string][]CertID
fileStore FileStore
}
// NewX509FileStore returns a new X509FileStore.
func NewX509FileStore(directory string) (*X509FileStore, error) {
validate := ValidatorFunc(func(cert *x509.Certificate) bool { return true })
return newX509FileStore(directory, validate)
}
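// exampleX509FileStore is an illustrative sketch, not part of the original
// file: it creates a file-backed store and adds a certificate from disk.
// Both paths are assumptions for demonstration.
func exampleX509FileStore() error {
store, err := NewX509FileStore("/tmp/example-certs")
if err != nil {
return err
}
// Certificates are persisted under <directory>/<CommonName>/<certID>.crt
return store.AddCertFromFile("/path/to/cert.pem")
}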
// NewX509FilteredFileStore returns a new X509FileStore that validates certificates
// that are added.
func NewX509FilteredFileStore(directory string, validate func(*x509.Certificate) bool) (*X509FileStore, error) {
return newX509FileStore(directory, validate)
}
func newX509FileStore(directory string, validate func(*x509.Certificate) bool) (*X509FileStore, error) {
fileStore, err := NewSimpleFileStore(directory, certExtension)
if err != nil {
return nil, err
}
s := &X509FileStore{
validate: ValidatorFunc(validate),
fileMap: make(map[CertID]string),
fingerprintMap: make(map[CertID]*x509.Certificate),
nameMap: make(map[string][]CertID),
fileStore: fileStore,
}
err = loadCertsFromDir(s)
if err != nil {
return nil, err
}
return s, nil
}
// AddCert creates a filename for a given cert and adds a certificate with that name
func (s *X509FileStore) AddCert(cert *x509.Certificate) error {
if cert == nil {
return errors.New("adding nil Certificate to X509Store")
}
// Check if this certificate meets our validation criteria
if !s.validate.Validate(cert) {
return &ErrCertValidation{}
}
// Attempt to write the certificate to the file
if err := s.addNamedCert(cert); err != nil {
return err
}
return nil
}
// addNamedCert stores a certificate under a file name derived from its
// Common Name and certID. If the file does not exist on disk, it saves it.
func (s *X509FileStore) addNamedCert(cert *x509.Certificate) error {
fileName, certID, err := fileName(cert)
if err != nil {
return err
}
logrus.Debug("Adding cert with certID: ", certID)
// Validate if we already added this certificate before
if _, ok := s.fingerprintMap[certID]; ok {
return &ErrCertExists{}
}
// Convert certificate to PEM
certBytes := CertToPEM(cert)
// Save the file to disk if not already there.
filePath, err := s.fileStore.GetPath(fileName)
if err != nil {
return err
}
if _, err := os.Stat(filePath); os.IsNotExist(err) {
if err := s.fileStore.Add(fileName, certBytes); err != nil {
return err
}
} else if err != nil {
return err
}
// We wrote the certificate successfully, add it to our in-memory storage
s.fingerprintMap[certID] = cert
s.fileMap[certID] = fileName
name := cert.Subject.CommonName
s.nameMap[name] = append(s.nameMap[name], certID)
return nil
}
// RemoveCert removes a certificate from a X509FileStore.
func (s *X509FileStore) RemoveCert(cert *x509.Certificate) error {
if cert == nil {
return errors.New("removing nil Certificate from X509Store")
}
certID, err := fingerprintCert(cert)
if err != nil {
return err
}
delete(s.fingerprintMap, certID)
filename := s.fileMap[certID]
delete(s.fileMap, certID)
name := cert.Subject.CommonName
// Filter the fingerprint out of this name entry
fpList := s.nameMap[name]
newfpList := fpList[:0]
for _, x := range fpList {
if x != certID {
newfpList = append(newfpList, x)
}
}
s.nameMap[name] = newfpList
if err := s.fileStore.Remove(filename); err != nil {
return err
}
return nil
}
// RemoveAll removes all the certificates from the store
func (s *X509FileStore) RemoveAll() error {
for _, filename := range s.fileMap {
if err := s.fileStore.Remove(filename); err != nil {
return err
}
}
s.fileMap = make(map[CertID]string)
s.fingerprintMap = make(map[CertID]*x509.Certificate)
s.nameMap = make(map[string][]CertID)
return nil
}
// AddCertFromPEM adds the first certificate that it finds in the []byte, returning
// an error if no Certificates are found
func (s *X509FileStore) AddCertFromPEM(pemBytes []byte) error {
cert, err := LoadCertFromPEM(pemBytes)
if err != nil {
return err
}
return s.AddCert(cert)
}
// AddCertFromFile tries to add an X509 certificate to the store given a filename
func (s *X509FileStore) AddCertFromFile(filename string) error {
cert, err := LoadCertFromFile(filename)
if err != nil {
return err
}
return s.AddCert(cert)
}
// GetCertificates returns an array with all of the current X509 Certificates.
func (s *X509FileStore) GetCertificates() []*x509.Certificate {
certs := make([]*x509.Certificate, len(s.fingerprintMap))
i := 0
for _, v := range s.fingerprintMap {
certs[i] = v
i++
}
return certs
}
// GetCertificatePool returns an x509 CertPool loaded with all the certificates
// in the store.
func (s *X509FileStore) GetCertificatePool() *x509.CertPool {
pool := x509.NewCertPool()
for _, v := range s.fingerprintMap {
pool.AddCert(v)
}
return pool
}
// GetCertificateByCertID returns the certificate that matches a certain certID
func (s *X509FileStore) GetCertificateByCertID(certID string) (*x509.Certificate, error) {
return s.getCertificateByCertID(CertID(certID))
}
// getCertificateByCertID returns the certificate that matches a certain certID
func (s *X509FileStore) getCertificateByCertID(certID CertID) (*x509.Certificate, error) {
// If it does not look like a hex encoded sha256 hash, error
if len(certID) != 64 {
return nil, errors.New("invalid certificate ID")
}
// Check to see if this certificate ID exists
if cert, ok := s.fingerprintMap[CertID(certID)]; ok {
return cert, nil
}
return nil, &ErrNoCertificatesFound{query: string(certID)}
}
// GetCertificatesByCN returns all the certificates that match a specific
// CommonName
func (s *X509FileStore) GetCertificatesByCN(cn string) ([]*x509.Certificate, error) {
var certs []*x509.Certificate
if ids, ok := s.nameMap[cn]; ok {
for _, v := range ids {
cert, err := s.getCertificateByCertID(v)
if err != nil {
// This error should never happen. This would mean that we have
// an inconsistent X509FileStore
return nil, &ErrBadCertificateStore{}
}
certs = append(certs, cert)
}
}
if len(certs) == 0 {
return nil, &ErrNoCertificatesFound{query: cn}
}
return certs, nil
}
// GetVerifyOptions returns VerifyOptions with the certificates within the KeyStore
// as part of the roots list. This never allows the use of system roots, returning
// an error if there are no root CAs.
func (s *X509FileStore) GetVerifyOptions(dnsName string) (x509.VerifyOptions, error) {
// If we have no Certificates loaded return error (we don't want to revert to using
// system CAs).
if len(s.fingerprintMap) == 0 {
return x509.VerifyOptions{}, errors.New("no root CAs available")
}
opts := x509.VerifyOptions{
DNSName: dnsName,
Roots: s.GetCertificatePool(),
}
return opts, nil
}
func fileName(cert *x509.Certificate) (string, CertID, error) {
certID, err := fingerprintCert(cert)
if err != nil {
return "", "", err
}
return path.Join(cert.Subject.CommonName, string(certID)), certID, nil
}

View file

@ -0,0 +1,203 @@
package trustmanager
import (
"crypto/x509"
"errors"
"github.com/Sirupsen/logrus"
)
// X509MemStore implements X509Store as an in-memory object with no persistence
type X509MemStore struct {
validate Validator
fingerprintMap map[CertID]*x509.Certificate
nameMap map[string][]CertID
}
// NewX509MemStore returns a new X509MemStore.
func NewX509MemStore() *X509MemStore {
validate := ValidatorFunc(func(cert *x509.Certificate) bool { return true })
return &X509MemStore{
validate: validate,
fingerprintMap: make(map[CertID]*x509.Certificate),
nameMap: make(map[string][]CertID),
}
}
// NewX509FilteredMemStore returns a new X509Memstore that validates certificates
// that are added.
func NewX509FilteredMemStore(validate func(*x509.Certificate) bool) *X509MemStore {
s := &X509MemStore{
validate: ValidatorFunc(validate),
fingerprintMap: make(map[CertID]*x509.Certificate),
nameMap: make(map[string][]CertID),
}
return s
}
// AddCert adds a certificate to the store
func (s *X509MemStore) AddCert(cert *x509.Certificate) error {
if cert == nil {
return errors.New("adding nil Certificate to X509Store")
}
if !s.validate.Validate(cert) {
return &ErrCertValidation{}
}
certID, err := fingerprintCert(cert)
if err != nil {
return err
}
logrus.Debug("Adding cert with certID: ", certID)
// In this store we overwrite the certificate if it already exists
s.fingerprintMap[certID] = cert
name := cert.Subject.CommonName
s.nameMap[name] = append(s.nameMap[name], certID)
return nil
}
// RemoveCert removes a certificate from a X509MemStore.
func (s *X509MemStore) RemoveCert(cert *x509.Certificate) error {
if cert == nil {
return errors.New("removing nil Certificate to X509Store")
}
certID, err := fingerprintCert(cert)
if err != nil {
return err
}
delete(s.fingerprintMap, certID)
name := cert.Subject.CommonName
// Filter the fingerprint out of this name entry
fpList := s.nameMap[name]
newfpList := fpList[:0]
for _, x := range fpList {
if x != certID {
newfpList = append(newfpList, x)
}
}
s.nameMap[name] = newfpList
return nil
}
// RemoveAll removes all the certificates from the store
func (s *X509MemStore) RemoveAll() error {
for _, cert := range s.fingerprintMap {
if err := s.RemoveCert(cert); err != nil {
return err
}
}
return nil
}
// AddCertFromPEM adds a certificate to the store from a PEM blob
func (s *X509MemStore) AddCertFromPEM(pemBytes []byte) error {
cert, err := LoadCertFromPEM(pemBytes)
if err != nil {
return err
}
return s.AddCert(cert)
}
// AddCertFromFile tries to add an X509 certificate to the store given a filename
func (s *X509MemStore) AddCertFromFile(filename string) error {
cert, err := LoadCertFromFile(filename)
if err != nil {
return err
}
return s.AddCert(cert)
}
// GetCertificates returns an array with all of the current X509 Certificates.
func (s *X509MemStore) GetCertificates() []*x509.Certificate {
certs := make([]*x509.Certificate, len(s.fingerprintMap))
i := 0
for _, v := range s.fingerprintMap {
certs[i] = v
i++
}
return certs
}
// GetCertificatePool returns an x509 CertPool loaded with all the certificates
// in the store.
func (s *X509MemStore) GetCertificatePool() *x509.CertPool {
pool := x509.NewCertPool()
for _, v := range s.fingerprintMap {
pool.AddCert(v)
}
return pool
}
// GetCertificateByCertID returns the certificate that matches a certain certID
func (s *X509MemStore) GetCertificateByCertID(certID string) (*x509.Certificate, error) {
return s.getCertificateByCertID(CertID(certID))
}
// getCertificateByCertID returns the certificate that matches a certain certID or error
func (s *X509MemStore) getCertificateByCertID(certID CertID) (*x509.Certificate, error) {
// If it does not look like a hex encoded sha256 hash, error
if len(certID) != 64 {
return nil, errors.New("invalid certificate ID")
}
// Check to see if this certificate ID exists
if cert, ok := s.fingerprintMap[CertID(certID)]; ok {
return cert, nil
}
return nil, &ErrNoCertificatesFound{query: string(certID)}
}
// GetCertificatesByCN returns all the certificates that match a specific
// CommonName
func (s *X509MemStore) GetCertificatesByCN(cn string) ([]*x509.Certificate, error) {
var certs []*x509.Certificate
if ids, ok := s.nameMap[cn]; ok {
for _, v := range ids {
cert, err := s.getCertificateByCertID(v)
if err != nil {
// This error should never happen. This would mean that we have
// an inconsistent X509MemStore
return nil, err
}
certs = append(certs, cert)
}
}
if len(certs) == 0 {
return nil, &ErrNoCertificatesFound{query: cn}
}
return certs, nil
}
// GetVerifyOptions returns VerifyOptions with the certificates within the KeyStore
// as part of the roots list. This never allows the use of system roots, returning
// an error if there are no root CAs.
func (s *X509MemStore) GetVerifyOptions(dnsName string) (x509.VerifyOptions, error) {
// If we have no Certificates loaded return error (we don't want to revert to using
// system CAs).
if len(s.fingerprintMap) == 0 {
return x509.VerifyOptions{}, errors.New("no root CAs available")
}
opts := x509.VerifyOptions{
DNSName: dnsName,
Roots: s.GetCertificatePool(),
}
return opts, nil
}

View file

@ -0,0 +1,144 @@
package trustmanager
import (
"crypto/x509"
"errors"
"fmt"
)
const certExtension string = "crt"
// ErrNoCertificatesFound is returned when no certificates are found for a
// GetCertificatesBy*
type ErrNoCertificatesFound struct {
query string
}
// ErrNoCertificatesFound is returned when no certificates are found for a
// GetCertificatesBy*
func (err ErrNoCertificatesFound) Error() string {
return fmt.Sprintf("error, no certificates found in the keystore match: %s", err.query)
}
// ErrCertValidation is returned when a certificate doesn't pass the store specific
// validations
type ErrCertValidation struct {
}
// ErrCertValidation is returned when a certificate doesn't pass the store specific
// validations
func (err ErrCertValidation) Error() string {
return fmt.Sprintf("store-specific certificate validations failed")
}
// ErrCertExists is returned when a Certificate already exists in the key store
type ErrCertExists struct {
}
// ErrCertExists is returned when a Certificate already exists in the key store
func (err ErrCertExists) Error() string {
return fmt.Sprintf("certificate already in the store")
}
// ErrBadCertificateStore is returned when there is an internal inconsistency
// in our x509 store
type ErrBadCertificateStore struct {
}
// ErrBadCertificateStore is returned when there is an internal inconsistency
// in our x509 store
func (err ErrBadCertificateStore) Error() string {
return fmt.Sprintf("inconsistent certificate store")
}
// X509Store is the interface for all X509Stores
type X509Store interface {
AddCert(cert *x509.Certificate) error
AddCertFromPEM(pemCerts []byte) error
AddCertFromFile(filename string) error
RemoveCert(cert *x509.Certificate) error
RemoveAll() error
GetCertificateByCertID(certID string) (*x509.Certificate, error)
GetCertificatesByCN(cn string) ([]*x509.Certificate, error)
GetCertificates() []*x509.Certificate
GetCertificatePool() *x509.CertPool
GetVerifyOptions(dnsName string) (x509.VerifyOptions, error)
}
// CertID represent the ID used to identify certificates
type CertID string
// Validator is a convenience interface for creating validating functions that
// filter certificates that get added to the store
type Validator interface {
Validate(cert *x509.Certificate) bool
}
// ValidatorFunc is a convenience type to create functions that implement
// the Validator interface
type ValidatorFunc func(cert *x509.Certificate) bool
// Validate implements the Validator interface to allow for any func() bool method
// to be passed as a Validator
func (vf ValidatorFunc) Validate(cert *x509.Certificate) bool {
return vf(cert)
}
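// exampleCNValidator is an illustrative sketch, not part of the original
// file: it wraps a plain function in a ValidatorFunc so that a store only
// accepts certificates whose Common Name matches the given value.
func exampleCNValidator(cn string) Validator {
return ValidatorFunc(func(cert *x509.Certificate) bool {
return cert.Subject.CommonName == cn
})
}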
// Verify operates on an X509Store and validates the existence of a chain of trust
// between a leafCertificate and a CA present inside of the X509 Store.
// It requires at least two certificates in certList, a leaf Certificate and an
// intermediate CA certificate.
func Verify(s X509Store, dnsName string, certList []*x509.Certificate) error {
// If we have no Certificates loaded return error (we don't want to revert to using
// system CAs).
if len(s.GetCertificates()) == 0 {
return errors.New("no root CAs available")
}
// At a minimum we should be provided a leaf cert and an intermediate.
if len(certList) < 2 {
return errors.New("certificate and at least one intermediate needed")
}
// Get the VerifyOptions from the keystore for a base dnsName
opts, err := s.GetVerifyOptions(dnsName)
if err != nil {
return err
}
// Create a Certificate Pool for our intermediate certificates
intPool := x509.NewCertPool()
var leafCert *x509.Certificate
// Iterate through all the certificates
for _, c := range certList {
// If the cert is a CA, we add it to the intermediates pool. If not, we call
// it the leaf cert
if c.IsCA {
intPool.AddCert(c)
continue
}
// Certificate is not a CA, it must be our leaf certificate.
// If we already found one, bail with error
if leafCert != nil {
return errors.New("more than one leaf certificate found")
}
leafCert = c
}
// We exited the loop with no leaf certificates
if leafCert == nil {
return errors.New("no leaf certificates found")
}
// We have one leaf certificate and at least one intermediate. Let's add this
// Cert Pool as the Intermediates list on our VerifyOptions
opts.Intermediates = intPool
// Finally, let's call Verify on our leafCert with our fully configured options
chains, err := leafCert.Verify(opts)
if len(chains) == 0 || err != nil {
return fmt.Errorf("certificate verification failed: %v", err)
}
return nil
}
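// exampleVerify is an illustrative sketch, not part of the original file:
// it wires Verify up with an in-memory store holding one trusted CA and a
// chain loaded from disk. NewX509MemStore and LoadCertBundleFromFile are
// defined elsewhere in this package; the bundle path is an assumption.
func exampleVerify(trustedCA *x509.Certificate) error {
store := NewX509MemStore()
if err := store.AddCert(trustedCA); err != nil {
return err
}
// The bundle must contain the leaf plus at least one intermediate.
certList, err := LoadCertBundleFromFile("/path/to/chain.pem")
if err != nil {
return err
}
// The dnsName is checked against the leaf; here we assume its CN is the GUN.
return Verify(store, certList[0].Subject.CommonName, certList)
}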

View file

@ -0,0 +1,497 @@
package trustmanager
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"errors"
"fmt"
"io"
"io/ioutil"
"math/big"
"net/http"
"net/url"
"path/filepath"
"time"
"github.com/Sirupsen/logrus"
"github.com/agl/ed25519"
"github.com/endophage/gotuf/data"
)
// GetCertFromURL tries to get an X509 certificate given an HTTPS URL
func GetCertFromURL(urlStr string) (*x509.Certificate, error) {
url, err := url.Parse(urlStr)
if err != nil {
return nil, err
}
// Check if we are adding via HTTPS
if url.Scheme != "https" {
return nil, errors.New("only HTTPS URLs allowed")
}
// Download the certificate and write to directory
resp, err := http.Get(url.String())
if err != nil {
return nil, err
}
// Copy the content to certBytes
defer resp.Body.Close()
certBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
// Try to extract the first valid PEM certificate from the bytes
cert, err := LoadCertFromPEM(certBytes)
if err != nil {
return nil, err
}
return cert, nil
}
// CertToPEM is a utility function that returns a PEM encoded x509 Certificate
func CertToPEM(cert *x509.Certificate) []byte {
pemCert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})
return pemCert
}
// LoadCertFromPEM returns the first certificate found in a bunch of bytes or error
// if nothing is found. Taken from https://golang.org/src/crypto/x509/cert_pool.go#L85.
func LoadCertFromPEM(pemBytes []byte) (*x509.Certificate, error) {
for len(pemBytes) > 0 {
var block *pem.Block
block, pemBytes = pem.Decode(pemBytes)
if block == nil {
return nil, errors.New("no certificates found in PEM data")
}
if block.Type != "CERTIFICATE" || len(block.Headers) != 0 {
continue
}
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
continue
}
return cert, nil
}
return nil, errors.New("no certificates found in PEM data")
}
// FingerprintCert returns a TUF compliant fingerprint for a X509 Certificate
func FingerprintCert(cert *x509.Certificate) (string, error) {
certID, err := fingerprintCert(cert)
if err != nil {
return "", err
}
return string(certID), nil
}
func fingerprintCert(cert *x509.Certificate) (CertID, error) {
block := pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}
pemdata := pem.EncodeToMemory(&block)
var keyType data.KeyAlgorithm
switch cert.PublicKeyAlgorithm {
case x509.RSA:
keyType = data.RSAx509Key
case x509.ECDSA:
keyType = data.ECDSAx509Key
default:
return "", fmt.Errorf("got Unknown key type while fingerprinting certificate")
}
// Create new TUF Key so we can compute the TUF-compliant CertID
tufKey := data.NewPublicKey(keyType, pemdata)
return CertID(tufKey.ID()), nil
}
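// exampleFingerprint is an illustrative sketch, not part of the original
// file: it computes the TUF-compliant ID of a certificate loaded from disk.
// The path is an assumption for demonstration.
func exampleFingerprint() error {
cert, err := LoadCertFromFile("/path/to/cert.pem")
if err != nil {
return err
}
certID, err := FingerprintCert(cert)
if err != nil {
return err
}
fmt.Println("certID:", certID)
return nil
}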
// loadCertsFromDir receives a store and calls AddCertFromFile for each certificate found in its base directory
func loadCertsFromDir(s *X509FileStore) error {
certFiles := s.fileStore.ListFiles(true)
for _, f := range certFiles {
// ListFiles returns relative paths
fullPath := filepath.Join(s.fileStore.BaseDir(), f)
err := s.AddCertFromFile(fullPath)
if err != nil {
if _, ok := err.(*ErrCertValidation); ok {
logrus.Debugf("ignoring certificate, did not pass validation: %s", f)
continue
}
if _, ok := err.(*ErrCertExists); ok {
logrus.Debugf("ignoring certificate, already exists in the store: %s", f)
continue
}
return err
}
}
return nil
}
// LoadCertFromFile loads the first certificate from the file provided. The
// data is expected to be PEM Encoded and contain one or more certificates
// with PEM type "CERTIFICATE"
func LoadCertFromFile(filename string) (*x509.Certificate, error) {
certs, err := LoadCertBundleFromFile(filename)
if err != nil {
return nil, err
}
return certs[0], nil
}
// LoadCertBundleFromFile loads certificates from the file provided. The
// data is expected to be PEM Encoded and contain one or more certificates
// with PEM type "CERTIFICATE"
func LoadCertBundleFromFile(filename string) ([]*x509.Certificate, error) {
b, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
return LoadCertBundleFromPEM(b)
}
// LoadCertBundleFromPEM loads certificates from the []byte provided. The
// data is expected to be PEM Encoded and contain one or more certificates
// with PEM type "CERTIFICATE"
func LoadCertBundleFromPEM(pemBytes []byte) ([]*x509.Certificate, error) {
certificates := []*x509.Certificate{}
var block *pem.Block
block, pemBytes = pem.Decode(pemBytes)
for ; block != nil; block, pemBytes = pem.Decode(pemBytes) {
if block.Type == "CERTIFICATE" {
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
return nil, err
}
certificates = append(certificates, cert)
} else {
return nil, fmt.Errorf("invalid pem block type: %s", block.Type)
}
}
if len(certificates) == 0 {
return nil, fmt.Errorf("no valid certificates found")
}
return certificates, nil
}
// GetLeafCerts parses a list of x509 Certificates and returns all of them
// that aren't CAs
func GetLeafCerts(certs []*x509.Certificate) []*x509.Certificate {
var leafCerts []*x509.Certificate
for _, cert := range certs {
if cert.IsCA {
continue
}
leafCerts = append(leafCerts, cert)
}
return leafCerts
}
// GetIntermediateCerts parses a list of x509 Certificates and returns all of the
// ones marked as a CA, to be used as intermediates
func GetIntermediateCerts(certs []*x509.Certificate) (intCerts []*x509.Certificate) {
for _, cert := range certs {
if cert.IsCA {
intCerts = append(intCerts, cert)
}
}
return intCerts
}
// ParsePEMPrivateKey returns a data.PrivateKey from a PEM encoded private key. It
// supports RSA (PKCS#1), EC, and ED25519 keys and attempts to decrypt using the passphrase, if encrypted.
func ParsePEMPrivateKey(pemBytes []byte, passphrase string) (data.PrivateKey, error) {
block, _ := pem.Decode(pemBytes)
if block == nil {
return nil, errors.New("no valid private key found")
}
switch block.Type {
case "RSA PRIVATE KEY":
var privKeyBytes []byte
var err error
if x509.IsEncryptedPEMBlock(block) {
privKeyBytes, err = x509.DecryptPEMBlock(block, []byte(passphrase))
if err != nil {
return nil, errors.New("could not decrypt private key")
}
} else {
privKeyBytes = block.Bytes
}
rsaPrivKey, err := x509.ParsePKCS1PrivateKey(privKeyBytes)
if err != nil {
return nil, fmt.Errorf("could not parse DER encoded key: %v", err)
}
tufRSAPrivateKey, err := RSAToPrivateKey(rsaPrivKey)
if err != nil {
return nil, fmt.Errorf("could not convert rsa.PrivateKey to data.PrivateKey: %v", err)
}
return tufRSAPrivateKey, nil
case "EC PRIVATE KEY":
var privKeyBytes []byte
var err error
if x509.IsEncryptedPEMBlock(block) {
privKeyBytes, err = x509.DecryptPEMBlock(block, []byte(passphrase))
if err != nil {
return nil, errors.New("could not decrypt private key")
}
} else {
privKeyBytes = block.Bytes
}
ecdsaPrivKey, err := x509.ParseECPrivateKey(privKeyBytes)
if err != nil {
return nil, fmt.Errorf("could not parse DER encoded private key: %v", err)
}
tufECDSAPrivateKey, err := ECDSAToPrivateKey(ecdsaPrivKey)
if err != nil {
return nil, fmt.Errorf("could not convert ecdsa.PrivateKey to data.PrivateKey: %v", err)
}
return tufECDSAPrivateKey, nil
case "ED25519 PRIVATE KEY":
// We serialize ED25519 keys by concatenating the private key
// to the public key and encoding with PEM. See the
// ED25519ToPrivateKey function.
var privKeyBytes []byte
var err error
if x509.IsEncryptedPEMBlock(block) {
privKeyBytes, err = x509.DecryptPEMBlock(block, []byte(passphrase))
if err != nil {
return nil, errors.New("could not decrypt private key")
}
} else {
privKeyBytes = block.Bytes
}
tufED25519PrivateKey, err := ED25519ToPrivateKey(privKeyBytes)
if err != nil {
return nil, fmt.Errorf("could not convert ed25519 key bytes to data.PrivateKey: %v", err)
}
return tufED25519PrivateKey, nil
default:
return nil, fmt.Errorf("unsupported key type %q", block.Type)
}
}
// GenerateRSAKey generates an RSA private key and returns a TUF PrivateKey
func GenerateRSAKey(random io.Reader, bits int) (data.PrivateKey, error) {
rsaPrivKey, err := rsa.GenerateKey(random, bits)
if err != nil {
return nil, fmt.Errorf("could not generate private key: %v", err)
}
tufPrivKey, err := RSAToPrivateKey(rsaPrivKey)
if err != nil {
return nil, err
}
logrus.Debugf("generated RSA key with keyID: %s", tufPrivKey.ID())
return tufPrivKey, nil
}
// RSAToPrivateKey converts an rsa.PrivateKey to a TUF data.PrivateKey type
func RSAToPrivateKey(rsaPrivKey *rsa.PrivateKey) (data.PrivateKey, error) {
// Get a DER-encoded representation of the PublicKey
rsaPubBytes, err := x509.MarshalPKIXPublicKey(&rsaPrivKey.PublicKey)
if err != nil {
return nil, fmt.Errorf("failed to marshal public key: %v", err)
}
// Get a DER-encoded representation of the PrivateKey
rsaPrivBytes := x509.MarshalPKCS1PrivateKey(rsaPrivKey)
return data.NewPrivateKey(data.RSAKey, rsaPubBytes, rsaPrivBytes), nil
}
// GenerateECDSAKey generates an ECDSA Private key and returns a TUF PrivateKey
func GenerateECDSAKey(random io.Reader) (data.PrivateKey, error) {
ecdsaPrivKey, err := ecdsa.GenerateKey(elliptic.P256(), random)
if err != nil {
return nil, err
}
tufPrivKey, err := ECDSAToPrivateKey(ecdsaPrivKey)
if err != nil {
return nil, err
}
logrus.Debugf("generated ECDSA key with keyID: %s", tufPrivKey.ID())
return tufPrivKey, nil
}
// GenerateED25519Key generates an ED25519 private key and returns a TUF
// PrivateKey. The serialization format we use is just the public key bytes
// followed by the private key bytes
func GenerateED25519Key(random io.Reader) (data.PrivateKey, error) {
pub, priv, err := ed25519.GenerateKey(random)
if err != nil {
return nil, err
}
var serialized [ed25519.PublicKeySize + ed25519.PrivateKeySize]byte
copy(serialized[:], pub[:])
copy(serialized[ed25519.PublicKeySize:], priv[:])
tufPrivKey, err := ED25519ToPrivateKey(serialized[:])
if err != nil {
return nil, err
}
logrus.Debugf("generated ED25519 key with keyID: %s", tufPrivKey.ID())
return tufPrivKey, nil
}
// ECDSAToPrivateKey converts an ecdsa.PrivateKey to a TUF data.PrivateKey type
func ECDSAToPrivateKey(ecdsaPrivKey *ecdsa.PrivateKey) (data.PrivateKey, error) {
// Get a DER-encoded representation of the PublicKey
ecdsaPubBytes, err := x509.MarshalPKIXPublicKey(&ecdsaPrivKey.PublicKey)
if err != nil {
return nil, fmt.Errorf("failed to marshal public key: %v", err)
}
// Get a DER-encoded representation of the PrivateKey
ecdsaPrivKeyBytes, err := x509.MarshalECPrivateKey(ecdsaPrivKey)
if err != nil {
return nil, fmt.Errorf("failed to marshal private key: %v", err)
}
return data.NewPrivateKey(data.ECDSAKey, ecdsaPubBytes, ecdsaPrivKeyBytes), nil
}
// ED25519ToPrivateKey converts a serialized ED25519 key to a TUF
// data.PrivateKey type
func ED25519ToPrivateKey(privKeyBytes []byte) (data.PrivateKey, error) {
if len(privKeyBytes) != ed25519.PublicKeySize+ed25519.PrivateKeySize {
return nil, errors.New("malformed ed25519 private key")
}
return data.NewPrivateKey(data.ED25519Key, privKeyBytes[:ed25519.PublicKeySize], privKeyBytes), nil
}
func blockType(algorithm data.KeyAlgorithm) (string, error) {
switch algorithm {
case data.RSAKey:
return "RSA PRIVATE KEY", nil
case data.ECDSAKey:
return "EC PRIVATE KEY", nil
case data.ED25519Key:
return "ED25519 PRIVATE KEY", nil
default:
return "", fmt.Errorf("algorithm %s not supported", algorithm)
}
}
// KeyToPEM returns a PEM encoded key from a Private Key
func KeyToPEM(privKey data.PrivateKey) ([]byte, error) {
blockType, err := blockType(privKey.Algorithm())
if err != nil {
return nil, err
}
return pem.EncodeToMemory(&pem.Block{Type: blockType, Bytes: privKey.Private()}), nil
}
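// examplePEMRoundTrip is an illustrative sketch, not part of the original
// file: it generates a key, encodes it to an unencrypted PEM block, and
// parses it back with an empty passphrase.
func examplePEMRoundTrip() error {
privKey, err := GenerateECDSAKey(rand.Reader)
if err != nil {
return err
}
pemBytes, err := KeyToPEM(privKey)
if err != nil {
return err
}
parsed, err := ParsePEMPrivateKey(pemBytes, "")
if err != nil {
return err
}
fmt.Println("round-tripped key ID:", parsed.ID())
return nil
}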
// EncryptPrivateKey returns an encrypted PEM key given a PrivateKey
// and a passphrase
func EncryptPrivateKey(key data.PrivateKey, passphrase string) ([]byte, error) {
blockType, err := blockType(key.Algorithm())
if err != nil {
return nil, err
}
password := []byte(passphrase)
cipherType := x509.PEMCipherAES256
encryptedPEMBlock, err := x509.EncryptPEMBlock(rand.Reader,
blockType,
key.Private(),
password,
cipherType)
if err != nil {
return nil, err
}
return pem.EncodeToMemory(encryptedPEMBlock), nil
}
// CertsToKeys transforms each of the input certificates into its corresponding
// PublicKey
func CertsToKeys(certs []*x509.Certificate) map[string]data.PublicKey {
keys := make(map[string]data.PublicKey)
for _, cert := range certs {
block := pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}
pemdata := pem.EncodeToMemory(&block)
var keyType data.KeyAlgorithm
switch cert.PublicKeyAlgorithm {
case x509.RSA:
keyType = data.RSAx509Key
case x509.ECDSA:
keyType = data.ECDSAx509Key
default:
logrus.Debugf("unknown certificate type found, ignoring")
continue
}
// Create the appropriate PublicKey
newKey := data.NewPublicKey(keyType, pemdata)
keys[newKey.ID()] = newKey
}
return keys
}
// NewCertificate returns an X509 Certificate following a template, given a GUN.
func NewCertificate(gun string) (*x509.Certificate, error) {
notBefore := time.Now()
// Certificates will expire in 10 years
notAfter := notBefore.Add(time.Hour * 24 * 365 * 10)
serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
if err != nil {
return nil, fmt.Errorf("failed to generate new certificate: %v", err)
}
return &x509.Certificate{
SerialNumber: serialNumber,
Subject: pkix.Name{
CommonName: gun,
},
NotBefore: notBefore,
NotAfter: notAfter,
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageCodeSigning},
BasicConstraintsValid: true,
}, nil
}
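// exampleSelfSignedCert is an illustrative sketch, not part of the original
// file: it self-signs the template returned by NewCertificate using a fresh
// ECDSA key from the standard library, returning DER bytes.
func exampleSelfSignedCert(gun string) ([]byte, error) {
template, err := NewCertificate(gun)
if err != nil {
return nil, err
}
priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
if err != nil {
return nil, err
}
// Self-signed: the template serves as both subject and issuer.
return x509.CreateCertificate(rand.Reader, template, template, &priv.PublicKey, priv)
}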

View file

@ -0,0 +1,3 @@
/db/
*.bkp
*.swp

View file

@ -0,0 +1,30 @@
language: go
go:
- 1.4
- tip
sudo: false
before_install:
- go get golang.org/x/tools/cmd/cover
script:
- go test -race -cover ./...
notifications:
irc:
channels:
- "chat.freenode.net#flynn"
use_notice: true
skip_join: true
on_success: change
on_failure: always
template:
- "%{repository}/%{branch} - %{commit}: %{message} %{build_url}"
email:
on_success: never
on_failure: always
matrix:
allow_failures:
- go: tip

View file

@ -0,0 +1,3 @@
Aaron Lehmann <aaron.lehmann@docker.com> (github: aaronlehmann)
Lewis Marshall <lewis@flynn.io> (github: lmars)
Jonathan Rudenberg <jonathan@flynn.io> (github: titanous)

View file

@ -0,0 +1,30 @@
Copyright (c) 2015, Docker Inc.
Copyright (c) 2014-2015 Prime Directive, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Prime Directive, Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View file

@ -0,0 +1 @@
David Lawrence <david.lawrence@docker.com> (github: endophage)

View file

@ -0,0 +1,34 @@
# Set an output prefix, which is the local directory if not specified
PREFIX?=$(shell pwd)
vet:
@echo "+ $@"
@go vet ./...
fmt:
@echo "+ $@"
@test -z "$$(gofmt -s -l . | grep -v Godeps/_workspace/src/ | tee /dev/stderr)" || \
echo "+ please format Go code with 'gofmt -s'"
lint:
@echo "+ $@"
@test -z "$$(golint ./... | grep -v Godeps/_workspace/src/ | tee /dev/stderr)"
build:
@echo "+ $@"
@go build -v ${GO_LDFLAGS} ./...
test:
@echo "+ $@"
@go test -test.short ./...
test-full:
@echo "+ $@"
@go test ./...
binaries: ${PREFIX}/bin/registry ${PREFIX}/bin/registry-api-descriptor-template ${PREFIX}/bin/dist
@echo "+ $@"
clean:
@echo "+ $@"
@rm -rf "${PREFIX}/bin/registry" "${PREFIX}/bin/registry-api-descriptor-template"

View file

@ -0,0 +1,36 @@
# GOTUF
This is still a work in progress but will shortly be a fully compliant
Go implementation of [The Update Framework (TUF)](http://theupdateframework.com/).
## Where's the CLI
This repository provides a library only. The [Notary project](https://github.com/docker/notary)
from Docker should be considered the official CLI to be used with this implementation of TUF.
## TODOs:
- [X] Add Targets to existing repo
- [X] Sign metadata files
- [X] Refactor TufRepo to take care of signing ~~and verification~~
- [ ] Ensure consistent capitalization in naming (TUF\_\_\_ vs Tuf\_\_\_)
- [X] Make caching of metadata files smarter - PR #5
- [ ] ~~Add configuration for CLI commands. Order of configuration priority from most to least: flags, config file, defaults~~ Notary should be the official CLI
- [X] Reassess organization of data types. Possibly consolidate a few things into the data package but break up package into a few more distinct files
- [ ] Comprehensive test cases
- [ ] Delete files no longer in use
- [ ] Fix up errors. Some have to be instantiated, others don't; the inconsistency is annoying.
- [X] Bump version numbers in meta files (could probably be done better)
## Credits
This implementation was originally forked from [flynn/go-tuf](https://github.com/flynn/go-tuf);
however, in attempting to add delegations I found I was making such
significant changes that I could not maintain backwards compatibility
without the code becoming overly convoluted.
Some features, such as pluggable verifiers, have already been merged upstream to flynn/go-tuf,
and we are in discussion with [titanous](https://github.com/titanous) about working to merge the two implementations.
This implementation retains the same 3-Clause BSD license present on
the original flynn implementation.

View file

@ -0,0 +1,538 @@
package client
import (
"bytes"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"path"
"path/filepath"
"strings"
"github.com/Sirupsen/logrus"
tuf "github.com/endophage/gotuf"
"github.com/endophage/gotuf/data"
"github.com/endophage/gotuf/keys"
"github.com/endophage/gotuf/signed"
"github.com/endophage/gotuf/store"
"github.com/endophage/gotuf/utils"
)
const maxSize int64 = 5 << 20
type Client struct {
local *tuf.TufRepo
remote store.RemoteStore
keysDB *keys.KeyDB
cache store.MetadataStore
}
func NewClient(local *tuf.TufRepo, remote store.RemoteStore, keysDB *keys.KeyDB, cache store.MetadataStore) *Client {
return &Client{
local: local,
remote: remote,
keysDB: keysDB,
cache: cache,
}
}
func (c *Client) Update() error {
// 1. Get timestamp
// a. If timestamp error (verification, expired, etc...) download new root and return to 1.
// 2. Check if local snapshot is up to date
// a. If out of date, get updated snapshot
// i. If snapshot error, download new root and return to 1.
// 3. Check if root correct against snapshot
// a. If incorrect, download new root and return to 1.
// 4. Iteratively download and search targets and delegations to find target meta
logrus.Debug("updating TUF client")
err := c.update()
if err != nil {
switch err.(type) {
case signed.ErrRoleThreshold, signed.ErrExpired, tuf.ErrLocalRootExpired:
logrus.Debug("retryable error occurred. Root will be downloaded and another update attempted")
if err := c.downloadRoot(); err != nil {
logrus.Errorf("client Update (Root):", err)
return err
}
default:
logrus.Error("an unexpected error occurred while updating TUF client")
return err
}
// If we error again, we now have the latest root and just want to fail
// out as there's no expectation the problem can be resolved automatically
logrus.Debug("retrying TUF client update")
return c.update()
}
return nil
}
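
// A minimal sketch of driving the client, assuming the caller has already
// built the local repo, remote store, key database, and cache; none of that
// wiring is shown here.
func exampleUpdate(local *tuf.TufRepo, remote store.RemoteStore, keysDB *keys.KeyDB, cache store.MetadataStore) error {
	c := NewClient(local, remote, keysDB, cache)
	// Update runs the flow documented above: timestamp, then snapshot, then
	// the root check, then targets, retrying once with a fresh root when a
	// retryable error occurs.
	return c.Update()
}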
func (c *Client) update() error {
err := c.downloadTimestamp()
if err != nil {
logrus.Errorf("Client Update (Timestamp): %s", err.Error())
return err
}
err = c.downloadSnapshot()
if err != nil {
logrus.Errorf("Client Update (Snapshot): %s", err.Error())
return err
}
err = c.checkRoot()
if err != nil {
// In this instance the root has not expired based on time, but is
// expired based on the snapshot dictating that a new root has been produced.
logrus.Info(err.Error())
return tuf.ErrLocalRootExpired{}
}
// will always need top level targets at a minimum
err = c.downloadTargets("targets")
if err != nil {
logrus.Errorf("Client Update (Targets): %s", err.Error())
return err
}
return nil
}
// checkRoot determines if the hash and size are still those reported
// in the snapshot file. It will also check the expiry, however, if the
// hash and size in snapshot are unchanged but the root file has expired,
// there is little expectation that the situation can be remedied.
func (c Client) checkRoot() error {
role := data.RoleName("root")
size := c.local.Snapshot.Signed.Meta[role].Length
hashSha256 := c.local.Snapshot.Signed.Meta[role].Hashes["sha256"]
raw, err := c.cache.GetMeta("root", size)
if err != nil {
return err
}
hash := sha256.Sum256(raw)
if !bytes.Equal(hash[:], hashSha256) {
return fmt.Errorf("Cached root sha256 did not match snapshot root sha256")
}
return nil
}
// downloadRoot is responsible for downloading the root.json
func (c *Client) downloadRoot() error {
role := data.RoleName("root")
size := maxSize
var expectedSha256 []byte = nil
if c.local.Snapshot != nil {
size = c.local.Snapshot.Signed.Meta[role].Length
expectedSha256 = c.local.Snapshot.Signed.Meta[role].Hashes["sha256"]
}
// if we're bootstrapping we may not have a cached root, an
// error will result in the "previous root version" being
// interpreted as 0.
var download bool
var err error
var cachedRoot []byte = nil
old := &data.Signed{}
version := 0
if expectedSha256 != nil {
// can only trust cache if we have an expected sha256 to trust
cachedRoot, err = c.cache.GetMeta(role, size)
}
if cachedRoot == nil || err != nil {
logrus.Debug("didn't find a cached root, must download")
download = true
} else {
hash := sha256.Sum256(cachedRoot)
if !bytes.Equal(hash[:], expectedSha256) {
logrus.Debug("cached root's hash didn't match expected, must download")
download = true
}
err := json.Unmarshal(cachedRoot, old)
if err == nil {
root, err := data.RootFromSigned(old)
if err == nil {
version = root.Signed.Version
} else {
logrus.Debug("couldn't parse Signed part of cached root, must download")
download = true
}
} else {
logrus.Debug("couldn't parse cached root, must download")
download = true
}
}
var s *data.Signed
var raw []byte
if download {
logrus.Debug("downloading new root")
raw, err = c.remote.GetMeta(role, size)
if err != nil {
return err
}
hash := sha256.Sum256(raw)
if expectedSha256 != nil && !bytes.Equal(hash[:], expectedSha256) {
// if we don't have an expected sha256, we're going to trust the root
// based purely on signature and expiry time validation
return fmt.Errorf("Remote root sha256 did not match snapshot root sha256: %#x vs. %#x", hash, []byte(expectedSha256))
}
s = &data.Signed{}
err = json.Unmarshal(raw, s)
if err != nil {
return err
}
} else {
logrus.Debug("using cached root")
s = old
}
if err := c.verifyRoot(role, s, version); err != nil {
return err
}
if download {
logrus.Debug("caching downloaded root")
// Now that we have accepted new root, write it to cache
if err = c.cache.SetMeta(role, raw); err != nil {
logrus.Errorf("Failed to write root to local cache: %s", err.Error())
}
}
return nil
}
func (c Client) verifyRoot(role string, s *data.Signed, minVersion int) error {
// this will confirm that the root has been signed by the old root role
// as c.keysDB contains the root keys we bootstrapped with.
// Still need to determine if there has been a root key update and
// confirm signature with new root key
logrus.Debug("verifying root with existing keys")
err := signed.Verify(s, role, minVersion, c.keysDB)
if err != nil {
logrus.Debug("root did not verify with existing keys")
return err
}
// This will cause keyDB to get updated, overwriting any keyIDs associated
// with the roles in root.json
logrus.Debug("updating known root roles and keys")
err = c.local.SetRoot(s)
if err != nil {
logrus.Error(err.Error())
return err
}
// verify again now that the old keys have been replaced with the new keys.
// TODO(endophage): be more intelligent and only re-verify if we detect
// there has been a change in root keys
logrus.Debug("verifying root with updated keys")
err = signed.Verify(s, role, minVersion, c.keysDB)
if err != nil {
logrus.Debug("root did not verify with new keys")
return err
}
logrus.Debug("successfully verified root")
return nil
}
// downloadTimestamp is responsible for downloading the timestamp.json
func (c *Client) downloadTimestamp() error {
logrus.Debug("downloadTimestamp")
role := data.RoleName("timestamp")
// We may not have a cached timestamp if this is the first time
// we're interacting with the repo. This will result in the
// version being 0
var download bool
old := &data.Signed{}
version := 0
cachedTS, err := c.cache.GetMeta(role, maxSize)
if err == nil {
err := json.Unmarshal(cachedTS, old)
if err == nil {
ts, err := data.TimestampFromSigned(old)
if err == nil {
version = ts.Signed.Version
}
} else {
old = nil
}
}
// unlike root, targets and snapshot, always try to download the timestamp
// from remote, only using the cached one if we couldn't reach the remote.
logrus.Debug("Downloading timestamp")
raw, err := c.remote.GetMeta(role, maxSize)
var s *data.Signed
if err != nil || len(raw) == 0 {
if err, ok := err.(store.ErrMetaNotFound); ok {
return err
}
if old == nil {
if err == nil {
// couldn't retrieve data from server and don't have valid
// data in cache.
return store.ErrMetaNotFound{}
}
return err
}
s = old
} else {
download = true
s = &data.Signed{}
err = json.Unmarshal(raw, s)
if err != nil {
return err
}
}
err = signed.Verify(s, role, version, c.keysDB)
if err != nil {
return err
}
logrus.Debug("successfully verified timestamp")
if download {
c.cache.SetMeta(role, raw)
}
c.local.SetTimestamp(s)
return nil
}
// downloadSnapshot is responsible for downloading the snapshot.json
func (c *Client) downloadSnapshot() error {
logrus.Debug("downloadSnapshot")
role := data.RoleName("snapshot")
size := c.local.Timestamp.Signed.Meta[role].Length
expectedSha256, ok := c.local.Timestamp.Signed.Meta[role].Hashes["sha256"]
if !ok {
return fmt.Errorf("Sha256 is currently the only hash supported by this client. No Sha256 found for snapshot")
}
var download bool
old := &data.Signed{}
version := 0
raw, err := c.cache.GetMeta(role, size)
if raw == nil || err != nil {
logrus.Debug("no snapshot in cache, must download")
download = true
} else {
// file may have been tampered with on disk. Always check the hash!
genHash := sha256.Sum256(raw)
if !bytes.Equal(genHash[:], expectedSha256) {
logrus.Debug("hash of snapshot in cache did not match expected hash, must download")
download = true
}
err := json.Unmarshal(raw, old)
if err == nil {
snap, err := data.SnapshotFromSigned(old)
if err == nil {
version = snap.Signed.Version
} else {
logrus.Debug("Could not parse Signed part of snapshot, must download")
download = true
}
} else {
logrus.Debug("Could not parse snapshot, must download")
download = true
}
}
var s *data.Signed
if download {
logrus.Debug("downloading new snapshot")
raw, err = c.remote.GetMeta(role, size)
if err != nil {
return err
}
genHash := sha256.Sum256(raw)
if !bytes.Equal(genHash[:], expectedSha256) {
return fmt.Errorf("Retrieved snapshot did not verify against hash in timestamp.")
}
s = &data.Signed{}
err = json.Unmarshal(raw, s)
if err != nil {
return err
}
} else {
logrus.Debug("using cached snapshot")
s = old
}
err = signed.Verify(s, role, version, c.keysDB)
if err != nil {
return err
}
logrus.Debug("successfully verified snapshot")
c.local.SetSnapshot(s)
if download {
err = c.cache.SetMeta(role, raw)
if err != nil {
logrus.Errorf("Failed to write snapshot to local cache: %s", err.Error())
}
}
return nil
}
// downloadTargets is responsible for downloading any targets file
// including delegates roles. It will download the whole tree of
// delegated roles below the given one
func (c *Client) downloadTargets(role string) error {
role = data.RoleName(role) // this will really only do something for base targets role
snap := c.local.Snapshot.Signed
root := c.local.Root.Signed
r := c.keysDB.GetRole(role)
if r == nil {
return fmt.Errorf("Invalid role: %s", role)
}
keyIDs := r.KeyIDs
s, err := c.GetTargetsFile(role, keyIDs, snap.Meta, root.ConsistentSnapshot, r.Threshold)
if err != nil {
logrus.Error("Error getting targets file:", err)
return err
}
err = c.local.SetTargets(role, s)
if err != nil {
return err
}
return nil
}
func (c Client) GetTargetsFile(role string, keyIDs []string, snapshotMeta data.Files, consistent bool, threshold int) (*data.Signed, error) {
// require role exists in snapshots
roleMeta, ok := snapshotMeta[role]
if !ok {
return nil, fmt.Errorf("Snapshot does not contain target role")
}
expectedSha256, ok := snapshotMeta[role].Hashes["sha256"]
if !ok {
return nil, fmt.Errorf("Sha256 is currently the only hash supported by this client. No Sha256 found for targets role %s", role)
}
// try to get meta file from content addressed cache
var download bool
old := &data.Signed{}
version := 0
raw, err := c.cache.GetMeta(role, roleMeta.Length)
if err != nil || raw == nil {
logrus.Debugf("Couldn't not find cached %s, must download", role)
download = true
} else {
// file may have been tampered with on disk. Always check the hash!
genHash := sha256.Sum256(raw)
if !bytes.Equal(genHash[:], expectedSha256) {
download = true
}
err := json.Unmarshal(raw, old)
if err == nil {
targ, err := data.TargetsFromSigned(old)
if err == nil {
version = targ.Signed.Version
} else {
download = true
}
} else {
download = true
}
}
var s *data.Signed
if download {
rolePath, err := c.RoleTargetsPath(role, hex.EncodeToString(expectedSha256), consistent)
if err != nil {
return nil, err
}
raw, err = c.remote.GetMeta(rolePath, snapshotMeta[role].Length)
if err != nil {
return nil, err
}
s = &data.Signed{}
err = json.Unmarshal(raw, s)
if err != nil {
logrus.Error("Error unmarshalling targets file:", err)
return nil, err
}
} else {
logrus.Debug("using cached ", role)
s = old
}
err = signed.Verify(s, role, version, c.keysDB)
if err != nil {
return nil, err
}
logrus.Debugf("successfully verified %s", role)
if download {
// if we error when setting meta, we should continue.
err = c.cache.SetMeta(role, raw)
if err != nil {
logrus.Errorf("Failed to write snapshot to local cache: %s", err.Error())
}
}
return s, nil
}
// RoleTargetsPath generates the appropriate filename for the targets file,
// based on whether the repo is marked as consistent.
func (c Client) RoleTargetsPath(role string, hashSha256 string, consistent bool) (string, error) {
if consistent {
dir := filepath.Dir(role)
if strings.Contains(role, "/") {
lastSlashIdx := strings.LastIndex(role, "/")
role = role[lastSlashIdx+1:]
}
role = path.Join(
dir,
fmt.Sprintf("%s.%s.json", hashSha256, role),
)
}
return role, nil
}
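
// Illustrative sketch of the consistent-snapshot naming above; the hash
// argument here is a stand-in value, not a real digest.
func exampleRoleTargetsPath(c Client) {
	p, _ := c.RoleTargetsPath("targets/releases", "abc123", true)
	// p is now "targets/abc123.releases.json"; with consistent == false the
	// role name would be returned unchanged.
	fmt.Println(p)
}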
// TargetMeta ensures the repo is up to date, downloading the minimum
// necessary metadata files
func (c Client) TargetMeta(path string) (*data.FileMeta, error) {
c.Update()
var meta *data.FileMeta
pathDigest := sha256.Sum256([]byte(path))
pathHex := hex.EncodeToString(pathDigest[:])
// FIFO list of targets delegations to inspect for target
roles := []string{data.ValidRoles["targets"]}
var role string
for len(roles) > 0 {
// pop the next role off the front of the queue; this can't live in the
// for statement because roles grows as delegations are discovered
role = roles[0]
roles = roles[1:]
// Download the target role file if necessary
err := c.downloadTargets(role)
if err != nil {
// as long as we find a valid target somewhere we're happy.
// continue and search other delegated roles if any
continue
}
meta = c.local.TargetMeta(role, path)
if meta != nil {
// we found the target!
return meta, nil
}
delegations := c.local.TargetDelegations(role, path, pathHex)
for _, d := range delegations {
roles = append(roles, d.Name)
}
}
return meta, nil
}
func (c Client) DownloadTarget(dst io.Writer, path string, meta *data.FileMeta) error {
reader, err := c.remote.GetTarget(path)
if err != nil {
return err
}
defer reader.Close()
r := io.TeeReader(
io.LimitReader(reader, meta.Length),
dst,
)
err = utils.ValidateTarget(r, meta)
return err
}
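
// A minimal sketch of resolving and fetching a single target with the two
// methods above; dst can be any io.Writer and target is a hypothetical path.
func exampleFetchTarget(c Client, dst io.Writer, target string) error {
	meta, err := c.TargetMeta(target) // updates the repo and walks delegations
	if err != nil {
		return err
	}
	if meta == nil {
		return fmt.Errorf("no role lists target %s", target)
	}
	// DownloadTarget streams the file into dst while validating its length
	// and hashes against the metadata just resolved.
	return c.DownloadTarget(dst, target, meta)
}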

View file

@ -0,0 +1,106 @@
package client
import (
"errors"
"fmt"
)
var (
ErrNoRootKeys = errors.New("tuf: no root keys found in local meta store")
ErrInsufficientKeys = errors.New("tuf: insufficient keys to meet threshold")
)
type ErrMissingRemoteMetadata struct {
Name string
}
func (e ErrMissingRemoteMetadata) Error() string {
return fmt.Sprintf("tuf: missing remote metadata %s", e.Name)
}
type ErrDownloadFailed struct {
File string
Err error
}
func (e ErrDownloadFailed) Error() string {
return fmt.Sprintf("tuf: failed to download %s: %s", e.File, e.Err)
}
type ErrDecodeFailed struct {
File string
Err error
}
func (e ErrDecodeFailed) Error() string {
return fmt.Sprintf("tuf: failed to decode %s: %s", e.File, e.Err)
}
func isDecodeFailedWithErr(err, expected error) bool {
e, ok := err.(ErrDecodeFailed)
if !ok {
return false
}
return e.Err == expected
}
type ErrNotFound struct {
File string
}
func (e ErrNotFound) Error() string {
return fmt.Sprintf("tuf: file not found: %s", e.File)
}
func IsNotFound(err error) bool {
_, ok := err.(ErrNotFound)
return ok
}
type ErrWrongSize struct {
File string
Actual int64
Expected int64
}
func (e ErrWrongSize) Error() string {
return fmt.Sprintf("tuf: unexpected file size: %s (expected %d bytes, got %d bytes)", e.File, e.Expected, e.Actual)
}
type ErrLatestSnapshot struct {
Version int
}
func (e ErrLatestSnapshot) Error() string {
return fmt.Sprintf("tuf: the local snapshot version (%d) is the latest", e.Version)
}
func IsLatestSnapshot(err error) bool {
_, ok := err.(ErrLatestSnapshot)
return ok
}
type ErrUnknownTarget struct {
Name string
}
func (e ErrUnknownTarget) Error() string {
return fmt.Sprintf("tuf: unknown target file: %s", e.Name)
}
type ErrMetaTooLarge struct {
Name string
Size int64
}
func (e ErrMetaTooLarge) Error() string {
return fmt.Sprintf("tuf: %s size %d bytes greater than maximum", e.Name, e.Size)
}
type ErrInvalidURL struct {
URL string
}
func (e ErrInvalidURL) Error() string {
return fmt.Sprintf("tuf: invalid repository URL %s", e.URL)
}

View file

@ -0,0 +1,96 @@
package data
import (
"crypto/sha256"
"encoding/hex"
"github.com/Sirupsen/logrus"
cjson "github.com/tent/canonical-json-go"
)
type Key interface {
ID() string
Algorithm() KeyAlgorithm
Public() []byte
}
type PublicKey interface {
Key
}
type PrivateKey interface {
Key
Private() []byte
}
type KeyPair struct {
Public []byte `json:"public"`
Private []byte `json:"private"`
}
// TUFKey is the structure used for both public and private keys in TUF.
// Normally it would make sense to use different structures for public and
// private keys, but that would change the key ID algorithm (since the canonical
// JSON would be different). This structure should normally be accessed through
// the PublicKey or PrivateKey interfaces.
type TUFKey struct {
id string `json:"-"`
Type KeyAlgorithm `json:"keytype"`
Value KeyPair `json:"keyval"`
}
func NewPrivateKey(algorithm KeyAlgorithm, public, private []byte) *TUFKey {
return &TUFKey{
Type: algorithm,
Value: KeyPair{
Public: public,
Private: private,
},
}
}
func (k TUFKey) Algorithm() KeyAlgorithm {
return k.Type
}
func (k *TUFKey) ID() string {
if k.id == "" {
pubK := NewPublicKey(k.Algorithm(), k.Public())
data, err := cjson.Marshal(&pubK)
if err != nil {
logrus.Error("Error generating key ID:", err)
}
digest := sha256.Sum256(data)
k.id = hex.EncodeToString(digest[:])
}
return k.id
}
func (k TUFKey) Public() []byte {
return k.Value.Public
}
func (k *TUFKey) Private() []byte {
return k.Value.Private
}
func NewPublicKey(algorithm KeyAlgorithm, public []byte) PublicKey {
return &TUFKey{
Type: algorithm,
Value: KeyPair{
Public: public,
Private: nil,
},
}
}
func PublicKeyFromPrivate(pk PrivateKey) PublicKey {
return &TUFKey{
Type: pk.Algorithm(),
Value: KeyPair{
Public: pk.Public(),
Private: nil,
},
}
}
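
// Sketch of the key-ID derivation performed by ID() above: the ID is the
// hex-encoded SHA-256 of the canonical JSON of the public half of the key,
// so a private key and its public counterpart share the same ID.
func exampleKeyID(pub PublicKey) string {
	raw, err := cjson.Marshal(&pub)
	if err != nil {
		return ""
	}
	digest := sha256.Sum256(raw)
	return hex.EncodeToString(digest[:])
}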

View file

@ -0,0 +1,117 @@
package data
import (
"fmt"
"strings"
"github.com/endophage/gotuf/errors"
)
var ValidRoles = map[string]string{
"root": "root",
"targets": "targets",
"snapshot": "snapshot",
"timestamp": "timestamp",
}
func SetValidRoles(rs map[string]string) {
for k, v := range rs {
ValidRoles[strings.ToLower(k)] = strings.ToLower(v)
}
}
func RoleName(role string) string {
if r, ok := ValidRoles[role]; ok {
return r
}
return role
}
// ValidRole only determines whether the name is semantically
// correct. For target delegated roles, it does NOT check that
// the appropriate parent roles exist.
func ValidRole(name string) bool {
name = strings.ToLower(name)
if v, ok := ValidRoles[name]; ok {
return name == v
}
targetsBase := fmt.Sprintf("%s/", ValidRoles["targets"])
if strings.HasPrefix(name, targetsBase) {
return true
}
for _, v := range ValidRoles {
if name == v {
return true
}
}
return false
}
type RootRole struct {
KeyIDs []string `json:"keyids"`
Threshold int `json:"threshold"`
}
type Role struct {
RootRole
Name string `json:"name"`
Paths []string `json:"paths,omitempty"`
PathHashPrefixes []string `json:"path_hash_prefixes,omitempty"`
}
func NewRole(name string, threshold int, keyIDs, paths, pathHashPrefixes []string) (*Role, error) {
if len(paths) > 0 && len(pathHashPrefixes) > 0 {
return nil, errors.ErrInvalidRole{}
}
if threshold < 1 {
return nil, errors.ErrInvalidRole{}
}
if !ValidRole(name) {
return nil, errors.ErrInvalidRole{}
}
return &Role{
RootRole: RootRole{
KeyIDs: keyIDs,
Threshold: threshold,
},
Name: name,
Paths: paths,
PathHashPrefixes: pathHashPrefixes,
}, nil
}
func (r Role) IsValid() bool {
return !(len(r.Paths) > 0 && len(r.PathHashPrefixes) > 0)
}
func (r Role) ValidKey(id string) bool {
for _, key := range r.KeyIDs {
if key == id {
return true
}
}
return false
}
func (r Role) CheckPaths(path string) bool {
for _, p := range r.Paths {
if strings.HasPrefix(path, p) {
return true
}
}
return false
}
func (r Role) CheckPrefixes(hash string) bool {
for _, p := range r.PathHashPrefixes {
if strings.HasPrefix(hash, p) {
return true
}
}
return false
}
func (r Role) IsDelegation() bool {
targetsBase := fmt.Sprintf("%s/", ValidRoles["targets"])
return strings.HasPrefix(r.Name, targetsBase)
}
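
// Illustrative sketch: delegated role names live under "targets/" and must
// carry either Paths or PathHashPrefixes, never both. The name and path here
// are hypothetical.
func exampleDelegation(keyIDs []string) (*Role, error) {
	r, err := NewRole("targets/releases", 1, keyIDs, []string{"releases/"}, nil)
	if err != nil {
		return nil, err
	}
	// r.IsDelegation() and r.CheckPaths("releases/v1.0") both return true.
	return r, nil
}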

View file

@ -0,0 +1,90 @@
package data
import (
"encoding/json"
"time"
cjson "github.com/tent/canonical-json-go"
)
type SignedRoot struct {
Signatures []Signature
Signed Root
Dirty bool
}
type Root struct {
Type string `json:"_type"`
Version int `json:"version"`
Expires time.Time `json:"expires"`
// These keys are public keys. We use TUFKey instead of PublicKey to
// support direct JSON unmarshaling.
Keys map[string]*TUFKey `json:"keys"`
Roles map[string]*RootRole `json:"roles"`
ConsistentSnapshot bool `json:"consistent_snapshot"`
}
func NewRoot(keys map[string]PublicKey, roles map[string]*RootRole, consistent bool) (*SignedRoot, error) {
signedRoot := &SignedRoot{
Signatures: make([]Signature, 0),
Signed: Root{
Type: TUFTypes["root"],
Version: 0,
Expires: DefaultExpires("root"),
Keys: make(map[string]*TUFKey),
Roles: roles,
ConsistentSnapshot: consistent,
},
Dirty: true,
}
// Convert PublicKeys to TUFKey structures
// The Signed.Keys map needs to have *TUFKey values, since this
// structure gets directly unmarshalled from JSON, and it's not
// possible to unmarshal into an interface type. But this function
// takes a map with PublicKey values to avoid exposing this ugliness.
// The loop below converts to the TUFKey type.
for k, v := range keys {
signedRoot.Signed.Keys[k] = &TUFKey{
Type: v.Algorithm(),
Value: KeyPair{
Public: v.Public(),
Private: nil,
},
}
}
return signedRoot, nil
}
func (r SignedRoot) ToSigned() (*Signed, error) {
s, err := cjson.Marshal(r.Signed)
if err != nil {
return nil, err
}
signed := json.RawMessage{}
err = signed.UnmarshalJSON(s)
if err != nil {
return nil, err
}
sigs := make([]Signature, len(r.Signatures))
copy(sigs, r.Signatures)
return &Signed{
Signatures: sigs,
Signed: signed,
}, nil
}
func RootFromSigned(s *Signed) (*SignedRoot, error) {
r := Root{}
err := json.Unmarshal(s.Signed, &r)
if err != nil {
return nil, err
}
sigs := make([]Signature, len(s.Signatures))
copy(sigs, s.Signatures)
return &SignedRoot{
Signatures: sigs,
Signed: r,
}, nil
}
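
// Sketch of the round trip above: ToSigned canonicalizes the Root for
// signing, and RootFromSigned recovers a SignedRoot from the generic
// envelope once signatures have been checked elsewhere.
func exampleRootRoundTrip(r *SignedRoot) (*SignedRoot, error) {
	s, err := r.ToSigned()
	if err != nil {
		return nil, err
	}
	return RootFromSigned(s)
}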

View file

@ -0,0 +1,98 @@
package data
import (
"bytes"
"encoding/json"
"time"
"github.com/Sirupsen/logrus"
cjson "github.com/tent/canonical-json-go"
)
type SignedSnapshot struct {
Signatures []Signature
Signed Snapshot
Dirty bool
}
type Snapshot struct {
Type string `json:"_type"`
Version int `json:"version"`
Expires time.Time `json:"expires"`
Meta Files `json:"meta"`
}
func NewSnapshot(root *Signed, targets *Signed) (*SignedSnapshot, error) {
logrus.Debug("generating new snapshot...")
targetsJSON, err := json.Marshal(targets)
if err != nil {
logrus.Debug("Error Marshalling Targets")
return nil, err
}
rootJSON, err := json.Marshal(root)
if err != nil {
logrus.Debug("Error Marshalling Root")
return nil, err
}
rootMeta, err := NewFileMeta(bytes.NewReader(rootJSON), "sha256")
if err != nil {
return nil, err
}
targetsMeta, err := NewFileMeta(bytes.NewReader(targetsJSON), "sha256")
if err != nil {
return nil, err
}
return &SignedSnapshot{
Signatures: make([]Signature, 0),
Signed: Snapshot{
Type: TUFTypes["snapshot"],
Version: 0,
Expires: DefaultExpires("snapshot"),
Meta: Files{
ValidRoles["root"]: rootMeta,
ValidRoles["targets"]: targetsMeta,
},
},
}, nil
}
func (sp *SignedSnapshot) hashForRole(role string) []byte {
return sp.Signed.Meta[role].Hashes["sha256"]
}
func (sp SignedSnapshot) ToSigned() (*Signed, error) {
s, err := cjson.Marshal(sp.Signed)
if err != nil {
return nil, err
}
signed := json.RawMessage{}
err = signed.UnmarshalJSON(s)
if err != nil {
return nil, err
}
sigs := make([]Signature, len(sp.Signatures))
copy(sigs, sp.Signatures)
return &Signed{
Signatures: sigs,
Signed: signed,
}, nil
}
func (sp *SignedSnapshot) AddMeta(role string, meta FileMeta) {
sp.Signed.Meta[role] = meta
sp.Dirty = true
}
func SnapshotFromSigned(s *Signed) (*SignedSnapshot, error) {
sp := Snapshot{}
err := json.Unmarshal(s.Signed, &sp)
if err != nil {
return nil, err
}
sigs := make([]Signature, len(s.Signatures))
copy(sigs, s.Signatures)
return &SignedSnapshot{
Signatures: sigs,
Signed: sp,
}, nil
}

View file

@ -0,0 +1,118 @@
package data
import (
"crypto/sha256"
"encoding/hex"
"encoding/json"
"time"
cjson "github.com/tent/canonical-json-go"
)
type SignedTargets struct {
Signatures []Signature
Signed Targets
Dirty bool
}
type Targets struct {
Type string `json:"_type"`
Version int `json:"version"`
Expires time.Time `json:"expires"`
Targets Files `json:"targets"`
Delegations Delegations `json:"delegations,omitempty"`
}
func NewTargets() *SignedTargets {
return &SignedTargets{
Signatures: make([]Signature, 0),
Signed: Targets{
Type: TUFTypes["targets"],
Version: 0,
Expires: DefaultExpires("targets"),
Targets: make(Files),
Delegations: *NewDelegations(),
},
Dirty: true,
}
}
// GetMeta attempts to find the targets entry for the path. It
// will return nil in the case of the target not being found.
func (t SignedTargets) GetMeta(path string) *FileMeta {
for p, meta := range t.Signed.Targets {
if p == path {
return &meta
}
}
return nil
}
// GetDelegations filters the roles and associated keys that may be
// the signers for the given target path. If no appropriate roles
// can be found, it will simply return nil for the return values.
// The returned slice of Role will have order maintained relative
// to the role slice on Delegations per TUF spec proposal on using
// order to determine priority.
func (t SignedTargets) GetDelegations(path string) []*Role {
roles := make([]*Role, 0)
pathHashBytes := sha256.Sum256([]byte(path))
pathHash := hex.EncodeToString(pathHashBytes[:])
for _, r := range t.Signed.Delegations.Roles {
if !r.IsValid() {
// Role has both Paths and PathHashPrefixes.
continue
}
if r.CheckPaths(path) {
roles = append(roles, r)
continue
}
if r.CheckPrefixes(pathHash) {
roles = append(roles, r)
continue
}
//keysDB.AddRole(r)
}
return roles
}
func (t *SignedTargets) AddTarget(path string, meta FileMeta) {
t.Signed.Targets[path] = meta
t.Dirty = true
}
// AddDelegation is not yet implemented; it currently accepts its arguments
// and returns nil without modifying the role.
func (t *SignedTargets) AddDelegation(role *Role, keys []*PublicKey) error {
	return nil
}
func (t SignedTargets) ToSigned() (*Signed, error) {
s, err := cjson.Marshal(t.Signed)
if err != nil {
return nil, err
}
signed := json.RawMessage{}
err = signed.UnmarshalJSON(s)
if err != nil {
return nil, err
}
sigs := make([]Signature, len(t.Signatures))
copy(sigs, t.Signatures)
return &Signed{
Signatures: sigs,
Signed: signed,
}, nil
}
func TargetsFromSigned(s *Signed) (*SignedTargets, error) {
t := Targets{}
err := json.Unmarshal(s.Signed, &t)
if err != nil {
return nil, err
}
sigs := make([]Signature, len(s.Signatures))
copy(sigs, s.Signatures)
return &SignedTargets{
Signatures: sigs,
Signed: t,
}, nil
}

View file

@ -0,0 +1,76 @@
package data
import (
"bytes"
"encoding/json"
"time"
cjson "github.com/tent/canonical-json-go"
)
type SignedTimestamp struct {
Signatures []Signature
Signed Timestamp
Dirty bool
}
type Timestamp struct {
Type string `json:"_type"`
Version int `json:"version"`
Expires time.Time `json:"expires"`
Meta Files `json:"meta"`
}
func NewTimestamp(snapshot *Signed) (*SignedTimestamp, error) {
snapshotJSON, err := json.Marshal(snapshot)
if err != nil {
return nil, err
}
snapshotMeta, err := NewFileMeta(bytes.NewReader(snapshotJSON), "sha256")
if err != nil {
return nil, err
}
return &SignedTimestamp{
Signatures: make([]Signature, 0),
Signed: Timestamp{
Type: TUFTypes["timestamp"],
Version: 0,
Expires: DefaultExpires("timestamp"),
Meta: Files{
ValidRoles["snapshot"]: snapshotMeta,
},
},
}, nil
}
func (ts SignedTimestamp) ToSigned() (*Signed, error) {
s, err := cjson.Marshal(ts.Signed)
if err != nil {
return nil, err
}
signed := json.RawMessage{}
err = signed.UnmarshalJSON(s)
if err != nil {
return nil, err
}
sigs := make([]Signature, len(ts.Signatures))
copy(sigs, ts.Signatures)
return &Signed{
Signatures: sigs,
Signed: signed,
}, nil
}
func TimestampFromSigned(s *Signed) (*SignedTimestamp, error) {
ts := Timestamp{}
err := json.Unmarshal(s.Signed, &ts)
if err != nil {
return nil, err
}
sigs := make([]Signature, len(s.Signatures))
copy(sigs, s.Signatures)
return &SignedTimestamp{
Signatures: sigs,
Signed: ts,
}, nil
}

View file

@ -0,0 +1,177 @@
package data
import (
"crypto/sha256"
"crypto/sha512"
"encoding/json"
"fmt"
"hash"
"io"
"io/ioutil"
"strings"
"time"
"github.com/Sirupsen/logrus"
)
type KeyAlgorithm string
func (k KeyAlgorithm) String() string {
return string(k)
}
type SigAlgorithm string
func (k SigAlgorithm) String() string {
return string(k)
}
const (
defaultHashAlgorithm = "sha256"
EDDSASignature SigAlgorithm = "eddsa"
RSAPSSSignature SigAlgorithm = "rsapss"
RSAPKCS1v15Signature SigAlgorithm = "rsapkcs1v15"
ECDSASignature SigAlgorithm = "ecdsa"
PyCryptoSignature SigAlgorithm = "pycrypto-pkcs#1 pss"
ED25519Key KeyAlgorithm = "ed25519"
RSAKey KeyAlgorithm = "rsa"
RSAx509Key KeyAlgorithm = "rsa-x509"
ECDSAKey KeyAlgorithm = "ecdsa"
ECDSAx509Key KeyAlgorithm = "ecdsa-x509"
)
var TUFTypes = map[string]string{
"targets": "Targets",
"root": "Root",
"snapshot": "Snapshot",
"timestamp": "Timestamp",
}
// SetTUFTypes allows one to override some or all of the default
// type names in TUF.
func SetTUFTypes(ts map[string]string) {
for k, v := range ts {
TUFTypes[k] = v
}
}
// ValidTUFType checks whether the given type name is valid.
func ValidTUFType(t string) bool {
// most people will just use the defaults, so do this fast check first.
// Compare the key and value anyway, in case some unknown vulnerability
// lets them differ in the map.
if v, ok := TUFTypes[t]; ok {
return t == v
}
// For people that feel the need to change the default type names.
for _, v := range TUFTypes {
if t == v {
return true
}
}
return false
}
type Signed struct {
Signed json.RawMessage `json:"signed"`
Signatures []Signature `json:"signatures"`
}
type Signature struct {
KeyID string `json:"keyid"`
Method SigAlgorithm `json:"method"`
Signature []byte `json:"sig"`
}
type Files map[string]FileMeta
type Hashes map[string][]byte
type FileMeta struct {
Length int64 `json:"length"`
Hashes Hashes `json:"hashes"`
Custom json.RawMessage `json:"custom,omitempty"`
}
func NewFileMeta(r io.Reader, hashAlgorithms ...string) (FileMeta, error) {
if len(hashAlgorithms) == 0 {
hashAlgorithms = []string{defaultHashAlgorithm}
}
hashes := make(map[string]hash.Hash, len(hashAlgorithms))
for _, hashAlgorithm := range hashAlgorithms {
var h hash.Hash
switch hashAlgorithm {
case "sha256":
h = sha256.New()
case "sha512":
h = sha512.New()
default:
return FileMeta{}, fmt.Errorf("Unknown Hash Algorithm: %s", hashAlgorithm)
}
hashes[hashAlgorithm] = h
r = io.TeeReader(r, h)
}
n, err := io.Copy(ioutil.Discard, r)
if err != nil {
return FileMeta{}, err
}
m := FileMeta{Length: n, Hashes: make(Hashes, len(hashes))}
for hashAlgorithm, h := range hashes {
m.Hashes[hashAlgorithm] = h.Sum(nil)
}
return m, nil
}
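
// Sketch: computing FileMeta for an in-memory blob with both supported hash
// algorithms; strings.NewReader stands in for any io.Reader.
func exampleFileMeta(blob string) (FileMeta, error) {
	return NewFileMeta(strings.NewReader(blob), "sha256", "sha512")
}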
type Delegations struct {
Keys map[string]PublicKey `json:"keys"`
Roles []*Role `json:"roles"`
}
func NewDelegations() *Delegations {
return &Delegations{
Keys: make(map[string]PublicKey),
Roles: make([]*Role, 0),
}
}
// defaultExpiryTimes defines, per role, the number of days after which metadata expires
var defaultExpiryTimes = map[string]int{
"root": 365,
"targets": 90,
"snapshot": 7,
"timestamp": 1,
}
// SetDefaultExpiryTimes allows one to change the default expiries.
func SetDefaultExpiryTimes(times map[string]int) {
for key, value := range times {
if _, ok := defaultExpiryTimes[key]; !ok {
logrus.Errorf("Attempted to set default expiry for an unknown role: %s", key)
continue
}
defaultExpiryTimes[key] = value
}
}
func DefaultExpires(role string) time.Time {
	var t time.Time
	if days, ok := defaultExpiryTimes[role]; ok {
		return time.Now().AddDate(0, 0, days)
	}
	return t.UTC().Round(time.Second)
}
type unmarshalledSignature Signature
func (s *Signature) UnmarshalJSON(data []byte) error {
uSignature := unmarshalledSignature{}
err := json.Unmarshal(data, &uSignature)
if err != nil {
return err
}
uSignature.Method = SigAlgorithm(strings.ToLower(string(uSignature.Method)))
*s = Signature(uSignature)
return nil
}

View file

@ -0,0 +1,85 @@
package errors
import (
"errors"
"fmt"
"time"
)
var ErrInitNotAllowed = errors.New("tuf: repository already initialized")
type ErrMissingMetadata struct {
Name string
}
func (e ErrMissingMetadata) Error() string {
return fmt.Sprintf("tuf: missing metadata %s", e.Name)
}
type ErrFileNotFound struct {
Path string
}
func (e ErrFileNotFound) Error() string {
return fmt.Sprintf("tuf: file not found %s", e.Path)
}
type ErrInsufficientKeys struct {
Name string
}
func (e ErrInsufficientKeys) Error() string {
return fmt.Sprintf("tuf: insufficient keys to sign %s", e.Name)
}
type ErrInsufficientSignatures struct {
Name string
Err error
}
func (e ErrInsufficientSignatures) Error() string {
return fmt.Sprintf("tuf: insufficient signatures for %s: %s", e.Name, e.Err)
}
type ErrInvalidRole struct {
Role string
}
func (e ErrInvalidRole) Error() string {
return fmt.Sprintf("tuf: invalid role %s", e.Role)
}
type ErrInvalidExpires struct {
Expires time.Time
}
func (e ErrInvalidExpires) Error() string {
return fmt.Sprintf("tuf: invalid expires: %s", e.Expires)
}
type ErrKeyNotFound struct {
Role string
KeyID string
}
func (e ErrKeyNotFound) Error() string {
return fmt.Sprintf(`tuf: no key with id "%s" exists for the %s role`, e.KeyID, e.Role)
}
type ErrNotEnoughKeys struct {
Role string
Keys int
Threshold int
}
func (e ErrNotEnoughKeys) Error() string {
return fmt.Sprintf("tuf: %s role has insufficient keys for threshold (has %d keys, threshold is %d)", e.Role, e.Keys, e.Threshold)
}
type ErrPassphraseRequired struct {
Role string
}
func (e ErrPassphraseRequired) Error() string {
return fmt.Sprintf("tuf: a passphrase is required to access the encrypted %s keys file", e.Role)
}

View file

@ -0,0 +1,60 @@
package keys
import (
"errors"
"github.com/endophage/gotuf/data"
)
var (
ErrWrongType = errors.New("tuf: invalid key type")
ErrExists = errors.New("tuf: key already in db")
ErrWrongID = errors.New("tuf: key id mismatch")
ErrInvalidKey = errors.New("tuf: invalid key")
ErrInvalidRole = errors.New("tuf: invalid role")
ErrInvalidKeyID = errors.New("tuf: invalid key id")
ErrInvalidThreshold = errors.New("tuf: invalid role threshold")
)
type KeyDB struct {
roles map[string]*data.Role
keys map[string]data.PublicKey
}
func NewDB() *KeyDB {
return &KeyDB{
roles: make(map[string]*data.Role),
keys: make(map[string]data.PublicKey),
}
}
func (db *KeyDB) AddKey(k data.PublicKey) {
db.keys[k.ID()] = k
}
func (db *KeyDB) AddRole(r *data.Role) error {
if !data.ValidRole(r.Name) {
return ErrInvalidRole
}
if r.Threshold < 1 {
return ErrInvalidThreshold
}
// validate all key ids are in the keys maps
for _, id := range r.KeyIDs {
if _, ok := db.keys[id]; !ok {
return ErrInvalidKeyID
}
}
db.roles[r.Name] = r
return nil
}
func (db *KeyDB) GetKey(id string) data.PublicKey {
return db.keys[id]
}
func (db *KeyDB) GetRole(name string) *data.Role {
return db.roles[name]
}
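
// Sketch of populating a KeyDB: keys must be added before any role that
// references them, since AddRole rejects unknown key IDs. The role name and
// path here are illustrative.
func exampleKeyDB(k data.PublicKey) (*KeyDB, error) {
	db := NewDB()
	db.AddKey(k)
	role, err := data.NewRole("targets", 1, []string{k.ID()}, []string{""}, nil)
	if err != nil {
		return nil, err
	}
	if err := db.AddRole(role); err != nil {
		return nil, err
	}
	return db, nil
}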

View file

@ -0,0 +1,75 @@
package signed
import (
"crypto/rand"
"errors"
"github.com/agl/ed25519"
"github.com/endophage/gotuf/data"
)
// Ed25519 implements a simple in memory cryptosystem for ED25519 keys
type Ed25519 struct {
keys map[string]data.PrivateKey
}
func NewEd25519() *Ed25519 {
return &Ed25519{
make(map[string]data.PrivateKey),
}
}
// addKey allows you to add a private key
func (e *Ed25519) addKey(k data.PrivateKey) {
e.keys[k.ID()] = k
}
func (e *Ed25519) RemoveKey(keyID string) error {
delete(e.keys, keyID)
return nil
}
func (e *Ed25519) Sign(keyIDs []string, toSign []byte) ([]data.Signature, error) {
signatures := make([]data.Signature, 0, len(keyIDs))
for _, kID := range keyIDs {
priv := [ed25519.PrivateKeySize]byte{}
copy(priv[:], e.keys[kID].Private())
sig := ed25519.Sign(&priv, toSign)
signatures = append(signatures, data.Signature{
KeyID: kID,
Method: data.EDDSASignature,
Signature: sig[:],
})
}
return signatures, nil
}
func (e *Ed25519) Create(role string, algorithm data.KeyAlgorithm) (data.PublicKey, error) {
if algorithm != data.ED25519Key {
return nil, errors.New("only ED25519 supported by this cryptoservice")
}
pub, priv, err := ed25519.GenerateKey(rand.Reader)
if err != nil {
return nil, err
}
public := data.NewPublicKey(data.ED25519Key, pub[:])
private := data.NewPrivateKey(data.ED25519Key, pub[:], priv[:])
e.addKey(private)
return public, nil
}
func (e *Ed25519) PublicKeys(keyIDs ...string) (map[string]data.PublicKey, error) {
k := make(map[string]data.PublicKey)
for _, kID := range keyIDs {
if key, ok := e.keys[kID]; ok {
k[kID] = data.PublicKeyFromPrivate(key)
}
}
return k, nil
}
func (e *Ed25519) GetKey(keyID string) data.PublicKey {
return data.PublicKeyFromPrivate(e.keys[keyID])
}
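
// Sketch: creating an in-memory ed25519 key, signing a blob, and checking
// the signature with the matching verifier from this package.
func exampleEd25519(toSign []byte) error {
	service := NewEd25519()
	pub, err := service.Create("targets", data.ED25519Key)
	if err != nil {
		return err
	}
	sigs, err := service.Sign([]string{pub.ID()}, toSign)
	if err != nil {
		return err
	}
	return Ed25519Verifier{}.Verify(pub, sigs[0].Signature, toSign)
}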

View file

@ -0,0 +1,29 @@
package signed
import (
"fmt"
)
type ErrExpired struct {
Role string
Expired string
}
func (e ErrExpired) Error() string {
return fmt.Sprintf("%s expired at %v", e.Role, e.Expired)
}
type ErrLowVersion struct {
Actual int
Current int
}
func (e ErrLowVersion) Error() string {
return fmt.Sprintf("version %d is lower than current version %d", e.Actual, e.Current)
}
type ErrRoleThreshold struct{}
func (e ErrRoleThreshold) Error() string {
return "valid signatures did not meet threshold"
}

View file

@ -0,0 +1,44 @@
package signed
import (
"github.com/endophage/gotuf/data"
)
// SigningService defines the necessary functions to determine
// if a user is able to sign with a key, and to perform signing.
type SigningService interface {
// Sign takes a slice of keyIDs and a piece of data to sign
// and returns a slice of signatures and an error
Sign(keyIDs []string, data []byte) ([]data.Signature, error)
}
// KeyService provides management of keys locally. It will never
// accept or provide private keys. Communication between the KeyService
// and a SigningService happen behind the Create function.
type KeyService interface {
// Create issues a new key pair and is responsible for loading
// the private key into the appropriate signing service.
// The role isn't currently used for anything, but it's here to support
// future features
Create(role string, algorithm data.KeyAlgorithm) (data.PublicKey, error)
// GetKey retrieves the public key if present, otherwise it returns nil
GetKey(keyID string) data.PublicKey
// RemoveKey deletes the specified key
RemoveKey(keyID string) error
}
// CryptoService defines a unified Signing and Key Service as this
// will be most useful for most applications.
type CryptoService interface {
SigningService
KeyService
}
// Verifier defines an interface for verifying signatures. An implementer
// of this interface should verify signatures for one and only one
// signing scheme.
type Verifier interface {
Verify(key data.PublicKey, sig []byte, msg []byte) error
}

View file

@ -0,0 +1,43 @@
package signed
import (
"fmt"
"github.com/Sirupsen/logrus"
"github.com/endophage/gotuf/data"
"github.com/endophage/gotuf/errors"
"strings"
)
// Sign takes a data.Signed and one or more keys, calculates and adds the
// signatures to the data.Signed
func Sign(service CryptoService, s *data.Signed, keys ...data.PublicKey) error {
logrus.Debugf("sign called with %d keys", len(keys))
signatures := make([]data.Signature, 0, len(s.Signatures)+1)
keyIDMemb := make(map[string]struct{})
keyIDs := make([]string, 0, len(keys))
for _, key := range keys {
keyIDMemb[key.ID()] = struct{}{}
keyIDs = append(keyIDs, key.ID())
}
logrus.Debugf("Generated list of signing IDs: %s", strings.Join(keyIDs, ", "))
for _, sig := range s.Signatures {
if _, ok := keyIDMemb[sig.KeyID]; ok {
continue
}
signatures = append(signatures, sig)
}
newSigs, err := service.Sign(keyIDs, s.Signed)
if err != nil {
return err
}
if len(newSigs) < 1 {
return errors.ErrInsufficientSignatures{
Name: fmt.Sprintf("Cryptoservice failed to produce any signatures for keys with IDs: %s", strings.Join(keyIDs, ", ")),
Err: nil,
}
}
logrus.Debugf("appending %d new signatures", len(newSigs))
s.Signatures = append(signatures, newSigs...)
return nil
}

View file

@ -0,0 +1,235 @@
package signed
import (
"crypto"
"crypto/ecdsa"
"crypto/rsa"
"crypto/sha256"
"crypto/x509"
"encoding/pem"
"math/big"
"reflect"
"github.com/Sirupsen/logrus"
"github.com/agl/ed25519"
"github.com/endophage/gotuf/data"
)
// Verifiers serves as a map of all verifiers available on the system and
// can be injected into a verificationService. For testing and configuration
// purposes, it will not be used by default.
var Verifiers = map[data.SigAlgorithm]Verifier{
data.RSAPSSSignature: RSAPSSVerifier{},
data.RSAPKCS1v15Signature: RSAPKCS1v15Verifier{},
data.PyCryptoSignature: RSAPyCryptoVerifier{},
data.ECDSASignature: ECDSAVerifier{},
data.EDDSASignature: Ed25519Verifier{},
}
// RegisterVerifier provides a convenience function for init() functions
// to register additional verifiers or replace existing ones.
func RegisterVerifier(algorithm data.SigAlgorithm, v Verifier) {
curr, ok := Verifiers[algorithm]
if ok {
typOld := reflect.TypeOf(curr)
typNew := reflect.TypeOf(v)
logrus.Debugf(
"replacing already loaded verifier %s:%s with %s:%s",
typOld.PkgPath(), typOld.Name(),
typNew.PkgPath(), typNew.Name(),
)
} else {
logrus.Debug("adding verifier for: ", algorithm)
}
Verifiers[algorithm] = v
}
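
// Sketch of plugging in a custom scheme via RegisterVerifier; the algorithm
// name and the verifier itself are hypothetical.
type noopVerifier struct{}

// Verify accepts everything; a real verifier must check sig over msg with key.
func (v noopVerifier) Verify(key data.PublicKey, sig []byte, msg []byte) error {
	return nil
}

func exampleRegister() {
	RegisterVerifier(data.SigAlgorithm("noop"), noopVerifier{})
}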
type Ed25519Verifier struct{}
func (v Ed25519Verifier) Verify(key data.PublicKey, sig []byte, msg []byte) error {
var sigBytes [ed25519.SignatureSize]byte
if len(sig) != len(sigBytes) {
logrus.Infof("signature length is incorrect, must be %d, was %d.", ed25519.SignatureSize, len(sig))
return ErrInvalid
}
copy(sigBytes[:], sig)
var keyBytes [ed25519.PublicKeySize]byte
copy(keyBytes[:], key.Public())
if !ed25519.Verify(&keyBytes, msg, &sigBytes) {
logrus.Infof("failed ed25519 verification")
return ErrInvalid
}
return nil
}
func verifyPSS(key interface{}, digest, sig []byte) error {
rsaPub, ok := key.(*rsa.PublicKey)
if !ok {
logrus.Infof("value was not an RSA public key")
return ErrInvalid
}
opts := rsa.PSSOptions{SaltLength: sha256.Size, Hash: crypto.SHA256}
if err := rsa.VerifyPSS(rsaPub, crypto.SHA256, digest[:], sig, &opts); err != nil {
logrus.Infof("failed RSAPSS verification: %s", err)
return ErrInvalid
}
return nil
}
func getRSAPubKey(key data.PublicKey) (crypto.PublicKey, error) {
algorithm := key.Algorithm()
var pubKey crypto.PublicKey
switch algorithm {
case data.RSAx509Key:
pemCert, _ := pem.Decode([]byte(key.Public()))
if pemCert == nil {
logrus.Infof("failed to decode PEM-encoded x509 certificate")
return nil, ErrInvalid
}
cert, err := x509.ParseCertificate(pemCert.Bytes)
if err != nil {
logrus.Infof("failed to parse x509 certificate: %s\n", err)
return nil, ErrInvalid
}
pubKey = cert.PublicKey
case data.RSAKey:
var err error
pubKey, err = x509.ParsePKIXPublicKey(key.Public())
if err != nil {
logrus.Infof("failed to parse public key: %s\n", err)
return nil, ErrInvalid
}
default:
logrus.Infof("invalid key type for RSAPSS verifier: %s", algorithm)
return nil, ErrInvalid
}
return pubKey, nil
}
// RSAPSSVerifier checks RSASSA-PSS signatures
type RSAPSSVerifier struct{}
// Verify does the actual check.
func (v RSAPSSVerifier) Verify(key data.PublicKey, sig []byte, msg []byte) error {
pubKey, err := getRSAPubKey(key)
if err != nil {
return err
}
digest := sha256.Sum256(msg)
return verifyPSS(pubKey, digest[:], sig)
}
// RSAPKCS1v15Verifier checks RSA PKCS1v15 signatures
type RSAPKCS1v15Verifier struct{}
func (v RSAPKCS1v15Verifier) Verify(key data.PublicKey, sig []byte, msg []byte) error {
pubKey, err := getRSAPubKey(key)
if err != nil {
return err
}
digest := sha256.Sum256(msg)
rsaPub, ok := pubKey.(*rsa.PublicKey)
if !ok {
logrus.Infof("value was not an RSA public key")
return ErrInvalid
}
if err = rsa.VerifyPKCS1v15(rsaPub, crypto.SHA256, digest[:], sig); err != nil {
logrus.Errorf("Failed verification: %s", err.Error())
return ErrInvalid
}
return nil
}
// RSAPyCryptoVerifier checks RSASSA-PSS signatures produced by PyCrypto
type RSAPyCryptoVerifier struct{}
// Verify does the actual check.
// N.B. We have not been able to make this work in a way that is compatible
// with PyCrypto.
func (v RSAPyCryptoVerifier) Verify(key data.PublicKey, sig []byte, msg []byte) error {
digest := sha256.Sum256(msg)
k, _ := pem.Decode([]byte(key.Public()))
if k == nil {
logrus.Infof("failed to decode PEM-encoded x509 certificate")
return ErrInvalid
}
pub, err := x509.ParsePKIXPublicKey(k.Bytes)
if err != nil {
logrus.Infof("failed to parse public key: %s\n", err)
return ErrInvalid
}
return verifyPSS(pub, digest[:], sig)
}
// ECDSAVerifier checks ECDSA signatures, decoding the keyType appropriately
type ECDSAVerifier struct{}
// Verify does the actual check.
func (v ECDSAVerifier) Verify(key data.PublicKey, sig []byte, msg []byte) error {
algorithm := key.Algorithm()
var pubKey crypto.PublicKey
switch algorithm {
case data.ECDSAx509Key:
pemCert, _ := pem.Decode([]byte(key.Public()))
if pemCert == nil {
logrus.Infof("failed to decode PEM-encoded x509 certificate for keyID: %s", key.ID())
logrus.Debugf("certificate bytes: %s", string(key.Public()))
return ErrInvalid
}
cert, err := x509.ParseCertificate(pemCert.Bytes)
if err != nil {
logrus.Infof("failed to parse x509 certificate: %s\n", err)
return ErrInvalid
}
pubKey = cert.PublicKey
case data.ECDSAKey:
var err error
pubKey, err = x509.ParsePKIXPublicKey(key.Public())
if err != nil {
logrus.Infof("Failed to parse private key for keyID: %s, %s\n", key.ID(), err)
return ErrInvalid
}
default:
logrus.Infof("invalid key type for ECDSA verifier: %s", algorithm)
return ErrInvalid
}
ecdsaPubKey, ok := pubKey.(*ecdsa.PublicKey)
if !ok {
logrus.Infof("value isn't an ECDSA public key")
return ErrInvalid
}
sigLength := len(sig)
expectedOctetLength := 2 * ((ecdsaPubKey.Params().BitSize + 7) >> 3)
if sigLength != expectedOctetLength {
logrus.Infof("signature had an unexpected length")
return ErrInvalid
}
rBytes, sBytes := sig[:sigLength/2], sig[sigLength/2:]
r := new(big.Int).SetBytes(rBytes)
s := new(big.Int).SetBytes(sBytes)
digest := sha256.Sum256(msg)
if !ecdsa.Verify(ecdsaPubKey, digest[:], r, s) {
logrus.Infof("failed ECDSA signature validation")
return ErrInvalid
}
return nil
}
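
// Sketch of the signature layout Verify above expects: a raw r||s
// concatenation with each integer left-padded to the curve's byte length,
// rather than an ASN.1/DER encoding.
func exampleRawECDSASig(r, s *big.Int, byteLen int) []byte {
	sig := make([]byte, 2*byteLen)
	copy(sig[byteLen-len(r.Bytes()):byteLen], r.Bytes())
	copy(sig[2*byteLen-len(s.Bytes()):], s.Bytes())
	return sig
}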

View file

@ -0,0 +1,186 @@
package signed
import (
"encoding/json"
"errors"
"strings"
"time"
"github.com/Sirupsen/logrus"
"github.com/endophage/gotuf/data"
"github.com/endophage/gotuf/keys"
"github.com/tent/canonical-json-go"
)
var (
ErrMissingKey = errors.New("tuf: missing key")
ErrNoSignatures = errors.New("tuf: data has no signatures")
ErrInvalid = errors.New("tuf: signature verification failed")
ErrWrongMethod = errors.New("tuf: invalid signature type")
ErrUnknownRole = errors.New("tuf: unknown role")
ErrWrongType = errors.New("tuf: meta file has wrong type")
)
type signedMeta struct {
Type string `json:"_type"`
Expires string `json:"expires"`
Version int `json:"version"`
}
// VerifyRoot checks if a given root file is valid against a known set of keys.
// Threshold is always assumed to be 1
func VerifyRoot(s *data.Signed, minVersion int, keys map[string]data.PublicKey) error {
if len(s.Signatures) == 0 {
return ErrNoSignatures
}
var decoded map[string]interface{}
if err := json.Unmarshal(s.Signed, &decoded); err != nil {
return err
}
msg, err := cjson.Marshal(decoded)
if err != nil {
return err
}
for _, sig := range s.Signatures {
// method lookup is consistent due to Unmarshal JSON doing lower case for us.
method := sig.Method
verifier, ok := Verifiers[method]
if !ok {
logrus.Debugf("continuing b/c signing method is not supported for verify root: %s\n", sig.Method)
continue
}
key, ok := keys[sig.KeyID]
if !ok {
logrus.Debugf("continuing b/c signing key isn't present in keys: %s\n", sig.KeyID)
continue
}
if err := verifier.Verify(key, sig.Signature, msg); err != nil {
logrus.Debugf("continuing b/c signature was invalid\n")
continue
}
// threshold of 1 so return on first success
return verifyMeta(s, "root", minVersion)
}
return ErrRoleThreshold{}
}
func Verify(s *data.Signed, role string, minVersion int, db *keys.KeyDB) error {
if err := VerifySignatures(s, role, db); err != nil {
return err
}
return verifyMeta(s, role, minVersion)
}
func verifyMeta(s *data.Signed, role string, minVersion int) error {
sm := &signedMeta{}
if err := json.Unmarshal(s.Signed, sm); err != nil {
return err
}
if !data.ValidTUFType(sm.Type) {
return ErrWrongType
}
if IsExpired(sm.Expires) {
logrus.Errorf("Metadata for %s expired", role)
return ErrExpired{Role: role, Expired: sm.Expires}
}
if sm.Version < minVersion {
return ErrLowVersion{sm.Version, minVersion}
}
return nil
}
var IsExpired = func(t string) bool {
ts, err := time.Parse(time.RFC3339, t)
if err != nil {
ts, err = time.Parse("2006-01-02 15:04:05 MST", t)
if err != nil {
return false
}
}
return ts.Sub(time.Now()) <= 0
}
func VerifySignatures(s *data.Signed, role string, db *keys.KeyDB) error {
if len(s.Signatures) == 0 {
return ErrNoSignatures
}
roleData := db.GetRole(role)
if roleData == nil {
return ErrUnknownRole
}
if roleData.Threshold < 1 {
return ErrRoleThreshold{}
}
logrus.Debugf("%s role has key IDs: %s", role, strings.Join(roleData.KeyIDs, ","))
var decoded map[string]interface{}
if err := json.Unmarshal(s.Signed, &decoded); err != nil {
return err
}
msg, err := cjson.Marshal(decoded)
if err != nil {
return err
}
valid := make(map[string]struct{})
for _, sig := range s.Signatures {
logrus.Debug("verifying signature for key ID: ", sig.KeyID)
if !roleData.ValidKey(sig.KeyID) {
logrus.Debugf("continuing b/c keyid was invalid: %s for roledata %s\n", sig.KeyID, roleData)
continue
}
key := db.GetKey(sig.KeyID)
if key == nil {
logrus.Debugf("continuing b/c keyid lookup was nil: %s\n", sig.KeyID)
continue
}
// method lookup is consistent due to Unmarshal JSON doing lower case for us.
method := sig.Method
verifier, ok := Verifiers[method]
if !ok {
logrus.Debugf("continuing b/c signing method is not supported: %s\n", sig.Method)
continue
}
if err := verifier.Verify(key, sig.Signature, msg); err != nil {
logrus.Debugf("continuing b/c signature was invalid\n")
continue
}
valid[sig.KeyID] = struct{}{}
}
if len(valid) < roleData.Threshold {
return ErrRoleThreshold{}
}
return nil
}
func Unmarshal(b []byte, v interface{}, role string, minVersion int, db *keys.KeyDB) error {
s := &data.Signed{}
if err := json.Unmarshal(b, s); err != nil {
return err
}
if err := Verify(s, role, minVersion, db); err != nil {
return err
}
return json.Unmarshal(s.Signed, v)
}
func UnmarshalTrusted(b []byte, v interface{}, role string, db *keys.KeyDB) error {
s := &data.Signed{}
if err := json.Unmarshal(b, s); err != nil {
return err
}
if err := VerifySignatures(s, role, db); err != nil {
return err
}
return json.Unmarshal(s.Signed, v)
}
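
// Sketch: decoding raw metadata bytes with verification in one step. The db
// must already hold the targets role and its keys (see the keys package).
func exampleUnmarshalTargets(b []byte, db *keys.KeyDB) (*data.Targets, error) {
	t := &data.Targets{}
	if err := Unmarshal(b, t, "targets", 0, db); err != nil {
		return nil, err
	}
	return t, nil
}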

View file

@ -0,0 +1,252 @@
package store
import (
"database/sql"
"encoding/hex"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path"
logrus "github.com/Sirupsen/logrus"
"github.com/endophage/gotuf/data"
"github.com/endophage/gotuf/utils"
)
const (
tufLoc string = "/tmp/tuf"
metadataSubDir string = "metadata"
)
// implements LocalStore
type dbStore struct {
db sql.DB
imageName string
}
// DBStore takes a database connection and the FQDN of the image
func DBStore(db *sql.DB, imageName string) *dbStore {
store := dbStore{
db: *db,
imageName: imageName,
}
return &store
}
// GetMeta loads existing TUF metadata files
func (dbs *dbStore) GetMeta(name string) ([]byte, error) {
data, err := dbs.readFile(name)
if err != nil {
return nil, err
}
return data, err
}
// SetMeta writes individual TUF metadata files
func (dbs *dbStore) SetMeta(name string, meta []byte) error {
return dbs.writeFile(name, meta)
}
// WalkStagedTargets walks all targets in scope
func (dbs *dbStore) WalkStagedTargets(paths []string, targetsFn targetsWalkFunc) error {
if len(paths) == 0 {
files := dbs.loadTargets("")
for path, meta := range files {
if err := targetsFn(path, meta); err != nil {
return err
}
}
return nil
}
for _, path := range paths {
files := dbs.loadTargets(path)
meta, ok := files[path]
if !ok {
return fmt.Errorf("File Not Found")
}
if err := targetsFn(path, meta); err != nil {
return err
}
}
return nil
}
// Commit writes a set of (possibly consistent) TUF metadata files
func (dbs *dbStore) Commit(metafiles map[string][]byte, consistent bool, hashes map[string]data.Hashes) error {
// TODO (endophage): write meta files to cache
return nil
}
// GetKeys returns private keys
func (dbs *dbStore) GetKeys(role string) ([]data.PrivateKey, error) {
keys := []data.PrivateKey{}
var r *sql.Rows
var err error
sql := "SELECT `key` FROM `keys` WHERE `role` = ? AND `namespace` = ?;"
tx, err := dbs.db.Begin()
if err != nil {
	return nil, err
}
defer tx.Rollback()
r, err = tx.Query(sql, role, dbs.imageName)
if err != nil {
return nil, err
}
defer r.Close()
for r.Next() {
var jsonStr string
key := new(data.TUFKey)
r.Scan(&jsonStr)
err := json.Unmarshal([]byte(jsonStr), key)
if err != nil {
return nil, err
}
keys = append(keys, key)
}
return keys, nil
}
// SaveKey saves a new private key
func (dbs *dbStore) SaveKey(role string, key data.PrivateKey) error {
jsonBytes, err := json.Marshal(key)
if err != nil {
return fmt.Errorf("Could not JSON Marshal Key")
}
tx, err := dbs.db.Begin()
if err != nil {
logrus.Error(err)
return err
}
_, err = tx.Exec("INSERT INTO `keys` (`namespace`, `role`, `key`) VALUES (?,?,?);", dbs.imageName, role, string(jsonBytes))
tx.Commit()
return err
}
// Clean removes staged targets
func (dbs *dbStore) Clean() error {
// TODO (endophage): purge stale items from db? May just/also need a remove method
return nil
}
// AddBlob adds an object to the store
func (dbs *dbStore) AddBlob(path string, meta data.FileMeta) {
path = utils.NormalizeTarget(path)
jsonbytes := []byte{}
if meta.Custom != nil {
jsonbytes, _ = meta.Custom.MarshalJSON()
}
tx, err := dbs.db.Begin()
if err != nil {
logrus.Error(err)
return
}
_, err = tx.Exec("INSERT OR REPLACE INTO `filemeta` VALUES (?,?,?,?);", dbs.imageName, path, meta.Length, jsonbytes)
if err != nil {
logrus.Error(err)
}
tx.Commit()
dbs.addBlobHashes(path, meta.Hashes)
}
func (dbs *dbStore) addBlobHashes(path string, hashes data.Hashes) {
tx, err := dbs.db.Begin()
if err != nil {
logrus.Error(err)
}
for alg, hash := range hashes {
_, err := tx.Exec("INSERT OR REPLACE INTO `filehashes` VALUES (?,?,?,?);", dbs.imageName, path, alg, hex.EncodeToString(hash))
if err != nil {
logrus.Error(err)
}
}
tx.Commit()
}
// RemoveBlob removes an object from the store
func (dbs *dbStore) RemoveBlob(path string) error {
tx, err := dbs.db.Begin()
if err != nil {
logrus.Error(err)
return err
}
_, err = tx.Exec("DELETE FROM `filemeta` WHERE `path`=? AND `namespace`=?", path, dbs.imageName)
if err == nil {
tx.Commit()
} else {
tx.Rollback()
}
return err
}
func (dbs *dbStore) loadTargets(path string) map[string]data.FileMeta {
var err error
var r *sql.Rows
files := make(map[string]data.FileMeta)
tx, err := dbs.db.Begin()
if err != nil {
	return files
}
defer tx.Rollback()
sql := "SELECT `filemeta`.`path`, `size`, `alg`, `hash`, `custom` FROM `filemeta` JOIN `filehashes` ON `filemeta`.`path` = `filehashes`.`path` AND `filemeta`.`namespace` = `filehashes`.`namespace` WHERE `filemeta`.`namespace`=?"
if path != "" {
sql = fmt.Sprintf("%s %s", sql, "AND `filemeta`.`path`=?")
r, err = tx.Query(sql, dbs.imageName, path)
} else {
r, err = tx.Query(sql, dbs.imageName)
}
if err != nil {
return files
}
defer r.Close()
for r.Next() {
var absPath, alg, hash string
var size int64
var custom []byte
r.Scan(&absPath, &size, &alg, &hash, &custom)
hashBytes, err := hex.DecodeString(hash)
if err != nil {
// We're going to skip items with unparseable hashes as they
// won't be valid in the targets
logrus.Debug("Hash was not stored in hex as expected")
continue
}
if file, ok := files[absPath]; ok {
file.Hashes[alg] = hashBytes
} else {
file = data.FileMeta{
Length: size,
Hashes: data.Hashes{
alg: hashBytes,
},
}
if custom != nil {
file.Custom = json.RawMessage(custom)
}
files[absPath] = file
}
}
return files
}
func (dbs *dbStore) writeFile(name string, content []byte) error {
jsonName := fmt.Sprintf("%s.json", name)
fullPath := path.Join(tufLoc, metadataSubDir, dbs.imageName, jsonName)
dirPath := path.Dir(fullPath)
err := os.MkdirAll(dirPath, 0744)
if err != nil {
logrus.Error("error creating directory path to TUF cache")
return err
}
err = ioutil.WriteFile(fullPath, content, 0744)
if err != nil {
logrus.Error("Error writing file")
}
return err
}
func (dbs *dbStore) readFile(name string) ([]byte, error) {
jsonName := fmt.Sprintf("%s.json", name)
fullPath := path.Join(tufLoc, metadataSubDir, dbs.imageName, jsonName)
content, err := ioutil.ReadFile(fullPath)
return content, err
}

View file

@ -0,0 +1,7 @@
package store
type ErrMetaNotFound struct{}
func (err ErrMetaNotFound) Error() string {
return "no trust data available"
}

View file

@ -0,0 +1,67 @@
package store
import (
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
)
func NewFilesystemStore(baseDir, metaSubDir, metaExtension, targetsSubDir string) (*filesystemStore, error) {
metaDir := path.Join(baseDir, metaSubDir)
targetsDir := path.Join(baseDir, targetsSubDir)
// Make sure we can create the necessary dirs and they are writable
err := os.MkdirAll(metaDir, 0700)
if err != nil {
return nil, err
}
err = os.MkdirAll(targetsDir, 0700)
if err != nil {
return nil, err
}
return &filesystemStore{
baseDir: baseDir,
metaDir: metaDir,
metaExtension: metaExtension,
targetsDir: targetsDir,
}, nil
}
type filesystemStore struct {
baseDir string
metaDir string
metaExtension string
targetsDir string
}
// GetMeta returns the metadata blob for the given name. The size argument
// is currently ignored; the whole file is read regardless.
func (f *filesystemStore) GetMeta(name string, size int64) ([]byte, error) {
	fileName := fmt.Sprintf("%s.%s", name, f.metaExtension)
	path := filepath.Join(f.metaDir, fileName)
	meta, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	return meta, nil
}
func (f *filesystemStore) SetMultiMeta(metas map[string][]byte) error {
for role, blob := range metas {
err := f.SetMeta(role, blob)
if err != nil {
return err
}
}
return nil
}
func (f *filesystemStore) SetMeta(name string, meta []byte) error {
fileName := fmt.Sprintf("%s.%s", name, f.metaExtension)
path := filepath.Join(f.metaDir, fileName)
if err := ioutil.WriteFile(path, meta, 0600); err != nil {
return err
}
return nil
}
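// Illustrative sketch (not part of the original source): creating a
// filesystemStore and writing then reading back a metadata blob. The base
// directory, subdirectory names, and "json" extension are caller-chosen
// assumptions.
func exampleFilesystemStore() error {
	s, err := NewFilesystemStore("/tmp/tuf", "metadata", "json", "targets")
	if err != nil {
		return err
	}
	if err := s.SetMeta("root", []byte(`{"signed":{}}`)); err != nil {
		return err
	}
	_, err = s.GetMeta("root", 0) // size is ignored by this implementation
	return err
}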

View file

@ -0,0 +1,213 @@
package store
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"mime/multipart"
"net/http"
"net/url"
"path"
"github.com/Sirupsen/logrus"
)
type ErrServerUnavailable struct{}
func (err ErrServerUnavailable) Error() string {
return "Unable to reach trust server at this time."
}
type ErrShortRead struct{}
func (err ErrShortRead) Error() string {
return "Trust server returned incompelete response."
}
type ErrMaliciousServer struct{}
func (err ErrMaliciousServer) Error() string {
return "Trust server returned a bad response."
}
// HTTPStore manages pulling and pushing metadata from and to a remote
// service over HTTP. It assumes the URL structure of the remote service
// maps identically to the structure of the TUF repo:
// <baseURL>/<metaPrefix>/(root|targets|snapshot|timestamp).json
// <baseURL>/<targetsPrefix>/foo.sh
//
// If consistent snapshots are disabled, it is advised that caching is not
// enabled. Simply set a cachePath (and ensure it is writable) to enable
// caching.
type HTTPStore struct {
baseURL url.URL
metaPrefix string
metaExtension string
targetsPrefix string
keyExtension string
roundTrip http.RoundTripper
}
func NewHTTPStore(baseURL, metaPrefix, metaExtension, targetsPrefix, keyExtension string, roundTrip http.RoundTripper) (*HTTPStore, error) {
base, err := url.Parse(baseURL)
if err != nil {
return nil, err
}
if !base.IsAbs() {
return nil, errors.New("HTTPStore requires an absolute baseURL")
}
return &HTTPStore{
baseURL: *base,
metaPrefix: metaPrefix,
metaExtension: metaExtension,
targetsPrefix: targetsPrefix,
keyExtension: keyExtension,
roundTrip: roundTrip,
}, nil
}
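// Illustrative sketch (not part of the original source): constructing an
// HTTPStore against a hypothetical trust server URL. The prefixes mirror
// the URL layout described in the type comment above, and
// http.DefaultTransport stands in for a real, authenticated RoundTripper.
func exampleHTTPStore() (*HTTPStore, error) {
	return NewHTTPStore(
		"https://notary.example.com/v2/",
		"_trust/tuf",     // metaPrefix
		"json",           // metaExtension
		"_trust/targets", // targetsPrefix
		"key",            // keyExtension
		http.DefaultTransport,
	)
}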
// GetMeta downloads the named meta file with the given size. A short body
// is acceptable because in the case of timestamp.json, the size is a cap,
// not an exact length.
func (s HTTPStore) GetMeta(name string, size int64) ([]byte, error) {
url, err := s.buildMetaURL(name)
if err != nil {
return nil, err
}
req, err := http.NewRequest("GET", url.String(), nil)
if err != nil {
return nil, err
}
resp, err := s.roundTrip.RoundTrip(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.ContentLength > size {
return nil, ErrMaliciousServer{}
}
logrus.Debugf("%d when retrieving metadata for %s", resp.StatusCode, name)
if resp.StatusCode == http.StatusNotFound {
return nil, ErrMetaNotFound{}
}
	b := io.LimitReader(resp.Body, size)
	body, err := ioutil.ReadAll(b)
	if err != nil {
		return nil, err
	}
	if resp.ContentLength > 0 && int64(len(body)) < resp.ContentLength {
		return nil, ErrShortRead{}
	}
	return body, nil
}
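// Illustrative sketch (not part of the original source): because the size
// argument to GetMeta is a cap rather than an exact length, callers fetch
// timestamp.json with an assumed policy limit instead of a known size.
func exampleFetchTimestamp(s HTTPStore) ([]byte, error) {
	const timestampSizeCap = 5 << 10 // 5 KiB; an assumed cap, not a spec value
	return s.GetMeta("timestamp", timestampSizeCap)
}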
func (s HTTPStore) SetMeta(name string, blob []byte) error {
url, err := s.buildMetaURL("")
if err != nil {
return err
}
req, err := http.NewRequest("POST", url.String(), bytes.NewReader(blob))
if err != nil {
return err
}
_, err = s.roundTrip.RoundTrip(req)
return err
}
func (s HTTPStore) SetMultiMeta(metas map[string][]byte) error {
url, err := s.buildMetaURL("")
if err != nil {
return err
}
body := &bytes.Buffer{}
writer := multipart.NewWriter(body)
	for role, blob := range metas {
		part, err := writer.CreateFormFile("files", role)
		if err != nil {
			return err
		}
		_, err = io.Copy(part, bytes.NewBuffer(blob))
		if err != nil {
			return err
		}
	}
	err = writer.Close()
	if err != nil {
		return err
	}
	req, err := http.NewRequest("POST", url.String(), body)
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", writer.FormDataContentType())
_, err = s.roundTrip.RoundTrip(req)
return err
}
func (s HTTPStore) buildMetaURL(name string) (*url.URL, error) {
var filename string
if name != "" {
filename = fmt.Sprintf("%s.%s", name, s.metaExtension)
}
uri := path.Join(s.metaPrefix, filename)
return s.buildURL(uri)
}
func (s HTTPStore) buildTargetsURL(name string) (*url.URL, error) {
uri := path.Join(s.targetsPrefix, name)
return s.buildURL(uri)
}
func (s HTTPStore) buildKeyURL(name string) (*url.URL, error) {
filename := fmt.Sprintf("%s.%s", name, s.keyExtension)
uri := path.Join(s.metaPrefix, filename)
return s.buildURL(uri)
}
func (s HTTPStore) buildURL(uri string) (*url.URL, error) {
sub, err := url.Parse(uri)
if err != nil {
return nil, err
}
return s.baseURL.ResolveReference(sub), nil
}
// GetTarget returns a reader for the desired target or an error.
// N.B. The caller is responsible for closing the reader.
func (s HTTPStore) GetTarget(path string) (io.ReadCloser, error) {
url, err := s.buildTargetsURL(path)
if err != nil {
return nil, err
}
logrus.Debug("Attempting to download target: ", url.String())
req, err := http.NewRequest("GET", url.String(), nil)
if err != nil {
return nil, err
}
resp, err := s.roundTrip.RoundTrip(req)
if err != nil {
return nil, err
}
return resp.Body, nil
}
func (s HTTPStore) GetKey(role string) ([]byte, error) {
url, err := s.buildKeyURL(role)
if err != nil {
return nil, err
}
req, err := http.NewRequest("GET", url.String(), nil)
if err != nil {
return nil, err
}
resp, err := s.roundTrip.RoundTrip(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
return body, nil
}

View file

@ -0,0 +1,35 @@
package store
import (
"io"
"github.com/endophage/gotuf/data"
)
type targetsWalkFunc func(path string, meta data.FileMeta) error
type MetadataStore interface {
GetMeta(name string, size int64) ([]byte, error)
SetMeta(name string, blob []byte) error
SetMultiMeta(map[string][]byte) error
}
type PublicKeyStore interface {
GetKey(role string) ([]byte, error)
}
// [endophage] I'm of the opinion this should go away.
type TargetStore interface {
WalkStagedTargets(paths []string, targetsFn targetsWalkFunc) error
}
type LocalStore interface {
MetadataStore
TargetStore
}
type RemoteStore interface {
MetadataStore
PublicKeyStore
GetTarget(path string) (io.ReadCloser, error)
}
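// Compile-time interface checks (illustrative, not part of the original
// source): a common Go idiom to assert that the concrete stores defined in
// this package actually satisfy the interfaces above.
var (
	_ RemoteStore = &HTTPStore{}
	_ LocalStore  = &memoryStore{}
)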

View file

@ -0,0 +1,89 @@
package store
import (
"bytes"
"fmt"
"io"
"github.com/endophage/gotuf/data"
"github.com/endophage/gotuf/errors"
"github.com/endophage/gotuf/utils"
)
func NewMemoryStore(meta map[string][]byte, files map[string][]byte) *memoryStore {
if meta == nil {
meta = make(map[string][]byte)
}
if files == nil {
files = make(map[string][]byte)
}
return &memoryStore{
meta: meta,
files: files,
keys: make(map[string][]data.PrivateKey),
}
}
type memoryStore struct {
meta map[string][]byte
files map[string][]byte
keys map[string][]data.PrivateKey
}
func (m *memoryStore) GetMeta(name string, size int64) ([]byte, error) {
return m.meta[name], nil
}
func (m *memoryStore) SetMeta(name string, meta []byte) error {
m.meta[name] = meta
return nil
}
func (m *memoryStore) SetMultiMeta(metas map[string][]byte) error {
for role, blob := range metas {
m.SetMeta(role, blob)
}
return nil
}
func (m *memoryStore) GetTarget(path string) (io.ReadCloser, error) {
return &utils.NoopCloser{Reader: bytes.NewReader(m.files[path])}, nil
}
func (m *memoryStore) WalkStagedTargets(paths []string, targetsFn targetsWalkFunc) error {
if len(paths) == 0 {
for path, dat := range m.files {
meta, err := data.NewFileMeta(bytes.NewReader(dat), "sha256")
if err != nil {
return err
}
if err = targetsFn(path, meta); err != nil {
return err
}
}
return nil
}
for _, path := range paths {
dat, ok := m.files[path]
if !ok {
return errors.ErrFileNotFound{path}
}
meta, err := data.NewFileMeta(bytes.NewReader(dat), "sha256")
if err != nil {
return err
}
if err = targetsFn(path, meta); err != nil {
return err
}
}
return nil
}
// Commit is a no-op for the memoryStore; there is nothing to persist.
func (m *memoryStore) Commit(map[string][]byte, bool, map[string]data.Hashes) error {
	return nil
}
func (m *memoryStore) GetKey(role string) ([]byte, error) {
return nil, fmt.Errorf("GetKey is not implemented for the memoryStore")
}

View file

@ -0,0 +1,575 @@
// Package tuf defines the core TUF logic around manipulating a repo.
package tuf
import (
"bytes"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"path/filepath"
"strings"
"time"
"github.com/Sirupsen/logrus"
"github.com/endophage/gotuf/data"
"github.com/endophage/gotuf/errors"
"github.com/endophage/gotuf/keys"
"github.com/endophage/gotuf/signed"
"github.com/endophage/gotuf/utils"
)
type ErrSigVerifyFail struct{}
func (e ErrSigVerifyFail) Error() string {
return "Error: Signature verification failed"
}
type ErrMetaExpired struct{}
func (e ErrMetaExpired) Error() string {
return "Error: Metadata has expired"
}
type ErrLocalRootExpired struct{}
func (e ErrLocalRootExpired) Error() string {
return "Error: Local Root Has Expired"
}
type ErrNotLoaded struct {
role string
}
func (err ErrNotLoaded) Error() string {
return fmt.Sprintf("%s role has not been loaded", err.role)
}
// TufRepo is an in memory representation of the TUF Repo.
// It operates at the data.Signed level, accepting and producing
// data.Signed objects. Users of a TufRepo are responsible for
// fetching raw JSON and using the Set* functions to populate
// the TufRepo instance.
type TufRepo struct {
Root *data.SignedRoot
Targets map[string]*data.SignedTargets
Snapshot *data.SignedSnapshot
Timestamp *data.SignedTimestamp
keysDB *keys.KeyDB
cryptoService signed.CryptoService
}
// NewTufRepo initializes a TufRepo instance with a keysDB and a signer.
// If the TufRepo will only be used for reading, the signer should be nil.
func NewTufRepo(keysDB *keys.KeyDB, cryptoService signed.CryptoService) *TufRepo {
repo := &TufRepo{
Targets: make(map[string]*data.SignedTargets),
keysDB: keysDB,
cryptoService: cryptoService,
}
return repo
}
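// Illustrative sketch (not part of the original source): populating a
// read-only TufRepo from raw root JSON, as the type comment above
// describes. rawRoot is a hypothetical blob the caller fetched from some
// store.
func exampleLoadRoot(kdb *keys.KeyDB, rawRoot []byte) (*TufRepo, error) {
	repo := NewTufRepo(kdb, nil) // nil CryptoService: read-only use
	s := &data.Signed{}
	if err := json.Unmarshal(rawRoot, s); err != nil {
		return nil, err
	}
	if err := repo.SetRoot(s); err != nil {
		return nil, err
	}
	return repo, nil
}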
// AddBaseKeys is used to add keys to the role in root.json
func (tr *TufRepo) AddBaseKeys(role string, keys ...*data.TUFKey) error {
if tr.Root == nil {
return ErrNotLoaded{role: "root"}
}
for _, k := range keys {
// Store only the public portion
pubKey := *k
pubKey.Value.Private = nil
tr.Root.Signed.Keys[pubKey.ID()] = &pubKey
tr.keysDB.AddKey(&pubKey)
tr.Root.Signed.Roles[role].KeyIDs = append(tr.Root.Signed.Roles[role].KeyIDs, pubKey.ID())
}
tr.Root.Dirty = true
return nil
}
// RemoveBaseKeys is used to remove keys from the roles in root.json
func (tr *TufRepo) RemoveBaseKeys(role string, keyIDs ...string) error {
if tr.Root == nil {
return ErrNotLoaded{role: "root"}
}
keep := make([]string, 0)
toDelete := make(map[string]struct{})
// remove keys from specified role
for _, k := range keyIDs {
toDelete[k] = struct{}{}
for _, rk := range tr.Root.Signed.Roles[role].KeyIDs {
if k != rk {
keep = append(keep, rk)
}
}
}
tr.Root.Signed.Roles[role].KeyIDs = keep
// determine which keys are no longer in use by any roles
for roleName, r := range tr.Root.Signed.Roles {
if roleName == role {
continue
}
for _, rk := range r.KeyIDs {
if _, ok := toDelete[rk]; ok {
delete(toDelete, rk)
}
}
}
// remove keys no longer in use by any roles
	for k := range toDelete {
delete(tr.Root.Signed.Keys, k)
}
tr.Root.Dirty = true
return nil
}
// UpdateDelegations updates the appropriate delegations, either adding
// a new delegation or updating an existing one. If keys are
// provided, the IDs will be added to the role (if they do not exist
// there already), and the keys will be added to the targets file.
// The "before" argument specifies another role which this new role
// will be added in front of (i.e. higher priority) in the delegation list.
// An empty before string indicates to add the role to the end of the
// delegation list.
// A new, empty, targets file will be created for the new role.
func (tr *TufRepo) UpdateDelegations(role *data.Role, keys []data.Key, before string) error {
if !role.IsDelegation() || !role.IsValid() {
return errors.ErrInvalidRole{}
}
parent := filepath.Dir(role.Name)
p, ok := tr.Targets[parent]
if !ok {
return errors.ErrInvalidRole{}
}
for _, k := range keys {
key := data.NewPublicKey(k.Algorithm(), k.Public())
if !utils.StrSliceContains(role.KeyIDs, key.ID()) {
role.KeyIDs = append(role.KeyIDs, key.ID())
}
p.Signed.Delegations.Keys[key.ID()] = key
tr.keysDB.AddKey(key)
}
	// replace the delegation in place if a role with the same name already
	// exists, otherwise append it
	foundAt := -1
	for i, r := range p.Signed.Delegations.Roles {
		if r.Name == role.Name {
			foundAt = i
			break
		}
	}
	if foundAt >= 0 {
		p.Signed.Delegations.Roles[foundAt] = role
	} else {
		p.Signed.Delegations.Roles = append(p.Signed.Delegations.Roles, role)
	}
p.Dirty = true
roleTargets := data.NewTargets() // NewTargets always marked Dirty
tr.Targets[role.Name] = roleTargets
tr.keysDB.AddRole(role)
return nil
}
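// Illustrative sketch (not part of the original source): delegating a path
// prefix from "targets" to a new role. The role name, threshold, and the
// argument order of data.NewRole (paths, then hash prefixes) are
// assumptions based on how it is called elsewhere in this file.
func exampleDelegate(tr *TufRepo, key data.Key) error {
	role, err := data.NewRole(
		"targets/releases",    // delegated role name
		1,                     // signature threshold
		nil,                   // key IDs; UpdateDelegations fills these from keys
		[]string{"releases/"}, // paths the role may sign for
		nil,                   // no hashed path prefixes
	)
	if err != nil {
		return err
	}
	return tr.UpdateDelegations(role, []data.Key{key}, "")
}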
// InitRepo creates the base files for a repo. It inspects data.ValidRoles and
// data.ValidTypes to determine what the role names and filenames should be. It
// also relies on the keysDB having already been populated with the keys and
// roles.
func (tr *TufRepo) InitRepo(consistent bool) error {
if err := tr.InitRoot(consistent); err != nil {
return err
}
if err := tr.InitTargets(); err != nil {
return err
}
if err := tr.InitSnapshot(); err != nil {
return err
}
return tr.InitTimestamp()
}
func (tr *TufRepo) InitRoot(consistent bool) error {
rootRoles := make(map[string]*data.RootRole)
rootKeys := make(map[string]data.PublicKey)
for _, r := range data.ValidRoles {
role := tr.keysDB.GetRole(r)
if role == nil {
return errors.ErrInvalidRole{}
}
rootRoles[r] = &role.RootRole
for _, kid := range role.KeyIDs {
// don't need to check if GetKey returns nil, Key presence was
// checked by KeyDB when role was added.
key := tr.keysDB.GetKey(kid)
// Create new key object to doubly ensure private key is excluded
k := data.NewPublicKey(key.Algorithm(), key.Public())
rootKeys[kid] = k
}
}
root, err := data.NewRoot(rootKeys, rootRoles, consistent)
if err != nil {
return err
}
tr.Root = root
return nil
}
func (tr *TufRepo) InitTargets() error {
targets := data.NewTargets()
tr.Targets[data.ValidRoles["targets"]] = targets
return nil
}
func (tr *TufRepo) InitSnapshot() error {
root, err := tr.Root.ToSigned()
if err != nil {
return err
}
targets, err := tr.Targets[data.ValidRoles["targets"]].ToSigned()
if err != nil {
return err
}
snapshot, err := data.NewSnapshot(root, targets)
if err != nil {
return err
}
tr.Snapshot = snapshot
return nil
}
func (tr *TufRepo) InitTimestamp() error {
snap, err := tr.Snapshot.ToSigned()
if err != nil {
return err
}
timestamp, err := data.NewTimestamp(snap)
if err != nil {
return err
}
tr.Timestamp = timestamp
return nil
}
// SetRoot parses the Signed object into a SignedRoot object, sets
// the keys and roles in the KeyDB, and sets the TufRepo.Root field
// to the SignedRoot object.
func (tr *TufRepo) SetRoot(s *data.Signed) error {
r, err := data.RootFromSigned(s)
if err != nil {
return err
}
for _, key := range r.Signed.Keys {
logrus.Debug("Adding key ", key.ID())
tr.keysDB.AddKey(key)
}
for roleName, role := range r.Signed.Roles {
logrus.Debugf("Adding role %s with keys %s", roleName, strings.Join(role.KeyIDs, ","))
baseRole, err := data.NewRole(
roleName,
role.Threshold,
role.KeyIDs,
nil,
nil,
)
if err != nil {
return err
}
err = tr.keysDB.AddRole(baseRole)
if err != nil {
return err
}
}
tr.Root = r
return nil
}
// SetTimestamp parses the Signed object into a SignedTimestamp object
// and sets the TufRepo.Timestamp field.
func (tr *TufRepo) SetTimestamp(s *data.Signed) error {
ts, err := data.TimestampFromSigned(s)
if err != nil {
return err
}
tr.Timestamp = ts
return nil
}
// SetSnapshot parses the Signed object into a SignedSnapshots object
// and sets the TufRepo.Snapshot field.
func (tr *TufRepo) SetSnapshot(s *data.Signed) error {
snap, err := data.SnapshotFromSigned(s)
if err != nil {
return err
}
tr.Snapshot = snap
return nil
}
// SetTargets parses the Signed object into a SignedTargets object,
// reads the delegated roles and keys into the KeyDB, and sets the
// SignedTargets object against the role in the TufRepo.Targets map.
func (tr *TufRepo) SetTargets(role string, s *data.Signed) error {
t, err := data.TargetsFromSigned(s)
if err != nil {
return err
}
for _, k := range t.Signed.Delegations.Keys {
tr.keysDB.AddKey(k)
}
for _, r := range t.Signed.Delegations.Roles {
tr.keysDB.AddRole(r)
}
tr.Targets[role] = t
return nil
}
// TargetMeta returns the FileMeta entry for the given path in the
// targets file associated with the given role. This may be nil if
// the target isn't found in the targets file.
func (tr TufRepo) TargetMeta(role, path string) *data.FileMeta {
if t, ok := tr.Targets[role]; ok {
if m, ok := t.Signed.Targets[path]; ok {
return &m
}
}
return nil
}
// TargetDelegations returns a slice of Roles that are valid publishers
// for the target path provided.
func (tr TufRepo) TargetDelegations(role, path, pathHex string) []*data.Role {
if pathHex == "" {
pathDigest := sha256.Sum256([]byte(path))
pathHex = hex.EncodeToString(pathDigest[:])
}
roles := make([]*data.Role, 0)
if t, ok := tr.Targets[role]; ok {
for _, r := range t.Signed.Delegations.Roles {
if r.CheckPrefixes(pathHex) || r.CheckPaths(path) {
roles = append(roles, r)
}
}
}
return roles
}
// FindTarget attempts to find the target represented by the given
// path by starting at the top targets file and traversing
// appropriate delegations until the first entry is found or it
// runs out of locations to search.
// N.B. Multiple entries may exist in different delegated roles
// for the same target. Only the first one encountered is returned.
func (tr TufRepo) FindTarget(path string) *data.FileMeta {
pathDigest := sha256.Sum256([]byte(path))
pathHex := hex.EncodeToString(pathDigest[:])
var walkTargets func(role string) *data.FileMeta
walkTargets = func(role string) *data.FileMeta {
if m := tr.TargetMeta(role, path); m != nil {
return m
}
// Depth first search of delegations based on order
// as presented in current targets file for role:
for _, r := range tr.TargetDelegations(role, path, pathHex) {
if m := walkTargets(r.Name); m != nil {
return m
}
}
return nil
}
return walkTargets("targets")
}
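// Illustrative sketch (not part of the original source): resolving a
// target through the delegation tree and logging its size if found.
func exampleFindTarget(tr TufRepo) {
	if m := tr.FindTarget("foo/bar.sh"); m != nil {
		logrus.Debugf("found target of %d bytes", m.Length)
	}
}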
// AddTargets attempts to add the given targets specifically to
// the directed role. If the role is unknown, the full set of targets is
// returned with an error; targets outside the role's delegated paths are
// returned as invalid, also with an error.
func (tr *TufRepo) AddTargets(role string, targets data.Files) (data.Files, error) {
t, ok := tr.Targets[role]
if !ok {
return targets, errors.ErrInvalidRole{role}
}
invalid := make(data.Files)
for path, target := range targets {
pathDigest := sha256.Sum256([]byte(path))
pathHex := hex.EncodeToString(pathDigest[:])
r := tr.keysDB.GetRole(role)
if role == data.ValidRoles["targets"] || (r.CheckPaths(path) || r.CheckPrefixes(pathHex)) {
t.Signed.Targets[path] = target
} else {
invalid[path] = target
}
}
t.Dirty = true
if len(invalid) > 0 {
return invalid, fmt.Errorf("Could not add all targets")
}
return nil, nil
}
func (tr *TufRepo) RemoveTargets(role string, targets ...string) error {
t, ok := tr.Targets[role]
if !ok {
return errors.ErrInvalidRole{role}
}
for _, path := range targets {
delete(t.Signed.Targets, path)
}
t.Dirty = true
return nil
}
func (tr *TufRepo) UpdateSnapshot(role string, s *data.Signed) error {
jsonData, err := json.Marshal(s)
if err != nil {
return err
}
meta, err := data.NewFileMeta(bytes.NewReader(jsonData), "sha256")
if err != nil {
return err
}
tr.Snapshot.Signed.Meta[role] = meta
tr.Snapshot.Dirty = true
return nil
}
func (tr *TufRepo) UpdateTimestamp(s *data.Signed) error {
jsonData, err := json.Marshal(s)
if err != nil {
return err
}
meta, err := data.NewFileMeta(bytes.NewReader(jsonData), "sha256")
if err != nil {
return err
}
tr.Timestamp.Signed.Meta["snapshot"] = meta
tr.Timestamp.Dirty = true
return nil
}
func (tr *TufRepo) SignRoot(expires time.Time, cryptoService signed.CryptoService) (*data.Signed, error) {
logrus.Debug("signing root...")
tr.Root.Signed.Expires = expires
tr.Root.Signed.Version++
root := tr.keysDB.GetRole(data.ValidRoles["root"])
signed, err := tr.Root.ToSigned()
if err != nil {
return nil, err
}
signed, err = tr.sign(signed, *root, cryptoService)
if err != nil {
return nil, err
}
tr.Root.Signatures = signed.Signatures
return signed, nil
}
func (tr *TufRepo) SignTargets(role string, expires time.Time, cryptoService signed.CryptoService) (*data.Signed, error) {
logrus.Debugf("sign targets called for role %s", role)
tr.Targets[role].Signed.Expires = expires
tr.Targets[role].Signed.Version++
signed, err := tr.Targets[role].ToSigned()
if err != nil {
logrus.Debug("errored getting targets data.Signed object")
return nil, err
}
targets := tr.keysDB.GetRole(role)
signed, err = tr.sign(signed, *targets, cryptoService)
if err != nil {
logrus.Debug("errored signing ", role)
return nil, err
}
tr.Targets[role].Signatures = signed.Signatures
return signed, nil
}
func (tr *TufRepo) SignSnapshot(expires time.Time, cryptoService signed.CryptoService) (*data.Signed, error) {
logrus.Debug("signing snapshot...")
signedRoot, err := tr.Root.ToSigned()
if err != nil {
return nil, err
}
err = tr.UpdateSnapshot("root", signedRoot)
if err != nil {
return nil, err
}
	tr.Root.Dirty = false // root is dirty until its changes are captured in the snapshot
for role, targets := range tr.Targets {
signedTargets, err := targets.ToSigned()
if err != nil {
return nil, err
}
err = tr.UpdateSnapshot(role, signedTargets)
if err != nil {
return nil, err
}
}
tr.Snapshot.Signed.Expires = expires
tr.Snapshot.Signed.Version++
signed, err := tr.Snapshot.ToSigned()
if err != nil {
return nil, err
}
snapshot := tr.keysDB.GetRole(data.ValidRoles["snapshot"])
signed, err = tr.sign(signed, *snapshot, cryptoService)
if err != nil {
return nil, err
}
tr.Snapshot.Signatures = signed.Signatures
return signed, nil
}
func (tr *TufRepo) SignTimestamp(expires time.Time, cryptoService signed.CryptoService) (*data.Signed, error) {
logrus.Debug("SignTimestamp")
signedSnapshot, err := tr.Snapshot.ToSigned()
if err != nil {
return nil, err
}
err = tr.UpdateTimestamp(signedSnapshot)
if err != nil {
return nil, err
}
tr.Timestamp.Signed.Expires = expires
tr.Timestamp.Signed.Version++
signed, err := tr.Timestamp.ToSigned()
if err != nil {
return nil, err
}
timestamp := tr.keysDB.GetRole(data.ValidRoles["timestamp"])
signed, err = tr.sign(signed, *timestamp, cryptoService)
if err != nil {
return nil, err
}
tr.Timestamp.Signatures = signed.Signatures
tr.Snapshot.Dirty = false // snapshot is dirty until changes have been captured in timestamp
return signed, nil
}
func (tr TufRepo) sign(signedData *data.Signed, role data.Role, cryptoService signed.CryptoService) (*data.Signed, error) {
ks := make([]data.PublicKey, 0, len(role.KeyIDs))
for _, kid := range role.KeyIDs {
k := tr.keysDB.GetKey(kid)
if k == nil {
continue
}
ks = append(ks, k)
}
if len(ks) < 1 {
return nil, keys.ErrInvalidKey
}
if cryptoService == nil {
cryptoService = tr.cryptoService
}
err := signed.Sign(cryptoService, signedData, ks...)
if err != nil {
return nil, err
}
return signedData, nil
}

View file

@ -0,0 +1,82 @@
package utils
import (
"crypto/hmac"
"encoding/hex"
"errors"
"fmt"
gopath "path"
"path/filepath"
"github.com/endophage/gotuf/data"
)
var ErrWrongLength = errors.New("wrong length")
type ErrWrongHash struct {
Type string
Expected []byte
Actual []byte
}
func (e ErrWrongHash) Error() string {
return fmt.Sprintf("wrong %s hash, expected %#x got %#x", e.Type, e.Expected, e.Actual)
}
type ErrNoCommonHash struct {
Expected data.Hashes
Actual data.Hashes
}
func (e ErrNoCommonHash) Error() string {
types := func(a data.Hashes) []string {
t := make([]string, 0, len(a))
for typ := range a {
t = append(t, typ)
}
return t
}
return fmt.Sprintf("no common hash function, expected one of %s, got %s", types(e.Expected), types(e.Actual))
}
type ErrUnknownHashAlgorithm struct {
Name string
}
func (e ErrUnknownHashAlgorithm) Error() string {
return fmt.Sprintf("unknown hash algorithm: %s", e.Name)
}
type PassphraseFunc func(role string, confirm bool) ([]byte, error)
func FileMetaEqual(actual data.FileMeta, expected data.FileMeta) error {
if actual.Length != expected.Length {
return ErrWrongLength
}
hashChecked := false
for typ, hash := range expected.Hashes {
if h, ok := actual.Hashes[typ]; ok {
hashChecked = true
if !hmac.Equal(h, hash) {
return ErrWrongHash{typ, hash, h}
}
}
}
if !hashChecked {
return ErrNoCommonHash{expected.Hashes, actual.Hashes}
}
return nil
}
func NormalizeTarget(path string) string {
return gopath.Join("/", path)
}
func HashedPaths(path string, hashes data.Hashes) []string {
paths := make([]string, 0, len(hashes))
for _, hash := range hashes {
hashedPath := filepath.Join(filepath.Dir(path), hex.EncodeToString(hash)+"."+filepath.Base(path))
paths = append(paths, hashedPath)
}
return paths
}
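// Illustrative sketch (not part of the original source): with consistent
// snapshots, a target such as "foo.sh" whose sha256 digest is known is
// also addressable as "/<hex-digest>.foo.sh"; HashedPaths derives those
// names from a FileMeta's hashes.
func exampleHashedPaths(meta data.FileMeta) []string {
	return HashedPaths(NormalizeTarget("foo.sh"), meta.Hashes)
}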

View file

@ -0,0 +1,80 @@
package utils
import (
"bytes"
"crypto/sha256"
"crypto/tls"
"fmt"
"io"
"net/http"
"net/url"
"os"
"strings"
"github.com/endophage/gotuf/data"
)
// Download GETs the given URL. Note that TLS certificate verification is
// disabled here (InsecureSkipVerify), so the response is not authenticated.
func Download(url url.URL) (*http.Response, error) {
	tr := &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}
	client := &http.Client{Transport: tr}
	return client.Get(url.String())
}
// Upload POSTs the given body as JSON to the given URL. As with Download,
// TLS certificate verification is disabled.
func Upload(url string, body io.Reader) (*http.Response, error) {
	tr := &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}
	client := &http.Client{Transport: tr}
	return client.Post(url, "application/json", body)
}
func ValidateTarget(r io.Reader, m *data.FileMeta) error {
h := sha256.New()
length, err := io.Copy(h, r)
if err != nil {
return err
}
	if length != m.Length {
		return fmt.Errorf("Size of downloaded target did not match targets entry.\nExpected: %d\nReceived: %d\n", m.Length, length)
	}
	hashDigest := h.Sum(nil)
	if !bytes.Equal(m.Hashes["sha256"], hashDigest) {
		return fmt.Errorf("Hash of downloaded target did not match targets entry.\nExpected: %x\nReceived: %x\n", m.Hashes["sha256"], hashDigest)
	}
return nil
}
func StrSliceContains(ss []string, s string) bool {
for _, v := range ss {
if v == s {
return true
}
}
return false
}
func StrSliceContainsI(ss []string, s string) bool {
s = strings.ToLower(s)
for _, v := range ss {
v = strings.ToLower(v)
if v == s {
return true
}
}
return false
}
// FileExists reports whether a file exists at the given path
func FileExists(path string) bool {
	_, err := os.Stat(path)
	return err == nil
}
type NoopCloser struct {
io.Reader
}
func (nc *NoopCloser) Close() error {
return nil
}

View file

@ -0,0 +1,4 @@
language: go
go:
- 1.1
- tip

View file

@ -0,0 +1,27 @@
Copyright (c) 2012 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View file

@ -0,0 +1,620 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cjson
import (
"bytes"
"encoding/base64"
"encoding/json"
"math"
"reflect"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"unicode"
"unicode/utf8"
)
func Marshal(v interface{}) ([]byte, error) {
e := &encodeState{}
err := e.marshal(v)
if err != nil {
return nil, err
}
return e.Bytes(), nil
}
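// Illustrative sketch (not part of the original source): canonical output
// is deterministic; map keys are emitted in sorted order with no extra
// whitespace, so equal values always serialize, and therefore hash,
// identically.
func exampleCanonical() (string, error) {
	b, err := Marshal(map[string]int{"b": 2, "a": 1})
	return string(b), err // yields `{"a":1,"b":2}`
}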
// Marshaler is the interface implemented by objects that
// can marshal themselves into valid JSON.
type Marshaler interface {
MarshalJSON() ([]byte, error)
}
// An UnsupportedTypeError is returned by Marshal when attempting
// to encode an unsupported value type.
type UnsupportedTypeError struct {
Type reflect.Type
}
func (e *UnsupportedTypeError) Error() string {
return "json: unsupported type: " + e.Type.String()
}
type UnsupportedValueError struct {
Value reflect.Value
Str string
}
func (e *UnsupportedValueError) Error() string {
return "json: unsupported value: " + e.Str
}
type InvalidUTF8Error struct {
S string
}
func (e *InvalidUTF8Error) Error() string {
return "json: invalid UTF-8 in string: " + strconv.Quote(e.S)
}
type MarshalerError struct {
Type reflect.Type
Err error
}
func (e *MarshalerError) Error() string {
return "json: error calling MarshalJSON for type " + e.Type.String() + ": " + e.Err.Error()
}
var hex = "0123456789abcdef"
var numberType = reflect.TypeOf(Number(""))
// A Number represents a JSON number literal.
type Number string
// String returns the literal text of the number.
func (n Number) String() string { return string(n) }
// Float64 returns the number as a float64.
func (n Number) Float64() (float64, error) {
return strconv.ParseFloat(string(n), 64)
}
// Int64 returns the number as an int64.
func (n Number) Int64() (int64, error) {
return strconv.ParseInt(string(n), 10, 64)
}
// An encodeState encodes JSON into a bytes.Buffer.
type encodeState struct {
bytes.Buffer // accumulated output
scratch [64]byte
}
func (e *encodeState) marshal(v interface{}) (err error) {
defer func() {
if r := recover(); r != nil {
if _, ok := r.(runtime.Error); ok {
panic(r)
}
err = r.(error)
}
}()
e.reflectValue(reflect.ValueOf(v))
return nil
}
func (e *encodeState) error(err error) {
panic(err)
}
var byteSliceType = reflect.TypeOf([]byte(nil))
func isEmptyValue(v reflect.Value) bool {
switch v.Kind() {
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
return v.Len() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Interface, reflect.Ptr:
return v.IsNil()
}
return false
}
func (e *encodeState) reflectValue(v reflect.Value) {
e.reflectValueQuoted(v, false)
}
// reflectValueQuoted writes the value in v to the output.
// If quoted is true, the serialization is wrapped in a JSON string.
func (e *encodeState) reflectValueQuoted(v reflect.Value, quoted bool) {
if !v.IsValid() {
e.WriteString("null")
return
}
m, ok := v.Interface().(Marshaler)
if !ok {
// T doesn't match the interface. Check against *T too.
if v.Kind() != reflect.Ptr && v.CanAddr() {
m, ok = v.Addr().Interface().(Marshaler)
if ok {
v = v.Addr()
}
}
}
if ok && (v.Kind() != reflect.Ptr || !v.IsNil()) {
b, err := m.MarshalJSON()
if err != nil {
e.error(&MarshalerError{v.Type(), err})
}
// canonicalize the json if it's an object
b = bytes.TrimSpace(b)
if len(b) > 0 && b[0] == '{' {
var temp interface{}
err = json.Unmarshal(b, &temp)
if err != nil {
e.error(&MarshalerError{v.Type(), err})
}
b, err = Marshal(temp)
if err != nil {
e.error(&MarshalerError{v.Type(), err})
}
}
e.Buffer.Write(b)
return
}
writeString := (*encodeState).WriteString
if quoted {
writeString = (*encodeState).string
}
switch v.Kind() {
case reflect.Bool:
x := v.Bool()
if x {
writeString(e, "true")
} else {
writeString(e, "false")
}
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
b := strconv.AppendInt(e.scratch[:0], v.Int(), 10)
if quoted {
writeString(e, string(b))
} else {
e.Write(b)
}
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
b := strconv.AppendUint(e.scratch[:0], v.Uint(), 10)
if quoted {
writeString(e, string(b))
} else {
e.Write(b)
}
case reflect.Float32, reflect.Float64:
f := v.Float()
if math.IsInf(f, 0) || math.IsNaN(f) || math.Floor(f) != f {
e.error(&UnsupportedValueError{v, "floating point number"})
}
b := strconv.AppendInt(e.scratch[:0], int64(f), 10)
if quoted {
writeString(e, string(b))
} else {
e.Write(b)
}
case reflect.String:
if v.Type() == numberType {
numStr := v.String()
if numStr == "" {
numStr = "0" // Number's zero-val
}
e.WriteString(numStr)
break
}
if quoted {
sb, err := Marshal(v.String())
if err != nil {
e.error(err)
}
e.string(string(sb))
} else {
e.string(v.String())
}
case reflect.Struct:
e.WriteByte('{')
first := true
for _, f := range cachedTypeFields(v.Type()) {
fv := fieldByIndex(v, f.index)
if !fv.IsValid() || f.omitEmpty && isEmptyValue(fv) {
continue
}
if first {
first = false
} else {
e.WriteByte(',')
}
e.string(f.name)
e.WriteByte(':')
e.reflectValueQuoted(fv, f.quoted)
}
e.WriteByte('}')
case reflect.Map:
if v.Type().Key().Kind() != reflect.String {
e.error(&UnsupportedTypeError{v.Type()})
}
if v.IsNil() {
e.WriteString("null")
break
}
e.WriteByte('{')
var sv stringValues = v.MapKeys()
sort.Sort(sv)
for i, k := range sv {
if i > 0 {
e.WriteByte(',')
}
e.string(k.String())
e.WriteByte(':')
e.reflectValue(v.MapIndex(k))
}
e.WriteByte('}')
case reflect.Slice:
if v.IsNil() {
e.WriteString("null")
break
}
if v.Type().Elem().Kind() == reflect.Uint8 {
// Byte slices get special treatment; arrays don't.
s := v.Bytes()
e.WriteByte('"')
if len(s) < 1024 {
// for small buffers, using Encode directly is much faster.
dst := make([]byte, base64.StdEncoding.EncodedLen(len(s)))
base64.StdEncoding.Encode(dst, s)
e.Write(dst)
} else {
// for large buffers, avoid unnecessary extra temporary
// buffer space.
enc := base64.NewEncoder(base64.StdEncoding, e)
enc.Write(s)
enc.Close()
}
e.WriteByte('"')
break
}
// Slices can be marshalled as nil, but otherwise are handled
// as arrays.
fallthrough
case reflect.Array:
e.WriteByte('[')
n := v.Len()
for i := 0; i < n; i++ {
if i > 0 {
e.WriteByte(',')
}
e.reflectValue(v.Index(i))
}
e.WriteByte(']')
case reflect.Interface, reflect.Ptr:
if v.IsNil() {
e.WriteString("null")
return
}
e.reflectValue(v.Elem())
default:
e.error(&UnsupportedTypeError{v.Type()})
}
return
}
func isValidTag(s string) bool {
if s == "" {
return false
}
for _, c := range s {
switch {
case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
// Backslash and quote chars are reserved, but
// otherwise any punctuation chars are allowed
// in a tag name.
default:
if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
return false
}
}
}
return true
}
func fieldByIndex(v reflect.Value, index []int) reflect.Value {
for _, i := range index {
if v.Kind() == reflect.Ptr {
if v.IsNil() {
return reflect.Value{}
}
v = v.Elem()
}
v = v.Field(i)
}
return v
}
// stringValues is a slice of reflect.Value holding *reflect.StringValue.
// It implements the methods to sort by string.
type stringValues []reflect.Value
func (sv stringValues) Len() int { return len(sv) }
func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }
func (sv stringValues) Less(i, j int) bool { return sv.get(i) < sv.get(j) }
func (sv stringValues) get(i int) string { return sv[i].String() }
func (e *encodeState) string(s string) (int, error) {
len0 := e.Len()
e.WriteByte('"')
start := 0
for i := 0; i < len(s); {
if b := s[i]; b < utf8.RuneSelf {
if b != '\\' && b != '"' {
i++
continue
}
if start < i {
e.WriteString(s[start:i])
}
switch b {
case '\\', '"':
e.WriteByte('\\')
e.WriteByte(b)
}
i++
start = i
continue
}
c, size := utf8.DecodeRuneInString(s[i:])
if c == utf8.RuneError && size == 1 {
e.error(&InvalidUTF8Error{s})
}
i += size
}
if start < len(s) {
e.WriteString(s[start:])
}
e.WriteByte('"')
return e.Len() - len0, nil
}
// A field represents a single field found in a struct.
type field struct {
name string
tag bool
index []int
typ reflect.Type
omitEmpty bool
quoted bool
}
// byName sorts field by name, breaking ties with depth,
// then breaking ties with "name came from json tag", then
// breaking ties with index sequence.
type byName []field
func (x byName) Len() int { return len(x) }
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byName) Less(i, j int) bool {
if x[i].name != x[j].name {
return x[i].name < x[j].name
}
if len(x[i].index) != len(x[j].index) {
return len(x[i].index) < len(x[j].index)
}
if x[i].tag != x[j].tag {
return x[i].tag
}
return byIndex(x).Less(i, j)
}
// byIndex sorts field by index sequence.
type byIndex []field
func (x byIndex) Len() int { return len(x) }
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byIndex) Less(i, j int) bool {
for k, xik := range x[i].index {
if k >= len(x[j].index) {
return false
}
if xik != x[j].index[k] {
return xik < x[j].index[k]
}
}
return len(x[i].index) < len(x[j].index)
}
// typeFields returns a list of fields that JSON should recognize for the given type.
// The algorithm is breadth-first search over the set of structs to include - the top struct
// and then any reachable anonymous structs.
func typeFields(t reflect.Type) []field {
// Anonymous fields to explore at the current level and the next.
current := []field{}
next := []field{{typ: t}}
// Count of queued names for current level and the next.
count := map[reflect.Type]int{}
nextCount := map[reflect.Type]int{}
// Types already visited at an earlier level.
visited := map[reflect.Type]bool{}
// Fields found.
var fields []field
for len(next) > 0 {
current, next = next, current[:0]
count, nextCount = nextCount, map[reflect.Type]int{}
for _, f := range current {
if visited[f.typ] {
continue
}
visited[f.typ] = true
// Scan f.typ for fields to include.
for i := 0; i < f.typ.NumField(); i++ {
sf := f.typ.Field(i)
if sf.PkgPath != "" { // unexported
continue
}
tag := sf.Tag.Get("json")
if tag == "-" {
continue
}
name, opts := parseTag(tag)
if !isValidTag(name) {
name = ""
}
index := make([]int, len(f.index)+1)
copy(index, f.index)
index[len(f.index)] = i
ft := sf.Type
if ft.Name() == "" && ft.Kind() == reflect.Ptr {
// Follow pointer.
ft = ft.Elem()
}
// Record found field and index sequence.
if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
tagged := name != ""
if name == "" {
name = sf.Name
}
fields = append(fields, field{name, tagged, index, ft,
opts.Contains("omitempty"), opts.Contains("string")})
if count[f.typ] > 1 {
// If there were multiple instances, add a second,
// so that the annihilation code will see a duplicate.
// It only cares about the distinction between 1 or 2,
// so don't bother generating any more copies.
fields = append(fields, fields[len(fields)-1])
}
continue
}
// Record new anonymous struct to explore in next round.
nextCount[ft]++
if nextCount[ft] == 1 {
next = append(next, field{name: ft.Name(), index: index, typ: ft})
}
}
}
}
sort.Sort(byName(fields))
// Remove fields with annihilating name collisions
// and also fields shadowed by fields with explicit JSON tags.
name := ""
out := fields[:0]
for _, f := range fields {
if f.name != name {
name = f.name
out = append(out, f)
continue
}
if n := len(out); n > 0 && out[n-1].name == name && (!out[n-1].tag || f.tag) {
out = out[:n-1]
}
}
fields = out
return fields
}
var fieldCache struct {
sync.RWMutex
m map[reflect.Type][]field
}
// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
func cachedTypeFields(t reflect.Type) []field {
fieldCache.RLock()
f := fieldCache.m[t]
fieldCache.RUnlock()
if f != nil {
return f
}
// Compute fields without lock.
// Might duplicate effort but won't hold other computations back.
f = typeFields(t)
if f == nil {
f = []field{}
}
fieldCache.Lock()
if fieldCache.m == nil {
fieldCache.m = map[reflect.Type][]field{}
}
fieldCache.m[t] = f
fieldCache.Unlock()
return f
}
// tagOptions is the string following a comma in a struct field's "json"
// tag, or the empty string. It does not include the leading comma.
type tagOptions string
// parseTag splits a struct field's json tag into its name and
// comma-separated options.
func parseTag(tag string) (string, tagOptions) {
if idx := strings.Index(tag, ","); idx != -1 {
return tag[:idx], tagOptions(tag[idx+1:])
}
return tag, tagOptions("")
}
// Contains reports whether a comma-separated list of options
// contains a particular substr flag. substr must be surrounded by a
// string boundary or commas.
func (o tagOptions) Contains(optionName string) bool {
if len(o) == 0 {
return false
}
s := string(o)
for s != "" {
var next string
i := strings.Index(s, ",")
if i >= 0 {
s, next = s[:i], s[i+1:]
}
if s == optionName {
return true
}
s = next
}
return false
}