Vendor latest notary

Use updated notary to pick up updates from security review

Signed-off-by: Derek McGowan <derek@mcgstyle.net> (github: dmcgowan)
(cherry picked from commit d594c6fcd8)

This commit is contained in:
parent 0fe5aad984
commit b3c3c4cddc

21 changed files with 584 additions and 172 deletions
@@ -137,7 +137,7 @@ RUN set -x \
 	&& rm -rf "$GOPATH"

 # Install notary server
-ENV NOTARY_COMMIT 77bced079e83d80f40c1f0a544b1a8a3b97fb052
+ENV NOTARY_COMMIT 8e8122eb5528f621afcd4e2854c47302f17392f7
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
 	&& git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \
@@ -38,8 +38,8 @@ clone git github.com/hashicorp/consul v0.5.2
 clone git github.com/docker/distribution e83345626608aa943d5c8a027fddcf54814d9545
 clone git github.com/vbatts/tar-split v0.9.4

-clone git github.com/docker/notary 77bced079e83d80f40c1f0a544b1a8a3b97fb052
-clone git github.com/endophage/gotuf 374908abc8af7e953a2813c5c2b3944ab625ca68
+clone git github.com/docker/notary 8e8122eb5528f621afcd4e2854c47302f17392f7
+clone git github.com/endophage/gotuf 89ceb27829b9353dfee5ccccf7a3a9bb77008b05
 clone git github.com/tent/canonical-json-go 96e4ba3a7613a1216cbd1badca4efe382adea337
 clone git github.com/agl/ed25519 d2b94fd789ea21d12fac1a4443dd3a3f79cda72c

vendor/src/github.com/docker/notary/LICENSE (new vendored file, 201 additions)
@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright 2015 Docker, Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@@ -1,9 +1,19 @@
 package changelist

+// Scopes for TufChanges are simply the TUF roles.
+// Unfortunately because of targets delegations, we can only
+// cover the base roles.
+const (
+    ScopeRoot      = "root"
+    ScopeTargets   = "targets"
+    ScopeSnapshot  = "snapshot"
+    ScopeTimestamp = "timestamp"
+)
+
 // TufChange represents a change to a TUF repo
 type TufChange struct {
     // Abbreviated because Go doesn't permit a field and method of the same name
-    Actn       int    `json:"action"`
+    Actn       string `json:"action"`
     Role       string `json:"role"`
     ChangeType string `json:"type"`
     ChangePath string `json:"path"`

@@ -11,7 +21,7 @@ type TufChange struct {
 }

 // NewTufChange initializes a tufChange object
-func NewTufChange(action int, role, changeType, changePath string, content []byte) *TufChange {
+func NewTufChange(action string, role, changeType, changePath string, content []byte) *TufChange {
     return &TufChange{
         Actn: action,
         Role: role,

@@ -22,7 +32,7 @@ func NewTufChange(action int, role, changeType, changePath string, content []byt
 }

 // Action return c.Actn
-func (c TufChange) Action() int {
+func (c TufChange) Action() string {
     return c.Actn
 }

@@ -5,6 +5,11 @@ type memChangelist struct {
     changes []Change
 }

+// NewMemChangelist instantiates a new in-memory changelist
+func NewMemChangelist() Changelist {
+    return &memChangelist{}
+}
+
 // List returns a list of Changes
 func (cl memChangelist) List() []Change {
     return cl.changes

@@ -22,17 +22,17 @@ type Changelist interface {

 const (
     // ActionCreate represents a Create action
-    ActionCreate = iota
+    ActionCreate = "create"
     // ActionUpdate represents an Update action
-    ActionUpdate
+    ActionUpdate = "update"
     // ActionDelete represents a Delete action
-    ActionDelete
+    ActionDelete = "delete"
 )

 // Change is the interface for a TUF Change
 type Change interface {
     // "create","update", or "delete"
-    Action() int
+    Action() string

     // Where the change should be made.
     // For TUF this will be the role

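Taken together, the hunks above replace the iota-based action codes with plain strings and introduce scope constants, so a change entry can be built and inspected without extra lookup tables. Below is a minimal sketch of the API as it appears in this diff; the vendored import path and the Add/List/Scope/Path accessors are used exactly as they are elsewhere in the commit, and the target name is purely illustrative.

```go
package main

import (
	"fmt"

	"github.com/docker/notary/client/changelist"
)

func main() {
	// In-memory changelist, as created by NewMemChangelist above.
	cl := changelist.NewMemChangelist()

	// Queue a target addition; the content would normally be the
	// serialized TUF FileMeta for the target being added.
	c := changelist.NewTufChange(changelist.ActionCreate, changelist.ScopeTargets, "target", "alpine-3.2", []byte("{}"))
	if err := cl.Add(c); err != nil {
		fmt.Println("add failed:", err)
		return
	}

	// Each queued change reports its action ("create") and scope ("targets").
	for _, ch := range cl.List() {
		fmt.Println(ch.Action(), ch.Scope(), ch.Path())
	}
}
```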
@@ -250,7 +250,7 @@ func (r *NotaryRepository) AddTarget(target *Target) error {
         return err
     }

-    c := changelist.NewTufChange(changelist.ActionCreate, "targets", "target", target.Name, metaJSON)
+    c := changelist.NewTufChange(changelist.ActionCreate, changelist.ScopeTargets, "target", target.Name, metaJSON)
     err = cl.Add(c)
     if err != nil {
         return err

@@ -258,6 +258,22 @@ func (r *NotaryRepository) AddTarget(target *Target) error {
     return cl.Close()
 }

+// RemoveTarget creates a new changelist entry to remove a target from the repository
+// when the changelist gets applied at publish time
+func (r *NotaryRepository) RemoveTarget(targetName string) error {
+    cl, err := changelist.NewFileChangelist(filepath.Join(r.tufRepoPath, "changelist"))
+    if err != nil {
+        return err
+    }
+    logrus.Debugf("Removing target \"%s\"", targetName)
+    c := changelist.NewTufChange(changelist.ActionDelete, changelist.ScopeTargets, "target", targetName, nil)
+    err = cl.Add(c)
+    if err != nil {
+        return err
+    }
+    return nil
+}
+
 // ListTargets lists all targets for the current repository
 func (r *NotaryRepository) ListTargets() ([]*Target, error) {
     c, err := r.bootstrapClient()

@@ -5,6 +5,7 @@ import (
     "net/http"
     "time"

+    "github.com/Sirupsen/logrus"
     "github.com/docker/notary/client/changelist"
     "github.com/endophage/gotuf"
     "github.com/endophage/gotuf/data"

@@ -26,13 +27,16 @@ func getRemoteStore(baseURL, gun string, rt http.RoundTripper) (store.RemoteStor

 func applyChangelist(repo *tuf.TufRepo, cl changelist.Changelist) error {
     changes := cl.List()
-    var err error
+    logrus.Debugf("applying %d changes", len(changes))
     for _, c := range changes {
-        if c.Scope() == "targets" {
-            applyTargetsChange(repo, c)
-        }
-        if err != nil {
-            return err
+        switch c.Scope() {
+        case changelist.ScopeTargets:
+            err := applyTargetsChange(repo, c)
+            if err != nil {
+                return err
+            }
+        default:
+            logrus.Debug("scope not supported: ", c.Scope())
         }
     }
     return nil

@@ -40,16 +44,21 @@ func applyChangelist(repo *tuf.TufRepo, cl changelist.Changelist) error {

 func applyTargetsChange(repo *tuf.TufRepo, c changelist.Change) error {
     var err error
-    meta := &data.FileMeta{}
-    err = json.Unmarshal(c.Content(), meta)
-    if err != nil {
-        return nil
-    }
-    if c.Action() == changelist.ActionCreate {
+    switch c.Action() {
+    case changelist.ActionCreate:
+        logrus.Debug("changelist add: ", c.Path())
+        meta := &data.FileMeta{}
+        err = json.Unmarshal(c.Content(), meta)
+        if err != nil {
+            return err
+        }
         files := data.Files{c.Path(): *meta}
-        _, err = repo.AddTargets("targets", files)
-    } else if c.Action() == changelist.ActionDelete {
-        err = repo.RemoveTargets("targets", c.Path())
+        _, err = repo.AddTargets(c.Scope(), files)
+    case changelist.ActionDelete:
+        logrus.Debug("changelist remove: ", c.Path())
+        err = repo.RemoveTargets(c.Scope(), c.Path())
+    default:
+        logrus.Debug("action not yet supported: ", c.Action())
     }
     if err != nil {
         return err

@@ -42,6 +42,39 @@ func (km *KeyStoreManager) ExportRootKey(dest io.Writer, keyID string) error {
     return err
 }

+// ExportRootKeyReencrypt exports the specified root key to an io.Writer in
+// PEM format. The key is reencrypted with a new passphrase.
+func (km *KeyStoreManager) ExportRootKeyReencrypt(dest io.Writer, keyID string, newPassphraseRetriever passphrase.Retriever) error {
+    privateKey, alias, err := km.rootKeyStore.GetKey(keyID)
+    if err != nil {
+        return err
+    }
+
+    // Create temporary keystore to use as a staging area
+    tempBaseDir, err := ioutil.TempDir("", "notary-key-export-")
+    defer os.RemoveAll(tempBaseDir)
+
+    privRootKeysSubdir := filepath.Join(privDir, rootKeysSubdir)
+    tempRootKeysPath := filepath.Join(tempBaseDir, privRootKeysSubdir)
+    tempRootKeyStore, err := trustmanager.NewKeyFileStore(tempRootKeysPath, newPassphraseRetriever)
+    if err != nil {
+        return err
+    }
+
+    err = tempRootKeyStore.AddKey(keyID, alias, privateKey)
+    if err != nil {
+        return err
+    }
+
+    pemBytes, err := tempRootKeyStore.Get(keyID + "_" + alias)
+    if err != nil {
+        return err
+    }
+
+    _, err = dest.Write(pemBytes)
+    return err
+}
+
 // checkRootKeyIsEncrypted makes sure the root key is encrypted. We have
 // internal assumptions that depend on this.
 func checkRootKeyIsEncrypted(pemBytes []byte) error {

@@ -80,13 +113,13 @@ func (km *KeyStoreManager) ImportRootKey(source io.Reader, keyID string) error {

 func moveKeys(oldKeyStore, newKeyStore *trustmanager.KeyFileStore) error {
     // List all files but no symlinks
-    for _, f := range oldKeyStore.ListKeys() {
-        pemBytes, alias, err := oldKeyStore.GetKey(f)
+    for f := range oldKeyStore.ListKeys() {
+        privateKey, alias, err := oldKeyStore.GetKey(f)
         if err != nil {
             return err
         }

-        err = newKeyStore.AddKey(f, alias, pemBytes)
+        err = newKeyStore.AddKey(f, alias, privateKey)

         if err != nil {
             return err

@@ -247,7 +280,7 @@ func (km *KeyStoreManager) ImportKeysZip(zipReader zip.Reader) error {

 func moveKeysByGUN(oldKeyStore, newKeyStore *trustmanager.KeyFileStore, gun string) error {
     // List all files but no symlinks
-    for _, relKeyPath := range oldKeyStore.ListKeys() {
+    for relKeyPath := range oldKeyStore.ListKeys() {

         // Skip keys that aren't associated with this GUN
         if !strings.HasPrefix(relKeyPath, filepath.FromSlash(gun)) {

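The new ExportRootKeyReencrypt takes the destination writer, the key ID, and a passphrase.Retriever that supplies the new passphrase. A small sketch of driving it follows; the keystoremanager import path and the way the *KeyStoreManager is obtained do not appear in this diff, so both are assumptions and the manager is simply passed in by the caller.

```go
package example

import (
	"os"

	"github.com/docker/notary/keystoremanager" // assumed vendored path, not shown in this diff
	"github.com/docker/notary/pkg/passphrase"
)

// exportRootKey writes the root key identified by keyID to path,
// re-encrypted under a passphrase obtained from retriever.
func exportRootKey(km *keystoremanager.KeyStoreManager, path, keyID string, retriever passphrase.Retriever) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()
	// The key is staged in a temporary store and re-encrypted before writing.
	return km.ExportRootKeyReencrypt(f, keyID, retriever)
}
```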
@@ -22,28 +22,45 @@ import (
 type Retriever func(keyName, alias string, createNew bool, attempts int) (passphrase string, giveup bool, err error)

 const (
-    idBytesToDisplay = 5
+    idBytesToDisplay = 7
     tufRootAlias     = "root"
     tufTargetsAlias  = "targets"
     tufSnapshotAlias = "snapshot"
-    tufRootKeyGenerationWarning = `You are about to create a new root signing key passphrase. This passphrase will be used to protect
-the most sensitive key in your signing system. Please choose a long, complex passphrase and be careful
-to keep the password and the key file itself secure and backed up. It is highly recommended that you use
-a password manager to generate the passphrase and keep it safe. There will be no way to recover this key.
-You can find the key in your config directory.`
+    tufRootKeyGenerationWarning = `You are about to create a new root signing key passphrase. This passphrase
+will be used to protect the most sensitive key in your signing system. Please
+choose a long, complex passphrase and be careful to keep the password and the
+key file itself secure and backed up. It is highly recommended that you use a
+password manager to generate the passphrase and keep it safe. There will be no
+way to recover this key. You can find the key in your config directory.`
 )

+var (
+    // ErrTooShort is returned if the passphrase entered for a new key is
+    // below the minimum length
+    ErrTooShort = errors.New("Passphrase too short")
+
+    // ErrDontMatch is returned if the two entered passphrases don't match.
+    // new key is below the minimum length
+    ErrDontMatch = errors.New("The entered passphrases do not match")
+
+    // ErrTooManyAttempts is returned if the maximum number of passphrase
+    // entry attempts is reached.
+    ErrTooManyAttempts = errors.New("Too many attempts")
+)
+
 // PromptRetriever returns a new Retriever which will provide a prompt on stdin
 // and stdout to retrieve a passphrase. The passphrase will be cached such that
 // subsequent prompts will produce the same passphrase.
 func PromptRetriever() Retriever {
-    return PromptRetrieverWithInOut(os.Stdin, os.Stdout)
+    return PromptRetrieverWithInOut(os.Stdin, os.Stdout, nil)
 }

 // PromptRetrieverWithInOut returns a new Retriever which will provide a
 // prompt using the given in and out readers. The passphrase will be cached
 // such that subsequent prompts will produce the same passphrase.
-func PromptRetrieverWithInOut(in io.Reader, out io.Writer) Retriever {
+// aliasMap can be used to specify display names for TUF key aliases. If aliasMap
+// is nil, a sensible default will be used.
+func PromptRetrieverWithInOut(in io.Reader, out io.Writer, aliasMap map[string]string) Retriever {
     userEnteredTargetsSnapshotsPass := false
     targetsSnapshotsPass := ""
     userEnteredRootsPass := false

@@ -54,14 +71,20 @@ func PromptRetrieverWithInOut(in io.Reader, out io.Writer) Retriever {
             fmt.Fprintln(out, tufRootKeyGenerationWarning)
         }
         if numAttempts > 0 {
-            if createNew {
-                fmt.Fprintln(out, "Passphrases do not match. Please retry.")
-
-            } else {
+            if !createNew {
                 fmt.Fprintln(out, "Passphrase incorrect. Please retry.")
             }
         }

+        // Figure out if we should display a different string for this alias
+        displayAlias := alias
+        if aliasMap != nil {
+            if val, ok := aliasMap[alias]; ok {
+                displayAlias = val
+            }
+
+        }
+
         // First, check if we have a password cached for this alias.
         if numAttempts == 0 {
             if userEnteredTargetsSnapshotsPass && (alias == tufSnapshotAlias || alias == tufTargetsAlias) {

@@ -73,7 +96,7 @@ func PromptRetrieverWithInOut(in io.Reader, out io.Writer) Retriever {
         }

         if numAttempts > 3 && !createNew {
-            return "", true, errors.New("Too many attempts")
+            return "", true, ErrTooManyAttempts
         }

         state, err := term.SaveState(0)

@@ -86,15 +109,24 @@ func PromptRetrieverWithInOut(in io.Reader, out io.Writer) Retriever {
         stdin := bufio.NewReader(in)

         indexOfLastSeparator := strings.LastIndex(keyName, string(filepath.Separator))
+        if indexOfLastSeparator == -1 {
+            indexOfLastSeparator = 0
+        }
+
-        if len(keyName) > indexOfLastSeparator+idBytesToDisplay+1 {
-            keyName = keyName[:indexOfLastSeparator+idBytesToDisplay+1]
+        if len(keyName) > indexOfLastSeparator+idBytesToDisplay {
+            if indexOfLastSeparator > 0 {
+                keyNamePrefix := keyName[:indexOfLastSeparator]
+                keyNameID := keyName[indexOfLastSeparator+1 : indexOfLastSeparator+idBytesToDisplay+1]
+                keyName = keyNamePrefix + " (" + keyNameID + ")"
+            } else {
+                keyName = keyName[indexOfLastSeparator : indexOfLastSeparator+idBytesToDisplay]
+            }
         }

         if createNew {
-            fmt.Fprintf(out, "Enter passphrase for new %s key with id %s: ", alias, keyName)
+            fmt.Fprintf(out, "Enter passphrase for new %s key with id %s: ", displayAlias, keyName)
         } else {
-            fmt.Fprintf(out, "Enter key passphrase for %s key with id %s: ", alias, keyName)
+            fmt.Fprintf(out, "Enter key passphrase for %s key with id %s: ", displayAlias, keyName)
         }

         passphrase, err := stdin.ReadBytes('\n')

@@ -119,10 +151,10 @@ func PromptRetrieverWithInOut(in io.Reader, out io.Writer) Retriever {

         if len(retPass) < 8 {
             fmt.Fprintln(out, "Please use a password manager to generate and store a good random passphrase.")
-            return "", false, errors.New("Passphrase too short")
+            return "", false, ErrTooShort
         }

-        fmt.Fprintf(out, "Repeat passphrase for new %s key with id %s: ", alias, keyName)
+        fmt.Fprintf(out, "Repeat passphrase for new %s key with id %s: ", displayAlias, keyName)
         confirmation, err := stdin.ReadBytes('\n')
         fmt.Fprintln(out)
         if err != nil {

@@ -131,7 +163,8 @@ func PromptRetrieverWithInOut(in io.Reader, out io.Writer) Retriever {
         confirmationStr := strings.TrimSpace(string(confirmation))

         if retPass != confirmationStr {
-            return "", false, errors.New("The entered passphrases do not match")
+            fmt.Fprintln(out, "Passphrases do not match. Please retry.")
+            return "", false, ErrDontMatch
         }

         if alias == tufSnapshotAlias || alias == tufTargetsAlias {

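PromptRetrieverWithInOut now takes an aliasMap so callers can show friendlier names than the raw TUF aliases when prompting. A minimal sketch of wiring it up, using only the signatures shown above; the key ID passed to the retriever is illustrative.

```go
package main

import (
	"fmt"
	"os"

	"github.com/docker/notary/pkg/passphrase"
)

func main() {
	// Display "root (offline)" instead of the bare "root" alias in prompts.
	aliases := map[string]string{"root": "root (offline)"}
	retriever := passphrase.PromptRetrieverWithInOut(os.Stdin, os.Stdout, aliases)

	// createNew=true triggers the new-key warning and confirmation flow;
	// attempts=0 marks the first try for this key.
	pass, giveup, err := retriever("0123456789abcdef", "root", true, 0)
	if giveup || err != nil {
		fmt.Fprintln(os.Stderr, "no passphrase obtained:", err)
		return
	}
	fmt.Println("got a passphrase of length", len(pass))
}
```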
@@ -5,65 +5,10 @@ import (
     "strings"
     "sync"

-    "fmt"
-
     "github.com/docker/notary/pkg/passphrase"
     "github.com/endophage/gotuf/data"
 )

-const (
-    keyExtension = "key"
-)
-
-// ErrAttemptsExceeded is returned when too many attempts have been made to decrypt a key
-type ErrAttemptsExceeded struct{}
-
-// ErrAttemptsExceeded is returned when too many attempts have been made to decrypt a key
-func (err ErrAttemptsExceeded) Error() string {
-    return "maximum number of passphrase attempts exceeded"
-}
-
-// ErrPasswordInvalid is returned when signing fails. It could also mean the signing
-// key file was corrupted, but we have no way to distinguish.
-type ErrPasswordInvalid struct{}
-
-// ErrPasswordInvalid is returned when signing fails. It could also mean the signing
-// key file was corrupted, but we have no way to distinguish.
-func (err ErrPasswordInvalid) Error() string {
-    return "password invalid, operation has failed."
-}
-
-// ErrKeyNotFound is returned when the keystore fails to retrieve a specific key.
-type ErrKeyNotFound struct {
-    KeyID string
-}
-
-// ErrKeyNotFound is returned when the keystore fails to retrieve a specific key.
-func (err ErrKeyNotFound) Error() string {
-    return fmt.Sprintf("signing key not found: %s", err.KeyID)
-}
-
-// KeyStore is a generic interface for private key storage
-type KeyStore interface {
-    LimitedFileStore
-
-    AddKey(name, alias string, privKey data.PrivateKey) error
-    GetKey(name string) (data.PrivateKey, string, error)
-    ListKeys() []string
-    RemoveKey(name string) error
-}
-
-type cachedKey struct {
-    alias string
-    key   data.PrivateKey
-}
-
-// PassphraseRetriever is a callback function that should retrieve a passphrase
-// for a given named key. If it should be treated as new passphrase (e.g. with
-// confirmation), createNew will be true. Attempts is passed in so that implementers
-// decide how many chances to give to a human, for example.
-type PassphraseRetriever func(keyId, alias string, createNew bool, attempts int) (passphrase string, giveup bool, err error)
-
 // KeyFileStore persists and manages private keys on disk
 type KeyFileStore struct {
     sync.Mutex

@@ -111,7 +56,7 @@ func (s *KeyFileStore) GetKey(name string) (data.PrivateKey, string, error) {
 // ListKeys returns a list of unique PublicKeys present on the KeyFileStore.
 // There might be symlinks associating Certificate IDs to Public Keys, so this
 // method only returns the IDs that aren't symlinks
-func (s *KeyFileStore) ListKeys() []string {
+func (s *KeyFileStore) ListKeys() map[string]string {
     return listKeys(s)
 }

@@ -149,7 +94,7 @@ func (s *KeyMemoryStore) GetKey(name string) (data.PrivateKey, string, error) {
 // ListKeys returns a list of unique PublicKeys present on the KeyFileStore.
 // There might be symlinks associating Certificate IDs to Public Keys, so this
 // method only returns the IDs that aren't symlinks
-func (s *KeyMemoryStore) ListKeys() []string {
+func (s *KeyMemoryStore) ListKeys() map[string]string {
     return listKeys(s)
 }

@@ -167,10 +112,10 @@ func addKey(s LimitedFileStore, passphraseRetriever passphrase.Retriever, cached
     }

     attempts := 0
-    passphrase := ""
+    chosenPassphrase := ""
     giveup := false
     for {
-        passphrase, giveup, err = passphraseRetriever(name, alias, true, attempts)
+        chosenPassphrase, giveup, err = passphraseRetriever(name, alias, true, attempts)
         if err != nil {
             attempts++
             continue

@@ -184,8 +129,8 @@ func addKey(s LimitedFileStore, passphraseRetriever passphrase.Retriever, cached
         break
     }

-    if passphrase != "" {
-        pemPrivKey, err = EncryptPrivateKey(privKey, passphrase)
+    if chosenPassphrase != "" {
+        pemPrivKey, err = EncryptPrivateKey(privKey, chosenPassphrase)
         if err != nil {
             return err
         }

@@ -261,18 +206,20 @@ func getKey(s LimitedFileStore, passphraseRetriever passphrase.Retriever, cached
     return privKey, keyAlias, nil
 }

-// ListKeys returns a list of unique PublicKeys present on the KeyFileStore.
+// ListKeys returns a map of unique PublicKeys present on the KeyFileStore and
+// their corresponding aliases.
 // There might be symlinks associating Certificate IDs to Public Keys, so this
 // method only returns the IDs that aren't symlinks
-func listKeys(s LimitedFileStore) []string {
-    var keyIDList []string
+func listKeys(s LimitedFileStore) map[string]string {
+    keyIDMap := make(map[string]string)

     for _, f := range s.ListFiles(false) {
-        keyID := strings.TrimSpace(strings.TrimSuffix(f, filepath.Ext(f)))
-        keyID = keyID[:strings.LastIndex(keyID, "_")]
-        keyIDList = append(keyIDList, keyID)
+        keyIDFull := strings.TrimSpace(strings.TrimSuffix(f, filepath.Ext(f)))
+        keyID := keyIDFull[:strings.LastIndex(keyIDFull, "_")]
+        keyAlias := keyIDFull[strings.LastIndex(keyIDFull, "_")+1:]
+        keyIDMap[keyID] = keyAlias
     }
-    return keyIDList
+    return keyIDMap
 }

 // RemoveKey removes the key from the keyfilestore

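With ListKeys returning a map of key ID to alias, callers no longer need to re-parse the "id_alias" file names themselves. A small sketch using only the constructor and signatures shown in this diff:

```go
package example

import (
	"fmt"

	"github.com/docker/notary/pkg/passphrase"
	"github.com/docker/notary/trustmanager"
)

// printKeys lists every private key in keyDir together with its role alias.
func printKeys(keyDir string, retriever passphrase.Retriever) error {
	store, err := trustmanager.NewKeyFileStore(keyDir, retriever)
	if err != nil {
		return err
	}
	// ListKeys now yields keyID -> alias (e.g. "root", "targets").
	for keyID, alias := range store.ListKeys() {
		fmt.Printf("%s (alias: %s)\n", keyID, alias)
	}
	return nil
}
```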
vendor/src/github.com/docker/notary/trustmanager/keystore.go (new vendored file, 52 additions)
@@ -0,0 +1,52 @@
package trustmanager

import (
    "fmt"

    "github.com/endophage/gotuf/data"
)

// ErrAttemptsExceeded is returned when too many attempts have been made to decrypt a key
type ErrAttemptsExceeded struct{}

// ErrAttemptsExceeded is returned when too many attempts have been made to decrypt a key
func (err ErrAttemptsExceeded) Error() string {
    return "maximum number of passphrase attempts exceeded"
}

// ErrPasswordInvalid is returned when signing fails. It could also mean the signing
// key file was corrupted, but we have no way to distinguish.
type ErrPasswordInvalid struct{}

// ErrPasswordInvalid is returned when signing fails. It could also mean the signing
// key file was corrupted, but we have no way to distinguish.
func (err ErrPasswordInvalid) Error() string {
    return "password invalid, operation has failed."
}

// ErrKeyNotFound is returned when the keystore fails to retrieve a specific key.
type ErrKeyNotFound struct {
    KeyID string
}

// ErrKeyNotFound is returned when the keystore fails to retrieve a specific key.
func (err ErrKeyNotFound) Error() string {
    return fmt.Sprintf("signing key not found: %s", err.KeyID)
}

const (
    keyExtension = "key"
)

// KeyStore is a generic interface for private key storage
type KeyStore interface {
    AddKey(name, alias string, privKey data.PrivateKey) error
    GetKey(name string) (data.PrivateKey, string, error)
    ListKeys() map[string]string
    RemoveKey(name string) error
}

type cachedKey struct {
    alias string
    key   data.PrivateKey
}

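The KeyStore interface and its error types now live in their own file, which makes it easy to program against any key store implementation and to treat a missing key differently from a decryption failure. A short sketch against the interface exactly as defined above, assuming implementations return the error types by value as they are declared:

```go
package example

import (
	"github.com/docker/notary/trustmanager"
	"github.com/endophage/gotuf/data"
)

// loadKey fetches a private key and its alias, reporting whether the key
// simply does not exist so callers can decide to generate a fresh one.
func loadKey(ks trustmanager.KeyStore, name string) (data.PrivateKey, string, bool, error) {
	privKey, alias, err := ks.GetKey(name)
	if err == nil {
		return privKey, alias, true, nil
	}
	if _, notFound := err.(trustmanager.ErrKeyNotFound); notFound {
		// Missing key: not necessarily fatal for the caller.
		return nil, "", false, nil
	}
	// Wrong passphrase, too many attempts, corrupted file, etc.
	return nil, "", false, err
}
```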
@@ -351,7 +351,7 @@ func GenerateECDSAKey(random io.Reader) (data.PrivateKey, error) {
 // PrivateKey. The serialization format we use is just the public key bytes
 // followed by the private key bytes
 func GenerateED25519Key(random io.Reader) (data.PrivateKey, error) {
-    pub, priv, err := ed25519.GenerateKey(rand.Reader)
+    pub, priv, err := ed25519.GenerateKey(random)
     if err != nil {
         return nil, err
     }

@@ -50,15 +50,9 @@ func (c *Client) Update() error {
     logrus.Debug("updating TUF client")
     err := c.update()
     if err != nil {
-        switch err.(type) {
-        case signed.ErrRoleThreshold, signed.ErrExpired, tuf.ErrLocalRootExpired:
-            logrus.Debug("retryable error occurred. Root will be downloaded and another update attempted")
-            if err := c.downloadRoot(); err != nil {
-                logrus.Errorf("client Update (Root):", err)
-                return err
-            }
-        default:
-            logrus.Error("an unexpected error occurred while updating TUF client")
+        logrus.Debug("Error occurred. Root will be downloaded and another update attempted")
+        if err := c.downloadRoot(); err != nil {
+            logrus.Errorf("client Update (Root):", err)
+            return err
         }
         // If we error again, we now have the latest root and just want to fail

@@ -114,6 +108,20 @@ func (c Client) checkRoot() error {
     if !bytes.Equal(hash[:], hashSha256) {
         return fmt.Errorf("Cached root sha256 did not match snapshot root sha256")
     }
+
+    if int64(len(raw)) != size {
+        return fmt.Errorf("Cached root size did not match snapshot size")
+    }
+
+    root := &data.SignedRoot{}
+    err = json.Unmarshal(raw, root)
+    if err != nil {
+        return ErrCorruptedCache{file: "root.json"}
+    }
+
+    if signed.IsExpired(root.Signed.Expires) {
+        return tuf.ErrLocalRootExpired{}
+    }
     return nil
 }

@@ -104,3 +104,11 @@ type ErrInvalidURL struct {
 func (e ErrInvalidURL) Error() string {
     return fmt.Sprintf("tuf: invalid repository URL %s", e.URL)
 }
+
+type ErrCorruptedCache struct {
+    file string
+}
+
+func (e ErrCorruptedCache) Error() string {
+    return fmt.Sprintf("cache is corrupted: %s", e.file)
+}

@@ -7,16 +7,27 @@ import (
     "github.com/endophage/gotuf/errors"
 )

+// Canonical base role names
+const (
+    CanonicalRootRole      = "root"
+    CanonicalTargetsRole   = "targets"
+    CanonicalSnapshotRole  = "snapshot"
+    CanonicalTimestampRole = "timestamp"
+)
+
 var ValidRoles = map[string]string{
-    "root":      "root",
-    "targets":   "targets",
-    "snapshot":  "snapshot",
-    "timestamp": "timestamp",
+    CanonicalRootRole:      CanonicalRootRole,
+    CanonicalTargetsRole:   CanonicalTargetsRole,
+    CanonicalSnapshotRole:  CanonicalSnapshotRole,
+    CanonicalTimestampRole: CanonicalTimestampRole,
 }

 func SetValidRoles(rs map[string]string) {
-    for k, v := range rs {
-        ValidRoles[strings.ToLower(k)] = strings.ToLower(v)
+    // iterate ValidRoles
+    for k, _ := range ValidRoles {
+        if v, ok := rs[k]; ok {
+            ValidRoles[k] = v
+        }
     }
 }

@@ -27,6 +38,27 @@ func RoleName(role string) string {
     return role
 }

+func CanonicalRole(role string) string {
+    name := strings.ToLower(role)
+    if _, ok := ValidRoles[name]; ok {
+        // The canonical version is always lower case
+        // se ensure we return name, not role
+        return name
+    }
+    targetsBase := fmt.Sprintf("%s/", ValidRoles[CanonicalTargetsRole])
+    if strings.HasPrefix(name, targetsBase) {
+        role = strings.TrimPrefix(role, targetsBase)
+        role = fmt.Sprintf("%s/%s", CanonicalTargetsRole, role)
+        return role
+    }
+    for r, v := range ValidRoles {
+        if role == v {
+            return r
+        }
+    }
+    return ""
+}
+
 // ValidRole only determines the name is semantically
 // correct. For target delegated roles, it does NOT check
 // the the appropriate parent roles exist.

@@ -35,7 +67,7 @@ func ValidRole(name string) bool {
     if v, ok := ValidRoles[name]; ok {
         return name == v
     }
-    targetsBase := fmt.Sprintf("%s/", ValidRoles["targets"])
+    targetsBase := fmt.Sprintf("%s/", ValidRoles[CanonicalTargetsRole])
     if strings.HasPrefix(name, targetsBase) {
         return true
     }

@@ -112,6 +144,6 @@ func (r Role) CheckPrefixes(hash string) bool {
 }

 func (r Role) IsDelegation() bool {
-    targetsBase := fmt.Sprintf("%s/", ValidRoles["targets"])
+    targetsBase := fmt.Sprintf("%s/", ValidRoles[CanonicalTargetsRole])
     return strings.HasPrefix(r.Name, targetsBase)
 }

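The CanonicalRole helper added above normalizes role names back to their canonical form, including targets delegations. A brief sketch of the behaviour that follows directly from the code as written:

```go
package example

import (
	"fmt"

	"github.com/endophage/gotuf/data"
)

func describeRoles() {
	// Base roles map to themselves.
	fmt.Println(data.CanonicalRole("root")) // "root"

	// Delegations under targets are valid and keep their suffix,
	// anchored to the canonical targets role.
	fmt.Println(data.ValidRole("targets/releases"))     // true
	fmt.Println(data.CanonicalRole("targets/releases")) // "targets/releases"

	// Names that match no role and no delegation yield the empty string.
	fmt.Println(data.CanonicalRole("mirror")) // ""
}
```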
@@ -43,10 +43,10 @@ const (
 )

 var TUFTypes = map[string]string{
-    "targets":   "Targets",
-    "root":      "Root",
-    "snapshot":  "Snapshot",
-    "timestamp": "Timestamp",
+    CanonicalRootRole:      "Root",
+    CanonicalTargetsRole:   "Targets",
+    CanonicalSnapshotRole:  "Snapshot",
+    CanonicalTimestampRole: "Timestamp",
 }

 // SetTUFTypes allows one to override some or all of the default

@@ -57,19 +57,25 @@ func SetTUFTypes(ts map[string]string) {
     }
 }

 // Checks if type is correct.
-func ValidTUFType(t string) bool {
+func ValidTUFType(typ, role string) bool {
+    if ValidRole(role) {
+        // All targets delegation roles must have
+        // the valid type is for targets.
+        role = CanonicalRole(role)
+        if role == "" {
+            // role is unknown and does not map to
+            // a type
+            return false
+        }
+        if strings.HasPrefix(role, CanonicalTargetsRole+"/") {
+            role = CanonicalTargetsRole
+        }
+    }
     // most people will just use the defaults so have this optimal check
     // first. Do comparison just in case there is some unknown vulnerability
     // if a key and value in the map differ.
-    if v, ok := TUFTypes[t]; ok {
-        return t == v
-    }
-    // For people that feel the need to change the default type names.
-    for _, v := range TUFTypes {
-        if t == v {
-            return true
-        }
+    if v, ok := TUFTypes[role]; ok {
+        return typ == v
     }
     return false
 }

@@ -138,10 +144,10 @@ func NewDelegations() *Delegations {

 // defines number of days in which something should expire
 var defaultExpiryTimes = map[string]int{
-    "root":      365,
-    "targets":   90,
-    "snapshot":  7,
-    "timestamp": 1,
+    CanonicalRootRole:      365,
+    CanonicalTargetsRole:   90,
+    CanonicalSnapshotRole:  7,
+    CanonicalTimestampRole: 1,
 }

 // SetDefaultExpiryTimes allows one to change the default expiries.

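Because ValidTUFType is now role-aware, delegated targets roles are checked against the "Targets" type rather than needing their own entry. A small sketch of the resulting behaviour with the default TUFTypes values shown above:

```go
package example

import (
	"fmt"

	"github.com/endophage/gotuf/data"
)

func checkTypes() {
	fmt.Println(data.ValidTUFType("Targets", "targets"))          // true
	fmt.Println(data.ValidTUFType("Targets", "targets/releases")) // true: delegations use the targets type
	fmt.Println(data.ValidTUFType("Root", "targets"))             // false: wrong type for the role
}
```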
|
@ -27,3 +27,17 @@ type ErrRoleThreshold struct{}
|
|||
func (e ErrRoleThreshold) Error() string {
|
||||
return "valid signatures did not meet threshold"
|
||||
}
|
||||
|
||||
type ErrInvalidKeyType struct{}
|
||||
|
||||
func (e ErrInvalidKeyType) Error() string {
|
||||
return "key type is not valid for signature"
|
||||
}
|
||||
|
||||
type ErrInvalidKeyLength struct {
|
||||
msg string
|
||||
}
|
||||
|
||||
func (e ErrInvalidKeyLength) Error() string {
|
||||
return fmt.Sprintf("key length is not supported: %s", e.msg)
|
||||
}
|
||||
|
|
|
@@ -7,6 +7,7 @@ import (
     "crypto/sha256"
     "crypto/x509"
     "encoding/pem"
+    "fmt"
     "math/big"
     "reflect"

@@ -15,6 +16,11 @@ import (
     "github.com/endophage/gotuf/data"
 )

+const (
+    minRSAKeySizeBit  = 2048 // 2048 bits = 256 bytes
+    minRSAKeySizeByte = minRSAKeySizeBit / 8
+)
+
 // Verifiers serves as a map of all verifiers available on the system and
 // can be injected into a verificationService. For testing and configuration
 // purposes, it will not be used by default.

@@ -47,15 +53,27 @@ func RegisterVerifier(algorithm data.SigAlgorithm, v Verifier) {
 type Ed25519Verifier struct{}

 func (v Ed25519Verifier) Verify(key data.PublicKey, sig []byte, msg []byte) error {
+    if key.Algorithm() != data.ED25519Key {
+        return ErrInvalidKeyType{}
+    }
     var sigBytes [ed25519.SignatureSize]byte
-    if len(sig) != len(sigBytes) {
+    if len(sig) != ed25519.SignatureSize {
+        logrus.Infof("signature length is incorrect, must be %d, was %d.", ed25519.SignatureSize, len(sig))
         return ErrInvalid
     }
     copy(sigBytes[:], sig)

     var keyBytes [ed25519.PublicKeySize]byte
-    copy(keyBytes[:], key.Public())
+    pub := key.Public()
+    if len(pub) != ed25519.PublicKeySize {
+        logrus.Errorf("public key is incorrect size, must be %d, was %d.", ed25519.PublicKeySize, len(pub))
+        return ErrInvalidKeyLength{msg: fmt.Sprintf("ed25519 public key must be %d bytes.", ed25519.PublicKeySize)}
+    }
+    n := copy(keyBytes[:], key.Public())
+    if n < ed25519.PublicKeySize {
+        logrus.Errorf("failed to copy the key, must have %d bytes, copied %d bytes.", ed25519.PublicKeySize, n)
+        return ErrInvalid
+    }

     if !ed25519.Verify(&keyBytes, msg, &sigBytes) {
         logrus.Infof("failed ed25519 verification")

@@ -71,6 +89,16 @@ func verifyPSS(key interface{}, digest, sig []byte) error {
         return ErrInvalid
     }

+    if rsaPub.N.BitLen() < minRSAKeySizeBit {
+        logrus.Infof("RSA keys less than 2048 bits are not acceptable, provided key has length %d.", rsaPub.N.BitLen())
+        return ErrInvalidKeyLength{msg: fmt.Sprintf("RSA key must be at least %d bits.", minRSAKeySizeBit)}
+    }
+
+    if len(sig) < minRSAKeySizeByte {
+        logrus.Infof("RSA keys less than 2048 bits are not acceptable, provided signature has length %d.", len(sig))
+        return ErrInvalid
+    }
+
     opts := rsa.PSSOptions{SaltLength: sha256.Size, Hash: crypto.SHA256}
     if err := rsa.VerifyPSS(rsaPub, crypto.SHA256, digest[:], sig, &opts); err != nil {
         logrus.Infof("failed RSAPSS verification: %s", err)

@@ -104,8 +132,9 @@ func getRSAPubKey(key data.PublicKey) (crypto.PublicKey, error) {
             return nil, ErrInvalid
         }
     default:
+        // only accept RSA keys
         logrus.Infof("invalid key type for RSAPSS verifier: %s", algorithm)
-        return nil, ErrInvalid
+        return nil, ErrInvalidKeyType{}
     }

     return pubKey, nil

@@ -116,6 +145,7 @@ type RSAPSSVerifier struct{}

 // Verify does the actual check.
 func (v RSAPSSVerifier) Verify(key data.PublicKey, sig []byte, msg []byte) error {
+    // will return err if keytype is not a recognized RSA type
     pubKey, err := getRSAPubKey(key)
     if err != nil {
         return err

@@ -130,6 +160,7 @@ func (v RSAPSSVerifier) Verify(key data.PublicKey, sig []byte, msg []byte) error
 type RSAPKCS1v15Verifier struct{}

 func (v RSAPKCS1v15Verifier) Verify(key data.PublicKey, sig []byte, msg []byte) error {
+    // will return err if keytype is not a recognized RSA type
     pubKey, err := getRSAPubKey(key)
     if err != nil {
         return err

@@ -142,6 +173,16 @@ func (v RSAPKCS1v15Verifier) Verify(key data.PublicKey, sig []byte, msg []byte)
         return ErrInvalid
     }

+    if rsaPub.N.BitLen() < minRSAKeySizeBit {
+        logrus.Infof("RSA keys less than 2048 bits are not acceptable, provided key has length %d.", rsaPub.N.BitLen())
+        return ErrInvalidKeyLength{msg: fmt.Sprintf("RSA key must be at least %d bits.", minRSAKeySizeBit)}
+    }
+
+    if len(sig) < minRSAKeySizeByte {
+        logrus.Infof("RSA keys less than 2048 bits are not acceptable, provided signature has length %d.", len(sig))
+        return ErrInvalid
+    }
+
     if err = rsa.VerifyPKCS1v15(rsaPub, crypto.SHA256, digest[:], sig); err != nil {
         logrus.Errorf("Failed verification: %s", err.Error())
         return ErrInvalid

@@ -157,6 +198,9 @@ type RSAPyCryptoVerifier struct{}
 // with PyCrypto.
 func (v RSAPyCryptoVerifier) Verify(key data.PublicKey, sig []byte, msg []byte) error {
     digest := sha256.Sum256(msg)
+    if key.Algorithm() != data.RSAKey {
+        return ErrInvalidKeyType{}
+    }

     k, _ := pem.Decode([]byte(key.Public()))
     if k == nil {

@@ -203,8 +247,9 @@ func (v ECDSAVerifier) Verify(key data.PublicKey, sig []byte, msg []byte) error
             return ErrInvalid
         }
     default:
+        // only accept ECDSA keys.
         logrus.Infof("invalid key type for ECDSA verifier: %s", algorithm)
-        return ErrInvalid
+        return ErrInvalidKeyType{}
     }

     ecdsaPubKey, ok := pubKey.(*ecdsa.PublicKey)

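The verifiers now report key problems with typed errors instead of a bare ErrInvalid, so callers can tell an unusable key apart from a merely bad signature. A sketch of reacting to that, assuming the usual github.com/endophage/gotuf/signed import path for this vendored package:

```go
package example

import (
	"fmt"

	"github.com/endophage/gotuf/data"
	"github.com/endophage/gotuf/signed"
)

// verifyEd25519 distinguishes "this key can never work" from "the signature
// simply did not verify".
func verifyEd25519(key data.PublicKey, sig, msg []byte) error {
	err := signed.Ed25519Verifier{}.Verify(key, sig, msg)
	switch err.(type) {
	case nil:
		return nil
	case signed.ErrInvalidKeyType, signed.ErrInvalidKeyLength:
		// Wrong algorithm or undersized key: retrying with the same key is pointless.
		return fmt.Errorf("unusable key: %v", err)
	default:
		return err
	}
}
```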
@@ -22,9 +22,9 @@ var (
 )

 type signedMeta struct {
-    Type    string `json:"_type"`
-    Expires string `json:"expires"`
-    Version int    `json:"version"`
+    Type    string    `json:"_type"`
+    Expires time.Time `json:"expires"`
+    Version int       `json:"version"`
 }

 // VerifyRoot checks if a given root file is valid against a known set of keys.

@@ -80,12 +80,12 @@ func verifyMeta(s *data.Signed, role string, minVersion int) error {
     if err := json.Unmarshal(s.Signed, sm); err != nil {
         return err
     }
-    if !data.ValidTUFType(sm.Type) {
+    if !data.ValidTUFType(sm.Type, role) {
         return ErrWrongType
     }
     if IsExpired(sm.Expires) {
         logrus.Errorf("Metadata for %s expired", role)
-        return ErrExpired{Role: role, Expired: sm.Expires}
+        return ErrExpired{Role: role, Expired: sm.Expires.Format("Mon Jan 2 15:04:05 MST 2006")}
     }
     if sm.Version < minVersion {
         return ErrLowVersion{sm.Version, minVersion}

@@ -94,15 +94,8 @@ func verifyMeta(s *data.Signed, role string, minVersion int) error {
     return nil
 }

-var IsExpired = func(t string) bool {
-    ts, err := time.Parse(time.RFC3339, t)
-    if err != nil {
-        ts, err = time.Parse("2006-01-02 15:04:05 MST", t)
-        if err != nil {
-            return false
-        }
-    }
-    return ts.Sub(time.Now()) <= 0
+var IsExpired = func(t time.Time) bool {
+    return t.Before(time.Now())
 }

 func VerifySignatures(s *data.Signed, role string, db *keys.KeyDB) error {

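IsExpired now takes a time.Time directly, since signedMeta parses the expiry during JSON unmarshalling rather than keeping it as a string. A minimal sketch, assuming the usual github.com/endophage/gotuf/signed import path:

```go
package example

import (
	"fmt"
	"time"

	"github.com/endophage/gotuf/signed"
)

func checkExpiry() {
	fmt.Println(signed.IsExpired(time.Now().Add(-time.Hour))) // true: already in the past
	fmt.Println(signed.IsExpired(time.Now().Add(time.Hour)))  // false: still valid
}
```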