vendor: github.com/theupdateframework/notary v0.7.0

full diff: https://github.com/theupdateframework/notary/compare/v0.6.1...v0.7.0

Changelog:

v0.7.0 12/01/2021
------------------------

- Switch to Go modules
- Use golang/x/crypto for ed25519
- Update Go version
- Update dependency versions
- Fixes from using Gosec for source analysis

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
Sebastiaan van Stijn 2021-01-14 16:10:01 +01:00
parent 9a3fdc1d64
commit 9f6966d4ec
33 changed files with 741 additions and 3870 deletions

@@ -1,5 +1,4 @@
cloud.google.com/go ceeb313ad77b789a7fa5287b36a1d127b69b7093 # v0.44.3
github.com/agl/ed25519 5312a61534124124185d41f09206b9fef1d88403
github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109
github.com/beorn7/perks 37c8de3658fcb183f997c4e13e8337516ab753e6 # v1.0.1
github.com/cespare/xxhash/v2 d7df74196a9e781ede915320c11c378c1b2f3a1f # v2.1.1
@@ -68,7 +67,7 @@ github.com/shurcooL/sanitized_anchor_name 7bfe4c7ecddb3666a94b053b422c
github.com/sirupsen/logrus 6699a89a232f3db797f2e280639854bbc4b89725 # v1.7.0
github.com/spf13/cobra 86f8bfd7fef868a174e1b606783bd7f5c82ddf8f # v1.1.1
github.com/spf13/pflag 2e9d26c8c37aae03e3f9d4e90b7116f5accb7cab # v1.0.5
github.com/theupdateframework/notary d6e1431feb32348e0650bf7551ac5cffd01d857b # v0.6.1
github.com/theupdateframework/notary b0b6bfdd4933081e8d5ae026b24e8337311dd598 # v0.7.0
github.com/tonistiigi/fsutil 0834f99b7b85462efb69b4f571a4fa3ca7da5ac9
github.com/tonistiigi/go-rosetta f79598599c5d34ea253b56a1d7c89bc6a96de7db
github.com/tonistiigi/units 6950e57a87eaf136bbe44ef2ec8e75b9e3569de2

@@ -1,27 +0,0 @@
Copyright (c) 2012 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

@@ -1,127 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package ed25519 implements the Ed25519 signature algorithm. See
// http://ed25519.cr.yp.to/.
package ed25519
// This code is a port of the public domain, "ref10" implementation of ed25519
// from SUPERCOP.
import (
"crypto/sha512"
"crypto/subtle"
"io"
"github.com/agl/ed25519/edwards25519"
)
const (
PublicKeySize = 32
PrivateKeySize = 64
SignatureSize = 64
)
// GenerateKey generates a public/private key pair using randomness from rand.
func GenerateKey(rand io.Reader) (publicKey *[PublicKeySize]byte, privateKey *[PrivateKeySize]byte, err error) {
privateKey = new([64]byte)
publicKey = new([32]byte)
_, err = io.ReadFull(rand, privateKey[:32])
if err != nil {
return nil, nil, err
}
h := sha512.New()
h.Write(privateKey[:32])
digest := h.Sum(nil)
digest[0] &= 248
digest[31] &= 127
digest[31] |= 64
var A edwards25519.ExtendedGroupElement
var hBytes [32]byte
copy(hBytes[:], digest)
edwards25519.GeScalarMultBase(&A, &hBytes)
A.ToBytes(publicKey)
copy(privateKey[32:], publicKey[:])
return
}
// Sign signs the message with privateKey and returns a signature.
func Sign(privateKey *[PrivateKeySize]byte, message []byte) *[SignatureSize]byte {
h := sha512.New()
h.Write(privateKey[:32])
var digest1, messageDigest, hramDigest [64]byte
var expandedSecretKey [32]byte
h.Sum(digest1[:0])
copy(expandedSecretKey[:], digest1[:])
expandedSecretKey[0] &= 248
expandedSecretKey[31] &= 63
expandedSecretKey[31] |= 64
h.Reset()
h.Write(digest1[32:])
h.Write(message)
h.Sum(messageDigest[:0])
var messageDigestReduced [32]byte
edwards25519.ScReduce(&messageDigestReduced, &messageDigest)
var R edwards25519.ExtendedGroupElement
edwards25519.GeScalarMultBase(&R, &messageDigestReduced)
var encodedR [32]byte
R.ToBytes(&encodedR)
h.Reset()
h.Write(encodedR[:])
h.Write(privateKey[32:])
h.Write(message)
h.Sum(hramDigest[:0])
var hramDigestReduced [32]byte
edwards25519.ScReduce(&hramDigestReduced, &hramDigest)
var s [32]byte
edwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced)
signature := new([64]byte)
copy(signature[:], encodedR[:])
copy(signature[32:], s[:])
return signature
}
// Verify returns true iff sig is a valid signature of message by publicKey.
func Verify(publicKey *[PublicKeySize]byte, message []byte, sig *[SignatureSize]byte) bool {
if sig[63]&224 != 0 {
return false
}
var A edwards25519.ExtendedGroupElement
if !A.FromBytes(publicKey) {
return false
}
edwards25519.FeNeg(&A.X, &A.X)
edwards25519.FeNeg(&A.T, &A.T)
h := sha512.New()
h.Write(sig[:32])
h.Write(publicKey[:])
h.Write(message)
var digest [64]byte
h.Sum(digest[:0])
var hReduced [32]byte
edwards25519.ScReduce(&hReduced, &digest)
var R edwards25519.ProjectiveGroupElement
var b [32]byte
copy(b[:], sig[32:])
edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &b)
var checkR [32]byte
R.ToBytes(&checkR)
return subtle.ConstantTimeCompare(sig[:32], checkR[:]) == 1
}

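The agl/ed25519 code removed above is superseded by golang.org/x/crypto/ed25519 (per the "Use golang/x/crypto for ed25519" changelog entry). Below is a minimal sketch, not part of this diff, of the equivalent key generation, signing, and verification with that package; the message bytes are just a placeholder.

```go
package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/ed25519"
)

func main() {
	// GenerateKey replaces agl/ed25519.GenerateKey; it returns byte slices
	// (PublicKey, PrivateKey) instead of pointers to fixed-size arrays.
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}

	msg := []byte("hello notary") // placeholder message

	// Sign and Verify correspond to the removed package-level functions.
	sig := ed25519.Sign(priv, msg)
	fmt.Println("valid:", ed25519.Verify(pub, msg, sig))
}
```
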
File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -21,7 +21,7 @@ for more information.
Notary aims to make the internet more secure by making it easy for people to
publish and verify content. We often rely on TLS to secure our communications
with a web server which is inherently flawed, as any compromise of the server
with a web server, which is inherently flawed, as any compromise of the server
enables malicious content to be substituted for the legitimate content.
With Notary, publishers can sign their content offline using keys kept highly
@@ -46,11 +46,16 @@ Notary is based on [The Update Framework](https://www.theupdateframework.com/),
## Security
Any security vulnerabilities can be reported to security@docker.com.
See Notary's [service architecture docs](docs/service_architecture.md#threat-model) for more information about our threat model, which details the varying survivability and severities for key compromise as well as mitigations.
Notary's last security audit was on July 31, 2015 by NCC ([results](docs/resources/ncc_docker_notary_audit_2015_07_31.pdf)).
### Security Audits
Any security vulnerabilities can be reported to security@docker.com.
Notary has had two public security audits:
* [August 7, 2018 by Cure53](docs/resources/cure53_tuf_notary_audit_2018_08_07.pdf) covering TUF and Notary
* [July 31, 2015 by NCC](docs/resources/ncc_docker_notary_audit_2015_07_31.pdf) covering Notary
# Getting started with the Notary CLI
@@ -65,7 +70,7 @@ For more advanced usage, see the
To use the CLI against a local Notary server rather than against Docker Hub:
1. Ensure that you have [docker and docker-compose](http://docs.docker.com/compose/install/) installed.
1. Ensure that you have [docker and docker-compose](https://docs.docker.com/compose/install/) installed.
1. `git clone https://github.com/theupdateframework/notary.git` and from the cloned repository path,
start up a local Notary server and signer and copy the config file and testing certs to your
local Notary config directory:
@@ -88,6 +93,20 @@ URL is specified already in the configuration, file you copied.
You can also leave off the `-d ~/.docker/trust` argument if you do not care
to use `notary` with Docker images.
## Upgrading dependencies
To prevent mistakes in vendoring the go modules a buildscript has been added to properly vendor the modules using the correct version of Go to mitigate differences in CI and development environment.
Following procedure should be executed to upgrade a dependency. Preferably keep dependency upgrades in a separate commit from your code changes.
```bash
go get -u github.com/spf13/viper
buildscripts/circle-validate-vendor.sh
git add .
git commit -m "Upgraded github.com/spf13/viper"
```
The `buildscripts/circle-validate-vendor.sh` runs `go mod tidy` and `go mod vendor` using the given version of Go to prevent differences if you are for example running on a different version of Go.
## Building Notary
@@ -97,25 +116,20 @@ branch and contains features for the next release.
Prerequisites:
- Go >= 1.7.1
- Fedora: `dnf install golang`
- libtool development headers installed
- Ubuntu: `apt-get install libltdl-dev`
- CentOS/RedHat: `yum install libtool-ltdl-devel`
- Fedora: `dnf install libtool-ltdl-devel`
- Mac OS ([Homebrew](http://brew.sh/)): `brew install libtool`
* Go >= 1.12
Set [```GOPATH```](https://golang.org/doc/code.html#GOPATH). Then, run:
```bash
$ export GO111MODULE=on
$ go get github.com/theupdateframework/notary
# build with pcks11 support by default to support yubikey
# build with pkcs11 support by default to support yubikey
$ go install -tags pkcs11 github.com/theupdateframework/notary/cmd/notary
$ notary
```
To build the server and signer, run `docker-compose build`.
## License
[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Ftheupdateframework%2Fnotary.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Ftheupdateframework%2Fnotary?ref=badge_large)

@@ -35,7 +35,10 @@ func getFileNames(dirName string) ([]os.FileInfo, error) {
if err != nil {
return fileInfos, err
}
defer dir.Close()
defer func() {
_ = dir.Close()
}()
dirListing, err = dir.Readdir(0)
if err != nil {
return fileInfos, err
@@ -89,7 +92,7 @@ func (cl FileChangelist) Add(c Change) error {
return err
}
filename := fmt.Sprintf("%020d_%s.change", time.Now().UnixNano(), uuid.Generate())
return ioutil.WriteFile(filepath.Join(cl.dir, filename), cJSON, 0644)
return ioutil.WriteFile(filepath.Join(cl.dir, filename), cJSON, 0600)
}
// Remove deletes the changes found at the given indices
@@ -120,7 +123,10 @@ func (cl FileChangelist) Clear(archive string) error {
if err != nil {
return err
}
defer dir.Close()
defer func() {
_ = dir.Close()
}()
files, err := dir.Readdir(0)
if err != nil {
return err

@@ -20,7 +20,7 @@ type Changelist interface {
// Remove deletes the changes corresponding with the indices given
Remove(idxs []int) error
// Close syncronizes any pending writes to the underlying
// Close synchronizes any pending writes to the underlying
// storage and closes the file/connection
Close() error

@@ -7,10 +7,8 @@ import (
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"path/filepath"
"regexp"
"time"
canonicaljson "github.com/docker/go/canonical/json"
@@ -39,7 +37,6 @@ func init() {
// repository stores all the information needed to operate on a notary repository.
type repository struct {
baseDir string
gun data.GUN
baseURL string
changelist changelist.Changelist
@@ -56,7 +53,8 @@ type repository struct {
// NewFileCachedRepository is a wrapper for NewRepository that initializes
// a file cache from the provided repository, local config information and a crypto service.
// It also retrieves the remote store associated to the base directory under where all the
// trust files will be stored and the specified GUN.
// trust files will be stored (This is normally defaults to "~/.notary" or "~/.docker/trust"
// when enabling Docker content trust) and the specified GUN.
//
// In case of a nil RoundTripper, a default offline store is used instead.
func NewFileCachedRepository(baseDir string, gun data.GUN, baseURL string, rt http.RoundTripper,
@@ -90,16 +88,13 @@ func NewFileCachedRepository(baseDir string, gun data.GUN, baseURL string, rt ht
return nil, err
}
return NewRepository(baseDir, gun, baseURL, remoteStore, cache, trustPinning, cryptoService, cl)
return NewRepository(gun, baseURL, remoteStore, cache, trustPinning, cryptoService, cl)
}
// NewRepository is the base method that returns a new notary repository.
// It takes the base directory under where all the trust files will be stored
// (This is normally defaults to "~/.notary" or "~/.docker/trust" when enabling
// docker content trust).
// It expects an initialized cache. In case of a nil remote store, a default
// offline store is used.
func NewRepository(baseDir string, gun data.GUN, baseURL string, remoteStore store.RemoteStore, cache store.MetadataStore,
func NewRepository(gun data.GUN, baseURL string, remoteStore store.RemoteStore, cache store.MetadataStore,
trustPinning trustpinning.TrustPinConfig, cryptoService signed.CryptoService, cl changelist.Changelist) (Repository, error) {
// Repo's remote store is either a valid remote store or an OfflineStore
@@ -114,7 +109,6 @@ func NewRepository(baseDir string, gun data.GUN, baseURL string, remoteStore sto
nRepo := &repository{
gun: gun,
baseURL: baseURL,
baseDir: baseDir,
changelist: cl,
cache: cache,
remoteStore: remoteStore,
@@ -131,20 +125,62 @@ func (r *repository) GetGUN() data.GUN {
return r.gun
}
// Target represents a simplified version of the data TUF operates on, so external
// applications don't have to depend on TUF data types.
type Target struct {
Name string // the name of the target
Hashes data.Hashes // the hash of the target
Length int64 // the size in bytes of the target
Custom *canonicaljson.RawMessage // the custom data provided to describe the file at TARGETPATH
func (r *repository) updateTUF(forWrite bool) error {
repo, invalid, err := LoadTUFRepo(TUFLoadOptions{
GUN: r.gun,
TrustPinning: r.trustPinning,
CryptoService: r.cryptoService,
Cache: r.cache,
RemoteStore: r.remoteStore,
AlwaysCheckInitialized: forWrite,
})
if err != nil {
return err
}
r.tufRepo = repo
r.invalid = invalid
return nil
}
// TargetWithRole represents a Target that exists in a particular role - this is
// produced by ListTargets and GetTargetByName
type TargetWithRole struct {
Target
Role data.RoleName
// ListTargets calls update first before listing targets
func (r *repository) ListTargets(roles ...data.RoleName) ([]*TargetWithRole, error) {
if err := r.updateTUF(false); err != nil {
return nil, err
}
return NewReadOnly(r.tufRepo).ListTargets(roles...)
}
// GetTargetByName calls update first before getting target by name
func (r *repository) GetTargetByName(name string, roles ...data.RoleName) (*TargetWithRole, error) {
if err := r.updateTUF(false); err != nil {
return nil, err
}
return NewReadOnly(r.tufRepo).GetTargetByName(name, roles...)
}
// GetAllTargetMetadataByName calls update first before getting targets by name
func (r *repository) GetAllTargetMetadataByName(name string) ([]TargetSignedStruct, error) {
if err := r.updateTUF(false); err != nil {
return nil, err
}
return NewReadOnly(r.tufRepo).GetAllTargetMetadataByName(name)
}
// ListRoles calls update first before getting roles
func (r *repository) ListRoles() ([]RoleWithSignatures, error) {
if err := r.updateTUF(false); err != nil {
return nil, err
}
return NewReadOnly(r.tufRepo).ListRoles()
}
// GetDelegationRoles calls update first before getting all delegation roles
func (r *repository) GetDelegationRoles() ([]data.Role, error) {
if err := r.updateTUF(false); err != nil {
return nil, err
}
return NewReadOnly(r.tufRepo).GetDelegationRoles()
}
// NewTarget is a helper method that returns a Target
@@ -493,167 +529,6 @@ func (r *repository) RemoveTarget(targetName string, roles ...data.RoleName) err
return addChange(r.changelist, template, roles...)
}
// ListTargets lists all targets for the current repository. The list of
// roles should be passed in order from highest to lowest priority.
//
// IMPORTANT: if you pass a set of roles such as [ "targets/a", "targets/x"
// "targets/a/b" ], even though "targets/a/b" is part of the "targets/a" subtree
// its entries will be strictly shadowed by those in other parts of the "targets/a"
// subtree and also the "targets/x" subtree, as we will defer parsing it until
// we explicitly reach it in our iteration of the provided list of roles.
func (r *repository) ListTargets(roles ...data.RoleName) ([]*TargetWithRole, error) {
if err := r.Update(false); err != nil {
return nil, err
}
if len(roles) == 0 {
roles = []data.RoleName{data.CanonicalTargetsRole}
}
targets := make(map[string]*TargetWithRole)
for _, role := range roles {
// Define an array of roles to skip for this walk (see IMPORTANT comment above)
skipRoles := utils.RoleNameSliceRemove(roles, role)
// Define a visitor function to populate the targets map in priority order
listVisitorFunc := func(tgt *data.SignedTargets, validRole data.DelegationRole) interface{} {
// We found targets so we should try to add them to our targets map
for targetName, targetMeta := range tgt.Signed.Targets {
// Follow the priority by not overriding previously set targets
// and check that this path is valid with this role
if _, ok := targets[targetName]; ok || !validRole.CheckPaths(targetName) {
continue
}
targets[targetName] = &TargetWithRole{
Target: Target{
Name: targetName,
Hashes: targetMeta.Hashes,
Length: targetMeta.Length,
Custom: targetMeta.Custom,
},
Role: validRole.Name,
}
}
return nil
}
r.tufRepo.WalkTargets("", role, listVisitorFunc, skipRoles...)
}
var targetList []*TargetWithRole
for _, v := range targets {
targetList = append(targetList, v)
}
return targetList, nil
}
// GetTargetByName returns a target by the given name. If no roles are passed
// it uses the targets role and does a search of the entire delegation
// graph, finding the first entry in a breadth first search of the delegations.
// If roles are passed, they should be passed in descending priority and
// the target entry found in the subtree of the highest priority role
// will be returned.
// See the IMPORTANT section on ListTargets above. Those roles also apply here.
func (r *repository) GetTargetByName(name string, roles ...data.RoleName) (*TargetWithRole, error) {
if err := r.Update(false); err != nil {
return nil, err
}
if len(roles) == 0 {
roles = append(roles, data.CanonicalTargetsRole)
}
var resultMeta data.FileMeta
var resultRoleName data.RoleName
var foundTarget bool
for _, role := range roles {
// Define an array of roles to skip for this walk (see IMPORTANT comment above)
skipRoles := utils.RoleNameSliceRemove(roles, role)
// Define a visitor function to find the specified target
getTargetVisitorFunc := func(tgt *data.SignedTargets, validRole data.DelegationRole) interface{} {
if tgt == nil {
return nil
}
// We found the target and validated path compatibility in our walk,
// so we should stop our walk and set the resultMeta and resultRoleName variables
if resultMeta, foundTarget = tgt.Signed.Targets[name]; foundTarget {
resultRoleName = validRole.Name
return tuf.StopWalk{}
}
return nil
}
// Check that we didn't error, and that we assigned to our target
if err := r.tufRepo.WalkTargets(name, role, getTargetVisitorFunc, skipRoles...); err == nil && foundTarget {
return &TargetWithRole{Target: Target{Name: name, Hashes: resultMeta.Hashes, Length: resultMeta.Length, Custom: resultMeta.Custom}, Role: resultRoleName}, nil
}
}
return nil, ErrNoSuchTarget(name)
}
// TargetSignedStruct is a struct that contains a Target, the role it was found in, and the list of signatures for that role
type TargetSignedStruct struct {
Role data.DelegationRole
Target Target
Signatures []data.Signature
}
//ErrNoSuchTarget is returned when no valid trust data is found.
type ErrNoSuchTarget string
func (f ErrNoSuchTarget) Error() string {
return fmt.Sprintf("No valid trust data for %s", string(f))
}
// GetAllTargetMetadataByName searches the entire delegation role tree to find the specified target by name for all
// roles, and returns a list of TargetSignedStructs for each time it finds the specified target.
// If given an empty string for a target name, it will return back all targets signed into the repository in every role
func (r *repository) GetAllTargetMetadataByName(name string) ([]TargetSignedStruct, error) {
if err := r.Update(false); err != nil {
return nil, err
}
var targetInfoList []TargetSignedStruct
// Define a visitor function to find the specified target
getAllTargetInfoByNameVisitorFunc := func(tgt *data.SignedTargets, validRole data.DelegationRole) interface{} {
if tgt == nil {
return nil
}
// We found a target and validated path compatibility in our walk,
// so add it to our list if we have a match
// if we have an empty name, add all targets, else check if we have it
var targetMetaToAdd data.Files
if name == "" {
targetMetaToAdd = tgt.Signed.Targets
} else {
if meta, ok := tgt.Signed.Targets[name]; ok {
targetMetaToAdd = data.Files{name: meta}
}
}
for targetName, resultMeta := range targetMetaToAdd {
targetInfo := TargetSignedStruct{
Role: validRole,
Target: Target{Name: targetName, Hashes: resultMeta.Hashes, Length: resultMeta.Length, Custom: resultMeta.Custom},
Signatures: tgt.Signatures,
}
targetInfoList = append(targetInfoList, targetInfo)
}
// continue walking to all child roles
return nil
}
// Check that we didn't error, and that we found the target at least once
if err := r.tufRepo.WalkTargets(name, "", getAllTargetInfoByNameVisitorFunc); err != nil {
return nil, err
}
if len(targetInfoList) == 0 {
return nil, ErrNoSuchTarget(name)
}
return targetInfoList, nil
}
// GetChangelist returns the list of the repository's unpublished changes
func (r *repository) GetChangelist() (changelist.Changelist, error) {
return r.changelist, nil
@@ -671,51 +546,6 @@ func (r *repository) getRemoteStore() store.RemoteStore {
return r.remoteStore
}
// RoleWithSignatures is a Role with its associated signatures
type RoleWithSignatures struct {
Signatures []data.Signature
data.Role
}
// ListRoles returns a list of RoleWithSignatures objects for this repo
// This represents the latest metadata for each role in this repo
func (r *repository) ListRoles() ([]RoleWithSignatures, error) {
// Update to latest repo state
if err := r.Update(false); err != nil {
return nil, err
}
// Get all role info from our updated keysDB, can be empty
roles := r.tufRepo.GetAllLoadedRoles()
var roleWithSigs []RoleWithSignatures
// Populate RoleWithSignatures with Role from keysDB and signatures from TUF metadata
for _, role := range roles {
roleWithSig := RoleWithSignatures{Role: *role, Signatures: nil}
switch role.Name {
case data.CanonicalRootRole:
roleWithSig.Signatures = r.tufRepo.Root.Signatures
case data.CanonicalTargetsRole:
roleWithSig.Signatures = r.tufRepo.Targets[data.CanonicalTargetsRole].Signatures
case data.CanonicalSnapshotRole:
roleWithSig.Signatures = r.tufRepo.Snapshot.Signatures
case data.CanonicalTimestampRole:
roleWithSig.Signatures = r.tufRepo.Timestamp.Signatures
default:
if !data.IsDelegation(role.Name) {
continue
}
if _, ok := r.tufRepo.Targets[role.Name]; ok {
// We'll only find a signature if we've published any targets with this delegation
roleWithSig.Signatures = r.tufRepo.Targets[role.Name].Signatures
}
}
roleWithSigs = append(roleWithSigs, roleWithSig)
}
return roleWithSigs, nil
}
// Publish pushes the local changes in signed material to the remote notary-server
// Conceptually it performs an operation similar to a `git rebase`
func (r *repository) Publish() error {
@@ -736,7 +566,7 @@ func (r *repository) Publish() error {
func (r *repository) publish(cl changelist.Changelist) error {
var initialPublish bool
// update first before publishing
if err := r.Update(true); err != nil {
if err := r.updateTUF(true); err != nil {
// If the remote is not aware of the repo, then this is being published
// for the first time. Try to initialize the repository before publishing.
if _, ok := err.(ErrRepositoryNotExist); ok {
@@ -863,7 +693,14 @@ func (r *repository) oldKeysForLegacyClientSupport(legacyVersions int, initialPu
}
oldKeys := make(map[string]data.PublicKey)
c, err := r.bootstrapClient(true)
c, err := bootstrapClient(TUFLoadOptions{
GUN: r.gun,
TrustPinning: r.trustPinning,
CryptoService: r.cryptoService,
Cache: r.cache,
RemoteStore: r.remoteStore,
AlwaysCheckInitialized: true,
})
// require a server connection to fetch old roots
if err != nil {
return nil, err
@@ -1003,135 +840,6 @@ func (r *repository) saveMetadata(ignoreSnapshot bool) error {
return r.cache.Set(data.CanonicalSnapshotRole.String(), snapshotJSON)
}
// returns a properly constructed ErrRepositoryNotExist error based on this
// repo's information
func (r *repository) errRepositoryNotExist() error {
host := r.baseURL
parsed, err := url.Parse(r.baseURL)
if err == nil {
host = parsed.Host // try to exclude the scheme and any paths
}
return ErrRepositoryNotExist{remote: host, gun: r.gun}
}
// Update bootstraps a trust anchor (root.json) before updating all the
// metadata from the repo.
func (r *repository) Update(forWrite bool) error {
c, err := r.bootstrapClient(forWrite)
if err != nil {
if _, ok := err.(store.ErrMetaNotFound); ok {
return r.errRepositoryNotExist()
}
return err
}
repo, invalid, err := c.Update()
if err != nil {
// notFound.Resource may include a version or checksum so when the role is root,
// it will be root, <version>.root or root.<checksum>.
notFound, ok := err.(store.ErrMetaNotFound)
isRoot, _ := regexp.MatchString(`\.?`+data.CanonicalRootRole.String()+`\.?`, notFound.Resource)
if ok && isRoot {
return r.errRepositoryNotExist()
}
return err
}
// we can be assured if we are at this stage that the repo we built is good
// no need to test the following function call for an error as it will always be fine should the repo be good- it is!
r.tufRepo = repo
r.invalid = invalid
warnRolesNearExpiry(repo)
return nil
}
// bootstrapClient attempts to bootstrap a root.json to be used as the trust
// anchor for a repository. The checkInitialized argument indicates whether
// we should always attempt to contact the server to determine if the repository
// is initialized or not. If set to true, we will always attempt to download
// and return an error if the remote repository errors.
//
// Populates a tuf.RepoBuilder with this root metadata. If the root metadata
// downloaded is a newer version than what is on disk, then intermediate
// versions will be downloaded and verified in order to rotate trusted keys
// properly. Newer root metadata must always be signed with the previous
// threshold and keys.
//
// Fails if the remote server is reachable and does not know the repo
// (i.e. before the first r.Publish()), in which case the error is
// store.ErrMetaNotFound, or if the root metadata (from whichever source is used)
// is not trusted.
//
// Returns a TUFClient for the remote server, which may not be actually
// operational (if the URL is invalid but a root.json is cached).
func (r *repository) bootstrapClient(checkInitialized bool) (*tufClient, error) {
minVersion := 1
// the old root on disk should not be validated against any trust pinning configuration
// because if we have an old root, it itself is the thing that pins trust
oldBuilder := tuf.NewRepoBuilder(r.gun, r.GetCryptoService(), trustpinning.TrustPinConfig{})
// by default, we want to use the trust pinning configuration on any new root that we download
newBuilder := tuf.NewRepoBuilder(r.gun, r.GetCryptoService(), r.trustPinning)
// Try to read root from cache first. We will trust this root until we detect a problem
// during update which will cause us to download a new root and perform a rotation.
// If we have an old root, and it's valid, then we overwrite the newBuilder to be one
// preloaded with the old root or one which uses the old root for trust bootstrapping.
if rootJSON, err := r.cache.GetSized(data.CanonicalRootRole.String(), store.NoSizeLimit); err == nil {
// if we can't load the cached root, fail hard because that is how we pin trust
if err := oldBuilder.Load(data.CanonicalRootRole, rootJSON, minVersion, true); err != nil {
return nil, err
}
// again, the root on disk is the source of trust pinning, so use an empty trust
// pinning configuration
newBuilder = tuf.NewRepoBuilder(r.gun, r.GetCryptoService(), trustpinning.TrustPinConfig{})
if err := newBuilder.Load(data.CanonicalRootRole, rootJSON, minVersion, false); err != nil {
// Ok, the old root is expired - we want to download a new one. But we want to use the
// old root to verify the new root, so bootstrap a new builder with the old builder
// but use the trustpinning to validate the new root
minVersion = oldBuilder.GetLoadedVersion(data.CanonicalRootRole)
newBuilder = oldBuilder.BootstrapNewBuilderWithNewTrustpin(r.trustPinning)
}
}
remote := r.getRemoteStore()
if !newBuilder.IsLoaded(data.CanonicalRootRole) || checkInitialized {
// remoteErr was nil and we were not able to load a root from cache or
// are specifically checking for initialization of the repo.
// if remote store successfully set up, try and get root from remote
// We don't have any local data to determine the size of root, so try the maximum (though it is restricted at 100MB)
tmpJSON, err := remote.GetSized(data.CanonicalRootRole.String(), store.NoSizeLimit)
if err != nil {
// we didn't have a root in cache and were unable to load one from
// the server. Nothing we can do but error.
return nil, err
}
if !newBuilder.IsLoaded(data.CanonicalRootRole) {
// we always want to use the downloaded root if we couldn't load from cache
if err := newBuilder.Load(data.CanonicalRootRole, tmpJSON, minVersion, false); err != nil {
return nil, err
}
err = r.cache.Set(data.CanonicalRootRole.String(), tmpJSON)
if err != nil {
// if we can't write cache we should still continue, just log error
logrus.Errorf("could not save root to cache: %s", err.Error())
}
}
}
// We can only get here if remoteErr != nil (hence we don't download any new root),
// and there was no root on disk
if !newBuilder.IsLoaded(data.CanonicalRootRole) {
return nil, ErrRepoNotInitialized{}
}
return newTufClient(oldBuilder, newBuilder, remote, r.cache), nil
}
// RotateKey removes all existing keys associated with the role. If no keys are
// specified in keyList, then this creates and adds one new key or delegates
// managing the key to the server. If key(s) are specified by keyList, then they are
@@ -1273,7 +981,7 @@ func DeleteTrustData(baseDir string, gun data.GUN, URL string, rt http.RoundTrip
if deleteRemote {
remote, err := getRemoteStore(URL, gun, rt)
if err != nil {
logrus.Error("unable to instantiate a remote store: %v", err)
logrus.Errorf("unable to instantiate a remote store: %v", err)
return err
}
if err := remote.RemoveAll(); err != nil {

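Note that `NewRepository` (and, internally, `NewFileCachedRepository`) dropped the `baseDir` parameter in this release, so downstream callers of the vendored package must update their call sites. Below is a hedged sketch, not part of this diff, of the new signature with placeholder values; `changelist.NewMemChangelist` is assumed from the changelist package and is not shown in this diff.

```go
package notaryexample

import (
	"github.com/theupdateframework/notary/client"
	"github.com/theupdateframework/notary/client/changelist"
	"github.com/theupdateframework/notary/cryptoservice"
	store "github.com/theupdateframework/notary/storage"
	"github.com/theupdateframework/notary/trustpinning"
	"github.com/theupdateframework/notary/tuf/data"
)

// newOfflineRepo constructs a repository with the v0.7.0 signature of
// client.NewRepository, which no longer takes a baseDir argument.
func newOfflineRepo() (client.Repository, error) {
	return client.NewRepository(
		data.GUN("example.com/app"),   // placeholder GUN
		"https://notary.example.com",  // placeholder server URL
		nil,                           // nil remote store falls back to an offline store
		store.NewMemoryStore(nil),     // metadata cache; must be initialized
		trustpinning.TrustPinConfig{}, // no trust pinning
		cryptoservice.EmptyService,    // added in this release (see cryptoservice diff below)
		changelist.NewMemChangelist(), // assumed in-memory changelist constructor
	)
}
```
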
@@ -7,7 +7,6 @@ import (
"github.com/sirupsen/logrus"
"github.com/theupdateframework/notary"
"github.com/theupdateframework/notary/client/changelist"
store "github.com/theupdateframework/notary/storage"
"github.com/theupdateframework/notary/tuf/data"
"github.com/theupdateframework/notary/tuf/utils"
)
@@ -77,7 +76,7 @@ func (r *repository) AddDelegationPaths(name data.RoleName, paths []string) erro
}
// RemoveDelegationKeysAndPaths creates changelist entries to remove provided delegation key IDs and paths.
// This method composes RemoveDelegationPaths and RemoveDelegationKeys (each creates one changelist if called).
// This method composes RemoveDelegationPaths and RemoveDelegationKeys (each creates one changelist entry if called).
func (r *repository) RemoveDelegationKeysAndPaths(name data.RoleName, keyIDs, paths []string) error {
if len(paths) > 0 {
err := r.RemoveDelegationPaths(name, paths)
@@ -201,41 +200,6 @@ func newDeleteDelegationChange(name data.RoleName, content []byte) *changelist.T
)
}
// GetDelegationRoles returns the keys and roles of the repository's delegations
// Also converts key IDs to canonical key IDs to keep consistent with signing prompts
func (r *repository) GetDelegationRoles() ([]data.Role, error) {
// Update state of the repo to latest
if err := r.Update(false); err != nil {
return nil, err
}
// All top level delegations (ex: targets/level1) are stored exclusively in targets.json
_, ok := r.tufRepo.Targets[data.CanonicalTargetsRole]
if !ok {
return nil, store.ErrMetaNotFound{Resource: data.CanonicalTargetsRole.String()}
}
// make a copy for traversing nested delegations
allDelegations := []data.Role{}
// Define a visitor function to populate the delegations list and translate their key IDs to canonical IDs
delegationCanonicalListVisitor := func(tgt *data.SignedTargets, validRole data.DelegationRole) interface{} {
// For the return list, update with a copy that includes canonicalKeyIDs
// These aren't validated by the validRole
canonicalDelegations, err := translateDelegationsToCanonicalIDs(tgt.Signed.Delegations)
if err != nil {
return err
}
allDelegations = append(allDelegations, canonicalDelegations...)
return nil
}
err := r.tufRepo.WalkTargets("", "", delegationCanonicalListVisitor)
if err != nil {
return nil, err
}
return allDelegations, nil
}
func translateDelegationsToCanonicalIDs(delegationInfo data.Delegations) ([]data.Role, error) {
canonicalDelegations := make([]data.Role, len(delegationInfo.Roles))
// Do a copy by value to ensure local delegation metadata is untouched

@@ -6,42 +6,145 @@ import (
"github.com/theupdateframework/notary/tuf/signed"
)
// Repository represents the set of options that must be supported over a TUF repo.
type Repository interface {
// General management operations
Initialize(rootKeyIDs []string, serverManagedRoles ...data.RoleName) error
InitializeWithCertificate(rootKeyIDs []string, rootCerts []data.PublicKey, serverManagedRoles ...data.RoleName) error
Publish() error
// Target Operations
AddTarget(target *Target, roles ...data.RoleName) error
RemoveTarget(targetName string, roles ...data.RoleName) error
// ReadOnly represents the set of options that must be supported over a TUF repo for
// reading
type ReadOnly interface {
// ListTargets lists all targets for the current repository. The list of
// roles should be passed in order from highest to lowest priority.
//
// IMPORTANT: if you pass a set of roles such as [ "targets/a", "targets/x"
// "targets/a/b" ], even though "targets/a/b" is part of the "targets/a" subtree
// its entries will be strictly shadowed by those in other parts of the "targets/a"
// subtree and also the "targets/x" subtree, as we will defer parsing it until
// we explicitly reach it in our iteration of the provided list of roles.
ListTargets(roles ...data.RoleName) ([]*TargetWithRole, error)
// GetTargetByName returns a target by the given name. If no roles are passed
// it uses the targets role and does a search of the entire delegation
// graph, finding the first entry in a breadth first search of the delegations.
// If roles are passed, they should be passed in descending priority and
// the target entry found in the subtree of the highest priority role
// will be returned.
// See the IMPORTANT section on ListTargets above. Those roles also apply here.
GetTargetByName(name string, roles ...data.RoleName) (*TargetWithRole, error)
// GetAllTargetMetadataByName searches the entire delegation role tree to find
// the specified target by name for all roles, and returns a list of
// TargetSignedStructs for each time it finds the specified target.
// If given an empty string for a target name, it will return back all targets
// signed into the repository in every role
GetAllTargetMetadataByName(name string) ([]TargetSignedStruct, error)
// Changelist operations
// ListRoles returns a list of RoleWithSignatures objects for this repo
// This represents the latest metadata for each role in this repo
ListRoles() ([]RoleWithSignatures, error)
// GetDelegationRoles returns the keys and roles of the repository's delegations
// Also converts key IDs to canonical key IDs to keep consistent with signing prompts
GetDelegationRoles() ([]data.Role, error)
}
// Repository represents the set of options that must be supported over a TUF repo
// for both reading and writing.
type Repository interface {
ReadOnly
// ------------------- Publishing operations -------------------
// GetGUN returns the GUN associated with the repository
GetGUN() data.GUN
// SetLegacyVersion sets the number of versions back to fetch roots to sign with
SetLegacyVersions(int)
// ----- General management operations -----
// Initialize creates a new repository by using rootKey as the root Key for the
// TUF repository. The remote store/server must be reachable (and is asked to
// generate a timestamp key and possibly other serverManagedRoles), but the
// created repository result is only stored on local cache, not published to
// the remote store. To do that, use r.Publish() eventually.
Initialize(rootKeyIDs []string, serverManagedRoles ...data.RoleName) error
// InitializeWithCertificate initializes the repository with root keys and their
// corresponding certificates
InitializeWithCertificate(rootKeyIDs []string, rootCerts []data.PublicKey, serverManagedRoles ...data.RoleName) error
// Publish pushes the local changes in signed material to the remote notary-server
// Conceptually it performs an operation similar to a `git rebase`
Publish() error
// ----- Target Operations -----
// AddTarget creates new changelist entries to add a target to the given roles
// in the repository when the changelist gets applied at publish time.
// If roles are unspecified, the default role is "targets"
AddTarget(target *Target, roles ...data.RoleName) error
// RemoveTarget creates new changelist entries to remove a target from the given
// roles in the repository when the changelist gets applied at publish time.
// If roles are unspecified, the default role is "target".
RemoveTarget(targetName string, roles ...data.RoleName) error
// ----- Changelist operations -----
// GetChangelist returns the list of the repository's unpublished changes
GetChangelist() (changelist.Changelist, error)
// Role operations
ListRoles() ([]RoleWithSignatures, error)
GetDelegationRoles() ([]data.Role, error)
// ----- Role operations -----
// AddDelegation creates changelist entries to add provided delegation public keys and paths.
// This method composes AddDelegationRoleAndKeys and AddDelegationPaths (each creates one changelist if called).
AddDelegation(name data.RoleName, delegationKeys []data.PublicKey, paths []string) error
// AddDelegationRoleAndKeys creates a changelist entry to add provided delegation public keys.
// This method is the simplest way to create a new delegation, because the delegation must have at least
// one key upon creation to be valid since we will reject the changelist while validating the threshold.
AddDelegationRoleAndKeys(name data.RoleName, delegationKeys []data.PublicKey) error
// AddDelegationPaths creates a changelist entry to add provided paths to an existing delegation.
// This method cannot create a new delegation itself because the role must meet the key threshold upon
// creation.
AddDelegationPaths(name data.RoleName, paths []string) error
// RemoveDelegationKeysAndPaths creates changelist entries to remove provided delegation key IDs and
// paths. This method composes RemoveDelegationPaths and RemoveDelegationKeys (each creates one
// changelist entry if called).
RemoveDelegationKeysAndPaths(name data.RoleName, keyIDs, paths []string) error
// RemoveDelegationRole creates a changelist to remove all paths and keys from a role, and delete the
// role in its entirety.
RemoveDelegationRole(name data.RoleName) error
// RemoveDelegationPaths creates a changelist entry to remove provided paths from an existing delegation.
RemoveDelegationPaths(name data.RoleName, paths []string) error
// RemoveDelegationKeys creates a changelist entry to remove provided keys from an existing delegation.
// When this changelist is applied, if the specified keys are the only keys left in the role,
// the role itself will be deleted in its entirety.
// It can also delete a key from all delegations under a parent using a name
// with a wildcard at the end.
RemoveDelegationKeys(name data.RoleName, keyIDs []string) error
// ClearDelegationPaths creates a changelist entry to remove all paths from an existing delegation.
ClearDelegationPaths(name data.RoleName) error
// Witness and other re-signing operations
// ----- Witness and other re-signing operations -----
// Witness creates change objects to witness (i.e. re-sign) the given
// roles on the next publish. One change is created per role
Witness(roles ...data.RoleName) ([]data.RoleName, error)
// Key Operations
// ----- Key Operations -----
// RotateKey removes all existing keys associated with the role. If no keys are
// specified in keyList, then this creates and adds one new key or delegates
// managing the key to the server. If key(s) are specified by keyList, then they are
// used for signing the role.
// These changes are staged in a changelist until publish is called.
RotateKey(role data.RoleName, serverManagesKey bool, keyList []string) error
// GetCryptoService is the getter for the repository's CryptoService, which is used
// to sign all updates.
GetCryptoService() signed.CryptoService
SetLegacyVersions(int)
GetGUN() data.GUN
}

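With the interface split above, code that only reads trust data can depend on the narrower `ReadOnly` interface instead of the full read/write `Repository`. A small illustrative sketch, not part of this diff (the helper function is hypothetical):

```go
package notaryexample

import (
	"fmt"

	"github.com/theupdateframework/notary/client"
)

// printTargets needs only read access, so it accepts client.ReadOnly
// rather than the full read/write client.Repository interface.
func printTargets(ro client.ReadOnly) error {
	targets, err := ro.ListTargets()
	if err != nil {
		return err
	}
	for _, t := range targets {
		fmt.Printf("%s (role %s, %d bytes)\n", t.Name, t.Role, t.Length)
	}
	return nil
}
```
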
@@ -0,0 +1,257 @@
package client
import (
"fmt"
canonicaljson "github.com/docker/go/canonical/json"
store "github.com/theupdateframework/notary/storage"
"github.com/theupdateframework/notary/tuf"
"github.com/theupdateframework/notary/tuf/data"
"github.com/theupdateframework/notary/tuf/utils"
)
// Target represents a simplified version of the data TUF operates on, so external
// applications don't have to depend on TUF data types.
type Target struct {
Name string // the name of the target
Hashes data.Hashes // the hash of the target
Length int64 // the size in bytes of the target
Custom *canonicaljson.RawMessage // the custom data provided to describe the file at TARGETPATH
}
// TargetWithRole represents a Target that exists in a particular role - this is
// produced by ListTargets and GetTargetByName
type TargetWithRole struct {
Target
Role data.RoleName
}
// TargetSignedStruct is a struct that contains a Target, the role it was found in, and the list of signatures for that role
type TargetSignedStruct struct {
Role data.DelegationRole
Target Target
Signatures []data.Signature
}
//ErrNoSuchTarget is returned when no valid trust data is found.
type ErrNoSuchTarget string
func (f ErrNoSuchTarget) Error() string {
return fmt.Sprintf("No valid trust data for %s", string(f))
}
// RoleWithSignatures is a Role with its associated signatures
type RoleWithSignatures struct {
Signatures []data.Signature
data.Role
}
// NewReadOnly is the base method that returns a new notary repository for reading.
// It expects an initialized cache. In case of a nil remote store, a default
// offline store is used.
func NewReadOnly(repo *tuf.Repo) ReadOnly {
return &reader{tufRepo: repo}
}
type reader struct {
tufRepo *tuf.Repo
}
// ListTargets lists all targets for the current repository. The list of
// roles should be passed in order from highest to lowest priority.
//
// IMPORTANT: if you pass a set of roles such as [ "targets/a", "targets/x"
// "targets/a/b" ], even though "targets/a/b" is part of the "targets/a" subtree
// its entries will be strictly shadowed by those in other parts of the "targets/a"
// subtree and also the "targets/x" subtree, as we will defer parsing it until
// we explicitly reach it in our iteration of the provided list of roles.
func (r *reader) ListTargets(roles ...data.RoleName) ([]*TargetWithRole, error) {
if len(roles) == 0 {
roles = []data.RoleName{data.CanonicalTargetsRole}
}
targets := make(map[string]*TargetWithRole)
for _, role := range roles {
// Define an array of roles to skip for this walk (see IMPORTANT comment above)
skipRoles := utils.RoleNameSliceRemove(roles, role)
// Define a visitor function to populate the targets map in priority order
listVisitorFunc := func(tgt *data.SignedTargets, validRole data.DelegationRole) interface{} {
// We found targets so we should try to add them to our targets map
for targetName, targetMeta := range tgt.Signed.Targets {
// Follow the priority by not overriding previously set targets
// and check that this path is valid with this role
if _, ok := targets[targetName]; ok || !validRole.CheckPaths(targetName) {
continue
}
targets[targetName] = &TargetWithRole{
Target: Target{
Name: targetName,
Hashes: targetMeta.Hashes,
Length: targetMeta.Length,
Custom: targetMeta.Custom,
},
Role: validRole.Name,
}
}
return nil
}
r.tufRepo.WalkTargets("", role, listVisitorFunc, skipRoles...)
}
var targetList []*TargetWithRole
for _, v := range targets {
targetList = append(targetList, v)
}
return targetList, nil
}
// GetTargetByName returns a target by the given name. If no roles are passed
// it uses the targets role and does a search of the entire delegation
// graph, finding the first entry in a breadth first search of the delegations.
// If roles are passed, they should be passed in descending priority and
// the target entry found in the subtree of the highest priority role
// will be returned.
// See the IMPORTANT section on ListTargets above. Those roles also apply here.
func (r *reader) GetTargetByName(name string, roles ...data.RoleName) (*TargetWithRole, error) {
if len(roles) == 0 {
roles = append(roles, data.CanonicalTargetsRole)
}
var resultMeta data.FileMeta
var resultRoleName data.RoleName
var foundTarget bool
for _, role := range roles {
// Define an array of roles to skip for this walk (see IMPORTANT comment above)
skipRoles := utils.RoleNameSliceRemove(roles, role)
// Define a visitor function to find the specified target
getTargetVisitorFunc := func(tgt *data.SignedTargets, validRole data.DelegationRole) interface{} {
if tgt == nil {
return nil
}
// We found the target and validated path compatibility in our walk,
// so we should stop our walk and set the resultMeta and resultRoleName variables
if resultMeta, foundTarget = tgt.Signed.Targets[name]; foundTarget {
resultRoleName = validRole.Name
return tuf.StopWalk{}
}
return nil
}
// Check that we didn't error, and that we assigned to our target
if err := r.tufRepo.WalkTargets(name, role, getTargetVisitorFunc, skipRoles...); err == nil && foundTarget {
return &TargetWithRole{Target: Target{Name: name, Hashes: resultMeta.Hashes, Length: resultMeta.Length, Custom: resultMeta.Custom}, Role: resultRoleName}, nil
}
}
return nil, ErrNoSuchTarget(name)
}
// GetAllTargetMetadataByName searches the entire delegation role tree to find the specified target by name for all
// roles, and returns a list of TargetSignedStructs for each time it finds the specified target.
// If given an empty string for a target name, it will return back all targets signed into the repository in every role
func (r *reader) GetAllTargetMetadataByName(name string) ([]TargetSignedStruct, error) {
var targetInfoList []TargetSignedStruct
// Define a visitor function to find the specified target
getAllTargetInfoByNameVisitorFunc := func(tgt *data.SignedTargets, validRole data.DelegationRole) interface{} {
if tgt == nil {
return nil
}
// We found a target and validated path compatibility in our walk,
// so add it to our list if we have a match
// if we have an empty name, add all targets, else check if we have it
var targetMetaToAdd data.Files
if name == "" {
targetMetaToAdd = tgt.Signed.Targets
} else {
if meta, ok := tgt.Signed.Targets[name]; ok {
targetMetaToAdd = data.Files{name: meta}
}
}
for targetName, resultMeta := range targetMetaToAdd {
targetInfo := TargetSignedStruct{
Role: validRole,
Target: Target{Name: targetName, Hashes: resultMeta.Hashes, Length: resultMeta.Length, Custom: resultMeta.Custom},
Signatures: tgt.Signatures,
}
targetInfoList = append(targetInfoList, targetInfo)
}
// continue walking to all child roles
return nil
}
// Check that we didn't error, and that we found the target at least once
if err := r.tufRepo.WalkTargets(name, "", getAllTargetInfoByNameVisitorFunc); err != nil {
return nil, err
}
if len(targetInfoList) == 0 {
return nil, ErrNoSuchTarget(name)
}
return targetInfoList, nil
}
// ListRoles returns a list of RoleWithSignatures objects for this repo
// This represents the latest metadata for each role in this repo
func (r *reader) ListRoles() ([]RoleWithSignatures, error) {
// Get all role info from our updated keysDB, can be empty
roles := r.tufRepo.GetAllLoadedRoles()
var roleWithSigs []RoleWithSignatures
// Populate RoleWithSignatures with Role from keysDB and signatures from TUF metadata
for _, role := range roles {
roleWithSig := RoleWithSignatures{Role: *role, Signatures: nil}
switch role.Name {
case data.CanonicalRootRole:
roleWithSig.Signatures = r.tufRepo.Root.Signatures
case data.CanonicalTargetsRole:
roleWithSig.Signatures = r.tufRepo.Targets[data.CanonicalTargetsRole].Signatures
case data.CanonicalSnapshotRole:
roleWithSig.Signatures = r.tufRepo.Snapshot.Signatures
case data.CanonicalTimestampRole:
roleWithSig.Signatures = r.tufRepo.Timestamp.Signatures
default:
if !data.IsDelegation(role.Name) {
continue
}
if _, ok := r.tufRepo.Targets[role.Name]; ok {
// We'll only find a signature if we've published any targets with this delegation
roleWithSig.Signatures = r.tufRepo.Targets[role.Name].Signatures
}
}
roleWithSigs = append(roleWithSigs, roleWithSig)
}
return roleWithSigs, nil
}
// GetDelegationRoles returns the keys and roles of the repository's delegations
// Also converts key IDs to canonical key IDs to keep consistent with signing prompts
func (r *reader) GetDelegationRoles() ([]data.Role, error) {
// All top level delegations (ex: targets/level1) are stored exclusively in targets.json
_, ok := r.tufRepo.Targets[data.CanonicalTargetsRole]
if !ok {
return nil, store.ErrMetaNotFound{Resource: data.CanonicalTargetsRole.String()}
}
// make a copy for traversing nested delegations
allDelegations := []data.Role{}
// Define a visitor function to populate the delegations list and translate their key IDs to canonical IDs
delegationCanonicalListVisitor := func(tgt *data.SignedTargets, validRole data.DelegationRole) interface{} {
// For the return list, update with a copy that includes canonicalKeyIDs
// These aren't validated by the validRole
canonicalDelegations, err := translateDelegationsToCanonicalIDs(tgt.Signed.Delegations)
if err != nil {
return err
}
allDelegations = append(allDelegations, canonicalDelegations...)
return nil
}
err := r.tufRepo.WalkTargets("", "", delegationCanonicalListVisitor)
if err != nil {
return nil, err
}
return allDelegations, nil
}

@@ -3,9 +3,11 @@ package client
import (
"encoding/json"
"fmt"
"regexp"
"github.com/sirupsen/logrus"
"github.com/theupdateframework/notary"
"github.com/theupdateframework/notary/cryptoservice"
store "github.com/theupdateframework/notary/storage"
"github.com/theupdateframework/notary/trustpinning"
"github.com/theupdateframework/notary/tuf"
@@ -21,16 +23,6 @@ type tufClient struct {
newBuilder tuf.RepoBuilder
}
// newTufClient initialized a tufClient with the given repo, remote source of content, and cache
func newTufClient(oldBuilder, newBuilder tuf.RepoBuilder, remote store.RemoteStore, cache store.MetadataStore) *tufClient {
return &tufClient{
oldBuilder: oldBuilder,
newBuilder: newBuilder,
remote: remote,
cache: cache,
}
}
// Update performs an update to the TUF repo as defined by the TUF spec
func (c *tufClient) Update() (*tuf.Repo, *tuf.Repo, error) {
// 1. Get timestamp
@@ -139,7 +131,7 @@ func (c *tufClient) updateRoot() error {
// Write newest to cache
if err := c.cache.Set(data.CanonicalRootRole.String(), raw); err != nil {
logrus.Debugf("unable to write %s to cache: %d.%s", newestVersion, data.CanonicalRootRole, err)
logrus.Debugf("unable to write %d.%s to cache: %s", newestVersion, data.CanonicalRootRole, err)
}
logrus.Debugf("finished updating root files")
return nil
@@ -323,3 +315,149 @@ func (c *tufClient) tryLoadRemote(consistentInfo tuf.ConsistentInfo, old []byte)
}
return raw, nil
}
// TUFLoadOptions are provided to LoadTUFRepo, which loads a TUF repo from cache,
// from a remote store, or both
type TUFLoadOptions struct {
GUN data.GUN
TrustPinning trustpinning.TrustPinConfig
CryptoService signed.CryptoService
Cache store.MetadataStore
RemoteStore store.RemoteStore
AlwaysCheckInitialized bool
}
// bootstrapClient attempts to bootstrap a root.json to be used as the trust
// anchor for a repository. The checkInitialized argument indicates whether
// we should always attempt to contact the server to determine if the repository
// is initialized or not. If set to true, we will always attempt to download
// and return an error if the remote repository errors.
//
// Populates a tuf.RepoBuilder with this root metadata. If the root metadata
// downloaded is a newer version than what is on disk, then intermediate
// versions will be downloaded and verified in order to rotate trusted keys
// properly. Newer root metadata must always be signed with the previous
// threshold and keys.
//
// Fails if the remote server is reachable and does not know the repo
// (i.e. before any metadata has been published), in which case the error is
// store.ErrMetaNotFound, or if the root metadata (from whichever source is used)
// is not trusted.
//
// Returns a TUFClient for the remote server, which may not be actually
// operational (if the URL is invalid but a root.json is cached).
func bootstrapClient(l TUFLoadOptions) (*tufClient, error) {
minVersion := 1
// the old root on disk should not be validated against any trust pinning configuration
// because if we have an old root, it itself is the thing that pins trust
oldBuilder := tuf.NewRepoBuilder(l.GUN, l.CryptoService, trustpinning.TrustPinConfig{})
// by default, we want to use the trust pinning configuration on any new root that we download
newBuilder := tuf.NewRepoBuilder(l.GUN, l.CryptoService, l.TrustPinning)
// Try to read root from cache first. We will trust this root until we detect a problem
// during update which will cause us to download a new root and perform a rotation.
// If we have an old root, and it's valid, then we overwrite the newBuilder to be one
// preloaded with the old root or one which uses the old root for trust bootstrapping.
if rootJSON, err := l.Cache.GetSized(data.CanonicalRootRole.String(), store.NoSizeLimit); err == nil {
// if we can't load the cached root, fail hard because that is how we pin trust
if err := oldBuilder.Load(data.CanonicalRootRole, rootJSON, minVersion, true); err != nil {
return nil, err
}
// again, the root on disk is the source of trust pinning, so use an empty trust
// pinning configuration
newBuilder = tuf.NewRepoBuilder(l.GUN, l.CryptoService, trustpinning.TrustPinConfig{})
if err := newBuilder.Load(data.CanonicalRootRole, rootJSON, minVersion, false); err != nil {
// Ok, the old root is expired - we want to download a new one. But we want to use the
// old root to verify the new root, so bootstrap a new builder with the old builder
// but use the trustpinning to validate the new root
minVersion = oldBuilder.GetLoadedVersion(data.CanonicalRootRole)
newBuilder = oldBuilder.BootstrapNewBuilderWithNewTrustpin(l.TrustPinning)
}
}
if !newBuilder.IsLoaded(data.CanonicalRootRole) || l.AlwaysCheckInitialized {
// remoteErr was nil and we were not able to load a root from cache or
// are specifically checking for initialization of the repo.
// if remote store successfully set up, try and get root from remote
// We don't have any local data to determine the size of root, so try the maximum (though it is restricted at 100MB)
tmpJSON, err := l.RemoteStore.GetSized(data.CanonicalRootRole.String(), store.NoSizeLimit)
if err != nil {
// we didn't have a root in cache and were unable to load one from
// the server. Nothing we can do but error.
return nil, err
}
if !newBuilder.IsLoaded(data.CanonicalRootRole) {
// we always want to use the downloaded root if we couldn't load from cache
if err := newBuilder.Load(data.CanonicalRootRole, tmpJSON, minVersion, false); err != nil {
return nil, err
}
err = l.Cache.Set(data.CanonicalRootRole.String(), tmpJSON)
if err != nil {
// if we can't write cache we should still continue, just log error
logrus.Errorf("could not save root to cache: %s", err.Error())
}
}
}
// If no root has been loaded at this point, neither the cache nor the remote
// provided one, so the repository has not been initialized.
if !newBuilder.IsLoaded(data.CanonicalRootRole) {
return nil, ErrRepoNotInitialized{}
}
return &tufClient{
oldBuilder: oldBuilder,
newBuilder: newBuilder,
remote: l.RemoteStore,
cache: l.Cache,
}, nil
}
// LoadTUFRepo bootstraps a trust anchor (root.json) from cache (if provided) before updating
// all the metadata for the repo from the remote (if provided). It loads a TUF repo from cache,
// from a remote store, or both.
func LoadTUFRepo(options TUFLoadOptions) (*tuf.Repo, *tuf.Repo, error) {
// set some sane defaults so that callers do not have to provide every option
if options.RemoteStore == nil {
options.RemoteStore = store.OfflineStore{}
}
if options.Cache == nil {
options.Cache = store.NewMemoryStore(nil)
}
if options.CryptoService == nil {
options.CryptoService = cryptoservice.EmptyService
}
c, err := bootstrapClient(options)
if err != nil {
if _, ok := err.(store.ErrMetaNotFound); ok {
return nil, nil, ErrRepositoryNotExist{
remote: options.RemoteStore.Location(),
gun: options.GUN,
}
}
return nil, nil, err
}
repo, invalid, err := c.Update()
if err != nil {
// notFound.Resource may include a version or checksum so when the role is root,
// it will be root, <version>.root or root.<checksum>.
notFound, ok := err.(store.ErrMetaNotFound)
isRoot, _ := regexp.MatchString(`\.?`+data.CanonicalRootRole.String()+`\.?`, notFound.Resource)
if ok && isRoot {
return nil, nil, ErrRepositoryNotExist{
remote: options.RemoteStore.Location(),
gun: options.GUN,
}
}
return nil, nil, err
}
warnRolesNearExpiry(repo)
return repo, invalid, nil
}
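
A rough usage sketch of this entry point, assuming LoadTUFRepo and TUFLoadOptions are exported from github.com/theupdateframework/notary/client and that the storage package lives at github.com/theupdateframework/notary/storage (aliased to store here to match the code above); neither import path is confirmed by this diff.

package main

import (
	"fmt"
	"net/http"

	"github.com/theupdateframework/notary/client"        // assumed to export LoadTUFRepo / TUFLoadOptions
	store "github.com/theupdateframework/notary/storage" // assumed path; aliased to match the code above
	"github.com/theupdateframework/notary/trustpinning"
	"github.com/theupdateframework/notary/tuf/data"
)

func main() {
	gun := data.GUN("docker.io/library/alpine")

	// Remote store pointing at a notary server; NewNotaryServerStore builds the
	// /v2/<gun>/_trust/tuf/ base URL shown in the HTTPStore changes below.
	remote, err := store.NewNotaryServerStore("https://notary.docker.io", gun, http.DefaultTransport)
	if err != nil {
		panic(err)
	}

	// Cache and CryptoService are deliberately left nil: LoadTUFRepo falls back
	// to an in-memory store and cryptoservice.EmptyService respectively.
	repo, _, err := client.LoadTUFRepo(client.TUFLoadOptions{
		GUN:          gun,
		TrustPinning: trustpinning.TrustPinConfig{},
		RemoteStore:  remote,
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("targets roles loaded:", len(repo.Targets))
}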

View File

@ -21,6 +21,9 @@ var (
// ErrRootKeyNotEncrypted is returned if a root key being imported is
// unencrypted
ErrRootKeyNotEncrypted = errors.New("only encrypted root keys may be imported")
// EmptyService is an empty crypto service
EmptyService = NewCryptoService()
)
// CryptoService implements Sign and Create, holding a specific GUN and keystore to

View File

@ -3,7 +3,7 @@ package notary
import (
"crypto"
// Need to import md5 so can test availability.
_ "crypto/md5"
_ "crypto/md5" // #nosec
)
// FIPSEnabled returns true if running in FIPS mode.

vendor/github.com/theupdateframework/notary/go.mod generated vendored Normal file
View File

@ -0,0 +1,59 @@
module github.com/theupdateframework/notary
go 1.12
require (
github.com/BurntSushi/toml v0.3.1 // indirect
github.com/Shopify/logrus-bugsnag v0.0.0-20170309145241-6dbc35f2c30d
github.com/beorn7/perks v0.0.0-20150223135152-b965b613227f // indirect
github.com/bitly/go-simplejson v0.5.0 // indirect
github.com/bugsnag/bugsnag-go v1.0.5-0.20150529004307-13fd6b8acda0
github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b // indirect
github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 // indirect
github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004
github.com/denisenkom/go-mssqldb v0.0.0-20191128021309-1d7a30a10f73 // indirect
github.com/docker/distribution v2.7.1+incompatible
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c
github.com/docker/go-connections v0.4.0
github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916 // indirect
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect
github.com/dvsekhvalnov/jose2go v0.0.0-20170216131308-f21a8cedbbae
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5 // indirect
github.com/go-sql-driver/mysql v1.3.0
github.com/gogo/protobuf v1.0.0 // indirect
github.com/golang/protobuf v1.3.4
github.com/google/certificate-transparency-go v1.0.10-0.20180222191210-5ab67e519c93 // indirect
github.com/gorilla/mux v1.7.0
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/jinzhu/gorm v0.0.0-20170222002820-5409931a1bb8
github.com/jinzhu/inflection v0.0.0-20170102125226-1c35d901db3d // indirect
github.com/jinzhu/now v1.1.1 // indirect
github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8 // indirect
github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect
github.com/lib/pq v0.0.0-20150723085316-0dad96c0b94f
github.com/magiconair/properties v1.5.3 // indirect
github.com/mattn/go-sqlite3 v1.6.0
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
github.com/miekg/pkcs11 v1.0.2
github.com/mitchellh/mapstructure v0.0.0-20150613213606-2caf8efc9366 // indirect
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420 // indirect
github.com/opencontainers/image-spec v1.0.1 // indirect
github.com/pkg/errors v0.8.1 // indirect
github.com/prometheus/client_golang v0.9.0-pre1.0.20180209125602-c332b6f63c06
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5 // indirect
github.com/prometheus/common v0.0.0-20180110214958-89604d197083 // indirect
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7 // indirect
github.com/sirupsen/logrus v1.4.1
github.com/spf13/cast v0.0.0-20150508191742-4d07383ffe94 // indirect
github.com/spf13/cobra v0.0.1
github.com/spf13/jwalterweatherman v0.0.0-20141219030609-3d60171a6431 // indirect
github.com/spf13/pflag v1.0.0 // indirect
github.com/spf13/viper v0.0.0-20150530192845-be5ff3e4840c
github.com/stretchr/testify v1.5.1
golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221
google.golang.org/grpc v1.0.5
gopkg.in/rethinkdb/rethinkdb-go.v6 v6.2.1
)

View File

@ -12,7 +12,7 @@ import (
"strings"
"github.com/theupdateframework/notary"
"golang.org/x/crypto/ssh/terminal"
"golang.org/x/term"
)
const (
@ -49,7 +49,7 @@ var (
// Upon successful passphrase retrievals, the passphrase will be cached such that
// subsequent prompts will produce the same passphrase.
func PromptRetriever() notary.PassRetriever {
if !terminal.IsTerminal(int(os.Stdin.Fd())) {
if !term.IsTerminal(int(os.Stdin.Fd())) {
return func(string, string, bool, int) (string, bool, error) {
return "", false, ErrNoInput
}
@ -200,8 +200,8 @@ func GetPassphrase(in *bufio.Reader) ([]byte, error) {
err error
)
if terminal.IsTerminal(int(os.Stdin.Fd())) {
passphrase, err = terminal.ReadPassword(int(os.Stdin.Fd()))
if term.IsTerminal(int(os.Stdin.Fd())) {
passphrase, err = term.ReadPassword(int(os.Stdin.Fd()))
} else {
passphrase, err = in.ReadBytes('\n')
}
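
For reference, a minimal sketch of the golang.org/x/term calls that replace the deprecated golang.org/x/crypto/ssh/terminal helpers in this passphrase code; the surrounding prompt logic is simplified here.

package main

import (
	"bufio"
	"fmt"
	"os"

	"golang.org/x/term"
)

// readSecret mirrors the pattern above: read without local echo when stdin is a
// terminal, otherwise fall back to reading a line from the piped input.
func readSecret() ([]byte, error) {
	fd := int(os.Stdin.Fd())
	if term.IsTerminal(fd) {
		return term.ReadPassword(fd)
	}
	return bufio.NewReader(os.Stdin).ReadBytes('\n')
}

func main() {
	secret, err := readSecret()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("read %d bytes\n", len(secret))
}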

View File

@ -137,14 +137,16 @@ func (f *FilesystemStore) GetSized(name string, size int64) ([]byte, error) {
if err != nil {
return nil, err
}
file, err := os.OpenFile(p, os.O_RDONLY, notary.PrivNoExecPerms)
file, err := os.Open(p)
if err != nil {
if os.IsNotExist(err) {
err = ErrMetaNotFound{Resource: name}
}
return nil, err
}
defer file.Close()
defer func() {
_ = file.Close()
}()
if size == NoSizeLimit {
size = notary.MaxDownloadSize

View File

@ -111,6 +111,18 @@ type HTTPStore struct {
roundTrip http.RoundTripper
}
// NewNotaryServerStore returns a new HTTPStore against a URL which should represent a notary
// server
func NewNotaryServerStore(serverURL string, gun data.GUN, roundTrip http.RoundTripper) (RemoteStore, error) {
return NewHTTPStore(
serverURL+"/v2/"+gun.String()+"/_trust/tuf/",
"",
"json",
"key",
roundTrip,
)
}
// NewHTTPStore initializes a new store against a URL and a number of configuration options.
//
// In case of a nil `roundTrip`, a default offline store is used instead.
@ -363,5 +375,5 @@ func (s HTTPStore) RotateKey(role data.RoleName) ([]byte, error) {
// Location returns a human readable name for the storage location
func (s HTTPStore) Location() string {
return s.baseURL.String()
return s.baseURL.Host
}
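
A small sketch of the effect of these changes, assuming the storage package path/alias and that RemoteStore embeds MetadataStore: Location() is now part of the MetadataStore interface (see the interface change below) and, for HTTPStore, reports only the host rather than the full base URL.

package main

import (
	"fmt"
	"net/http"

	store "github.com/theupdateframework/notary/storage" // assumed path; aliased to match the code above
	"github.com/theupdateframework/notary/tuf/data"
)

// describe works for any MetadataStore now that Location() is part of the
// interface.
func describe(s store.MetadataStore) string {
	return "trust data from " + s.Location()
}

func main() {
	remote, err := store.NewNotaryServerStore(
		"https://notary.docker.io", data.GUN("docker.io/library/alpine"), http.DefaultTransport)
	if err != nil {
		panic(err)
	}

	// With the change above, the HTTPStore reports only the host
	// ("notary.docker.io") rather than the /v2/.../_trust/tuf/ base URL.
	fmt.Println(describe(remote))

	// An in-memory cache store satisfies the same interface.
	fmt.Println(describe(store.NewMemoryStore(nil)))
}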

View File

@ -15,6 +15,7 @@ type MetadataStore interface {
SetMulti(map[string][]byte) error
RemoveAll() error
Remove(name string) error
Location() string
}
// PublicKeyStore must be implemented by a key service

View File

@ -359,7 +359,7 @@ func (rb *repoBuilder) GenerateSnapshot(prev *data.SignedSnapshot) ([]byte, int,
// loadedNotChecksummed should currently contain the root awaiting checksumming,
// since it has to have been loaded. Since the snapshot was generated using
// the root and targets data (there may not be any) that that have been loaded,
// the root and targets data (there may not be any) that have been loaded,
// remove all of them from rb.loadedNotChecksummed
for tgtName := range rb.repo.Targets {
delete(rb.loadedNotChecksummed, data.RoleName(tgtName))

View File

@ -12,9 +12,9 @@ import (
"io"
"math/big"
"github.com/agl/ed25519"
"github.com/docker/go/canonical/json"
"github.com/sirupsen/logrus"
"golang.org/x/crypto/ed25519"
)
// PublicKey is the necessary interface for public keys
@ -484,9 +484,10 @@ func (k RSAPrivateKey) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts)
// Sign creates an ed25519 signature
func (k ED25519PrivateKey) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) (signature []byte, err error) {
priv := [ed25519.PrivateKeySize]byte{}
copy(priv[:], k.private[ed25519.PublicKeySize:])
return ed25519.Sign(&priv, msg)[:], nil
priv := make([]byte, ed25519.PrivateKeySize)
// The ed25519 key is serialized as public key then private key, so just use private key here.
copy(priv, k.private[ed25519.PublicKeySize:])
return ed25519.Sign(ed25519.PrivateKey(priv), msg)[:], nil
}
// Sign on an UnknownPrivateKey raises an error because the client does not

View File

@ -55,7 +55,7 @@ func (e ErrInvalidRole) Error() string {
// ValidRole only determines the name is semantically
// correct. For target delegated roles, it does NOT check
// the the appropriate parent roles exist.
// the appropriate parent roles exist.
func ValidRole(name RoleName) bool {
if IsDelegation(name) {
return true

View File

@ -59,7 +59,7 @@ func IsValidSnapshotStructure(s Snapshot) error {
return nil
}
// NewSnapshot initilizes a SignedSnapshot with a given top level root
// NewSnapshot initializes a SignedSnapshot with a given top level root
// and targets objects
func NewSnapshot(root *Signed, targets *Signed) (*SignedSnapshot, error) {
logrus.Debug("generating new snapshot...")

View File

@ -54,7 +54,7 @@ func isValidTargetsStructure(t Targets, roleName RoleName) error {
return nil
}
// NewTargets intiializes a new empty SignedTargets object
// NewTargets initializes a new empty SignedTargets object
func NewTargets() *SignedTargets {
return &SignedTargets{
Signatures: make([]Signature, 0),

View File

@ -186,7 +186,7 @@ type FileMeta struct {
// Equals returns true if the other FileMeta object is equivalent to this one
func (f FileMeta) Equals(o FileMeta) bool {
if o.Length != f.Length || len(f.Hashes) != len(f.Hashes) {
if o.Length != f.Length || len(o.Hashes) != len(f.Hashes) {
return false
}
if f.Custom == nil && o.Custom != nil || f.Custom != nil && o.Custom == nil {

View File

@ -39,7 +39,7 @@ type CryptoService interface {
KeyService
}
// Verifier defines an interface for verfying signatures. An implementer
// Verifier defines an interface for verifying signatures. An implementer
// of this interface should verify signatures for one and only one
// signing scheme.
type Verifier interface {

View File

@ -87,7 +87,8 @@ func Sign(service CryptoService, s *data.Signed, signingKeys []data.PublicKey,
})
}
for _, sig := range s.Signatures {
for i := range s.Signatures {
sig := s.Signatures[i]
if _, ok := signingKeyIDs[sig.KeyID]; ok {
// key is in the set of key IDs for which a signature has been created
continue

View File

@ -10,9 +10,9 @@ import (
"fmt"
"math/big"
"github.com/agl/ed25519"
"github.com/sirupsen/logrus"
"github.com/theupdateframework/notary/tuf/data"
"golang.org/x/crypto/ed25519"
)
const (
@ -39,26 +39,26 @@ func (v Ed25519Verifier) Verify(key data.PublicKey, sig []byte, msg []byte) erro
if key.Algorithm() != data.ED25519Key {
return ErrInvalidKeyType{}
}
var sigBytes [ed25519.SignatureSize]byte
sigBytes := make([]byte, ed25519.SignatureSize)
if len(sig) != ed25519.SignatureSize {
logrus.Debugf("signature length is incorrect, must be %d, was %d.", ed25519.SignatureSize, len(sig))
return ErrInvalid
}
copy(sigBytes[:], sig)
copy(sigBytes, sig)
var keyBytes [ed25519.PublicKeySize]byte
keyBytes := make([]byte, ed25519.PublicKeySize)
pub := key.Public()
if len(pub) != ed25519.PublicKeySize {
logrus.Errorf("public key is incorrect size, must be %d, was %d.", ed25519.PublicKeySize, len(pub))
return ErrInvalidKeyLength{msg: fmt.Sprintf("ed25519 public key must be %d bytes.", ed25519.PublicKeySize)}
}
n := copy(keyBytes[:], key.Public())
n := copy(keyBytes, key.Public())
if n < ed25519.PublicKeySize {
logrus.Errorf("failed to copy the key, must have %d bytes, copied %d bytes.", ed25519.PublicKeySize, n)
return ErrInvalid
}
if !ed25519.Verify(&keyBytes, msg, &sigBytes) {
if !ed25519.Verify(ed25519.PublicKey(keyBytes), msg, sigBytes) {
logrus.Debugf("failed ed25519 verification")
return ErrInvalid
}
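
A minimal standalone sketch of the slice-based golang.org/x/crypto/ed25519 API that the Sign and Verify changes above switch to, replacing the fixed-size array pointers required by github.com/agl/ed25519.

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/ed25519"
)

func main() {
	// GenerateKey returns a 32-byte public key and a 64-byte private key
	// (seed followed by public key), both as plain byte slices.
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}

	msg := []byte("tuf metadata")

	// Sign and Verify take slices directly, so the copies into
	// [ed25519.PrivateKeySize]byte / [ed25519.PublicKeySize]byte arrays that the
	// agl/ed25519 API required are no longer needed.
	sig := ed25519.Sign(priv, msg)
	fmt.Println("signature is", len(sig), "bytes; valid:", ed25519.Verify(pub, msg, sig))
}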

View File

@ -39,7 +39,7 @@ import (
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"crypto/sha1"
"crypto/sha1" // #nosec
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"

View File

@ -30,7 +30,7 @@ func RoleNameSliceContains(ss []data.RoleName, s data.RoleName) bool {
return false
}
// RoleNameSliceRemove removes the the given RoleName from the slice, returning a new slice
// RoleNameSliceRemove removes the given RoleName from the slice, returning a new slice
func RoleNameSliceRemove(ss []data.RoleName, s data.RoleName) []data.RoleName {
res := []data.RoleName{}
for _, v := range ss {

View File

@ -16,10 +16,10 @@ import (
"math/big"
"time"
"github.com/agl/ed25519"
"github.com/sirupsen/logrus"
"github.com/theupdateframework/notary"
"github.com/theupdateframework/notary/tuf/data"
"golang.org/x/crypto/ed25519"
)
// CanonicalKeyID returns the ID of the public bytes version of a TUF key.

View File

@ -1,59 +0,0 @@
github.com/Shopify/logrus-bugsnag 6dbc35f2c30d1e37549f9673dd07912452ab28a5
github.com/sirupsen/logrus f006c2ac4710855cf0f916dd6b77acf6b048dc6e # v1.0.3
github.com/agl/ed25519 278e1ec8e8a6e017cd07577924d6766039146ced
github.com/bugsnag/bugsnag-go 13fd6b8acda029830ef9904df6b63be0a83369d0
github.com/bugsnag/panicwrap e2c28503fcd0675329da73bf48b33404db873782
github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702
github.com/docker/distribution edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c
github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb
github.com/docker/go-connections 7395e3f8aa162843a74ed6d48e79627d9792ac55
github.com/docker/go d30aec9fd63c35133f8f79c3412ad91a3b08be06
github.com/dvsekhvalnov/jose2go f21a8cedbbae609f623613ec8f81125c243212e6 # v1.3
github.com/go-sql-driver/mysql a0583e0143b1624142adab07e0e97fe106d99561 # v1.3
github.com/gorilla/mux 53c1911da2b537f792e7cafcb446b05ffe33b996 # v1.6.1
github.com/jinzhu/gorm 5409931a1bb87e484d68d649af9367c207713ea2
github.com/jinzhu/inflection 1c35d901db3da928c72a72d8458480cc9ade058f
github.com/lib/pq 0dad96c0b94f8dee039aa40467f767467392a0af
github.com/mattn/go-sqlite3 6c771bb9887719704b210e87e934f08be014bdb1 # v1.6.0
github.com/miekg/pkcs11 5f6e0d0dad6f472df908c8e968a98ef00c9224bb
github.com/prometheus/client_golang 449ccefff16c8e2b7229f6be1921ba22f62461fe
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6 # model-0.0.2-12-gfa8ad6f
github.com/prometheus/procfs b1afdc266f54247f5dc725544f5d351a8661f502
github.com/prometheus/common 4fdc91a58c9d3696b982e8a680f4997403132d44
github.com/golang/protobuf c3cefd437628a0b7d31b34fe44b3a7a540e98527
github.com/spf13/cobra 7b2c5ac9fc04fc5efafb60700713d4fa609b777b # v0.0.1
github.com/spf13/viper be5ff3e4840cf692388bde7a057595a474ef379e
golang.org/x/crypto 76eec36fa14229c4b25bb894c2d0e591527af429
golang.org/x/net 6a513affb38dc9788b449d59ffed099b8de18fa0
golang.org/x/sys 314a259e304ff91bd6985da2a7149bbf91237993
google.golang.org/grpc 708a7f9f3283aa2d4f6132d287d78683babe55c8 # v1.0.5
github.com/pkg/errors 839d9e913e063e28dfd0e6c7b7512793e0a48be9
github.com/spf13/pflag e57e3eeb33f795204c1ca35f56c44f83227c6e66 # v1.0.0
github.com/spf13/cast 4d07383ffe94b5e5a6fa3af9211374a4507a0184
gopkg.in/yaml.v2 5420a8b6744d3b0345ab293f6fcba19c978f1183 # v2.2.1
gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715
github.com/gorilla/context 14f550f51af52180c2eefed15e5fd18d63c0a64a # unused
github.com/spf13/jwalterweatherman 3d60171a64319ef63c78bd45bd60e6eab1e75f8b
github.com/mitchellh/mapstructure 2caf8efc93669b6c43e0441cdc6aed17546c96f3
github.com/magiconair/properties 624009598839a9432bd97bb75552389422357723 # v1.5.3
github.com/kr/text 6807e777504f54ad073ecef66747de158294b639
github.com/kr/pretty bc9499caa0f45ee5edb2f0209fbd61fbf3d9018f # go.weekly.2011-12-22-18-gbc9499c
github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
github.com/docker/libtrust aabc10ec26b754e797f9028f4589c5b7bd90dc20
github.com/beorn7/perks b965b613227fddccbfffe13eae360ed3fa822f8d
github.com/BurntSushi/toml a368813c5e648fee92e5f6c30e3944ff9d5e8895
github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453
github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
gopkg.in/dancannon/gorethink.v3 e324d6ad938205da6c1e8a0179dc97a5b1a92185 https://github.com/docker/gorethink # v3.0.0-logrus
# dependencies of gorethink.v3
gopkg.in/gorethink/gorethink.v2 ac5be4ae8538d44ae8843b97fc9f90860cb48a85 https://github.com/docker/gorethink # v2.2.2-logrus
github.com/cenk/backoff 32cd0c5b3aef12c76ed64aaf678f6c79736be7dc # v1.0.0
# Testing requirements
github.com/stretchr/testify 089c7181b8c728499929ff09b62d3fdd8df8adff
github.com/cloudflare/cfssl 4e2dcbde500472449917533851bf4bae9bdff562 # v1.3.1
github.com/google/certificate-transparency-go 5ab67e519c93568ac3ee50fd6772a5bcf8aa460d
github.com/gogo/protobuf 1adfc126b41513cc696b209667c8656ea7aac67c # v1.0.0