1
0
Fork 0
mirror of https://github.com/moby/moby.git synced 2022-11-09 12:21:53 -05:00

Bump notary version up to 0.3.0 and re-vendor.

Signed-off-by: cyli <cyli@twistedmatrix.com>
This commit is contained in:
cyli 2016-05-11 15:25:05 -07:00
parent 730b974c64
commit 6094be63ac
38 changed files with 1104 additions and 1609 deletions

View file

@ -186,7 +186,7 @@ RUN set -x \
&& rm -rf "$GOPATH"
# Install notary and notary-server
ENV NOTARY_VERSION v0.3.0-RC1
ENV NOTARY_VERSION v0.3.0
RUN set -x \
&& export GO15VENDOREXPERIMENT=1 \
&& export GOPATH="$(mktemp -d)" \

View file

@ -117,7 +117,7 @@ RUN set -x \
&& rm -rf "$GOPATH"
# Install notary and notary-server
ENV NOTARY_VERSION v0.3.0-RC1
ENV NOTARY_VERSION v0.3.0
RUN set -x \
&& export GO15VENDOREXPERIMENT=1 \
&& export GOPATH="$(mktemp -d)" \

View file

@ -128,7 +128,7 @@ RUN set -x \
&& rm -rf "$GOPATH"
# Install notary and notary-server
ENV NOTARY_VERSION v0.3.0-RC1
ENV NOTARY_VERSION v0.3.0
RUN set -x \
&& export GO15VENDOREXPERIMENT=1 \
&& export GOPATH="$(mktemp -d)" \

View file

@ -141,7 +141,7 @@ RUN set -x \
&& rm -rf "$GOPATH"
# Install notary and notary-server
ENV NOTARY_VERSION v0.3.0-RC1
ENV NOTARY_VERSION v0.3.0
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \

View file

@ -130,7 +130,7 @@ RUN set -x \
&& rm -rf "$GOPATH"
# Install notary and notary-server
ENV NOTARY_VERSION v0.3.0-RC1
ENV NOTARY_VERSION v0.3.0
RUN set -x \
&& export GO15VENDOREXPERIMENT=1 \
&& export GOPATH="$(mktemp -d)" \

View file

@ -86,7 +86,7 @@ the tagged image prior to the loss. Image consumers would get an error for
content that they already downloaded:
```
could not validate the path to a trusted root: failed to validate data with current trusted certificates
Warning: potential malicious behavior - trust data has insufficient signatures for remote repository docker.io/my/image: valid signatures did not meet threshold
```
To correct this, they need to download a new image tag that is signed with

View file

@ -56,7 +56,7 @@ clone git github.com/mistifyio/go-zfs 22c9b32c84eb0d0c6f4043b6e90fc94073de92fa
clone git github.com/pborman/uuid v1.0
# get desired notary commit, might also need to be updated in Dockerfile
clone git github.com/docker/notary v0.3.0-RC1
clone git github.com/docker/notary v0.3.0
clone git google.golang.org/grpc a22b6611561e9f0a3e0919690dd2caf48f14c517 https://github.com/grpc/grpc-go.git
clone git github.com/miekg/pkcs11 df8ae6ca730422dba20c768ff38ef7d79077a59f

View file

@ -73,6 +73,9 @@ ${PREFIX}/bin/static/notary-server:
${PREFIX}/bin/static/notary-signer:
@echo "notary-signer: static builds not supported on OS X"
${PREFIX}/bin/static/notary:
@echo "notary: static builds not supported on OS X"
else
${PREFIX}/bin/static/notary-server: NOTARY_VERSION $(shell find . -type f -name '*.go')
@echo "+ $@"
@ -81,6 +84,10 @@ ${PREFIX}/bin/static/notary-server: NOTARY_VERSION $(shell find . -type f -name
${PREFIX}/bin/static/notary-signer: NOTARY_VERSION $(shell find . -type f -name '*.go')
@echo "+ $@"
@go build -tags ${NOTARY_BUILDTAGS} -o $@ ${GO_LDFLAGS_STATIC} ./cmd/notary-signer
${PREFIX}/bin/static/notary:
@echo "+ $@"
@go build -tags ${NOTARY_BUILDTAGS} -o $@ ${GO_LDFLAGS_STATIC} ./cmd/notary
endif
vet:
@ -179,7 +186,7 @@ client: ${PREFIX}/bin/notary
binaries: ${PREFIX}/bin/notary-server ${PREFIX}/bin/notary ${PREFIX}/bin/notary-signer
@echo "+ $@"
static: ${PREFIX}/bin/static/notary-server ${PREFIX}/bin/static/notary-signer
static: ${PREFIX}/bin/static/notary-server ${PREFIX}/bin/static/notary-signer ${PREFIX}/bin/static/notary
@echo "+ $@"
notary-dockerfile:

View file

@ -83,8 +83,17 @@ Prerequisites:
- Go >= 1.6.1
- [godep](https://github.com/tools/godep) installed
- libtool development headers installed
- Ubuntu: `apt-get install libtool-dev`
- Ubuntu: `apt-get install libltdl-dev`
- CentOS/RedHat: `yum install libtool-ltdl-devel`
- Mac OS ([Homebrew](http://brew.sh/)): `brew install libtool`
Run `make binaries`, which creates the Notary Client CLI binary at `bin/notary`.
Note that `make binaries` assumes a standard Go directory structure, in which
Notary is checked out to the `src` directory in your `GOPATH`. For example:
```
$GOPATH/
src/
github.com/
docker/
notary/
```

View file

@ -2,7 +2,6 @@ package client
import (
"bytes"
"crypto/x509"
"encoding/json"
"fmt"
"io/ioutil"
@ -87,7 +86,6 @@ type NotaryRepository struct {
CryptoService signed.CryptoService
tufRepo *tuf.Repo
roundTrip http.RoundTripper
CertStore trustmanager.X509Store
trustPinning trustpinning.TrustPinConfig
}
@ -97,15 +95,6 @@ type NotaryRepository struct {
func repositoryFromKeystores(baseDir, gun, baseURL string, rt http.RoundTripper,
keyStores []trustmanager.KeyStore, trustPin trustpinning.TrustPinConfig) (*NotaryRepository, error) {
certPath := filepath.Join(baseDir, notary.TrustedCertsDir)
certStore, err := trustmanager.NewX509FilteredFileStore(
certPath,
trustmanager.FilterCertsExpiredSha1,
)
if err != nil {
return nil, err
}
cryptoService := cryptoservice.NewCryptoService(keyStores...)
nRepo := &NotaryRepository{
@ -115,7 +104,6 @@ func repositoryFromKeystores(baseDir, gun, baseURL string, rt http.RoundTripper,
tufRepoPath: filepath.Join(baseDir, tufDir, filepath.FromSlash(gun)),
CryptoService: cryptoService,
roundTrip: rt,
CertStore: certStore,
trustPinning: trustPin,
}
@ -162,22 +150,22 @@ func NewTarget(targetName string, targetPath string) (*Target, error) {
return &Target{Name: targetName, Hashes: meta.Hashes, Length: meta.Length}, nil
}
func rootCertKey(gun string, privKey data.PrivateKey) (*x509.Certificate, data.PublicKey, error) {
func rootCertKey(gun string, privKey data.PrivateKey) (data.PublicKey, error) {
// Hard-coded policy: the generated certificate expires in 10 years.
startTime := time.Now()
cert, err := cryptoservice.GenerateCertificate(
privKey, gun, startTime, startTime.Add(notary.Year*10))
if err != nil {
return nil, nil, err
return nil, err
}
x509PublicKey := trustmanager.CertToKey(cert)
if x509PublicKey == nil {
return nil, nil, fmt.Errorf(
return nil, fmt.Errorf(
"cannot use regenerated certificate: format %s", cert.PublicKeyAlgorithm)
}
return cert, x509PublicKey, nil
return x509PublicKey, nil
}
// Initialize creates a new repository by using rootKey as the root Key for the
@ -218,11 +206,10 @@ func (r *NotaryRepository) Initialize(rootKeyID string, serverManagedRoles ...st
}
}
rootCert, rootKey, err := rootCertKey(r.gun, privKey)
rootKey, err := rootCertKey(r.gun, privKey)
if err != nil {
return err
}
r.CertStore.AddCert(rootCert)
var (
rootRole = data.NewBaseRole(
@ -394,8 +381,7 @@ func (r *NotaryRepository) RemoveTarget(targetName string, roles ...string) erro
// subtree and also the "targets/x" subtree, as we will defer parsing it until
// we explicitly reach it in our iteration of the provided list of roles.
func (r *NotaryRepository) ListTargets(roles ...string) ([]*TargetWithRole, error) {
err := r.Update(false)
if err != nil {
if err := r.Update(false); err != nil {
return nil, err
}
@ -432,12 +418,12 @@ func (r *NotaryRepository) ListTargets(roles ...string) ([]*TargetWithRole, erro
return targetList, nil
}
// GetTargetByName returns a target given a name. If no roles are passed
// GetTargetByName returns a target by the given name. If no roles are passed
// it uses the targets role and does a search of the entire delegation
// graph, finding the first entry in a breadth first search of the delegations.
// If roles are passed, they should be passed in descending priority and
// the target entry found in the subtree of the highest priority role
// will be returned
// will be returned.
// See the IMPORTANT section on ListTargets above. Those roles also apply here.
func (r *NotaryRepository) GetTargetByName(name string, roles ...string) (*TargetWithRole, error) {
if err := r.Update(false); err != nil {
@ -656,50 +642,33 @@ func (r *NotaryRepository) publish(cl changelist.Changelist) error {
// a not yet published repo or a possibly obsolete local copy) into
// r.tufRepo. This attempts to load metadata for all roles. Since server
// snapshots are supported, if the snapshot metadata fails to load, that's ok.
// This can also be unified with some cache reading tools from tuf/client.
// This assumes that bootstrapRepo is only used by Publish() or RotateKey()
func (r *NotaryRepository) bootstrapRepo() error {
tufRepo := tuf.NewRepo(r.CryptoService)
b := tuf.NewRepoBuilder(r.gun, r.CryptoService, r.trustPinning)
logrus.Debugf("Loading trusted collection.")
rootJSON, err := r.fileStore.GetMeta(data.CanonicalRootRole, -1)
if err != nil {
return err
}
root := &data.SignedRoot{}
err = json.Unmarshal(rootJSON, root)
if err != nil {
return err
}
err = tufRepo.SetRoot(root)
if err != nil {
return err
}
targetsJSON, err := r.fileStore.GetMeta(data.CanonicalTargetsRole, -1)
if err != nil {
return err
}
targets := &data.SignedTargets{}
err = json.Unmarshal(targetsJSON, targets)
if err != nil {
return err
}
tufRepo.SetTargets(data.CanonicalTargetsRole, targets)
snapshotJSON, err := r.fileStore.GetMeta(data.CanonicalSnapshotRole, -1)
if err == nil {
snapshot := &data.SignedSnapshot{}
err = json.Unmarshal(snapshotJSON, snapshot)
for _, role := range data.BaseRoles {
jsonBytes, err := r.fileStore.GetMeta(role, store.NoSizeLimit)
if err != nil {
if _, ok := err.(store.ErrMetaNotFound); ok &&
// server snapshots are supported, and server timestamp management
// is required, so if either of these fail to load that's ok - especially
// if the repo is new
role == data.CanonicalSnapshotRole || role == data.CanonicalTimestampRole {
continue
}
return err
}
if err := b.Load(role, jsonBytes, 1, true); err != nil {
return err
}
tufRepo.SetSnapshot(snapshot)
} else if _, ok := err.(store.ErrMetaNotFound); !ok {
return err
}
r.tufRepo = tufRepo
tufRepo, err := b.Finish()
if err == nil {
r.tufRepo = tufRepo
}
return nil
}
@ -769,15 +738,17 @@ func (r *NotaryRepository) Update(forWrite bool) error {
}
return err
}
if err := c.Update(); err != nil {
repo, err := c.Update()
if err != nil {
// notFound.Resource may include a checksum so when the role is root,
// it will be root.json or root.<checksum>.json. Therefore best we can
// it will be root or root.<checksum>. Therefore best we can
// do is match a "root." prefix
if notFound, ok := err.(store.ErrMetaNotFound); ok && strings.HasPrefix(notFound.Resource, data.CanonicalRootRole+".") {
return r.errRepositoryNotExist()
}
return err
}
r.tufRepo = repo
return nil
}
@ -787,12 +758,9 @@ func (r *NotaryRepository) Update(forWrite bool) error {
// is initialized or not. If set to true, we will always attempt to download
// and return an error if the remote repository errors.
//
// Partially populates r.tufRepo with this root metadata (only; use
// Populates a tuf.RepoBuilder with this root metadata (only use
// tufclient.Client.Update to load the rest).
//
// As another side effect, r.CertManager's list of trusted certificates
// is updated with data from the loaded root.json.
//
// Fails if the remote server is reachable and does not know the repo
// (i.e. before the first r.Publish()), in which case the error is
// store.ErrMetaNotFound, or if the root metadata (from whichever source is used)
@ -801,40 +769,55 @@ func (r *NotaryRepository) Update(forWrite bool) error {
// Returns a tufclient.Client for the remote server, which may not be actually
// operational (if the URL is invalid but a root.json is cached).
func (r *NotaryRepository) bootstrapClient(checkInitialized bool) (*tufclient.Client, error) {
var (
rootJSON []byte
err error
signedRoot *data.SignedRoot
)
// try to read root from cache first. We will trust this root
// until we detect a problem during update which will cause
// us to download a new root and perform a rotation.
rootJSON, cachedRootErr := r.fileStore.GetMeta(data.CanonicalRootRole, -1)
minVersion := 1
// the old root on disk should not be validated against any trust pinning configuration
// because if we have an old root, it itself is the thing that pins trust
oldBuilder := tuf.NewRepoBuilder(r.gun, r.CryptoService, trustpinning.TrustPinConfig{})
if cachedRootErr == nil {
signedRoot, cachedRootErr = r.validateRoot(rootJSON)
// by default, we want to use the trust pinning configuration on any new root that we download
newBuilder := tuf.NewRepoBuilder(r.gun, r.CryptoService, r.trustPinning)
// Try to read root from cache first. We will trust this root until we detect a problem
// during update which will cause us to download a new root and perform a rotation.
// If we have an old root, and it's valid, then we overwrite the newBuilder to be one
// preloaded with the old root or one which uses the old root for trust bootstrapping.
if rootJSON, err := r.fileStore.GetMeta(data.CanonicalRootRole, store.NoSizeLimit); err == nil {
// if we can't load the cached root, fail hard because that is how we pin trust
if err := oldBuilder.Load(data.CanonicalRootRole, rootJSON, minVersion, true); err != nil {
return nil, err
}
// again, the root on disk is the source of trust pinning, so use an empty trust
// pinning configuration
newBuilder = tuf.NewRepoBuilder(r.gun, r.CryptoService, trustpinning.TrustPinConfig{})
if err := newBuilder.Load(data.CanonicalRootRole, rootJSON, minVersion, false); err != nil {
// Ok, the old root is expired - we want to download a new one. But we want to use the
// old root to verify the new root, so bootstrap a new builder with the old builder
minVersion = oldBuilder.GetLoadedVersion(data.CanonicalRootRole)
newBuilder = oldBuilder.BootstrapNewBuilder()
}
}
remote, remoteErr := getRemoteStore(r.baseURL, r.gun, r.roundTrip)
if remoteErr != nil {
logrus.Error(remoteErr)
} else if cachedRootErr != nil || checkInitialized {
// remoteErr was nil and we had a cachedRootErr (or are specifically
// checking for initialization of the repo).
} else if !newBuilder.IsLoaded(data.CanonicalRootRole) || checkInitialized {
// remoteErr was nil and we were not able to load a root from cache or
// are specifically checking for initialization of the repo.
// if remote store successfully set up, try and get root from remote
// We don't have any local data to determine the size of root, so try the maximum (though it is restricted at 100MB)
tmpJSON, err := remote.GetMeta(data.CanonicalRootRole, -1)
tmpJSON, err := remote.GetMeta(data.CanonicalRootRole, store.NoSizeLimit)
if err != nil {
// we didn't have a root in cache and were unable to load one from
// the server. Nothing we can do but error.
return nil, err
}
if cachedRootErr != nil {
// we always want to use the downloaded root if there was a cache
// error.
signedRoot, err = r.validateRoot(tmpJSON)
if err != nil {
if !newBuilder.IsLoaded(data.CanonicalRootRole) {
// we always want to use the downloaded root if we couldn't load from cache
if err := newBuilder.Load(data.CanonicalRootRole, tmpJSON, minVersion, false); err != nil {
return nil, err
}
@ -846,44 +829,13 @@ func (r *NotaryRepository) bootstrapClient(checkInitialized bool) (*tufclient.Cl
}
}
r.tufRepo = tuf.NewRepo(r.CryptoService)
if signedRoot == nil {
// We can only get here if remoteErr != nil (hence we don't download any new root),
// and there was no root on disk
if !newBuilder.IsLoaded(data.CanonicalRootRole) {
return nil, ErrRepoNotInitialized{}
}
err = r.tufRepo.SetRoot(signedRoot)
if err != nil {
return nil, err
}
return tufclient.NewClient(
r.tufRepo,
remote,
r.fileStore,
), nil
}
// validateRoot MUST only be used during bootstrapping. It will only validate
// signatures of the root based on known keys, not expiry or other metadata.
// This is so that an out of date root can be loaded to be used in a rotation
// should the TUF update process detect a problem.
func (r *NotaryRepository) validateRoot(rootJSON []byte) (*data.SignedRoot, error) {
// can't just unmarshal into SignedRoot because validate root
// needs the root.Signed field to still be []byte for signature
// validation
root := &data.Signed{}
err := json.Unmarshal(rootJSON, root)
if err != nil {
return nil, err
}
err = trustpinning.ValidateRoot(r.CertStore, root, r.gun, r.trustPinning)
if err != nil {
return nil, err
}
return data.RootFromSigned(root)
return tufclient.NewClient(oldBuilder, newBuilder, remote, r.fileStore), nil
}
// RotateKey removes all existing keys associated with the role, and either
@ -929,7 +881,7 @@ func (r *NotaryRepository) RotateKey(role string, serverManagesKey bool) error {
if err != nil {
return err
}
_, pubKey, err = rootCertKey(r.gun, privKey)
pubKey, err = rootCertKey(r.gun, privKey)
if err != nil {
return err
}
@ -964,26 +916,12 @@ func (r *NotaryRepository) rootFileKeyChange(cl changelist.Changelist, role, act
return cl.Add(c)
}
// DeleteTrustData removes the trust data stored for this repo in the TUF cache and certificate store on the client side
// DeleteTrustData removes the trust data stored for this repo in the TUF cache on the client side
func (r *NotaryRepository) DeleteTrustData() error {
// Clear TUF files and cache
if err := r.fileStore.RemoveAll(); err != nil {
return fmt.Errorf("error clearing TUF repo data: %v", err)
}
r.tufRepo = tuf.NewRepo(nil)
// Clear certificates
certificates, err := r.CertStore.GetCertificatesByCN(r.gun)
if err != nil {
// If there were no certificates to delete, we're done
if _, ok := err.(*trustmanager.ErrNoCertificatesFound); ok {
return nil
}
return fmt.Errorf("error retrieving certificates for %s: %v", r.gun, err)
}
for _, cert := range certificates {
if err := r.CertStore.RemoveCert(cert); err != nil {
return fmt.Errorf("error removing certificate: %v: %v", cert, err)
}
}
return nil
}

View file

@ -13,7 +13,8 @@ import (
// NewNotaryRepository is a helper method that returns a new notary repository.
// It takes the base directory under where all the trust files will be stored
// (usually ~/.docker/trust/).
// (This normally defaults to "~/.notary" or "~/.docker/trust" when enabling
// docker content trust).
func NewNotaryRepository(baseDir, gun, baseURL string, rt http.RoundTripper,
retriever passphrase.Retriever, trustPinning trustpinning.TrustPinConfig) (
*NotaryRepository, error) {

View file

@ -0,0 +1,18 @@
codecov:
notify:
# 2 builds on circleci, 1 jenkins build
after_n_builds: 3
coverage:
status:
# project will give us the diff in the total code coverage between a commit
# and its parent
project:
default:
target: auto
# patch would give us the code coverage of the diff only
patch: false
# changes tells us if there are unexpected code coverage changes in other files
# which were not changed by the diff
changes: false
comment: off

View file

@ -39,7 +39,7 @@ services:
depends_on:
- rdb-proxy
rdb-01:
image: jlhawn/rethinkdb-tls
image: jlhawn/rethinkdb:2.3.0
volumes:
- ./fixtures/rethinkdb:/tls
- rdb-01-data:/var/data
@ -49,9 +49,9 @@ services:
- rdb
- rdb.rdb
- rdb-01.rdb
command: "--bind all --no-http-admin --server-name rdb_01 --canonical-address rdb-01.rdb --directory /var/data/rethinkdb --join rdb.rdb --driver-tls --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
command: "--bind all --no-http-admin --server-name rdb_01 --canonical-address rdb-01.rdb --directory /var/data/rethinkdb --join rdb.rdb --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
rdb-02:
image: jlhawn/rethinkdb-tls
image: jlhawn/rethinkdb:2.3.0
volumes:
- ./fixtures/rethinkdb:/tls
- rdb-02-data:/var/data
@ -61,9 +61,9 @@ services:
- rdb
- rdb.rdb
- rdb-02.rdb
command: "--bind all --no-http-admin --server-name rdb_02 --canonical-address rdb-02.rdb --directory /var/data/rethinkdb --join rdb.rdb --driver-tls --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
command: "--bind all --no-http-admin --server-name rdb_02 --canonical-address rdb-02.rdb --directory /var/data/rethinkdb --join rdb.rdb --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
rdb-03:
image: jlhawn/rethinkdb-tls
image: jlhawn/rethinkdb:2.3.0
volumes:
- ./fixtures/rethinkdb:/tls
- rdb-03-data:/var/data
@ -73,9 +73,9 @@ services:
- rdb
- rdb.rdb
- rdb-03.rdb
command: "--bind all --no-http-admin --server-name rdb_03 --canonical-address rdb-03.rdb --directory /var/data/rethinkdb --join rdb.rdb --driver-tls --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
command: "--bind all --no-http-admin --server-name rdb_03 --canonical-address rdb-03.rdb --directory /var/data/rethinkdb --join rdb.rdb --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
rdb-proxy:
image: jlhawn/rethinkdb-tls
image: jlhawn/rethinkdb:2.3.0
ports:
- "8080:8080"
volumes:
@ -85,7 +85,7 @@ services:
aliases:
- rdb-proxy
- rdb-proxy.rdp
command: "proxy --bind all --join rdb.rdb --web-tls --web-tls-key /tls/key.pem --web-tls-cert /tls/cert.pem --driver-tls --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
command: "proxy --bind all --join rdb.rdb --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
depends_on:
- rdb-01
- rdb-02

View file

@ -39,7 +39,7 @@ services:
depends_on:
- rdb-proxy
rdb-01:
image: jlhawn/rethinkdb-tls
image: jlhawn/rethinkdb:2.3.0
volumes:
- ./fixtures/rethinkdb:/tls
- rdb-01-data:/var/data
@ -49,9 +49,9 @@ services:
- rdb
- rdb.rdb
- rdb-01.rdb
command: "--bind all --no-http-admin --server-name rdb_01 --canonical-address rdb-01.rdb --directory /var/data/rethinkdb --join rdb.rdb --driver-tls --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
command: "--bind all --no-http-admin --server-name rdb_01 --canonical-address rdb-01.rdb --directory /var/data/rethinkdb --join rdb.rdb --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
rdb-02:
image: jlhawn/rethinkdb-tls
image: jlhawn/rethinkdb:2.3.0
volumes:
- ./fixtures/rethinkdb:/tls
- rdb-02-data:/var/data
@ -61,9 +61,9 @@ services:
- rdb
- rdb.rdb
- rdb-02.rdb
command: "--bind all --no-http-admin --server-name rdb_02 --canonical-address rdb-02.rdb --directory /var/data/rethinkdb --join rdb.rdb --driver-tls --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
command: "--bind all --no-http-admin --server-name rdb_02 --canonical-address rdb-02.rdb --directory /var/data/rethinkdb --join rdb.rdb --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
rdb-03:
image: jlhawn/rethinkdb-tls
image: jlhawn/rethinkdb:2.3.0
volumes:
- ./fixtures/rethinkdb:/tls
- rdb-03-data:/var/data
@ -73,9 +73,9 @@ services:
- rdb
- rdb.rdb
- rdb-03.rdb
command: "--bind all --no-http-admin --server-name rdb_03 --canonical-address rdb-03.rdb --directory /var/data/rethinkdb --join rdb.rdb --driver-tls --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
command: "--bind all --no-http-admin --server-name rdb_03 --canonical-address rdb-03.rdb --directory /var/data/rethinkdb --join rdb.rdb --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
rdb-proxy:
image: jlhawn/rethinkdb-tls
image: jlhawn/rethinkdb:2.3.0
ports:
- "8080:8080"
volumes:
@ -85,7 +85,7 @@ services:
aliases:
- rdb-proxy
- rdb-proxy.rdp
command: "proxy --bind all --join rdb.rdb --web-tls --web-tls-key /tls/key.pem --web-tls-cert /tls/cert.pem --driver-tls --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
command: "proxy --bind all --join rdb.rdb --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
depends_on:
- rdb-01
- rdb-02

View file

@ -1,272 +0,0 @@
package trustmanager
import (
"crypto/x509"
"errors"
"os"
"path"
"github.com/Sirupsen/logrus"
)
// X509FileStore implements X509Store that persists on disk, while keeping
// in-memory indexes for fast lookup by ID and by subject CommonName.
type X509FileStore struct {
	validate       Validator                    // policy applied to every certificate added to the store
	fileMap        map[CertID]string            // certificate ID -> on-disk filename
	fingerprintMap map[CertID]*x509.Certificate // certificate ID -> parsed certificate
	nameMap        map[string][]CertID          // subject CommonName -> IDs of certs sharing that name
	fileStore      Storage                      // backing persistent storage
}
// NewX509FileStore returns a new X509FileStore rooted at directory.
// Every certificate is accepted: no validation filtering is applied.
func NewX509FileStore(directory string) (*X509FileStore, error) {
	acceptAll := func(cert *x509.Certificate) bool { return true }
	return newX509FileStore(directory, acceptAll)
}
// NewX509FilteredFileStore returns a new X509FileStore rooted at directory
// that only accepts certificates passing the supplied validation function.
func NewX509FilteredFileStore(directory string, validate func(*x509.Certificate) bool) (*X509FileStore, error) {
	return newX509FileStore(directory, validate)
}
// newX509FileStore builds a file-backed certificate store over directory and
// primes the in-memory indexes from any certificates already on disk.
func newX509FileStore(directory string, validate func(*x509.Certificate) bool) (*X509FileStore, error) {
	backing, err := NewSimpleFileStore(directory, certExtension)
	if err != nil {
		return nil, err
	}
	store := &X509FileStore{
		validate:       ValidatorFunc(validate),
		fileMap:        make(map[CertID]string),
		fingerprintMap: make(map[CertID]*x509.Certificate),
		nameMap:        make(map[string][]CertID),
		fileStore:      backing,
	}
	if err := loadCertsFromDir(store); err != nil {
		return nil, err
	}
	return store, nil
}
// AddCert validates cert and, if it passes, persists it under a
// deterministic filename derived from the certificate itself.
func (s *X509FileStore) AddCert(cert *x509.Certificate) error {
	if cert == nil {
		return errors.New("adding nil Certificate to X509Store")
	}
	// Refuse certificates that fail this store's validation policy.
	if !s.validate.Validate(cert) {
		return &ErrCertValidation{}
	}
	// Delegate naming and on-disk persistence.
	return s.addNamedCert(cert)
}
// addNamedCert allows adding a certificate while controlling the filename it
// gets stored under. If the file does not already exist on disk, it is saved;
// in either case the in-memory indexes are updated.
func (s *X509FileStore) addNamedCert(cert *x509.Certificate) error {
	// Local renamed from "fileName" so it no longer shadows the package-level
	// fileName helper that produces it.
	certFileName, certID, err := fileName(cert)
	if err != nil {
		return err
	}
	logrus.Debug("Adding cert with certID: ", certID)
	// Reject duplicates: the fingerprint map is the source of truth.
	if _, ok := s.fingerprintMap[certID]; ok {
		return &ErrCertExists{}
	}
	// Serialize the certificate to PEM for on-disk storage.
	certBytes := CertToPEM(cert)
	// Save the file to disk only if it is not already there.
	if _, err = s.fileStore.Get(certFileName); os.IsNotExist(err) {
		if err := s.fileStore.Add(certFileName, certBytes); err != nil {
			return err
		}
	} else if err != nil {
		return err
	}
	// We wrote the certificate successfully; add it to our in-memory storage.
	s.fingerprintMap[certID] = cert
	s.fileMap[certID] = certFileName
	// Subject.CommonName is already a string; no conversion needed.
	name := cert.Subject.CommonName
	s.nameMap[name] = append(s.nameMap[name], certID)
	return nil
}
// RemoveCert removes a certificate from the X509FileStore, dropping it from
// all in-memory indexes and deleting its backing file.
func (s *X509FileStore) RemoveCert(cert *x509.Certificate) error {
	if cert == nil {
		return errors.New("removing nil Certificate from X509Store")
	}
	certID, err := fingerprintCert(cert)
	if err != nil {
		return err
	}
	delete(s.fingerprintMap, certID)
	filename := s.fileMap[certID]
	delete(s.fileMap, certID)
	// Subject.CommonName is already a string; no conversion needed.
	name := cert.Subject.CommonName
	// Filter the fingerprint out of this name's entry, reusing the backing array.
	fpList := s.nameMap[name]
	newfpList := fpList[:0]
	for _, x := range fpList {
		if x != certID {
			newfpList = append(newfpList, x)
		}
	}
	s.nameMap[name] = newfpList
	// NOTE(review): the in-memory indexes are mutated before the file delete,
	// so a failed Remove leaves memory and disk inconsistent. Preserved as-is
	// to avoid changing observable error-path behavior.
	if err := s.fileStore.Remove(filename); err != nil {
		return err
	}
	return nil
}
// RemoveAll deletes every certificate file tracked by the store and resets
// all in-memory indexes to empty.
func (s *X509FileStore) RemoveAll() error {
	for _, storedPath := range s.fileMap {
		if err := s.fileStore.Remove(storedPath); err != nil {
			return err
		}
	}
	s.fingerprintMap = make(map[CertID]*x509.Certificate)
	s.nameMap = make(map[string][]CertID)
	s.fileMap = make(map[CertID]string)
	return nil
}
// AddCertFromPEM adds the first certificate found in pemBytes to the store,
// returning an error if no certificate can be parsed.
//
// Receiver changed from value to pointer for consistency with every other
// X509FileStore method (Go Code Review Comments: receivers should be
// consistent across a type's method set).
func (s *X509FileStore) AddCertFromPEM(pemBytes []byte) error {
	cert, err := LoadCertFromPEM(pemBytes)
	if err != nil {
		return err
	}
	return s.AddCert(cert)
}
// AddCertFromFile tries to add an X.509 certificate to the store, reading it
// from the file at filename.
func (s *X509FileStore) AddCertFromFile(filename string) error {
	cert, err := LoadCertFromFile(filename)
	if err != nil {
		return err
	}
	return s.AddCert(cert)
}
// GetCertificates returns a slice containing every certificate currently in
// the store, in no particular order (map iteration order is random).
func (s *X509FileStore) GetCertificates() []*x509.Certificate {
	out := make([]*x509.Certificate, 0, len(s.fingerprintMap))
	for _, cert := range s.fingerprintMap {
		out = append(out, cert)
	}
	return out
}
// GetCertificatePool returns an x509.CertPool populated with every
// certificate held in the store.
func (s *X509FileStore) GetCertificatePool() *x509.CertPool {
	pool := x509.NewCertPool()
	for _, cert := range s.fingerprintMap {
		pool.AddCert(cert)
	}
	return pool
}
// GetCertificateByCertID returns the certificate whose ID matches certID,
// converting the string form to the internal CertID type first.
func (s *X509FileStore) GetCertificateByCertID(certID string) (*x509.Certificate, error) {
	return s.getCertificateByCertID(CertID(certID))
}
// getCertificateByCertID returns the certificate matching certID, or an
// error when the ID is malformed or unknown to this store.
func (s *X509FileStore) getCertificateByCertID(certID CertID) (*x509.Certificate, error) {
	// A well-formed ID is a hex-encoded SHA-256 digest: exactly 64 characters.
	if len(certID) != 64 {
		return nil, errors.New("invalid Subject Key Identifier")
	}
	// certID is already of type CertID, so the previous CertID(certID)
	// conversion at the lookup was redundant and has been removed.
	if cert, ok := s.fingerprintMap[certID]; ok {
		return cert, nil
	}
	return nil, &ErrNoCertificatesFound{query: string(certID)}
}
// GetCertificatesByCN returns every certificate whose subject CommonName is
// exactly cn. When nothing matches, ErrNoCertificatesFound is returned.
func (s *X509FileStore) GetCertificatesByCN(cn string) ([]*x509.Certificate, error) {
	ids, ok := s.nameMap[cn]
	if !ok || len(ids) == 0 {
		return nil, &ErrNoCertificatesFound{query: cn}
	}
	matches := make([]*x509.Certificate, 0, len(ids))
	for _, id := range ids {
		cert, err := s.getCertificateByCertID(id)
		if err != nil {
			// The name index pointed at an ID the fingerprint map does not
			// know about: the store is internally inconsistent.
			return nil, &ErrBadCertificateStore{}
		}
		matches = append(matches, cert)
	}
	return matches, nil
}
// GetVerifyOptions builds x509.VerifyOptions whose root pool consists solely
// of the certificates in this store. System roots are deliberately never
// used; an error is returned when the store holds no certificates at all.
func (s *X509FileStore) GetVerifyOptions(dnsName string) (x509.VerifyOptions, error) {
	// Refuse to fall back to system CAs when the store is empty.
	if len(s.fingerprintMap) == 0 {
		return x509.VerifyOptions{}, errors.New("no root CAs available")
	}
	return x509.VerifyOptions{
		DNSName: dnsName,
		Roots:   s.GetCertificatePool(),
	}, nil
}
// Empty reports whether the X509FileStore currently holds no certificates.
func (s *X509FileStore) Empty() bool {
	return len(s.fingerprintMap) == 0
}
// fileName derives the storage path for a certificate - the certificate's
// CommonName directory joined with its CertID - and returns the computed
// CertID alongside the path.
func fileName(cert *x509.Certificate) (string, CertID, error) {
	id, err := fingerprintCert(cert)
	if err != nil {
		return "", "", err
	}
	name := path.Join(cert.Subject.CommonName, string(id))
	return name, id, nil
}

View file

@ -1,203 +0,0 @@
package trustmanager
import (
"crypto/x509"
"errors"
"github.com/Sirupsen/logrus"
)
// X509MemStore implements X509Store as an in-memory object with no persistence
type X509MemStore struct {
	validate Validator // filter applied to every certificate passed to AddCert
	fingerprintMap map[CertID]*x509.Certificate // certID -> certificate; the canonical store contents
	nameMap map[string][]CertID // string(cert.RawSubject) -> certIDs with that subject
}
// NewX509MemStore returns a new X509MemStore whose validator accepts every
// certificate.
func NewX509MemStore() *X509MemStore {
	acceptAll := func(cert *x509.Certificate) bool { return true }
	return &X509MemStore{
		validate:       ValidatorFunc(acceptAll),
		fingerprintMap: make(map[CertID]*x509.Certificate),
		nameMap:        make(map[string][]CertID),
	}
}
// NewX509FilteredMemStore returns a new X509MemStore that runs the given
// validation function over every certificate before accepting it.
func NewX509FilteredMemStore(validate func(*x509.Certificate) bool) *X509MemStore {
	return &X509MemStore{
		validate:       ValidatorFunc(validate),
		fingerprintMap: make(map[CertID]*x509.Certificate),
		nameMap:        make(map[string][]CertID),
	}
}
// AddCert adds a certificate to the store, overwriting any existing entry
// with the same fingerprint. It returns ErrCertValidation when the store's
// validator rejects the certificate.
func (s *X509MemStore) AddCert(cert *x509.Certificate) error {
	if cert == nil {
		return errors.New("adding nil Certificate to X509Store")
	}
	if !s.validate.Validate(cert) {
		return &ErrCertValidation{}
	}
	certID, err := fingerprintCert(cert)
	if err != nil {
		return err
	}
	logrus.Debug("Adding cert with certID: ", certID)
	// In this store we overwrite the certificate if it already exists
	s.fingerprintMap[certID] = cert
	// NOTE(review): the name index is keyed by the raw DER subject here, but
	// GetCertificatesByCN looks entries up by plain CommonName - confirm the
	// two are meant to use the same key.
	subject := string(cert.RawSubject)
	s.nameMap[subject] = append(s.nameMap[subject], certID)
	return nil
}
// RemoveCert removes a certificate from a X509MemStore. Removing a
// certificate that is not present is not an error.
func (s *X509MemStore) RemoveCert(cert *x509.Certificate) error {
	if cert == nil {
		return errors.New("removing nil Certificate to X509Store")
	}
	certID, err := fingerprintCert(cert)
	if err != nil {
		return err
	}
	delete(s.fingerprintMap, certID)
	name := string(cert.RawSubject)
	// Filter the fingerprint out of this name entry. Drop the entry entirely
	// once no fingerprints remain, so the index does not accumulate empty
	// slices (the original kept - and even created - empty entries).
	fpList := s.nameMap[name]
	newfpList := fpList[:0]
	for _, x := range fpList {
		if x != certID {
			newfpList = append(newfpList, x)
		}
	}
	if len(newfpList) == 0 {
		delete(s.nameMap, name)
	} else {
		s.nameMap[name] = newfpList
	}
	return nil
}
// RemoveAll removes every certificate currently held by the store. RemoveCert
// also prunes the name index for each certificate as it goes.
func (s *X509MemStore) RemoveAll() error {
	// Snapshot the certificates first so the iteration is independent of the
	// deletions performed by RemoveCert.
	certs := make([]*x509.Certificate, 0, len(s.fingerprintMap))
	for _, c := range s.fingerprintMap {
		certs = append(certs, c)
	}
	for _, c := range certs {
		if err := s.RemoveCert(c); err != nil {
			return err
		}
	}
	return nil
}
// AddCertFromPEM adds a certificate to the store from a PEM blob; only the
// first valid "CERTIFICATE" block in the blob is considered.
func (s *X509MemStore) AddCertFromPEM(pemBytes []byte) error {
	cert, err := LoadCertFromPEM(pemBytes)
	if err != nil {
		return err
	}
	return s.AddCert(cert)
}
// AddCertFromFile tries to add a X509 certificate to the store given a filename.
func (s *X509MemStore) AddCertFromFile(filename string) error {
	// Parameter renamed from the misspelled "originFilname" to match the
	// X509Store interface declaration (AddCertFromFile(filename string)).
	cert, err := LoadCertFromFile(filename)
	if err != nil {
		return err
	}
	return s.AddCert(cert)
}
// GetCertificates returns a slice with all of the current X509 Certificates,
// in no particular order (map iteration order is randomized).
func (s *X509MemStore) GetCertificates() []*x509.Certificate {
	certs := make([]*x509.Certificate, 0, len(s.fingerprintMap))
	for _, cert := range s.fingerprintMap {
		certs = append(certs, cert)
	}
	return certs
}
// GetCertificatePool returns an x509 CertPool loaded with every certificate
// in the store.
func (s *X509MemStore) GetCertificatePool() *x509.CertPool {
	pool := x509.NewCertPool()
	for _, cert := range s.fingerprintMap {
		pool.AddCert(cert)
	}
	return pool
}
// GetCertificateByCertID returns the certificate that matches a certain certID
// (expected to be the 64-character hex TUF key ID of the certificate).
func (s *X509MemStore) GetCertificateByCertID(certID string) (*x509.Certificate, error) {
	return s.getCertificateByCertID(CertID(certID))
}
// getCertificateByCertID returns the certificate stored under certID, or an
// error when the ID is malformed or unknown.
func (s *X509MemStore) getCertificateByCertID(certID CertID) (*x509.Certificate, error) {
	// A valid ID is the hex encoding of a sha256 hash: exactly 64 characters.
	if len(certID) != 64 {
		return nil, errors.New("invalid Subject Key Identifier")
	}
	cert, ok := s.fingerprintMap[certID]
	if !ok {
		return nil, &ErrNoCertificatesFound{query: string(certID)}
	}
	return cert, nil
}
// GetCertificatesByCN returns all the certificates indexed under cn.
// NOTE(review): AddCert indexes by string(cert.RawSubject), not by plain
// CommonName - confirm lookups by CN can ever match.
func (s *X509MemStore) GetCertificatesByCN(cn string) ([]*x509.Certificate, error) {
	ids, ok := s.nameMap[cn]
	if !ok || len(ids) == 0 {
		return nil, &ErrNoCertificatesFound{query: cn}
	}
	certs := make([]*x509.Certificate, 0, len(ids))
	for _, id := range ids {
		cert, err := s.getCertificateByCertID(id)
		if err != nil {
			// An indexed ID that cannot be resolved means the store is
			// internally inconsistent; surface the lookup error.
			return nil, err
		}
		certs = append(certs, cert)
	}
	return certs, nil
}
// GetVerifyOptions returns VerifyOptions whose root pool is built from the
// certificates held by this store. System roots are never used; an empty
// store yields an error instead.
func (s *X509MemStore) GetVerifyOptions(dnsName string) (x509.VerifyOptions, error) {
	// Refuse to fall back to the system CA set when the store is empty.
	if len(s.fingerprintMap) == 0 {
		return x509.VerifyOptions{}, errors.New("no root CAs available")
	}
	return x509.VerifyOptions{
		DNSName: dnsName,
		Roots:   s.GetCertificatePool(),
	}, nil
}

View file

@ -1,144 +0,0 @@
package trustmanager
import (
"crypto/x509"
"errors"
"fmt"
)
// certExtension is the filename extension used for certificates - presumably
// by the on-disk X509FileStore; confirm at the usage sites.
const certExtension string = "crt"
// ErrNoCertificatesFound is returned when no certificates are found for a
// GetCertificatesBy* query.
type ErrNoCertificatesFound struct {
	query string
}

// Error returns a human-readable message naming the query that matched
// nothing.
func (err ErrNoCertificatesFound) Error() string {
	return fmt.Sprintf("error, no certificates found in the keystore match: %s", err.query)
}
// ErrCertValidation is returned when a certificate doesn't pass the store
// specific validations.
type ErrCertValidation struct {
}

// Error implements the error interface.
func (err ErrCertValidation) Error() string {
	// fmt.Sprintf with a constant format string is pointless (staticcheck
	// S1039); return the literal directly.
	return "store-specific certificate validations failed"
}
// ErrCertExists is returned when a Certificate already exists in the key store.
type ErrCertExists struct {
}

// Error implements the error interface.
func (err ErrCertExists) Error() string {
	// fmt.Sprintf with a constant format string is pointless (staticcheck
	// S1039); return the literal directly.
	return "certificate already in the store"
}
// ErrBadCertificateStore is returned when there is an internal inconsistency
// in our x509 store.
type ErrBadCertificateStore struct {
}

// Error implements the error interface.
func (err ErrBadCertificateStore) Error() string {
	// fmt.Sprintf with a constant format string is pointless (staticcheck
	// S1039); return the literal directly.
	return "inconsistent certificate store"
}
// X509Store is the interface for all X509Stores
type X509Store interface {
	// AddCert stores a single certificate; implementations may reject it
	// (e.g. ErrCertValidation) or refuse duplicates (ErrCertExists).
	AddCert(cert *x509.Certificate) error
	// AddCertFromPEM parses a PEM blob and stores the first certificate found.
	AddCertFromPEM(pemCerts []byte) error
	// AddCertFromFile loads a certificate from a file and stores it.
	AddCertFromFile(filename string) error
	// RemoveCert deletes one certificate; RemoveAll clears the store.
	RemoveCert(cert *x509.Certificate) error
	RemoveAll() error
	// GetCertificateByCertID looks a certificate up by its ID;
	// GetCertificatesByCN returns every certificate for a common name.
	GetCertificateByCertID(certID string) (*x509.Certificate, error)
	GetCertificatesByCN(cn string) ([]*x509.Certificate, error)
	// GetCertificates and GetCertificatePool return the full store contents.
	GetCertificates() []*x509.Certificate
	GetCertificatePool() *x509.CertPool
	// GetVerifyOptions builds x509.VerifyOptions rooted at the store's
	// certificates for the given DNS name.
	GetVerifyOptions(dnsName string) (x509.VerifyOptions, error)
}
// CertID represents the ID used to identify certificates: the hex-encoded
// TUF key ID produced by fingerprintCert (64 characters).
type CertID string
// Validator is a convenience type to create validating functions that filter
// certificates that get added to the store
type Validator interface {
	// Validate reports whether cert is acceptable to the store.
	Validate(cert *x509.Certificate) bool
}

// ValidatorFunc is a convenience type to create functions that implement
// the Validator interface
type ValidatorFunc func(cert *x509.Certificate) bool

// Validate implements the Validator interface to allow for any func() bool method
// to be passed as a Validator
func (vf ValidatorFunc) Validate(cert *x509.Certificate) bool {
	return vf(cert)
}
// Verify operates on an X509Store and validates the existence of a chain of
// trust between a leaf certificate and a CA present inside of the X509 Store,
// using the store's roots for dnsName. certList must contain exactly one
// non-CA (leaf) certificate and at least one CA certificate to use as an
// intermediate.
func Verify(s X509Store, dnsName string, certList []*x509.Certificate) error {
	// Never fall back to system CAs: an empty store cannot verify anything.
	if len(s.GetCertificates()) == 0 {
		return errors.New("no root CAs available")
	}

	// At a minimum we need the leaf cert plus one intermediate.
	if len(certList) < 2 {
		return errors.New("certificate and at least one intermediate needed")
	}

	// Get the VerifyOptions from the keystore for the base dnsName.
	opts, err := s.GetVerifyOptions(dnsName)
	if err != nil {
		return err
	}

	// Split the input into the single leaf and a pool of intermediates:
	// every CA cert becomes an intermediate, the one non-CA cert is the leaf.
	intermediates := x509.NewCertPool()
	var leaf *x509.Certificate
	for _, c := range certList {
		if c.IsCA {
			intermediates.AddCert(c)
			continue
		}
		// A second non-CA certificate is ambiguous - bail out.
		if leaf != nil {
			return errors.New("more than one leaf certificate found")
		}
		leaf = c
	}
	if leaf == nil {
		return errors.New("no leaf certificates found")
	}

	// Wire the intermediates into the options and run the chain validation.
	opts.Intermediates = intermediates
	chains, err := leaf.Verify(opts)
	if len(chains) == 0 || err != nil {
		return fmt.Errorf("certificate verification failed: %v", err)
	}
	return nil
}

View file

@ -14,8 +14,6 @@ import (
"io"
"io/ioutil"
"math/big"
"net/http"
"net/url"
"time"
"github.com/Sirupsen/logrus"
@ -24,40 +22,6 @@ import (
"github.com/docker/notary/tuf/data"
)
// GetCertFromURL downloads a X509 certificate from an HTTPS URL. The response
// body is expected to contain at least one PEM-encoded "CERTIFICATE" block;
// the first valid one is returned.
func GetCertFromURL(urlStr string) (*x509.Certificate, error) {
	parsed, err := url.Parse(urlStr)
	if err != nil {
		return nil, err
	}

	// Only allow retrieval over HTTPS: the transport is part of the trust
	// decision here.
	if parsed.Scheme != "https" {
		return nil, errors.New("only HTTPS URLs allowed")
	}

	// Download the certificate.
	resp, err := http.Get(parsed.String())
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	// A non-200 response body is an error page, not a certificate; fail early
	// with a clear message instead of a confusing PEM parse failure.
	if resp.StatusCode != http.StatusOK {
		return nil, errors.New("error retrieving certificate: " + resp.Status)
	}

	certBytes, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}

	// Try to extract the first valid PEM certificate from the bytes.
	return LoadCertFromPEM(certBytes)
}
// CertToPEM is a utility function returns a PEM encoded x509 Certificate
func CertToPEM(cert *x509.Certificate) []byte {
pemCert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})
@ -100,60 +64,6 @@ func LoadCertFromPEM(pemBytes []byte) (*x509.Certificate, error) {
return nil, errors.New("no certificates found in PEM data")
}
// FingerprintCert returns a TUF compliant fingerprint for a X509 Certificate
// as a plain string.
func FingerprintCert(cert *x509.Certificate) (string, error) {
	// fingerprintCert returns an empty CertID on error, so converting it
	// unconditionally preserves the ("", err) error contract.
	id, err := fingerprintCert(cert)
	return string(id), err
}
// fingerprintCert computes the CertID for cert: the TUF key ID of the
// certificate's PEM encoding, typed according to the certificate's public
// key algorithm (RSA or ECDSA only).
func fingerprintCert(cert *x509.Certificate) (CertID, error) {
	pemdata := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})

	var tufKey data.PublicKey
	switch cert.PublicKeyAlgorithm {
	case x509.RSA:
		tufKey = data.NewRSAx509PublicKey(pemdata)
	case x509.ECDSA:
		tufKey = data.NewECDSAx509PublicKey(pemdata)
	default:
		return "", fmt.Errorf("got Unknown key type while fingerprinting certificate")
	}

	return CertID(tufKey.ID()), nil
}
// loadCertsFromDir feeds every file in the store's fileStore to
// AddCertFromPEM, skipping certificates that fail validation or that already
// exist in the store.
func loadCertsFromDir(s *X509FileStore) error {
	for _, f := range s.fileStore.ListFiles() {
		// ListFiles returns relative paths
		data, err := s.fileStore.Get(f)
		if err != nil {
			// the filestore told us it had a file that it then couldn't
			// serve. this is a serious problem so error immediately
			return err
		}
		switch addErr := s.AddCertFromPEM(data).(type) {
		case nil:
			// added successfully
		case *ErrCertValidation:
			logrus.Debugf("ignoring certificate, did not pass validation: %s", f)
		case *ErrCertExists:
			logrus.Debugf("ignoring certificate, already exists in the store: %s", f)
		default:
			return addErr
		}
	}
	return nil
}
// LoadCertFromFile loads the first certificate from the file provided. The
// data is expected to be PEM Encoded and contain one of more certificates
// with PEM type "CERTIFICATE"
@ -533,37 +443,39 @@ func CertToKey(cert *x509.Certificate) data.PublicKey {
// CertsToKeys transforms each of the input certificate chains into its corresponding
// PublicKey
func CertsToKeys(leafCerts []*x509.Certificate, intCerts map[string][]*x509.Certificate) map[string]data.PublicKey {
func CertsToKeys(leafCerts map[string]*x509.Certificate, intCerts map[string][]*x509.Certificate) map[string]data.PublicKey {
keys := make(map[string]data.PublicKey)
for _, leafCert := range leafCerts {
certBundle := []*x509.Certificate{leafCert}
certID, err := FingerprintCert(leafCert)
if err != nil {
continue
for id, leafCert := range leafCerts {
if key, err := CertBundleToKey(leafCert, intCerts[id]); err == nil {
keys[key.ID()] = key
}
if intCertsForLeafs, ok := intCerts[certID]; ok {
certBundle = append(certBundle, intCertsForLeafs...)
}
certChainPEM, err := CertChainToPEM(certBundle)
if err != nil {
continue
}
var newKey data.PublicKey
// Use the leaf cert's public key algorithm for typing
switch leafCert.PublicKeyAlgorithm {
case x509.RSA:
newKey = data.NewRSAx509PublicKey(certChainPEM)
case x509.ECDSA:
newKey = data.NewECDSAx509PublicKey(certChainPEM)
default:
logrus.Debugf("Unknown key type parsed from certificate: %v", leafCert.PublicKeyAlgorithm)
continue
}
keys[newKey.ID()] = newKey
}
return keys
}
// CertBundleToKey creates a TUF public key from a leaf cert and a list of
// intermediates, by PEM-encoding the whole chain (leaf first) and typing the
// key after the leaf's public key algorithm.
func CertBundleToKey(leafCert *x509.Certificate, intCerts []*x509.Certificate) (data.PublicKey, error) {
	bundle := append([]*x509.Certificate{leafCert}, intCerts...)
	certChainPEM, err := CertChainToPEM(bundle)
	if err != nil {
		return nil, err
	}
	// Use the leaf cert's public key algorithm for typing
	switch leafCert.PublicKeyAlgorithm {
	case x509.RSA:
		return data.NewRSAx509PublicKey(certChainPEM), nil
	case x509.ECDSA:
		return data.NewECDSAx509PublicKey(certChainPEM), nil
	default:
		logrus.Debugf("Unknown key type parsed from certificate: %v", leafCert.PublicKeyAlgorithm)
		return nil, x509.ErrUnsupportedAlgorithm
	}
}
// NewCertificate returns an X509 Certificate following a template, given a GUN and validity interval.
func NewCertificate(gun string, startTime, endTime time.Time) (*x509.Certificate, error) {
serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
@ -610,14 +522,3 @@ func X509PublicKeyID(certPubKey data.PublicKey) (string, error) {
return key.ID(), nil
}
// FilterCertsExpiredSha1 can be used as the filter function to cert store
// initializers to filter out all expired or SHA-1 certificate that we
// shouldn't load.
func FilterCertsExpiredSha1(cert *x509.Certificate) bool {
return !cert.IsCA &&
time.Now().Before(cert.NotAfter) &&
cert.SignatureAlgorithm != x509.SHA1WithRSA &&
cert.SignatureAlgorithm != x509.DSAWithSHA1 &&
cert.SignatureAlgorithm != x509.ECDSAWithSHA1
}

View file

@ -137,7 +137,7 @@ type YubiPrivateKey struct {
libLoader pkcs11LibLoader
}
// YubiKeySigner wraps a YubiPrivateKey and implements the crypto.Signer interface
// yubikeySigner wraps a YubiPrivateKey and implements the crypto.Signer interface
type yubikeySigner struct {
YubiPrivateKey
}
@ -344,7 +344,7 @@ func getECDSAKey(ctx IPKCS11Ctx, session pkcs11.SessionHandle, pkcs11KeyID []byt
return data.NewECDSAPublicKey(pubBytes), data.CanonicalRootRole, nil
}
// Sign returns a signature for a given signature request
// sign returns a signature for a given signature request
func sign(ctx IPKCS11Ctx, session pkcs11.SessionHandle, pkcs11KeyID []byte, passRetriever passphrase.Retriever, payload []byte) ([]byte, error) {
err := login(ctx, session, passRetriever, pkcs11.CKU_USER, UserPin)
if err != nil {

View file

@ -37,13 +37,9 @@ func (err ErrRootRotationFail) Error() string {
return fmt.Sprintf("could not rotate trust to a new trusted root: %s", err.Reason)
}
func prettyFormatCertIDs(certs []*x509.Certificate) string {
func prettyFormatCertIDs(certs map[string]*x509.Certificate) string {
ids := make([]string, 0, len(certs))
for _, cert := range certs {
id, err := trustmanager.FingerprintCert(cert)
if err != nil {
id = fmt.Sprintf("[Error %s]", err)
}
for id := range certs {
ids = append(ids, id)
}
return strings.Join(ids, ", ")
@ -53,8 +49,9 @@ func prettyFormatCertIDs(certs []*x509.Certificate) string {
ValidateRoot receives a new root, validates its correctness and attempts to
do root key rotation if needed.
First we list the current trusted certificates we have for a particular GUN. If
that list is non-empty means that we've already seen this repository before, and
First we check if we have any trusted certificates for a particular GUN in
a previous root, if we have one. If the previous root is not nil and we find
certificates for this GUN, we've already seen this repository before, and
have a list of trusted certificates for it. In this case, we use this list of
certificates to attempt to validate this root file.
@ -86,68 +83,67 @@ We shall call this: TOFUS.
Validation failure at any step will result in an ErrValidationFailed error.
*/
func ValidateRoot(certStore trustmanager.X509Store, root *data.Signed, gun string, trustPinning TrustPinConfig) error {
func ValidateRoot(prevRoot *data.SignedRoot, root *data.Signed, gun string, trustPinning TrustPinConfig) (*data.SignedRoot, error) {
logrus.Debugf("entered ValidateRoot with dns: %s", gun)
signedRoot, err := data.RootFromSigned(root)
if err != nil {
return err
return nil, err
}
rootRole, err := signedRoot.BuildBaseRole(data.CanonicalRootRole)
if err != nil {
return err
return nil, err
}
// Retrieve all the leaf and intermediate certificates in root for which the CN matches the GUN
allLeafCerts, allIntCerts := parseAllCerts(signedRoot)
certsFromRoot, err := validRootLeafCerts(allLeafCerts, gun)
certsFromRoot, err := validRootLeafCerts(allLeafCerts, gun, true)
if err != nil {
logrus.Debugf("error retrieving valid leaf certificates for: %s, %v", gun, err)
return &ErrValidationFail{Reason: "unable to retrieve valid leaf certificates"}
return nil, &ErrValidationFail{Reason: "unable to retrieve valid leaf certificates"}
}
// Retrieve all the trusted certificates that match this gun
trustedCerts, err := certStore.GetCertificatesByCN(gun)
if err != nil {
// If the error that we get back is different than ErrNoCertificatesFound
// we couldn't check if there are any certificates with this CN already
// trusted. Let's take the conservative approach and return a failed validation
if _, ok := err.(*trustmanager.ErrNoCertificatesFound); !ok {
logrus.Debugf("error retrieving trusted certificates for: %s, %v", gun, err)
return &ErrValidationFail{Reason: "unable to retrieve trusted certificates"}
// If we have a previous root, let's try to use it to validate that this new root is valid.
if prevRoot != nil {
// Retrieve all the trusted certificates from our previous root
// Note that we do not validate expiries here since our originally trusted root might have expired certs
allTrustedLeafCerts, allTrustedIntCerts := parseAllCerts(prevRoot)
trustedLeafCerts, err := validRootLeafCerts(allTrustedLeafCerts, gun, false)
// Use the certificates we found in the previous root for the GUN to verify its signatures
// This could potentially be an empty set, in which case we will fail to verify
logrus.Debugf("found %d valid root leaf certificates for %s: %s", len(trustedLeafCerts), gun,
prettyFormatCertIDs(trustedLeafCerts))
// Extract the previous root's threshold for signature verification
prevRootRoleData, ok := prevRoot.Signed.Roles[data.CanonicalRootRole]
if !ok {
return nil, &ErrValidationFail{Reason: "could not retrieve previous root role data"}
}
}
// If we have certificates that match this specific GUN, let's make sure to
// use them first to validate that this new root is valid.
if len(trustedCerts) != 0 {
logrus.Debugf("found %d valid root certificates for %s: %s", len(trustedCerts), gun,
prettyFormatCertIDs(trustedCerts))
err = signed.VerifySignatures(
root, data.BaseRole{Keys: trustmanager.CertsToKeys(trustedCerts, allIntCerts), Threshold: 1})
root, data.BaseRole{Keys: trustmanager.CertsToKeys(trustedLeafCerts, allTrustedIntCerts), Threshold: prevRootRoleData.Threshold})
if err != nil {
logrus.Debugf("failed to verify TUF data for: %s, %v", gun, err)
return &ErrValidationFail{Reason: "failed to validate data with current trusted certificates"}
return nil, &ErrRootRotationFail{Reason: "failed to validate data with current trusted certificates"}
}
} else {
logrus.Debugf("found no currently valid root certificates for %s, using trust_pinning config to bootstrap trust", gun)
trustPinCheckFunc, err := NewTrustPinChecker(trustPinning, gun)
if err != nil {
return &ErrValidationFail{Reason: err.Error()}
return nil, &ErrValidationFail{Reason: err.Error()}
}
validPinnedCerts := []*x509.Certificate{}
for _, cert := range certsFromRoot {
certID, err := trustmanager.FingerprintCert(cert)
if err != nil {
validPinnedCerts := map[string]*x509.Certificate{}
for id, cert := range certsFromRoot {
if ok := trustPinCheckFunc(cert, allIntCerts[id]); !ok {
continue
}
if ok := trustPinCheckFunc(cert, allIntCerts[certID]); !ok {
continue
}
validPinnedCerts = append(validPinnedCerts, cert)
validPinnedCerts[id] = cert
}
if len(validPinnedCerts) == 0 {
return &ErrValidationFail{Reason: "unable to match any certificates to trust_pinning config"}
return nil, &ErrValidationFail{Reason: "unable to match any certificates to trust_pinning config"}
}
certsFromRoot = validPinnedCerts
}
@ -159,64 +155,29 @@ func ValidateRoot(certStore trustmanager.X509Store, root *data.Signed, gun strin
Keys: trustmanager.CertsToKeys(certsFromRoot, allIntCerts), Threshold: rootRole.Threshold})
if err != nil {
logrus.Debugf("failed to verify TUF data for: %s, %v", gun, err)
return &ErrValidationFail{Reason: "failed to validate integrity of roots"}
}
// Getting here means:
// A) we had trusted certificates and both the old and new validated this root.
// or
// B) we had no trusted certificates but the new set of certificates has integrity (self-signed).
logrus.Debugf("entering root certificate rotation for: %s", gun)
// Do root certificate rotation: we trust only the certs present in the new root
// First we add all the new certificates (even if they already exist)
for _, cert := range certsFromRoot {
err := certStore.AddCert(cert)
if err != nil {
// If the error is already exists we don't fail the rotation
if _, ok := err.(*trustmanager.ErrCertExists); ok {
logrus.Debugf("ignoring certificate addition to: %s", gun)
continue
}
logrus.Debugf("error adding new trusted certificate for: %s, %v", gun, err)
}
}
// Now we delete old certificates that aren't present in the new root
oldCertsToRemove, err := certsToRemove(trustedCerts, certsFromRoot)
if err != nil {
logrus.Debugf("inconsistency when removing old certificates: %v", err)
return err
}
for certID, cert := range oldCertsToRemove {
logrus.Debugf("removing certificate with certID: %s", certID)
err = certStore.RemoveCert(cert)
if err != nil {
logrus.Debugf("failed to remove trusted certificate with keyID: %s, %v", certID, err)
return &ErrRootRotationFail{Reason: "failed to rotate root keys"}
}
return nil, &ErrValidationFail{Reason: "failed to validate integrity of roots"}
}
logrus.Debugf("Root validation succeeded for %s", gun)
return nil
return signedRoot, nil
}
// validRootLeafCerts returns a list of non-expired, non-sha1 certificates
// validRootLeafCerts returns a list of possibly (if checkExpiry is true) non-expired, non-sha1 certificates
// found in root whose Common-Names match the provided GUN. Note that this
// "validity" alone does not imply any measure of trust.
func validRootLeafCerts(allLeafCerts map[string]*x509.Certificate, gun string) ([]*x509.Certificate, error) {
var validLeafCerts []*x509.Certificate
func validRootLeafCerts(allLeafCerts map[string]*x509.Certificate, gun string, checkExpiry bool) (map[string]*x509.Certificate, error) {
validLeafCerts := make(map[string]*x509.Certificate)
// Go through every leaf certificate and check that the CN matches the gun
for _, cert := range allLeafCerts {
for id, cert := range allLeafCerts {
// Validate that this leaf certificate has a CN that matches the exact gun
if cert.Subject.CommonName != gun {
logrus.Debugf("error leaf certificate CN: %s doesn't match the given GUN: %s",
cert.Subject.CommonName, gun)
continue
}
// Make sure the certificate is not expired
if time.Now().After(cert.NotAfter) {
// Make sure the certificate is not expired if checkExpiry is true
if checkExpiry && time.Now().After(cert.NotAfter) {
logrus.Debugf("error leaf certificate is expired")
continue
}
@ -230,7 +191,7 @@ func validRootLeafCerts(allLeafCerts map[string]*x509.Certificate, gun string) (
continue
}
validLeafCerts = append(validLeafCerts, cert)
validLeafCerts[id] = cert
}
if len(validLeafCerts) < 1 {
@ -246,11 +207,15 @@ func validRootLeafCerts(allLeafCerts map[string]*x509.Certificate, gun string) (
// parseAllCerts returns two maps, one with all of the leafCertificates and one
// with all the intermediate certificates found in signedRoot
func parseAllCerts(signedRoot *data.SignedRoot) (map[string]*x509.Certificate, map[string][]*x509.Certificate) {
if signedRoot == nil {
return nil, nil
}
leafCerts := make(map[string]*x509.Certificate)
intCerts := make(map[string][]*x509.Certificate)
// Before we loop through all root keys available, make sure any exist
rootRoles, ok := signedRoot.Signed.Roles["root"]
rootRoles, ok := signedRoot.Signed.Roles[data.CanonicalRootRole]
if !ok {
logrus.Debugf("tried to parse certificates from invalid root signed data")
return nil, nil
@ -290,59 +255,14 @@ func parseAllCerts(signedRoot *data.SignedRoot) (map[string]*x509.Certificate, m
// Get the ID of the leaf certificate
leafCert := leafCertList[0]
leafID, err := trustmanager.FingerprintCert(leafCert)
if err != nil {
logrus.Debugf("error while fingerprinting root certificate with keyID: %s, %v", keyID, err)
continue
}
// Store the leaf cert in the map
leafCerts[leafID] = leafCert
leafCerts[key.ID()] = leafCert
// Get all the remainder certificates marked as a CA to be used as intermediates
intermediateCerts := trustmanager.GetIntermediateCerts(decodedCerts)
intCerts[leafID] = intermediateCerts
intCerts[key.ID()] = intermediateCerts
}
return leafCerts, intCerts
}
// certsToRemove returns all the certificates from oldCerts that aren't
// present in newCerts, keyed by certificate ID. newCerts must be non-empty
// (it should come from validRootLeafCerts, which never returns an empty
// set); otherwise an ErrRootRotationFail is returned so we never "rotate"
// to an empty certificate set.
func certsToRemove(oldCerts, newCerts []*x509.Certificate) (map[string]*x509.Certificate, error) {
	// Index the IDs of all the new certificates.
	newCertMap := make(map[string]struct{})
	for _, cert := range newCerts {
		certID, err := trustmanager.FingerprintCert(cert)
		if err != nil {
			logrus.Debugf("error while fingerprinting root certificate with keyID: %s, %v", certID, err)
			continue
		}
		newCertMap[certID] = struct{}{}
	}

	// We don't want to "rotate" certificates to an empty set, nor keep old
	// certificates if the new root does not trust them. The input should all
	// be fingerprintable, so this should never happen - fail just to be sure.
	if len(newCertMap) == 0 {
		return nil, &ErrRootRotationFail{Reason: "internal error, got no certificates to rotate to"}
	}

	// Keep every old certificate whose ID is absent from the new set.
	remove := make(map[string]*x509.Certificate)
	for _, cert := range oldCerts {
		certID, err := trustmanager.FingerprintCert(cert)
		if err != nil {
			logrus.Debugf("error while fingerprinting root certificate with certID: %s, %v", certID, err)
			continue
		}
		if _, ok := newCertMap[certID]; !ok {
			remove[certID] = cert
		}
	}
	return remove, nil
}

View file

@ -3,6 +3,7 @@ package trustpinning
import (
"crypto/x509"
"fmt"
"github.com/Sirupsen/logrus"
"github.com/docker/notary/trustmanager"
"github.com/docker/notary/tuf/utils"
"strings"
@ -67,17 +68,12 @@ func NewTrustPinChecker(trustPinConfig TrustPinConfig, gun string) (CertChecker,
func (t trustPinChecker) certsCheck(leafCert *x509.Certificate, intCerts []*x509.Certificate) bool {
// reconstruct the leaf + intermediate cert chain, which is bundled as {leaf, intermediates...},
// in order to get the matching id in the root file
leafCertID, err := trustmanager.FingerprintCert(leafCert)
key, err := trustmanager.CertBundleToKey(leafCert, intCerts)
if err != nil {
logrus.Debug("error creating cert bundle: ", err.Error())
return false
}
rootKeys := trustmanager.CertsToKeys([]*x509.Certificate{leafCert}, map[string][]*x509.Certificate{leafCertID: intCerts})
for keyID := range rootKeys {
if utils.StrSliceContains(t.pinnedCertIDs, keyID) {
return true
}
}
return false
return utils.StrSliceContains(t.pinnedCertIDs, key.ID())
}
func (t trustPinChecker) caCheck(leafCert *x509.Certificate, intCerts []*x509.Certificate) bool {

View file

@ -0,0 +1,673 @@
package tuf
import (
"fmt"
"github.com/docker/go/canonical/json"
"github.com/docker/notary"
"github.com/docker/notary/trustpinning"
"github.com/docker/notary/tuf/data"
"github.com/docker/notary/tuf/signed"
"github.com/docker/notary/tuf/utils"
)
// ErrBuildDone is returned when any functions are called on RepoBuilder, and it
// is already finished building
var ErrBuildDone = fmt.Errorf(
"the builder has finished building and cannot accept any more input or produce any more output")
// ErrInvalidBuilderInput is returned when RepoBuilder.Load is called
// with the wrong type of metadata for the state that it's in
type ErrInvalidBuilderInput struct{ msg string }

// Error returns the explanatory message carried by the error.
func (e ErrInvalidBuilderInput) Error() string {
	return e.msg
}
// ConsistentInfo is the consistent name and size of a role, or just the name
// of the role and a -1 if no file metadata for the role is known
type ConsistentInfo struct {
	RoleName string // the role this info describes
	fileMeta data.FileMeta // length and hashes; the zero value means the checksum is unknown
}
// ChecksumKnown determines whether or not we know enough to provide a size and
// consistent name
func (c ConsistentInfo) ChecksumKnown() bool {
	// empty hash, no size : this is the zero value of fileMeta, meaning no
	// file metadata was ever recorded for this role
	return len(c.fileMeta.Hashes) > 0 || c.fileMeta.Length != 0
}
// ConsistentName returns the consistent name (rolename.sha256) for the role
// given this consistent information
func (c ConsistentInfo) ConsistentName() string {
	// NOTE(review): assumes a SHA256 entry exists in Hashes when the checksum
	// is known - an absent entry yields a name built from an empty hash.
	return utils.ConsistentName(c.RoleName, c.fileMeta.Hashes[notary.SHA256])
}
// Length returns the expected length of the role as per this consistent
// information - if no checksum information is known, the size is -1.
func (c ConsistentInfo) Length() int64 {
	// Guard clause: no checksum info means no known size.
	if !c.ChecksumKnown() {
		return -1
	}
	return c.fileMeta.Length
}
// RepoBuilder is an interface for an object which builds a tuf.Repo
type RepoBuilder interface {
	// Load ingests one role's raw metadata; minVersion and allowExpired
	// presumably control rollback and expiry checking - confirm against the
	// repoBuilder implementation.
	Load(roleName string, content []byte, minVersion int, allowExpired bool) error
	// GenerateSnapshot and GenerateTimestamp produce new signed metadata,
	// optionally based on a previous version.
	GenerateSnapshot(prev *data.SignedSnapshot) ([]byte, int, error)
	GenerateTimestamp(prev *data.SignedTimestamp) ([]byte, int, error)
	// Finish returns the built repo; the builder refuses further use afterwards.
	Finish() (*Repo, error)
	BootstrapNewBuilder() RepoBuilder

	// informative functions
	IsLoaded(roleName string) bool
	GetLoadedVersion(roleName string) int
	GetConsistentInfo(roleName string) ConsistentInfo
}
// finishedBuilder is the terminal state of a builder: every mutating or
// producing method fails with ErrBuildDone, and the informative methods
// report nothing loaded.
type finishedBuilder struct{}

func (f finishedBuilder) Load(roleName string, content []byte, minVersion int, allowExpired bool) error {
	return ErrBuildDone
}

func (f finishedBuilder) GenerateSnapshot(prev *data.SignedSnapshot) ([]byte, int, error) {
	return nil, 0, ErrBuildDone
}

func (f finishedBuilder) GenerateTimestamp(prev *data.SignedTimestamp) ([]byte, int, error) {
	return nil, 0, ErrBuildDone
}

func (f finishedBuilder) Finish() (*Repo, error) {
	return nil, ErrBuildDone
}

func (f finishedBuilder) BootstrapNewBuilder() RepoBuilder {
	// a finished builder can only bootstrap another finished builder
	return f
}

func (f finishedBuilder) IsLoaded(roleName string) bool {
	return false
}

func (f finishedBuilder) GetLoadedVersion(roleName string) int {
	return 0
}

func (f finishedBuilder) GetConsistentInfo(roleName string) ConsistentInfo {
	// no checksums are known once building is done
	return ConsistentInfo{RoleName: roleName}
}
// NewRepoBuilder is the only way to get a pre-built RepoBuilder
func NewRepoBuilder(gun string, cs signed.CryptoService, trustpin trustpinning.TrustPinConfig) RepoBuilder {
	inner := &repoBuilder{
		repo:                 NewRepo(cs),
		gun:                  gun,
		trustpin:             trustpin,
		loadedNotChecksummed: make(map[string][]byte),
	}
	// the wrapper enforces that nothing can be done after Finish
	return &repoBuilderWrapper{RepoBuilder: inner}
}
// repoBuilderWrapper embeds a repoBuilder, but once Finish is called, swaps
// the embed out with a finishedBuilder, so every subsequent call returns
// ErrBuildDone
type repoBuilderWrapper struct {
	RepoBuilder
}
// Finish delegates to the embedded builder's Finish and then permanently
// swaps the embed out for a finishedBuilder, so that no further input is
// accepted and no further output is produced.
func (rbw *repoBuilderWrapper) Finish() (*Repo, error) {
	if _, alreadyDone := rbw.RepoBuilder.(finishedBuilder); alreadyDone {
		// already finished: this just returns ErrBuildDone
		return rbw.RepoBuilder.Finish()
	}
	previous := rbw.RepoBuilder
	rbw.RepoBuilder = finishedBuilder{}
	return previous.Finish()
}
// repoBuilder actually builds a tuf.Repo
type repoBuilder struct {
	// repo accumulates the verified metadata as it is loaded
	repo *Repo
	// needed for root trust pinning verification
	gun string
	trustpin trustpinning.TrustPinConfig
	// in case we load root and/or targets before snapshot and timestamp (
	// or snapshot and not timestamp), so we know what to verify when the
	// data with checksums come in
	loadedNotChecksummed map[string][]byte
	// bootstrapped values to validate a new root
	prevRoot *data.SignedRoot
	bootstrappedRootChecksum *data.FileMeta
	// for bootstrapping the next builder: the root checksum recorded from
	// the most recently loaded snapshot
	nextRootChecksum *data.FileMeta
}
// Finish returns the repo built so far. The repoBuilder itself never fails
// here; preventing use after Finish is the repoBuilderWrapper's job.
func (rb *repoBuilder) Finish() (*Repo, error) {
	return rb.repo, nil
}
// BootstrapNewBuilder returns a fresh builder for the same GUN and trust
// pinning config, primed with the current root (and the next root checksum,
// if a snapshot has provided one) so that the next root can be validated.
func (rb *repoBuilder) BootstrapNewBuilder() RepoBuilder {
	next := &repoBuilder{
		repo:                     NewRepo(rb.repo.cryptoService),
		gun:                      rb.gun,
		trustpin:                 rb.trustpin,
		loadedNotChecksummed:     make(map[string][]byte),
		prevRoot:                 rb.repo.Root,
		bootstrappedRootChecksum: rb.nextRootChecksum,
	}
	return &repoBuilderWrapper{RepoBuilder: next}
}
// IsLoaded returns whether a particular role has already been loaded
func (rb *repoBuilder) IsLoaded(roleName string) bool {
	if roleName == data.CanonicalRootRole {
		return rb.repo.Root != nil
	}
	if roleName == data.CanonicalSnapshotRole {
		return rb.repo.Snapshot != nil
	}
	if roleName == data.CanonicalTimestampRole {
		return rb.repo.Timestamp != nil
	}
	// top-level targets and delegations both live in the Targets map
	return rb.repo.Targets[roleName] != nil
}
// GetLoadedVersion returns the metadata version, if it is loaded, or 1 (the
// minimum valid version number) otherwise
func (rb *repoBuilder) GetLoadedVersion(roleName string) int {
	if roleName == data.CanonicalRootRole && rb.repo.Root != nil {
		return rb.repo.Root.Signed.Version
	}
	if roleName == data.CanonicalSnapshotRole && rb.repo.Snapshot != nil {
		return rb.repo.Snapshot.Signed.Version
	}
	if roleName == data.CanonicalTimestampRole && rb.repo.Timestamp != nil {
		return rb.repo.Timestamp.Signed.Version
	}
	// any other loaded role (including delegations) is in the Targets map
	if tgts, ok := rb.repo.Targets[roleName]; ok {
		return tgts.Signed.Version
	}
	return 1
}
// GetConsistentInfo returns the consistent name and size of a role, if it is
// known, otherwise just the rolename and a -1 for size (both of which are
// inside a ConsistentInfo object)
func (rb *repoBuilder) GetConsistentInfo(roleName string) ConsistentInfo {
	// the zero-valued fileMeta means "checksum unknown"
	info := ConsistentInfo{RoleName: roleName}
	switch roleName {
	case data.CanonicalTimestampRole:
		// we do not want to get a consistent timestamp, but we do want to
		// limit its size
		info.fileMeta.Length = notary.MaxTimestampSize
	case data.CanonicalSnapshotRole:
		// the snapshot's checksum comes from the timestamp, if loaded
		if rb.repo.Timestamp != nil {
			info.fileMeta = rb.repo.Timestamp.Signed.Meta[roleName]
		}
	case data.CanonicalRootRole:
		// prefer a checksum bootstrapped from a previous builder over the
		// current snapshot's root metadata
		if rb.bootstrappedRootChecksum != nil {
			info.fileMeta = *rb.bootstrappedRootChecksum
		} else if rb.repo.Snapshot != nil {
			info.fileMeta = rb.repo.Snapshot.Signed.Meta[roleName]
		}
	default:
		// targets and delegations are checksummed by the snapshot
		if rb.repo.Snapshot != nil {
			info.fileMeta = rb.repo.Snapshot.Signed.Meta[roleName]
		}
	}
	return info
}
// Load verifies and adds the given role's metadata to the repo being built.
// The root must be loaded before any other role, and the top-level targets
// before any delegation; a role may only be loaded once.
func (rb *repoBuilder) Load(roleName string, content []byte, minVersion int, allowExpired bool) error {
	if !data.ValidRole(roleName) {
		return ErrInvalidBuilderInput{msg: fmt.Sprintf("%s is an invalid role", roleName)}
	}
	if rb.IsLoaded(roleName) {
		return ErrInvalidBuilderInput{msg: fmt.Sprintf("%s has already been loaded", roleName)}
	}
	switch roleName {
	case data.CanonicalRootRole:
		// root has no prerequisites
		return rb.loadRoot(content, minVersion, allowExpired)
	case data.CanonicalSnapshotRole:
		if err := rb.checkPrereqsLoaded([]string{data.CanonicalRootRole}); err != nil {
			return err
		}
		return rb.loadSnapshot(content, minVersion, allowExpired)
	case data.CanonicalTimestampRole:
		if err := rb.checkPrereqsLoaded([]string{data.CanonicalRootRole}); err != nil {
			return err
		}
		return rb.loadTimestamp(content, minVersion, allowExpired)
	case data.CanonicalTargetsRole:
		if err := rb.checkPrereqsLoaded([]string{data.CanonicalRootRole}); err != nil {
			return err
		}
		return rb.loadTargets(content, minVersion, allowExpired)
	default:
		// delegations additionally require the top-level targets role
		if err := rb.checkPrereqsLoaded([]string{data.CanonicalRootRole, data.CanonicalTargetsRole}); err != nil {
			return err
		}
		return rb.loadDelegation(roleName, content, minVersion, allowExpired)
	}
}
// checkPrereqsLoaded returns an error naming the first role in prereqRoles
// that has not yet been loaded, or nil if all of them have been.
func (rb *repoBuilder) checkPrereqsLoaded(prereqRoles []string) error {
	for _, prereq := range prereqRoles {
		if rb.IsLoaded(prereq) {
			continue
		}
		return ErrInvalidBuilderInput{msg: fmt.Sprintf("%s must be loaded first", prereq)}
	}
	return nil
}
// GenerateSnapshot generates a new snapshot given a previous (optional) snapshot
// We can't just load the previous snapshot, because it may have been signed by a different
// snapshot key (maybe from a previous root version). Note that we need the root role and
// targets role to be loaded, because we need to generate metadata for both (and we need
// the root to be loaded so we can get the snapshot role to sign with).
// It returns the marshalled, signed snapshot JSON and its version number.
func (rb *repoBuilder) GenerateSnapshot(prev *data.SignedSnapshot) ([]byte, int, error) {
	switch {
	case rb.repo.cryptoService == nil:
		return nil, 0, ErrInvalidBuilderInput{msg: "cannot generate snapshot without a cryptoservice"}
	case rb.IsLoaded(data.CanonicalSnapshotRole):
		return nil, 0, ErrInvalidBuilderInput{msg: "snapshot has already been loaded"}
	case rb.IsLoaded(data.CanonicalTimestampRole):
		return nil, 0, ErrInvalidBuilderInput{msg: "cannot generate snapshot if timestamp has already been loaded"}
	}
	if err := rb.checkPrereqsLoaded([]string{data.CanonicalRootRole}); err != nil {
		return nil, 0, err
	}
	// If there is no previous snapshot, we need to generate one, and so the targets must
	// have already been loaded. Otherwise, so long as the previous snapshot structure is
	// valid (it has a targets meta), we're good.
	switch prev {
	case nil:
		if err := rb.checkPrereqsLoaded([]string{data.CanonicalTargetsRole}); err != nil {
			return nil, 0, err
		}
		if err := rb.repo.InitSnapshot(); err != nil {
			// don't leave a half-initialized snapshot in the repo
			rb.repo.Snapshot = nil
			return nil, 0, err
		}
	default:
		if err := data.IsValidSnapshotStructure(prev.Signed); err != nil {
			return nil, 0, err
		}
		rb.repo.Snapshot = prev
	}
	sgnd, err := rb.repo.SignSnapshot(data.DefaultExpires(data.CanonicalSnapshotRole))
	if err != nil {
		// signing failed; don't keep the unsigned snapshot around
		rb.repo.Snapshot = nil
		return nil, 0, err
	}
	sgndJSON, err := json.Marshal(sgnd)
	if err != nil {
		rb.repo.Snapshot = nil
		return nil, 0, err
	}
	// loadedNotChecksummed should currently contain the root awaiting checksumming,
	// since it has to have been loaded. Since the snapshot was generated using
	// the root and targets data (there may not be any) that have been loaded,
	// remove all of them from rb.loadedNotChecksummed
	for tgtName := range rb.repo.Targets {
		delete(rb.loadedNotChecksummed, tgtName)
	}
	delete(rb.loadedNotChecksummed, data.CanonicalRootRole)
	// The timestamp can't have been loaded yet, so we want to cache the snapshot
	// bytes so we can validate the checksum when a timestamp gets generated or
	// loaded later.
	rb.loadedNotChecksummed[data.CanonicalSnapshotRole] = sgndJSON
	return sgndJSON, rb.repo.Snapshot.Signed.Version, nil
}
// GenerateTimestamp generates a new timestamp given a previous (optional)
// timestamp. We can't just load the previous timestamp, because it may have
// been signed by a different timestamp key (maybe from a previous root
// version). It returns the marshalled, signed timestamp JSON and its version.
func (rb *repoBuilder) GenerateTimestamp(prev *data.SignedTimestamp) ([]byte, int, error) {
	if rb.repo.cryptoService == nil {
		return nil, 0, ErrInvalidBuilderInput{msg: "cannot generate timestamp without a cryptoservice"}
	}
	if rb.IsLoaded(data.CanonicalTimestampRole) {
		return nil, 0, ErrInvalidBuilderInput{msg: "timestamp has already been loaded"}
	}
	// SignTimestamp always serializes the loaded snapshot and signs it in the
	// data, so we must always have the snapshot loaded first
	if err := rb.checkPrereqsLoaded([]string{data.CanonicalRootRole, data.CanonicalSnapshotRole}); err != nil {
		return nil, 0, err
	}
	if prev == nil {
		if err := rb.repo.InitTimestamp(); err != nil {
			// don't leave a half-initialized timestamp in the repo
			rb.repo.Timestamp = nil
			return nil, 0, err
		}
	} else {
		if err := data.IsValidTimestampStructure(prev.Signed); err != nil {
			return nil, 0, err
		}
		rb.repo.Timestamp = prev
	}
	sgnd, err := rb.repo.SignTimestamp(data.DefaultExpires(data.CanonicalTimestampRole))
	if err != nil {
		rb.repo.Timestamp = nil
		return nil, 0, err
	}
	sgndJSON, err := json.Marshal(sgnd)
	if err != nil {
		rb.repo.Timestamp = nil
		return nil, 0, err
	}
	// The snapshot should have been loaded (and not checksummed, since a
	// timestamp cannot have been loaded), so it is awaiting checksumming.
	// This timestamp was generated over exactly that snapshot, so it is now
	// checksummed; nothing else should remain in loadedNotChecksummed, since
	// loading/generating a snapshot cleared everything else out.
	delete(rb.loadedNotChecksummed, data.CanonicalSnapshotRole)
	return sgndJSON, rb.repo.Timestamp.Signed.Version, nil
}
// loadRoot verifies the given root metadata - against the previous root (if
// any) and the trust pinning configuration, then against its own signatures,
// version, and expiry - and installs it in the repo.
func (rb *repoBuilder) loadRoot(content []byte, minVersion int, allowExpired bool) error {
	sgnd, err := rb.bytesToSigned(content, data.CanonicalRootRole)
	if err != nil {
		return err
	}
	// ValidateRoot validates against the previous root's role, as well as
	// validating that the root itself is self-consistent with its own
	// signatures and thresholds. It is assumed to call data.RootFromSigned,
	// which validates the metadata, rather than just unmarshalling sgnd into
	// a SignedRoot object itself.
	root, err := trustpinning.ValidateRoot(rb.prevRoot, sgnd, rb.gun, rb.trustpin)
	if err != nil {
		return err
	}
	if err := signed.VerifyVersion(&(root.Signed.SignedCommon), minVersion); err != nil {
		return err
	}
	// the expiry check goes last so that all other validation still runs
	if !allowExpired {
		if err := signed.VerifyExpiry(&(root.Signed.SignedCommon), data.CanonicalRootRole); err != nil {
			return err
		}
	}
	rootRole, err := root.BuildBaseRole(data.CanonicalRootRole)
	if err != nil {
		// this should never happen since the root has been validated
		return err
	}
	rb.repo.Root = root
	rb.repo.originalRootRole = rootRole
	return nil
}
// loadTimestamp verifies the given timestamp metadata against the timestamp
// role defined by the already-loaded root, then checksums any pending
// snapshot bytes against it, and installs it in the repo.
func (rb *repoBuilder) loadTimestamp(content []byte, minVersion int, allowExpired bool) error {
	role, err := rb.repo.Root.BuildBaseRole(data.CanonicalTimestampRole)
	if err != nil {
		// this should never happen, since the root has already been validated
		return err
	}
	sgnd, err := rb.bytesToSignedAndValidateSigs(role, content)
	if err != nil {
		return err
	}
	ts, err := data.TimestampFromSigned(sgnd)
	if err != nil {
		return err
	}
	if err := signed.VerifyVersion(&(ts.Signed.SignedCommon), minVersion); err != nil {
		return err
	}
	// the expiry check goes last so that all other validation still runs
	if !allowExpired {
		if err := signed.VerifyExpiry(&(ts.Signed.SignedCommon), data.CanonicalTimestampRole); err != nil {
			return err
		}
	}
	if err := rb.validateChecksumsFromTimestamp(ts); err != nil {
		return err
	}
	rb.repo.Timestamp = ts
	return nil
}
// loadSnapshot verifies the given snapshot metadata against the snapshot role
// defined by the already-loaded root, records its root checksum for
// bootstrapping the next builder, checksums any previously loaded metadata
// against it, and installs it in the repo.
func (rb *repoBuilder) loadSnapshot(content []byte, minVersion int, allowExpired bool) error {
	roleName := data.CanonicalSnapshotRole
	snapshotRole, err := rb.repo.Root.BuildBaseRole(roleName)
	if err != nil { // this should never happen, since it's already been validated
		return err
	}
	// checksum (if known), unmarshal, and verify signatures against the role
	signedObj, err := rb.bytesToSignedAndValidateSigs(snapshotRole, content)
	if err != nil {
		return err
	}
	signedSnapshot, err := data.SnapshotFromSigned(signedObj)
	if err != nil {
		return err
	}
	if err := signed.VerifyVersion(&(signedSnapshot.Signed.SignedCommon), minVersion); err != nil {
		return err
	}
	if !allowExpired { // check must go at the end because all other validation should pass
		if err := signed.VerifyExpiry(&(signedSnapshot.Signed.SignedCommon), roleName); err != nil {
			return err
		}
	}
	// at this point, the only thing left to validate is existing checksums - we can use
	// this snapshot to bootstrap the next builder if needed - and we don't need to do
	// the 2-value assignment since we've already validated the signedSnapshot, which MUST
	// have root metadata
	rootMeta := signedSnapshot.Signed.Meta[data.CanonicalRootRole]
	rb.nextRootChecksum = &rootMeta
	if err := rb.validateChecksumsFromSnapshot(signedSnapshot); err != nil {
		return err
	}
	rb.repo.Snapshot = signedSnapshot
	return nil
}
// loadTargets verifies the given top-level targets metadata against the
// targets role defined by the already-loaded root, and installs it in the
// repo.
func (rb *repoBuilder) loadTargets(content []byte, minVersion int, allowExpired bool) error {
	role, err := rb.repo.Root.BuildBaseRole(data.CanonicalTargetsRole)
	if err != nil {
		// this should never happen, since the root has already been validated
		return err
	}
	sgnd, err := rb.bytesToSignedAndValidateSigs(role, content)
	if err != nil {
		return err
	}
	tgts, err := data.TargetsFromSigned(sgnd, data.CanonicalTargetsRole)
	if err != nil {
		return err
	}
	if err := signed.VerifyVersion(&(tgts.Signed.SignedCommon), minVersion); err != nil {
		return err
	}
	// the expiry check goes last so that all other validation still runs
	if !allowExpired {
		if err := signed.VerifyExpiry(&(tgts.Signed.SignedCommon), data.CanonicalTargetsRole); err != nil {
			return err
		}
	}
	rb.repo.Targets[data.CanonicalTargetsRole] = tgts
	return nil
}
// loadDelegation verifies the given delegation metadata against the
// delegation role derived from the already-loaded targets hierarchy, and
// installs it in the repo.
func (rb *repoBuilder) loadDelegation(roleName string, content []byte, minVersion int, allowExpired bool) error {
	delgRole, err := rb.repo.GetDelegationRole(roleName)
	if err != nil {
		return err
	}
	sgnd, err := rb.bytesToSignedAndValidateSigs(delgRole.BaseRole, content)
	if err != nil {
		return err
	}
	tgts, err := data.TargetsFromSigned(sgnd, roleName)
	if err != nil {
		return err
	}
	if err := signed.VerifyVersion(&(tgts.Signed.SignedCommon), minVersion); err != nil {
		return err
	}
	// the expiry check goes last so that all other validation still runs
	if !allowExpired {
		if err := signed.VerifyExpiry(&(tgts.Signed.SignedCommon), roleName); err != nil {
			return err
		}
	}
	rb.repo.Targets[roleName] = tgts
	return nil
}
// validateChecksumsFromTimestamp verifies any snapshot bytes that were loaded
// before this timestamp arrived against the timestamp's snapshot hashes, and
// marks them as checksummed on success.
func (rb *repoBuilder) validateChecksumsFromTimestamp(ts *data.SignedTimestamp) error {
	sn, ok := rb.loadedNotChecksummed[data.CanonicalSnapshotRole]
	if !ok {
		// no snapshot bytes are waiting to be checksummed
		return nil
	}
	// by this point, the SignedTimestamp has been validated, so it must have
	// a snapshot hash
	snMeta := ts.Signed.Meta[data.CanonicalSnapshotRole].Hashes
	if err := data.CheckHashes(sn, data.CanonicalSnapshotRole, snMeta); err != nil {
		return err
	}
	delete(rb.loadedNotChecksummed, data.CanonicalSnapshotRole)
	return nil
}
// validateChecksumsFromSnapshot verifies all previously loaded metadata
// (except the snapshot and timestamp, which the snapshot does not cover)
// against the snapshot's hashes, and marks the verified roles as checksummed.
func (rb *repoBuilder) validateChecksumsFromSnapshot(sn *data.SignedSnapshot) error {
	var verifiedRoles []string
	for roleName, loadedBytes := range rb.loadedNotChecksummed {
		if roleName == data.CanonicalSnapshotRole || roleName == data.CanonicalTimestampRole {
			continue
		}
		if err := data.CheckHashes(loadedBytes, roleName, sn.Signed.Meta[roleName].Hashes); err != nil {
			return err
		}
		verifiedRoles = append(verifiedRoles, roleName)
	}
	// deletion is deferred so the map is not mutated while being ranged over
	for _, roleName := range verifiedRoles {
		delete(rb.loadedNotChecksummed, roleName)
	}
	return nil
}
// validateChecksumFor checks content against the known checksums for
// roleName; if no checksum reference is available yet, the content is cached
// so it can be verified once the snapshot or timestamp arrives.
func (rb *repoBuilder) validateChecksumFor(content []byte, roleName string) error {
	// validate the bootstrap checksum for root, if one was provided
	if roleName == data.CanonicalRootRole && rb.bootstrappedRootChecksum != nil {
		if err := data.CheckHashes(content, roleName, rb.bootstrappedRootChecksum.Hashes); err != nil {
			return err
		}
	}
	// we also want to cache the root content, so that when the snapshot is
	// loaded it is validated (to make sure everything in the repo is
	// self-consistent)
	checksums := rb.getChecksumsFor(roleName)
	switch {
	case checksums != nil:
		// nil means "no reference loaded yet"; empty (non-nil) checksums
		// would correctly fail the hash check here
		if err := data.CheckHashes(content, roleName, *checksums); err != nil {
			return err
		}
	case roleName != data.CanonicalTimestampRole:
		// the timestamp is the only role which never needs checksumming; for
		// everything else, cache the content in the list of roles that have
		// not been checksummed by the snapshot/timestamp yet
		rb.loadedNotChecksummed[roleName] = content
	}
	return nil
}
// bytesToSigned checksums the given bytes and, if they validate, converts
// them to a data.Signed object. If the checksums are nil (as opposed to
// empty), the bytes are cached in the list of roles that haven't been
// checksummed (unless it's a timestamp, which has no checksum reference).
func (rb *repoBuilder) bytesToSigned(content []byte, roleName string) (*data.Signed, error) {
	if err := rb.validateChecksumFor(content, roleName); err != nil {
		return nil, err
	}
	// unmarshal into a Signed object
	sgnd := new(data.Signed)
	if err := json.Unmarshal(content, sgnd); err != nil {
		return nil, err
	}
	return sgnd, nil
}
// bytesToSignedAndValidateSigs converts the given bytes to a data.Signed
// object (checksumming them along the way) and verifies the signatures
// against the given role.
func (rb *repoBuilder) bytesToSignedAndValidateSigs(role data.BaseRole, content []byte) (*data.Signed, error) {
	sgnd, err := rb.bytesToSigned(content, role.Name)
	if err != nil {
		return nil, err
	}
	if err := signed.VerifySignatures(sgnd, role); err != nil {
		return nil, err
	}
	return sgnd, nil
}
// getChecksumsFor returns the checksums for the given role. If the checksum
// reference (the loaded timestamp for the snapshot role, and the loaded
// snapshot for every other role except timestamp and snapshot) is nil, it
// returns nil, meaning the checksum is not yet available. If the reference
// *is* loaded, it always returns the Hashes object for the given role - if
// the role doesn't exist there, that is an empty Hashes (against which any
// checksum validation would fail).
func (rb *repoBuilder) getChecksumsFor(role string) *data.Hashes {
	if role == data.CanonicalTimestampRole {
		// nothing ever checksums the timestamp
		return nil
	}
	if role == data.CanonicalSnapshotRole {
		if rb.repo.Timestamp == nil {
			return nil
		}
		hashes := rb.repo.Timestamp.Signed.Meta[data.CanonicalSnapshotRole].Hashes
		return &hashes
	}
	if rb.repo.Snapshot == nil {
		return nil
	}
	hashes := rb.repo.Snapshot.Signed.Meta[role].Hashes
	return &hashes
}

View file

@ -2,36 +2,34 @@ package client
import (
"encoding/json"
"fmt"
"path"
"github.com/Sirupsen/logrus"
"github.com/docker/notary"
tuf "github.com/docker/notary/tuf"
"github.com/docker/notary/tuf/data"
"github.com/docker/notary/tuf/signed"
"github.com/docker/notary/tuf/store"
"github.com/docker/notary/tuf/utils"
)
// Client is a usability wrapper around a raw TUF repo
type Client struct {
local *tuf.Repo
remote store.RemoteStore
cache store.MetadataStore
remote store.RemoteStore
cache store.MetadataStore
oldBuilder tuf.RepoBuilder
newBuilder tuf.RepoBuilder
}
// NewClient initializes a Client with the given repo, remote source of content, and cache
func NewClient(local *tuf.Repo, remote store.RemoteStore, cache store.MetadataStore) *Client {
func NewClient(oldBuilder, newBuilder tuf.RepoBuilder, remote store.RemoteStore, cache store.MetadataStore) *Client {
return &Client{
local: local,
remote: remote,
cache: cache,
oldBuilder: oldBuilder,
newBuilder: newBuilder,
remote: remote,
cache: cache,
}
}
// Update performs an update to the TUF repo as defined by the TUF spec
func (c *Client) Update() error {
func (c *Client) Update() (*tuf.Repo, error) {
// 1. Get timestamp
// a. If timestamp error (verification, expired, etc...) download new root and return to 1.
// 2. Check if local snapshot is up to date
@ -44,503 +42,188 @@ func (c *Client) Update() error {
err := c.update()
if err != nil {
logrus.Debug("Error occurred. Root will be downloaded and another update attempted")
logrus.Debug("Resetting the TUF builder...")
c.newBuilder = c.newBuilder.BootstrapNewBuilder()
if err := c.downloadRoot(); err != nil {
logrus.Debug("Client Update (Root):", err)
return err
return nil, err
}
// If we error again, we now have the latest root and just want to fail
// out as there's no expectation the problem can be resolved automatically
logrus.Debug("retrying TUF client update")
return c.update()
if err := c.update(); err != nil {
return nil, err
}
}
return nil
return c.newBuilder.Finish()
}
func (c *Client) update() error {
err := c.downloadTimestamp()
if err != nil {
if err := c.downloadTimestamp(); err != nil {
logrus.Debugf("Client Update (Timestamp): %s", err.Error())
return err
}
err = c.downloadSnapshot()
if err != nil {
if err := c.downloadSnapshot(); err != nil {
logrus.Debugf("Client Update (Snapshot): %s", err.Error())
return err
}
err = c.checkRoot()
if err != nil {
// In this instance the root has not expired base on time, but is
// expired based on the snapshot dictating a new root has been produced.
logrus.Debug(err)
return err
}
// will always need top level targets at a minimum
err = c.downloadTargets(data.CanonicalTargetsRole)
if err != nil {
if err := c.downloadTargets(); err != nil {
logrus.Debugf("Client Update (Targets): %s", err.Error())
return err
}
return nil
}
// checkRoot determines if the hash, and size are still those reported
// in the snapshot file. It will also check the expiry, however, if the
// hash and size in snapshot are unchanged but the root file has expired,
// there is little expectation that the situation can be remedied.
func (c Client) checkRoot() error {
role := data.CanonicalRootRole
size := c.local.Snapshot.Signed.Meta[role].Length
expectedHashes := c.local.Snapshot.Signed.Meta[role].Hashes
raw, err := c.cache.GetMeta(data.CanonicalRootRole, size)
if err != nil {
return err
}
if err := data.CheckHashes(raw, expectedHashes); err != nil {
return fmt.Errorf("Cached root hashes did not match snapshot root hashes")
}
if int64(len(raw)) != size {
return fmt.Errorf("Cached root size did not match snapshot size")
}
root := &data.SignedRoot{}
err = json.Unmarshal(raw, root)
if err != nil {
return ErrCorruptedCache{file: "root.json"}
}
if signed.IsExpired(root.Signed.Expires) {
return tuf.ErrLocalRootExpired{}
}
return nil
}
// downloadRoot is responsible for downloading the root.json
func (c *Client) downloadRoot() error {
logrus.Debug("Downloading Root...")
role := data.CanonicalRootRole
consistentInfo := c.newBuilder.GetConsistentInfo(role)
// We can't read an exact size for the root metadata without risking getting stuck in the TUF update cycle
// since it's possible that downloading timestamp/snapshot metadata may fail due to a signature mismatch
var size int64 = -1
if !consistentInfo.ChecksumKnown() {
logrus.Debugf("Loading root with no expected checksum")
// We could not expect what the "snapshot" meta has specified.
//
// In some old clients, there is only the "sha256",
// but both "sha256" and "sha512" in the newer ones.
//
// And possibly more in the future.
var expectedHashes data.Hashes
if c.local.Snapshot != nil {
if prevRootMeta, ok := c.local.Snapshot.Signed.Meta[role]; ok {
size = prevRootMeta.Length
expectedHashes = prevRootMeta.Hashes
}
// get the cached root, if it exists, just for version checking
cachedRoot, _ := c.cache.GetMeta(role, -1)
// prefer to download a new root
_, remoteErr := c.tryLoadRemote(consistentInfo, cachedRoot)
return remoteErr
}
// if we're bootstrapping we may not have a cached root, an
// error will result in the "previous root version" being
// interpreted as 0.
var download bool
var err error
var cachedRoot []byte
old := &data.Signed{}
version := 0
// Due to the same reason, we don't really know how many hashes are there.
if len(expectedHashes) != 0 {
// can only trust cache if we have an expected sha256(for example) to trust
cachedRoot, err = c.cache.GetMeta(role, size)
}
if cachedRoot == nil || err != nil {
logrus.Debug("didn't find a cached root, must download")
download = true
} else {
if err := data.CheckHashes(cachedRoot, expectedHashes); err != nil {
logrus.Debug("cached root's hash didn't match expected, must download")
download = true
}
err := json.Unmarshal(cachedRoot, old)
if err == nil {
root, err := data.RootFromSigned(old)
if err == nil {
version = root.Signed.Version
} else {
logrus.Debug("couldn't parse Signed part of cached root, must download")
download = true
}
} else {
logrus.Debug("couldn't parse cached root, must download")
download = true
}
}
var s *data.Signed
var raw []byte
if download {
// use consistent download if we have the checksum.
raw, s, err = c.downloadSigned(role, size, expectedHashes)
if err != nil {
return err
}
} else {
logrus.Debug("using cached root")
s = old
}
if err := c.verifyRoot(role, s, version); err != nil {
return err
}
if download {
logrus.Debug("caching downloaded root")
// Now that we have accepted new root, write it to cache
if err = c.cache.SetMeta(role, raw); err != nil {
logrus.Errorf("Failed to write root to local cache: %s", err.Error())
}
}
return nil
}
func (c Client) verifyRoot(role string, s *data.Signed, minVersion int) error {
// this will confirm that the root has been signed by the old root role
// with the root keys we bootstrapped with.
// Still need to determine if there has been a root key update and
// confirm signature with new root key
logrus.Debug("verifying root with existing keys")
rootRole, err := c.local.GetBaseRole(role)
if err != nil {
logrus.Debug("no previous root role loaded")
return err
}
// Verify using the rootRole loaded from the known root.json
if err = signed.Verify(s, rootRole, minVersion); err != nil {
logrus.Debug("root did not verify with existing keys")
return err
}
logrus.Debug("updating known root roles and keys")
root, err := data.RootFromSigned(s)
if err != nil {
logrus.Error(err.Error())
return err
}
// replace the existing root.json with the new one (just in memory, we
// have another validation step before we fully accept the new root)
err = c.local.SetRoot(root)
if err != nil {
logrus.Error(err.Error())
return err
}
// Verify the new root again having loaded the rootRole out of this new
// file (verifies self-referential integrity)
// TODO(endophage): be more intelligent and only re-verify if we detect
// there has been a change in root keys
logrus.Debug("verifying root with updated keys")
rootRole, err = c.local.GetBaseRole(role)
if err != nil {
logrus.Debug("root role with new keys not loaded")
return err
}
err = signed.Verify(s, rootRole, minVersion)
if err != nil {
logrus.Debug("root did not verify with new keys")
return err
}
logrus.Debug("successfully verified root")
return nil
_, err := c.tryLoadCacheThenRemote(consistentInfo)
return err
}
// downloadTimestamp is responsible for downloading the timestamp.json
// Timestamps are special in that we ALWAYS attempt to download and only
// use cache if the download fails (and the cache is still valid).
func (c *Client) downloadTimestamp() error {
logrus.Debug("Downloading Timestamp...")
logrus.Debug("Loading timestamp...")
role := data.CanonicalTimestampRole
consistentInfo := c.newBuilder.GetConsistentInfo(role)
// We may not have a cached timestamp if this is the first time
// we're interacting with the repo. This will result in the
// version being 0
var (
old *data.Signed
ts *data.SignedTimestamp
version = 0
)
cachedTS, err := c.cache.GetMeta(role, notary.MaxTimestampSize)
if err == nil {
cached := &data.Signed{}
err := json.Unmarshal(cachedTS, cached)
if err == nil {
ts, err := data.TimestampFromSigned(cached)
if err == nil {
version = ts.Signed.Version
}
old = cached
}
}
// unlike root, targets and snapshot, always try and download timestamps
// from remote, only using the cache one if we couldn't reach remote.
raw, s, err := c.downloadSigned(role, notary.MaxTimestampSize, nil)
if err == nil {
ts, err = c.verifyTimestamp(s, version)
if err == nil {
logrus.Debug("successfully verified downloaded timestamp")
c.cache.SetMeta(role, raw)
c.local.SetTimestamp(ts)
return nil
}
}
if old == nil {
// couldn't retrieve valid data from server and don't have unmarshallable data in cache.
logrus.Debug("no cached timestamp available")
return err
}
logrus.Debug(err.Error())
logrus.Warn("Error while downloading remote metadata, using cached timestamp - this might not be the latest version available remotely")
ts, err = c.verifyTimestamp(old, version)
if err != nil {
return err
}
logrus.Debug("successfully verified cached timestamp")
c.local.SetTimestamp(ts)
return nil
}
// get the cached timestamp, if it exists
cachedTS, cachedErr := c.cache.GetMeta(role, notary.MaxTimestampSize)
	// always get the remote timestamp, since it supersedes the local one
_, remoteErr := c.tryLoadRemote(consistentInfo, cachedTS)
// verifies that a timestamp is valid, and returned the SignedTimestamp object to add to the tuf repo
func (c *Client) verifyTimestamp(s *data.Signed, minVersion int) (*data.SignedTimestamp, error) {
timestampRole, err := c.local.GetBaseRole(data.CanonicalTimestampRole)
if err != nil {
logrus.Debug("no timestamp role loaded")
return nil, err
switch {
case remoteErr == nil:
return nil
case cachedErr == nil:
logrus.Debug(remoteErr.Error())
logrus.Warn("Error while downloading remote metadata, using cached timestamp - this might not be the latest version available remotely")
err := c.newBuilder.Load(role, cachedTS, 1, false)
if err == nil {
logrus.Debug("successfully verified cached timestamp")
}
return err
default:
logrus.Debug("no cached or remote timestamp available")
return remoteErr
}
if err := signed.Verify(s, timestampRole, minVersion); err != nil {
return nil, err
}
return data.TimestampFromSigned(s)
}
// downloadSnapshot is responsible for downloading the snapshot.json
func (c *Client) downloadSnapshot() error {
logrus.Debug("Downloading Snapshot...")
logrus.Debug("Loading snapshot...")
role := data.CanonicalSnapshotRole
if c.local.Timestamp == nil {
return tuf.ErrNotLoaded{Role: data.CanonicalTimestampRole}
}
size := c.local.Timestamp.Signed.Meta[role].Length
expectedHashes := c.local.Timestamp.Signed.Meta[role].Hashes
if len(expectedHashes) == 0 {
return data.ErrMissingMeta{Role: data.CanonicalSnapshotRole}
}
consistentInfo := c.newBuilder.GetConsistentInfo(role)
var download bool
old := &data.Signed{}
version := 0
raw, err := c.cache.GetMeta(role, size)
if raw == nil || err != nil {
logrus.Debug("no snapshot in cache, must download")
download = true
} else {
// file may have been tampered with on disk. Always check the hash!
if err := data.CheckHashes(raw, expectedHashes); err != nil {
logrus.Debug("hash of snapshot in cache did not match expected hash, must download")
download = true
}
err := json.Unmarshal(raw, old)
if err == nil {
snap, err := data.SnapshotFromSigned(old)
if err == nil {
version = snap.Signed.Version
} else {
logrus.Debug("Could not parse Signed part of snapshot, must download")
download = true
}
} else {
logrus.Debug("Could not parse snapshot, must download")
download = true
}
}
var s *data.Signed
if download {
raw, s, err = c.downloadSigned(role, size, expectedHashes)
if err != nil {
return err
}
} else {
logrus.Debug("using cached snapshot")
s = old
}
snapshotRole, err := c.local.GetBaseRole(role)
if err != nil {
logrus.Debug("no snapshot role loaded")
return err
}
err = signed.Verify(s, snapshotRole, version)
if err != nil {
return err
}
logrus.Debug("successfully verified snapshot")
snap, err := data.SnapshotFromSigned(s)
if err != nil {
return err
}
c.local.SetSnapshot(snap)
if download {
err = c.cache.SetMeta(role, raw)
if err != nil {
logrus.Errorf("Failed to write snapshot to local cache: %s", err.Error())
}
}
return nil
_, err := c.tryLoadCacheThenRemote(consistentInfo)
return err
}
// downloadTargets downloads all targets and delegated targets for the repository.
// It uses a pre-order tree traversal as it's necessary to download parents first
// to obtain the keys to validate children.
func (c *Client) downloadTargets(role string) error {
logrus.Debug("Downloading Targets...")
stack := utils.NewStack()
stack.Push(role)
for !stack.Empty() {
role, err := stack.PopString()
if err != nil {
return err
}
if c.local.Snapshot == nil {
return tuf.ErrNotLoaded{Role: data.CanonicalSnapshotRole}
}
snap := c.local.Snapshot.Signed
root := c.local.Root.Signed
func (c *Client) downloadTargets() error {
toDownload := []data.DelegationRole{{
BaseRole: data.BaseRole{Name: data.CanonicalTargetsRole},
Paths: []string{""},
}}
for len(toDownload) > 0 {
role := toDownload[0]
toDownload = toDownload[1:]
s, err := c.getTargetsFile(role, snap.Meta, root.ConsistentSnapshot)
consistentInfo := c.newBuilder.GetConsistentInfo(role.Name)
if !consistentInfo.ChecksumKnown() {
logrus.Debugf("skipping %s because there is no checksum for it", role.Name)
continue
}
children, err := c.getTargetsFile(role, consistentInfo)
if err != nil {
if _, ok := err.(data.ErrMissingMeta); ok && role != data.CanonicalTargetsRole {
if _, ok := err.(data.ErrMissingMeta); ok && role.Name != data.CanonicalTargetsRole {
// if the role meta hasn't been published,
// that's ok, continue
continue
}
logrus.Error("Error getting targets file:", err)
logrus.Debugf("Error getting %s: %s", role.Name, err)
return err
}
t, err := data.TargetsFromSigned(s, role)
if err != nil {
return err
}
err = c.local.SetTargets(role, t)
if err != nil {
return err
}
// push delegated roles contained in the targets file onto the stack
for _, r := range t.Signed.Delegations.Roles {
if path.Dir(r.Name) == role {
// only load children that are direct 1st generation descendants
// of the role we've just downloaded
stack.Push(r.Name)
}
}
toDownload = append(children, toDownload...)
}
return nil
}
func (c *Client) downloadSigned(role string, size int64, expectedHashes data.Hashes) ([]byte, *data.Signed, error) {
rolePath := utils.ConsistentName(role, expectedHashes["sha256"])
raw, err := c.remote.GetMeta(rolePath, size)
func (c Client) getTargetsFile(role data.DelegationRole, ci tuf.ConsistentInfo) ([]data.DelegationRole, error) {
logrus.Debugf("Loading %s...", role.Name)
tgs := &data.SignedTargets{}
raw, err := c.tryLoadCacheThenRemote(ci)
if err != nil {
return nil, nil, err
}
if expectedHashes != nil {
if err := data.CheckHashes(raw, expectedHashes); err != nil {
return nil, nil, ErrChecksumMismatch{role: role}
}
}
s := &data.Signed{}
err = json.Unmarshal(raw, s)
if err != nil {
return nil, nil, err
}
return raw, s, nil
}
func (c Client) getTargetsFile(role string, snapshotMeta data.Files, consistent bool) (*data.Signed, error) {
// require role exists in snapshots
roleMeta, ok := snapshotMeta[role]
if !ok {
return nil, data.ErrMissingMeta{Role: role}
}
expectedHashes := snapshotMeta[role].Hashes
if len(expectedHashes) == 0 {
return nil, data.ErrMissingMeta{Role: role}
}
// try to get meta file from content addressed cache
var download bool
old := &data.Signed{}
version := 0
raw, err := c.cache.GetMeta(role, roleMeta.Length)
if err != nil || raw == nil {
logrus.Debugf("Couldn't not find cached %s, must download", role)
download = true
} else {
// file may have been tampered with on disk. Always check the hash!
if err := data.CheckHashes(raw, expectedHashes); err != nil {
download = true
}
err := json.Unmarshal(raw, old)
if err == nil {
targ, err := data.TargetsFromSigned(old, role)
if err == nil {
version = targ.Signed.Version
} else {
download = true
}
} else {
download = true
}
}
size := snapshotMeta[role].Length
var s *data.Signed
if download {
raw, s, err = c.downloadSigned(role, size, expectedHashes)
if err != nil {
return nil, err
}
} else {
logrus.Debug("using cached ", role)
s = old
}
var targetOrDelgRole data.BaseRole
if data.IsDelegation(role) {
delgRole, err := c.local.GetDelegationRole(role)
if err != nil {
logrus.Debugf("no %s delegation role loaded", role)
return nil, err
}
targetOrDelgRole = delgRole.BaseRole
} else {
targetOrDelgRole, err = c.local.GetBaseRole(role)
if err != nil {
logrus.Debugf("no %s role loaded", role)
return nil, err
}
}
if err = signed.Verify(s, targetOrDelgRole, version); err != nil {
return nil, err
}
logrus.Debugf("successfully verified %s", role)
if download {
// if we error when setting meta, we should continue.
err = c.cache.SetMeta(role, raw)
if err != nil {
logrus.Errorf("Failed to write %s to local cache: %s", role, err.Error())
}
}
return s, nil
// we know it unmarshals because if `tryLoadCacheThenRemote` didn't fail, then
// the raw has already been loaded into the builder
json.Unmarshal(raw, tgs)
return tgs.GetValidDelegations(role), nil
}
// tryLoadCacheThenRemote attempts to satisfy the metadata request for the
// given role from the local cache first; whenever the cache misses or the
// cached copy fails verification in the new builder, it falls back to a
// remote download via tryLoadRemote.
func (c *Client) tryLoadCacheThenRemote(consistentInfo tuf.ConsistentInfo) ([]byte, error) {
	cached, err := c.cache.GetMeta(consistentInfo.RoleName, consistentInfo.Length())
	if err != nil {
		// nothing usable on disk - go straight to the remote store
		logrus.Debugf("no %s in cache, must download", consistentInfo.RoleName)
		return c.tryLoadRemote(consistentInfo, nil)
	}

	err = c.newBuilder.Load(consistentInfo.RoleName, cached, 1, false)
	if err == nil {
		logrus.Debugf("successfully verified cached %s", consistentInfo.RoleName)
		return cached, nil
	}

	// the cached copy did not validate; re-download, handing the stale bytes
	// along so the remote path can use them for minimum-version checks
	logrus.Debugf("cached %s is invalid (must download): %s", consistentInfo.RoleName, err)
	return c.tryLoadRemote(consistentInfo, cached)
}
// tryLoadRemote downloads the metadata for the given role from the remote
// store and verifies it with the new repo builder. "old" is the (possibly
// invalid) cached copy of the same role, or nil if there was none; it is
// returned unchanged on download failure so the caller may fall back on it,
// and is otherwise used only to derive the minimum acceptable version.
func (c *Client) tryLoadRemote(consistentInfo tuf.ConsistentInfo, old []byte) ([]byte, error) {
	consistentName := consistentInfo.ConsistentName()
	raw, err := c.remote.GetMeta(consistentName, consistentInfo.Length())
	if err != nil {
		logrus.Debugf("error downloading %s: %s", consistentName, err)
		return old, err
	}
	// try to load the old data into the old builder - only use it to validate
	// versions if it loads successfully. If it errors, then the loaded version
	// will be 1, imposing no floor on the downloaded version. The Load error
	// is deliberately ignored here.
	c.oldBuilder.Load(consistentInfo.RoleName, old, 1, true)
	minVersion := c.oldBuilder.GetLoadedVersion(consistentInfo.RoleName)
	if err := c.newBuilder.Load(consistentInfo.RoleName, raw, minVersion, false); err != nil {
		logrus.Debugf("downloaded %s is invalid: %s", consistentName, err)
		return raw, err
	}
	logrus.Debugf("successfully verified downloaded %s", consistentName)
	// a cache write failure is non-fatal: the verified metadata is already
	// loaded into the builder, so just log and continue
	if err := c.cache.SetMeta(consistentInfo.RoleName, raw); err != nil {
		logrus.Debugf("Unable to write %s to cache: %s", consistentInfo.RoleName, err)
	}
	return raw, nil
}

View file

@ -4,15 +4,6 @@ import (
"fmt"
)
// ErrChecksumMismatch - a checksum failed verification
type ErrChecksumMismatch struct {
role string
}
func (e ErrChecksumMismatch) Error() string {
return fmt.Sprintf("tuf: checksum for %s did not match", e.role)
}
// ErrCorruptedCache - local data is incorrect
type ErrCorruptedCache struct {
file string

View file

@ -12,13 +12,14 @@ func (e ErrInvalidMetadata) Error() string {
return fmt.Sprintf("%s type metadata invalid: %s", e.role, e.msg)
}
// ErrMissingMeta - couldn't find the FileMeta object for a role or target
// ErrMissingMeta - couldn't find the FileMeta object for the given Role, or
// the FileMeta object contained no supported checksums
type ErrMissingMeta struct {
Role string
}
func (e ErrMissingMeta) Error() string {
return fmt.Sprintf("tuf: sha256 checksum required for %s", e.Role)
return fmt.Sprintf("no checksums for supported algorithms were provided for %s", e.Role)
}
// ErrInvalidChecksum is the error to be returned when checksum is invalid
@ -32,9 +33,12 @@ func (e ErrInvalidChecksum) Error() string {
// ErrMismatchedChecksum is the error to be returned when checksum is mismatched
type ErrMismatchedChecksum struct {
alg string
alg string
name string
expected string
}
func (e ErrMismatchedChecksum) Error() string {
return fmt.Sprintf("%s checksum mismatched", e.alg)
return fmt.Sprintf("%s checksum for %s did not match: expected %s", e.alg, e.name,
e.expected)
}

View file

@ -31,9 +31,9 @@ func isValidRootStructure(r Root) error {
role: CanonicalRootRole, msg: fmt.Sprintf("expected type %s, not %s", expectedType, r.Type)}
}
if r.Version < 0 {
if r.Version < 1 {
return ErrInvalidMetadata{
role: CanonicalRootRole, msg: "version cannot be negative"}
role: CanonicalRootRole, msg: "version cannot be less than 1"}
}
// all the base roles MUST appear in the root.json - other roles are allowed,

View file

@ -22,19 +22,19 @@ type Snapshot struct {
Meta Files `json:"meta"`
}
// isValidSnapshotStructure returns an error, or nil, depending on whether the content of the
// IsValidSnapshotStructure returns an error, or nil, depending on whether the content of the
// struct is valid for snapshot metadata. This does not check signatures or expiry, just that
// the metadata content is valid.
func isValidSnapshotStructure(s Snapshot) error {
func IsValidSnapshotStructure(s Snapshot) error {
expectedType := TUFTypes[CanonicalSnapshotRole]
if s.Type != expectedType {
return ErrInvalidMetadata{
role: CanonicalSnapshotRole, msg: fmt.Sprintf("expected type %s, not %s", expectedType, s.Type)}
}
if s.Version < 0 {
if s.Version < 1 {
return ErrInvalidMetadata{
role: CanonicalSnapshotRole, msg: "version cannot be negative"}
role: CanonicalSnapshotRole, msg: "version cannot be less than one"}
}
for _, role := range []string{CanonicalRootRole, CanonicalTargetsRole} {
@ -126,7 +126,9 @@ func (sp *SignedSnapshot) AddMeta(role string, meta FileMeta) {
// not found
func (sp *SignedSnapshot) GetMeta(role string) (*FileMeta, error) {
if meta, ok := sp.Signed.Meta[role]; ok {
return &meta, nil
if _, ok := meta.Hashes["sha256"]; ok {
return &meta, nil
}
}
return nil, ErrMissingMeta{Role: role}
}
@ -155,7 +157,7 @@ func SnapshotFromSigned(s *Signed) (*SignedSnapshot, error) {
if err := defaultSerializer.Unmarshal(*s.Signed, &sp); err != nil {
return nil, err
}
if err := isValidSnapshotStructure(sp); err != nil {
if err := IsValidSnapshotStructure(sp); err != nil {
return nil, err
}
sigs := make([]Signature, len(s.Signatures))

View file

@ -38,8 +38,8 @@ func isValidTargetsStructure(t Targets, roleName string) error {
role: roleName, msg: fmt.Sprintf("expected type %s, not %s", expectedType, t.Type)}
}
if t.Version < 0 {
return ErrInvalidMetadata{role: roleName, msg: "version cannot be negative"}
if t.Version < 1 {
return ErrInvalidMetadata{role: roleName, msg: "version cannot be less than one"}
}
for _, roleObj := range t.Delegations.Roles {

View file

@ -21,19 +21,19 @@ type Timestamp struct {
Meta Files `json:"meta"`
}
// isValidTimestampStructure returns an error, or nil, depending on whether the content of the struct
// IsValidTimestampStructure returns an error, or nil, depending on whether the content of the struct
// is valid for timestamp metadata. This does not check signatures or expiry, just that
// the metadata content is valid.
func isValidTimestampStructure(t Timestamp) error {
func IsValidTimestampStructure(t Timestamp) error {
expectedType := TUFTypes[CanonicalTimestampRole]
if t.Type != expectedType {
return ErrInvalidMetadata{
role: CanonicalTimestampRole, msg: fmt.Sprintf("expected type %s, not %s", expectedType, t.Type)}
}
if t.Version < 0 {
if t.Version < 1 {
return ErrInvalidMetadata{
role: CanonicalTimestampRole, msg: "version cannot be negative"}
role: CanonicalTimestampRole, msg: "version cannot be less than one"}
}
// Meta is a map of FileMeta, so if the role isn't in the map it returns
@ -124,7 +124,7 @@ func TimestampFromSigned(s *Signed) (*SignedTimestamp, error) {
if err := defaultSerializer.Unmarshal(*s.Signed, &ts); err != nil {
return nil, err
}
if err := isValidTimestampStructure(ts); err != nil {
if err := IsValidTimestampStructure(ts); err != nil {
return nil, err
}
sigs := make([]Signature, len(s.Signatures))

View file

@ -4,6 +4,7 @@ import (
"crypto/sha256"
"crypto/sha512"
"crypto/subtle"
"encoding/hex"
"fmt"
"hash"
"io"
@ -132,7 +133,7 @@ type FileMeta struct {
}
// CheckHashes verifies all the checksums specified by the "hashes" of the payload.
func CheckHashes(payload []byte, hashes Hashes) error {
func CheckHashes(payload []byte, name string, hashes Hashes) error {
cnt := 0
// k, v indicate the hash algorithm and the corresponding value
@ -141,20 +142,20 @@ func CheckHashes(payload []byte, hashes Hashes) error {
case notary.SHA256:
checksum := sha256.Sum256(payload)
if subtle.ConstantTimeCompare(checksum[:], v) == 0 {
return ErrMismatchedChecksum{alg: notary.SHA256}
return ErrMismatchedChecksum{alg: notary.SHA256, name: name, expected: hex.EncodeToString(v)}
}
cnt++
case notary.SHA512:
checksum := sha512.Sum512(payload)
if subtle.ConstantTimeCompare(checksum[:], v) == 0 {
return ErrMismatchedChecksum{alg: notary.SHA512}
return ErrMismatchedChecksum{alg: notary.SHA512, name: name, expected: hex.EncodeToString(v)}
}
cnt++
}
}
if cnt == 0 {
return fmt.Errorf("at least one supported hash needed")
return ErrMissingMeta{Role: name}
}
return nil

View file

@ -44,10 +44,15 @@ func (e ErrLowVersion) Error() string {
}
// ErrRoleThreshold indicates we did not validate enough signatures to meet the threshold
type ErrRoleThreshold struct{}
type ErrRoleThreshold struct {
Msg string
}
func (e ErrRoleThreshold) Error() string {
return "valid signatures did not meet threshold"
if e.Msg == "" {
return "valid signatures did not meet threshold"
}
return e.Msg
}
// ErrInvalidKeyType indicates the types for the key and signature it's associated with are

View file

@ -18,17 +18,20 @@ type KeyService interface {
// GetKey retrieves the public key if present, otherwise it returns nil
GetKey(keyID string) data.PublicKey
// GetPrivateKey retrieves the private key and role if present, otherwise
// it returns nil
// GetPrivateKey retrieves the private key and role if present and retrievable,
// otherwise it returns nil and an error
GetPrivateKey(keyID string) (data.PrivateKey, string, error)
// RemoveKey deletes the specified key
// RemoveKey deletes the specified key, and returns an error only if the key
// removal fails. If the key doesn't exist, no error should be returned.
RemoveKey(keyID string) error
// ListKeys returns a list of key IDs for the role
// ListKeys returns a list of key IDs for the role, or an empty list or
// nil if there are no keys.
ListKeys(role string) []string
// ListAllKeys returns a map of all available signing key IDs to role
// ListAllKeys returns a map of all available signing key IDs to role, or
// an empty map or nil if there are no keys.
ListAllKeys() map[string]string
}

View file

@ -21,39 +21,28 @@ var (
ErrWrongType = errors.New("tuf: meta file has wrong type")
)
// Verify checks the signatures and metadata (expiry, version) for the signed role
// data
func Verify(s *data.Signed, role data.BaseRole, minVersion int) error {
if err := verifyMeta(s, role.Name, minVersion); err != nil {
return err
}
return VerifySignatures(s, role)
}
func verifyMeta(s *data.Signed, role string, minVersion int) error {
sm := &data.SignedCommon{}
if err := json.Unmarshal(*s.Signed, sm); err != nil {
return err
}
if !data.ValidTUFType(sm.Type, role) {
return ErrWrongType
}
if IsExpired(sm.Expires) {
logrus.Errorf("Metadata for %s expired", role)
return ErrExpired{Role: role, Expired: sm.Expires.Format("Mon Jan 2 15:04:05 MST 2006")}
}
if sm.Version < minVersion {
return ErrLowVersion{sm.Version, minVersion}
}
return nil
}
// IsExpired checks if the given time passed before the present time
func IsExpired(t time.Time) bool {
return t.Before(time.Now())
}
// VerifyExpiry returns ErrExpired if the metadata is expired
func VerifyExpiry(s *data.SignedCommon, role string) error {
	if !IsExpired(s.Expires) {
		// still within its validity window
		return nil
	}
	logrus.Errorf("Metadata for %s expired", role)
	return ErrExpired{Role: role, Expired: s.Expires.Format("Mon Jan 2 15:04:05 MST 2006")}
}
// VerifyVersion returns ErrLowVersion if the metadata version is lower than the min version
func VerifyVersion(s *data.SignedCommon, minVersion int) error {
	if s.Version >= minVersion {
		// version is acceptable
		return nil
	}
	return ErrLowVersion{Actual: s.Version, Current: minVersion}
}
// VerifySignatures checks the we have sufficient valid signatures for the given role
func VerifySignatures(s *data.Signed, roleData data.BaseRole) error {
if len(s.Signatures) == 0 {

View file

@ -39,7 +39,7 @@ func (f *FilesystemStore) getPath(name string) string {
}
// GetMeta returns the meta for the given name (a role) up to size bytes
// If size is -1, this corresponds to "infinite," but we cut off at the
// If size is "NoSizeLimit", this corresponds to "infinite," but we cut off at a
// predefined threshold "notary.MaxDownloadSize".
func (f *FilesystemStore) GetMeta(name string, size int64) ([]byte, error) {
meta, err := ioutil.ReadFile(f.getPath(name))
@ -49,7 +49,7 @@ func (f *FilesystemStore) GetMeta(name string, size int64) ([]byte, error) {
}
return nil, err
}
if size == -1 {
if size == NoSizeLimit {
size = notary.MaxDownloadSize
}
// Only return up to size bytes

View file

@ -139,7 +139,8 @@ func translateStatusToError(resp *http.Response, resource string) error {
// GetMeta downloads the named meta file with the given size. A short body
// is acceptable because in the case of timestamp.json, the size is a cap,
// not an exact length.
// If size is -1, this corresponds to "infinite," but we cut off at 100MB
// If size is "NoSizeLimit", this corresponds to "infinite," but we cut off at a
// predefined threshold "notary.MaxDownloadSize".
func (s HTTPStore) GetMeta(name string, size int64) ([]byte, error) {
url, err := s.buildMetaURL(name)
if err != nil {
@ -158,7 +159,7 @@ func (s HTTPStore) GetMeta(name string, size int64) ([]byte, error) {
logrus.Debugf("received HTTP status %d when requesting %s.", resp.StatusCode, name)
return nil, err
}
if size == -1 {
if size == NoSizeLimit {
size = notary.MaxDownloadSize
}
if resp.ContentLength > size {

View file

@ -1,5 +1,8 @@
package store
// NoSizeLimit is represented as -1 for arguments to GetMeta
const NoSizeLimit int64 = -1
// MetadataStore must be implemented by anything that intends to interact
// with a store of TUF files
type MetadataStore interface {

View file

@ -39,13 +39,14 @@ type MemoryStore struct {
}
// GetMeta returns up to size bytes of data references by name.
// If size is -1, this corresponds to "infinite," but we cut off at 100MB
// as we will always know the size for everything but a timestamp and
// sometimes a root, neither of which should be exceptionally large
// If size is "NoSizeLimit", this corresponds to "infinite," but we cut off at a
// predefined threshold "notary.MaxDownloadSize", as we will always know the
// size for everything but a timestamp and sometimes a root,
// neither of which should be exceptionally large
func (m *MemoryStore) GetMeta(name string, size int64) ([]byte, error) {
d, ok := m.meta[name]
if ok {
if size == -1 {
if size == NoSizeLimit {
size = notary.MaxDownloadSize
}
if int64(len(d)) < size {

View file

@ -549,37 +549,6 @@ func (tr *Repo) InitTimestamp() error {
return nil
}
// SetRoot sets the Repo.Root field to the SignedRoot object.
func (tr *Repo) SetRoot(s *data.SignedRoot) error {
tr.Root = s
var err error
// originalRootRole is the root role prior to any mutations that might
// occur on tr.Root.
tr.originalRootRole, err = tr.Root.BuildBaseRole(data.CanonicalRootRole)
return err
}
// SetTimestamp parses the Signed object into a SignedTimestamp object
// and sets the Repo.Timestamp field.
func (tr *Repo) SetTimestamp(s *data.SignedTimestamp) error {
tr.Timestamp = s
return nil
}
// SetSnapshot parses the Signed object into a SignedSnapshots object
// and sets the Repo.Snapshot field.
func (tr *Repo) SetSnapshot(s *data.SignedSnapshot) error {
tr.Snapshot = s
return nil
}
// SetTargets sets the SignedTargets object against the role in the
// Repo.Targets map.
func (tr *Repo) SetTargets(role string, s *data.SignedTargets) error {
tr.Targets[role] = s
return nil
}
// TargetMeta returns the FileMeta entry for the given path in the
// targets file associated with the given role. This may be nil if
// the target isn't found in the targets file.
@ -876,7 +845,15 @@ func (tr *Repo) SignRoot(expires time.Time) (*data.Signed, error) {
}
}
// if the root role has changed and original role had not been saved as a previous role, save it now
// If the root role (root keys or root threshold) has changed, save the
// previous role under the role name "root.<n>", such that the "n" is the
// latest root.json version for which previous root role was valid.
// Also, guard against re-saving the previous role if the latest
// saved role is the same (which should not happen).
// n = root.json version of the originalRootRole (previous role)
// n+1 = root.json version of the currRoot (current role)
// n-m = root.json version of latestSavedRole (not necessarily n-1, because the
// last root rotation could have happened several root.json versions ago
if !tr.originalRootRole.Equals(currRoot) && !tr.originalRootRole.Equals(latestSavedRole) {
rolesToSignWith = append(rolesToSignWith, tr.originalRootRole)
latestSavedRole = tr.originalRootRole
@ -884,20 +861,11 @@ func (tr *Repo) SignRoot(expires time.Time) (*data.Signed, error) {
versionName := oldRootVersionName(tempRoot.Signed.Version)
tempRoot.Signed.Roles[versionName] = &data.RootRole{
KeyIDs: latestSavedRole.ListKeyIDs(), Threshold: latestSavedRole.Threshold}
}
tempRoot.Signed.Expires = expires
tempRoot.Signed.Version++
// if the current role doesn't match with the latest saved role, save it
if !currRoot.Equals(latestSavedRole) {
rolesToSignWith = append(rolesToSignWith, currRoot)
versionName := oldRootVersionName(tempRoot.Signed.Version)
tempRoot.Signed.Roles[versionName] = &data.RootRole{
KeyIDs: currRoot.ListKeyIDs(), Threshold: currRoot.Threshold}
}
rolesToSignWith = append(rolesToSignWith, currRoot)
signed, err := tempRoot.ToSigned()
if err != nil {
@ -914,7 +882,7 @@ func (tr *Repo) SignRoot(expires time.Time) (*data.Signed, error) {
return signed, nil
}
// get all the saved previous roles <= the current root version
// get all the saved previous roles < the current root version
func (tr *Repo) getOldRootRoles() versionedRootRoles {
oldRootRoles := make(versionedRootRoles, 0, len(tr.Root.Signed.Roles))
@ -930,7 +898,7 @@ func (tr *Repo) getOldRootRoles() versionedRootRoles {
continue
}
version, err := strconv.Atoi(nameTokens[1])
if err != nil || version > tr.Root.Signed.Version {
if err != nil || version >= tr.Root.Signed.Version {
continue
}