Notary delegation integration into docker

Signed-off-by: Riyaz Faizullabhoy <riyaz.faizullabhoy@docker.com>
Riyaz Faizullabhoy 2015-12-18 18:47:35 -08:00
parent 6213cc9d04
commit 1c125f50cf
50 changed files with 1932 additions and 779 deletions


@ -153,7 +153,7 @@ RUN set -x \
&& rm -rf "$GOPATH"
# Install notary server
ENV NOTARY_COMMIT 8e8122eb5528f621afcd4e2854c47302f17392f7
ENV NOTARY_COMMIT 30c488b3b4c62fdbc2c1eae7cf3b62ca73f95fad
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \


@ -11,6 +11,7 @@ import (
"net/http"
"net/url"
"os"
"path"
"path/filepath"
"regexp"
"sort"
@ -35,9 +36,14 @@ import (
"github.com/docker/notary/passphrase"
"github.com/docker/notary/trustmanager"
"github.com/docker/notary/tuf/data"
"github.com/docker/notary/tuf/signed"
"github.com/docker/notary/tuf/store"
)
var untrusted bool
var (
releasesRole = path.Join(data.CanonicalTargetsRole, "releases")
untrusted bool
)
func addTrustedFlags(fs *flag.FlagSet, verify bool) {
var trusted bool
@ -238,11 +244,11 @@ func (cli *DockerCli) trustedReference(ref reference.NamedTagged) (reference.Can
return nil, err
}
t, err := notaryRepo.GetTargetByName(ref.Tag())
t, err := notaryRepo.GetTargetByName(ref.Tag(), releasesRole, data.CanonicalTargetsRole)
if err != nil {
return nil, err
}
r, err := convertTarget(*t)
r, err := convertTarget(t.Target)
if err != nil {
return nil, err
@ -264,17 +270,27 @@ func (cli *DockerCli) tagTrusted(trustedRef reference.Canonical, ref reference.N
return cli.client.ImageTag(options)
}
func notaryError(err error) error {
func notaryError(repoName string, err error) error {
switch err.(type) {
case *json.SyntaxError:
logrus.Debugf("Notary syntax error: %s", err)
return errors.New("no trust data available for remote repository")
return fmt.Errorf("Error: no trust data available for remote repository %s. Try running notary server and setting DOCKER_CONTENT_TRUST_SERVER to its HTTPS address?", repoName)
case client.ErrExpired:
return fmt.Errorf("remote repository out-of-date: %v", err)
return fmt.Errorf("Error: remote repository %s out-of-date: %v", repoName, err)
case trustmanager.ErrKeyNotFound:
return fmt.Errorf("signing keys not found: %v", err)
return fmt.Errorf("Error: signing keys for remote repository %s not found: %v", repoName, err)
case *net.OpError:
return fmt.Errorf("error contacting notary server: %v", err)
return fmt.Errorf("Error: error contacting notary server: %v", err)
case store.ErrMetaNotFound:
return fmt.Errorf("Error: trust data missing for remote repository %s: %v", repoName, err)
case signed.ErrInvalidKeyType:
return fmt.Errorf("Error: trust data mismatch for remote repository %s, could be malicious behavior: %v", repoName, err)
case signed.ErrNoKeys:
return fmt.Errorf("Error: could not find signing keys for remote repository %s: %v", repoName, err)
case signed.ErrLowVersion:
return fmt.Errorf("Error: trust data version is lower than expected for remote repository %s, could be malicious behavior: %v", repoName, err)
case signed.ErrInsufficientSignatures:
return fmt.Errorf("Error: trust data has insufficient signatures for remote repository %s, could be malicious behavior: %v", repoName, err)
}
return err
@ -291,12 +307,12 @@ func (cli *DockerCli) trustedPull(repoInfo *registry.RepositoryInfo, ref registr
if ref.String() == "" {
// List all targets
targets, err := notaryRepo.ListTargets()
targets, err := notaryRepo.ListTargets(releasesRole, data.CanonicalTargetsRole)
if err != nil {
return notaryError(err)
return notaryError(repoInfo.FullName(), err)
}
for _, tgt := range targets {
t, err := convertTarget(*tgt)
t, err := convertTarget(tgt.Target)
if err != nil {
fmt.Fprintf(cli.out, "Skipping target for %q\n", repoInfo.Name())
continue
@ -304,11 +320,11 @@ func (cli *DockerCli) trustedPull(repoInfo *registry.RepositoryInfo, ref registr
refs = append(refs, t)
}
} else {
t, err := notaryRepo.GetTargetByName(ref.String())
t, err := notaryRepo.GetTargetByName(ref.String(), releasesRole, data.CanonicalTargetsRole)
if err != nil {
return notaryError(err)
return notaryError(repoInfo.FullName(), err)
}
r, err := convertTarget(*t)
r, err := convertTarget(t.Target)
if err != nil {
return err
@ -413,7 +429,7 @@ func (cli *DockerCli) trustedPush(repoInfo *registry.RepositoryInfo, tag string,
repo, err := cli.getNotaryRepository(repoInfo, authConfig)
if err != nil {
fmt.Fprintf(cli.out, "Error establishing connection to notary repository: %s\n", err)
fmt.Fprintf(cli.out, "Error establishing connection to notary repository, has a notary server been setup and pointed to by the DOCKER_CONTENT_TRUST_SERVER environment variable?: %s\n", err)
return err
}
@ -429,14 +445,14 @@ func (cli *DockerCli) trustedPush(repoInfo *registry.RepositoryInfo, tag string,
},
Length: int64(target.size),
}
if err := repo.AddTarget(t); err != nil {
if err := repo.AddTarget(t, releasesRole); err != nil {
return err
}
}
err = repo.Publish()
if _, ok := err.(*client.ErrRepoNotInitialized); !ok {
return notaryError(err)
return notaryError(repoInfo.FullName(), err)
}
keys := repo.CryptoService.ListKeys(data.CanonicalRootRole)
@ -455,9 +471,9 @@ func (cli *DockerCli) trustedPush(repoInfo *registry.RepositoryInfo, tag string,
}
if err := repo.Initialize(rootKeyID); err != nil {
return notaryError(err)
return notaryError(repoInfo.FullName(), err)
}
fmt.Fprintf(cli.out, "Finished initializing %q\n", repoInfo.FullName())
return notaryError(repo.Publish())
return notaryError(repoInfo.FullName(), repo.Publish())
}
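
Taken together, the client changes above move every trust lookup onto the delegation-aware notary API: tags are resolved from the "targets/releases" delegation first, then from the base "targets" role, and every notary failure is wrapped with the repository's full name. A minimal Go sketch of that lookup order, not part of the diff, using only the docker/notary packages vendored by this commit (the function name is illustrative):

package example

import (
	"path"

	"github.com/docker/notary/client"
	"github.com/docker/notary/tuf/data"
)

// resolveTag is a sketch of the lookup order the CLI now uses: targets signed
// into the "targets/releases" delegation win over the base "targets" role.
func resolveTag(repo *client.NotaryRepository, tag string) (*client.TargetWithRole, error) {
	releasesRole := path.Join(data.CanonicalTargetsRole, "releases") // "targets/releases"
	return repo.GetTargetByName(tag, releasesRole, data.CanonicalTargetsRole)
}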


@ -46,7 +46,9 @@ clone git github.com/boltdb/bolt v1.1.0
clone git github.com/docker/distribution 568bf038af6d65b376165d02886b1c7fcaef1f61
clone git github.com/vbatts/tar-split v0.9.11
clone git github.com/docker/notary 45de2828b5e0083bfb4e9a5a781eddb05e2ef9d0
# get desired notary commit, might also need to be updated in Dockerfile
clone git github.com/docker/notary 30c488b3b4c62fdbc2c1eae7cf3b62ca73f95fad
clone git google.golang.org/grpc 174192fc93efcb188fc8f46ca447f0da606b6885 https://github.com/grpc/grpc-go.git
clone git github.com/miekg/pkcs11 80f102b5cac759de406949c47f0928b99bd64cdf
clone git github.com/jfrazelle/go v1.5.1-1


@ -5805,7 +5805,7 @@ func (s *DockerTrustSuite) TestTrustedBuildUntrustedTag(c *check.C) {
c.Fatalf("Expected error on trusted build with untrusted tag: %s\n%s", err, out)
}
if !strings.Contains(out, fmt.Sprintf("no trust data available")) {
if !strings.Contains(out, fmt.Sprintf("trust data unavailable")) {
c.Fatalf("Unexpected output on trusted build with untrusted tag:\n%s", out)
}
}


@ -312,7 +312,7 @@ func (s *DockerTrustSuite) TestUntrustedCreate(c *check.C) {
s.trustedCmd(createCmd)
out, _, err := runCommandWithOutput(createCmd)
c.Assert(err, check.Not(check.IsNil))
c.Assert(string(out), checker.Contains, "no trust data available", check.Commentf("Missing expected output on trusted create:\n%s", out))
c.Assert(string(out), checker.Contains, "trust data unavailable", check.Commentf("Missing expected output on trusted create:\n%s", out))
}


@ -58,7 +58,7 @@ func (s *DockerTrustSuite) TestUntrustedPull(c *check.C) {
out, _, err := runCommandWithOutput(pullCmd)
c.Assert(err, check.NotNil, check.Commentf(out))
c.Assert(string(out), checker.Contains, "no trust data available", check.Commentf(out))
c.Assert(string(out), checker.Contains, "trust data unavailable", check.Commentf(out))
}
func (s *DockerTrustSuite) TestPullWhenCertExpired(c *check.C) {


@ -115,10 +115,17 @@ func (s *DockerTrustSuite) TestTrustedPush(c *check.C) {
out, _, err := runCommandWithOutput(pushCmd)
c.Assert(err, check.IsNil, check.Commentf("Error running trusted push: %s\n%s", err, out))
c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push"))
// Try pull after push
pullCmd := exec.Command(dockerBinary, "pull", repoName)
s.trustedCmd(pullCmd)
out, _, err = runCommandWithOutput(pullCmd)
c.Assert(err, check.IsNil, check.Commentf(out))
c.Assert(string(out), checker.Contains, "Status: Downloaded", check.Commentf(out))
}
func (s *DockerTrustSuite) TestTrustedPushWithEnvPasswords(c *check.C) {
repoName := fmt.Sprintf("%v/dockercli/trusted:latest", privateRegistryURL)
repoName := fmt.Sprintf("%v/dockerclienv/trusted:latest", privateRegistryURL)
// tag the image and upload it to the private registry
dockerCmd(c, "tag", "busybox", repoName)
@ -127,6 +134,13 @@ func (s *DockerTrustSuite) TestTrustedPushWithEnvPasswords(c *check.C) {
out, _, err := runCommandWithOutput(pushCmd)
c.Assert(err, check.IsNil, check.Commentf("Error running trusted push: %s\n%s", err, out))
c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push"))
// Try pull after push
pullCmd := exec.Command(dockerBinary, "pull", repoName)
s.trustedCmd(pullCmd)
out, _, err = runCommandWithOutput(pullCmd)
c.Assert(err, check.IsNil, check.Commentf(out))
c.Assert(string(out), checker.Contains, "Status: Downloaded", check.Commentf(out))
}
// This test ensures backwards compatibility with old ENV variables. Should be
@ -168,7 +182,7 @@ func (s *DockerTrustSuite) TestTrustedPushWithoutServerAndUntrusted(c *check.C)
}
func (s *DockerTrustSuite) TestTrustedPushWithExistingTag(c *check.C) {
repoName := fmt.Sprintf("%v/dockercli/trusted:latest", privateRegistryURL)
repoName := fmt.Sprintf("%v/dockerclitag/trusted:latest", privateRegistryURL)
// tag the image and upload it to the private registry
dockerCmd(c, "tag", "busybox", repoName)
dockerCmd(c, "push", repoName)
@ -178,6 +192,13 @@ func (s *DockerTrustSuite) TestTrustedPushWithExistingTag(c *check.C) {
out, _, err := runCommandWithOutput(pushCmd)
c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out))
c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push with existing tag"))
// Try pull after push
pullCmd := exec.Command(dockerBinary, "pull", repoName)
s.trustedCmd(pullCmd)
out, _, err = runCommandWithOutput(pullCmd)
c.Assert(err, check.IsNil, check.Commentf(out))
c.Assert(string(out), checker.Contains, "Status: Downloaded", check.Commentf(out))
}
func (s *DockerTrustSuite) TestTrustedPushWithExistingSignedTag(c *check.C) {


@ -3087,7 +3087,7 @@ func (s *DockerTrustSuite) TestUntrustedRun(c *check.C) {
c.Fatalf("Error expected when running trusted run with:\n%s", out)
}
if !strings.Contains(string(out), "no trust data available") {
if !strings.Contains(string(out), "trust data unavailable") {
c.Fatalf("Missing expected output on trusted run:\n%s", out)
}
}


@ -28,9 +28,9 @@ const notaryURL = "https://" + notaryHost
func newTestNotary(c *check.C) (*testNotary, error) {
template := `{
"server": {
"addr": "%s",
"tls_key_file": "fixtures/notary/localhost.key",
"tls_cert_file": "fixtures/notary/localhost.cert"
"http_addr": "%s",
"tls_key_file": "%s",
"tls_cert_file": "%s"
},
"trust_service": {
"type": "local",
@ -39,8 +39,11 @@ func newTestNotary(c *check.C) (*testNotary, error) {
"key_algorithm": "ed25519"
},
"logging": {
"level": 5
}
"level": "debug"
},
"storage": {
"backend": "memory"
}
}`
tmp, err := ioutil.TempDir("", "notary-test-")
if err != nil {
@ -51,7 +54,12 @@ func newTestNotary(c *check.C) (*testNotary, error) {
if err != nil {
return nil, err
}
if _, err := fmt.Fprintf(config, template, notaryHost); err != nil {
workingDir, err := os.Getwd()
if err != nil {
return nil, err
}
if _, err := fmt.Fprintf(config, template, notaryHost, filepath.Join(workingDir, "fixtures/notary/localhost.key"), filepath.Join(workingDir, "fixtures/notary/localhost.cert")); err != nil {
os.RemoveAll(tmp)
return nil, err
}


@ -20,4 +20,4 @@ RUN go install \
${NOTARYPKG}/cmd/notary-server
ENTRYPOINT [ "notary-server" ]
CMD [ "-config", "cmd/notary-server/config.json" ]
CMD [ "-config=fixtures/server-config-local.json" ]


@ -38,4 +38,4 @@ RUN go install \
ENTRYPOINT [ "notary-signer" ]
CMD [ "-config=cmd/notary-signer/config.json" ]
CMD [ "-config=fixtures/signer-config-local.json" ]


@ -1,5 +1,52 @@
David Lawrence <david.lawrence@docker.com> (@endophage)
Ying Li <ying.li@docker.com> (@cyli)
Nathan McCauley <nathan.mccauley@docker.com> (@NathanMcCauley)
Derek McGowan <derek@docker.com> (@dmcgowan)
Diogo Monica <diogo@docker.com> (@diogomonica)
# Notary maintainers file
#
# This file describes who runs the docker/notary project and how.
# This is a living document - if you see something out of date or missing, speak up!
#
# It is structured to be consumable by both humans and programs.
# To extract its contents programmatically, use any TOML-compliant parser.
#
# This file is compiled into the MAINTAINERS file in docker/opensource.
#
[Org]
[Org."Core maintainers"]
people = [
"cyli",
"diogomonica",
"dmcgowan",
"endophage",
"nathanmccauley",
]
[people]
# A reference list of all people associated with the project.
# All other sections should refer to people by their canonical key
# in the people section.
# ADD YOURSELF HERE IN ALPHABETICAL ORDER
[people.cyli]
Name = "Ying Li"
Email = "ying.li@docker.com"
GitHub = "cyli"
[people.diogomonica]
Name = "Diogo Monica"
Email = "diogo@docker.com"
GitHub = "diogomonica"
[people.dmcgowan]
Name = "Derek McGowan"
Email = "derek@docker.com"
GitHub = "dmcgowan"
[people.endophage]
Name = "David Lawrence"
Email = "david.lawrence@docker.com"
GitHub = "endophage"
[people.nathanmccauley]
Name = "Nathan McCauley"
Email = "nathan.mccauley@docker.com"
GitHub = "nathanmccauley"


@ -32,7 +32,7 @@ _space := $(empty) $(empty)
# go cover test variables
COVERDIR=.cover
COVERPROFILE=$(COVERDIR)/cover.out
COVERPROFILE?=$(COVERDIR)/cover.out
COVERMODE=count
PKGS = $(shell go list ./... | tr '\n' ' ')
@ -43,8 +43,8 @@ GO_VERSION = $(shell go version | awk '{print $$3}')
.DEFAULT: default
go_version:
ifneq ("$(GO_VERSION)", "go1.5.1")
$(error Requires go version 1.5.1 - found $(GO_VERSION))
ifeq (,$(findstring go1.5.,$(GO_VERSION)))
$(error Requires go version 1.5.x - found $(GO_VERSION))
else
@echo
endif
@ -73,6 +73,11 @@ ${PREFIX}/bin/notary-signer: NOTARY_VERSION $(shell find . -type f -name '*.go')
vet: go_version
@echo "+ $@"
ifeq ($(shell uname -s), Darwin)
@test -z "$(shell find . -iname *test*.go | grep -v _test.go | grep -v Godeps | xargs echo "This file should end with '_test':" | tee /dev/stderr)"
else
@test -z "$(shell find . -iname *test*.go | grep -v _test.go | grep -v Godeps | xargs -r echo "This file should end with '_test':" | tee /dev/stderr)"
endif
@test -z "$$(go tool vet -printf=false . 2>&1 | grep -v Godeps/_workspace/src/ | tee /dev/stderr)"
fmt:


@ -1 +1 @@
1.0-rc1
0.2


@ -87,7 +87,7 @@ curl example.com/install.sh | notary verify example.com/scripts v1 | sh
# Notary Server
Notary Server manages TUF data over an HTTP API compatible with the
[notary client](../notary/).
[notary client](cmd/notary).
It may be configured to use either JWT or HTTP Basic Auth for authentication.
Currently it only supports MySQL for storage of the TUF data, we intend to


@ -1,12 +1,10 @@
package keystoremanager
package certs
import (
"crypto/rand"
"crypto/x509"
"errors"
"fmt"
"path/filepath"
"strings"
"time"
"github.com/Sirupsen/logrus"
@ -15,18 +13,13 @@ import (
"github.com/docker/notary/tuf/signed"
)
// KeyStoreManager is an abstraction around the root and non-root key stores,
// and related CA stores
type KeyStoreManager struct {
KeyStore *trustmanager.KeyFileStore
// Manager is an abstraction around trusted root CA stores
type Manager struct {
trustedCAStore trustmanager.X509Store
trustedCertificateStore trustmanager.X509Store
}
const (
trustDir = "trusted_certificates"
rsaRootKeySize = 4096 // Used for new root keys
)
const trustDir = "trusted_certificates"
// ErrValidationFail is returned when there is no valid trusted certificates
// being served inside of the roots.json
@ -52,9 +45,9 @@ func (err ErrRootRotationFail) Error() string {
return fmt.Sprintf("could not rotate trust to a new trusted root: %s", err.Reason)
}
// NewKeyStoreManager returns an initialized KeyStoreManager, or an error
// if it fails to create the KeyFileStores or load certificates
func NewKeyStoreManager(baseDir string, keyStore *trustmanager.KeyFileStore) (*KeyStoreManager, error) {
// NewManager returns an initialized Manager, or an error
// if it fails to load certificates
func NewManager(baseDir string) (*Manager, error) {
trustPath := filepath.Join(baseDir, trustDir)
// Load all CAs that aren't expired and don't use SHA1
@ -81,60 +74,32 @@ func NewKeyStoreManager(baseDir string, keyStore *trustmanager.KeyFileStore) (*K
return nil, err
}
return &KeyStoreManager{
KeyStore: keyStore,
return &Manager{
trustedCAStore: trustedCAStore,
trustedCertificateStore: trustedCertificateStore,
}, nil
}
// TrustedCertificateStore returns the trusted certificate store being managed
// by this KeyStoreManager
func (km *KeyStoreManager) TrustedCertificateStore() trustmanager.X509Store {
return km.trustedCertificateStore
// by this Manager
func (m *Manager) TrustedCertificateStore() trustmanager.X509Store {
return m.trustedCertificateStore
}
// TrustedCAStore returns the CA store being managed by this KeyStoreManager
func (km *KeyStoreManager) TrustedCAStore() trustmanager.X509Store {
return km.trustedCAStore
// TrustedCAStore returns the CA store being managed by this Manager
func (m *Manager) TrustedCAStore() trustmanager.X509Store {
return m.trustedCAStore
}
// AddTrustedCert adds a cert to the trusted certificate store (not the CA
// store)
func (km *KeyStoreManager) AddTrustedCert(cert *x509.Certificate) {
km.trustedCertificateStore.AddCert(cert)
func (m *Manager) AddTrustedCert(cert *x509.Certificate) {
m.trustedCertificateStore.AddCert(cert)
}
// AddTrustedCACert adds a cert to the trusted CA certificate store
func (km *KeyStoreManager) AddTrustedCACert(cert *x509.Certificate) {
km.trustedCAStore.AddCert(cert)
}
// GenRootKey generates a new root key
func (km *KeyStoreManager) GenRootKey(algorithm string) (string, error) {
var err error
var privKey data.PrivateKey
// We don't want external API callers to rely on internal TUF data types, so
// the API here should continue to receive a string algorithm, and ensure
// that it is downcased
switch strings.ToLower(algorithm) {
case data.RSAKey:
privKey, err = trustmanager.GenerateRSAKey(rand.Reader, rsaRootKeySize)
case data.ECDSAKey:
privKey, err = trustmanager.GenerateECDSAKey(rand.Reader)
default:
return "", fmt.Errorf("only RSA or ECDSA keys are currently supported. Found: %s", algorithm)
}
if err != nil {
return "", fmt.Errorf("failed to generate private key: %v", err)
}
// Changing the root
km.KeyStore.AddKey(privKey.ID(), "root", privKey)
return privKey.ID(), nil
func (m *Manager) AddTrustedCACert(cert *x509.Certificate) {
m.trustedCAStore.AddCert(cert)
}
/*
@ -164,7 +129,7 @@ we are using the current public PKI to validate the first download of the certif
adding an extra layer of security over the normal (SSH style) trust model.
We shall call this: TOFUS.
*/
func (km *KeyStoreManager) ValidateRoot(root *data.Signed, gun string) error {
func (m *Manager) ValidateRoot(root *data.Signed, gun string) error {
logrus.Debugf("entered ValidateRoot with dns: %s", gun)
signedRoot, err := data.RootFromSigned(root)
if err != nil {
@ -179,7 +144,7 @@ func (km *KeyStoreManager) ValidateRoot(root *data.Signed, gun string) error {
}
// Retrieve all the trusted certificates that match this gun
certsForCN, err := km.trustedCertificateStore.GetCertificatesByCN(gun)
certsForCN, err := m.trustedCertificateStore.GetCertificatesByCN(gun)
if err != nil {
// If the error that we get back is different than ErrNoCertificatesFound
// we couldn't check if there are any certificates with this CN already
@ -218,7 +183,7 @@ func (km *KeyStoreManager) ValidateRoot(root *data.Signed, gun string) error {
// Do root certificate rotation: we trust only the certs present in the new root
// First we add all the new certificates (even if they already exist)
for _, cert := range allValidCerts {
err := km.trustedCertificateStore.AddCert(cert)
err := m.trustedCertificateStore.AddCert(cert)
if err != nil {
// If the error is already exists we don't fail the rotation
if _, ok := err.(*trustmanager.ErrCertExists); ok {
@ -232,7 +197,7 @@ func (km *KeyStoreManager) ValidateRoot(root *data.Signed, gun string) error {
// Now we delete old certificates that aren't present in the new root
for certID, cert := range certsToRemove(certsForCN, allValidCerts) {
logrus.Debugf("removing certificate with certID: %s", certID)
err = km.trustedCertificateStore.RemoveCert(cert)
err = m.trustedCertificateStore.RemoveCert(cert)
if err != nil {
logrus.Debugf("failed to remove trusted certificate with keyID: %s, %v", certID, err)
return &ErrRootRotationFail{Reason: "failed to rotate root keys"}


@ -16,6 +16,8 @@ machine:
BASE_STABLE: ../../../$HOME/.gvm/pkgsets/stable/global/$BASE_DIR
# Workaround Circle parsing dumb bugs and/or YAML wonkyness
CIRCLE_PAIN: "mode: set"
# Put the coverage profile somewhere codecov's script can find it
COVERPROFILE: coverage.out
hosts:
# Not used yet
@ -72,8 +74,10 @@ test:
post:
- gvm use stable && make covmerge:
timeout: 600
parallel: true
pwd: $BASE_STABLE
# Report to codecov.io
# - bash <(curl -s https://codecov.io/bash):
# pwd: $BASE_STABLE
- bash <(curl -s https://codecov.io/bash):
parallel: true
pwd: $BASE_STABLE


@ -77,3 +77,26 @@ func (c TufChange) Path() string {
func (c TufChange) Content() []byte {
return c.Data
}
// TufDelegation represents a modification to a target delegation
// this includes creating a delegation. This format is used to avoid
// unexpected race conditions between humans modifying the same delegation
type TufDelegation struct {
NewName string `json:"new_name,omitempty"`
NewThreshold int `json:"threshold,omitempty"`
AddKeys data.KeyList `json:"add_keys,omitempty"`
RemoveKeys []string `json:"remove_keys,omitempty"`
AddPaths []string `json:"add_paths,omitempty"`
RemovePaths []string `json:"remove_paths,omitempty"`
AddPathHashPrefixes []string `json:"add_prefixes,omitempty"`
RemovePathHashPrefixes []string `json:"remove_prefixes,omitempty"`
}
// ToNewRole creates a fresh role object from the TufDelegation data
func (td TufDelegation) ToNewRole(scope string) (*data.Role, error) {
name := scope
if td.NewName != "" {
name = td.NewName
}
return data.NewRole(name, td.NewThreshold, td.AddKeys.IDs(), td.AddPaths, td.AddPathHashPrefixes)
}
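
The TufDelegation payload above is what a delegation change carries through the changelist. A hedged sketch, not part of the diff, of building a create-style payload and expanding it into a role the way the changelist application code does (no keys are attached here, so AddKeys stays empty):

package example

import (
	"encoding/json"
	"fmt"

	"github.com/docker/notary/client/changelist"
)

// exampleDelegationChange is a sketch: marshal a create-delegation payload as
// it would be stored in a changelist entry, then expand it into a role scoped
// to "targets/releases" via ToNewRole.
func exampleDelegationChange() error {
	td := changelist.TufDelegation{
		NewThreshold: 1,
		AddPaths:     []string{""}, // "" delegates every path under the role
	}
	raw, err := json.Marshal(&td)
	if err != nil {
		return err
	}
	fmt.Printf("changelist content: %s\n", raw)

	role, err := td.ToNewRole("targets/releases")
	if err != nil {
		return err
	}
	fmt.Printf("new role %q with threshold %d\n", role.Name, role.Threshold)
	return nil
}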


@ -9,11 +9,13 @@ import (
"net/http"
"os"
"path/filepath"
"strings"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/notary/certs"
"github.com/docker/notary/client/changelist"
"github.com/docker/notary/cryptoservice"
"github.com/docker/notary/keystoremanager"
"github.com/docker/notary/trustmanager"
"github.com/docker/notary/tuf"
tufclient "github.com/docker/notary/tuf/client"
@ -52,6 +54,17 @@ type ErrExpired struct {
signed.ErrExpired
}
// ErrInvalidRemoteRole is returned when the server is requested to manage
// an unsupported key type
type ErrInvalidRemoteRole struct {
Role string
}
func (e ErrInvalidRemoteRole) Error() string {
return fmt.Sprintf(
"notary does not support the server managing the %s key", e.Role)
}
const (
tufDir = "tuf"
)
@ -63,23 +76,67 @@ var ErrRepositoryNotExist = errors.New("repository does not exist")
// NotaryRepository stores all the information needed to operate on a notary
// repository.
type NotaryRepository struct {
baseDir string
gun string
baseURL string
tufRepoPath string
fileStore store.MetadataStore
CryptoService signed.CryptoService
tufRepo *tuf.Repo
roundTrip http.RoundTripper
KeyStoreManager *keystoremanager.KeyStoreManager
baseDir string
gun string
baseURL string
tufRepoPath string
fileStore store.MetadataStore
CryptoService signed.CryptoService
tufRepo *tuf.Repo
roundTrip http.RoundTripper
CertManager *certs.Manager
}
// repositoryFromKeystores is a helper function for NewNotaryRepository that
// takes some basic NotaryRepository parameters as well as keystores (in order
// of usage preference), and returns a NotaryRepository.
func repositoryFromKeystores(baseDir, gun, baseURL string, rt http.RoundTripper,
keyStores []trustmanager.KeyStore) (*NotaryRepository, error) {
certManager, err := certs.NewManager(baseDir)
if err != nil {
return nil, err
}
cryptoService := cryptoservice.NewCryptoService(gun, keyStores...)
nRepo := &NotaryRepository{
gun: gun,
baseDir: baseDir,
baseURL: baseURL,
tufRepoPath: filepath.Join(baseDir, tufDir, filepath.FromSlash(gun)),
CryptoService: cryptoService,
roundTrip: rt,
CertManager: certManager,
}
fileStore, err := store.NewFilesystemStore(
nRepo.tufRepoPath,
"metadata",
"json",
"",
)
if err != nil {
return nil, err
}
nRepo.fileStore = fileStore
return nRepo, nil
}
// Target represents a simplified version of the data TUF operates on, so external
// applications don't have to depend on tuf data types.
type Target struct {
Name string
Hashes data.Hashes
Length int64
Name string // the name of the target
Hashes data.Hashes // the hash of the target
Length int64 // the size in bytes of the target
}
// TargetWithRole represents a Target that exists in a particular role - this is
// produced by ListTargets and GetTargetByName
type TargetWithRole struct {
Target
Role string
}
// NewTarget is a helper method that returns a Target
@ -99,18 +156,48 @@ func NewTarget(targetName string, targetPath string) (*Target, error) {
// Initialize creates a new repository by using rootKey as the root Key for the
// TUF repository.
func (r *NotaryRepository) Initialize(rootKeyID string) error {
func (r *NotaryRepository) Initialize(rootKeyID string, serverManagedRoles ...string) error {
privKey, _, err := r.CryptoService.GetPrivateKey(rootKeyID)
if err != nil {
return err
}
rootCert, err := cryptoservice.GenerateCertificate(privKey, r.gun)
// currently we only support server managing timestamps and snapshots, and
// nothing else - timestamps are always managed by the server, and implicit
// (do not have to be passed in as part of `serverManagedRoles`, so that
// the API of Initialize doesn't change).
var serverManagesSnapshot bool
locallyManagedKeys := []string{
data.CanonicalTargetsRole,
data.CanonicalSnapshotRole,
// root is also locally managed, but that should have been created
// already
}
remotelyManagedKeys := []string{data.CanonicalTimestampRole}
for _, role := range serverManagedRoles {
switch role {
case data.CanonicalTimestampRole:
continue // timestamp is already in the right place
case data.CanonicalSnapshotRole:
// because we put Snapshot last
locallyManagedKeys = []string{data.CanonicalTargetsRole}
remotelyManagedKeys = append(
remotelyManagedKeys, data.CanonicalSnapshotRole)
serverManagesSnapshot = true
default:
return ErrInvalidRemoteRole{Role: role}
}
}
// Hard-coded policy: the generated certificate expires in 10 years.
startTime := time.Now()
rootCert, err := cryptoservice.GenerateCertificate(
privKey, r.gun, startTime, startTime.AddDate(10, 0, 0))
if err != nil {
return err
}
r.KeyStoreManager.AddTrustedCert(rootCert)
r.CertManager.AddTrustedCert(rootCert)
// The root key gets stored in the TUF metadata X509 encoded, linking
// the tuf root.json to our X509 PKI.
@ -127,58 +214,45 @@ func (r *NotaryRepository) Initialize(rootKeyID string) error {
return fmt.Errorf("invalid format for root key: %s", privKey.Algorithm())
}
// All the timestamp keys are generated by the remote server.
remote, err := getRemoteStore(r.baseURL, r.gun, r.roundTrip)
if err != nil {
return err
}
rawTSKey, err := remote.GetKey("timestamp")
if err != nil {
return err
}
timestampKey, err := data.UnmarshalPublicKey(rawTSKey)
if err != nil {
return err
}
logrus.Debugf("got remote %s timestamp key with keyID: %s", timestampKey.Algorithm(), timestampKey.ID())
// This is currently hardcoding the targets and snapshots keys to ECDSA
// Targets and snapshot keys are always generated locally.
targetsKey, err := r.CryptoService.Create("targets", data.ECDSAKey)
if err != nil {
return err
}
snapshotKey, err := r.CryptoService.Create("snapshot", data.ECDSAKey)
if err != nil {
return err
}
kdb := keys.NewDB()
kdb.AddKey(rootKey)
kdb.AddKey(targetsKey)
kdb.AddKey(snapshotKey)
kdb.AddKey(timestampKey)
err = initRoles(kdb, rootKey, targetsKey, snapshotKey, timestampKey)
err = addKeyForRole(kdb, data.CanonicalRootRole, rootKey)
if err != nil {
return err
}
// we want to create all the local keys first so we don't have to
// make unnecessary network calls
for _, role := range locallyManagedKeys {
// This is currently hardcoding the keys to ECDSA.
key, err := r.CryptoService.Create(role, data.ECDSAKey)
if err != nil {
return err
}
if err := addKeyForRole(kdb, role, key); err != nil {
return err
}
}
for _, role := range remotelyManagedKeys {
// This key is generated by the remote server.
key, err := getRemoteKey(r.baseURL, r.gun, role, r.roundTrip)
if err != nil {
return err
}
logrus.Debugf("got remote %s %s key with keyID: %s",
role, key.Algorithm(), key.ID())
if err := addKeyForRole(kdb, role, key); err != nil {
return err
}
}
r.tufRepo = tuf.NewRepo(kdb, r.CryptoService)
err = r.tufRepo.InitRoot(false)
if err != nil {
logrus.Debug("Error on InitRoot: ", err.Error())
switch err.(type) {
case signed.ErrInsufficientSignatures, trustmanager.ErrPasswordInvalid:
default:
return err
}
return err
}
err = r.tufRepo.InitTargets()
_, err = r.tufRepo.InitTargets(data.CanonicalTargetsRole)
if err != nil {
logrus.Debug("Error on InitTargets: ", err.Error())
return err
@ -189,11 +263,120 @@ func (r *NotaryRepository) Initialize(rootKeyID string) error {
return err
}
return r.saveMetadata()
return r.saveMetadata(serverManagesSnapshot)
}
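
With the variadic serverManagedRoles argument, a caller can hand snapshot signing to the notary server at initialization time; the timestamp is always server-managed, and the root and targets keys stay local. A hedged sketch of that call sequence, not part of the diff; it assumes the root key is created through the repository's CryptoService, which is how keys are created elsewhere in this file:

package example

import (
	"github.com/docker/notary/client"
	"github.com/docker/notary/tuf/data"
)

// initWithServerSnapshot is a sketch: initialize a repository whose snapshot
// (and, implicitly, timestamp) metadata will be signed by the notary server.
func initWithServerSnapshot(repo *client.NotaryRepository) error {
	// Create a local root key; Initialize creates the targets key itself
	// (and a snapshot key only when the server does not manage it).
	rootPub, err := repo.CryptoService.Create(data.CanonicalRootRole, data.ECDSAKey)
	if err != nil {
		return err
	}
	// Passing the snapshot role delegates snapshot signing to the server.
	if err := repo.Initialize(rootPub.ID(), data.CanonicalSnapshotRole); err != nil {
		return err
	}
	return repo.Publish()
}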
// AddTarget adds a new target to the repository, forcing a timestamps check from TUF
func (r *NotaryRepository) AddTarget(target *Target) error {
// adds a TUF Change template to the given roles
func addChange(cl *changelist.FileChangelist, c changelist.Change, roles ...string) error {
if len(roles) == 0 {
roles = []string{data.CanonicalTargetsRole}
}
var changes []changelist.Change
for _, role := range roles {
role = strings.ToLower(role)
// Ensure we can only add targets to the CanonicalTargetsRole,
// or a Delegation role (which is <CanonicalTargetsRole>/something else)
if role != data.CanonicalTargetsRole && !data.IsDelegation(role) {
return data.ErrInvalidRole{
Role: role,
Reason: "cannot add targets to this role",
}
}
changes = append(changes, changelist.NewTufChange(
c.Action(),
role,
c.Type(),
c.Path(),
c.Content(),
))
}
for _, c := range changes {
if err := cl.Add(c); err != nil {
return err
}
}
return nil
}
// AddDelegation creates a new changelist entry to add a delegation to the repository
// when the changelist gets applied at publish time. This does not do any validation
// other than checking the name of the delegation to add - all that will happen
// at publish time.
func (r *NotaryRepository) AddDelegation(name string, threshold int,
delegationKeys []data.PublicKey, paths []string) error {
if !data.IsDelegation(name) {
return data.ErrInvalidRole{Role: name, Reason: "invalid delegation role name"}
}
cl, err := changelist.NewFileChangelist(filepath.Join(r.tufRepoPath, "changelist"))
if err != nil {
return err
}
defer cl.Close()
logrus.Debugf(`Adding delegation "%s" with threshold %d, and %d keys\n`,
name, threshold, len(delegationKeys))
tdJSON, err := json.Marshal(&changelist.TufDelegation{
NewThreshold: threshold,
AddKeys: data.KeyList(delegationKeys),
AddPaths: paths,
})
if err != nil {
return err
}
template := changelist.NewTufChange(
changelist.ActionCreate,
name,
changelist.TypeTargetsDelegation,
"", // no path
tdJSON,
)
return addChange(cl, template, name)
}
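
A short usage sketch for AddDelegation, not part of the diff: it stages a "targets/releases" delegation trusting one public key for all paths, then publishes. Where the delegate's public key comes from (for example a collaborator's certificate) is out of scope here:

package example

import (
	"path"

	"github.com/docker/notary/client"
	"github.com/docker/notary/tuf/data"
)

// delegateReleases is a sketch: stage a delegation changelist entry and
// publish it. Nothing is validated until Publish applies the changelist.
func delegateReleases(repo *client.NotaryRepository, delegateKey data.PublicKey) error {
	releasesRole := path.Join(data.CanonicalTargetsRole, "releases")
	err := repo.AddDelegation(
		releasesRole,                  // role name must live under "targets/"
		1,                             // signature threshold for the role
		[]data.PublicKey{delegateKey}, // keys trusted to sign for the role
		[]string{""},                  // "" delegates every path
	)
	if err != nil {
		return err
	}
	return repo.Publish()
}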
// RemoveDelegation creates a new changelist entry to remove a delegation from
// the repository when the changelist gets applied at publish time.
// This does not validate that the delegation exists, since one might exist
// after applying all changes.
func (r *NotaryRepository) RemoveDelegation(name string) error {
if !data.IsDelegation(name) {
return data.ErrInvalidRole{Role: name, Reason: "invalid delegation role name"}
}
cl, err := changelist.NewFileChangelist(filepath.Join(r.tufRepoPath, "changelist"))
if err != nil {
return err
}
defer cl.Close()
logrus.Debugf(`Removing delegation "%s"\n`, name)
template := changelist.NewTufChange(
changelist.ActionDelete,
name,
changelist.TypeTargetsDelegation,
"", // no path
nil,
)
return addChange(cl, template, name)
}
// AddTarget creates new changelist entries to add a target to the given roles
// in the repository when the changelist gets applied at publish time.
// If roles are unspecified, the default role is "targets".
func (r *NotaryRepository) AddTarget(target *Target, roles ...string) error {
cl, err := changelist.NewFileChangelist(filepath.Join(r.tufRepoPath, "changelist"))
if err != nil {
return err
@ -207,32 +390,35 @@ func (r *NotaryRepository) AddTarget(target *Target) error {
return err
}
c := changelist.NewTufChange(changelist.ActionCreate, changelist.ScopeTargets, "target", target.Name, metaJSON)
err = cl.Add(c)
if err != nil {
return err
}
return nil
template := changelist.NewTufChange(
changelist.ActionCreate, "", changelist.TypeTargetsTarget,
target.Name, metaJSON)
return addChange(cl, template, roles...)
}
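
Because AddTarget now takes a variadic role list, a signer holding only a delegation key can stage a target into that delegation instead of the base targets role. A hedged sketch, not part of the diff, using the NewTarget helper whose signature appears in the hunk header above:

package example

import (
	"path"

	"github.com/docker/notary/client"
	"github.com/docker/notary/tuf/data"
)

// signIntoReleases is a sketch: stage a target under the "targets/releases"
// delegation and publish it.
func signIntoReleases(repo *client.NotaryRepository, name, targetPath string) error {
	target, err := client.NewTarget(name, targetPath) // hashes the file at targetPath
	if err != nil {
		return err
	}
	releasesRole := path.Join(data.CanonicalTargetsRole, "releases")
	if err := repo.AddTarget(target, releasesRole); err != nil {
		return err
	}
	return repo.Publish()
}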
// RemoveTarget creates a new changelist entry to remove a target from the repository
// when the changelist gets applied at publish time
func (r *NotaryRepository) RemoveTarget(targetName string) error {
// RemoveTarget creates new changelist entries to remove a target from the given
// roles in the repository when the changelist gets applied at publish time.
// If roles are unspecified, the default role is "targets".
func (r *NotaryRepository) RemoveTarget(targetName string, roles ...string) error {
cl, err := changelist.NewFileChangelist(filepath.Join(r.tufRepoPath, "changelist"))
if err != nil {
return err
}
logrus.Debugf("Removing target \"%s\"", targetName)
c := changelist.NewTufChange(changelist.ActionDelete, changelist.ScopeTargets, "target", targetName, nil)
err = cl.Add(c)
if err != nil {
return err
}
return nil
template := changelist.NewTufChange(changelist.ActionDelete, "",
changelist.TypeTargetsTarget, targetName, nil)
return addChange(cl, template, roles...)
}
// ListTargets lists all targets for the current repository
func (r *NotaryRepository) ListTargets() ([]*Target, error) {
// ListTargets lists all targets for the current repository. The list of
// roles should be passed in order from highest to lowest priority.
// IMPORTANT: if you pass a set of roles such as [ "targets/a", "targets/x"
// "targets/a/b" ], even though "targets/a/b" is part of the "targets/a" subtree
// its entries will be strictly shadowed by those in other parts of the "targets/a"
// subtree and also the "targets/x" subtree, as we will defer parsing it until
// we explicitly reach it in our iteration of the provided list of roles.
func (r *NotaryRepository) ListTargets(roles ...string) ([]*TargetWithRole, error) {
c, err := r.bootstrapClient()
if err != nil {
return nil, err
@ -246,17 +432,61 @@ func (r *NotaryRepository) ListTargets() ([]*Target, error) {
return nil, err
}
var targetList []*Target
for name, meta := range r.tufRepo.Targets["targets"].Signed.Targets {
target := &Target{Name: name, Hashes: meta.Hashes, Length: meta.Length}
targetList = append(targetList, target)
if len(roles) == 0 {
roles = []string{data.CanonicalTargetsRole}
}
targets := make(map[string]*TargetWithRole)
for _, role := range roles {
// we don't need to do anything special with removing role from
// roles because listSubtree always processes role and only excludes
// descendent delegations that appear in roles.
r.listSubtree(targets, role, roles...)
}
var targetList []*TargetWithRole
for _, v := range targets {
targetList = append(targetList, v)
}
return targetList, nil
}
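
The shadowing rule spelled out above means the order of the roles argument decides which role wins when the same target name is signed in more than one place. A brief sketch, not part of the diff, that lists with the releases delegation taking priority and reports which role supplied each entry:

package example

import (
	"fmt"
	"path"

	"github.com/docker/notary/client"
	"github.com/docker/notary/tuf/data"
)

// listByPriority is a sketch: entries in "targets/releases" shadow
// identically named entries in the base "targets" role.
func listByPriority(repo *client.NotaryRepository) error {
	releasesRole := path.Join(data.CanonicalTargetsRole, "releases")
	targets, err := repo.ListTargets(releasesRole, data.CanonicalTargetsRole)
	if err != nil {
		return err
	}
	for _, t := range targets {
		// t.Role reports which role's metadata supplied this entry.
		fmt.Printf("%s\t%d bytes\tsigned in %s\n", t.Name, t.Length, t.Role)
	}
	return nil
}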
// GetTargetByName returns a target given a name
func (r *NotaryRepository) GetTargetByName(name string) (*Target, error) {
func (r *NotaryRepository) listSubtree(targets map[string]*TargetWithRole, role string, exclude ...string) {
excl := make(map[string]bool)
for _, r := range exclude {
excl[r] = true
}
roles := []string{role}
for len(roles) > 0 {
role = roles[0]
roles = roles[1:]
tgts, ok := r.tufRepo.Targets[role]
if !ok {
// not every role has to exist
continue
}
for name, meta := range tgts.Signed.Targets {
if _, ok := targets[name]; !ok {
targets[name] = &TargetWithRole{
Target: Target{Name: name, Hashes: meta.Hashes, Length: meta.Length}, Role: role}
}
}
for _, d := range tgts.Signed.Delegations.Roles {
if !excl[d.Name] {
roles = append(roles, d.Name)
}
}
}
}
// GetTargetByName returns a target given a name. If no roles are passed
// it uses the targets role and does a search of the entire delegation
// graph, finding the first entry in a breadth first search of the delegations.
// If roles are passed, they should be passed in descending priority and
// the target entry found in the subtree of the highest priority role
// will be returned
// See the IMPORTANT section on ListTargets above. Those roles also apply here.
func (r *NotaryRepository) GetTargetByName(name string, roles ...string) (*TargetWithRole, error) {
c, err := r.bootstrapClient()
if err != nil {
return nil, err
@ -270,14 +500,18 @@ func (r *NotaryRepository) GetTargetByName(name string) (*Target, error) {
return nil, err
}
meta, err := c.TargetMeta(name)
if meta == nil {
return nil, fmt.Errorf("No trust data for %s", name)
} else if err != nil {
return nil, err
if len(roles) == 0 {
roles = append(roles, data.CanonicalTargetsRole)
}
for _, role := range roles {
meta, foundRole := c.TargetMeta(role, name, roles...)
if meta != nil {
return &TargetWithRole{
Target: Target{Name: name, Hashes: meta.Hashes, Length: meta.Length}, Role: foundRole}, nil
}
}
return nil, fmt.Errorf("No trust data for %s", name)
return &Target{Name: name, Hashes: meta.Hashes, Length: meta.Length}, nil
}
// GetChangelist returns the list of the repository's unpublished changes
@ -294,30 +528,30 @@ func (r *NotaryRepository) GetChangelist() (changelist.Changelist, error) {
// Publish pushes the local changes in signed material to the remote notary-server
// Conceptually it performs an operation similar to a `git rebase`
func (r *NotaryRepository) Publish() error {
var updateRoot bool
var root *data.Signed
var initialPublish bool
// attempt to initialize the repo from the remote store
c, err := r.bootstrapClient()
if err != nil {
if _, ok := err.(store.ErrMetaNotFound); ok {
// if the remote store return a 404 (translated into ErrMetaNotFound),
// the repo hasn't been initialized yet. Attempt to load it from disk.
// there is no trust data for yet. Attempt to load it from disk.
err := r.bootstrapRepo()
if err != nil {
// Repo hasn't been initialized, It must be initialized before
// it can be published. Return an error and let caller determine
// what it wants to do.
logrus.Debug(err.Error())
logrus.Debug("Repository not initialized during Publish")
return &ErrRepoNotInitialized{}
}
// We had local data but the server doesn't know about the repo yet,
// ensure we will push the initial root file
root, err = r.tufRepo.Root.ToSigned()
if err != nil {
// There are lots of reasons there might be an error, such as
// corrupt metadata. We need better errors from bootstrapRepo.
logrus.Debugf("Unable to load repository from local files: %s",
err.Error())
if _, ok := err.(store.ErrMetaNotFound); ok {
return &ErrRepoNotInitialized{}
}
return err
}
updateRoot = true
// We had local data but the server doesn't know about the repo yet,
// ensure we will push the initial root and targets file. Either or
// both of the root and targets may not be marked as Dirty, since
// there may not be any changes that update them, so use a
// different boolean.
initialPublish = true
} else {
// The remote store returned an error other than 404. We're
// unable to determine if the repo has been initialized or not.
@ -326,8 +560,8 @@ func (r *NotaryRepository) Publish() error {
}
} else {
// If we were successfully able to bootstrap the client (which only pulls
// root.json), update it the rest of the tuf metadata in preparation for
// applying the changelist.
// root.json), update it with the rest of the tuf metadata in
// preparation for applying the changelist.
err = c.Update()
if err != nil {
if err, ok := err.(signed.ErrExpired); ok {
@ -347,24 +581,53 @@ func (r *NotaryRepository) Publish() error {
return err
}
// these are the tuf files we will need to update, serialized as JSON before
// we send anything to remote
updatedFiles := make(map[string][]byte)
// check if our root file is nearing expiry. Resign if it is.
if nearExpiry(r.tufRepo.Root) || r.tufRepo.Root.Dirty {
if nearExpiry(r.tufRepo.Root) || r.tufRepo.Root.Dirty || initialPublish {
rootJSON, err := serializeCanonicalRole(r.tufRepo, data.CanonicalRootRole)
if err != nil {
return err
}
root, err = r.tufRepo.SignRoot(data.DefaultExpires("root"))
if err != nil {
updatedFiles[data.CanonicalRootRole] = rootJSON
}
// iterate through all the targets files - if they are dirty, sign and update
for roleName, roleObj := range r.tufRepo.Targets {
if roleObj.Dirty || (roleName == data.CanonicalTargetsRole && initialPublish) {
targetsJSON, err := serializeCanonicalRole(r.tufRepo, roleName)
if err != nil {
return err
}
updatedFiles[roleName] = targetsJSON
}
}
// if we initialized the repo while designating the server as the snapshot
// signer, then there won't be a snapshots file. However, we might now
// have a local key (if there was a rotation), so initialize one.
if r.tufRepo.Snapshot == nil {
if err := r.tufRepo.InitSnapshot(); err != nil {
return err
}
updateRoot = true
}
// we will always resign targets and snapshots
targets, err := r.tufRepo.SignTargets("targets", data.DefaultExpires("targets"))
if err != nil {
return err
}
snapshot, err := r.tufRepo.SignSnapshot(data.DefaultExpires("snapshot"))
if err != nil {
snapshotJSON, err := serializeCanonicalRole(
r.tufRepo, data.CanonicalSnapshotRole)
if err == nil {
// Only update the snapshot if we've successfully signed it.
updatedFiles[data.CanonicalSnapshotRole] = snapshotJSON
} else if _, ok := err.(signed.ErrNoKeys); ok {
// If signing fails due to us not having the snapshot key, then
// assume the server is going to sign, and do not include any snapshot
// data.
logrus.Debugf("Client does not have the key to sign snapshot. " +
"Assuming that server should sign the snapshot.")
} else {
logrus.Debugf("Client was unable to sign the snapshot: %s", err.Error())
return err
}
@ -373,27 +636,7 @@ func (r *NotaryRepository) Publish() error {
return err
}
// ensure we can marshal all the json before sending anything to remote
targetsJSON, err := json.Marshal(targets)
if err != nil {
return err
}
snapshotJSON, err := json.Marshal(snapshot)
if err != nil {
return err
}
update := make(map[string][]byte)
// if we need to update the root, marshal it and push the update to remote
if updateRoot {
rootJSON, err := json.Marshal(root)
if err != nil {
return err
}
update["root"] = rootJSON
}
update["targets"] = targetsJSON
update["snapshot"] = snapshotJSON
err = remote.SetMultiMeta(update)
err = remote.SetMultiMeta(updatedFiles)
if err != nil {
return err
}
@ -407,6 +650,11 @@ func (r *NotaryRepository) Publish() error {
return nil
}
// bootstrapRepo loads the repository from the local file system. This attempts
// to load metadata for all roles. Since server snapshots are supported,
// if the snapshot metadata fails to load, that's ok.
// This can also be unified with some cache reading tools from tuf/client.
// This assumes that bootstrapRepo is only used by Publish()
func (r *NotaryRepository) bootstrapRepo() error {
kdb := keys.NewDB()
tufRepo := tuf.NewRepo(kdb, r.CryptoService)
@ -435,30 +683,32 @@ func (r *NotaryRepository) bootstrapRepo() error {
return err
}
tufRepo.SetTargets("targets", targets)
snapshotJSON, err := r.fileStore.GetMeta("snapshot", 0)
if err != nil {
if err == nil {
snapshot := &data.SignedSnapshot{}
err = json.Unmarshal(snapshotJSON, snapshot)
if err != nil {
return err
}
tufRepo.SetSnapshot(snapshot)
} else if _, ok := err.(store.ErrMetaNotFound); !ok {
return err
}
snapshot := &data.SignedSnapshot{}
err = json.Unmarshal(snapshotJSON, snapshot)
if err != nil {
return err
}
tufRepo.SetSnapshot(snapshot)
r.tufRepo = tufRepo
return nil
}
func (r *NotaryRepository) saveMetadata() error {
func (r *NotaryRepository) saveMetadata(ignoreSnapshot bool) error {
logrus.Debugf("Saving changes to Trusted Collection.")
signedRoot, err := r.tufRepo.SignRoot(data.DefaultExpires("root"))
rootJSON, err := serializeCanonicalRole(r.tufRepo, data.CanonicalRootRole)
if err != nil {
return err
}
rootJSON, err := json.Marshal(signedRoot)
err = r.fileStore.SetMeta(data.CanonicalRootRole, rootJSON)
if err != nil {
return err
}
@ -476,27 +726,22 @@ func (r *NotaryRepository) saveMetadata() error {
targetsToSave[t] = targetsJSON
}
signedSnapshot, err := r.tufRepo.SignSnapshot(data.DefaultExpires("snapshot"))
if err != nil {
return err
}
snapshotJSON, err := json.Marshal(signedSnapshot)
if err != nil {
return err
}
err = r.fileStore.SetMeta("root", rootJSON)
if err != nil {
return err
}
for role, blob := range targetsToSave {
parentDir := filepath.Dir(role)
os.MkdirAll(parentDir, 0755)
r.fileStore.SetMeta(role, blob)
}
return r.fileStore.SetMeta("snapshot", snapshotJSON)
if ignoreSnapshot {
return nil
}
snapshotJSON, err := serializeCanonicalRole(r.tufRepo, data.CanonicalSnapshotRole)
if err != nil {
return err
}
return r.fileStore.SetMeta(data.CanonicalSnapshotRole, snapshotJSON)
}
func (r *NotaryRepository) bootstrapClient() (*tufclient.Client, error) {
@ -515,11 +760,15 @@ func (r *NotaryRepository) bootstrapClient() (*tufclient.Client, error) {
// the store and it doesn't know about the repo.
return nil, err
}
rootJSON, err = r.fileStore.GetMeta("root", maxSize)
if err != nil {
// if cache didn't return a root, we cannot proceed
return nil, store.ErrMetaNotFound{}
result, cacheErr := r.fileStore.GetMeta("root", maxSize)
if cacheErr != nil {
// if cache didn't return a root, we cannot proceed - just return
// the original error.
return nil, err
}
rootJSON = result
logrus.Debugf(
"Using local cache instead of remote due to failure: %s", err.Error())
}
// can't just unmarshal into SignedRoot because validate root
// needs the root.Signed field to still be []byte for signature
@ -530,7 +779,7 @@ func (r *NotaryRepository) bootstrapClient() (*tufclient.Client, error) {
return nil, err
}
err = r.KeyStoreManager.ValidateRoot(root, r.gun)
err = r.CertManager.ValidateRoot(root, r.gun)
if err != nil {
return nil, err
}
@ -555,21 +804,32 @@ func (r *NotaryRepository) bootstrapClient() (*tufclient.Client, error) {
), nil
}
// RotateKeys removes all existing keys associated with role and adds
// the keys specified by keyIDs to the role. These changes are staged
// in a changelist until publish is called.
func (r *NotaryRepository) RotateKeys() error {
for _, role := range []string{"targets", "snapshot"} {
key, err := r.CryptoService.Create(role, data.ECDSAKey)
if err != nil {
return err
}
err = r.rootFileKeyChange(role, changelist.ActionCreate, key)
if err != nil {
return err
}
// RotateKey removes all existing keys associated with the role, and either
// creates and adds one new key or delegates managing the key to the server.
// These changes are staged in a changelist until publish is called.
func (r *NotaryRepository) RotateKey(role string, serverManagesKey bool) error {
if role == data.CanonicalRootRole || role == data.CanonicalTimestampRole {
return fmt.Errorf(
"notary does not currently support rotating the %s key", role)
}
return nil
if serverManagesKey && role == data.CanonicalTargetsRole {
return ErrInvalidRemoteRole{Role: data.CanonicalTargetsRole}
}
var (
pubKey data.PublicKey
err error
)
if serverManagesKey {
pubKey, err = getRemoteKey(r.baseURL, r.gun, role, r.roundTrip)
} else {
pubKey, err = r.CryptoService.Create(role, data.ECDSAKey)
}
if err != nil {
return err
}
return r.rootFileKeyChange(role, changelist.ActionCreate, pubKey)
}
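
A short sketch of the two rotation modes RotateKey supports, not part of the diff: rotating the targets key to a fresh local key, and handing the snapshot key to the server (root and timestamp rotation, and a server-managed targets key, are rejected above):

package example

import (
	"github.com/docker/notary/client"
	"github.com/docker/notary/tuf/data"
)

// rotateKeys is a sketch: rotations are staged as changelist entries and take
// effect only when Publish is called.
func rotateKeys(repo *client.NotaryRepository) error {
	// Replace the targets key with a new locally held ECDSA key.
	if err := repo.RotateKey(data.CanonicalTargetsRole, false); err != nil {
		return err
	}
	// Let the notary server hold and manage the snapshot key from now on.
	if err := repo.RotateKey(data.CanonicalSnapshotRole, true); err != nil {
		return err
	}
	return repo.Publish()
}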
func (r *NotaryRepository) rootFileKeyChange(role, action string, key data.PublicKey) error {


@ -2,7 +2,9 @@ package client
import (
"encoding/json"
"fmt"
"net/http"
"path"
"time"
"github.com/Sirupsen/logrus"
@ -36,10 +38,11 @@ func applyChangelist(repo *tuf.Repo, cl changelist.Changelist) error {
if err != nil {
return err
}
switch c.Scope() {
case changelist.ScopeTargets:
isDel := data.IsDelegation(c.Scope())
switch {
case c.Scope() == changelist.ScopeTargets || isDel:
err = applyTargetsChange(repo, c)
case changelist.ScopeRoot:
case c.Scope() == changelist.ScopeRoot:
err = applyRootChange(repo, c)
default:
logrus.Debug("scope not supported: ", c.Scope())
@ -54,6 +57,89 @@ func applyChangelist(repo *tuf.Repo, cl changelist.Changelist) error {
}
func applyTargetsChange(repo *tuf.Repo, c changelist.Change) error {
switch c.Type() {
case changelist.TypeTargetsTarget:
return changeTargetMeta(repo, c)
case changelist.TypeTargetsDelegation:
return changeTargetsDelegation(repo, c)
default:
return fmt.Errorf("only target meta and delegations changes supported")
}
}
func changeTargetsDelegation(repo *tuf.Repo, c changelist.Change) error {
switch c.Action() {
case changelist.ActionCreate:
td := changelist.TufDelegation{}
err := json.Unmarshal(c.Content(), &td)
if err != nil {
return err
}
r, err := repo.GetDelegation(c.Scope())
if _, ok := err.(data.ErrNoSuchRole); err != nil && !ok {
// error that wasn't ErrNoSuchRole
return err
}
if err == nil {
// role existed
return data.ErrInvalidRole{
Role: c.Scope(),
Reason: "cannot create a role that already exists",
}
}
// role doesn't exist, create brand new
r, err = td.ToNewRole(c.Scope())
if err != nil {
return err
}
return repo.UpdateDelegations(r, td.AddKeys)
case changelist.ActionUpdate:
td := changelist.TufDelegation{}
err := json.Unmarshal(c.Content(), &td)
if err != nil {
return err
}
r, err := repo.GetDelegation(c.Scope())
if err != nil {
return err
}
// role exists, merge
if err := r.AddPaths(td.AddPaths); err != nil {
return err
}
if err := r.AddPathHashPrefixes(td.AddPathHashPrefixes); err != nil {
return err
}
r.RemoveKeys(td.RemoveKeys)
r.RemovePaths(td.RemovePaths)
r.RemovePathHashPrefixes(td.RemovePathHashPrefixes)
return repo.UpdateDelegations(r, td.AddKeys)
case changelist.ActionDelete:
r := data.Role{Name: c.Scope()}
return repo.DeleteDelegation(r)
default:
return fmt.Errorf("unsupported action against delegations: %s", c.Action())
}
}
// applies a function repeatedly, falling back on the parent role, until it no
// longer can
func doWithRoleFallback(role string, doFunc func(string) error) error {
for role == data.CanonicalTargetsRole || data.IsDelegation(role) {
err := doFunc(role)
if err == nil {
return nil
}
if _, ok := err.(data.ErrInvalidRole); !ok {
return err
}
role = path.Dir(role)
}
return data.ErrInvalidRole{Role: role}
}
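
doWithRoleFallback retries the operation against each parent role (via path.Dir) whenever it fails with ErrInvalidRole, stopping once it reaches the base "targets" role; this is how a change scoped to a delegation can still land in an ancestor role that actually exists. A standard-library-only sketch, not part of the diff, of the parent chain it walks:

package example

import (
	"fmt"
	"path"
	"strings"
)

// showFallbackChain is a sketch: print the chain of roles the fallback would
// try for a delegation name, e.g. "targets/a/b" -> "targets/a" -> "targets".
func showFallbackChain(role string) {
	for role == "targets" || strings.HasPrefix(role, "targets/") {
		fmt.Println(role)
		if role == "targets" {
			return
		}
		role = path.Dir(role) // fall back to the parent role
	}
}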
func changeTargetMeta(repo *tuf.Repo, c changelist.Change) error {
var err error
switch c.Action() {
case changelist.ActionCreate:
@ -64,17 +150,29 @@ func applyTargetsChange(repo *tuf.Repo, c changelist.Change) error {
return err
}
files := data.Files{c.Path(): *meta}
_, err = repo.AddTargets(c.Scope(), files)
err = doWithRoleFallback(c.Scope(), func(role string) error {
_, e := repo.AddTargets(role, files)
return e
})
if err != nil {
logrus.Errorf("couldn't add target to %s: %s", c.Scope(), err.Error())
}
case changelist.ActionDelete:
logrus.Debug("changelist remove: ", c.Path())
err = repo.RemoveTargets(c.Scope(), c.Path())
err = doWithRoleFallback(c.Scope(), func(role string) error {
return repo.RemoveTargets(role, c.Path())
})
if err != nil {
logrus.Errorf("couldn't remove target from %s: %s", c.Scope(), err.Error())
}
default:
logrus.Debug("action not yet supported: ", c.Action())
}
if err != nil {
return err
}
return nil
return err
}
func applyRootChange(repo *tuf.Repo, c changelist.Change) error {
@ -112,35 +210,56 @@ func nearExpiry(r *data.SignedRoot) bool {
return r.Signed.Expires.Before(plus6mo)
}
func initRoles(kdb *keys.KeyDB, rootKey, targetsKey, snapshotKey, timestampKey data.PublicKey) error {
rootRole, err := data.NewRole("root", 1, []string{rootKey.ID()}, nil, nil)
// Fetches a public key from a remote store, given a gun and role
func getRemoteKey(url, gun, role string, rt http.RoundTripper) (data.PublicKey, error) {
remote, err := getRemoteStore(url, gun, rt)
if err != nil {
return err
return nil, err
}
targetsRole, err := data.NewRole("targets", 1, []string{targetsKey.ID()}, nil, nil)
rawPubKey, err := remote.GetKey(role)
if err != nil {
return err
}
snapshotRole, err := data.NewRole("snapshot", 1, []string{snapshotKey.ID()}, nil, nil)
if err != nil {
return err
}
timestampRole, err := data.NewRole("timestamp", 1, []string{timestampKey.ID()}, nil, nil)
if err != nil {
return err
return nil, err
}
if err := kdb.AddRole(rootRole); err != nil {
pubKey, err := data.UnmarshalPublicKey(rawPubKey)
if err != nil {
return nil, err
}
return pubKey, nil
}
// add a key to a KeyDB, and create a role for the key and add it.
func addKeyForRole(kdb *keys.KeyDB, role string, key data.PublicKey) error {
theRole, err := data.NewRole(role, 1, []string{key.ID()}, nil, nil)
if err != nil {
return err
}
if err := kdb.AddRole(targetsRole); err != nil {
return err
}
if err := kdb.AddRole(snapshotRole); err != nil {
return err
}
if err := kdb.AddRole(timestampRole); err != nil {
kdb.AddKey(key)
if err := kdb.AddRole(theRole); err != nil {
return err
}
return nil
}
// signs and serializes the metadata for a canonical role in a tuf repo to JSON
func serializeCanonicalRole(tufRepo *tuf.Repo, role string) (out []byte, err error) {
var s *data.Signed
switch {
case role == data.CanonicalRootRole:
s, err = tufRepo.SignRoot(data.DefaultExpires(role))
case role == data.CanonicalSnapshotRole:
s, err = tufRepo.SignSnapshot(data.DefaultExpires(role))
case tufRepo.Targets[role] != nil:
s, err = tufRepo.SignTargets(
role, data.DefaultExpires(data.CanonicalTargetsRole))
default:
err = fmt.Errorf("%s not supported role to sign on the client", role)
}
if err != nil {
return
}
return json.Marshal(s)
}


@ -5,52 +5,23 @@ package client
import (
"fmt"
"net/http"
"path/filepath"
"github.com/docker/notary/cryptoservice"
"github.com/docker/notary/keystoremanager"
"github.com/docker/notary/passphrase"
"github.com/docker/notary/trustmanager"
"github.com/docker/notary/tuf/store"
)
// NewNotaryRepository is a helper method that returns a new notary repository.
// It takes the base directory under where all the trust files will be stored
// (usually ~/.docker/trust/).
func NewNotaryRepository(baseDir, gun, baseURL string, rt http.RoundTripper,
retriever passphrase.Retriever) (*NotaryRepository, error) {
retriever passphrase.Retriever) (
*NotaryRepository, error) {
fileKeyStore, err := trustmanager.NewKeyFileStore(baseDir, retriever)
if err != nil {
return nil, fmt.Errorf("failed to create private key store in directory: %s", baseDir)
}
keyStoreManager, err := keystoremanager.NewKeyStoreManager(baseDir, fileKeyStore)
if err != nil {
return nil, err
}
cryptoService := cryptoservice.NewCryptoService(gun, keyStoreManager.KeyStore)
nRepo := &NotaryRepository{
gun: gun,
baseDir: baseDir,
baseURL: baseURL,
tufRepoPath: filepath.Join(baseDir, tufDir, filepath.FromSlash(gun)),
CryptoService: cryptoService,
roundTrip: rt,
KeyStoreManager: keyStoreManager,
}
fileStore, err := store.NewFilesystemStore(
nRepo.tufRepoPath,
"metadata",
"json",
"",
)
if err != nil {
return nil, err
}
nRepo.fileStore = fileStore
return nRepo, nil
return repositoryFromKeystores(baseDir, gun, baseURL, rt,
[]trustmanager.KeyStore{fileKeyStore})
}
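
Repository construction is now funneled through repositoryFromKeystores, but callers still go through NewNotaryRepository exactly as before. A hedged sketch of standing up a repository handle; the fixed-passphrase retriever below is purely illustrative and assumes notary's passphrase.Retriever function signature of this vintage, which is not shown in the diff:

package example

import (
	"net/http"

	"github.com/docker/notary/client"
	"github.com/docker/notary/passphrase"
)

// retriever is a sketch-only constant-passphrase retriever; real callers
// (such as the docker CLI) wire in an interactive prompt instead.
var retriever passphrase.Retriever = func(keyName, alias string, createNew bool, attempts int) (string, bool, error) {
	return "example-passphrase", false, nil
}

// newRepoHandle is a sketch: open (or create) the local trust data for a GUN
// under baseDir, talking to the given notary server URL.
func newRepoHandle(baseDir, gun, serverURL string) (*client.NotaryRepository, error) {
	return client.NewNotaryRepository(baseDir, gun, serverURL, http.DefaultTransport, retriever)
}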


@ -5,57 +5,29 @@ package client
import (
"fmt"
"net/http"
"path/filepath"
"github.com/docker/notary/cryptoservice"
"github.com/docker/notary/keystoremanager"
"github.com/docker/notary/passphrase"
"github.com/docker/notary/trustmanager"
"github.com/docker/notary/trustmanager/yubikey"
"github.com/docker/notary/tuf/signed"
"github.com/docker/notary/tuf/store"
)
// NewNotaryRepository is a helper method that returns a new notary repository.
// It takes the base directory under where all the trust files will be stored
// (usually ~/.docker/trust/).
func NewNotaryRepository(baseDir, gun, baseURL string, rt http.RoundTripper,
retriever passphrase.Retriever) (*NotaryRepository, error) {
retriever passphrase.Retriever) (
*NotaryRepository, error) {
fileKeyStore, err := trustmanager.NewKeyFileStore(baseDir, retriever)
if err != nil {
return nil, fmt.Errorf("failed to create private key store in directory: %s", baseDir)
}
keyStoreManager, err := keystoremanager.NewKeyStoreManager(baseDir, fileKeyStore)
keyStores := []trustmanager.KeyStore{fileKeyStore}
yubiKeyStore, _ := yubikey.NewYubiKeyStore(fileKeyStore, retriever)
var cryptoService signed.CryptoService
if yubiKeyStore == nil {
cryptoService = cryptoservice.NewCryptoService(gun, keyStoreManager.KeyStore)
} else {
cryptoService = cryptoservice.NewCryptoService(gun, yubiKeyStore, keyStoreManager.KeyStore)
if yubiKeyStore != nil {
keyStores = append(keyStores, yubiKeyStore)
}
nRepo := &NotaryRepository{
gun: gun,
baseDir: baseDir,
baseURL: baseURL,
tufRepoPath: filepath.Join(baseDir, tufDir, filepath.FromSlash(gun)),
CryptoService: cryptoService,
roundTrip: rt,
KeyStoreManager: keyStoreManager,
}
fileStore, err := store.NewFilesystemStore(
nRepo.tufRepoPath,
"metadata",
"json",
"",
)
if err != nil {
return nil, err
}
nRepo.fileStore = fileStore
return nRepo, nil
return repositoryFromKeystores(baseDir, gun, baseURL, rt, keyStores)
}
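As a hedged illustration (not part of the diff), a caller might construct a repository like this; passphrase.PromptRetriever and the use of the default transport are assumptions about the surrounding environment:

// Hypothetical caller: open the local trust data for a GUN against a notary
// server, prompting interactively for key passphrases.
func openTrustRepo(trustDir, gun, serverURL string) (*NotaryRepository, error) {
	return NewNotaryRepository(trustDir, gun, serverURL,
		http.DefaultTransport, passphrase.PromptRetriever())
}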

View File

@ -1,22 +1,35 @@
package cryptoservice
import (
"crypto"
"crypto/rand"
"crypto/x509"
"fmt"
"time"
"github.com/docker/notary/trustmanager"
"github.com/docker/notary/tuf/data"
)
// GenerateCertificate generates an X509 Certificate from a template, given a GUN
func GenerateCertificate(rootKey data.PrivateKey, gun string) (*x509.Certificate, error) {
// GenerateCertificate generates an X509 Certificate from a template, given a GUN and validity interval
func GenerateCertificate(rootKey data.PrivateKey, gun string, startTime, endTime time.Time) (*x509.Certificate, error) {
signer := rootKey.CryptoSigner()
if signer == nil {
return nil, fmt.Errorf("key type not supported for Certificate generation: %s\n", rootKey.Algorithm())
}
template, err := trustmanager.NewCertificate(gun)
return generateCertificate(signer, gun, startTime, endTime)
}
// GenerateTestingCertificate generates a non-expired X509 Certificate from a template, given a GUN.
// Good enough for tests where expiration does not really matter; do not use if you care about the policy.
func GenerateTestingCertificate(signer crypto.Signer, gun string) (*x509.Certificate, error) {
startTime := time.Now()
return generateCertificate(signer, gun, startTime, startTime.AddDate(10, 0, 0))
}
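A hedged sketch of the new signature with an explicit validity window (illustration only; trustmanager.GenerateECDSAKey is assumed to be available and to return a data.PrivateKey):

// Hypothetical example: issue a certificate for a GUN that expires after one
// year rather than the ten-year window used by the testing helper above.
func oneYearCertificate(gun string) (*x509.Certificate, error) {
	key, err := trustmanager.GenerateECDSAKey(rand.Reader) // assumed helper
	if err != nil {
		return nil, err
	}
	start := time.Now()
	return GenerateCertificate(key, gun, start, start.AddDate(1, 0, 0))
}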
func generateCertificate(signer crypto.Signer, gun string, startTime, endTime time.Time) (*x509.Certificate, error) {
template, err := trustmanager.NewCertificate(gun, startTime, endTime)
if err != nil {
return nil, fmt.Errorf("failed to create the certificate template for: %s (%v)", gun, err)
}
@ -26,7 +39,6 @@ func GenerateCertificate(rootKey data.PrivateKey, gun string) (*x509.Certificate
return nil, fmt.Errorf("failed to create the certificate for: %s (%v)", gun, err)
}
// Encode the new certificate into PEM
cert, err := x509.ParseCertificate(derBytes)
if err != nil {
return nil, fmt.Errorf("failed to parse the certificate for key: %s (%v)", gun, err)

View File

@ -82,10 +82,15 @@ func (cs *CryptoService) GetPrivateKey(keyID string) (k data.PrivateKey, role st
for _, ks := range cs.keyStores {
for _, keyPath := range keyPaths {
k, role, err = ks.GetKey(keyPath)
if err != nil {
if err == nil {
return
}
switch err.(type) {
case trustmanager.ErrPasswordInvalid, trustmanager.ErrAttemptsExceeded:
return
default:
continue
}
return
}
}
return // returns whatever the final values were
@ -111,39 +116,6 @@ func (cs *CryptoService) RemoveKey(keyID string) (err error) {
return // returns whatever the final values were
}
// Sign returns the signatures for the payload with a set of keyIDs. It ignores
// signing errors and expects the caller to validate whether the number of
// returned signatures is adequate.
func (cs *CryptoService) Sign(keyIDs []string, payload []byte) ([]data.Signature, error) {
signatures := make([]data.Signature, 0, len(keyIDs))
for _, keyID := range keyIDs {
privKey, _, err := cs.GetPrivateKey(keyID)
if err != nil {
logrus.Debugf("error attempting to retrieve private key: %s, %v", keyID, err)
continue
}
sigAlgo := privKey.SignatureAlgorithm()
sig, err := privKey.Sign(rand.Reader, payload, nil)
if err != nil {
logrus.Debugf("ignoring error attempting to %s sign with keyID: %s, %v",
privKey.Algorithm(), keyID, err)
continue
}
logrus.Debugf("appending %s signature with Key ID: %s", privKey.Algorithm(), keyID)
// Append signatures to result array
signatures = append(signatures, data.Signature{
KeyID: keyID,
Method: sigAlgo,
Signature: sig[:],
})
}
return signatures, nil
}
// ListKeys returns a list of key IDs valid for the given role
func (cs *CryptoService) ListKeys(role string) []string {
var res []string

View File

@ -261,12 +261,12 @@ func moveKeysByGUN(oldKeyStore, newKeyStore trustmanager.KeyStore, gun string) e
func moveKeys(oldKeyStore, newKeyStore trustmanager.KeyStore) error {
for f := range oldKeyStore.ListKeys() {
privateKey, alias, err := oldKeyStore.GetKey(f)
privateKey, role, err := oldKeyStore.GetKey(f)
if err != nil {
return err
}
err = newKeyStore.AddKey(f, alias, privateKey)
err = newKeyStore.AddKey(f, role, privateKey)
if err != nil {
return err
@ -278,7 +278,10 @@ func moveKeys(oldKeyStore, newKeyStore trustmanager.KeyStore) error {
func addKeysToArchive(zipWriter *zip.Writer, newKeyStore *trustmanager.KeyFileStore) error {
for _, relKeyPath := range newKeyStore.ListFiles() {
fullKeyPath := filepath.Join(newKeyStore.BaseDir(), relKeyPath)
fullKeyPath, err := newKeyStore.GetPath(relKeyPath)
if err != nil {
return err
}
fi, err := os.Lstat(fullKeyPath)
if err != nil {
@ -290,7 +293,11 @@ func addKeysToArchive(zipWriter *zip.Writer, newKeyStore *trustmanager.KeyFileSt
return err
}
infoHeader.Name = relKeyPath
relPath, err := filepath.Rel(newKeyStore.BaseDir(), fullKeyPath)
if err != nil {
return err
}
infoHeader.Name = relPath
zipFileEntryWriter, err := zipWriter.CreateHeader(infoHeader)
if err != nil {

View File

@ -8,7 +8,8 @@ notaryserver:
- "8080"
- "4443:4443"
environment:
SERVICE_NAME: notary
- SERVICE_NAME=notary
command: -config=fixtures/server-config.json
notarysigner:
volumes:
- /dev/bus/usb/003/010:/dev/bus/usb/002/010
@ -17,7 +18,10 @@ notarysigner:
dockerfile: Dockerfile.signer
links:
- notarymysql
command: -config=fixtures/signer-config.json
notarymysql:
volumes:
- notarymysql:/var/lib/mysql
build: ./notarymysql/
ports:
- "3306:3306"

View File

@ -99,12 +99,16 @@ func PromptRetrieverWithInOut(in io.Reader, out io.Writer, aliasMap map[string]s
return "", true, ErrTooManyAttempts
}
state, err := term.SaveState(0)
if err != nil {
return "", false, err
// If typing on the terminal, we do not want the terminal to echo the
// password that is typed (so it doesn't display)
if term.IsTerminal(0) {
state, err := term.SaveState(0)
if err != nil {
return "", false, err
}
term.DisableEcho(0, state)
defer term.RestoreTerminal(0, state)
}
term.DisableEcho(0, state)
defer term.RestoreTerminal(0, state)
stdin := bufio.NewReader(in)

View File

@ -1,45 +1,13 @@
package trustmanager
import (
"errors"
"fmt"
"github.com/docker/notary"
"io/ioutil"
"os"
"path/filepath"
"strings"
"sync"
)
const (
visible = notary.PubCertPerms
private = notary.PrivKeyPerms
)
var (
// ErrPathOutsideStore indicates that the returned path would be
// outside the store
ErrPathOutsideStore = errors.New("path outside file store")
)
// LimitedFileStore implements the bare bones primitives (no hierarchy)
type LimitedFileStore interface {
Add(fileName string, data []byte) error
Remove(fileName string) error
Get(fileName string) ([]byte, error)
ListFiles() []string
}
// FileStore is the interface for full-featured FileStores
type FileStore interface {
LimitedFileStore
RemoveDir(directoryName string) error
GetPath(fileName string) (string, error)
ListDir(directoryName string) []string
BaseDir() string
}
// SimpleFileStore implements FileStore
type SimpleFileStore struct {
baseDir string
@ -55,6 +23,10 @@ func NewSimpleFileStore(baseDir string, fileExt string) (*SimpleFileStore, error
return nil, err
}
if !strings.HasPrefix(fileExt, ".") {
fileExt = "." + fileExt
}
return &SimpleFileStore{
baseDir: baseDir,
fileExt: fileExt,
@ -68,6 +40,10 @@ func NewPrivateSimpleFileStore(baseDir string, fileExt string) (*SimpleFileStore
return nil, err
}
if !strings.HasPrefix(fileExt, ".") {
fileExt = "." + fileExt
}
return &SimpleFileStore{
baseDir: baseDir,
fileExt: fileExt,
@ -176,7 +152,8 @@ func (f *SimpleFileStore) list(path string) []string {
if err != nil {
return err
}
files = append(files, fp)
trimmed := strings.TrimSuffix(fp, f.fileExt)
files = append(files, trimmed)
}
return nil
})
@ -185,7 +162,7 @@ func (f *SimpleFileStore) list(path string) []string {
// genFileName returns the name using the right extension
func (f *SimpleFileStore) genFileName(name string) string {
return fmt.Sprintf("%s.%s", name, f.fileExt)
return fmt.Sprintf("%s%s", name, f.fileExt)
}
// BaseDir returns the base directory of the filestore
@ -212,68 +189,3 @@ func createDirectory(dir string, perms os.FileMode) error {
dir = dir + "/"
return os.MkdirAll(dir, perms)
}
// MemoryFileStore is an implementation of LimitedFileStore that keeps
// the contents in memory.
type MemoryFileStore struct {
sync.Mutex
files map[string][]byte
}
// NewMemoryFileStore creates a MemoryFileStore
func NewMemoryFileStore() *MemoryFileStore {
return &MemoryFileStore{
files: make(map[string][]byte),
}
}
// ErrMemFileNotFound is returned for a nonexistent "file" in the memory file
// store
var ErrMemFileNotFound = errors.New("key not found in memory file store")
// Add writes data to a file with a given name
func (f *MemoryFileStore) Add(name string, data []byte) error {
f.Lock()
defer f.Unlock()
f.files[name] = data
return nil
}
// Remove removes a file identified by name
func (f *MemoryFileStore) Remove(name string) error {
f.Lock()
defer f.Unlock()
if _, present := f.files[name]; !present {
return ErrMemFileNotFound
}
delete(f.files, name)
return nil
}
// Get returns the data given a file name
func (f *MemoryFileStore) Get(name string) ([]byte, error) {
f.Lock()
defer f.Unlock()
fileData, present := f.files[name]
if !present {
return nil, ErrMemFileNotFound
}
return fileData, nil
}
// ListFiles lists all the files inside of a store
func (f *MemoryFileStore) ListFiles() []string {
var list []string
for name := range f.files {
list = append(list, name)
}
return list
}

View File

@ -1,11 +1,13 @@
package trustmanager
import (
"encoding/pem"
"fmt"
"path/filepath"
"strings"
"sync"
"github.com/Sirupsen/logrus"
"github.com/docker/notary/passphrase"
"github.com/docker/notary/tuf/data"
)
@ -54,10 +56,10 @@ func (s *KeyFileStore) Name() string {
}
// AddKey stores the contents of a PEM-encoded private key as a PEM block
func (s *KeyFileStore) AddKey(name, alias string, privKey data.PrivateKey) error {
func (s *KeyFileStore) AddKey(name, role string, privKey data.PrivateKey) error {
s.Lock()
defer s.Unlock()
return addKey(s, s.Retriever, s.cachedKeys, name, alias, privKey)
return addKey(s, s.Retriever, s.cachedKeys, name, role, privKey)
}
// GetKey returns the PrivateKey given a KeyID
@ -153,7 +155,7 @@ func (s *KeyMemoryStore) ImportKey(pemBytes []byte, alias string) error {
return importKey(s, s.Retriever, s.cachedKeys, alias, pemBytes)
}
func addKey(s LimitedFileStore, passphraseRetriever passphrase.Retriever, cachedKeys map[string]*cachedKey, name, alias string, privKey data.PrivateKey) error {
func addKey(s LimitedFileStore, passphraseRetriever passphrase.Retriever, cachedKeys map[string]*cachedKey, name, role string, privKey data.PrivateKey) error {
var (
chosenPassphrase string
@ -162,7 +164,7 @@ func addKey(s LimitedFileStore, passphraseRetriever passphrase.Retriever, cached
)
for attempts := 0; ; attempts++ {
chosenPassphrase, giveup, err = passphraseRetriever(name, alias, true, attempts)
chosenPassphrase, giveup, err = passphraseRetriever(name, role, true, attempts)
if err != nil {
continue
}
@ -175,25 +177,37 @@ func addKey(s LimitedFileStore, passphraseRetriever passphrase.Retriever, cached
break
}
return encryptAndAddKey(s, chosenPassphrase, cachedKeys, name, alias, privKey)
return encryptAndAddKey(s, chosenPassphrase, cachedKeys, name, role, privKey)
}
func getKeyAlias(s LimitedFileStore, keyID string) (string, error) {
files := s.ListFiles()
// getKeyRole finds the role for the given keyID. It attempts to look
// both in the newer format PEM headers, and also in the legacy filename
// format. It returns: the role, whether it was found in the legacy format
// (true == legacy), and an error
func getKeyRole(s LimitedFileStore, keyID string) (string, bool, error) {
name := strings.TrimSpace(strings.TrimSuffix(filepath.Base(keyID), filepath.Ext(keyID)))
for _, file := range files {
for _, file := range s.ListFiles() {
filename := filepath.Base(file)
if strings.HasPrefix(filename, name) {
aliasPlusDotKey := strings.TrimPrefix(filename, name+"_")
retVal := strings.TrimSuffix(aliasPlusDotKey, "."+keyExtension)
return retVal, nil
d, err := s.Get(file)
if err != nil {
return "", false, err
}
block, _ := pem.Decode(d)
if block != nil {
if role, ok := block.Headers["role"]; ok {
return role, false, nil
}
}
role := strings.TrimPrefix(filename, name+"_")
return role, true, nil
}
}
return "", &ErrKeyNotFound{KeyID: keyID}
return "", false, &ErrKeyNotFound{KeyID: keyID}
}
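To make the new on-disk format concrete, a minimal sketch (illustration, not code from this change) of reading the role back out of a PEM block, the same way getKeyRole handles the non-legacy path:

// roleFromPEMBytes is a hypothetical helper: it decodes a PEM block and
// returns the "role" header written by KeyToPEM / EncryptPrivateKey, if any.
func roleFromPEMBytes(pemBytes []byte) (string, bool) {
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		return "", false
	}
	role, ok := block.Headers["role"]
	return role, ok
}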
// GetKey returns the PrivateKey given a KeyID
@ -208,14 +222,13 @@ func getKey(s LimitedFileStore, passphraseRetriever passphrase.Retriever, cached
return nil, "", err
}
var retErr error
// See if the key is encrypted. If it's encrypted, we'll fail to parse the private key
privKey, err := ParsePEMPrivateKey(keyBytes, "")
if err != nil {
privKey, _, retErr = GetPasswdDecryptBytes(passphraseRetriever, keyBytes, name, string(keyAlias))
}
if retErr != nil {
return nil, "", retErr
privKey, _, err = GetPasswdDecryptBytes(passphraseRetriever, keyBytes, name, string(keyAlias))
if err != nil {
return nil, "", err
}
}
cachedKeys[name] = &cachedKey{alias: keyAlias, key: privKey}
return privKey, keyAlias, nil
@ -228,44 +241,58 @@ func listKeys(s LimitedFileStore) map[string]string {
for _, f := range s.ListFiles() {
// Remove the prefix of the directory from the filename
if f[:len(rootKeysSubdir)] == rootKeysSubdir {
f = strings.TrimPrefix(f, rootKeysSubdir+"/")
var keyIDFull string
if strings.HasPrefix(f, rootKeysSubdir+"/") {
keyIDFull = strings.TrimPrefix(f, rootKeysSubdir+"/")
} else {
f = strings.TrimPrefix(f, nonRootKeysSubdir+"/")
keyIDFull = strings.TrimPrefix(f, nonRootKeysSubdir+"/")
}
// Remove the extension from the full filename
// abcde_root.key becomes abcde_root
keyIDFull := strings.TrimSpace(strings.TrimSuffix(f, filepath.Ext(f)))
keyIDFull = strings.TrimSpace(keyIDFull)
// If the key does not have a _, it is malformed
// If the key does not have a _, we'll attempt to
// read it as a PEM
underscoreIndex := strings.LastIndex(keyIDFull, "_")
if underscoreIndex == -1 {
continue
d, err := s.Get(f)
if err != nil {
logrus.Error(err)
continue
}
block, _ := pem.Decode(d)
if block == nil {
continue
}
if role, ok := block.Headers["role"]; ok {
keyIDMap[keyIDFull] = role
}
} else {
// The keyID is the first part of the keyname
// The KeyAlias is the second part of the keyname
// in a key named abcde_root, abcde is the keyID and root is the KeyAlias
keyID := keyIDFull[:underscoreIndex]
keyAlias := keyIDFull[underscoreIndex+1:]
keyIDMap[keyID] = keyAlias
}
// The keyID is the first part of the keyname
// The KeyAlias is the second part of the keyname
// in a key named abcde_root, abcde is the keyID and root is the KeyAlias
keyID := keyIDFull[:underscoreIndex]
keyAlias := keyIDFull[underscoreIndex+1:]
keyIDMap[keyID] = keyAlias
}
return keyIDMap
}
// RemoveKey removes the key from the keyfilestore
func removeKey(s LimitedFileStore, cachedKeys map[string]*cachedKey, name string) error {
keyAlias, err := getKeyAlias(s, name)
role, legacy, err := getKeyRole(s, name)
if err != nil {
return err
}
delete(cachedKeys, name)
if legacy {
name = name + "_" + role
}
// being in a subdirectory is for backwards compatibility
filename := name + "_" + keyAlias
err = s.Remove(filepath.Join(getSubdir(keyAlias), filename))
err = s.Remove(filepath.Join(getSubdir(role), name))
if err != nil {
return err
}
@ -283,18 +310,21 @@ func getSubdir(alias string) string {
// Given a key ID, gets the bytes and alias belonging to that key if the key
// exists
func getRawKey(s LimitedFileStore, name string) ([]byte, string, error) {
keyAlias, err := getKeyAlias(s, name)
role, legacy, err := getKeyRole(s, name)
if err != nil {
return nil, "", err
}
filename := name + "_" + keyAlias
if legacy {
name = name + "_" + role
}
var keyBytes []byte
keyBytes, err = s.Get(filepath.Join(getSubdir(keyAlias), filename))
keyBytes, err = s.Get(filepath.Join(getSubdir(role), name))
if err != nil {
return nil, "", err
}
return keyBytes, keyAlias, nil
return keyBytes, role, nil
}
// GetPasswdDecryptBytes gets the password to decrypt the given PEM bytes.
@ -335,7 +365,7 @@ func GetPasswdDecryptBytes(passphraseRetriever passphrase.Retriever, pemBytes []
return privKey, passwd, nil
}
func encryptAndAddKey(s LimitedFileStore, passwd string, cachedKeys map[string]*cachedKey, name, alias string, privKey data.PrivateKey) error {
func encryptAndAddKey(s LimitedFileStore, passwd string, cachedKeys map[string]*cachedKey, name, role string, privKey data.PrivateKey) error {
var (
pemPrivKey []byte
@ -343,17 +373,17 @@ func encryptAndAddKey(s LimitedFileStore, passwd string, cachedKeys map[string]*
)
if passwd != "" {
pemPrivKey, err = EncryptPrivateKey(privKey, passwd)
pemPrivKey, err = EncryptPrivateKey(privKey, role, passwd)
} else {
pemPrivKey, err = KeyToPEM(privKey)
pemPrivKey, err = KeyToPEM(privKey, role)
}
if err != nil {
return err
}
cachedKeys[name] = &cachedKey{alias: alias, key: privKey}
return s.Add(filepath.Join(getSubdir(alias), name+"_"+alias), pemPrivKey)
cachedKeys[name] = &cachedKey{alias: role, key: privKey}
return s.Add(filepath.Join(getSubdir(role), name), pemPrivKey)
}
func importKey(s LimitedFileStore, passphraseRetriever passphrase.Retriever, cachedKeys map[string]*cachedKey, alias string, pemBytes []byte) error {

View File

@ -0,0 +1,67 @@
package trustmanager
import (
"os"
"sync"
)
// MemoryFileStore is an implementation of LimitedFileStore that keeps
// the contents in memory.
type MemoryFileStore struct {
sync.Mutex
files map[string][]byte
}
// NewMemoryFileStore creates a MemoryFileStore
func NewMemoryFileStore() *MemoryFileStore {
return &MemoryFileStore{
files: make(map[string][]byte),
}
}
// Add writes data to a file with a given name
func (f *MemoryFileStore) Add(name string, data []byte) error {
f.Lock()
defer f.Unlock()
f.files[name] = data
return nil
}
// Remove removes a file identified by name
func (f *MemoryFileStore) Remove(name string) error {
f.Lock()
defer f.Unlock()
if _, present := f.files[name]; !present {
return os.ErrNotExist
}
delete(f.files, name)
return nil
}
// Get returns the data given a file name
func (f *MemoryFileStore) Get(name string) ([]byte, error) {
f.Lock()
defer f.Unlock()
fileData, present := f.files[name]
if !present {
return nil, os.ErrNotExist
}
return fileData, nil
}
// ListFiles lists all the files inside of a store
func (f *MemoryFileStore) ListFiles() []string {
var list []string
for name := range f.files {
list = append(list, name)
}
return list
}
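A short usage sketch (not part of the commit) of the store's round trip and its use of os.ErrNotExist for missing names:

// Hypothetical example of the in-memory store's lifecycle.
func memoryStoreRoundTrip() error {
	store := NewMemoryFileStore()
	if err := store.Add("tuf_keys/abc123", []byte("pem data")); err != nil {
		return err
	}
	if _, err := store.Get("tuf_keys/abc123"); err != nil {
		return err
	}
	if err := store.Remove("tuf_keys/abc123"); err != nil {
		return err
	}
	// A second removal returns os.ErrNotExist.
	return store.Remove("tuf_keys/abc123")
}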

View File

@ -0,0 +1,52 @@
package trustmanager
import (
"errors"
"github.com/docker/notary"
)
const (
visible = notary.PubCertPerms
private = notary.PrivKeyPerms
)
var (
// ErrPathOutsideStore indicates that the returned path would be
// outside the store
ErrPathOutsideStore = errors.New("path outside file store")
)
// LimitedFileStore implements the bare bones primitives (no hierarchy)
type LimitedFileStore interface {
// Add writes a file to the specified location, returning an error if this
// is not possible (reasons may include permissions errors). The path is cleaned
// before being made absolute against the store's base dir.
Add(fileName string, data []byte) error
// Remove deletes a file from the store relative to the store's base directory.
// The path is cleaned before being made absolute to ensure no path traversal
// outside the base directory is possible.
Remove(fileName string) error
// Get returns the file content found at fileName relative to the base directory
// of the file store. The path is cleaned before being made absolute to ensure
// path traversal outside the store is not possible. If the file is not found
// an error to that effect is returned.
Get(fileName string) ([]byte, error)
// ListFiles returns a list of paths relative to the base directory of the
// filestore. Any of these paths must be retrievable via the
// LimitedFileStore.Get method.
ListFiles() []string
}
// FileStore is the interface for full-featured FileStores
type FileStore interface {
LimitedFileStore
RemoveDir(directoryName string) error
GetPath(fileName string) (string, error)
ListDir(directoryName string) []string
BaseDir() string
}

View File

@ -15,7 +15,6 @@ import (
"math/big"
"net/http"
"net/url"
"path/filepath"
"time"
"github.com/Sirupsen/logrus"
@ -117,11 +116,15 @@ func fingerprintCert(cert *x509.Certificate) (CertID, error) {
// loadCertsFromDir loads every certificate found in the given store's backing file store
func loadCertsFromDir(s *X509FileStore) error {
certFiles := s.fileStore.ListFiles()
for _, f := range certFiles {
for _, f := range s.fileStore.ListFiles() {
// ListFiles returns relative paths
fullPath := filepath.Join(s.fileStore.BaseDir(), f)
err := s.AddCertFromFile(fullPath)
data, err := s.fileStore.Get(f)
if err != nil {
// the filestore told us it had a file that it then couldn't serve.
// this is a serious problem so error immediately
return err
}
err = s.AddCertFromPEM(data)
if err != nil {
if _, ok := err.(*ErrCertValidation); ok {
logrus.Debugf("ignoring certificate, did not pass validation: %s", f)
@ -411,18 +414,26 @@ func blockType(k data.PrivateKey) (string, error) {
}
// KeyToPEM returns a PEM encoded key from a PrivateKey, recording the given role in the PEM headers
func KeyToPEM(privKey data.PrivateKey) ([]byte, error) {
func KeyToPEM(privKey data.PrivateKey, role string) ([]byte, error) {
bt, err := blockType(privKey)
if err != nil {
return nil, err
}
return pem.EncodeToMemory(&pem.Block{Type: bt, Bytes: privKey.Private()}), nil
block := &pem.Block{
Type: bt,
Headers: map[string]string{
"role": role,
},
Bytes: privKey.Private(),
}
return pem.EncodeToMemory(block), nil
}
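As a hedged sketch (not in the diff), both serialization paths can be exercised like this, given any data.PrivateKey:

// Hypothetical helper: produce a plaintext and an encrypted PEM encoding of a
// key, with the role recorded in the PEM headers in both cases.
func encodeKeyWithRole(key data.PrivateKey, role, passwd string) (plain, encrypted []byte, err error) {
	if plain, err = KeyToPEM(key, role); err != nil {
		return nil, nil, err
	}
	if encrypted, err = EncryptPrivateKey(key, role, passwd); err != nil {
		return nil, nil, err
	}
	return plain, encrypted, nil
}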
// EncryptPrivateKey returns an encrypted PEM key given a PrivateKey,
// a role, and a passphrase
func EncryptPrivateKey(key data.PrivateKey, passphrase string) ([]byte, error) {
func EncryptPrivateKey(key data.PrivateKey, role, passphrase string) ([]byte, error) {
bt, err := blockType(key)
if err != nil {
return nil, err
@ -440,6 +451,11 @@ func EncryptPrivateKey(key data.PrivateKey, passphrase string) ([]byte, error) {
return nil, err
}
if encryptedPEMBlock.Headers == nil {
return nil, fmt.Errorf("unable to encrypt key - invalid PEM file produced")
}
encryptedPEMBlock.Headers["role"] = role
return pem.EncodeToMemory(encryptedPEMBlock), nil
}
@ -471,12 +487,8 @@ func CertsToKeys(certs []*x509.Certificate) map[string]data.PublicKey {
return keys
}
// NewCertificate returns an X509 Certificate following a template, given a GUN.
func NewCertificate(gun string) (*x509.Certificate, error) {
notBefore := time.Now()
// Certificates will expire in 10 years
notAfter := notBefore.Add(time.Hour * 24 * 365 * 10)
// NewCertificate returns an X509 Certificate following a template, given a GUN and validity interval.
func NewCertificate(gun string, startTime, endTime time.Time) (*x509.Certificate, error) {
serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
@ -489,8 +501,8 @@ func NewCertificate(gun string) (*x509.Certificate, error) {
Subject: pkix.Name{
CommonName: gun,
},
NotBefore: notBefore,
NotAfter: notAfter,
NotBefore: startTime,
NotAfter: endTime,
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageCodeSigning},

View File

@ -14,6 +14,7 @@ import (
"io"
"math/big"
"os"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/notary/passphrase"
@ -217,7 +218,9 @@ func addECDSAKey(
ecdsaPrivKeyD := ensurePrivateKeySize(ecdsaPrivKey.D.Bytes())
template, err := trustmanager.NewCertificate(role)
// Hard-coded policy: the generated certificate expires in 10 years.
startTime := time.Now()
template, err := trustmanager.NewCertificate(role, startTime, startTime.AddDate(10, 0, 0))
if err != nil {
return fmt.Errorf("failed to create the certificate template: %v", err)
}
@ -483,6 +486,12 @@ func yubiListKeys(ctx IPKCS11Ctx, session pkcs11.SessionHandle) (keys map[string
}
}
}
// we found nothing
if cert == nil {
continue
}
var ecdsaPubKey *ecdsa.PublicKey
switch cert.PublicKeyAlgorithm {
case x509.ECDSA:
@ -790,28 +799,31 @@ func SetupHSMEnv(libraryPath string, libLoader pkcs11LibLoader) (
IPKCS11Ctx, pkcs11.SessionHandle, error) {
if libraryPath == "" {
return nil, 0, errors.New("No library found.")
return nil, 0, fmt.Errorf("no library found.")
}
p := libLoader(libraryPath)
if p == nil {
return nil, 0, errors.New("Failed to init library")
return nil, 0, fmt.Errorf("failed to load library %s", libraryPath)
}
if err := p.Initialize(); err != nil {
defer finalizeAndDestroy(p)
return nil, 0, fmt.Errorf("Initialize error %s", err.Error())
return nil, 0, fmt.Errorf(
"found library %s, but initialize error %s", libraryPath, err.Error())
}
slots, err := p.GetSlotList(true)
if err != nil {
defer finalizeAndDestroy(p)
return nil, 0, fmt.Errorf("Failed to list HSM slots %s", err)
return nil, 0, fmt.Errorf(
"loaded library %s, but failed to list HSM slots %s", libraryPath, err)
}
// Check to see if we got any slots from the HSM.
if len(slots) < 1 {
defer finalizeAndDestroy(p)
return nil, 0, fmt.Errorf("No HSM Slots found")
return nil, 0, fmt.Errorf(
"loaded library %s, but no HSM slots found", libraryPath)
}
// CKF_SERIAL_SESSION: TRUE if cryptographic functions are performed in serial with the application; FALSE if the functions may be performed in parallel with the application.
@ -819,9 +831,12 @@ func SetupHSMEnv(libraryPath string, libLoader pkcs11LibLoader) (
session, err := p.OpenSession(slots[0], pkcs11.CKF_SERIAL_SESSION|pkcs11.CKF_RW_SESSION)
if err != nil {
defer cleanup(p, session)
return nil, 0, fmt.Errorf("Failed to Start Session with HSM %s", err)
return nil, 0, fmt.Errorf(
"loaded library %s, but failed to start session with HSM %s",
libraryPath, err)
}
logrus.Debugf("Initialized PKCS11 library %s and started HSM session", libraryPath)
return p, session, nil
}

View File

@ -8,7 +8,6 @@ import (
"fmt"
"io"
"path"
"path/filepath"
"strings"
"github.com/Sirupsen/logrus"
@ -82,7 +81,7 @@ func (c *Client) update() error {
// In this instance the root has not expired based on time, but is
// expired based on the snapshot dictating a new root has been produced.
logrus.Debug(err)
return tuf.ErrLocalRootExpired{}
return err
}
// will always need top level targets at a minimum
err = c.downloadTargets("targets")
@ -369,34 +368,52 @@ func (c *Client) downloadSnapshot() error {
return nil
}
// downloadTargets is responsible for downloading any targets file
// including delegated roles.
// downloadTargets downloads all targets and delegated targets for the repository.
// It uses a pre-order tree traversal as it's necessary to download parents first
// to obtain the keys to validate children.
func (c *Client) downloadTargets(role string) error {
role = data.RoleName(role) // this will really only do something for base targets role
if c.local.Snapshot == nil {
return ErrMissingMeta{role: role}
}
snap := c.local.Snapshot.Signed
root := c.local.Root.Signed
r := c.keysDB.GetRole(role)
if r == nil {
return fmt.Errorf("Invalid role: %s", role)
}
keyIDs := r.KeyIDs
s, err := c.getTargetsFile(role, keyIDs, snap.Meta, root.ConsistentSnapshot, r.Threshold)
if err != nil {
logrus.Error("Error getting targets file:", err)
return err
}
t, err := data.TargetsFromSigned(s)
if err != nil {
return err
}
err = c.local.SetTargets(role, t)
if err != nil {
return err
}
stack := utils.NewStack()
stack.Push(role)
for !stack.Empty() {
role, err := stack.PopString()
if err != nil {
return err
}
role = data.RoleName(role) // this will really only do something for base targets role
if c.local.Snapshot == nil {
return ErrMissingMeta{role: role}
}
snap := c.local.Snapshot.Signed
root := c.local.Root.Signed
r := c.keysDB.GetRole(role)
if r == nil {
return fmt.Errorf("Invalid role: %s", role)
}
keyIDs := r.KeyIDs
s, err := c.getTargetsFile(role, keyIDs, snap.Meta, root.ConsistentSnapshot, r.Threshold)
if err != nil {
if _, ok := err.(ErrMissingMeta); ok && role != data.CanonicalTargetsRole {
// if the role meta hasn't been published,
// that's ok, continue
continue
}
logrus.Error("Error getting targets file:", err)
return err
}
t, err := data.TargetsFromSigned(s)
if err != nil {
return err
}
err = c.local.SetTargets(role, t)
if err != nil {
return err
}
// push delegated roles contained in the targets file onto the stack
for _, r := range t.Signed.Delegations.Roles {
stack.Push(r.Name)
}
}
return nil
}
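The traversal order matters because a delegated role's keys are declared by its parent. A generic sketch of the same stack-based pre-order walk (illustration only, using plain maps instead of TUF types):

// preOrder visits parents before children, mirroring downloadTargets above.
func preOrder(children map[string][]string, root string, visit func(string)) {
	stack := []string{root}
	for len(stack) > 0 {
		role := stack[len(stack)-1]
		stack = stack[:len(stack)-1]
		visit(role)
		// Delegations are pushed only after the parent is processed, so their
		// verification keys are already known when they are popped.
		stack = append(stack, children[role]...)
	}
}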
@ -482,17 +499,18 @@ func (c Client) getTargetsFile(role string, keyIDs []string, snapshotMeta data.F
// if we error when setting meta, we should continue.
err = c.cache.SetMeta(role, raw)
if err != nil {
logrus.Errorf("Failed to write snapshot to local cache: %s", err.Error())
logrus.Errorf("Failed to write %s to local cache: %s", role, err.Error())
}
}
return s, nil
}
// RoleTargetsPath generates the appropriate filename for the targets file,
// RoleTargetsPath generates the appropriate HTTP URL for the targets file,
// based on whether the repo is marked as consistent.
func (c Client) RoleTargetsPath(role string, hashSha256 string, consistent bool) (string, error) {
if consistent {
dir := filepath.Dir(role)
// Use path instead of filepath since we refer to the TUF role directly instead of its target files
dir := path.Dir(role)
if strings.Contains(role, "/") {
lastSlashIdx := strings.LastIndex(role, "/")
role = role[lastSlashIdx+1:]
@ -505,42 +523,41 @@ func (c Client) RoleTargetsPath(role string, hashSha256 string, consistent bool)
return role, nil
}
// TargetMeta ensures the repo is up to date, downloading the minimum
// necessary metadata files
func (c Client) TargetMeta(path string) (*data.FileMeta, error) {
c.Update()
var meta *data.FileMeta
// TargetMeta ensures the repo is up to date. It assumes downloadTargets
// has already downloaded all delegated roles
func (c Client) TargetMeta(role, path string, excludeRoles ...string) (*data.FileMeta, string) {
excl := make(map[string]bool)
for _, r := range excludeRoles {
excl[r] = true
}
pathDigest := sha256.Sum256([]byte(path))
pathHex := hex.EncodeToString(pathDigest[:])
// FIFO list of targets delegations to inspect for target
roles := []string{data.ValidRoles["targets"]}
var role string
roles := []string{role}
var (
meta *data.FileMeta
curr string
)
for len(roles) > 0 {
// have to do these lines here because of order of execution in for statement
role = roles[0]
curr = roles[0]
roles = roles[1:]
// Download the target role file if necessary
err := c.downloadTargets(role)
if err != nil {
// as long as we find a valid target somewhere we're happy.
// continue and search other delegated roles if any
continue
}
meta = c.local.TargetMeta(role, path)
meta = c.local.TargetMeta(curr, path)
if meta != nil {
// we found the target!
return meta, nil
return meta, curr
}
delegations := c.local.TargetDelegations(role, path, pathHex)
delegations := c.local.TargetDelegations(curr, path, pathHex)
for _, d := range delegations {
roles = append(roles, d.Name)
if !excl[d.Name] {
roles = append(roles, d.Name)
}
}
}
return meta, nil
return meta, ""
}
// DownloadTarget downloads the target to dst from the remote

View File

@ -77,6 +77,15 @@ func (ks *KeyList) UnmarshalJSON(data []byte) error {
return nil
}
// IDs generates a list of the hex encoded key IDs in the KeyList
func (ks KeyList) IDs() []string {
keyIDs := make([]string, 0, len(ks))
for _, k := range ks {
keyIDs = append(keyIDs, k.ID())
}
return keyIDs
}
func typedPublicKey(tk tufKey) PublicKey {
switch tk.Algorithm() {
case ECDSAKey:

View File

@ -2,6 +2,8 @@ package data
import (
"fmt"
"path"
"regexp"
"strings"
)
@ -24,15 +26,28 @@ var ValidRoles = map[string]string{
CanonicalTimestampRole: CanonicalTimestampRole,
}
// ErrNoSuchRole indicates the role doesn't exist
type ErrNoSuchRole struct {
Role string
}
func (e ErrNoSuchRole) Error() string {
return fmt.Sprintf("role does not exist: %s", e.Role)
}
// ErrInvalidRole represents an error regarding a role. Typically
// something like a role for which some of the public keys were
// not found in the TUF repo.
type ErrInvalidRole struct {
Role string
Role string
Reason string
}
func (e ErrInvalidRole) Error() string {
return fmt.Sprintf("tuf: invalid role %s", e.Role)
if e.Reason != "" {
return fmt.Sprintf("tuf: invalid role %s. %s", e.Role, e.Reason)
}
return fmt.Sprintf("tuf: invalid role %s.", e.Role)
}
// SetValidRoles is a utility function to override some or all of the roles
@ -85,10 +100,11 @@ func ValidRole(name string) bool {
if v, ok := ValidRoles[name]; ok {
return name == v
}
targetsBase := fmt.Sprintf("%s/", ValidRoles[CanonicalTargetsRole])
if strings.HasPrefix(name, targetsBase) {
if IsDelegation(name) {
return true
}
for _, v := range ValidRoles {
if name == v {
return true
@ -97,6 +113,24 @@ func ValidRole(name string) bool {
return false
}
// IsDelegation checks whether the role name is a delegation (as opposed to a top-level role)
func IsDelegation(role string) bool {
targetsBase := ValidRoles[CanonicalTargetsRole] + "/"
delegationRegexp := regexp.MustCompile("^[-a-z0-9_/]+$")
whitelistedChars := delegationRegexp.MatchString(role)
// Limit size of full role string to 255 chars for db column size limit
correctLength := len(role) < 256
// Removes ., .., extra slashes, and trailing slash
isClean := path.Clean(role) == role
return strings.HasPrefix(role, targetsBase) &&
whitelistedChars &&
correctLength &&
isClean
}
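Some illustrative inputs and expected results (assumptions derived from the checks above, not test vectors from the commit):

// Expected results of IsDelegation for a few role names.
var delegationExamples = map[string]bool{
	"targets/releases":  true,  // under targets/, lowercase, path-clean
	"targets/a/b":       true,  // nested delegations are allowed
	"targets":           false, // the base targets role is not a delegation
	"targets/Releases":  false, // uppercase is outside the whitelisted characters
	"targets/../root":   false, // not path-clean
	"snapshot/anything": false, // must be rooted under targets/
}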
// RootRole is a cut down role as it appears in the root.json
type RootRole struct {
KeyIDs []string `json:"keyids"`
@ -115,7 +149,18 @@ type Role struct {
// NewRole creates a new Role object from the given parameters
func NewRole(name string, threshold int, keyIDs, paths, pathHashPrefixes []string) (*Role, error) {
if len(paths) > 0 && len(pathHashPrefixes) > 0 {
return nil, ErrInvalidRole{Role: name}
return nil, ErrInvalidRole{
Role: name,
Reason: "roles may not have both Paths and PathHashPrefixes",
}
}
if IsDelegation(name) {
if len(paths) == 0 && len(pathHashPrefixes) == 0 {
return nil, ErrInvalidRole{
Role: name,
Reason: "roles with no Paths and no PathHashPrefixes will never be able to publish content",
}
}
}
if threshold < 1 {
return nil, ErrInvalidRole{Role: name}
@ -173,6 +218,78 @@ func (r Role) CheckPrefixes(hash string) bool {
// IsDelegation checks whether the role is a delegation (as opposed to a top-level role)
func (r Role) IsDelegation() bool {
targetsBase := fmt.Sprintf("%s/", ValidRoles[CanonicalTargetsRole])
return strings.HasPrefix(r.Name, targetsBase)
return IsDelegation(r.Name)
}
// AddKeys merges the ids into the current list of role key ids
func (r *Role) AddKeys(ids []string) {
r.KeyIDs = mergeStrSlices(r.KeyIDs, ids)
}
// AddPaths merges the paths into the current list of role paths
func (r *Role) AddPaths(paths []string) error {
if len(paths) == 0 {
return nil
}
if len(r.PathHashPrefixes) > 0 {
return ErrInvalidRole{Role: r.Name, Reason: "attempted to add paths to role that already has hash prefixes"}
}
r.Paths = mergeStrSlices(r.Paths, paths)
return nil
}
// AddPathHashPrefixes merges the prefixes into the list of role path hash prefixes
func (r *Role) AddPathHashPrefixes(prefixes []string) error {
if len(prefixes) == 0 {
return nil
}
if len(r.Paths) > 0 {
return ErrInvalidRole{Role: r.Name, Reason: "attempted to add hash prefixes to role that already has paths"}
}
r.PathHashPrefixes = mergeStrSlices(r.PathHashPrefixes, prefixes)
return nil
}
// RemoveKeys removes the ids from the current list of key ids
func (r *Role) RemoveKeys(ids []string) {
r.KeyIDs = subtractStrSlices(r.KeyIDs, ids)
}
// RemovePaths removes the paths from the current list of role paths
func (r *Role) RemovePaths(paths []string) {
r.Paths = subtractStrSlices(r.Paths, paths)
}
// RemovePathHashPrefixes removes the prefixes from the current list of path hash prefixes
func (r *Role) RemovePathHashPrefixes(prefixes []string) {
r.PathHashPrefixes = subtractStrSlices(r.PathHashPrefixes, prefixes)
}
func mergeStrSlices(orig, new []string) []string {
have := make(map[string]bool)
for _, e := range orig {
have[e] = true
}
merged := make([]string, len(orig), len(orig)+len(new))
copy(merged, orig)
for _, e := range new {
if !have[e] {
merged = append(merged, e)
}
}
return merged
}
func subtractStrSlices(orig, remove []string) []string {
kill := make(map[string]bool)
for _, e := range remove {
kill[e] = true
}
var keep []string
for _, e := range orig {
if !kill[e] {
keep = append(keep, e)
}
}
return keep
}
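A hedged sketch (not part of the diff) tying the constructor and the new mutators together; the exact path-matching semantics are an assumption here:

// Hypothetical example: build a delegation role restricted to one path prefix,
// then widen it with AddPaths. Mixing Paths and PathHashPrefixes would be
// rejected by the checks above.
func buildReleasesRole(key PublicKey) (*Role, error) {
	role, err := NewRole("targets/releases", 1, []string{key.ID()}, []string{"library/"}, nil)
	if err != nil {
		return nil, err
	}
	if err := role.AddPaths([]string{"official/"}); err != nil {
		return nil, err
	}
	return role, nil
}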

View File

@ -88,6 +88,15 @@ func (sp *SignedSnapshot) AddMeta(role string, meta FileMeta) {
sp.Dirty = true
}
// DeleteMeta removes a role from the snapshot. If the role doesn't
// exist in the snapshot, it's a noop.
func (sp *SignedSnapshot) DeleteMeta(role string) {
if _, ok := sp.Signed.Meta[role]; ok {
delete(sp.Signed.Meta, role)
sp.Dirty = true
}
}
// SnapshotFromSigned fully unpacks a Signed object into a SignedSnapshot
func SnapshotFromSigned(s *Signed) (*SignedSnapshot, error) {
sp := Snapshot{}

View File

@ -3,6 +3,7 @@ package data
import (
"crypto/sha256"
"encoding/hex"
"errors"
"github.com/jfrazelle/go/canonical/json"
)
@ -88,7 +89,7 @@ func (t *SignedTargets) AddTarget(path string, meta FileMeta) {
// ensuring the keys either already exist, or are added to the map
// of delegation keys
func (t *SignedTargets) AddDelegation(role *Role, keys []*PublicKey) error {
return nil
return errors.New("Not Implemented")
}
// ToSigned partially serializes a SignedTargets for further signing

View File

@ -46,8 +46,10 @@ func (e *Ed25519) RemoveKey(keyID string) error {
// ListKeys returns the list of keys IDs for the role
func (e *Ed25519) ListKeys(role string) []string {
keyIDs := make([]string, 0, len(e.keys))
for id := range e.keys {
keyIDs = append(keyIDs, id)
for id, edCryptoKey := range e.keys {
if edCryptoKey.role == role {
keyIDs = append(keyIDs, id)
}
}
return keyIDs
}
@ -61,23 +63,6 @@ func (e *Ed25519) ListAllKeys() map[string]string {
return keys
}
// Sign generates an Ed25519 signature over the data
func (e *Ed25519) Sign(keyIDs []string, toSign []byte) ([]data.Signature, error) {
signatures := make([]data.Signature, 0, len(keyIDs))
for _, keyID := range keyIDs {
priv := [ed25519.PrivateKeySize]byte{}
copy(priv[:], e.keys[keyID].privKey.Private())
sig := ed25519.Sign(&priv, toSign)
signatures = append(signatures, data.Signature{
KeyID: keyID,
Method: data.EDDSASignature,
Signature: sig[:],
})
}
return signatures, nil
}
// Create generates a new key and returns the public part
func (e *Ed25519) Create(role, algorithm string) (data.PublicKey, error) {
if algorithm != data.ED25519Key {

View File

@ -63,10 +63,10 @@ func (e ErrInvalidKeyLength) Error() string {
// ErrNoKeys indicates no signing keys were found when trying to sign
type ErrNoKeys struct {
keyIDs []string
KeyIDs []string
}
func (e ErrNoKeys) Error() string {
return fmt.Sprintf("could not find necessary signing keys, at least one of these keys must be available: %s",
strings.Join(e.keyIDs, ", "))
strings.Join(e.KeyIDs, ", "))
}

View File

@ -5,14 +5,6 @@ import (
"io"
)
// SigningService defines the necessary functions to determine
// if a user is able to sign with a key, and to perform signing.
type SigningService interface {
// Sign takes a slice of keyIDs and a piece of data to sign
// and returns a slice of signatures and an error
Sign(keyIDs []string, data []byte) ([]data.Signature, error)
}
// KeyService provides management of keys locally. It will never
// accept or provide private keys. Communication between the KeyService
// and a SigningService happen behind the Create function.
@ -44,10 +36,9 @@ type KeyService interface {
ImportRootKey(source io.Reader) error
}
// CryptoService defines a unified Signing and Key Service as this
// will be most useful for most applications.
// CryptoService is deprecated and all instances of its use should be
// replaced with KeyService
type CryptoService interface {
SigningService
KeyService
}

View File

@ -46,7 +46,7 @@ func Sign(service CryptoService, s *data.Signed, keys ...data.PublicKey) error {
// Check to ensure we have at least one signing key
if len(privKeys) == 0 {
return ErrNoKeys{keyIDs: ids}
return ErrNoKeys{KeyIDs: ids}
}
// Do signing and generate list of signatures

View File

@ -1,9 +1,13 @@
package store
import "fmt"
// ErrMetaNotFound indicates we did not find a particular piece
// of metadata in the store
type ErrMetaNotFound struct{}
type ErrMetaNotFound struct {
Role string
}
func (err ErrMetaNotFound) Error() string {
return "no trust data available"
return fmt.Sprintf("%s trust data unavailable", err.Role)
}

View File

@ -45,6 +45,9 @@ func (f *FilesystemStore) GetMeta(name string, size int64) ([]byte, error) {
path := filepath.Join(f.metaDir, fileName)
meta, err := ioutil.ReadFile(path)
if err != nil {
if os.IsNotExist(err) {
err = ErrMetaNotFound{Role: name}
}
return nil, err
}
return meta, nil
@ -65,7 +68,18 @@ func (f *FilesystemStore) SetMultiMeta(metas map[string][]byte) error {
func (f *FilesystemStore) SetMeta(name string, meta []byte) error {
fileName := fmt.Sprintf("%s.%s", name, f.metaExtension)
path := filepath.Join(f.metaDir, fileName)
if err := ioutil.WriteFile(path, meta, 0600); err != nil {
// Ensures the parent directories of the file we are about to write exist
err := os.MkdirAll(filepath.Dir(path), 0700)
if err != nil {
return err
}
// if something already exists, just delete it and re-write it
os.RemoveAll(path)
// Write the file to disk
if err = ioutil.WriteFile(path, meta, 0600); err != nil {
return err
}
return nil

View File

@ -1,7 +1,18 @@
// A Store that can fetch and set metadata on a remote server.
// Some API constraints:
// - Response bodies for error codes should be unmarshallable as:
// {"errors": [{..., "detail": <serialized validation error>}]}
// else validation error details, etc. will be unparsable. The errors
// should have a github.com/docker/notary/tuf/validation/SerializableError
// in the Details field.
// If writing your own server, please have a look at
// github.com/docker/distribution/registry/api/errcode
package store
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
@ -12,6 +23,7 @@ import (
"path"
"github.com/Sirupsen/logrus"
"github.com/docker/notary/tuf/validation"
)
// ErrServerUnavailable indicates an error from the server. code allows us to
@ -21,7 +33,7 @@ type ErrServerUnavailable struct {
}
func (err ErrServerUnavailable) Error() string {
return fmt.Sprintf("Unable to reach trust server at this time: %d.", err.code)
return fmt.Sprintf("unable to reach trust server at this time: %d.", err.code)
}
// ErrMaliciousServer indicates the server returned a response that is highly suspected
@ -30,7 +42,20 @@ func (err ErrServerUnavailable) Error() string {
type ErrMaliciousServer struct{}
func (err ErrMaliciousServer) Error() string {
return "Trust server returned a bad response."
return "trust server returned a bad response."
}
// ErrInvalidOperation indicates that the server returned a 400 response and
// propagates any body we received.
type ErrInvalidOperation struct {
msg string
}
func (err ErrInvalidOperation) Error() string {
if err.msg != "" {
return fmt.Sprintf("trust server rejected operation: %s", err.msg)
}
return "trust server rejected operation."
}
// HTTPStore manages pulling and pushing metadata from and to a remote
@ -70,6 +95,42 @@ func NewHTTPStore(baseURL, metaPrefix, metaExtension, targetsPrefix, keyExtensio
}, nil
}
func tryUnmarshalError(resp *http.Response, defaultError error) error {
bodyBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
return defaultError
}
var parsedErrors struct {
Errors []struct {
Detail validation.SerializableError `json:"detail"`
} `json:"errors"`
}
if err := json.Unmarshal(bodyBytes, &parsedErrors); err != nil {
return defaultError
}
if len(parsedErrors.Errors) != 1 {
return defaultError
}
err = parsedErrors.Errors[0].Detail.Error
if err == nil {
return defaultError
}
return err
}
func translateStatusToError(resp *http.Response) error {
switch resp.StatusCode {
case http.StatusOK:
return nil
case http.StatusNotFound:
return ErrMetaNotFound{}
case http.StatusBadRequest:
return tryUnmarshalError(resp, ErrInvalidOperation{})
default:
return ErrServerUnavailable{code: resp.StatusCode}
}
}
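A caller-side sketch (illustration, not part of this change) of what the mapping above buys: HTTP status handling collapses into a type switch on the returned error.

// describeStoreError is a hypothetical helper distinguishing the translated
// errors without inspecting HTTP status codes.
func describeStoreError(err error) string {
	switch err.(type) {
	case nil:
		return "success"
	case ErrMetaNotFound:
		return "metadata not found on the server"
	case ErrInvalidOperation:
		return "server rejected the update"
	case ErrServerUnavailable:
		return "server unavailable or returned an unexpected status"
	default:
		return err.Error()
	}
}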
// GetMeta downloads the named meta file with the given size. A short body
// is acceptable because in the case of timestamp.json, the size is a cap,
// not an exact length.
@ -87,11 +148,9 @@ func (s HTTPStore) GetMeta(name string, size int64) ([]byte, error) {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusNotFound {
return nil, ErrMetaNotFound{}
} else if resp.StatusCode != http.StatusOK {
if err := translateStatusToError(resp); err != nil {
logrus.Debugf("received HTTP status %d when requesting %s.", resp.StatusCode, name)
return nil, ErrServerUnavailable{code: resp.StatusCode}
return nil, err
}
if resp.ContentLength > size {
return nil, ErrMaliciousServer{}
@ -120,12 +179,31 @@ func (s HTTPStore) SetMeta(name string, blob []byte) error {
return err
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusNotFound {
return ErrMetaNotFound{}
} else if resp.StatusCode != http.StatusOK {
return ErrServerUnavailable{code: resp.StatusCode}
return translateStatusToError(resp)
}
// NewMultiPartMetaRequest builds a request with the provided metadata updates
// in multipart form
func NewMultiPartMetaRequest(url string, metas map[string][]byte) (*http.Request, error) {
body := &bytes.Buffer{}
writer := multipart.NewWriter(body)
for role, blob := range metas {
part, err := writer.CreateFormFile("files", role)
if err != nil {
// a failed CreateFormFile would otherwise leave part nil and the error shadowed
return nil, err
}
_, err = io.Copy(part, bytes.NewBuffer(blob))
if err != nil {
return nil, err
}
}
err := writer.Close()
if err != nil {
return nil, err
}
req, err := http.NewRequest("POST", url, body)
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", writer.FormDataContentType())
return req, nil
}
// SetMultiMeta does a single batch upload of multiple pieces of TUF metadata.
@ -136,21 +214,7 @@ func (s HTTPStore) SetMultiMeta(metas map[string][]byte) error {
if err != nil {
return err
}
body := &bytes.Buffer{}
writer := multipart.NewWriter(body)
for role, blob := range metas {
part, err := writer.CreateFormFile("files", role)
_, err = io.Copy(part, bytes.NewBuffer(blob))
if err != nil {
return err
}
}
err = writer.Close()
if err != nil {
return err
}
req, err := http.NewRequest("POST", url.String(), body)
req.Header.Set("Content-Type", writer.FormDataContentType())
req, err := NewMultiPartMetaRequest(url.String(), metas)
if err != nil {
return err
}
@ -159,12 +223,7 @@ func (s HTTPStore) SetMultiMeta(metas map[string][]byte) error {
return err
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusNotFound {
return ErrMetaNotFound{}
} else if resp.StatusCode != http.StatusOK {
return ErrServerUnavailable{code: resp.StatusCode}
}
return nil
return translateStatusToError(resp)
}
func (s HTTPStore) buildMetaURL(name string) (*url.URL, error) {
@ -212,10 +271,8 @@ func (s HTTPStore) GetTarget(path string) (io.ReadCloser, error) {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusNotFound {
return nil, ErrMetaNotFound{}
} else if resp.StatusCode != http.StatusOK {
return nil, ErrServerUnavailable{code: resp.StatusCode}
if err := translateStatusToError(resp); err != nil {
return nil, err
}
return resp.Body, nil
}
@ -235,10 +292,8 @@ func (s HTTPStore) GetKey(role string) ([]byte, error) {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusNotFound {
return nil, ErrMetaNotFound{}
} else if resp.StatusCode != http.StatusOK {
return nil, ErrServerUnavailable{code: resp.StatusCode}
if err := translateStatusToError(resp); err != nil {
return nil, err
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {

View File

@ -7,7 +7,7 @@ import (
"encoding/hex"
"encoding/json"
"fmt"
"path/filepath"
"path"
"strings"
"time"
@ -99,8 +99,24 @@ func (tr *Repo) AddBaseKeys(role string, keys ...data.PublicKey) error {
}
tr.keysDB.AddRole(r)
tr.Root.Dirty = true
return nil
// also, whichever role was switched out needs to be re-signed
// root has already been marked dirty
switch role {
case data.CanonicalSnapshotRole:
if tr.Snapshot != nil {
tr.Snapshot.Dirty = true
}
case data.CanonicalTargetsRole:
if target, ok := tr.Targets[data.CanonicalTargetsRole]; ok {
target.Dirty = true
}
case data.CanonicalTimestampRole:
if tr.Timestamp != nil {
tr.Timestamp.Dirty = true
}
}
return nil
}
// ReplaceBaseKeys is used to replace all keys for the given role with the new keys
@ -157,24 +173,58 @@ func (tr *Repo) RemoveBaseKeys(role string, keyIDs ...string) error {
return nil
}
// GetDelegation finds the role entry representing the provided
// role name or ErrInvalidRole
func (tr *Repo) GetDelegation(role string) (*data.Role, error) {
r := data.Role{Name: role}
if !r.IsDelegation() {
return nil, data.ErrInvalidRole{Role: role, Reason: "not a valid delegated role"}
}
parent := path.Dir(role)
// check the parent role
if parentRole := tr.keysDB.GetRole(parent); parentRole == nil {
return nil, data.ErrInvalidRole{Role: role, Reason: "parent role not found"}
}
// check the parent role's metadata
p, ok := tr.Targets[parent]
if !ok { // the parent targetfile may not exist yet, so it can't be in the list
return nil, data.ErrNoSuchRole{Role: role}
}
foundAt := utils.FindRoleIndex(p.Signed.Delegations.Roles, role)
if foundAt < 0 {
return nil, data.ErrNoSuchRole{Role: role}
}
return p.Signed.Delegations.Roles[foundAt], nil
}
// UpdateDelegations updates the appropriate delegations, either adding
// a new delegation or updating an existing one. If keys are
// provided, the IDs will be added to the role (if they do not exist
// there already), and the keys will be added to the targets file.
// The "before" argument specifies another role which this new role
// will be added in front of (i.e. higher priority) in the delegation list.
// An empty before string indicates to add the role to the end of the
// delegation list.
// A new, empty, targets file will be created for the new role.
func (tr *Repo) UpdateDelegations(role *data.Role, keys []data.PublicKey, before string) error {
func (tr *Repo) UpdateDelegations(role *data.Role, keys []data.PublicKey) error {
if !role.IsDelegation() || !role.IsValid() {
return data.ErrInvalidRole{Role: role.Name}
return data.ErrInvalidRole{Role: role.Name, Reason: "not a valid delegated role"}
}
parent := filepath.Dir(role.Name)
parent := path.Dir(role.Name)
if err := tr.VerifyCanSign(parent); err != nil {
return err
}
// check the parent role's metadata
p, ok := tr.Targets[parent]
if !ok {
return data.ErrInvalidRole{Role: role.Name}
if !ok { // the parent targetfile may not exist yet - if not, then create it
var err error
p, err = tr.InitTargets(parent)
if err != nil {
return err
}
}
for _, k := range keys {
if !utils.StrSliceContains(role.KeyIDs, k.ID()) {
role.KeyIDs = append(role.KeyIDs, k.ID())
@ -183,24 +233,75 @@ func (tr *Repo) UpdateDelegations(role *data.Role, keys []data.PublicKey, before
tr.keysDB.AddKey(k)
}
i := -1
var r *data.Role
for i, r = range p.Signed.Delegations.Roles {
if r.Name == role.Name {
break
}
// if the role has fewer keys than the threshold, it
// will never be able to create a valid targets file
// and should be considered invalid.
if len(role.KeyIDs) < role.Threshold {
return data.ErrInvalidRole{Role: role.Name, Reason: "insufficient keys to meet threshold"}
}
if i >= 0 {
p.Signed.Delegations.Roles[i] = role
foundAt := utils.FindRoleIndex(p.Signed.Delegations.Roles, role.Name)
if foundAt >= 0 {
p.Signed.Delegations.Roles[foundAt] = role
} else {
p.Signed.Delegations.Roles = append(p.Signed.Delegations.Roles, role)
}
// We've made a change to parent. Set it to dirty
p.Dirty = true
roleTargets := data.NewTargets() // NewTargets always marked Dirty
tr.Targets[role.Name] = roleTargets
// We don't actually want to create the new delegation metadata yet.
// When we add a delegation, it may only be signable by a key we don't have
// (hence we are delegating signing).
tr.keysDB.AddRole(role)
utils.RemoveUnusedKeys(p)
return nil
}
// DeleteDelegation removes a delegated targets role from its parent
// targets object. It also deletes the delegation from the snapshot.
// DeleteDelegation will only make use of the role Name field.
func (tr *Repo) DeleteDelegation(role data.Role) error {
if !role.IsDelegation() {
return data.ErrInvalidRole{Role: role.Name, Reason: "not a valid delegated role"}
}
// the role variable must not be used past this assignment for safety
name := role.Name
parent := path.Dir(name)
if err := tr.VerifyCanSign(parent); err != nil {
return err
}
// delete delegated data from Targets map and Snapshot - if they don't
// exist, these are no-ops
delete(tr.Targets, name)
tr.Snapshot.DeleteMeta(name)
p, ok := tr.Targets[parent]
if !ok {
// if there is no parent metadata (the role exists though), then this
// is as good as done.
return nil
}
foundAt := utils.FindRoleIndex(p.Signed.Delegations.Roles, name)
if foundAt >= 0 {
var roles []*data.Role
// slice out deleted role
roles = append(roles, p.Signed.Delegations.Roles[:foundAt]...)
if foundAt+1 < len(p.Signed.Delegations.Roles) {
roles = append(roles, p.Signed.Delegations.Roles[foundAt+1:]...)
}
p.Signed.Delegations.Roles = roles
utils.RemoveUnusedKeys(p)
p.Dirty = true
} // if the role wasn't found, it's as good as deleted
return nil
}
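Putting the pieces together, a hedged end-to-end sketch (not in the commit): it assumes the repo's cryptoservice holds private keys for both the top-level targets role and the delegated key, and that an empty path prefix delegates all paths:

// Hypothetical workflow: delegate targets/releases to a key, then add a target
// through the delegation. AddTargets creates the delegated metadata on demand.
func delegateAndPublish(repo *Repo, key data.PublicKey, meta data.FileMeta) error {
	role, err := data.NewRole(
		"targets/releases", 1, []string{key.ID()}, []string{""}, nil)
	if err != nil {
		return err
	}
	if err := repo.UpdateDelegations(role, []data.PublicKey{key}); err != nil {
		return err
	}
	_, err = repo.AddTargets("targets/releases", data.Files{"v1.0": meta})
	return err
}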
@ -213,7 +314,7 @@ func (tr *Repo) InitRepo(consistent bool) error {
if err := tr.InitRoot(consistent); err != nil {
return err
}
if err := tr.InitTargets(); err != nil {
if _, err := tr.InitTargets(data.CanonicalTargetsRole); err != nil {
return err
}
if err := tr.InitSnapshot(); err != nil {
@ -230,7 +331,7 @@ func (tr *Repo) InitRoot(consistent bool) error {
for _, r := range data.ValidRoles {
role := tr.keysDB.GetRole(r)
if role == nil {
return data.ErrInvalidRole{Role: data.CanonicalRootRole}
return data.ErrInvalidRole{Role: data.CanonicalRootRole, Reason: "root role not initialized in key database"}
}
rootRoles[r] = &role.RootRole
for _, kid := range role.KeyIDs {
@ -248,20 +349,34 @@ func (tr *Repo) InitRoot(consistent bool) error {
return nil
}
// InitTargets initializes an empty targets
func (tr *Repo) InitTargets() error {
// InitTargets initializes an empty targets file for the given role and returns it
func (tr *Repo) InitTargets(role string) (*data.SignedTargets, error) {
r := data.Role{Name: role}
if !r.IsDelegation() && data.CanonicalRole(role) != data.CanonicalTargetsRole {
return nil, data.ErrInvalidRole{
Role: role,
Reason: fmt.Sprintf("role is not a valid targets role name: %s", role),
}
}
targets := data.NewTargets()
tr.Targets[data.ValidRoles["targets"]] = targets
return nil
tr.Targets[data.RoleName(role)] = targets
return targets, nil
}
// InitSnapshot initializes a snapshot based on the current root and targets
func (tr *Repo) InitSnapshot() error {
if tr.Root == nil {
return ErrNotLoaded{role: "root"}
}
root, err := tr.Root.ToSigned()
if err != nil {
return err
}
targets, err := tr.Targets[data.ValidRoles["targets"]].ToSigned()
if _, ok := tr.Targets[data.RoleName(data.CanonicalTargetsRole)]; !ok {
return ErrNotLoaded{role: "targets"}
}
targets, err := tr.Targets[data.RoleName(data.CanonicalTargetsRole)].ToSigned()
if err != nil {
return err
}
@ -403,19 +518,61 @@ func (tr Repo) FindTarget(path string) *data.FileMeta {
return walkTargets("targets")
}
// AddTargets will attempt to add the given targets specifically to
// the directed role. If the user does not have the signing keys for the role
// the function will return an error and the full slice of targets.
func (tr *Repo) AddTargets(role string, targets data.Files) (data.Files, error) {
t, ok := tr.Targets[role]
if !ok {
return targets, data.ErrInvalidRole{Role: role}
// VerifyCanSign returns nil if the role exists and we have at least one
// signing key for the role, and an error otherwise (the role does not exist,
// or there are no signing keys). This does not check that we have enough
// signing keys to meet the threshold, since we want to support the use case
// of multiple signers for a role.
func (tr *Repo) VerifyCanSign(roleName string) error {
role := tr.keysDB.GetRole(roleName)
if role == nil {
return data.ErrInvalidRole{Role: roleName, Reason: "does not exist"}
}
for _, keyID := range role.KeyIDs {
k := tr.keysDB.GetKey(keyID)
canonicalID, err := utils.CanonicalKeyID(k)
check := []string{keyID}
if err == nil {
check = append(check, canonicalID)
}
for _, id := range check {
p, _, err := tr.cryptoService.GetPrivateKey(id)
if err == nil && p != nil {
return nil
}
}
}
return signed.ErrNoKeys{KeyIDs: role.KeyIDs}
}
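
Callers can treat this as a cheap pre-flight check before touching delegated metadata, which is how AddTargets and RemoveTargets use it below. A sketch, assuming the Repo type is exported from a tuf package at the path shown:

package example

import (
	tuf "github.com/docker/notary/tuf" // assumed import path for the Repo type
	"github.com/docker/notary/tuf/signed"
)

// canSign reports whether we hold at least one signing key for the role,
// distinguishing "no usable keys" from other failures such as an unknown role.
func canSign(repo *tuf.Repo, role string) (bool, error) {
	switch err := repo.VerifyCanSign(role); err.(type) {
	case nil:
		return true, nil
	case signed.ErrNoKeys:
		return false, nil
	default:
		return false, err
	}
}
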
// AddTargets will attempt to add the given targets specifically to
// the directed role. If the metadata for the role doesn't exist yet,
// AddTargets will create one.
func (tr *Repo) AddTargets(role string, targets data.Files) (data.Files, error) {
err := tr.VerifyCanSign(role)
if err != nil {
return nil, err
}
// check the role's metadata
t, ok := tr.Targets[role]
if !ok { // the targets file may not exist yet - if not, create it
var err error
t, err = tr.InitTargets(role)
if err != nil {
return nil, err
}
}
// VerifyCanSign already makes sure this is not nil
r := tr.keysDB.GetRole(role)
invalid := make(data.Files)
for path, target := range targets {
pathDigest := sha256.Sum256([]byte(path))
pathHex := hex.EncodeToString(pathDigest[:])
r := tr.keysDB.GetRole(role)
if role == data.ValidRoles["targets"] || (r.CheckPaths(path) || r.CheckPrefixes(pathHex)) {
t.Signed.Targets[path] = target
} else {
@ -431,15 +588,19 @@ func (tr *Repo) AddTargets(role string, targets data.Files) (data.Files, error)
// RemoveTargets removes the given target (paths) from the given target role (delegation)
func (tr *Repo) RemoveTargets(role string, targets ...string) error {
t, ok := tr.Targets[role]
if !ok {
return data.ErrInvalidRole{Role: role}
if err := tr.VerifyCanSign(role); err != nil {
return err
}
for _, path := range targets {
delete(t.Signed.Targets, path)
// if the role exists but metadata does not yet, then our work is done
t, ok := tr.Targets[role]
if ok {
for _, path := range targets {
delete(t.Signed.Targets, path)
}
t.Dirty = true
}
t.Dirty = true
return nil
}
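
As a usage sketch (the delegation name, helper name, and tuf import path are assumptions; data.Files is taken to be the path-to-FileMeta map used throughout this file):

package example

import (
	tuf "github.com/docker/notary/tuf" // assumed import path for the Repo type
	"github.com/docker/notary/tuf/data"
)

// publishToReleases adds a single target to the "targets/releases" delegation
// and then removes it again, exercising both paths above. The FileMeta is
// assumed to have been built elsewhere (e.g. from a manifest digest).
func publishToReleases(repo *tuf.Repo, tag string, meta data.FileMeta) error {
	if _, err := repo.AddTargets("targets/releases", data.Files{tag: meta}); err != nil {
		return err
	}
	return repo.RemoveTargets("targets/releases", tag)
}
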
@ -494,6 +655,12 @@ func (tr *Repo) SignRoot(expires time.Time) (*data.Signed, error) {
// SignTargets signs the targets file for the given top level or delegated targets role
func (tr *Repo) SignTargets(role string, expires time.Time) (*data.Signed, error) {
logrus.Debugf("sign targets called for role %s", role)
if _, ok := tr.Targets[role]; !ok {
return nil, data.ErrInvalidRole{
Role: role,
Reason: "SignTargets called with non-existant targets role",
}
}
tr.Targets[role].Signed.Expires = expires
tr.Targets[role].Signed.Version++
signed, err := tr.Targets[role].ToSigned()
@ -532,6 +699,7 @@ func (tr *Repo) SignSnapshot(expires time.Time) (*data.Signed, error) {
if err != nil {
return nil, err
}
targets.Dirty = false
}
tr.Snapshot.Signed.Expires = expires
tr.Snapshot.Signed.Version++

View File

@ -0,0 +1,31 @@
package utils
import (
"strings"
)
// RoleList is a list of roles
type RoleList []string
// Len returns the length of the list
func (r RoleList) Len() int {
return len(r)
}
// Less returns true if the item at i should be sorted
// before the item at j. Role names are ordered first by the
// number of "/"-separated segments (so parent roles sort before
// their delegations), then lexicographically.
func (r RoleList) Less(i, j int) bool {
segsI := strings.Split(r[i], "/")
segsJ := strings.Split(r[j], "/")
if len(segsI) == len(segsJ) {
return r[i] < r[j]
}
return len(segsI) < len(segsJ)
}
// Swap the items at 2 locations in the list
func (r RoleList) Swap(i, j int) {
r[i], r[j] = r[j], r[i]
}
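
A short usage sketch: sorting with RoleList orders parents before their delegations, which is useful when metadata has to be created or walked top-down (the utils import path and example role names are assumptions):

package example

import (
	"sort"

	"github.com/docker/notary/tuf/utils" // assumed import path for this file
)

// sortedRoles orders role names so that parents ("targets") come before
// their delegations ("targets/releases", "targets/releases/nightly").
func sortedRoles(names []string) []string {
	sort.Sort(utils.RoleList(names))
	return names
}

// sortedRoles([]string{"targets/releases/nightly", "targets", "targets/releases"})
// returns ["targets", "targets/releases", "targets/releases/nightly"]
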

View File

@ -0,0 +1,85 @@
package utils
import (
"fmt"
"sync"
)
// ErrEmptyStack is used when an action that requires some
// content is invoked and the stack is empty
type ErrEmptyStack struct {
action string
}
func (err ErrEmptyStack) Error() string {
return fmt.Sprintf("attempted to %s with empty stack", err.action)
}
// ErrBadTypeCast is used by the typed Pop functions (e.g. PopString) when
// the top item cannot be cast to the requested type
type ErrBadTypeCast struct{}
func (err ErrBadTypeCast) Error() string {
return "attempted to do a typed pop and item was not of type"
}
// Stack is a simple type agnostic stack implementation
type Stack struct {
s []interface{}
l sync.Mutex
}
// NewStack creates a new stack
func NewStack() *Stack {
s := &Stack{
s: make([]interface{}, 0),
}
return s
}
// Push adds an item to the top of the stack.
func (s *Stack) Push(item interface{}) {
s.l.Lock()
defer s.l.Unlock()
s.s = append(s.s, item)
}
// Pop removes and returns the top item on the stack, or returns
// ErrEmptyStack if the stack has no content
func (s *Stack) Pop() (interface{}, error) {
s.l.Lock()
defer s.l.Unlock()
l := len(s.s)
if l > 0 {
item := s.s[l-1]
s.s = s.s[:l-1]
return item, nil
}
return nil, ErrEmptyStack{action: "Pop"}
}
// PopString attempts to cast the top item on the stack to the string type.
// If this succeeds, it removes and returns the top item. If the item
// is not of the string type, ErrBadTypeCast is returned. If the stack
// is empty, ErrEmptyStack is returned
func (s *Stack) PopString() (string, error) {
s.l.Lock()
defer s.l.Unlock()
l := len(s.s)
if l > 0 {
item := s.s[l-1]
if item, ok := item.(string); ok {
s.s = s.s[:l-1]
return item, nil
}
return "", ErrBadTypeCast{}
}
return "", ErrEmptyStack{action: "PopString"}
}
// Empty returns true if the stack is empty
func (s *Stack) Empty() bool {
s.l.Lock()
defer s.l.Unlock()
return len(s.s) == 0
}
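
A sketch of the intended use: iterating over a delegation tree without recursion. The import path and the children callback are assumptions standing in for reading Delegations.Roles from real metadata:

package example

import (
	"github.com/docker/notary/tuf/utils" // assumed import path for this file
)

// walkRoles visits role names depth-first starting from "targets", using the
// Stack above in place of recursion. children maps a role name to the names
// of its delegations.
func walkRoles(children func(string) []string, visit func(string)) error {
	s := utils.NewStack()
	s.Push("targets")
	for !s.Empty() {
		role, err := s.PopString()
		if err != nil {
			return err
		}
		visit(role)
		for _, child := range children(role) {
			s.Push(child)
		}
	}
	return nil
}
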

View File

@ -105,3 +105,44 @@ func DoHash(alg string, d []byte) []byte {
}
return nil
}
// UnusedDelegationKeys returns the IDs of the delegation keys in the given
// targets file that are no longer referenced by any delegation role
func UnusedDelegationKeys(t data.SignedTargets) []string {
// gather the key IDs that are still referenced by an active role
// in this targets file
found := make(map[string]bool)
for _, r := range t.Signed.Delegations.Roles {
for _, id := range r.KeyIDs {
found[id] = true
}
}
var discard []string
for id := range t.Signed.Delegations.Keys {
if !found[id] {
discard = append(discard, id)
}
}
return discard
}
// RemoveUnusedKeys removes from the targets file's delegation key map any
// keys that are no longer referenced by a delegation role
func RemoveUnusedKeys(t *data.SignedTargets) {
unusedIDs := UnusedDelegationKeys(*t)
for _, id := range unusedIDs {
delete(t.Signed.Delegations.Keys, id)
}
}
// FindRoleIndex returns the index of the role named <name> or -1 if no
// matching role is found.
func FindRoleIndex(rs []*data.Role, name string) int {
for i, r := range rs {
if r.Name == name {
return i
}
}
return -1
}
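
Taken together, these helpers implement the pruning that DeleteDelegation performs earlier in this diff; a sketch of how they compose (the import paths and helper name are the only assumptions):

package example

import (
	"github.com/docker/notary/tuf/data"
	"github.com/docker/notary/tuf/utils"
)

// dropRole removes the named delegation from a targets file and discards any
// delegation keys that no longer back an active role.
func dropRole(t *data.SignedTargets, name string) {
	if i := utils.FindRoleIndex(t.Signed.Delegations.Roles, name); i >= 0 {
		t.Signed.Delegations.Roles = append(
			t.Signed.Delegations.Roles[:i],
			t.Signed.Delegations.Roles[i+1:]...)
	}
	// RemoveUnusedKeys calls UnusedDelegationKeys internally and deletes the
	// stale entries from t.Signed.Delegations.Keys
	utils.RemoveUnusedKeys(t)
	t.Dirty = true
}
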

View File

@ -0,0 +1,126 @@
package validation
import (
"encoding/json"
"fmt"
)
// VALIDATION ERRORS
// ErrValidation represents a general validation error
type ErrValidation struct {
Msg string
}
func (err ErrValidation) Error() string {
return fmt.Sprintf("An error occurred during validation: %s", err.Msg)
}
// ErrBadHierarchy represents missing metadata, currently a missing snapshot.
// When delegations are implemented it will also represent a missing
// delegation parent
type ErrBadHierarchy struct {
Missing string
Msg string
}
func (err ErrBadHierarchy) Error() string {
return fmt.Sprintf("Metadata hierarchy is incomplete: %s", err.Msg)
}
// ErrBadRoot represents a failure validating the root
type ErrBadRoot struct {
Msg string
}
func (err ErrBadRoot) Error() string {
return fmt.Sprintf("The root metadata is invalid: %s", err.Msg)
}
// ErrBadTargets represents a failure to validate a targets file (including delegations)
type ErrBadTargets struct {
Msg string
}
func (err ErrBadTargets) Error() string {
return fmt.Sprintf("The targets metadata is invalid: %s", err.Msg)
}
// ErrBadSnapshot represents a failure to validate the snapshot
type ErrBadSnapshot struct {
Msg string
}
func (err ErrBadSnapshot) Error() string {
return fmt.Sprintf("The snapshot metadata is invalid: %s", err.Msg)
}
// END VALIDATION ERRORS
// SerializableError is a struct that can be used to serialize an error as JSON
type SerializableError struct {
Name string
Error error
}
// UnmarshalJSON attempts to unmarshal the error into the right type
func (s *SerializableError) UnmarshalJSON(text []byte) (err error) {
var x struct{ Name string }
err = json.Unmarshal(text, &x)
if err != nil {
return
}
var theError error
switch x.Name {
case "ErrValidation":
var e struct{ Error ErrValidation }
err = json.Unmarshal(text, &e)
theError = e.Error
case "ErrBadHierarchy":
var e struct{ Error ErrBadHierarchy }
err = json.Unmarshal(text, &e)
theError = e.Error
case "ErrBadRoot":
var e struct{ Error ErrBadRoot }
err = json.Unmarshal(text, &e)
theError = e.Error
case "ErrBadTargets":
var e struct{ Error ErrBadTargets }
err = json.Unmarshal(text, &e)
theError = e.Error
case "ErrBadSnapshot":
var e struct{ Error ErrBadSnapshot }
err = json.Unmarshal(text, &e)
theError = e.Error
default:
err = fmt.Errorf("do not know how to unmarshal %s", x.Name)
return
}
if err != nil {
return
}
s.Name = x.Name
s.Error = theError
return nil
}
// NewSerializableError wraps one of the above validation errors in a
// SerializableError so that it can be marshaled to JSON
func NewSerializableError(err error) (*SerializableError, error) {
// make sure it's one of our errors
var name string
switch err.(type) {
case ErrValidation:
name = "ErrValidation"
case ErrBadHierarchy:
name = "ErrBadHierarchy"
case ErrBadRoot:
name = "ErrBadRoot"
case ErrBadTargets:
name = "ErrBadTargets"
case ErrBadSnapshot:
name = "ErrBadSnapshot"
default:
return nil, fmt.Errorf("does not support serializing non-validation errors")
}
return &SerializableError{Name: name, Error: err}, nil
}
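
A round-trip sketch showing why the custom UnmarshalJSON is needed: encoding/json cannot decode into the error interface without knowing the concrete type, so the Name field drives the switch above. The import path and helper name are assumptions:

package example

import (
	"encoding/json"
	"fmt"

	"github.com/docker/notary/server/validation" // assumed import path for this file
)

// roundTrip serializes a validation error and recovers its concrete type on
// the other side, as a server and client would across an HTTP response body.
func roundTrip() error {
	serializable, err := validation.NewSerializableError(
		validation.ErrBadRoot{Msg: "missing root keys"})
	if err != nil {
		return err
	}
	raw, err := json.Marshal(serializable)
	if err != nil {
		return err
	}
	var parsed validation.SerializableError
	if err := json.Unmarshal(raw, &parsed); err != nil {
		return err
	}
	fmt.Printf("%s: %v\n", parsed.Name, parsed.Error)
	return nil
}
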