vendor notary for docker 1.11

Signed-off-by: Riyaz Faizullabhoy <riyaz.faizullabhoy@docker.com>
Riyaz Faizullabhoy 2016-03-17 10:23:18 -07:00
parent 666563b190
commit ab3772f72f
35 changed files with 647 additions and 496 deletions

View File

@ -171,7 +171,7 @@ RUN set -x \
&& rm -rf "$GOPATH"
# Install notary server
ENV NOTARY_VERSION v0.2.0
ENV NOTARY_VERSION docker-v1.11-3
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \

View File

@ -117,7 +117,7 @@ RUN set -x \
&& rm -rf "$GOPATH"
# Install notary server
ENV NOTARY_VERSION v0.2.0
ENV NOTARY_VERSION docker-v1.11-3
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \

View File

@ -135,7 +135,7 @@ RUN set -x \
&& rm -rf "$GOPATH"
# Install notary server
ENV NOTARY_VERSION v0.2.0
ENV NOTARY_VERSION docker-v1.11-3
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \

View File

@ -127,7 +127,7 @@ RUN set -x \
&& rm -rf "$GOPATH"
# Install notary and notary-server
ENV NOTARY_VERSION v0.2.0
ENV NOTARY_VERSION docker-v1.11-3
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \

View File

@ -108,7 +108,7 @@ RUN set -x \
&& rm -rf "$GOPATH"
# Install notary server
ENV NOTARY_VERSION v0.2.0
ENV NOTARY_VERSION docker-v1.11-3
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \

View File

@ -202,17 +202,17 @@ func convertTarget(t client.Target) (target, error) {
func (cli *DockerCli) getPassphraseRetriever() passphrase.Retriever {
aliasMap := map[string]string{
"root": "root",
"snapshot": "repository",
"targets": "repository",
"targets/releases": "repository",
"root": "root",
"snapshot": "repository",
"targets": "repository",
"default": "repository",
}
baseRetriever := passphrase.PromptRetrieverWithInOut(cli.in, cli.out, aliasMap)
env := map[string]string{
"root": os.Getenv("DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE"),
"snapshot": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"),
"targets": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"),
"targets/releases": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"),
"root": os.Getenv("DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE"),
"snapshot": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"),
"targets": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"),
"default": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"),
}
// Backwards compatibility with old env names. We should remove this in 1.10
@ -222,11 +222,11 @@ func (cli *DockerCli) getPassphraseRetriever() passphrase.Retriever {
fmt.Fprintf(cli.err, "[DEPRECATED] The environment variable DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE has been deprecated and will be removed in v1.10. Please use DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE\n")
}
}
if env["snapshot"] == "" || env["targets"] == "" || env["targets/releases"] == "" {
if env["snapshot"] == "" || env["targets"] == "" || env["default"] == "" {
if passphrase := os.Getenv("DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE"); passphrase != "" {
env["snapshot"] = passphrase
env["targets"] = passphrase
env["targets/releases"] = passphrase
env["default"] = passphrase
fmt.Fprintf(cli.err, "[DEPRECATED] The environment variable DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE has been deprecated and will be removed in v1.10. Please use DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE\n")
}
}
@ -235,6 +235,10 @@ func (cli *DockerCli) getPassphraseRetriever() passphrase.Retriever {
if v := env[alias]; v != "" {
return v, numAttempts > 1, nil
}
// For non-root roles, we can also try the "default" alias if it is specified
if v := env["default"]; v != "" && alias != data.CanonicalRootRole {
return v, numAttempts > 1, nil
}
return baseRetriever(keyName, alias, createNew, numAttempts)
}
}
@ -473,7 +477,7 @@ func (cli *DockerCli) trustedPush(repoInfo *registry.RepositoryInfo, tag string,
sort.Strings(keys)
rootKeyID = keys[0]
} else {
rootPublicKey, err := repo.CryptoService.Create(data.CanonicalRootRole, data.ECDSAKey)
rootPublicKey, err := repo.CryptoService.Create(data.CanonicalRootRole, "", data.ECDSAKey)
if err != nil {
return err
}

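The hunk above replaces the per-role "targets/releases" alias with a catch-all "default" alias and lets any non-root role fall back to it when no role-specific passphrase is set. Below is a minimal, self-contained sketch of that lookup order (environment value first, then the "default" alias for non-root roles, then the wrapped prompt retriever); `newEnvRetriever` and the hard-coded "root" comparison are illustrative stand-ins for Docker's `getPassphraseRetriever` and `data.CanonicalRootRole`, not the vendored code itself.

```go
package main

import (
	"fmt"
	"os"
)

// retriever mirrors notary's passphrase.Retriever signature as used in the
// diff: it returns the passphrase, whether to give up, and an error.
type retriever func(keyName, alias string, createNew bool, attempts int) (string, bool, error)

// newEnvRetriever sketches the lookup order from the diff: a per-alias
// environment value wins, non-root roles may fall back to the "default"
// alias, and everything else goes to the wrapped prompt-based retriever.
func newEnvRetriever(prompt retriever) retriever {
	env := map[string]string{
		"root":     os.Getenv("DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE"),
		"snapshot": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"),
		"targets":  os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"),
		"default":  os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"),
	}
	return func(keyName, alias string, createNew bool, attempts int) (string, bool, error) {
		if v := env[alias]; v != "" {
			return v, attempts > 1, nil
		}
		// Non-root roles (for example delegations) may use the "default" passphrase.
		if v := env["default"]; v != "" && alias != "root" {
			return v, attempts > 1, nil
		}
		return prompt(keyName, alias, createNew, attempts)
	}
}

func main() {
	// Simulate the repository passphrase being provided via the environment.
	os.Setenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE", "repopass")

	prompt := func(keyName, alias string, createNew bool, attempts int) (string, bool, error) {
		return "", true, fmt.Errorf("no passphrase for %s (%s)", keyName, alias)
	}
	r := newEnvRetriever(prompt)

	// A delegation alias like "targets/releases" is not in the map, but being
	// non-root it falls back to the "default" passphrase.
	pass, _, err := r("abc123", "targets/releases", false, 1)
	fmt.Println(pass, err)
}
```
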
View File

@ -52,7 +52,7 @@ clone git github.com/docker/distribution d06d6d3b093302c02a93153ac7b06ebc0ffd179
clone git github.com/vbatts/tar-split v0.9.11
# get desired notary commit, might also need to be updated in Dockerfile
clone git github.com/docker/notary v0.2.0
clone git github.com/docker/notary docker-v1.11-3
clone git google.golang.org/grpc a22b6611561e9f0a3e0919690dd2caf48f14c517 https://github.com/grpc/grpc-go.git
clone git github.com/miekg/pkcs11 df8ae6ca730422dba20c768ff38ef7d79077a59f

View File

@ -232,6 +232,7 @@ func notaryClientEnv(cmd *exec.Cmd) {
fmt.Sprintf("NOTARY_ROOT_PASSPHRASE=%s", pwd),
fmt.Sprintf("NOTARY_TARGETS_PASSPHRASE=%s", pwd),
fmt.Sprintf("NOTARY_SNAPSHOT_PASSPHRASE=%s", pwd),
fmt.Sprintf("NOTARY_DELEGATION_PASSPHRASE=%s", pwd),
}
cmd.Env = append(os.Environ(), env...)
}

View File

@ -0,0 +1,26 @@
# Changelog
## [v0.2](https://github.com/docker/notary/releases/tag/v0.2.0) 2/24/2016
+ Add support for delegation roles in `notary` server and client
+ Add `notary CLI` commands for managing delegation roles: `notary delegation`
+ `add`, `list` and `remove` subcommands
+ Enhance `notary CLI` commands for adding targets to delegation roles
+ `notary add --roles` and `notary remove --roles` to manipulate targets for delegations
+ Support for rotating the snapshot key to one managed by the `notary` server
+ Add consistent download functionality to download metadata and content by checksum
+ Update `docker-compose` configuration to use official mariadb image
+ deprecate `notarymysql`
+ default to using a volume for `data` directory
+ use separate databases for `notary-server` and `notary-signer` with separate users
+ Add `notary CLI` command for changing private key passphrases: `notary key passwd`
+ Enhance `notary CLI` commands for importing and exporting keys
+ Change default `notary CLI` log level to fatal, introduce new verbose (error-level) and debug-level settings
+ Store roles as PEM headers in private keys, incompatible with previous notary v0.1 key format
+ No longer store keys as `<KEY_ID>_role.key`, instead store as `<KEY_ID>.key`; new private keys from new notary clients will crash old notary clients
+ Support logging as JSON format on server and signer
+ Support mutual TLS between notary client and notary server
## [v0.1](https://github.com/docker/notary/releases/tag/v0.1) 11/15/2015
+ Initial non-alpha `notary` version
+ Implement TUF (the update framework) with support for root, targets, snapshot, and timestamp roles
+ Add PKCS11 interface to store and sign with keys in HSMs (i.e. Yubikey)

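One practical consequence of the changelog's new key format is worth illustrating: a key's role is now carried as a PEM header inside the private key file rather than in a `<KEY_ID>_role.key` filename. The standard-library sketch below writes and reads such a header; it is only an illustration of the mechanism, real notary keys are additionally encrypted, and "role" is the header name the diffs later in this commit read.

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"encoding/pem"
	"fmt"
)

func main() {
	// Generate a throwaway ECDSA key to stand in for a notary signing key.
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	der, err := x509.MarshalECPrivateKey(key)
	if err != nil {
		panic(err)
	}

	// Encode it with a "role" PEM header, the mechanism the changelog and
	// diffs describe for the new key format.
	block := &pem.Block{
		Type:    "EC PRIVATE KEY",
		Headers: map[string]string{"role": "targets"},
		Bytes:   der,
	}
	pemBytes := pem.EncodeToMemory(block)

	// Reading the role back is just a header lookup on the decoded block.
	decoded, _ := pem.Decode(pemBytes)
	fmt.Println("role:", decoded.Headers["role"])
}
```
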
View File

@ -1,4 +1,4 @@
FROM golang:1.5.1
FROM golang:1.6.0
RUN apt-get update && apt-get install -y \
libltdl-dev \
@ -12,6 +12,4 @@ RUN go get golang.org/x/tools/cmd/vet \
COPY . /go/src/github.com/docker/notary
ENV GOPATH /go/src/github.com/docker/notary/Godeps/_workspace:$GOPATH
WORKDIR /go/src/github.com/docker/notary

View File

@ -20,7 +20,7 @@ GO_EXC = go
NOTARYDIR := /go/src/github.com/docker/notary
# check to be sure pkcs11 lib is always imported with a build tag
GO_LIST_PKCS11 := $(shell go list -e -f '{{join .Deps "\n"}}' ./... | xargs go list -e -f '{{if not .Standard}}{{.ImportPath}}{{end}}' | grep -q pkcs11)
GO_LIST_PKCS11 := $(shell go list -e -f '{{join .Deps "\n"}}' ./... | grep -v /vendor/ | xargs go list -e -f '{{if not .Standard}}{{.ImportPath}}{{end}}' | grep -q pkcs11)
ifeq ($(GO_LIST_PKCS11),)
$(info pkcs11 import was not found anywhere without a build tag, yay)
else
@ -34,7 +34,7 @@ _space := $(empty) $(empty)
COVERDIR=.cover
COVERPROFILE?=$(COVERDIR)/cover.out
COVERMODE=count
PKGS ?= $(shell go list ./... | tr '\n' ' ')
PKGS ?= $(shell go list ./... | grep -v /vendor/ | tr '\n' ' ')
GO_VERSION = $(shell go version | awk '{print $$3}')
@ -79,22 +79,22 @@ ${PREFIX}/bin/static/notary-signer: NOTARY_VERSION $(shell find . -type f -name
@godep go build -tags ${NOTARY_BUILDTAGS} -o $@ ${GO_LDFLAGS_STATIC} ./cmd/notary-signer
endif
vet:
vet:
@echo "+ $@"
ifeq ($(shell uname -s), Darwin)
@test -z "$(shell find . -iname *test*.go | grep -v _test.go | grep -v Godeps | xargs echo "This file should end with '_test':" | tee /dev/stderr)"
@test -z "$(shell find . -iname *test*.go | grep -v _test.go | grep -v vendor | xargs echo "This file should end with '_test':" | tee /dev/stderr)"
else
@test -z "$(shell find . -iname *test*.go | grep -v _test.go | grep -v Godeps | xargs -r echo "This file should end with '_test':" | tee /dev/stderr)"
@test -z "$(shell find . -iname *test*.go | grep -v _test.go | grep -v vendor | xargs -r echo "This file should end with '_test':" | tee /dev/stderr)"
endif
@test -z "$$(go tool vet -printf=false . 2>&1 | grep -v Godeps/_workspace/src/ | tee /dev/stderr)"
@test -z "$$(go tool vet -printf=false . 2>&1 | grep -v vendor/ | tee /dev/stderr)"
fmt:
@echo "+ $@"
@test -z "$$(gofmt -s -l .| grep -v .pb. | grep -v Godeps/_workspace/src/ | tee /dev/stderr)"
@test -z "$$(gofmt -s -l .| grep -v .pb. | grep -v vendor/ | tee /dev/stderr)"
lint:
@echo "+ $@"
@test -z "$$(golint ./... | grep -v .pb. | grep -v Godeps/_workspace/src/ | tee /dev/stderr)"
@test -z "$$(golint ./... | grep -v .pb. | grep -v vendor/ | tee /dev/stderr)"
# Requires that the following:
# go get -u github.com/client9/misspell/cmd/misspell
@ -104,27 +104,27 @@ lint:
# misspell target, don't include Godeps, binaries, python tests, or git files
misspell:
@echo "+ $@"
@test -z "$$(find . -name '*' | grep -v Godeps/_workspace/src/ | grep -v bin/ | grep -v misc/ | grep -v .git/ | xargs misspell | tee /dev/stderr)"
@test -z "$$(find . -name '*' | grep -v vendor/ | grep -v bin/ | grep -v misc/ | grep -v .git/ | xargs misspell | tee /dev/stderr)"
build:
@echo "+ $@"
@go build -tags "${NOTARY_BUILDTAGS}" -v ${GO_LDFLAGS} ./...
@go build -tags "${NOTARY_BUILDTAGS}" -v ${GO_LDFLAGS} $(PKGS)
# When running `go test ./...`, it runs all the suites in parallel, which causes
# problems when running with a yubikey
test: TESTOPTS =
test:
test:
@echo Note: when testing with a yubikey plugged in, make sure to include 'TESTOPTS="-p 1"'
@echo "+ $@ $(TESTOPTS)"
@echo
go test -tags "${NOTARY_BUILDTAGS}" $(TESTOPTS) ./...
go test -tags "${NOTARY_BUILDTAGS}" $(TESTOPTS) $(PKGS)
test-full: TESTOPTS =
test-full: vet lint
@echo Note: when testing with a yubikey plugged in, make sure to include 'TESTOPTS="-p 1"'
@echo "+ $@"
@echo
go test -tags "${NOTARY_BUILDTAGS}" $(TESTOPTS) -v ./...
go test -tags "${NOTARY_BUILDTAGS}" $(TESTOPTS) -v $(PKGS)
protos:
@protoc --go_out=plugins=grpc:. proto/*.proto
@ -139,7 +139,7 @@ define gocover
$(GO_EXC) test $(OPTS) $(TESTOPTS) -covermode="$(COVERMODE)" -coverprofile="$(COVERDIR)/$(subst /,-,$(1)).$(subst $(_space),.,$(NOTARY_BUILDTAGS)).coverage.txt" "$(1)" || exit 1;
endef
gen-cover:
gen-cover:
@mkdir -p "$(COVERDIR)"
$(foreach PKG,$(PKGS),$(call gocover,$(PKG)))
rm -f "$(COVERDIR)"/*testutils*.coverage.txt
@ -179,7 +179,7 @@ mkdir -p ${PREFIX}/cross/$(1)/$(2);
GOOS=$(1) GOARCH=$(2) CGO_ENABLED=0 go build -o ${PREFIX}/cross/$(1)/$(2)/notary -a -tags "static_build netgo" -installsuffix netgo ${GO_LDFLAGS_STATIC} ./cmd/notary;
endef
cross:
cross:
$(foreach GOARCH,$(GOARCHS),$(foreach GOOS,$(GOOSES),$(call template,$(GOOS),$(GOARCH))))

View File

@ -39,14 +39,25 @@ func (err ErrRepoNotInitialized) Error() string {
}
// ErrInvalidRemoteRole is returned when the server is requested to manage
// an unsupported key type
// a key type that is not permitted
type ErrInvalidRemoteRole struct {
Role string
}
func (err ErrInvalidRemoteRole) Error() string {
return fmt.Sprintf(
"notary does not support the server managing the %s key", err.Role)
"notary does not permit the server managing the %s key", err.Role)
}
// ErrInvalidLocalRole is returned when the client wants to manage
// a key type that is not permitted
type ErrInvalidLocalRole struct {
Role string
}
func (err ErrInvalidLocalRole) Error() string {
return fmt.Sprintf(
"notary does not permit the client managing the %s key", err.Role)
}
// ErrRepositoryNotExist is returned when an action is taken on a remote
@ -93,7 +104,7 @@ func repositoryFromKeystores(baseDir, gun, baseURL string, rt http.RoundTripper,
return nil, err
}
cryptoService := cryptoservice.NewCryptoService(gun, keyStores...)
cryptoService := cryptoservice.NewCryptoService(keyStores...)
nRepo := &NotaryRepository{
gun: gun,
@ -140,7 +151,7 @@ func NewTarget(targetName string, targetPath string) (*Target, error) {
return nil, err
}
meta, err := data.NewFileMeta(bytes.NewBuffer(b))
meta, err := data.NewFileMeta(bytes.NewBuffer(b), data.NotaryDefaultHashes...)
if err != nil {
return nil, err
}
@ -223,7 +234,7 @@ func (r *NotaryRepository) Initialize(rootKeyID string, serverManagedRoles ...st
// make unnecessary network calls
for _, role := range locallyManagedKeys {
// This is currently hardcoding the keys to ECDSA.
key, err := r.CryptoService.Create(role, data.ECDSAKey)
key, err := r.CryptoService.Create(role, r.gun, data.ECDSAKey)
if err != nil {
return err
}
@ -520,6 +531,25 @@ func (r *NotaryRepository) ListRoles() ([]RoleWithSignatures, error) {
// Publish pushes the local changes in signed material to the remote notary-server
// Conceptually it performs an operation similar to a `git rebase`
func (r *NotaryRepository) Publish() error {
cl, err := r.GetChangelist()
if err != nil {
return err
}
if err = r.publish(cl); err != nil {
return err
}
if err = cl.Clear(""); err != nil {
// This is not a critical problem when only a single host is pushing
// but will cause weird behaviour if changelist cleanup is failing
// and there are multiple hosts writing to the repo.
logrus.Warn("Unable to clear changelist. You may want to manually delete the folder ", filepath.Join(r.tufRepoPath, "changelist"))
}
return nil
}
// publish pushes the changes in the given changelist to the remote notary-server
// Conceptually it performs an operation similar to a `git rebase`
func (r *NotaryRepository) publish(cl changelist.Changelist) error {
var initialPublish bool
// update first before publishing
_, err := r.Update(true)
@ -543,15 +573,10 @@ func (r *NotaryRepository) Publish() error {
initialPublish = true
} else {
// We could not update, so we cannot publish.
logrus.Error("Could not publish Repository: ", err.Error())
logrus.Error("Could not publish Repository since we could not update: ", err.Error())
return err
}
}
cl, err := r.GetChangelist()
if err != nil {
return err
}
// apply the changelist to the repo
err = applyChangelist(r.tufRepo, cl)
if err != nil {
@ -622,25 +647,14 @@ func (r *NotaryRepository) Publish() error {
return err
}
err = remote.SetMultiMeta(updatedFiles)
if err != nil {
return err
}
err = cl.Clear("")
if err != nil {
// This is not a critical problem when only a single host is pushing
// but will cause weird behaviour if changelist cleanup is failing
// and there are multiple hosts writing to the repo.
logrus.Warn("Unable to clear changelist. You may want to manually delete the folder ", filepath.Join(r.tufRepoPath, "changelist"))
}
return nil
return remote.SetMultiMeta(updatedFiles)
}
// bootstrapRepo loads the repository from the local file system. This attempts
// to load metadata for all roles. Since server snapshots are supported,
// if the snapshot metadata fails to load, that's ok.
// This can also be unified with some cache reading tools from tuf/client.
// This assumes that bootstrapRepo is only used by Publish()
// This assumes that bootstrapRepo is only used by Publish() or RotateKey()
func (r *NotaryRepository) bootstrapRepo() error {
tufRepo := tuf.NewRepo(r.CryptoService)
@ -858,37 +872,53 @@ func (r *NotaryRepository) validateRoot(rootJSON []byte) (*data.SignedRoot, erro
// creates and adds one new key or delegates managing the key to the server.
// These changes are staged in a changelist until publish is called.
func (r *NotaryRepository) RotateKey(role string, serverManagesKey bool) error {
if role == data.CanonicalRootRole || role == data.CanonicalTimestampRole {
return fmt.Errorf(
"notary does not currently support rotating the %s key", role)
}
if serverManagesKey && role == data.CanonicalTargetsRole {
switch {
// We currently support locally or remotely managing snapshot keys...
case role == data.CanonicalSnapshotRole:
break
// locally managing targets keys only
case role == data.CanonicalTargetsRole && !serverManagesKey:
break
case role == data.CanonicalTargetsRole && serverManagesKey:
return ErrInvalidRemoteRole{Role: data.CanonicalTargetsRole}
// and remotely managing timestamp keys only
case role == data.CanonicalTimestampRole && serverManagesKey:
break
case role == data.CanonicalTimestampRole && !serverManagesKey:
return ErrInvalidLocalRole{Role: data.CanonicalTimestampRole}
default:
return fmt.Errorf("notary does not currently permit rotating the %s key", role)
}
var (
pubKey data.PublicKey
err error
pubKey data.PublicKey
err error
errFmtMsg string
)
if serverManagesKey {
switch serverManagesKey {
case true:
pubKey, err = getRemoteKey(r.baseURL, r.gun, role, r.roundTrip)
} else {
pubKey, err = r.CryptoService.Create(role, data.ECDSAKey)
}
if err != nil {
return err
errFmtMsg = "unable to rotate remote key: %s"
default:
pubKey, err = r.CryptoService.Create(role, r.gun, data.ECDSAKey)
errFmtMsg = "unable to generate key: %s"
}
return r.rootFileKeyChange(role, changelist.ActionCreate, pubKey)
if err != nil {
return fmt.Errorf(errFmtMsg, err)
}
cl := changelist.NewMemChangelist()
if err := r.rootFileKeyChange(cl, role, changelist.ActionCreate, pubKey); err != nil {
return err
}
return r.publish(cl)
}
func (r *NotaryRepository) rootFileKeyChange(role, action string, key data.PublicKey) error {
cl, err := changelist.NewFileChangelist(filepath.Join(r.tufRepoPath, "changelist"))
if err != nil {
return err
}
defer cl.Close()
func (r *NotaryRepository) rootFileKeyChange(cl changelist.Changelist, role, action string, key data.PublicKey) error {
kl := make(data.KeyList, 0, 1)
kl = append(kl, key)
meta := changelist.TufRootData{
@ -907,11 +937,7 @@ func (r *NotaryRepository) rootFileKeyChange(role, action string, key data.Publi
role,
metaJSON,
)
err = cl.Add(c)
if err != nil {
return err
}
return nil
return cl.Add(c)
}
// DeleteTrustData removes the trust data stored for this repo in the TUF cache and certificate store on the client side

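The rewritten `RotateKey` above replaces two ad-hoc checks with an explicit matrix of which roles may be rotated and where the new key may live: snapshot keys either locally or on the server, targets keys only locally, timestamp keys only on the server, and root or delegation keys not at all through this path. A self-contained sketch of that decision table follows, using plain strings in place of the `data.Canonical*Role` constants and plain errors in place of `ErrInvalidRemoteRole`/`ErrInvalidLocalRole`.

```go
package main

import (
	"errors"
	"fmt"
)

// canRotate mirrors the switch in RotateKey: it reports whether a key for the
// given role may be rotated, and whether the requested management side
// (server vs. client) is allowed.
func canRotate(role string, serverManagesKey bool) error {
	switch {
	case role == "snapshot":
		// Snapshot keys may be managed locally or by the server.
		return nil
	case role == "targets" && !serverManagesKey:
		return nil
	case role == "targets" && serverManagesKey:
		return errors.New("notary does not permit the server managing the targets key")
	case role == "timestamp" && serverManagesKey:
		return nil
	case role == "timestamp" && !serverManagesKey:
		return errors.New("notary does not permit the client managing the timestamp key")
	default:
		// Root and delegation roles cannot be rotated through this path.
		return fmt.Errorf("notary does not currently permit rotating the %s key", role)
	}
}

func main() {
	cases := []struct {
		role   string
		server bool
	}{
		{"snapshot", true}, {"snapshot", false},
		{"targets", false}, {"targets", true},
		{"timestamp", true}, {"timestamp", false},
		{"root", false},
	}
	for _, c := range cases {
		fmt.Printf("%-10s server=%-5v -> %v\n", c.role, c.server, canRotate(c.role, c.server))
	}
}
```
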
View File

@ -4,7 +4,6 @@ import (
"encoding/json"
"fmt"
"net/http"
"path"
"strings"
"time"
@ -130,22 +129,6 @@ func changeTargetsDelegation(repo *tuf.Repo, c changelist.Change) error {
}
// applies a function repeatedly, falling back on the parent role, until it no
// longer can
func doWithRoleFallback(role string, doFunc func(string) error) error {
for role == data.CanonicalTargetsRole || data.IsDelegation(role) {
err := doFunc(role)
if err == nil {
return nil
}
if _, ok := err.(data.ErrInvalidRole); !ok {
return err
}
role = path.Dir(role)
}
return data.ErrInvalidRole{Role: role}
}
func changeTargetMeta(repo *tuf.Repo, c changelist.Change) error {
var err error
switch c.Action() {
@ -158,21 +141,16 @@ func changeTargetMeta(repo *tuf.Repo, c changelist.Change) error {
}
files := data.Files{c.Path(): *meta}
err = doWithRoleFallback(c.Scope(), func(role string) error {
_, e := repo.AddTargets(role, files)
return e
})
if err != nil {
// Attempt to add the target to this role
if _, err = repo.AddTargets(c.Scope(), files); err != nil {
logrus.Errorf("couldn't add target to %s: %s", c.Scope(), err.Error())
}
case changelist.ActionDelete:
logrus.Debug("changelist remove: ", c.Path())
err = doWithRoleFallback(c.Scope(), func(role string) error {
return repo.RemoveTargets(role, c.Path())
})
if err != nil {
// Attempt to remove the target from this role
if err = repo.RemoveTargets(c.Scope(), c.Path()); err != nil {
logrus.Errorf("couldn't remove target from %s: %s", c.Scope(), err.Error())
}

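The removed `doWithRoleFallback` helper retried a target change on each parent role (for example `targets/releases`, then `targets`) until one accepted it; after this change an add or remove is applied only to the exact role in the change's scope, and a failure is logged instead of silently landing on an ancestor role. The sketch below reconstructs the old walk to make explicit which behaviour was dropped; `isDelegation` stands in for `data.IsDelegation`.

```go
package main

import (
	"errors"
	"fmt"
	"path"
	"strings"
)

var errInvalidRole = errors.New("invalid role")

// isDelegation is a stand-in for data.IsDelegation: delegation roles are
// nested under "targets/", e.g. "targets/releases".
func isDelegation(role string) bool {
	return strings.HasPrefix(role, "targets/")
}

// withRoleFallback sketches the removed helper: try the role itself, and on
// an "invalid role" error retry the parent (path.Dir of the role name).
func withRoleFallback(role string, do func(string) error) error {
	for role == "targets" || isDelegation(role) {
		err := do(role)
		if err == nil {
			return nil
		}
		if err != errInvalidRole {
			return err
		}
		role = path.Dir(role)
	}
	return errInvalidRole
}

func main() {
	// "targets/releases" does not exist in this repo, so the old code would
	// quietly apply the change to its parent, "targets".
	err := withRoleFallback("targets/releases", func(role string) error {
		if role != "targets" {
			return errInvalidRole
		}
		fmt.Println("applied change to", role)
		return nil
	})
	fmt.Println("err:", err)
}
```
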
View File

@ -20,6 +20,10 @@ const (
PubCertPerms = 0755
// Sha256HexSize is how big a Sha256 hex is in number of characters
Sha256HexSize = 64
// SHA256 is the name of SHA256 hash algorithm
SHA256 = "sha256"
// SHA512 is the name of SHA512 hash algorithm
SHA512 = "sha512"
// TrustedCertsDir is the directory, under the notary repo base directory, where trusted certs are stored
TrustedCertsDir = "trusted_certificates"
// PrivDir is the directory, under the notary repo base directory, where private keys are stored
@ -38,6 +42,13 @@ const (
NotaryTargetsExpiry = 3 * Year
NotarySnapshotExpiry = 3 * Year
NotaryTimestampExpiry = 14 * Day
ConsistentMetadataCacheMaxAge = 30 * Day
CurrentMetadataCacheMaxAge = 5 * time.Minute
// CacheMaxAgeLimit is the generally recommended maximum age for Cache-Control headers
// (one year, in seconds, since one year is forever in terms of internet
// content)
CacheMaxAgeLimit = 1 * Year
)
// NotaryDefaultExpiries is the construct used to configure the default expiry times of

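The new constants separate how long consistently-named (checksum-addressed) metadata may be cached (30 days) from current metadata (5 minutes), with a one-year ceiling. The helper below is only an illustration of how such durations translate into `Cache-Control` max-age values in seconds; it is not notary's server code, and `day`/`year` are re-declared locally to keep the sketch self-contained.

```go
package main

import (
	"fmt"
	"time"
)

// Local mirrors of the constants added in the diff (Day and Year are defined
// similarly elsewhere in the notary package).
const (
	day  = 24 * time.Hour
	year = 365 * day

	consistentMetadataCacheMaxAge = 30 * day
	currentMetadataCacheMaxAge    = 5 * time.Minute
	cacheMaxAgeLimit              = 1 * year
)

// cacheControl is an illustrative helper that clamps a max-age to the limit
// and renders it as a Cache-Control header value in seconds.
func cacheControl(maxAge time.Duration) string {
	if maxAge > cacheMaxAgeLimit {
		maxAge = cacheMaxAgeLimit
	}
	return fmt.Sprintf("public, max-age=%d", int64(maxAge/time.Second))
}

func main() {
	fmt.Println("consistent:", cacheControl(consistentMetadataCacheMaxAge)) // max-age=2592000
	fmt.Println("current:   ", cacheControl(currentMetadataCacheMaxAge))    // max-age=300
}
```
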
View File

@ -5,6 +5,6 @@
# subpackage's dependencies within the containing package, as well as the
# subpackage itself.
DEPENDENCIES="$(go list -f $'{{range $f := .Deps}}{{$f}}\n{{end}}' ${1} | grep ${2})"
DEPENDENCIES="$(go list -f $'{{range $f := .Deps}}{{$f}}\n{{end}}' ${1} | grep ${2} | grep -v ${2}/vendor)"
echo "${1} ${DEPENDENCIES}" | xargs echo -n | tr ' ' ','

View File

@ -3,7 +3,6 @@ package cryptoservice
import (
"crypto/rand"
"fmt"
"path/filepath"
"github.com/Sirupsen/logrus"
"github.com/docker/notary/trustmanager"
@ -17,17 +16,16 @@ const (
// CryptoService implements Sign and Create, holding a specific GUN and keystore to
// operate on
type CryptoService struct {
gun string
keyStores []trustmanager.KeyStore
}
// NewCryptoService returns an instance of CryptoService
func NewCryptoService(gun string, keyStores ...trustmanager.KeyStore) *CryptoService {
return &CryptoService{gun: gun, keyStores: keyStores}
func NewCryptoService(keyStores ...trustmanager.KeyStore) *CryptoService {
return &CryptoService{keyStores: keyStores}
}
// Create is used to generate keys for targets, snapshots and timestamps
func (cs *CryptoService) Create(role, algorithm string) (data.PublicKey, error) {
func (cs *CryptoService) Create(role, gun, algorithm string) (data.PublicKey, error) {
var privKey data.PrivateKey
var err error
@ -52,16 +50,9 @@ func (cs *CryptoService) Create(role, algorithm string) (data.PublicKey, error)
}
logrus.Debugf("generated new %s key for role: %s and keyID: %s", algorithm, role, privKey.ID())
// Store the private key into our keystore with the name being: /GUN/ID.key with an alias of role
var keyPath string
if role == data.CanonicalRootRole {
keyPath = privKey.ID()
} else {
keyPath = filepath.Join(cs.gun, privKey.ID())
}
// Store the private key into our keystore
for _, ks := range cs.keyStores {
err = ks.AddKey(keyPath, role, privKey)
err = ks.AddKey(trustmanager.KeyInfo{Role: role, Gun: gun}, privKey)
if err == nil {
return data.PublicKeyFromPrivate(privKey), nil
}
@ -74,23 +65,16 @@ func (cs *CryptoService) Create(role, algorithm string) (data.PublicKey, error)
}
// GetPrivateKey returns a private key and role if present by ID.
// It tries to get the key first without a GUN (in which case it's a root key).
// If that fails, try to get the key with the GUN (non-root key).
// If that fails, then we don't have the key.
func (cs *CryptoService) GetPrivateKey(keyID string) (k data.PrivateKey, role string, err error) {
keyPaths := []string{keyID, filepath.Join(cs.gun, keyID)}
for _, ks := range cs.keyStores {
for _, keyPath := range keyPaths {
k, role, err = ks.GetKey(keyPath)
if err == nil {
return
}
switch err.(type) {
case trustmanager.ErrPasswordInvalid, trustmanager.ErrAttemptsExceeded:
return
default:
continue
}
if k, role, err = ks.GetKey(keyID); err == nil {
return
}
switch err.(type) {
case trustmanager.ErrPasswordInvalid, trustmanager.ErrAttemptsExceeded:
return
default:
continue
}
}
return // returns whatever the final values were
@ -105,12 +89,42 @@ func (cs *CryptoService) GetKey(keyID string) data.PublicKey {
return data.PublicKeyFromPrivate(privKey)
}
// GetKeyInfo returns role and GUN info of a key by ID
func (cs *CryptoService) GetKeyInfo(keyID string) (trustmanager.KeyInfo, error) {
for _, store := range cs.keyStores {
if info, err := store.GetKeyInfo(keyID); err == nil {
return info, nil
}
}
return trustmanager.KeyInfo{}, fmt.Errorf("Could not find info for keyID %s", keyID)
}
// RemoveKey deletes a key by ID
func (cs *CryptoService) RemoveKey(keyID string) (err error) {
keyPaths := []string{keyID, filepath.Join(cs.gun, keyID)}
for _, ks := range cs.keyStores {
for _, keyPath := range keyPaths {
ks.RemoveKey(keyPath)
ks.RemoveKey(keyID)
}
return // returns whatever the final values were
}
// AddKey adds a private key to a specified role.
// The GUN is inferred from the cryptoservice itself for non-root roles
func (cs *CryptoService) AddKey(role, gun string, key data.PrivateKey) (err error) {
// First check if this key already exists in any of our keystores
for _, ks := range cs.keyStores {
if keyInfo, err := ks.GetKeyInfo(key.ID()); err == nil {
if keyInfo.Role != role {
return fmt.Errorf("key with same ID already exists for role: %s", keyInfo.Role)
}
logrus.Debugf("key with same ID %s and role %s already exists", key.ID(), keyInfo.Role)
return nil
}
}
// If the key didn't exist in any of our keystores, add and return on the first successful keystore
for _, ks := range cs.keyStores {
// Try to add to this keystore, return if successful
if err = ks.AddKey(trustmanager.KeyInfo{Role: role, Gun: gun}, key); err == nil {
return nil
}
}
return // returns whatever the final values were
@ -121,7 +135,7 @@ func (cs *CryptoService) ListKeys(role string) []string {
var res []string
for _, ks := range cs.keyStores {
for k, r := range ks.ListKeys() {
if r == role {
if r.Role == role {
res = append(res, k)
}
}
@ -134,7 +148,7 @@ func (cs *CryptoService) ListAllKeys() map[string]string {
res := make(map[string]string)
for _, ks := range cs.keyStores {
for k, r := range ks.ListKeys() {
res[k] = r // keys are content addressed so don't care about overwrites
res[k] = r.Role // keys are content addressed so don't care about overwrites
}
}
return res

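`Create` now takes the GUN per call instead of reading it from the `CryptoService`, and keys are handed to stores as a `KeyInfo{Role, Gun}` rather than a GUN-prefixed path. The sketch below shows a caller of the new API, assuming the vendored import paths and the signatures visible in this diff (`NewCryptoService(keyStores...)`, `Create(role, gun, algorithm)`, `GetKeyInfo(keyID)`); the fixed-passphrase retriever and the example GUN are placeholders.

```go
package main

import (
	"fmt"

	"github.com/docker/notary/cryptoservice"
	"github.com/docker/notary/trustmanager"
	"github.com/docker/notary/tuf/data"
)

func main() {
	// A fixed-passphrase retriever; real callers prompt or read env vars.
	retriever := func(keyName, alias string, createNew bool, attempts int) (string, bool, error) {
		return "testpassword", false, nil
	}

	// Key stores no longer carry a GUN; the CryptoService is constructed
	// from stores alone.
	keyStore := trustmanager.NewKeyMemoryStore(retriever)
	cs := cryptoservice.NewCryptoService(keyStore)

	// The GUN is now passed per call: Create(role, gun, algorithm).
	pubKey, err := cs.Create(data.CanonicalTargetsRole, "docker.io/library/example", data.ECDSAKey)
	if err != nil {
		panic(err)
	}
	fmt.Println("created key:", pubKey.ID())

	// Role and GUN for a key ID are recoverable through GetKeyInfo.
	info, err := cs.GetKeyInfo(pubKey.ID())
	fmt.Printf("info: %+v err: %v\n", info, err)
}
```
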
View File

@ -11,10 +11,8 @@ import (
"path/filepath"
"strings"
"github.com/docker/notary"
"github.com/docker/notary/passphrase"
"github.com/docker/notary/trustmanager"
"github.com/docker/notary/tuf/data"
)
const zipMadeByUNIX = 3 << 8
@ -41,9 +39,6 @@ func (cs *CryptoService) ExportKey(dest io.Writer, keyID, role string) error {
err error
)
if role != data.CanonicalRootRole {
keyID = filepath.Join(cs.gun, keyID)
}
for _, ks := range cs.keyStores {
pemBytes, err = ks.ExportKey(keyID)
if err != nil {
@ -67,7 +62,12 @@ func (cs *CryptoService) ExportKey(dest io.Writer, keyID, role string) error {
// ExportKeyReencrypt exports the specified private key to an io.Writer in
// PEM format. The key is reencrypted with a new passphrase.
func (cs *CryptoService) ExportKeyReencrypt(dest io.Writer, keyID string, newPassphraseRetriever passphrase.Retriever) error {
privateKey, role, err := cs.GetPrivateKey(keyID)
privateKey, _, err := cs.GetPrivateKey(keyID)
if err != nil {
return err
}
keyInfo, err := cs.GetKeyInfo(keyID)
if err != nil {
return err
}
@ -81,7 +81,7 @@ func (cs *CryptoService) ExportKeyReencrypt(dest io.Writer, keyID string, newPas
return err
}
err = tempKeyStore.AddKey(keyID, role, privateKey)
err = tempKeyStore.AddKey(keyInfo, privateKey)
if err != nil {
return err
}
@ -100,56 +100,6 @@ func (cs *CryptoService) ExportKeyReencrypt(dest io.Writer, keyID string, newPas
return nil
}
// ImportRootKey imports a root in PEM format key from an io.Reader
// It prompts for the key's passphrase to verify the data and to determine
// the key ID.
func (cs *CryptoService) ImportRootKey(source io.Reader) error {
pemBytes, err := ioutil.ReadAll(source)
if err != nil {
return err
}
return cs.ImportRoleKey(pemBytes, data.CanonicalRootRole, nil)
}
// ImportRoleKey imports a private key in PEM format key from a byte array
// It prompts for the key's passphrase to verify the data and to determine
// the key ID.
func (cs *CryptoService) ImportRoleKey(pemBytes []byte, role string, newPassphraseRetriever passphrase.Retriever) error {
var alias string
var err error
if role == data.CanonicalRootRole {
alias = role
if err = checkRootKeyIsEncrypted(pemBytes); err != nil {
return err
}
} else {
// Parse the private key to get the key ID so that we can import it to the correct location
privKey, err := trustmanager.ParsePEMPrivateKey(pemBytes, "")
if err != nil {
privKey, _, err = trustmanager.GetPasswdDecryptBytes(newPassphraseRetriever, pemBytes, role, string(role))
if err != nil {
return err
}
}
// Since we're importing a non-root role, we need to pass the path as an alias
alias = filepath.Join(notary.NonRootKeysSubdir, cs.gun, privKey.ID())
// We also need to ensure that the role is properly set in the PEM headers
pemBytes, err = trustmanager.KeyToPEM(privKey, role)
if err != nil {
return err
}
}
for _, ks := range cs.keyStores {
// don't redeclare err, we want the value carried out of the loop
if err = ks.ImportKey(pemBytes, alias); err == nil {
return nil //bail on the first keystore we import to
}
}
return err
}
// ExportAllKeys exports all keys to an io.Writer in zip format.
// newPassphraseRetriever will be used to obtain passphrases to use to encrypt the existing keys.
func (cs *CryptoService) ExportAllKeys(dest io.Writer, newPassphraseRetriever passphrase.Retriever) error {
@ -182,7 +132,7 @@ func (cs *CryptoService) ExportAllKeys(dest io.Writer, newPassphraseRetriever pa
// ImportKeysZip imports keys from a zip file provided as an zip.Reader. The
// keys in the root_keys directory are left encrypted, but the other keys are
// decrypted with the specified passphrase.
func (cs *CryptoService) ImportKeysZip(zipReader zip.Reader) error {
func (cs *CryptoService) ImportKeysZip(zipReader zip.Reader, retriever passphrase.Retriever) error {
// Temporarily store the keys in maps, so we can bail early if there's
// an error (for example, wrong passphrase), without leaving the key
// store in an inconsistent state
@ -191,7 +141,6 @@ func (cs *CryptoService) ImportKeysZip(zipReader zip.Reader) error {
// Iterate through the files in the archive. Don't add the keys
for _, f := range zipReader.File {
fNameTrimmed := strings.TrimSuffix(f.Name, filepath.Ext(f.Name))
rc, err := f.Open()
if err != nil {
return err
@ -206,7 +155,7 @@ func (cs *CryptoService) ImportKeysZip(zipReader zip.Reader) error {
// Note that using / as a separator is okay here - the zip
// package guarantees that the separator will be /
if fNameTrimmed[len(fNameTrimmed)-5:] == "_root" {
if err = checkRootKeyIsEncrypted(fileBytes); err != nil {
if err = CheckRootKeyIsEncrypted(fileBytes); err != nil {
return err
}
}
@ -214,22 +163,21 @@ func (cs *CryptoService) ImportKeysZip(zipReader zip.Reader) error {
}
for keyName, pemBytes := range newKeys {
if keyName[len(keyName)-5:] == "_root" {
keyName = "root"
// Get the key role information as well as its data.PrivateKey representation
_, keyInfo, err := trustmanager.KeyInfoFromPEM(pemBytes, keyName)
if err != nil {
return err
}
// try to import the key to all key stores. As long as one of them
// succeeds, consider it a success
var tmpErr error
for _, ks := range cs.keyStores {
if err := ks.ImportKey(pemBytes, keyName); err != nil {
tmpErr = err
} else {
tmpErr = nil
break
privKey, err := trustmanager.ParsePEMPrivateKey(pemBytes, "")
if err != nil {
privKey, _, err = trustmanager.GetPasswdDecryptBytes(retriever, pemBytes, "", "imported "+keyInfo.Role)
if err != nil {
return err
}
}
if tmpErr != nil {
return tmpErr
// Add the key to our cryptoservice, will add to the first successful keystore
if err = cs.AddKey(keyInfo.Role, keyInfo.Gun, privKey); err != nil {
return err
}
}
@ -271,18 +219,18 @@ func (cs *CryptoService) ExportKeysByGUN(dest io.Writer, gun string, passphraseR
}
func moveKeysByGUN(oldKeyStore, newKeyStore trustmanager.KeyStore, gun string) error {
for relKeyPath := range oldKeyStore.ListKeys() {
for keyID, keyInfo := range oldKeyStore.ListKeys() {
// Skip keys that aren't associated with this GUN
if !strings.HasPrefix(relKeyPath, filepath.FromSlash(gun)) {
if keyInfo.Gun != gun {
continue
}
privKey, alias, err := oldKeyStore.GetKey(relKeyPath)
privKey, _, err := oldKeyStore.GetKey(keyID)
if err != nil {
return err
}
err = newKeyStore.AddKey(relKeyPath, alias, privKey)
err = newKeyStore.AddKey(keyInfo, privKey)
if err != nil {
return err
}
@ -292,13 +240,13 @@ func moveKeysByGUN(oldKeyStore, newKeyStore trustmanager.KeyStore, gun string) e
}
func moveKeys(oldKeyStore, newKeyStore trustmanager.KeyStore) error {
for f := range oldKeyStore.ListKeys() {
privateKey, role, err := oldKeyStore.GetKey(f)
for keyID, keyInfo := range oldKeyStore.ListKeys() {
privateKey, _, err := oldKeyStore.GetKey(keyID)
if err != nil {
return err
}
err = newKeyStore.AddKey(f, role, privateKey)
err = newKeyStore.AddKey(keyInfo, privateKey)
if err != nil {
return err
@ -349,9 +297,9 @@ func addKeysToArchive(zipWriter *zip.Writer, newKeyStore *trustmanager.KeyFileSt
return nil
}
// checkRootKeyIsEncrypted makes sure the root key is encrypted. We have
// CheckRootKeyIsEncrypted makes sure the root key is encrypted. We have
// internal assumptions that depend on this.
func checkRootKeyIsEncrypted(pemBytes []byte) error {
func CheckRootKeyIsEncrypted(pemBytes []byte) error {
block, _ := pem.Decode(pemBytes)
if block == nil {
return ErrNoValidPrivateKey

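Import no longer rewrites aliases with GUN prefixes; instead the key's ID, role, and GUN are recovered from the file name and PEM headers through the new `trustmanager.KeyInfoFromPEM`, and the key is then added via `CryptoService.AddKey`. Below is a hedged sketch of that flow for an unencrypted key, with a placeholder file path; an encrypted key would go through `GetPasswdDecryptBytes` with a passphrase retriever, exactly as `ImportKeysZip` does above.

```go
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/docker/notary/trustmanager"
)

func main() {
	// Placeholder path to a previously exported key; the name (minus ".key")
	// is passed to KeyInfoFromPEM so legacy KEYID_ROLE names still resolve.
	pemBytes, err := ioutil.ReadFile("abc123.key")
	if err != nil {
		panic(err)
	}

	keyID, keyInfo, err := trustmanager.KeyInfoFromPEM(pemBytes, "abc123")
	if err != nil {
		panic(err)
	}
	fmt.Printf("key %s: role=%s gun=%s\n", keyID, keyInfo.Role, keyInfo.Gun)

	// For an unencrypted key the PEM can be parsed directly; encrypted keys
	// are decrypted with a passphrase retriever as in the diff above.
	privKey, err := trustmanager.ParsePEMPrivateKey(pemBytes, "")
	if err != nil {
		panic(err)
	}
	fmt.Println("parsed private key:", privKey.ID())
}
```
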
View File

@ -1,4 +1,4 @@
FROM golang:1.5.3
FROM golang:1.6.0
MAINTAINER David Lawrence "david.lawrence@docker.com"
RUN apt-get update && apt-get install -y \
@ -12,9 +12,6 @@ EXPOSE 4443
RUN go get github.com/mattes/migrate
ENV NOTARYPKG github.com/docker/notary
ENV GOPATH /go/src/${NOTARYPKG}/Godeps/_workspace:$GOPATH
# Copy the local repo to the expected go path
COPY . /go/src/github.com/docker/notary

View File

@ -1,4 +1,4 @@
FROM golang:1.5.3
FROM golang:1.6.0
MAINTAINER David Lawrence "david.lawrence@docker.com"
RUN apt-get update && apt-get install -y \
@ -12,7 +12,6 @@ EXPOSE 4444
RUN go get github.com/mattes/migrate
ENV NOTARYPKG github.com/docker/notary
ENV GOPATH /go/src/${NOTARYPKG}/Godeps/_workspace:$GOPATH
ENV NOTARY_SIGNER_DEFAULT_ALIAS="timestamp_1"
ENV NOTARY_SIGNER_TIMESTAMP_1="testpassword"

View File

@ -13,12 +13,15 @@ import (
"github.com/docker/notary/tuf/data"
)
type keyInfoMap map[string]KeyInfo
// KeyFileStore persists and manages private keys on disk
type KeyFileStore struct {
sync.Mutex
SimpleFileStore
passphrase.Retriever
cachedKeys map[string]*cachedKey
keyInfoMap
}
// KeyMemoryStore manages private keys in memory
@ -27,6 +30,14 @@ type KeyMemoryStore struct {
MemoryFileStore
passphrase.Retriever
cachedKeys map[string]*cachedKey
keyInfoMap
}
// KeyInfo stores the role, path, and gun for a corresponding private key ID
// It is assumed that each private key ID is unique
type KeyInfo struct {
Gun string
Role string
}
// NewKeyFileStore returns a new KeyFileStore creating a private directory to
@ -38,10 +49,93 @@ func NewKeyFileStore(baseDir string, passphraseRetriever passphrase.Retriever) (
return nil, err
}
cachedKeys := make(map[string]*cachedKey)
keyInfoMap := make(keyInfoMap)
return &KeyFileStore{SimpleFileStore: *fileStore,
keyStore := &KeyFileStore{SimpleFileStore: *fileStore,
Retriever: passphraseRetriever,
cachedKeys: cachedKeys}, nil
cachedKeys: cachedKeys,
keyInfoMap: keyInfoMap,
}
// Load this keystore's ID --> gun/role map
keyStore.loadKeyInfo()
return keyStore, nil
}
func generateKeyInfoMap(s LimitedFileStore) map[string]KeyInfo {
keyInfoMap := make(map[string]KeyInfo)
for _, keyPath := range s.ListFiles() {
d, err := s.Get(keyPath)
if err != nil {
logrus.Error(err)
continue
}
keyID, keyInfo, err := KeyInfoFromPEM(d, keyPath)
if err != nil {
logrus.Error(err)
continue
}
keyInfoMap[keyID] = keyInfo
}
return keyInfoMap
}
// Attempts to infer the keyID, role, and GUN from the specified key path.
// Note that non-root roles can only be inferred if this is a legacy style filename: KEYID_ROLE.key
func inferKeyInfoFromKeyPath(keyPath string) (string, string, string) {
var keyID, role, gun string
keyID = filepath.Base(keyPath)
underscoreIndex := strings.LastIndex(keyID, "_")
// This is the legacy KEYID_ROLE filename
// The keyID is the first part of the keyname
// The keyRole is the second part of the keyname
// in a key named abcde_root, abcde is the keyID and root is the KeyAlias
if underscoreIndex != -1 {
role = keyID[underscoreIndex+1:]
keyID = keyID[:underscoreIndex]
}
if filepath.HasPrefix(keyPath, notary.RootKeysSubdir+"/") {
return keyID, data.CanonicalRootRole, ""
}
keyPath = strings.TrimPrefix(keyPath, notary.NonRootKeysSubdir+"/")
gun = getGunFromFullID(keyPath)
return keyID, role, gun
}
func getGunFromFullID(fullKeyID string) string {
keyGun := filepath.Dir(fullKeyID)
// If the gun is empty, Dir will return .
if keyGun == "." {
keyGun = ""
}
return keyGun
}
func (s *KeyFileStore) loadKeyInfo() {
s.keyInfoMap = generateKeyInfoMap(s)
}
func (s *KeyMemoryStore) loadKeyInfo() {
s.keyInfoMap = generateKeyInfoMap(s)
}
// GetKeyInfo returns the corresponding gun and role key info for a keyID
func (s *KeyFileStore) GetKeyInfo(keyID string) (KeyInfo, error) {
if info, ok := s.keyInfoMap[keyID]; ok {
return info, nil
}
return KeyInfo{}, fmt.Errorf("Could not find info for keyID %s", keyID)
}
// GetKeyInfo returns the corresponding gun and role key info for a keyID
func (s *KeyMemoryStore) GetKeyInfo(keyID string) (KeyInfo, error) {
if info, ok := s.keyInfoMap[keyID]; ok {
return info, nil
}
return KeyInfo{}, fmt.Errorf("Could not find info for keyID %s", keyID)
}
// Name returns a user friendly name for the location this store
@ -51,55 +145,81 @@ func (s *KeyFileStore) Name() string {
}
// AddKey stores the contents of a PEM-encoded private key as a PEM block
func (s *KeyFileStore) AddKey(name, role string, privKey data.PrivateKey) error {
func (s *KeyFileStore) AddKey(keyInfo KeyInfo, privKey data.PrivateKey) error {
s.Lock()
defer s.Unlock()
return addKey(s, s.Retriever, s.cachedKeys, name, role, privKey)
if keyInfo.Role == data.CanonicalRootRole || data.IsDelegation(keyInfo.Role) || !data.ValidRole(keyInfo.Role) {
keyInfo.Gun = ""
}
err := addKey(s, s.Retriever, s.cachedKeys, filepath.Join(keyInfo.Gun, privKey.ID()), keyInfo.Role, privKey)
if err != nil {
return err
}
s.keyInfoMap[privKey.ID()] = keyInfo
return nil
}
// GetKey returns the PrivateKey given a KeyID
func (s *KeyFileStore) GetKey(name string) (data.PrivateKey, string, error) {
s.Lock()
defer s.Unlock()
// If this is a bare key ID without the gun, prepend the gun so the filestore lookup succeeds
if keyInfo, ok := s.keyInfoMap[name]; ok {
name = filepath.Join(keyInfo.Gun, name)
}
return getKey(s, s.Retriever, s.cachedKeys, name)
}
// ListKeys returns a list of unique PublicKeys present on the KeyFileStore.
func (s *KeyFileStore) ListKeys() map[string]string {
return listKeys(s)
// ListKeys returns a list of unique PublicKeys present on the KeyFileStore, by returning a copy of the keyInfoMap
func (s *KeyFileStore) ListKeys() map[string]KeyInfo {
return copyKeyInfoMap(s.keyInfoMap)
}
// RemoveKey removes the key from the keyfilestore
func (s *KeyFileStore) RemoveKey(name string) error {
func (s *KeyFileStore) RemoveKey(keyID string) error {
s.Lock()
defer s.Unlock()
return removeKey(s, s.cachedKeys, name)
// If this is a bare key ID without the gun, prepend the gun so the filestore lookup succeeds
if keyInfo, ok := s.keyInfoMap[keyID]; ok {
keyID = filepath.Join(keyInfo.Gun, keyID)
}
err := removeKey(s, s.cachedKeys, keyID)
if err != nil {
return err
}
// Remove this key from our keyInfo map if we removed from our filesystem
delete(s.keyInfoMap, filepath.Base(keyID))
return nil
}
// ExportKey exportes the encrypted bytes from the keystore and writes it to
// dest.
func (s *KeyFileStore) ExportKey(name string) ([]byte, error) {
keyBytes, _, err := getRawKey(s, name)
// ExportKey exports the encrypted bytes from the keystore
func (s *KeyFileStore) ExportKey(keyID string) ([]byte, error) {
if keyInfo, ok := s.keyInfoMap[keyID]; ok {
keyID = filepath.Join(keyInfo.Gun, keyID)
}
keyBytes, _, err := getRawKey(s, keyID)
if err != nil {
return nil, err
}
return keyBytes, nil
}
// ImportKey imports the private key in the encrypted bytes into the keystore
// with the given key ID and alias.
func (s *KeyFileStore) ImportKey(pemBytes []byte, alias string) error {
return importKey(s, s.Retriever, s.cachedKeys, alias, pemBytes)
}
// NewKeyMemoryStore returns a new KeyMemoryStore which holds keys in memory
func NewKeyMemoryStore(passphraseRetriever passphrase.Retriever) *KeyMemoryStore {
memStore := NewMemoryFileStore()
cachedKeys := make(map[string]*cachedKey)
return &KeyMemoryStore{MemoryFileStore: *memStore,
keyInfoMap := make(keyInfoMap)
keyStore := &KeyMemoryStore{MemoryFileStore: *memStore,
Retriever: passphraseRetriever,
cachedKeys: cachedKeys}
cachedKeys: cachedKeys,
keyInfoMap: keyInfoMap,
}
// Load this keystore's ID --> gun/role map
keyStore.loadKeyInfo()
return keyStore
}
// Name returns a user friendly name for the location this store
@ -109,45 +229,84 @@ func (s *KeyMemoryStore) Name() string {
}
// AddKey stores the contents of a PEM-encoded private key as a PEM block
func (s *KeyMemoryStore) AddKey(name, alias string, privKey data.PrivateKey) error {
func (s *KeyMemoryStore) AddKey(keyInfo KeyInfo, privKey data.PrivateKey) error {
s.Lock()
defer s.Unlock()
return addKey(s, s.Retriever, s.cachedKeys, name, alias, privKey)
if keyInfo.Role == data.CanonicalRootRole || data.IsDelegation(keyInfo.Role) || !data.ValidRole(keyInfo.Role) {
keyInfo.Gun = ""
}
err := addKey(s, s.Retriever, s.cachedKeys, filepath.Join(keyInfo.Gun, privKey.ID()), keyInfo.Role, privKey)
if err != nil {
return err
}
s.keyInfoMap[privKey.ID()] = keyInfo
return nil
}
// GetKey returns the PrivateKey given a KeyID
func (s *KeyMemoryStore) GetKey(name string) (data.PrivateKey, string, error) {
s.Lock()
defer s.Unlock()
// If this is a bare key ID without the gun, prepend the gun so the filestore lookup succeeds
if keyInfo, ok := s.keyInfoMap[name]; ok {
name = filepath.Join(keyInfo.Gun, name)
}
return getKey(s, s.Retriever, s.cachedKeys, name)
}
// ListKeys returns a list of unique PublicKeys present on the KeyFileStore.
func (s *KeyMemoryStore) ListKeys() map[string]string {
return listKeys(s)
// ListKeys returns a list of unique PublicKeys present on the KeyFileStore, by returning a copy of the keyInfoMap
func (s *KeyMemoryStore) ListKeys() map[string]KeyInfo {
return copyKeyInfoMap(s.keyInfoMap)
}
// copyKeyInfoMap returns a deep copy of the passed-in keyInfoMap
func copyKeyInfoMap(keyInfoMap map[string]KeyInfo) map[string]KeyInfo {
copyMap := make(map[string]KeyInfo)
for keyID, keyInfo := range keyInfoMap {
copyMap[keyID] = KeyInfo{Role: keyInfo.Role, Gun: keyInfo.Gun}
}
return copyMap
}
// RemoveKey removes the key from the keystore
func (s *KeyMemoryStore) RemoveKey(name string) error {
func (s *KeyMemoryStore) RemoveKey(keyID string) error {
s.Lock()
defer s.Unlock()
return removeKey(s, s.cachedKeys, name)
// If this is a bare key ID without the gun, prepend the gun so the filestore lookup succeeds
if keyInfo, ok := s.keyInfoMap[keyID]; ok {
keyID = filepath.Join(keyInfo.Gun, keyID)
}
err := removeKey(s, s.cachedKeys, keyID)
if err != nil {
return err
}
// Remove this key from our keyInfo map if we removed from our filesystem
delete(s.keyInfoMap, filepath.Base(keyID))
return nil
}
// ExportKey exportes the encrypted bytes from the keystore and writes it to
// dest.
func (s *KeyMemoryStore) ExportKey(name string) ([]byte, error) {
keyBytes, _, err := getRawKey(s, name)
// ExportKey exports the encrypted bytes from the keystore
func (s *KeyMemoryStore) ExportKey(keyID string) ([]byte, error) {
keyBytes, _, err := getRawKey(s, keyID)
if err != nil {
return nil, err
}
return keyBytes, nil
}
// ImportKey imports the private key in the encrypted bytes into the keystore
// with the given key ID and alias.
func (s *KeyMemoryStore) ImportKey(pemBytes []byte, alias string) error {
return importKey(s, s.Retriever, s.cachedKeys, alias, pemBytes)
// KeyInfoFromPEM attempts to get a keyID and KeyInfo from the filename and PEM bytes of a key
func KeyInfoFromPEM(pemBytes []byte, filename string) (string, KeyInfo, error) {
keyID, role, gun := inferKeyInfoFromKeyPath(filename)
if role == "" {
block, _ := pem.Decode(pemBytes)
if block == nil {
return "", KeyInfo{}, fmt.Errorf("could not decode PEM block for key %s", filename)
}
if keyRole, ok := block.Headers["role"]; ok {
role = keyRole
}
}
return keyID, KeyInfo{Gun: gun, Role: role}, nil
}
func addKey(s LimitedFileStore, passphraseRetriever passphrase.Retriever, cachedKeys map[string]*cachedKey, name, role string, privKey data.PrivateKey) error {
@ -229,50 +388,6 @@ func getKey(s LimitedFileStore, passphraseRetriever passphrase.Retriever, cached
return privKey, keyAlias, nil
}
// ListKeys returns a map of unique PublicKeys present on the KeyFileStore and
// their corresponding aliases.
func listKeys(s LimitedFileStore) map[string]string {
keyIDMap := make(map[string]string)
for _, f := range s.ListFiles() {
// Remove the prefix of the directory from the filename
var keyIDFull string
if strings.HasPrefix(f, notary.RootKeysSubdir+"/") {
keyIDFull = strings.TrimPrefix(f, notary.RootKeysSubdir+"/")
} else {
keyIDFull = strings.TrimPrefix(f, notary.NonRootKeysSubdir+"/")
}
keyIDFull = strings.TrimSpace(keyIDFull)
// If the key does not have a _, we'll attempt to
// read it as a PEM
underscoreIndex := strings.LastIndex(keyIDFull, "_")
if underscoreIndex == -1 {
d, err := s.Get(f)
if err != nil {
logrus.Error(err)
continue
}
block, _ := pem.Decode(d)
if block == nil {
continue
}
if role, ok := block.Headers["role"]; ok {
keyIDMap[keyIDFull] = role
}
} else {
// The keyID is the first part of the keyname
// The KeyAlias is the second part of the keyname
// in a key named abcde_root, abcde is the keyID and root is the KeyAlias
keyID := keyIDFull[:underscoreIndex]
keyAlias := keyIDFull[underscoreIndex+1:]
keyIDMap[keyID] = keyAlias
}
}
return keyIDMap
}
// RemoveKey removes the key from the keyfilestore
func removeKey(s LimitedFileStore, cachedKeys map[string]*cachedKey, name string) error {
role, legacy, err := getKeyRole(s, name)
@ -296,7 +411,7 @@ func removeKey(s LimitedFileStore, cachedKeys map[string]*cachedKey, name string
// Assumes 2 subdirectories, 1 containing root keys and 1 containing tuf keys
func getSubdir(alias string) string {
if alias == "root" {
if alias == data.CanonicalRootRole {
return notary.RootKeysSubdir
}
return notary.NonRootKeysSubdir
@ -380,21 +495,3 @@ func encryptAndAddKey(s LimitedFileStore, passwd string, cachedKeys map[string]*
cachedKeys[name] = &cachedKey{alias: role, key: privKey}
return s.Add(filepath.Join(getSubdir(role), name), pemPrivKey)
}
func importKey(s LimitedFileStore, passphraseRetriever passphrase.Retriever, cachedKeys map[string]*cachedKey, alias string, pemBytes []byte) error {
if alias != data.CanonicalRootRole {
return s.Add(alias, pemBytes)
}
privKey, passphrase, err := GetPasswdDecryptBytes(
passphraseRetriever, pemBytes, "", "imported "+alias)
if err != nil {
return err
}
var name string
name = privKey.ID()
return encryptAndAddKey(s, passphrase, cachedKeys, name, alias, privKey)
}

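The store now builds an in-memory keyID → `KeyInfo` map at load time: root keys live under the root-keys subdirectory and carry no GUN, legacy `KEYID_ROLE` filenames still yield a role, and otherwise the role comes from the PEM "role" header while the GUN is the directory portion of the path. The self-contained sketch below mirrors that inference; the directory names `root_keys/` and `tuf_keys/` are assumed values for `notary.RootKeysSubdir` and `notary.NonRootKeysSubdir`.

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

type keyInfo struct {
	Gun  string
	Role string
}

// inferKeyInfo mirrors inferKeyInfoFromKeyPath above: it recovers the key ID,
// role, and GUN from a key's relative path inside the key store.
func inferKeyInfo(keyPath string) (string, keyInfo) {
	keyID := filepath.Base(keyPath)
	var role, gun string

	// Legacy KEYID_ROLE names: "abcde_root" -> ID "abcde", role "root".
	if i := strings.LastIndex(keyID, "_"); i != -1 {
		role = keyID[i+1:]
		keyID = keyID[:i]
	}

	// Keys under root_keys/ are root keys and carry no GUN.
	if strings.HasPrefix(keyPath, "root_keys/") {
		return keyID, keyInfo{Role: "root"}
	}

	// Otherwise the GUN is the directory part under tuf_keys/ (empty when the
	// key sits directly in that directory). A still-empty role would be read
	// from the PEM "role" header, as KeyInfoFromPEM does above.
	keyPath = strings.TrimPrefix(keyPath, "tuf_keys/")
	if gun = filepath.Dir(keyPath); gun == "." {
		gun = ""
	}
	return keyID, keyInfo{Gun: gun, Role: role}
}

func main() {
	for _, p := range []string{
		"root_keys/1a2b3c",
		"tuf_keys/docker.io/library/example/4d5e6f",
		"tuf_keys/7a8b9c_snapshot", // legacy name
	} {
		id, info := inferKeyInfo(p)
		fmt.Printf("%-45s -> id=%s role=%s gun=%s\n", p, id, info.Role, info.Gun)
	}
}
```
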
View File

@ -40,14 +40,14 @@ const (
// KeyStore is a generic interface for private key storage
type KeyStore interface {
// Add Key adds a key to the KeyStore, and if the key already exists,
// AddKey adds a key to the KeyStore, and if the key already exists,
// succeeds. Otherwise, returns an error if it cannot add.
AddKey(name, alias string, privKey data.PrivateKey) error
GetKey(name string) (data.PrivateKey, string, error)
ListKeys() map[string]string
RemoveKey(name string) error
ExportKey(name string) ([]byte, error)
ImportKey(pemBytes []byte, alias string) error
AddKey(keyInfo KeyInfo, privKey data.PrivateKey) error
GetKey(keyID string) (data.PrivateKey, string, error)
GetKeyInfo(keyID string) (KeyInfo, error)
ListKeys() map[string]KeyInfo
RemoveKey(keyID string) error
ExportKey(keyID string) ([]byte, error)
Name() string
}

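The interface gains `GetKeyInfo`, drops `ImportKey`, and switches several methods to key IDs and `KeyInfo`. A compile-time assertion is a cheap way to confirm that the concrete stores in this vendoring still satisfy the reworked interface; this is a sketch assuming the vendored import path.

```go
package trustcheck

import "github.com/docker/notary/trustmanager"

// Compile-time assertions that the concrete stores implement the reworked
// KeyStore interface; the build fails if a method is missing or a signature
// has drifted.
var (
	_ trustmanager.KeyStore = (*trustmanager.KeyFileStore)(nil)
	_ trustmanager.KeyStore = (*trustmanager.KeyMemoryStore)(nil)
)
```
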
View File

@ -517,7 +517,7 @@ func EncryptPrivateKey(key data.PrivateKey, role, passphrase string) ([]byte, er
// ReadRoleFromPEM returns the value from the role PEM header, if it exists
func ReadRoleFromPEM(pemBytes []byte) string {
pemBlock, _ := pem.Decode(pemBytes)
if pemBlock.Headers == nil {
if pemBlock == nil || pemBlock.Headers == nil {
return ""
}
role, ok := pemBlock.Headers["role"]

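The one-line fix above guards `ReadRoleFromPEM` against `pem.Decode` returning a nil block, which happens whenever the input contains no PEM data at all; without the check the `Headers` access panics. A minimal standard-library sketch of the failure mode and the guard:

```go
package main

import (
	"encoding/pem"
	"fmt"
)

// readRole sketches the guarded version of ReadRoleFromPEM: pem.Decode
// returns a nil block for input with no PEM data, so both the block and its
// Headers map must be checked before indexing.
func readRole(pemBytes []byte) string {
	block, _ := pem.Decode(pemBytes)
	if block == nil || block.Headers == nil {
		return ""
	}
	return block.Headers["role"]
}

func main() {
	fmt.Printf("garbage input -> %q\n", readRole([]byte("not a pem file")))

	withRole := pem.EncodeToMemory(&pem.Block{
		Type:    "EC PRIVATE KEY",
		Headers: map[string]string{"role": "snapshot"},
		Bytes:   []byte{0x01},
	})
	fmt.Printf("role header   -> %q\n", readRole(withRole))
}
```
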
View File

@ -617,7 +617,7 @@ func (s *YubiKeyStore) setLibLoader(loader pkcs11LibLoader) {
s.libLoader = loader
}
func (s *YubiKeyStore) ListKeys() map[string]string {
func (s *YubiKeyStore) ListKeys() map[string]trustmanager.KeyInfo {
if len(s.keys) > 0 {
return buildKeyMap(s.keys)
}
@ -639,15 +639,15 @@ func (s *YubiKeyStore) ListKeys() map[string]string {
}
// AddKey puts a key inside the Yubikey, as well as writing it to the backup store
func (s *YubiKeyStore) AddKey(keyID, role string, privKey data.PrivateKey) error {
added, err := s.addKey(keyID, role, privKey)
func (s *YubiKeyStore) AddKey(keyInfo trustmanager.KeyInfo, privKey data.PrivateKey) error {
added, err := s.addKey(privKey.ID(), keyInfo.Role, privKey)
if err != nil {
return err
}
if added {
err = s.backupStore.AddKey(privKey.ID(), role, privKey)
if added && s.backupStore != nil {
err = s.backupStore.AddKey(keyInfo, privKey)
if err != nil {
defer s.RemoveKey(keyID)
defer s.RemoveKey(privKey.ID())
return ErrBackupFailed{err: err.Error()}
}
}
@ -762,20 +762,9 @@ func (s *YubiKeyStore) ExportKey(keyID string) ([]byte, error) {
return nil, errors.New("Keys cannot be exported from a Yubikey.")
}
// ImportKey imports a root key into a Yubikey
func (s *YubiKeyStore) ImportKey(pemBytes []byte, keyPath string) error {
logrus.Debugf("Attempting to import: %s key inside of YubiKeyStore", keyPath)
if keyPath != data.CanonicalRootRole {
return fmt.Errorf("yubikey only supports storing root keys")
}
privKey, _, err := trustmanager.GetPasswdDecryptBytes(
s.passRetriever, pemBytes, "", "imported root")
if err != nil {
logrus.Debugf("Failed to get and retrieve a key from: %s", keyPath)
return err
}
_, err = s.addKey(privKey.ID(), "root", privKey)
return err
// Not yet implemented
func (s *YubiKeyStore) GetKeyInfo(keyID string) (trustmanager.KeyInfo, error) {
return trustmanager.KeyInfo{}, fmt.Errorf("Not yet implemented")
}
func cleanup(ctx IPKCS11Ctx, session pkcs11.SessionHandle) {
@ -890,10 +879,10 @@ func login(ctx IPKCS11Ctx, session pkcs11.SessionHandle, passRetriever passphras
return nil
}
func buildKeyMap(keys map[string]yubiSlot) map[string]string {
res := make(map[string]string)
func buildKeyMap(keys map[string]yubiSlot) map[string]trustmanager.KeyInfo {
res := make(map[string]trustmanager.KeyInfo)
for k, v := range keys {
res[k] = v.role
res[k] = trustmanager.KeyInfo{Role: v.role, Gun: ""}
}
return res
}

View File

@ -1,8 +1,6 @@
package client
import (
"bytes"
"crypto/sha256"
"encoding/json"
"fmt"
"path"
@ -92,16 +90,16 @@ func (c *Client) update() error {
func (c Client) checkRoot() error {
role := data.CanonicalRootRole
size := c.local.Snapshot.Signed.Meta[role].Length
hashSha256 := c.local.Snapshot.Signed.Meta[role].Hashes["sha256"]
expectedHashes := c.local.Snapshot.Signed.Meta[role].Hashes
raw, err := c.cache.GetMeta("root", size)
if err != nil {
return err
}
hash := sha256.Sum256(raw)
if !bytes.Equal(hash[:], hashSha256) {
return fmt.Errorf("Cached root sha256 did not match snapshot root sha256")
if err := data.CheckHashes(raw, expectedHashes); err != nil {
return fmt.Errorf("Cached root hashes did not match snapshot root hashes")
}
if int64(len(raw)) != size {
@ -127,11 +125,19 @@ func (c *Client) downloadRoot() error {
// We can't read an exact size for the root metadata without risking getting stuck in the TUF update cycle
// since it's possible that downloading timestamp/snapshot metadata may fail due to a signature mismatch
var size int64 = -1
var expectedSha256 []byte
// We cannot assume exactly which hashes the "snapshot" meta specifies.
//
// Older clients publish only "sha256"; newer ones publish both "sha256"
// and "sha512", and more algorithms may be added in the future.
var expectedHashes data.Hashes
if c.local.Snapshot != nil {
if prevRootMeta, ok := c.local.Snapshot.Signed.Meta[role]; ok {
size = prevRootMeta.Length
expectedSha256 = prevRootMeta.Hashes["sha256"]
expectedHashes = prevRootMeta.Hashes
}
}
@ -144,8 +150,9 @@ func (c *Client) downloadRoot() error {
old := &data.Signed{}
version := 0
if expectedSha256 != nil {
// can only trust cache if we have an expected sha256 to trust
// For the same reason, we do not know in advance how many hashes are present.
if len(expectedHashes) != 0 {
// can only trust the cache if we have at least one expected hash to check against
cachedRoot, err = c.cache.GetMeta(role, size)
}
@ -153,11 +160,11 @@ func (c *Client) downloadRoot() error {
logrus.Debug("didn't find a cached root, must download")
download = true
} else {
hash := sha256.Sum256(cachedRoot)
if !bytes.Equal(hash[:], expectedSha256) {
if err := data.CheckHashes(cachedRoot, expectedHashes); err != nil {
logrus.Debug("cached root's hash didn't match expected, must download")
download = true
}
err := json.Unmarshal(cachedRoot, old)
if err == nil {
root, err := data.RootFromSigned(old)
@ -176,7 +183,7 @@ func (c *Client) downloadRoot() error {
var raw []byte
if download {
// use consistent download if we have the checksum.
raw, s, err = c.downloadSigned(role, size, expectedSha256)
raw, s, err = c.downloadSigned(role, size, expectedHashes)
if err != nil {
return err
}
@ -322,8 +329,8 @@ func (c *Client) downloadSnapshot() error {
return tuf.ErrNotLoaded{Role: data.CanonicalTimestampRole}
}
size := c.local.Timestamp.Signed.Meta[role].Length
expectedSha256, ok := c.local.Timestamp.Signed.Meta[role].Hashes["sha256"]
if !ok {
expectedHashes := c.local.Timestamp.Signed.Meta[role].Hashes
if len(expectedHashes) == 0 {
return data.ErrMissingMeta{Role: "snapshot"}
}
@ -336,11 +343,11 @@ func (c *Client) downloadSnapshot() error {
download = true
} else {
// file may have been tampered with on disk. Always check the hash!
genHash := sha256.Sum256(raw)
if !bytes.Equal(genHash[:], expectedSha256) {
if err := data.CheckHashes(raw, expectedHashes); err != nil {
logrus.Debug("hash of snapshot in cache did not match expected hash, must download")
download = true
}
err := json.Unmarshal(raw, old)
if err == nil {
snap, err := data.SnapshotFromSigned(old)
@ -357,7 +364,7 @@ func (c *Client) downloadSnapshot() error {
}
var s *data.Signed
if download {
raw, s, err = c.downloadSigned(role, size, expectedSha256)
raw, s, err = c.downloadSigned(role, size, expectedHashes)
if err != nil {
return err
}
@ -439,18 +446,19 @@ func (c *Client) downloadTargets(role string) error {
return nil
}
func (c *Client) downloadSigned(role string, size int64, expectedSha256 []byte) ([]byte, *data.Signed, error) {
rolePath := utils.ConsistentName(role, expectedSha256)
func (c *Client) downloadSigned(role string, size int64, expectedHashes data.Hashes) ([]byte, *data.Signed, error) {
rolePath := utils.ConsistentName(role, expectedHashes["sha256"])
raw, err := c.remote.GetMeta(rolePath, size)
if err != nil {
return nil, nil, err
}
if expectedSha256 != nil {
genHash := sha256.Sum256(raw)
if !bytes.Equal(genHash[:], expectedSha256) {
if expectedHashes != nil {
if err := data.CheckHashes(raw, expectedHashes); err != nil {
return nil, nil, ErrChecksumMismatch{role: role}
}
}
s := &data.Signed{}
err = json.Unmarshal(raw, s)
if err != nil {
@ -465,8 +473,8 @@ func (c Client) getTargetsFile(role string, snapshotMeta data.Files, consistent
if !ok {
return nil, data.ErrMissingMeta{Role: role}
}
expectedSha256, ok := snapshotMeta[role].Hashes["sha256"]
if !ok {
expectedHashes := snapshotMeta[role].Hashes
if len(expectedHashes) == 0 {
return nil, data.ErrMissingMeta{Role: role}
}
@ -480,10 +488,10 @@ func (c Client) getTargetsFile(role string, snapshotMeta data.Files, consistent
download = true
} else {
// file may have been tampered with on disk. Always check the hash!
genHash := sha256.Sum256(raw)
if !bytes.Equal(genHash[:], expectedSha256) {
if err := data.CheckHashes(raw, expectedHashes); err != nil {
download = true
}
err := json.Unmarshal(raw, old)
if err == nil {
targ, err := data.TargetsFromSigned(old, role)
@ -500,7 +508,7 @@ func (c Client) getTargetsFile(role string, snapshotMeta data.Files, consistent
size := snapshotMeta[role].Length
var s *data.Signed
if download {
raw, s, err = c.downloadSigned(role, size, expectedSha256)
raw, s, err = c.downloadSigned(role, size, expectedHashes)
if err != nil {
return nil, err
}
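
The recurring change in this file is that every cache-freshness check now goes through data.CheckHashes with the full map of expected digests from the timestamp or snapshot metadata, instead of a single hand-rolled sha256 comparison, and downloadSigned receives the whole data.Hashes map (only its sha256 entry feeds utils.ConsistentName). Below is a minimal, self-contained sketch of that pattern; checkAgainst is a hypothetical stand-in for data.CheckHashes, not notary's API.

package main

import (
	"bytes"
	"crypto/sha256"
	"crypto/sha512"
	"fmt"
)

// checkAgainst is a hypothetical stand-in for data.CheckHashes: it verifies
// the payload against every recognized digest in the map and fails if any one
// of them mismatches, or if no supported algorithm is present at all.
func checkAgainst(payload []byte, expected map[string][]byte) error {
	matched := 0
	for alg, want := range expected {
		var got []byte
		switch alg {
		case "sha256":
			sum := sha256.Sum256(payload)
			got = sum[:]
		case "sha512":
			sum := sha512.Sum512(payload)
			got = sum[:]
		default:
			continue // unknown algorithms are ignored
		}
		if !bytes.Equal(got, want) {
			return fmt.Errorf("%s checksum mismatch", alg)
		}
		matched++
	}
	if matched == 0 {
		return fmt.Errorf("no supported hash algorithm found")
	}
	return nil
}

func main() {
	cached := []byte(`{"signed":{},"signatures":[]}`)
	sum := sha256.Sum256(cached)
	expected := map[string][]byte{"sha256": sum[:]}

	// The client-side pattern after this change: keep the cached copy only if
	// every expected digest checks out, otherwise fall back to a download.
	if err := checkAgainst(cached, expected); err != nil {
		fmt.Println("cached metadata is stale or tampered, must download:", err)
	} else {
		fmt.Println("cached metadata verified against all expected hashes")
	}
}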

View File

@ -129,7 +129,7 @@ func (r SignedRoot) ToSigned() (*Signed, error) {
copy(sigs, r.Signatures)
return &Signed{
Signatures: sigs,
Signed: signed,
Signed: &signed,
}, nil
}
@ -146,7 +146,7 @@ func (r SignedRoot) MarshalJSON() ([]byte, error) {
// that it is a valid SignedRoot
func RootFromSigned(s *Signed) (*SignedRoot, error) {
r := Root{}
if err := defaultSerializer.Unmarshal(s.Signed, &r); err != nil {
if err := defaultSerializer.Unmarshal(*s.Signed, &r); err != nil {
return nil, err
}
if err := isValidRootStructure(r); err != nil {
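
The Signed wrapper now carries its payload as a *json.RawMessage, so the unpacking helpers dereference it (*s.Signed) before unmarshalling. The sketch below shows that round trip with a hypothetical envelope type; it is illustrative only, not notary's actual data.Signed.

package main

import (
	"encoding/json"
	"fmt"
)

// envelope is an illustrative wrapper: the payload sits behind a pointer so a
// missing "signed" field decodes to nil rather than an empty RawMessage.
type envelope struct {
	Signed     *json.RawMessage `json:"signed"`
	Signatures []interface{}    `json:"signatures"`
}

func main() {
	raw := []byte(`{"signed":{"_type":"Root","version":1},"signatures":[]}`)

	var env envelope
	if err := json.Unmarshal(raw, &env); err != nil {
		panic(err)
	}
	if env.Signed == nil {
		panic("no signed payload present")
	}

	// Dereference the pointer to decode the inner document, the same pattern
	// as defaultSerializer.Unmarshal(*s.Signed, &r) above.
	var inner struct {
		Type    string `json:"_type"`
		Version int    `json:"version"`
	}
	if err := json.Unmarshal(*env.Signed, &inner); err != nil {
		panic(err)
	}
	fmt.Println(inner.Type, inner.Version)
}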

View File

@ -2,12 +2,12 @@ package data
import (
"bytes"
"crypto/sha256"
"fmt"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/go/canonical/json"
"github.com/docker/notary"
)
// SignedSnapshot is a fully unpacked snapshot.json
@ -39,10 +39,18 @@ func isValidSnapshotStructure(s Snapshot) error {
// Meta is a map of FileMeta, so if the role isn't in the map it returns
// an empty FileMeta, which has an empty map, and you can check on keys
// from an empty map.
if checksum, ok := s.Meta[role].Hashes["sha256"]; !ok || len(checksum) != sha256.Size {
//
// For now sha256 is required and sha512 is not.
if _, ok := s.Meta[role].Hashes[notary.SHA256]; !ok {
return ErrInvalidMetadata{
role: CanonicalSnapshotRole,
msg: fmt.Sprintf("missing or invalid %s sha256 checksum information", role),
msg: fmt.Sprintf("missing %s sha256 checksum information", role),
}
}
if err := CheckValidHashStructures(s.Meta[role].Hashes); err != nil {
return ErrInvalidMetadata{
role: CanonicalSnapshotRole,
msg: fmt.Sprintf("invalid %s checksum information, %v", role, err),
}
}
}
@ -63,11 +71,11 @@ func NewSnapshot(root *Signed, targets *Signed) (*SignedSnapshot, error) {
logrus.Debug("Error Marshalling Root")
return nil, err
}
rootMeta, err := NewFileMeta(bytes.NewReader(rootJSON), "sha256")
rootMeta, err := NewFileMeta(bytes.NewReader(rootJSON), NotaryDefaultHashes...)
if err != nil {
return nil, err
}
targetsMeta, err := NewFileMeta(bytes.NewReader(targetsJSON), "sha256")
targetsMeta, err := NewFileMeta(bytes.NewReader(targetsJSON), NotaryDefaultHashes...)
if err != nil {
return nil, err
}
@ -85,10 +93,6 @@ func NewSnapshot(root *Signed, targets *Signed) (*SignedSnapshot, error) {
}, nil
}
func (sp *SignedSnapshot) hashForRole(role string) []byte {
return sp.Signed.Meta[role].Hashes["sha256"]
}
// ToSigned partially serializes a SignedSnapshot for further signing
func (sp *SignedSnapshot) ToSigned() (*Signed, error) {
s, err := defaultSerializer.MarshalCanonical(sp.Signed)
@ -104,7 +108,7 @@ func (sp *SignedSnapshot) ToSigned() (*Signed, error) {
copy(sigs, sp.Signatures)
return &Signed{
Signatures: sigs,
Signed: signed,
Signed: &signed,
}, nil
}
@ -144,7 +148,7 @@ func (sp *SignedSnapshot) MarshalJSON() ([]byte, error) {
// SnapshotFromSigned fully unpacks a Signed object into a SignedSnapshot
func SnapshotFromSigned(s *Signed) (*SignedSnapshot, error) {
sp := Snapshot{}
if err := defaultSerializer.Unmarshal(s.Signed, &sp); err != nil {
if err := defaultSerializer.Unmarshal(*s.Signed, &sp); err != nil {
return nil, err
}
if err := isValidSnapshotStructure(sp); err != nil {
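
The structural validation added here is a two-step check: a sha256 entry must be present in the role's Meta, and any recognized digest that is present must pass CheckValidHashStructures. A self-contained sketch of that shape, using a plain map in place of data.Hashes and hard-coded algorithm names:

package main

import (
	"crypto/sha256"
	"crypto/sha512"
	"fmt"
)

// validateMeta is an illustrative version of the two-step check added above:
// sha256 is mandatory, and any recognized digest that is present must have
// the correct length.
func validateMeta(hashes map[string][]byte) error {
	if _, ok := hashes["sha256"]; !ok {
		return fmt.Errorf("missing sha256 checksum information")
	}
	for alg, digest := range hashes {
		switch alg {
		case "sha256":
			if len(digest) != sha256.Size {
				return fmt.Errorf("invalid sha256 checksum length %d", len(digest))
			}
		case "sha512":
			if len(digest) != sha512.Size {
				return fmt.Errorf("invalid sha512 checksum length %d", len(digest))
			}
		}
	}
	return nil
}

func main() {
	good := map[string][]byte{"sha256": make([]byte, sha256.Size)}
	bad := map[string][]byte{"sha512": make([]byte, 10)}
	fmt.Println(validateMeta(good)) // <nil>
	fmt.Println(validateMeta(bad))  // missing sha256 checksum information
}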

View File

@ -162,7 +162,7 @@ func (t *SignedTargets) ToSigned() (*Signed, error) {
copy(sigs, t.Signatures)
return &Signed{
Signatures: sigs,
Signed: signed,
Signed: &signed,
}, nil
}
@ -179,7 +179,7 @@ func (t *SignedTargets) MarshalJSON() ([]byte, error) {
// a role name (so it can validate the SignedTargets object)
func TargetsFromSigned(s *Signed, roleName string) (*SignedTargets, error) {
t := Targets{}
if err := defaultSerializer.Unmarshal(s.Signed, &t); err != nil {
if err := defaultSerializer.Unmarshal(*s.Signed, &t); err != nil {
return nil, err
}
if err := isValidTargetsStructure(t, roleName); err != nil {

View File

@ -2,11 +2,11 @@ package data
import (
"bytes"
"crypto/sha256"
"fmt"
"time"
"github.com/docker/go/canonical/json"
"github.com/docker/notary"
)
// SignedTimestamp is a fully unpacked timestamp.json
@ -37,10 +37,17 @@ func isValidTimestampStructure(t Timestamp) error {
// Meta is a map of FileMeta, so if the role isn't in the map it returns
// an empty FileMeta, which has an empty map, and you can check on keys
// from an empty map.
if cs, ok := t.Meta[CanonicalSnapshotRole].Hashes["sha256"]; !ok || len(cs) != sha256.Size {
//
// For now sha256 is required and sha512 is not.
if _, ok := t.Meta[CanonicalSnapshotRole].Hashes[notary.SHA256]; !ok {
return ErrInvalidMetadata{
role: CanonicalTimestampRole, msg: "missing or invalid snapshot sha256 checksum information"}
role: CanonicalTimestampRole, msg: "missing snapshot sha256 checksum information"}
}
if err := CheckValidHashStructures(t.Meta[CanonicalSnapshotRole].Hashes); err != nil {
return ErrInvalidMetadata{
role: CanonicalTimestampRole, msg: fmt.Sprintf("invalid snapshot checksum information, %v", err)}
}
return nil
}
@ -50,7 +57,7 @@ func NewTimestamp(snapshot *Signed) (*SignedTimestamp, error) {
if err != nil {
return nil, err
}
snapshotMeta, err := NewFileMeta(bytes.NewReader(snapshotJSON), "sha256")
snapshotMeta, err := NewFileMeta(bytes.NewReader(snapshotJSON), NotaryDefaultHashes...)
if err != nil {
return nil, err
}
@ -83,7 +90,7 @@ func (ts *SignedTimestamp) ToSigned() (*Signed, error) {
copy(sigs, ts.Signatures)
return &Signed{
Signatures: sigs,
Signed: signed,
Signed: &signed,
}, nil
}
@ -110,7 +117,7 @@ func (ts *SignedTimestamp) MarshalJSON() ([]byte, error) {
// SignedTimestamp
func TimestampFromSigned(s *Signed) (*SignedTimestamp, error) {
ts := Timestamp{}
if err := defaultSerializer.Unmarshal(s.Signed, &ts); err != nil {
if err := defaultSerializer.Unmarshal(*s.Signed, &ts); err != nil {
return nil, err
}
if err := isValidTimestampStructure(ts); err != nil {
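
NewTimestamp, like NewSnapshot above, now passes NotaryDefaultHashes into NewFileMeta, so the generated metadata carries both sha256 and sha512 digests. The sketch below approximates that multi-digest generation with only the standard library; multiDigest is a hypothetical helper, not the vendored NewFileMeta.

package main

import (
	"crypto/sha256"
	"crypto/sha512"
	"fmt"
	"hash"
	"io"
	"io/ioutil"
	"strings"
)

// multiDigest approximates what NewFileMeta(reader, NotaryDefaultHashes...)
// produces: the payload length plus one digest per requested algorithm.
func multiDigest(r io.Reader, algorithms ...string) (int64, map[string][]byte, error) {
	hashers := map[string]hash.Hash{}
	for _, alg := range algorithms {
		var h hash.Hash
		switch alg {
		case "sha256":
			h = sha256.New()
		case "sha512":
			h = sha512.New()
		default:
			return 0, nil, fmt.Errorf("unknown hash algorithm: %s", alg)
		}
		hashers[alg] = h
		r = io.TeeReader(r, h)
	}
	n, err := io.Copy(ioutil.Discard, r)
	if err != nil {
		return 0, nil, err
	}
	digests := map[string][]byte{}
	for alg, h := range hashers {
		digests[alg] = h.Sum(nil)
	}
	return n, digests, nil
}

func main() {
	length, digests, err := multiDigest(strings.NewReader(`{"_type":"Timestamp"}`), "sha256", "sha512")
	if err != nil {
		panic(err)
	}
	fmt.Printf("length=%d sha256=%x sha512 present=%v\n", length, digests["sha256"], digests["sha512"] != nil)
}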

View File

@ -3,6 +3,7 @@ package data
import (
"crypto/sha256"
"crypto/sha512"
"crypto/subtle"
"fmt"
"hash"
"io"
@ -85,8 +86,8 @@ func ValidTUFType(typ, role string) bool {
// used to verify signatures before fully unpacking, or to add signatures
// before fully packing
type Signed struct {
Signed json.RawMessage `json:"signed"`
Signatures []Signature `json:"signatures"`
Signed *json.RawMessage `json:"signed"`
Signatures []Signature `json:"signatures"`
}
// SignedCommon contains the fields common to the Signed component of all
@ -119,12 +120,71 @@ type Files map[string]FileMeta
// and target file
type Hashes map[string][]byte
// NotaryDefaultHashes contains the default supported hash algorithms.
var NotaryDefaultHashes = []string{notary.SHA256, notary.SHA512}
// FileMeta contains the size and hashes for a metadata or target file. Custom
// data can be optionally added.
type FileMeta struct {
Length int64 `json:"length"`
Hashes Hashes `json:"hashes"`
Custom json.RawMessage `json:"custom,omitempty"`
Length int64 `json:"length"`
Hashes Hashes `json:"hashes"`
Custom *json.RawMessage `json:"custom,omitempty"`
}
// CheckHashes verifies all the checksums specified by the "hashes" of the payload.
func CheckHashes(payload []byte, hashes Hashes) error {
cnt := 0
// k, v indicate the hash algorithm and the corresponding value
for k, v := range hashes {
switch k {
case notary.SHA256:
checksum := sha256.Sum256(payload)
if subtle.ConstantTimeCompare(checksum[:], v) == 0 {
return fmt.Errorf("%s checksum mismatched", k)
}
cnt++
case notary.SHA512:
checksum := sha512.Sum512(payload)
if subtle.ConstantTimeCompare(checksum[:], v) == 0 {
return fmt.Errorf("%s checksum mismatched", k)
}
cnt++
}
}
if cnt == 0 {
return fmt.Errorf("at least one supported hash needed")
}
return nil
}
// CheckValidHashStructures returns an error, or nil, depending on whether
// the content of the hashes is valid or not.
func CheckValidHashStructures(hashes Hashes) error {
cnt := 0
for k, v := range hashes {
switch k {
case notary.SHA256:
if len(v) != sha256.Size {
return fmt.Errorf("invalid %s checksum", notary.SHA256)
}
cnt++
case notary.SHA512:
if len(v) != sha512.Size {
return fmt.Errorf("invalid %s checksum", notary.SHA512)
}
cnt++
}
}
if cnt == 0 {
return fmt.Errorf("at least one supported hash needed")
}
return nil
}
// NewFileMeta generates a FileMeta object from the reader, using the
@ -137,12 +197,12 @@ func NewFileMeta(r io.Reader, hashAlgorithms ...string) (FileMeta, error) {
for _, hashAlgorithm := range hashAlgorithms {
var h hash.Hash
switch hashAlgorithm {
case "sha256":
case notary.SHA256:
h = sha256.New()
case "sha512":
case notary.SHA512:
h = sha512.New()
default:
return FileMeta{}, fmt.Errorf("Unknown Hash Algorithm: %s", hashAlgorithm)
return FileMeta{}, fmt.Errorf("Unknown hash algorithm: %s", hashAlgorithm)
}
hashes[hashAlgorithm] = h
r = io.TeeReader(r, h)
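
CheckHashes compares digests with crypto/subtle's constant-time comparison rather than bytes.Equal. For public metadata digests the timing channel is mostly theoretical, but the constant-time form is a safe default. A tiny standalone illustration of that comparison:

package main

import (
	"crypto/sha256"
	"crypto/subtle"
	"fmt"
)

func main() {
	payload := []byte("hello tuf")
	want := sha256.Sum256(payload)

	got := sha256.Sum256(payload)
	// subtle.ConstantTimeCompare returns 1 only when both slices have the same
	// length and contents, and its running time does not depend on where the
	// first difference occurs; this is the comparison CheckHashes now uses.
	fmt.Println("clean payload matches:", subtle.ConstantTimeCompare(got[:], want[:]) == 1)

	tampered := append([]byte(nil), payload...)
	tampered[0] ^= 0xff
	bad := sha256.Sum256(tampered)
	fmt.Println("tampered payload matches:", subtle.ConstantTimeCompare(bad[:], want[:]) == 1)
}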

View File

@ -3,10 +3,7 @@ package signed
import (
"crypto/rand"
"errors"
"io"
"io/ioutil"
"github.com/agl/ed25519"
"github.com/docker/notary/trustmanager"
"github.com/docker/notary/tuf/data"
)
@ -29,6 +26,12 @@ func NewEd25519() *Ed25519 {
}
}
// AddKey allows you to add a private key
func (e *Ed25519) AddKey(role, gun string, k data.PrivateKey) error {
e.addKey(role, k)
return nil
}
// addKey allows you to add a private key
func (e *Ed25519) addKey(role string, k data.PrivateKey) {
e.keys[k.ID()] = edCryptoKey{
@ -64,7 +67,7 @@ func (e *Ed25519) ListAllKeys() map[string]string {
}
// Create generates a new key and returns the public part
func (e *Ed25519) Create(role, algorithm string) (data.PublicKey, error) {
func (e *Ed25519) Create(role, gun, algorithm string) (data.PublicKey, error) {
if algorithm != data.ED25519Key {
return nil, errors.New("only ED25519 supported by this cryptoservice")
}
@ -102,22 +105,3 @@ func (e *Ed25519) GetPrivateKey(keyID string) (data.PrivateKey, string, error) {
}
return nil, "", trustmanager.ErrKeyNotFound{KeyID: keyID}
}
// ImportRootKey adds an Ed25519 key to the store as a root key
func (e *Ed25519) ImportRootKey(r io.Reader) error {
raw, err := ioutil.ReadAll(r)
if err != nil {
return err
}
dataSize := ed25519.PublicKeySize + ed25519.PrivateKeySize
if len(raw) < dataSize || len(raw) > dataSize {
return errors.New("Wrong length of data for Ed25519 Key Import")
}
public := data.NewED25519PublicKey(raw[:ed25519.PublicKeySize])
private, err := data.NewED25519PrivateKey(*public, raw[ed25519.PublicKeySize:])
e.keys[private.ID()] = edCryptoKey{
role: "root",
privKey: private,
}
return nil
}
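
Create on the in-memory Ed25519 service now takes a GUN alongside the role, and key import happens through AddKey rather than the removed ImportRootKey. A minimal usage sketch against the vendored packages, assuming the canonical role constants from the data package; the GUN string is just an example value.

package main

import (
	"fmt"

	"github.com/docker/notary/tuf/data"
	"github.com/docker/notary/tuf/signed"
)

func main() {
	svc := signed.NewEd25519()

	// Create now takes the GUN in addition to the role; this in-memory service
	// ignores it, but remote or per-repository services can use it for scoping.
	pub, err := svc.Create(data.CanonicalRootRole, "docker.io/library/alpine", data.ED25519Key)
	if err != nil {
		panic(err)
	}
	fmt.Println("generated key:", pub.ID())

	for id, role := range svc.ListAllKeys() {
		fmt.Printf("key %s is held for role %s\n", id, role)
	}
}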

View File

@ -2,7 +2,6 @@ package signed
import (
"github.com/docker/notary/tuf/data"
"io"
)
// KeyService provides management of keys locally. It will never
@ -11,9 +10,10 @@ import (
type KeyService interface {
// Create issues a new key pair and is responsible for loading
// the private key into the appropriate signing service.
// The role isn't currently used for anything, but it's here to support
// future features
Create(role, algorithm string) (data.PublicKey, error)
Create(role, gun, algorithm string) (data.PublicKey, error)
// AddKey adds a private key to the specified role and gun
AddKey(role, gun string, key data.PrivateKey) error
// GetKey retrieves the public key if present, otherwise it returns nil
GetKey(keyID string) data.PublicKey
@ -30,10 +30,6 @@ type KeyService interface {
// ListAllKeys returns a map of all available signing key IDs to role
ListAllKeys() map[string]string
// ImportRootKey imports a root key to the highest priority keystore associated with
// the cryptoservice
ImportRootKey(source io.Reader) error
}
// CryptoService is deprecated and all instances of its use should be
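
The KeyService interface change mirrors the Ed25519 change above: Create is GUN-aware, AddKey is new, and ImportRootKey is gone. The sketch below writes a caller against the updated interface using only the methods whose signatures appear in this diff; rotateKey and the example GUN are hypothetical.

package main

import (
	"fmt"

	"github.com/docker/notary/tuf/data"
	"github.com/docker/notary/tuf/signed"
)

// rotateKey is a hypothetical caller of the updated interface. It relies only
// on methods whose signatures appear in this diff: AddKey, GetKey and Create.
func rotateKey(ks signed.KeyService, role, gun string, replacement data.PrivateKey) (data.PublicKey, error) {
	if replacement != nil {
		// Importing an existing key now goes through AddKey with an explicit
		// role and GUN, rather than the removed ImportRootKey(io.Reader).
		if err := ks.AddKey(role, gun, replacement); err != nil {
			return nil, err
		}
		return ks.GetKey(replacement.ID()), nil
	}
	// No replacement supplied: generate a fresh key scoped to this role and GUN.
	return ks.Create(role, gun, data.ED25519Key)
}

func main() {
	pub, err := rotateKey(signed.NewEd25519(), data.CanonicalSnapshotRole, "example.com/app", nil)
	if err != nil {
		panic(err)
	}
	fmt.Println("active snapshot key:", pub.ID())
}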

View File

@ -22,10 +22,13 @@ import (
// Sign takes a data.Signed and a key, calculates and adds the signature
// to the data.Signed
// N.B. All public keys for a role should be passed so that this function
// can correctly clean up signatures that are no longer valid.
func Sign(service CryptoService, s *data.Signed, keys ...data.PublicKey) error {
logrus.Debugf("sign called with %d keys", len(keys))
signatures := make([]data.Signature, 0, len(s.Signatures)+1)
signingKeyIDs := make(map[string]struct{})
tufIDs := make(map[string]data.PublicKey)
ids := make([]string, 0, len(keys))
privKeys := make(map[string]data.PrivateKey)
@ -34,6 +37,7 @@ func Sign(service CryptoService, s *data.Signed, keys ...data.PublicKey) error {
for _, key := range keys {
canonicalID, err := utils.CanonicalKeyID(key)
ids = append(ids, canonicalID)
tufIDs[key.ID()] = key
if err != nil {
continue
}
@ -51,7 +55,7 @@ func Sign(service CryptoService, s *data.Signed, keys ...data.PublicKey) error {
// Do signing and generate list of signatures
for keyID, pk := range privKeys {
sig, err := pk.Sign(rand.Reader, s.Signed, nil)
sig, err := pk.Sign(rand.Reader, *s.Signed, nil)
if err != nil {
logrus.Debugf("Failed to sign with key: %s. Reason: %v", keyID, err)
continue
@ -78,6 +82,20 @@ func Sign(service CryptoService, s *data.Signed, keys ...data.PublicKey) error {
// key is in the set of key IDs for which a signature has been created
continue
}
var (
k data.PublicKey
ok bool
)
if k, ok = tufIDs[sig.KeyID]; !ok {
// key is no longer a valid signing key
continue
}
if err := VerifySignature(*s.Signed, sig, k); err != nil {
// signature is no longer valid
continue
}
// keep any signatures that still represent valid keys and are
// themselves valid
signatures = append(signatures, sig)
}
s.Signatures = signatures
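
Sign no longer blindly re-attaches every pre-existing signature: an old signature is kept only if its key is still among the provided public keys and the signature still verifies against the current payload. A self-contained sketch of that pruning step, with a toy signature type standing in for data.Signature and a boolean standing in for the VerifySignature call:

package main

import "fmt"

// sigRecord is a toy stand-in for data.Signature; the Valid flag stands in
// for a successful VerifySignature call against the current payload.
type sigRecord struct {
	KeyID string
	Valid bool
}

// pruneSignatures mirrors the new loop in Sign: a previously attached
// signature survives only if its key is still an authorized signing key and
// the signature still verifies.
func pruneSignatures(existing []sigRecord, authorized map[string]bool) []sigRecord {
	var kept []sigRecord
	for _, sig := range existing {
		if !authorized[sig.KeyID] {
			continue // key has been rotated out of the role
		}
		if !sig.Valid {
			continue // payload changed since this signature was produced
		}
		kept = append(kept, sig)
	}
	return kept
}

func main() {
	sigs := []sigRecord{{"aaa", true}, {"old", true}, {"bbb", false}}
	authorized := map[string]bool{"aaa": true, "bbb": true}
	fmt.Println(pruneSignatures(sigs, authorized)) // [{aaa true}]
}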

View File

@ -2,6 +2,7 @@ package signed
import (
"errors"
"fmt"
"strings"
"time"
@ -28,7 +29,7 @@ func VerifyRoot(s *data.Signed, minVersion int, keys map[string]data.PublicKey)
}
var decoded map[string]interface{}
if err := json.Unmarshal(s.Signed, &decoded); err != nil {
if err := json.Unmarshal(*s.Signed, &decoded); err != nil {
return err
}
msg, err := json.MarshalCanonical(decoded)
@ -72,7 +73,7 @@ func Verify(s *data.Signed, role data.BaseRole, minVersion int) error {
func verifyMeta(s *data.Signed, role string, minVersion int) error {
sm := &data.SignedCommon{}
if err := json.Unmarshal(s.Signed, sm); err != nil {
if err := json.Unmarshal(*s.Signed, sm); err != nil {
return err
}
if !data.ValidTUFType(sm.Type, role) {
@ -108,7 +109,7 @@ func VerifySignatures(s *data.Signed, roleData data.BaseRole) error {
// remarshal the signed part so we can verify the signature, since the signature has
// to be of a canonically marshalled signed object
var decoded map[string]interface{}
if err := json.Unmarshal(s.Signed, &decoded); err != nil {
if err := json.Unmarshal(*s.Signed, &decoded); err != nil {
return err
}
msg, err := json.MarshalCanonical(decoded)
@ -124,16 +125,8 @@ func VerifySignatures(s *data.Signed, roleData data.BaseRole) error {
logrus.Debugf("continuing b/c keyid lookup was nil: %s\n", sig.KeyID)
continue
}
// method lookup is consistent due to Unmarshal JSON doing lower case for us.
method := sig.Method
verifier, ok := Verifiers[method]
if !ok {
logrus.Debugf("continuing b/c signing method is not supported: %s\n", sig.Method)
continue
}
if err := verifier.Verify(key, sig.Signature, msg); err != nil {
logrus.Debugf("continuing b/c signature was invalid\n")
if err := VerifySignature(msg, sig, key); err != nil {
logrus.Debugf("continuing b/c %s", err.Error())
continue
}
valid[sig.KeyID] = struct{}{}
@ -145,3 +138,18 @@ func VerifySignatures(s *data.Signed, roleData data.BaseRole) error {
return nil
}
// VerifySignature checks a single signature and public key against a payload
func VerifySignature(msg []byte, sig data.Signature, pk data.PublicKey) error {
// method lookup is consistent due to Unmarshal JSON doing lower case for us.
method := sig.Method
verifier, ok := Verifiers[method]
if !ok {
return fmt.Errorf("signing method is not supported: %s\n", sig.Method)
}
if err := verifier.Verify(pk, sig.Signature, msg); err != nil {
return fmt.Errorf("signature was invalid\n")
}
return nil
}
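
VerifySignature is now an exported helper that checks a single signature and public key against a payload, and both Sign and VerifySignatures route through it. A usage sketch against the vendored packages; building the data.Signature literal by hand is only for illustration, since signed.Sign normally fills it in.

package main

import (
	"crypto/rand"
	"fmt"

	"github.com/docker/notary/tuf/data"
	"github.com/docker/notary/tuf/signed"
)

func main() {
	svc := signed.NewEd25519()
	pub, err := svc.Create(data.CanonicalTargetsRole, "example.com/app", data.ED25519Key)
	if err != nil {
		panic(err)
	}
	priv, _, err := svc.GetPrivateKey(pub.ID())
	if err != nil {
		panic(err)
	}

	msg := []byte(`{"_type":"Targets","version":1}`)
	rawSig, err := priv.Sign(rand.Reader, msg, nil)
	if err != nil {
		panic(err)
	}

	sig := data.Signature{KeyID: pub.ID(), Method: priv.SignatureAlgorithm(), Signature: rawSig}
	// The exported helper checks one signature and one public key against a
	// payload; Sign and VerifySignatures above both delegate to it.
	if err := signed.VerifySignature(msg, sig, pub); err != nil {
		panic(err)
	}
	fmt.Println("signature verified")
}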

View File

@ -575,8 +575,9 @@ func (tr Repo) TargetDelegations(role, path string) []*data.Role {
// exist or if there are no signing keys.
func (tr *Repo) VerifyCanSign(roleName string) error {
var (
role data.BaseRole
err error
role data.BaseRole
err error
canonicalKeyIDs []string
)
// we only need the BaseRole part of a delegation because we're just
// checking KeyIDs
@ -597,6 +598,7 @@ func (tr *Repo) VerifyCanSign(roleName string) error {
check := []string{keyID}
if canonicalID, err := utils.CanonicalKeyID(k); err == nil {
check = append(check, canonicalID)
canonicalKeyIDs = append(canonicalKeyIDs, canonicalID)
}
for _, id := range check {
p, _, err := tr.cryptoService.GetPrivateKey(id)
@ -605,7 +607,7 @@ func (tr *Repo) VerifyCanSign(roleName string) error {
}
}
}
return signed.ErrNoKeys{KeyIDs: role.ListKeyIDs()}
return signed.ErrNoKeys{KeyIDs: canonicalKeyIDs}
}
// used for walking the targets/delegations tree, potentially modifying the underlying SignedTargets for the repo
@ -760,7 +762,7 @@ func (tr *Repo) UpdateSnapshot(role string, s *data.Signed) error {
if err != nil {
return err
}
meta, err := data.NewFileMeta(bytes.NewReader(jsonData), "sha256")
meta, err := data.NewFileMeta(bytes.NewReader(jsonData), data.NotaryDefaultHashes...)
if err != nil {
return err
}
@ -775,7 +777,7 @@ func (tr *Repo) UpdateTimestamp(s *data.Signed) error {
if err != nil {
return err
}
meta, err := data.NewFileMeta(bytes.NewReader(jsonData), "sha256")
meta, err := data.NewFileMeta(bytes.NewReader(jsonData), data.NotaryDefaultHashes...)
if err != nil {
return err
}
@ -917,12 +919,7 @@ func (tr *Repo) SignTimestamp(expires time.Time) (*data.Signed, error) {
}
func (tr Repo) sign(signedData *data.Signed, role data.BaseRole) (*data.Signed, error) {
ks := role.ListKeys()
if len(ks) < 1 {
return nil, signed.ErrNoKeys{}
}
err := signed.Sign(tr.cryptoService, signedData, ks...)
if err != nil {
if err := signed.Sign(tr.cryptoService, signedData, role.ListKeys()...); err != nil {
return nil, err
}
return signedData, nil
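
VerifyCanSign now accumulates the canonical key IDs it probed and reports those in ErrNoKeys, so the error names keys in the same form the user's local key store does. The sketch below mimics that reporting pattern with hypothetical helpers; canonicalID and haveKey stand in for utils.CanonicalKeyID and the crypto service's private-key lookup.

package main

import (
	"fmt"
	"strings"
)

// canonicalID is a hypothetical stand-in for utils.CanonicalKeyID.
func canonicalID(id string) string { return strings.TrimPrefix(id, "cert:") }

func verifyCanSign(roleKeyIDs []string, haveKey func(string) bool) error {
	var canonicalKeyIDs []string
	for _, id := range roleKeyIDs {
		canonical := canonicalID(id)
		canonicalKeyIDs = append(canonicalKeyIDs, canonical)
		if haveKey(id) || haveKey(canonical) {
			return nil
		}
	}
	// Report the canonical IDs, which match what the user sees in the local
	// key store, rather than the certificate-derived IDs held by the role.
	return fmt.Errorf("no signing keys available for key IDs %v", canonicalKeyIDs)
}

func main() {
	err := verifyCanSign([]string{"cert:deadbeef"}, func(string) bool { return false })
	fmt.Println(err)
}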

View File

@ -1,7 +1,6 @@
package utils
import (
"bytes"
"crypto/sha256"
"crypto/sha512"
"crypto/tls"
@ -34,24 +33,6 @@ func Upload(url string, body io.Reader) (*http.Response, error) {
return client.Post(url, "application/json", body)
}
// ValidateTarget ensures that the data read from reader matches
// the known metadata
func ValidateTarget(r io.Reader, m *data.FileMeta) error {
h := sha256.New()
length, err := io.Copy(h, r)
if err != nil {
return err
}
if length != m.Length {
return fmt.Errorf("Size of downloaded target did not match targets entry.\nExpected: %d\nReceived: %d\n", m.Length, length)
}
hashDigest := h.Sum(nil)
if bytes.Compare(m.Hashes["sha256"], hashDigest[:]) != 0 {
return fmt.Errorf("Hash of downloaded target did not match targets entry.\nExpected: %x\nReceived: %x\n", m.Hashes["sha256"], hashDigest)
}
return nil
}
// StrSliceContains checks if the given string appears in the slice
func StrSliceContains(ss []string, s string) bool {
for _, v := range ss {
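
ValidateTarget is removed here; with FileMeta now carrying multiple digests, target validation is expected to go through the same length-plus-hashes checks as metadata (for example via data.CheckHashes). The sketch below is an illustrative replacement showing that shape, not the code notary actually calls; checkTarget and its parameters are hypothetical.

package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// checkTarget is an illustrative replacement for the removed ValidateTarget:
// both the expected length and the expected digests come from the metadata
// entry, and in real code the digest comparison would be data.CheckHashes.
func checkTarget(r io.Reader, wantLength int64, wantHashes map[string][]byte) error {
	payload, err := ioutil.ReadAll(r)
	if err != nil {
		return err
	}
	if int64(len(payload)) != wantLength {
		return fmt.Errorf("size of downloaded target did not match: expected %d, received %d", wantLength, len(payload))
	}
	sum := sha256.Sum256(payload)
	if want, ok := wantHashes["sha256"]; !ok || !bytes.Equal(sum[:], want) {
		return fmt.Errorf("sha256 of downloaded target did not match targets entry")
	}
	return nil
}

func main() {
	content := "target contents"
	sum := sha256.Sum256([]byte(content))
	meta := map[string][]byte{"sha256": sum[:]}
	fmt.Println(checkTarget(strings.NewReader(content), int64(len(content)), meta)) // <nil>
}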