Mirror of https://github.com/moby/moby.git
Bump vendored and dockerfile notary version to v0.4.2

Signed-off-by: cyli <cyli@twistedmatrix.com>

Parent: f2c094cb99 · Commit: 5f4dcd8252
74 changed files with 2264 additions and 2237 deletions
@@ -176,7 +176,7 @@ RUN set -x \
     && rm -rf "$GOPATH"

 # Install notary and notary-server
-ENV NOTARY_VERSION v0.3.0
+ENV NOTARY_VERSION v0.4.2
 RUN set -x \
     && export GOPATH="$(mktemp -d)" \
     && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \

@@ -121,7 +121,7 @@ RUN set -x \
     && rm -rf "$GOPATH"

 # Install notary and notary-server
-ENV NOTARY_VERSION v0.3.0
+ENV NOTARY_VERSION v0.4.2
 RUN set -x \
     && export GOPATH="$(mktemp -d)" \
     && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \

@@ -120,7 +120,7 @@ RUN set -x \
     && rm -rf "$GOPATH"

 # Install notary and notary-server
-ENV NOTARY_VERSION v0.3.0
+ENV NOTARY_VERSION v0.4.2
 RUN set -x \
     && export GOPATH="$(mktemp -d)" \
     && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \

@@ -139,7 +139,7 @@ RUN set -x \
     && rm -rf "$GOPATH"

 # Install notary and notary-server
-ENV NOTARY_VERSION v0.3.0
+ENV NOTARY_VERSION v0.4.2
 RUN set -x \
     && export GOPATH="$(mktemp -d)" \
     && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \

@@ -99,7 +99,7 @@ RUN set -x \
     && rm -rf "$GOPATH"

 # Install notary and notary-server
-ENV NOTARY_VERSION v0.3.0
+ENV NOTARY_VERSION v0.4.2
 RUN set -x \
     && export GOPATH="$(mktemp -d)" \
     && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \

@@ -99,7 +99,7 @@ clone git github.com/mistifyio/go-zfs 22c9b32c84eb0d0c6f4043b6e90fc94073de92fa
 clone git github.com/pborman/uuid v1.0

 # get desired notary commit, might also need to be updated in Dockerfile
-clone git github.com/docker/notary v0.3.0
+clone git github.com/docker/notary v0.4.2

 clone git google.golang.org/grpc v1.0.1-GA https://github.com/grpc/grpc-go.git
 clone git github.com/miekg/pkcs11 df8ae6ca730422dba20c768ff38ef7d79077a59f
@@ -1,3 +1,4 @@
+/.vscode
 /cmd/notary-server/notary-server
 /cmd/notary-server/local.config.json
 /cmd/notary-signer/local.config.json
@@ -8,4 +9,5 @@ cross
 *.swp
 .idea
 *.iml
 *.test
+coverage.out
vendor/src/github.com/docker/notary/CHANGELOG.md (vendored, 61 lines changed)
@@ -1,5 +1,66 @@
 # Changelog
 
+## [v0.4.2](https://github.com/docker/notary/releases/tag/v0.4.2) 9/30/2016
++ Bump the cross compiler to golang 1.7.1, since [1.6.3 builds binaries that could have non-deterministic bugs in OS X Sierra](https://groups.google.com/forum/#!msg/golang-dev/Jho5sBHZgAg/cq6d97S1AwAJ) [#984](https://github.com/docker/notary/pull/984)
+
+## [v0.4.1](https://github.com/docker/notary/releases/tag/v0.4.1) 9/27/2016
++ Preliminary Windows support for notary client [#970](https://github.com/docker/notary/pull/970)
++ Output message to CLI when repo changes have been successfully published [#974](https://github.com/docker/notary/pull/974)
++ Improved error messages for client authentication errors and for the witness command [#972](https://github.com/docker/notary/pull/972)
++ Support for finding keys that are anywhere in the notary directory's "private" directory, not just under "private/root_keys" or "private/tuf_keys" [#981](https://github.com/docker/notary/pull/981)
++ Previously, on any error updating, the client would fall back on the cache. Now we only do so if there is a network error or if the server is unavailable or missing the TUF data. Invalid TUF data will cause the update to fail - for example if there was an invalid root rotation. [#982](https://github.com/docker/notary/pull/982)
+
+## [v0.4.0](https://github.com/docker/notary/releases/tag/v0.4.0) 9/21/2016
++ Server-managed key rotations [#889](https://github.com/docker/notary/pull/889)
++ Remove `timestamp_keys` table, which stored redundant information [#889](https://github.com/docker/notary/pull/889)
++ Introduce `notary delete` command to delete local and/or remote repo data [#895](https://github.com/docker/notary/pull/895)
++ Introduce `notary witness` command to stage signatures for specified roles [#875](https://github.com/docker/notary/pull/875)
++ Add `-p` flag to offline commands to attempt auto-publish [#886](https://github.com/docker/notary/pull/886) [#912](https://github.com/docker/notary/pull/912) [#923](https://github.com/docker/notary/pull/923)
++ Introduce `notary reset` command to manage staged changes [#959](https://github.com/docker/notary/pull/959) [#856](https://github.com/docker/notary/pull/856)
++ Add `--rootkey` flag to `notary init` to provide a private root key for a repo [#801](https://github.com/docker/notary/pull/801)
++ Introduce `notary delegation purge` command to remove a specified key from all delegations [#855](https://github.com/docker/notary/pull/855)
++ Removed HTTP endpoint from notary-signer [#870](https://github.com/docker/notary/pull/870)
++ Refactored and unified key storage [#825](https://github.com/docker/notary/pull/825)
++ Batched key import and export now operate on PEM files (potentially with multiple blocks) instead of ZIP [#825](https://github.com/docker/notary/pull/825) [#882](https://github.com/docker/notary/pull/882)
++ Add full database integration test-suite [#824](https://github.com/docker/notary/pull/824) [#854](https://github.com/docker/notary/pull/854) [#863](https://github.com/docker/notary/pull/863)
++ Improve notary-server, trust pinning, and yubikey logging [#798](https://github.com/docker/notary/pull/798) [#858](https://github.com/docker/notary/pull/858) [#891](https://github.com/docker/notary/pull/891)
++ Warn if certificates for root or delegations are near expiry [#802](https://github.com/docker/notary/pull/802)
++ Warn if role metadata is near expiry [#786](https://github.com/docker/notary/pull/786)
++ Reformat CLI table output to use the `text/tabwriter` package [#809](https://github.com/docker/notary/pull/809)
++ Fix passphrase retrieval attempt counting and terminal detection [#906](https://github.com/docker/notary/pull/906)
++ Fix listing nested delegations [#864](https://github.com/docker/notary/pull/864)
++ Bump go version to 1.6.3, fix go1.7 compatibility [#851](https://github.com/docker/notary/pull/851) [#793](https://github.com/docker/notary/pull/793)
++ Convert docker-compose files to v2 format [#755](https://github.com/docker/notary/pull/755)
++ Validate root rotations against trust pinning [#800](https://github.com/docker/notary/pull/800)
++ Update fixture certificates for two-year expiry window [#951](https://github.com/docker/notary/pull/951)
+
+## [v0.3.0](https://github.com/docker/notary/releases/tag/v0.3.0) 5/11/2016
++ Root rotations
++ RethinkDB support as a storage backend for Server and Signer
++ A new TUF repo builder that merges server and client validation
++ Trust Pinning: configure known good key IDs and CAs to replace TOFU.
++ Add --input, --output, and --quiet flags to notary verify command
++ Remove local certificate store. It was redundant as all certs were also stored in the cached root.json
++ Cleanup of dead code in client side key storage logic
++ Update project to Go 1.6.1
++ Reorganize vendoring to meet Go 1.6+ standard. Still using Godeps to manage vendored packages
++ Add targets by hash, no longer necessary to have the original target data available
++ Active Key ID verification during signature verification
++ Switch all testing from assert to require, reduces noise in test runs
++ Use alpine based images for smaller downloads and faster setup times
++ Clean up out of data signatures when re-signing content
++ Set cache control headers on HTTP responses from Notary Server
++ Add sha512 support for targets
++ Add environment variable for delegation key passphrase
++ Reduce permissions requested by client from token server
++ Update formatting for delegation list output
++ Move SQLite dependency to tests only so it doesn't get built into official images
++ Fixed asking for password to list private repositories
++ Enable using notary client with username/password in a scripted fashion
++ Fix static compilation of client
++ Enforce TUF version to be >= 1, previously 0 was acceptable although unused
++ json.RawMessage should always be used as *json.RawMessage due to concepts of addressability in Go and effects on encoding
+
 ## [v0.2](https://github.com/docker/notary/releases/tag/v0.2.0) 2/24/2016
 + Add support for delegation roles in `notary` server and client
 + Add `notary CLI` commands for managing delegation roles: `notary delegation`
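The v0.4.1 entry above tightens when the client may fall back on cached TUF data: only on a network error, or when the server is unavailable or has no TUF data for the repo; invalid metadata (for example a bad root rotation) must fail the update. The sketch below restates that rule in Go. It is an illustration only: the error value and the helper `shouldFallBackToCache` are invented for this example and are not notary's actual identifiers.

```go
package main

import (
	"errors"
	"fmt"
	"net"
)

// ErrMetaNotFound stands in for notary's "server is missing the TUF data"
// condition; the name is borrowed for illustration, not imported.
var ErrMetaNotFound = errors.New("metadata not found on server")

// shouldFallBackToCache sketches the v0.4.1 rule from the changelog:
// fall back on cached TUF data only for network errors or a server that
// has no TUF data; anything else (e.g. invalid metadata) fails the update.
func shouldFallBackToCache(err error) bool {
	var netErr net.Error
	if errors.As(err, &netErr) {
		return true // network failure: use the cache
	}
	if errors.Is(err, ErrMetaNotFound) {
		return true // server has no TUF data for this repo: use the cache
	}
	return false // e.g. an invalid root rotation: surface the error
}

func main() {
	fmt.Println(shouldFallBackToCache(ErrMetaNotFound))            // true
	fmt.Println(shouldFallBackToCache(errors.New("invalid root"))) // false
}
```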
vendor/src/github.com/docker/notary/Dockerfile (vendored, 11 lines changed)
@@ -1,4 +1,4 @@
-FROM golang:1.6.1
+FROM golang:1.7.1

 RUN apt-get update && apt-get install -y \
     curl \
@@ -8,10 +8,14 @@ RUN apt-get update && apt-get install -y \
     patch \
     tar \
     xz-utils \
+    python \
+    python-pip \
     --no-install-recommends \
     && rm -rf /var/lib/apt/lists/*

-RUN go get golang.org/x/tools/cmd/cover
+RUN useradd -ms /bin/bash notary \
+    && pip install codecov \
+    && go get golang.org/x/tools/cmd/cover github.com/golang/lint/golint github.com/client9/misspell/cmd/misspell github.com/gordonklaus/ineffassign

 # Configure the container for OSX cross compilation
 ENV OSX_SDK MacOSX10.11.sdk
@@ -27,8 +31,7 @@ ENV PATH /osxcross/target/bin:$PATH
 ENV NOTARYDIR /go/src/github.com/docker/notary

 COPY . ${NOTARYDIR}

-ENV GOPATH ${NOTARYDIR}/Godeps/_workspace:$GOPATH
+RUN chmod -R a+rw /go

 WORKDIR ${NOTARYDIR}
vendor/src/github.com/docker/notary/Makefile (vendored, 80 lines changed)
@@ -13,13 +13,15 @@ endif
 CTIMEVAR=-X $(NOTARY_PKG)/version.GitCommit=$(GITCOMMIT) -X $(NOTARY_PKG)/version.NotaryVersion=$(NOTARY_VERSION)
 GO_LDFLAGS=-ldflags "-w $(CTIMEVAR)"
 GO_LDFLAGS_STATIC=-ldflags "-w $(CTIMEVAR) -extldflags -static"
-GOOSES = darwin linux
+GOOSES = darwin linux windows
 NOTARY_BUILDTAGS ?= pkcs11
 NOTARYDIR := /go/src/github.com/docker/notary

-GO_VERSION := $(shell go version | grep "1\.[6-9]\(\.[0-9]+\)*")
-# check to make sure we have the right version
-ifeq ($(strip $(GO_VERSION)),)
+GO_VERSION := $(shell go version | grep "1\.[6-9]\(\.[0-9]+\)*\|devel")
+# check to make sure we have the right version. development versions of Go are
+# not officially supported, but allowed for building
+
+ifeq ($(strip $(GO_VERSION))$(SKIPENVCHECK),)
 $(error Bad Go version - please install Go >= 1.6)
 endif

@@ -40,13 +42,11 @@ COVERPROFILE?=$(COVERDIR)/cover.out
 COVERMODE=count
 PKGS ?= $(shell go list -tags "${NOTARY_BUILDTAGS}" ./... | grep -v /vendor/ | tr '\n' ' ')

-GO_VERSION = $(shell go version | awk '{print $$3}')
-
-.PHONY: clean all fmt vet lint build test binaries cross cover docker-images notary-dockerfile
+.PHONY: clean all lint build test binaries cross cover docker-images notary-dockerfile
 .DELETE_ON_ERROR: cover
 .DEFAULT: default

-all: AUTHORS clean fmt vet fmt lint build test binaries
+all: AUTHORS clean lint build test binaries

 AUTHORS: .git/HEAD
     git log --format='%aN <%aE>' | sort -fu > $@
@@ -90,32 +90,27 @@ ${PREFIX}/bin/static/notary:
     @go build -tags ${NOTARY_BUILDTAGS} -o $@ ${GO_LDFLAGS_STATIC} ./cmd/notary
 endif

-vet:
-    @echo "+ $@"
+# run all lint functionality - excludes Godep directory, vendoring, binaries, python tests, and git files
+lint:
+    @echo "+ $@: golint, go vet, go fmt, misspell, ineffassign"
+    # golint
+    @test -z "$(shell find . -type f -name "*.go" -not -path "./vendor/*" -not -name "*.pb.*" -exec golint {} \; | tee /dev/stderr)"
+    # gofmt
+    @test -z "$$(gofmt -s -l .| grep -v .pb. | grep -v vendor/ | tee /dev/stderr)"
+    # govet
 ifeq ($(shell uname -s), Darwin)
     @test -z "$(shell find . -iname *test*.go | grep -v _test.go | grep -v vendor | xargs echo "This file should end with '_test':" | tee /dev/stderr)"
 else
     @test -z "$(shell find . -iname *test*.go | grep -v _test.go | grep -v vendor | xargs -r echo "This file should end with '_test':" | tee /dev/stderr)"
 endif
     @test -z "$$(go tool vet -printf=false . 2>&1 | grep -v vendor/ | tee /dev/stderr)"

-fmt:
-    @echo "+ $@"
-    @test -z "$$(gofmt -s -l .| grep -v .pb. | grep -v vendor/ | tee /dev/stderr)"
-
-lint:
-    @echo "+ $@"
-    @test -z "$(shell find . -type f -name "*.go" -not -path "./vendor/*" -not -name "*.pb.*" -exec golint {} \; | tee /dev/stderr)"
-
-# Requires that the following:
-# go get -u github.com/client9/misspell/cmd/misspell
-#
-# be run first
-
-# misspell target, don't include Godeps, binaries, python tests, or git files
-misspell:
-    @echo "+ $@"
-    @test -z "$$(find . -name '*' | grep -v vendor/ | grep -v bin/ | grep -v misc/ | grep -v .git/ | xargs misspell | tee /dev/stderr)"
+    # misspell - requires that the following be run first:
+    # go get -u github.com/client9/misspell/cmd/misspell
+    @test -z "$$(find . -type f | grep -v vendor/ | grep -v bin/ | grep -v misc/ | grep -v .git/ | grep -v \.pdf | xargs misspell | tee /dev/stderr)"
+    # ineffassign - requires that the following be run first:
+    # go get -u github.com/gordonklaus/ineffassign
+    @test -z "$(shell find . -type f -name "*.go" -not -path "./vendor/*" -not -name "*.pb.*" -exec ineffassign {} \; | tee /dev/stderr)"

 build:
     @echo "+ $@"
@@ -130,15 +125,13 @@ test:
     @echo
     go test -tags "${NOTARY_BUILDTAGS}" $(TESTOPTS) $(PKGS)

-test-full: TESTOPTS =
-test-full: vet lint
-    @echo Note: when testing with a yubikey plugged in, make sure to include 'TESTOPTS="-p 1"'
-    @echo "+ $@"
-    @echo
-    go test -tags "${NOTARY_BUILDTAGS}" $(TESTOPTS) -v $(PKGS)
-
-integration:
-    buildscripts/integrationtest.sh development.yml
+integration: TESTDB = mysql
+integration: clean
+    buildscripts/integrationtest.sh $(TESTDB)

+testdb: TESTDB = mysql
+testdb:
+    buildscripts/dbtests.sh $(TESTDB)

 protos:
     @protoc --go_out=plugins=grpc:. proto/*.proto
@@ -148,25 +141,19 @@ protos:
 # go get github.com/wadey/gocovmerge; go install github.com/wadey/gocovmerge
 #
 # be run first

-define gocover
-go test $(OPTS) $(TESTOPTS) -covermode="$(COVERMODE)" -coverprofile="$(COVERDIR)/$(subst /,-,$(1)).$(subst $(_space),.,$(NOTARY_BUILDTAGS)).coverage.txt" "$(1)" || exit 1;
-endef
-
 gen-cover:
     @mkdir -p "$(COVERDIR)"
-    $(foreach PKG,$(PKGS),$(call gocover,$(PKG)))
-    rm -f "$(COVERDIR)"/*testutils*.coverage.txt
+    python -u buildscripts/covertest.py --coverdir "$(COVERDIR)" --tags "$(NOTARY_BUILDTAGS)" --pkgs="$(PKGS)" --testopts="${TESTOPTS}"

 # Generates the cover binaries and runs them all in serial, so this can be used
 # run all tests with a yubikey without any problems
-cover: OPTS = -tags "${NOTARY_BUILDTAGS}" -coverpkg "$(shell ./coverpkg.sh $(1) $(NOTARY_PKG))"
 cover: gen-cover covmerge
     @go tool cover -html="$(COVERPROFILE)"

 # Generates the cover binaries and runs them all in serial, so this can be used
 # run all tests with a yubikey without any problems
-ci: OPTS = -tags "${NOTARY_BUILDTAGS}" -race -coverpkg "$(shell ./coverpkg.sh $(1) $(NOTARY_PKG))"
+ci: override TESTOPTS = -race
 # Codecov knows how to merge multiple coverage files, so covmerge is not needed
 ci: gen-cover
@@ -205,10 +192,9 @@ shell: notary-dockerfile

 cross: notary-dockerfile
     @rm -rf $(CURDIR)/cross
-    docker run --rm -v $(CURDIR)/cross:$(NOTARYDIR)/cross -e NOTARY_BUILDTAGS=$(NOTARY_BUILDTAGS) notary buildscripts/cross.sh $(GOOSES)
-
+    docker run --rm -v $(CURDIR)/cross:$(NOTARYDIR)/cross -e CTIMEVAR="${CTIMEVAR}" -e NOTARY_BUILDTAGS=$(NOTARY_BUILDTAGS) notary buildscripts/cross.sh $(GOOSES)

 clean:
     @echo "+ $@"
-    @rm -rf "$(COVERDIR)"
+    @rm -rf "$(COVERDIR)" cross
     @rm -rf "${PREFIX}/bin/notary-server" "${PREFIX}/bin/notary" "${PREFIX}/bin/notary-signer"
@@ -1 +1 @@
-0.2
+0.4.2
@@ -1,5 +1,5 @@
 # Notary
-[![Circle CI](https://circleci.com/gh/docker/notary/tree/master.svg?style=shield)](https://circleci.com/gh/docker/notary/tree/master) [![CodeCov](https://codecov.io/github/docker/notary/coverage.svg?branch=master)](https://codecov.io/github/docker/notary)
+[![Circle CI](https://circleci.com/gh/docker/notary/tree/master.svg?style=shield)](https://circleci.com/gh/docker/notary/tree/master) [![CodeCov](https://codecov.io/github/docker/notary/coverage.svg?branch=master)](https://codecov.io/github/docker/notary) [![GoReportCard](https://goreportcard.com/badge/docker/notary)](https://goreportcard.com/report/github.com/docker/notary)

 The Notary project comprises a [server](cmd/notary-server) and a [client](cmd/notary) for running and interacting
 with trusted collections. Please see the [service architecture](docs/service_architecture.md) documentation
@@ -80,7 +80,8 @@ to use `notary` with Docker images.

 Prerequisites:

-- Go >= 1.6.1
+- Go >= 1.7
+
 - [godep](https://github.com/tools/godep) installed
 - libtool development headers installed
     - Ubuntu: `apt-get install libltdl-dev`
vendor/src/github.com/docker/notary/circle.yml (vendored, 76 lines changed)
@@ -1,87 +1,23 @@
 # Pony-up!
 machine:
   pre:
   # Install gvm
     - bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/1.0.22/binscripts/gvm-installer)
   # Upgrade docker
-    - sudo curl -L -o /usr/bin/docker 'https://s3-external-1.amazonaws.com/circle-downloads/docker-1.9.1-circleci'
-    - sudo chmod 0755 /usr/bin/docker
-
-  post:
-  # Install many go versions
-    - gvm install go1.6.1 -B --name=stable
+    - curl -sSL https://s3.amazonaws.com/circle-downloads/install-circleci-docker.sh | bash -s -- 1.10.0
   # upgrade compose
     - sudo pip install --upgrade docker-compose

   services:
     - docker

-  environment:
-  # Convenient shortcuts to "common" locations
-    CHECKOUT: /home/ubuntu/$CIRCLE_PROJECT_REPONAME
-    BASE_DIR: src/github.com/docker/notary
-  # Trick circle brainflat "no absolute path" behavior
-    BASE_STABLE: ../../../$HOME/.gvm/pkgsets/stable/global/$BASE_DIR
-  # Workaround Circle parsing dumb bugs and/or YAML wonkyness
-    CIRCLE_PAIN: "mode: set"
-  # Put the coverage profile somewhere codecov's script can find it
-    COVERPROFILE: coverage.out
-
-  hosts:
-  # Not used yet
-    fancy: 127.0.0.1
-
 dependencies:
-  pre:
-  # Copy the code to the gopath of all go versions
-    - >
-      gvm use stable &&
-      mkdir -p "$(dirname $BASE_STABLE)" &&
-      cp -R "$CHECKOUT" "$BASE_STABLE"
-
   override:
-  # don't use circleci's default dependency installation step of `go get -d -u ./...`
-  # since we already vendor everything; additionally install linting and misspell tools
-    - >
-      gvm use stable &&
-      go get github.com/golang/lint/golint &&
-      go get -u github.com/client9/misspell/cmd/misspell
     - docker build -t notary_client .

 test:
-  pre:
-  # Output the go versions we are going to test
-    - gvm use stable && go version
-
-  # CLEAN
-    - gvm use stable && make clean:
-        pwd: $BASE_STABLE
-
-  # FMT
-    - gvm use stable && make fmt:
-        pwd: $BASE_STABLE
-
-  # VET
-    - gvm use stable && make vet:
-        pwd: $BASE_STABLE
-
-  # LINT
-    - gvm use stable && make lint:
-        pwd: $BASE_STABLE
-
-  # MISSPELL
-    - gvm use stable && make misspell:
-        pwd: $BASE_STABLE
-
   override:
-  # Test stable, and report
-  # hacking this to be parallel
-    - case $CIRCLE_NODE_INDEX in 0) gvm use stable && NOTARY_BUILDTAGS=pkcs11 make ci ;; 1) gvm use stable && NOTARY_BUILDTAGS=none make ci ;; 2) gvm use stable && make integration ;; esac:
+  # circleci only supports manual parellism
+    - buildscripts/circle_parallelism.sh:
         parallel: true
         timeout: 600
-        pwd: $BASE_STABLE

   post:
-  # Report to codecov.io
-    - case $CIRCLE_NODE_INDEX in 0) bash <(curl -s https://codecov.io/bash) ;; 1) bash <(curl -s https://codecov.io/bash) ;; esac:
-        parallel: true
-        pwd: $BASE_STABLE
+    - docker-compose -f docker-compose.yml down -v
+    - docker-compose -f docker-compose.rethink.yml down -v
@@ -4,7 +4,7 @@ import (
 	"github.com/docker/notary/tuf/data"
 )

-// Scopes for TufChanges are simply the TUF roles.
+// Scopes for TUFChanges are simply the TUF roles.
 // Unfortunately because of targets delegations, we can only
 // cover the base roles.
 const (
@@ -14,7 +14,7 @@ const (
 	ScopeTimestamp = "timestamp"
 )

-// Types for TufChanges are namespaced by the Role they
+// Types for TUFChanges are namespaced by the Role they
 // are relevant for. The Root and Targets roles are the
 // only ones for which user action can cause a change, as
 // all changes in Snapshot and Timestamp are programmatically
@@ -23,10 +23,11 @@ const (
 	TypeRootRole          = "role"
 	TypeTargetsTarget     = "target"
 	TypeTargetsDelegation = "delegation"
+	TypeWitness           = "witness"
 )

-// TufChange represents a change to a TUF repo
-type TufChange struct {
+// TUFChange represents a change to a TUF repo
+type TUFChange struct {
 	// Abbreviated because Go doesn't permit a field and method of the same name
 	Actn       string `json:"action"`
 	Role       string `json:"role"`
@@ -35,16 +36,16 @@ type TufChange struct {
 	Data       []byte `json:"data"`
 }

-// TufRootData represents a modification of the keys associated
+// TUFRootData represents a modification of the keys associated
 // with a role that appears in the root.json
-type TufRootData struct {
+type TUFRootData struct {
 	Keys     data.KeyList `json:"keys"`
 	RoleName string       `json:"role"`
 }

-// NewTufChange initializes a tufChange object
-func NewTufChange(action string, role, changeType, changePath string, content []byte) *TufChange {
-	return &TufChange{
+// NewTUFChange initializes a TUFChange object
+func NewTUFChange(action string, role, changeType, changePath string, content []byte) *TUFChange {
+	return &TUFChange{
 		Actn:       action,
 		Role:       role,
 		ChangeType: changeType,
@@ -54,34 +55,34 @@ func NewTufChange(action string, role, changeType, changePath string, content []
 }

 // Action return c.Actn
-func (c TufChange) Action() string {
+func (c TUFChange) Action() string {
 	return c.Actn
 }

 // Scope returns c.Role
-func (c TufChange) Scope() string {
+func (c TUFChange) Scope() string {
 	return c.Role
 }

 // Type returns c.ChangeType
-func (c TufChange) Type() string {
+func (c TUFChange) Type() string {
 	return c.ChangeType
 }

 // Path return c.ChangePath
-func (c TufChange) Path() string {
+func (c TUFChange) Path() string {
 	return c.ChangePath
 }

 // Content returns c.Data
-func (c TufChange) Content() []byte {
+func (c TUFChange) Content() []byte {
 	return c.Data
 }

-// TufDelegation represents a modification to a target delegation
+// TUFDelegation represents a modification to a target delegation
 // this includes creating a delegations. This format is used to avoid
 // unexpected race conditions between humans modifying the same delegation
-type TufDelegation struct {
+type TUFDelegation struct {
 	NewName       string       `json:"new_name,omitempty"`
 	NewThreshold  int          `json:"threshold, omitempty"`
 	AddKeys       data.KeyList `json:"add_keys, omitempty"`
@@ -91,8 +92,8 @@ type TufDelegation struct {
 	ClearAllPaths bool         `json:"clear_paths,omitempty"`
 }

-// ToNewRole creates a fresh role object from the TufDelegation data
-func (td TufDelegation) ToNewRole(scope string) (*data.Role, error) {
+// ToNewRole creates a fresh role object from the TUFDelegation data
+func (td TUFDelegation) ToNewRole(scope string) (*data.Role, error) {
 	name := scope
 	if td.NewName != "" {
 		name = td.NewName
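The hunks above rename `TufChange` and friends to `TUFChange`, a source-breaking rename for anything that vendors this package, which is why this commit touches so many Docker files. Below is a minimal standalone sketch of the renamed constructor and its call shape; the type is mirrored locally (with the json tags on `ChangeType` and `ChangePath` elided, since the diff does not show them) so the example compiles without the notary packages.

```go
package main

import "fmt"

// TUFChange is a local mirror, copied in shape from the diff above; the
// real type lives in github.com/docker/notary/client/changelist.
type TUFChange struct {
	Actn       string `json:"action"`
	Role       string `json:"role"`
	ChangeType string // json tag elided in this sketch
	ChangePath string // json tag elided in this sketch
	Data       []byte `json:"data"`
}

// NewTUFChange mirrors the constructor signature shown in the diff.
func NewTUFChange(action string, role, changeType, changePath string, content []byte) *TUFChange {
	return &TUFChange{Actn: action, Role: role, ChangeType: changeType, ChangePath: changePath, Data: content}
}

func main() {
	// Callers that previously wrote changelist.NewTufChange(...) now spell
	// it changelist.NewTUFChange(...); the arguments are unchanged.
	c := NewTUFChange("create", "targets", "target", "library/alpine", []byte(`{}`))
	fmt.Println(c.Actn, c.Role, c.ChangeType, c.ChangePath)
}
```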
@@ -21,6 +21,24 @@ func (cl *memChangelist) Add(c Change) error {
 	return nil
 }

+// Remove deletes the changes found at the given indices
+func (cl *memChangelist) Remove(idxs []int) error {
+	remove := make(map[int]struct{})
+	for _, i := range idxs {
+		remove[i] = struct{}{}
+	}
+	var keep []Change
+
+	for i, c := range cl.changes {
+		if _, ok := remove[i]; ok {
+			continue
+		}
+		keep = append(keep, c)
+	}
+	cl.changes = keep
+	return nil
+}
+
 // Clear empties the changelist file.
 func (cl *memChangelist) Clear(archive string) error {
 	// appending to a nil list initializes it.
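The new `Remove` method above drops changes by position: it builds a set of the requested indices, then keeps everything else in its original order. A self-contained sketch of the same pattern over plain strings, assuming nothing beyond the standard library:

```go
package main

import "fmt"

// removeByIndices reproduces the pattern used by memChangelist.Remove in
// the diff above: collect the indices to drop in a set, then keep every
// other element, preserving order.
func removeByIndices(changes []string, idxs []int) []string {
	remove := make(map[int]struct{})
	for _, i := range idxs {
		remove[i] = struct{}{}
	}
	var keep []string
	for i, c := range changes {
		if _, ok := remove[i]; ok {
			continue
		}
		keep = append(keep, c)
	}
	return keep
}

func main() {
	cl := []string{"add target A", "add target B", "delete target C"}
	fmt.Println(removeByIndices(cl, []int{1})) // [add target A delete target C]
}
```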
@@ -5,12 +5,12 @@ import (
 	"fmt"
 	"io/ioutil"
 	"os"
-	"path"
 	"sort"
 	"time"

 	"github.com/Sirupsen/logrus"
 	"github.com/docker/distribution/uuid"
+	"path/filepath"
 )

 // FileChangelist stores all the changes as files
@@ -46,13 +46,14 @@ func getFileNames(dirName string) ([]os.FileInfo, error) {
 		}
 		fileInfos = append(fileInfos, f)
 	}
+	sort.Sort(fileChanges(fileInfos))
 	return fileInfos, nil
 }

-// Read a JSON formatted file from disk; convert to TufChange struct
-func unmarshalFile(dirname string, f os.FileInfo) (*TufChange, error) {
-	c := &TufChange{}
-	raw, err := ioutil.ReadFile(path.Join(dirname, f.Name()))
+// Read a JSON formatted file from disk; convert to TUFChange struct
+func unmarshalFile(dirname string, f os.FileInfo) (*TUFChange, error) {
+	c := &TUFChange{}
+	raw, err := ioutil.ReadFile(filepath.Join(dirname, f.Name()))
 	if err != nil {
 		return c, err
 	}
@@ -70,7 +71,6 @@ func (cl FileChangelist) List() []Change {
 	if err != nil {
 		return changes
 	}
-	sort.Sort(fileChanges(fileInfos))
 	for _, f := range fileInfos {
 		c, err := unmarshalFile(cl.dir, f)
 		if err != nil {
@@ -89,10 +89,32 @@ func (cl FileChangelist) Add(c Change) error {
 		return err
 	}
 	filename := fmt.Sprintf("%020d_%s.change", time.Now().UnixNano(), uuid.Generate())
-	return ioutil.WriteFile(path.Join(cl.dir, filename), cJSON, 0644)
+	return ioutil.WriteFile(filepath.Join(cl.dir, filename), cJSON, 0644)
+}
+
+// Remove deletes the changes found at the given indices
+func (cl FileChangelist) Remove(idxs []int) error {
+	fileInfos, err := getFileNames(cl.dir)
+	if err != nil {
+		return err
+	}
+	remove := make(map[int]struct{})
+	for _, i := range idxs {
+		remove[i] = struct{}{}
+	}
+	for i, c := range fileInfos {
+		if _, ok := remove[i]; ok {
+			file := filepath.Join(cl.dir, c.Name())
+			if err := os.Remove(file); err != nil {
+				logrus.Errorf("could not remove change %d: %s", i, err.Error())
+			}
+		}
+	}
+	return nil
 }

 // Clear clears the change list
+// N.B. archiving not currently implemented
 func (cl FileChangelist) Clear(archive string) error {
 	dir, err := os.Open(cl.dir)
 	if err != nil {
@@ -104,7 +126,7 @@ func (cl FileChangelist) Clear(archive string) error {
 		return err
 	}
 	for _, f := range files {
-		os.Remove(path.Join(cl.dir, f.Name()))
+		os.Remove(filepath.Join(cl.dir, f.Name()))
 	}
 	return nil
 }
@@ -121,7 +143,6 @@ func (cl FileChangelist) NewIterator() (ChangeIterator, error) {
 	if err != nil {
 		return &FileChangeListIterator{}, err
 	}
-	sort.Sort(fileChanges(fileInfos))
 	return &FileChangeListIterator{dirname: cl.dir, collection: fileInfos}, nil
 }
@@ -15,6 +15,9 @@ type Changelist interface {
 	// to save a copy of the changelist in that location
 	Clear(archive string) error

+	// Remove deletes the changes corresponding with the indices given
+	Remove(idxs []int) error
+
 	// Close syncronizes any pending writes to the underlying
 	// storage and closes the file/connection
 	Close() error
vendor/src/github.com/docker/notary/client/client.go (vendored, 165 lines changed)
@@ -16,13 +16,12 @@ import (
 	"github.com/docker/notary"
 	"github.com/docker/notary/client/changelist"
 	"github.com/docker/notary/cryptoservice"
+	store "github.com/docker/notary/storage"
 	"github.com/docker/notary/trustmanager"
 	"github.com/docker/notary/trustpinning"
 	"github.com/docker/notary/tuf"
-	tufclient "github.com/docker/notary/tuf/client"
 	"github.com/docker/notary/tuf/data"
 	"github.com/docker/notary/tuf/signed"
-	"github.com/docker/notary/tuf/store"
 	"github.com/docker/notary/tuf/utils"
 )

@@ -85,6 +84,7 @@ type NotaryRepository struct {
 	fileStore     store.MetadataStore
 	CryptoService signed.CryptoService
 	tufRepo       *tuf.Repo
+	invalid       *tuf.Repo // known data that was parsable but deemed invalid
 	roundTrip     http.RoundTripper
 	trustPinning  trustpinning.TrustPinConfig
 }
@@ -121,7 +121,7 @@ func repositoryFromKeystores(baseDir, gun, baseURL string, rt http.RoundTripper,
 }

 // Target represents a simplified version of the data TUF operates on, so external
-// applications don't have to depend on tuf data types.
+// applications don't have to depend on TUF data types.
 type Target struct {
 	Name   string      // the name of the target
 	Hashes data.Hashes // the hash of the target
@@ -159,7 +159,7 @@ func rootCertKey(gun string, privKey data.PrivateKey) (data.PublicKey, error) {
 		return nil, err
 	}

-	x509PublicKey := trustmanager.CertToKey(cert)
+	x509PublicKey := utils.CertToKey(cert)
 	if x509PublicKey == nil {
 		return nil, fmt.Errorf(
 			"cannot use regenerated certificate: format %s", cert.PublicKeyAlgorithm)
@@ -173,11 +173,15 @@ func rootCertKey(gun string, privKey data.PrivateKey) (data.PublicKey, error) {
 // timestamp key and possibly other serverManagedRoles), but the created repository
 // result is only stored on local disk, not published to the server. To do that,
 // use r.Publish() eventually.
-func (r *NotaryRepository) Initialize(rootKeyID string, serverManagedRoles ...string) error {
-	privKey, _, err := r.CryptoService.GetPrivateKey(rootKeyID)
-	if err != nil {
-		return err
+func (r *NotaryRepository) Initialize(rootKeyIDs []string, serverManagedRoles ...string) error {
+	privKeys := make([]data.PrivateKey, 0, len(rootKeyIDs))
+	for _, keyID := range rootKeyIDs {
+		privKey, _, err := r.CryptoService.GetPrivateKey(keyID)
+		if err != nil {
+			return err
+		}
+		privKeys = append(privKeys, privKey)
 	}

 	// currently we only support server managing timestamps and snapshots, and
 	// nothing else - timestamps are always managed by the server, and implicit
@@ -206,16 +210,20 @@ func (r *NotaryRepository) Initialize(rootKeyID string, serverManagedRoles ...st
 		}
 	}

-	rootKey, err := rootCertKey(r.gun, privKey)
-	if err != nil {
-		return err
+	rootKeys := make([]data.PublicKey, 0, len(privKeys))
+	for _, privKey := range privKeys {
+		rootKey, err := rootCertKey(r.gun, privKey)
+		if err != nil {
+			return err
+		}
+		rootKeys = append(rootKeys, rootKey)
 	}

 	var (
 		rootRole = data.NewBaseRole(
 			data.CanonicalRootRole,
 			notary.MinThreshold,
-			rootKey,
+			rootKeys...,
 		)
 		timestampRole data.BaseRole
 		snapshotRole  data.BaseRole
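The hunks above change `Initialize` to accept a slice of root key IDs rather than a single ID, so several root keys can be certified when the repository is created. A compile-only sketch of the old and new call shapes, with both signatures stubbed out locally (the real methods live on `*client.NotaryRepository` in notary):

```go
package main

import "fmt"

// repoV3 and repoV4 stub the old and new Initialize signatures from the
// diff; they do nothing and exist only to show the call-site change.
type repoV3 struct{}

func (repoV3) Initialize(rootKeyID string, serverManagedRoles ...string) error { return nil }

type repoV4 struct{}

func (repoV4) Initialize(rootKeyIDs []string, serverManagedRoles ...string) error { return nil }

func main() {
	keyID := "abc123" // placeholder key ID, not a real one
	// v0.3.x callers passed a single root key ID:
	_ = repoV3{}.Initialize(keyID)
	// v0.4.x callers pass a slice, so the root can be signed by several keys:
	_ = repoV4{}.Initialize([]string{keyID})
	fmt.Println("both call shapes compile")
}
```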
@@ -271,7 +279,7 @@ func (r *NotaryRepository) Initialize(rootKeyID string, serverManagedRoles ...st

 	r.tufRepo = tuf.NewRepo(r.CryptoService)

-	err = r.tufRepo.InitRoot(
+	err := r.tufRepo.InitRoot(
 		rootRole,
 		timestampRole,
 		snapshotRole,
@@ -307,14 +315,14 @@ func addChange(cl *changelist.FileChangelist, c changelist.Change, roles ...stri
 	for _, role := range roles {
 		// Ensure we can only add targets to the CanonicalTargetsRole,
 		// or a Delegation role (which is <CanonicalTargetsRole>/something else)
-		if role != data.CanonicalTargetsRole && !data.IsDelegation(role) {
+		if role != data.CanonicalTargetsRole && !data.IsDelegation(role) && !data.IsWildDelegation(role) {
 			return data.ErrInvalidRole{
 				Role:   role,
 				Reason: "cannot add targets to this role",
 			}
 		}

-		changes = append(changes, changelist.NewTufChange(
+		changes = append(changes, changelist.NewTUFChange(
 			c.Action(),
 			role,
 			c.Type(),
@@ -352,7 +360,7 @@ func (r *NotaryRepository) AddTarget(target *Target, roles ...string) error {
 		return err
 	}

-	template := changelist.NewTufChange(
+	template := changelist.NewTUFChange(
 		changelist.ActionCreate, "", changelist.TypeTargetsTarget,
 		target.Name, metaJSON)
 	return addChange(cl, template, roles...)
@@ -368,13 +376,14 @@ func (r *NotaryRepository) RemoveTarget(targetName string, roles ...string) erro
 		return err
 	}
 	logrus.Debugf("Removing target \"%s\"", targetName)
-	template := changelist.NewTufChange(changelist.ActionDelete, "",
+	template := changelist.NewTUFChange(changelist.ActionDelete, "",
 		changelist.TypeTargetsTarget, targetName, nil)
 	return addChange(cl, template, roles...)
 }

 // ListTargets lists all targets for the current repository. The list of
 // roles should be passed in order from highest to lowest priority.
+//
 // IMPORTANT: if you pass a set of roles such as [ "targets/a", "targets/x"
 // "targets/a/b" ], even though "targets/a/b" is part of the "targets/a" subtree
 // its entries will be strictly shadowed by those in other parts of the "targets/a"
@@ -402,11 +411,18 @@ func (r *NotaryRepository) ListTargets(roles ...string) ([]*TargetWithRole, erro
 			if _, ok := targets[targetName]; ok || !validRole.CheckPaths(targetName) {
 				continue
 			}
-			targets[targetName] =
-				&TargetWithRole{Target: Target{Name: targetName, Hashes: targetMeta.Hashes, Length: targetMeta.Length}, Role: validRole.Name}
+			targets[targetName] = &TargetWithRole{
+				Target: Target{
+					Name:   targetName,
+					Hashes: targetMeta.Hashes,
+					Length: targetMeta.Length,
+				},
+				Role: validRole.Name,
+			}
 		}
 		return nil
 	}

 	r.tufRepo.WalkTargets("", role, listVisitorFunc, skipRoles...)
 }
@@ -462,6 +478,62 @@ func (r *NotaryRepository) GetTargetByName(name string, roles ...string) (*Targe

 }

+// TargetSignedStruct is a struct that contains a Target, the role it was found in, and the list of signatures for that role
+type TargetSignedStruct struct {
+	Role       data.DelegationRole
+	Target     Target
+	Signatures []data.Signature
+}
+
+// GetAllTargetMetadataByName searches the entire delegation role tree to find the specified target by name for all
+// roles, and returns a list of TargetSignedStructs for each time it finds the specified target.
+// If given an empty string for a target name, it will return back all targets signed into the repository in every role
+func (r *NotaryRepository) GetAllTargetMetadataByName(name string) ([]TargetSignedStruct, error) {
+	if err := r.Update(false); err != nil {
+		return nil, err
+	}
+
+	var targetInfoList []TargetSignedStruct
+
+	// Define a visitor function to find the specified target
+	getAllTargetInfoByNameVisitorFunc := func(tgt *data.SignedTargets, validRole data.DelegationRole) interface{} {
+		if tgt == nil {
+			return nil
+		}
+		// We found a target and validated path compatibility in our walk,
+		// so add it to our list if we have a match
+		// if we have an empty name, add all targets, else check if we have it
+		var targetMetaToAdd data.Files
+		if name == "" {
+			targetMetaToAdd = tgt.Signed.Targets
+		} else {
+			if meta, ok := tgt.Signed.Targets[name]; ok {
+				targetMetaToAdd = data.Files{name: meta}
+			}
+		}
+
+		for targetName, resultMeta := range targetMetaToAdd {
+			targetInfo := TargetSignedStruct{
+				Role:       validRole,
+				Target:     Target{Name: targetName, Hashes: resultMeta.Hashes, Length: resultMeta.Length},
+				Signatures: tgt.Signatures,
+			}
+			targetInfoList = append(targetInfoList, targetInfo)
+		}
+		// continue walking to all child roles
+		return nil
+	}
+
+	// Check that we didn't error, and that we found the target at least once
+	if err := r.tufRepo.WalkTargets(name, "", getAllTargetInfoByNameVisitorFunc); err != nil {
+		return nil, err
+	}
+	if len(targetInfoList) == 0 {
+		return nil, fmt.Errorf("No valid trust data for %s", name)
+	}
+	return targetInfoList, nil
+}
+
 // GetChangelist returns the list of the repository's unpublished changes
 func (r *NotaryRepository) GetChangelist() (changelist.Changelist, error) {
 	changelistDir := filepath.Join(r.tufRepoPath, "changelist")
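`GetAllTargetMetadataByName`, added above, walks every delegation role and returns one entry per (role, target) pair together with that role's signatures. The sketch below stubs the result shape locally to show how a caller might iterate it; the lower-case stub types stand in for notary's `data.DelegationRole`, `client.Target`, and `data.Signature` and are not the real definitions.

```go
package main

import "fmt"

// Local stubs mirroring the shape of TargetSignedStruct from the diff above.
type delegationRole struct{ Name string }
type target struct{ Name string }
type signature struct{ KeyID string }

type targetSignedStruct struct {
	Role       delegationRole
	Target     target
	Signatures []signature
}

func main() {
	// Shape of a result from GetAllTargetMetadataByName(""): one entry per
	// role in which each target was found, with that role's signatures.
	results := []targetSignedStruct{
		{delegationRole{"targets"}, target{"v1"}, []signature{{"key1"}}},
		{delegationRole{"targets/releases"}, target{"v1"}, []signature{{"key2"}}},
	}
	for _, r := range results {
		fmt.Printf("target %s signed in role %s by %d signature(s)\n",
			r.Target.Name, r.Role.Name, len(r.Signatures))
	}
}
```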
@@ -567,19 +639,19 @@ func (r *NotaryRepository) publish(cl changelist.Changelist) error {
 		}
 	}
 	// apply the changelist to the repo
-	if err := applyChangelist(r.tufRepo, cl); err != nil {
+	if err := applyChangelist(r.tufRepo, r.invalid, cl); err != nil {
 		logrus.Debug("Error applying changelist")
 		return err
 	}

-	// these are the tuf files we will need to update, serialized as JSON before
+	// these are the TUF files we will need to update, serialized as JSON before
 	// we send anything to remote
 	updatedFiles := make(map[string][]byte)

 	// check if our root file is nearing expiry or dirty. Resign if it is. If
 	// root is not dirty but we are publishing for the first time, then just
 	// publish the existing root we have.
-	if nearExpiry(r.tufRepo.Root) || r.tufRepo.Root.Dirty {
+	if nearExpiry(r.tufRepo.Root.Signed.SignedCommon) || r.tufRepo.Root.Dirty {
 		rootJSON, err := serializeCanonicalRole(r.tufRepo, data.CanonicalRootRole)
 		if err != nil {
 			return err
@@ -635,7 +707,7 @@ func (r *NotaryRepository) publish(cl changelist.Changelist) error {
 		return err
 	}

-	return remote.SetMultiMeta(updatedFiles)
+	return remote.SetMulti(updatedFiles)
 }

 // bootstrapRepo loads the repository from the local file system (i.e.
@@ -649,7 +721,7 @@ func (r *NotaryRepository) bootstrapRepo() error {
 	logrus.Debugf("Loading trusted collection.")

 	for _, role := range data.BaseRoles {
-		jsonBytes, err := r.fileStore.GetMeta(role, store.NoSizeLimit)
+		jsonBytes, err := r.fileStore.GetSized(role, store.NoSizeLimit)
 		if err != nil {
 			if _, ok := err.(store.ErrMetaNotFound); ok &&
 				// server snapshots are supported, and server timestamp management
@@ -665,7 +737,7 @@ func (r *NotaryRepository) bootstrapRepo() error {
 		}
 	}

-	tufRepo, err := b.Finish()
+	tufRepo, _, err := b.Finish()
 	if err == nil {
 		r.tufRepo = tufRepo
 	}
@@ -681,7 +753,7 @@ func (r *NotaryRepository) saveMetadata(ignoreSnapshot bool) error {
 	if err != nil {
 		return err
 	}
-	err = r.fileStore.SetMeta(data.CanonicalRootRole, rootJSON)
+	err = r.fileStore.Set(data.CanonicalRootRole, rootJSON)
 	if err != nil {
 		return err
 	}
@@ -702,7 +774,7 @@ func (r *NotaryRepository) saveMetadata(ignoreSnapshot bool) error {
 	for role, blob := range targetsToSave {
 		parentDir := filepath.Dir(role)
 		os.MkdirAll(parentDir, 0755)
-		r.fileStore.SetMeta(role, blob)
+		r.fileStore.Set(role, blob)
 	}

 	if ignoreSnapshot {
@@ -714,7 +786,7 @@ func (r *NotaryRepository) saveMetadata(ignoreSnapshot bool) error {
 		return err
 	}

-	return r.fileStore.SetMeta(data.CanonicalSnapshotRole, snapshotJSON)
+	return r.fileStore.Set(data.CanonicalSnapshotRole, snapshotJSON)
 }

 // returns a properly constructed ErrRepositoryNotExist error based on this
@@ -738,7 +810,7 @@ func (r *NotaryRepository) Update(forWrite bool) error {
 		}
 		return err
 	}
-	repo, err := c.Update()
+	repo, invalid, err := c.Update()
 	if err != nil {
 		// notFound.Resource may include a checksum so when the role is root,
 		// it will be root or root.<checksum>. Therefore best we can
@@ -748,7 +820,11 @@ func (r *NotaryRepository) Update(forWrite bool) error {
 		}
 		return err
 	}
+	// we can be assured if we are at this stage that the repo we built is good
+	// no need to test the following function call for an error as it will always be fine should the repo be good- it is!
 	r.tufRepo = repo
+	r.invalid = invalid
 	warnRolesNearExpiry(repo)
 	return nil
 }
@@ -759,16 +835,16 @@ func (r *NotaryRepository) Update(forWrite bool) error {
 // and return an error if the remote repository errors.
 //
 // Populates a tuf.RepoBuilder with this root metadata (only use
-// tufclient.Client.Update to load the rest).
+// TUFClient.Update to load the rest).
 //
 // Fails if the remote server is reachable and does not know the repo
 // (i.e. before the first r.Publish()), in which case the error is
 // store.ErrMetaNotFound, or if the root metadata (from whichever source is used)
 // is not trusted.
 //
-// Returns a tufclient.Client for the remote server, which may not be actually
+// Returns a TUFClient for the remote server, which may not be actually
 // operational (if the URL is invalid but a root.json is cached).
-func (r *NotaryRepository) bootstrapClient(checkInitialized bool) (*tufclient.Client, error) {
+func (r *NotaryRepository) bootstrapClient(checkInitialized bool) (*TUFClient, error) {
 	minVersion := 1
 	// the old root on disk should not be validated against any trust pinning configuration
 	// because if we have an old root, it itself is the thing that pins trust
@@ -781,7 +857,7 @@ func (r *NotaryRepository) bootstrapClient(checkInitialized bool) (*tufclient.Cl
 	// during update which will cause us to download a new root and perform a rotation.
 	// If we have an old root, and it's valid, then we overwrite the newBuilder to be one
 	// preloaded with the old root or one which uses the old root for trust bootstrapping.
-	if rootJSON, err := r.fileStore.GetMeta(data.CanonicalRootRole, store.NoSizeLimit); err == nil {
+	if rootJSON, err := r.fileStore.GetSized(data.CanonicalRootRole, store.NoSizeLimit); err == nil {
 		// if we can't load the cached root, fail hard because that is how we pin trust
 		if err := oldBuilder.Load(data.CanonicalRootRole, rootJSON, minVersion, true); err != nil {
 			return nil, err
@@ -794,8 +870,9 @@ func (r *NotaryRepository) bootstrapClient(checkInitialized bool) (*tufclient.Cl
 		if err := newBuilder.Load(data.CanonicalRootRole, rootJSON, minVersion, false); err != nil {
 			// Ok, the old root is expired - we want to download a new one. But we want to use the
 			// old root to verify the new root, so bootstrap a new builder with the old builder
+			// but use the trustpinning to validate the new root
 			minVersion = oldBuilder.GetLoadedVersion(data.CanonicalRootRole)
-			newBuilder = oldBuilder.BootstrapNewBuilder()
+			newBuilder = oldBuilder.BootstrapNewBuilderWithNewTrustpin(r.trustPinning)
 		}
 	}

@@ -808,7 +885,7 @@ func (r *NotaryRepository) bootstrapClient(checkInitialized bool) (*tufclient.Cl

 	// if remote store successfully set up, try and get root from remote
 	// We don't have any local data to determine the size of root, so try the maximum (though it is restricted at 100MB)
-	tmpJSON, err := remote.GetMeta(data.CanonicalRootRole, store.NoSizeLimit)
+	tmpJSON, err := remote.GetSized(data.CanonicalRootRole, store.NoSizeLimit)
 	if err != nil {
 		// we didn't have a root in cache and were unable to load one from
 		// the server. Nothing we can do but error.
@@ -821,7 +898,7 @@ func (r *NotaryRepository) bootstrapClient(checkInitialized bool) (*tufclient.Cl
 		return nil, err
 	}

-	err = r.fileStore.SetMeta(data.CanonicalRootRole, tmpJSON)
+	err = r.fileStore.Set(data.CanonicalRootRole, tmpJSON)
 	if err != nil {
 		// if we can't write cache we should still continue, just log error
 		logrus.Errorf("could not save root to cache: %s", err.Error())
@@ -835,7 +912,7 @@ func (r *NotaryRepository) bootstrapClient(checkInitialized bool) (*tufclient.Cl
 		return nil, ErrRepoNotInitialized{}
 	}

-	return tufclient.NewClient(oldBuilder, newBuilder, remote, r.fileStore), nil
+	return NewTUFClient(oldBuilder, newBuilder, remote, r.fileStore), nil
 }

 // RotateKey removes all existing keys associated with the role, and either
@@ -864,7 +941,7 @@ func (r *NotaryRepository) RotateKey(role string, serverManagesKey bool) error {
 	)
 	switch serverManagesKey {
 	case true:
-		pubKey, err = getRemoteKey(r.baseURL, r.gun, role, r.roundTrip)
+		pubKey, err = rotateRemoteKey(r.baseURL, r.gun, role, r.roundTrip)
 		errFmtMsg = "unable to rotate remote key: %s"
 	default:
 		pubKey, err = r.CryptoService.Create(role, r.gun, data.ECDSAKey)
@@ -897,7 +974,7 @@ func (r *NotaryRepository) RotateKey(role string, serverManagesKey bool) error {
 func (r *NotaryRepository) rootFileKeyChange(cl changelist.Changelist, role, action string, key data.PublicKey) error {
 	kl := make(data.KeyList, 0, 1)
 	kl = append(kl, key)
-	meta := changelist.TufRootData{
+	meta := changelist.TUFRootData{
 		RoleName: role,
 		Keys:     kl,
 	}
@@ -906,7 +983,7 @@ func (r *NotaryRepository) rootFileKeyChange(cl changelist.Changelist, role, act
 		return err
 	}

-	c := changelist.NewTufChange(
+	c := changelist.NewTUFChange(
 		action,
 		changelist.ScopeRoot,
 		changelist.TypeRootRole,
@@ -917,11 +994,21 @@ func (r *NotaryRepository) rootFileKeyChange(cl changelist.Changelist, role, act
 }

 // DeleteTrustData removes the trust data stored for this repo in the TUF cache on the client side
-func (r *NotaryRepository) DeleteTrustData() error {
-	// Clear TUF files and cache
-	if err := r.fileStore.RemoveAll(); err != nil {
+// Note that we will not delete any private key material from local storage
+func (r *NotaryRepository) DeleteTrustData(deleteRemote bool) error {
+	// Remove the tufRepoPath directory, which includes local TUF metadata files and changelist information
+	if err := os.RemoveAll(r.tufRepoPath); err != nil {
 		return fmt.Errorf("error clearing TUF repo data: %v", err)
 	}
-	r.tufRepo = tuf.NewRepo(nil)
+	// Note that this will require admin permission in this NotaryRepository's roundtripper
+	if deleteRemote {
+		remote, err := getRemoteStore(r.baseURL, r.gun, r.roundTrip)
+		if err != nil {
+			return err
+		}
+		if err := remote.RemoveAll(); err != nil {
+			return err
+		}
+	}
 	return nil
 }
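`DeleteTrustData` now takes a `deleteRemote` flag: local TUF metadata and the changelist are always removed (private keys are kept), and the server-side copy is removed only when asked, which requires admin permission through the repository's round tripper. Below is a hedged sketch of the local half; the `tuf/<gun>` directory layout used here is an assumption for illustration, not notary's exact path scheme.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// deleteLocalTrustData sketches the local half of the new
// DeleteTrustData(deleteRemote bool): remove the repo's TUF metadata and
// changelist directory while leaving private key material alone.
func deleteLocalTrustData(baseDir, gun string) error {
	tufRepoPath := filepath.Join(baseDir, "tuf", gun) // assumed layout
	if err := os.RemoveAll(tufRepoPath); err != nil {
		return fmt.Errorf("error clearing TUF repo data: %v", err)
	}
	// With deleteRemote=true, notary additionally asks the server to drop
	// its copy via the remote store's RemoveAll (admin permission needed).
	return nil
}

func main() {
	if err := deleteLocalTrustData(os.TempDir(), "docker.io/library/alpine"); err != nil {
		fmt.Println(err)
	}
}
```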
@@ -8,8 +8,8 @@ import (
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/notary"
 	"github.com/docker/notary/client/changelist"
+	store "github.com/docker/notary/storage"
 	"github.com/docker/notary/tuf/data"
-	"github.com/docker/notary/tuf/store"
 	"github.com/docker/notary/tuf/utils"
 )

@@ -50,7 +50,7 @@ func (r *NotaryRepository) AddDelegationRoleAndKeys(name string, delegationKeys
 		name, notary.MinThreshold, len(delegationKeys))

 	// Defaulting to threshold of 1, since we don't allow for larger thresholds at the moment.
-	tdJSON, err := json.Marshal(&changelist.TufDelegation{
+	tdJSON, err := json.Marshal(&changelist.TUFDelegation{
 		NewThreshold: notary.MinThreshold,
 		AddKeys:      data.KeyList(delegationKeys),
 	})
@@ -78,7 +78,7 @@ func (r *NotaryRepository) AddDelegationPaths(name string, paths []string) error

 	logrus.Debugf(`Adding %s paths to delegation %s\n`, paths, name)

-	tdJSON, err := json.Marshal(&changelist.TufDelegation{
+	tdJSON, err := json.Marshal(&changelist.TUFDelegation{
 		AddPaths: paths,
 	})
 	if err != nil {
@@ -141,7 +141,7 @@ func (r *NotaryRepository) RemoveDelegationPaths(name string, paths []string) er

 	logrus.Debugf(`Removing %s paths from delegation "%s"\n`, paths, name)

-	tdJSON, err := json.Marshal(&changelist.TufDelegation{
+	tdJSON, err := json.Marshal(&changelist.TUFDelegation{
 		RemovePaths: paths,
 	})
 	if err != nil {
@@ -155,9 +155,11 @@ func (r *NotaryRepository) RemoveDelegationPaths(name string, paths []string) er
 // RemoveDelegationKeys creates a changelist entry to remove provided keys from an existing delegation.
 // When this changelist is applied, if the specified keys are the only keys left in the role,
 // the role itself will be deleted in its entirety.
+// It can also delete a key from all delegations under a parent using a name
+// with a wildcard at the end.
 func (r *NotaryRepository) RemoveDelegationKeys(name string, keyIDs []string) error {

-	if !data.IsDelegation(name) {
+	if !data.IsDelegation(name) && !data.IsWildDelegation(name) {
 		return data.ErrInvalidRole{Role: name, Reason: "invalid delegation role name"}
 	}

@@ -169,7 +171,7 @@ func (r *NotaryRepository) RemoveDelegationKeys(name string, keyIDs []string) er

 	logrus.Debugf(`Removing %s keys from delegation "%s"\n`, keyIDs, name)

-	tdJSON, err := json.Marshal(&changelist.TufDelegation{
+	tdJSON, err := json.Marshal(&changelist.TUFDelegation{
 		RemoveKeys: keyIDs,
 	})
 	if err != nil {
@@ -195,7 +197,7 @@ func (r *NotaryRepository) ClearDelegationPaths(name string) error {

 	logrus.Debugf(`Removing all paths from delegation "%s"\n`, name)

-	tdJSON, err := json.Marshal(&changelist.TufDelegation{
+	tdJSON, err := json.Marshal(&changelist.TUFDelegation{
 		ClearAllPaths: true,
 	})
 	if err != nil {
@@ -206,8 +208,8 @@ func (r *NotaryRepository) ClearDelegationPaths(name string) error {
 	return addChange(cl, template, name)
 }

-func newUpdateDelegationChange(name string, content []byte) *changelist.TufChange {
-	return changelist.NewTufChange(
+func newUpdateDelegationChange(name string, content []byte) *changelist.TUFChange {
+	return changelist.NewTUFChange(
 		changelist.ActionUpdate,
 		name,
 		changelist.TypeTargetsDelegation,
@@ -216,8 +218,8 @@ func newUpdateDelegationChange(name string, content []byte) *changelist.TufChang
 	)
 }

-func newCreateDelegationChange(name string, content []byte) *changelist.TufChange {
-	return changelist.NewTufChange(
+func newCreateDelegationChange(name string, content []byte) *changelist.TUFChange {
+	return changelist.NewTUFChange(
 		changelist.ActionCreate,
 		name,
 		changelist.TypeTargetsDelegation,
@@ -226,8 +228,8 @@ func newCreateDelegationChange(name string, content []byte) *changelist.TufChang
 	)
 }

-func newDeleteDelegationChange(name string, content []byte) *changelist.TufChange {
-	return changelist.NewTufChange(
+func newDeleteDelegationChange(name string, content []byte) *changelist.TUFChange {
+	return changelist.NewTUFChange(
 		changelist.ActionDelete,
 		name,
 		changelist.TypeTargetsDelegation,
@@ -238,7 +240,7 @@ func newDeleteDelegationChange(name string, content []byte) *changelist.TufChang

 // GetDelegationRoles returns the keys and roles of the repository's delegations
 // Also converts key IDs to canonical key IDs to keep consistent with signing prompts
-func (r *NotaryRepository) GetDelegationRoles() ([]*data.Role, error) {
+func (r *NotaryRepository) GetDelegationRoles() ([]data.Role, error) {
 	// Update state of the repo to latest
 	if err := r.Update(false); err != nil {
 		return nil, err
@@ -251,7 +253,7 @@ func (r *NotaryRepository) GetDelegationRoles() ([]*data.Role, error) {
 	}

 	// make a copy for traversing nested delegations
-	allDelegations := []*data.Role{}
+	allDelegations := []data.Role{}

 	// Define a visitor function to populate the delegations list and translate their key IDs to canonical IDs
 	delegationCanonicalListVisitor := func(tgt *data.SignedTargets, validRole data.DelegationRole) interface{} {
@@ -271,20 +273,23 @@ func (r *NotaryRepository) GetDelegationRoles() ([]*data.Role, error) {
 	return allDelegations, nil
 }

-func translateDelegationsToCanonicalIDs(delegationInfo data.Delegations) ([]*data.Role, error) {
-	canonicalDelegations := make([]*data.Role, len(delegationInfo.Roles))
-	copy(canonicalDelegations, delegationInfo.Roles)
+func translateDelegationsToCanonicalIDs(delegationInfo data.Delegations) ([]data.Role, error) {
+	canonicalDelegations := make([]data.Role, len(delegationInfo.Roles))
+	// Do a copy by value to ensure local delegation metadata is untouched
+	for idx, origRole := range delegationInfo.Roles {
+		canonicalDelegations[idx] = *origRole
+	}
 	delegationKeys := delegationInfo.Keys
 	for i, delegation := range canonicalDelegations {
 		canonicalKeyIDs := []string{}
 		for _, keyID := range delegation.KeyIDs {
 			pubKey, ok := delegationKeys[keyID]
 			if !ok {
-				return nil, fmt.Errorf("Could not translate canonical key IDs for %s", delegation.Name)
+				return []data.Role{}, fmt.Errorf("Could not translate canonical key IDs for %s", delegation.Name)
 			}
 			canonicalKeyID, err := utils.CanonicalKeyID(pubKey)
 			if err != nil {
-				return nil, fmt.Errorf("Could not translate canonical key IDs for %s: %v", delegation.Name, err)
+				return []data.Role{}, fmt.Errorf("Could not translate canonical key IDs for %s: %v", delegation.Name, err)
 			}
 			canonicalKeyIDs = append(canonicalKeyIDs, canonicalKeyID)
 		}
|
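The translateDelegationsToCanonicalIDs change above replaces a pointer copy with an element-wise copy by value, so later edits to the returned roles cannot reach back into the repository's cached metadata. A minimal standalone sketch of the difference (the role type here is invented for illustration):

package main

import "fmt"

type role struct{ Name string }

// copyByValue dereferences each pointer so the result holds independent copies.
func copyByValue(src []*role) []role {
    out := make([]role, len(src))
    for i, r := range src {
        out[i] = *r
    }
    return out
}

func main() {
    orig := []*role{{Name: "targets/a"}}
    copied := copyByValue(orig)
    copied[0].Name = "changed"
    fmt.Println(orig[0].Name) // prints "targets/a": the original is untouched
}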
@@ -4,14 +4,13 @@ import (
    "encoding/json"
    "fmt"
    "net/http"
    "strings"
    "time"

    "github.com/Sirupsen/logrus"
    "github.com/docker/notary/client/changelist"
    tuf "github.com/docker/notary/tuf"
    store "github.com/docker/notary/storage"
    "github.com/docker/notary/tuf"
    "github.com/docker/notary/tuf/data"
    "github.com/docker/notary/tuf/store"
    "github.com/docker/notary/tuf/utils"
)

@@ -30,7 +29,7 @@ func getRemoteStore(baseURL, gun string, rt http.RoundTripper) (store.RemoteStor
    return s, err
}

func applyChangelist(repo *tuf.Repo, cl changelist.Changelist) error {
func applyChangelist(repo *tuf.Repo, invalid *tuf.Repo, cl changelist.Changelist) error {
    it, err := cl.NewIterator()
    if err != nil {
        return err
@@ -41,30 +40,33 @@ func applyChangelist(repo *tuf.Repo, cl changelist.Changelist) error {
        if err != nil {
            return err
        }
        isDel := data.IsDelegation(c.Scope())
        isDel := data.IsDelegation(c.Scope()) || data.IsWildDelegation(c.Scope())
        switch {
        case c.Scope() == changelist.ScopeTargets || isDel:
            err = applyTargetsChange(repo, c)
            err = applyTargetsChange(repo, invalid, c)
        case c.Scope() == changelist.ScopeRoot:
            err = applyRootChange(repo, c)
        default:
            logrus.Debug("scope not supported: ", c.Scope())
            return fmt.Errorf("scope not supported: %s", c.Scope())
        }
        index++
        if err != nil {
            logrus.Debugf("error attempting to apply change #%d: %s, on scope: %s path: %s type: %s", index, c.Action(), c.Scope(), c.Path(), c.Type())
            return err
        }
        index++
    }
    logrus.Debugf("applied %d change(s)", index)
    return nil
}

func applyTargetsChange(repo *tuf.Repo, c changelist.Change) error {
func applyTargetsChange(repo *tuf.Repo, invalid *tuf.Repo, c changelist.Change) error {
    switch c.Type() {
    case changelist.TypeTargetsTarget:
        return changeTargetMeta(repo, c)
    case changelist.TypeTargetsDelegation:
        return changeTargetsDelegation(repo, c)
    case changelist.TypeWitness:
        return witnessTargets(repo, invalid, c.Scope())
    default:
        return fmt.Errorf("only target meta and delegations changes supported")
    }
@@ -73,7 +75,7 @@ func applyTargetsChange(repo *tuf.Repo, c changelist.Change) error {
func changeTargetsDelegation(repo *tuf.Repo, c changelist.Change) error {
    switch c.Action() {
    case changelist.ActionCreate:
        td := changelist.TufDelegation{}
        td := changelist.TUFDelegation{}
        err := json.Unmarshal(c.Content(), &td)
        if err != nil {
            return err
@@ -87,11 +89,15 @@ func changeTargetsDelegation(repo *tuf.Repo, c changelist.Change) error {
        }
        return repo.UpdateDelegationPaths(c.Scope(), td.AddPaths, []string{}, false)
    case changelist.ActionUpdate:
        td := changelist.TufDelegation{}
        td := changelist.TUFDelegation{}
        err := json.Unmarshal(c.Content(), &td)
        if err != nil {
            return err
        }
        if data.IsWildDelegation(c.Scope()) {
            return repo.PurgeDelegationKeys(c.Scope(), td.RemoveKeys)
        }

        delgRole, err := repo.GetDelegationRole(c.Scope())
        if err != nil {
            return err
@@ -112,10 +118,6 @@ func changeTargetsDelegation(repo *tuf.Repo, c changelist.Change) error {
            removeTUFKeyIDs = append(removeTUFKeyIDs, canonicalToTUFID[canonID])
        }

        // If we specify the only keys left delete the role, else just delete specified keys
        if strings.Join(delgRole.ListKeyIDs(), ";") == strings.Join(removeTUFKeyIDs, ";") && len(td.AddKeys) == 0 {
            return repo.DeleteDelegation(c.Scope())
        }
        err = repo.UpdateDelegationKeys(c.Scope(), td.AddKeys, removeTUFKeyIDs, td.NewThreshold)
        if err != nil {
            return err
@@ -155,7 +157,7 @@ func changeTargetMeta(repo *tuf.Repo, c changelist.Change) error {
    }

    default:
        logrus.Debug("action not yet supported: ", c.Action())
        err = fmt.Errorf("action not yet supported: %s", c.Action())
    }
    return err
}
@@ -166,7 +168,7 @@ func applyRootChange(repo *tuf.Repo, c changelist.Change) error {
    case changelist.TypeRootRole:
        err = applyRootRoleChange(repo, c)
    default:
        logrus.Debug("type of root change not yet supported: ", c.Type())
        err = fmt.Errorf("type of root change not yet supported: %s", c.Type())
    }
    return err // might be nil
}
@@ -175,7 +177,7 @@ func applyRootRoleChange(repo *tuf.Repo, c changelist.Change) error {
    switch c.Action() {
    case changelist.ActionCreate:
        // replaces all keys for a role
        d := &changelist.TufRootData{}
        d := &changelist.TUFRootData{}
        err := json.Unmarshal(c.Content(), d)
        if err != nil {
            return err
@@ -185,14 +187,34 @@ func applyRootRoleChange(repo *tuf.Repo, c changelist.Change) error {
        return err
    }
    default:
        logrus.Debug("action not yet supported for root: ", c.Action())
        return fmt.Errorf("action not yet supported for root: %s", c.Action())
    }
    return nil
}

func nearExpiry(r *data.SignedRoot) bool {
func nearExpiry(r data.SignedCommon) bool {
    plus6mo := time.Now().AddDate(0, 6, 0)
    return r.Signed.Expires.Before(plus6mo)
    return r.Expires.Before(plus6mo)
}

func warnRolesNearExpiry(r *tuf.Repo) {
    //get every role and its respective signed common and call nearExpiry on it
    //Root check
    if nearExpiry(r.Root.Signed.SignedCommon) {
        logrus.Warn("root is nearing expiry, you should re-sign the role metadata")
    }
    //Targets and delegations check
    for role, signedTOrD := range r.Targets {
        //signedTOrD is of type *data.SignedTargets
        if nearExpiry(signedTOrD.Signed.SignedCommon) {
            logrus.Warn(role, " metadata is nearing expiry, you should re-sign the role metadata")
        }
    }
    //Snapshot check
    if nearExpiry(r.Snapshot.Signed.SignedCommon) {
        logrus.Warn("snapshot is nearing expiry, you should re-sign the role metadata")
    }
    //do not need to worry about Timestamp, notary signer will re-sign with the timestamp key
}

// Fetches a public key from a remote store, given a gun and role
@@ -214,7 +236,26 @@ func getRemoteKey(url, gun, role string, rt http.RoundTripper) (data.PublicKey,
    return pubKey, nil
}

// signs and serializes the metadata for a canonical role in a tuf repo to JSON
// Rotates a private key in a remote store and returns the public key component
func rotateRemoteKey(url, gun, role string, rt http.RoundTripper) (data.PublicKey, error) {
    remote, err := getRemoteStore(url, gun, rt)
    if err != nil {
        return nil, err
    }
    rawPubKey, err := remote.RotateKey(role)
    if err != nil {
        return nil, err
    }

    pubKey, err := data.UnmarshalPublicKey(rawPubKey)
    if err != nil {
        return nil, err
    }

    return pubKey, nil
}

// signs and serializes the metadata for a canonical role in a TUF repo to JSON
func serializeCanonicalRole(tufRepo *tuf.Repo, role string) (out []byte, err error) {
    var s *data.Signed
    switch {
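The nearExpiry helper above now takes a data.SignedCommon by value, so the same six-month check can be reused by warnRolesNearExpiry for root, targets, delegations, and snapshot alike. A minimal, self-contained sketch of just that check (standard library only; the sample expiry dates are invented):

package main

import (
    "fmt"
    "time"
)

// nearExpiry reports whether a role's expiry falls within the next six months.
func nearExpiry(expires time.Time) bool {
    plus6mo := time.Now().AddDate(0, 6, 0)
    return expires.Before(plus6mo)
}

func main() {
    soon := time.Now().AddDate(0, 1, 0)  // a role expiring in one month
    later := time.Now().AddDate(1, 0, 0) // a role expiring in one year
    fmt.Println(nearExpiry(soon))  // true: warn, the role should be re-signed
    fmt.Println(nearExpiry(later)) // false
}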
@@ -6,7 +6,7 @@ import (
    "fmt"
    "net/http"

    "github.com/docker/notary/passphrase"
    "github.com/docker/notary"
    "github.com/docker/notary/trustmanager"
    "github.com/docker/notary/trustpinning"
)
@@ -16,7 +16,7 @@ import (
// (This is normally defaults to "~/.notary" or "~/.docker/trust" when enabling
// docker content trust).
func NewNotaryRepository(baseDir, gun, baseURL string, rt http.RoundTripper,
    retriever passphrase.Retriever, trustPinning trustpinning.TrustPinConfig) (
    retriever notary.PassRetriever, trustPinning trustpinning.TrustPinConfig) (
    *NotaryRepository, error) {

    fileKeyStore, err := trustmanager.NewKeyFileStore(baseDir, retriever)
@@ -6,7 +6,7 @@ import (
    "fmt"
    "net/http"

    "github.com/docker/notary/passphrase"
    "github.com/docker/notary"
    "github.com/docker/notary/trustmanager"
    "github.com/docker/notary/trustmanager/yubikey"
    "github.com/docker/notary/trustpinning"
@@ -16,7 +16,7 @@ import (
// It takes the base directory under where all the trust files will be stored
// (usually ~/.docker/trust/).
func NewNotaryRepository(baseDir, gun, baseURL string, rt http.RoundTripper,
    retriever passphrase.Retriever, trustPinning trustpinning.TrustPinConfig) (
    retriever notary.PassRetriever, trustPinning trustpinning.TrustPinConfig) (
    *NotaryRepository, error) {

    fileKeyStore, err := trustmanager.NewKeyFileStore(baseDir, retriever)
@@ -5,22 +5,23 @@ import (

    "github.com/Sirupsen/logrus"
    "github.com/docker/notary"
    tuf "github.com/docker/notary/tuf"
    store "github.com/docker/notary/storage"
    "github.com/docker/notary/tuf"
    "github.com/docker/notary/tuf/data"
    "github.com/docker/notary/tuf/store"
    "github.com/docker/notary/tuf/signed"
)

// Client is a usability wrapper around a raw TUF repo
type Client struct {
// TUFClient is a usability wrapper around a raw TUF repo
type TUFClient struct {
    remote     store.RemoteStore
    cache      store.MetadataStore
    oldBuilder tuf.RepoBuilder
    newBuilder tuf.RepoBuilder
}

// NewClient initialized a Client with the given repo, remote source of content, and cache
func NewClient(oldBuilder, newBuilder tuf.RepoBuilder, remote store.RemoteStore, cache store.MetadataStore) *Client {
    return &Client{
// NewTUFClient initialized a TUFClient with the given repo, remote source of content, and cache
func NewTUFClient(oldBuilder, newBuilder tuf.RepoBuilder, remote store.RemoteStore, cache store.MetadataStore) *TUFClient {
    return &TUFClient{
        oldBuilder: oldBuilder,
        newBuilder: newBuilder,
        remote:     remote,
@@ -29,7 +30,7 @@ func NewClient(oldBuilder, newBuilder tuf.RepoBuilder, remote store.RemoteStore,
}

// Update performs an update to the TUF repo as defined by the TUF spec
func (c *Client) Update() (*tuf.Repo, error) {
func (c *TUFClient) Update() (*tuf.Repo, *tuf.Repo, error) {
    // 1. Get timestamp
    //   a. If timestamp error (verification, expired, etc...) download new root and return to 1.
    // 2. Check if local snapshot is up to date
@@ -48,19 +49,19 @@ func (c *Client) Update() (*tuf.Repo, error) {

        if err := c.downloadRoot(); err != nil {
            logrus.Debug("Client Update (Root):", err)
            return nil, err
            return nil, nil, err
        }
        // If we error again, we now have the latest root and just want to fail
        // out as there's no expectation the problem can be resolved automatically
        logrus.Debug("retrying TUF client update")
        if err := c.update(); err != nil {
            return nil, err
            return nil, nil, err
        }
    }
    return c.newBuilder.Finish()
}

func (c *Client) update() error {
func (c *TUFClient) update() error {
    if err := c.downloadTimestamp(); err != nil {
        logrus.Debugf("Client Update (Timestamp): %s", err.Error())
        return err
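The control flow in TUFClient.Update above tries a full update, refreshes the root metadata on failure, and then retries exactly once. A stripped-down sketch of that retry shape, with doUpdate and downloadRoot as hypothetical stand-ins for the methods in the diff:

// update attempts doUpdate once; on failure it refreshes the root via
// downloadRoot and retries exactly once, mirroring TUFClient.Update.
func update(doUpdate, downloadRoot func() error) error {
    if err := doUpdate(); err != nil {
        // the failure may stem from stale root metadata, so fetch the
        // latest root; if that also fails, give up
        if rootErr := downloadRoot(); rootErr != nil {
            return rootErr
        }
        return doUpdate() // a second failure is final
    }
    return nil
}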
@@ -78,7 +79,7 @@ func (c *Client) update() error {
}

// downloadRoot is responsible for downloading the root.json
func (c *Client) downloadRoot() error {
func (c *TUFClient) downloadRoot() error {
    role := data.CanonicalRootRole
    consistentInfo := c.newBuilder.GetConsistentInfo(role)

@@ -88,7 +89,7 @@ func (c *Client) downloadRoot() error {
        logrus.Debugf("Loading root with no expected checksum")

        // get the cached root, if it exists, just for version checking
        cachedRoot, _ := c.cache.GetMeta(role, -1)
        cachedRoot, _ := c.cache.GetSized(role, -1)
        // prefer to download a new root
        _, remoteErr := c.tryLoadRemote(consistentInfo, cachedRoot)
        return remoteErr
@@ -101,36 +102,43 @@ func (c *Client) downloadRoot() error {
// downloadTimestamp is responsible for downloading the timestamp.json
// Timestamps are special in that we ALWAYS attempt to download and only
// use cache if the download fails (and the cache is still valid).
func (c *Client) downloadTimestamp() error {
func (c *TUFClient) downloadTimestamp() error {
    logrus.Debug("Loading timestamp...")
    role := data.CanonicalTimestampRole
    consistentInfo := c.newBuilder.GetConsistentInfo(role)

    // get the cached timestamp, if it exists
    cachedTS, cachedErr := c.cache.GetMeta(role, notary.MaxTimestampSize)
    // always get the remote timestamp, since it supercedes the local one
    // always get the remote timestamp, since it supersedes the local one
    cachedTS, cachedErr := c.cache.GetSized(role, notary.MaxTimestampSize)
    _, remoteErr := c.tryLoadRemote(consistentInfo, cachedTS)

    switch {
    case remoteErr == nil:
    // check that there was no remote error, or if there was a network problem
    // If there was a validation error, we should error out so we can download a new root or fail the update
    switch remoteErr.(type) {
    case nil:
        return nil
    case cachedErr == nil:
        logrus.Debug(remoteErr.Error())
        logrus.Warn("Error while downloading remote metadata, using cached timestamp - this might not be the latest version available remotely")
    case store.ErrMetaNotFound, store.ErrServerUnavailable, store.ErrOffline, store.NetworkError:
        break
    default:
        return remoteErr
    }

    // since it was a network error: get the cached timestamp, if it exists
    if cachedErr != nil {
        logrus.Debug("no cached or remote timestamp available")
        return remoteErr
    }

    logrus.Warn("Error while downloading remote metadata, using cached timestamp - this might not be the latest version available remotely")
    err := c.newBuilder.Load(role, cachedTS, 1, false)
    if err == nil {
        logrus.Debug("successfully verified cached timestamp")
    }
    return err
    default:
        logrus.Debug("no cached or remote timestamp available")
        return remoteErr
    }

}

// downloadSnapshot is responsible for downloading the snapshot.json
func (c *Client) downloadSnapshot() error {
func (c *TUFClient) downloadSnapshot() error {
    logrus.Debug("Loading snapshot...")
    role := data.CanonicalSnapshotRole
    consistentInfo := c.newBuilder.GetConsistentInfo(role)
@@ -142,11 +150,12 @@ func (c *Client) downloadSnapshot() error {
// downloadTargets downloads all targets and delegated targets for the repository.
// It uses a pre-order tree traversal as it's necessary to download parents first
// to obtain the keys to validate children.
func (c *Client) downloadTargets() error {
func (c *TUFClient) downloadTargets() error {
    toDownload := []data.DelegationRole{{
        BaseRole: data.BaseRole{Name: data.CanonicalTargetsRole},
        Paths:    []string{""},
    }}

    for len(toDownload) > 0 {
        role := toDownload[0]
        toDownload = toDownload[1:]
@@ -158,21 +167,23 @@ func (c *Client) downloadTargets() error {
        }

        children, err := c.getTargetsFile(role, consistentInfo)
        if err != nil {
            if _, ok := err.(data.ErrMissingMeta); ok && role.Name != data.CanonicalTargetsRole {
                // if the role meta hasn't been published,
                // that's ok, continue
                continue
            }
            logrus.Debugf("Error getting %s: %s", role.Name, err)
        switch err.(type) {
        case signed.ErrExpired, signed.ErrRoleThreshold:
            if role.Name == data.CanonicalTargetsRole {
                return err
            }
            logrus.Warnf("Error getting %s: %s", role.Name, err)
            break
        case nil:
            toDownload = append(children, toDownload...)
        default:
            return err
        }
    }
    return nil
}

func (c Client) getTargetsFile(role data.DelegationRole, ci tuf.ConsistentInfo) ([]data.DelegationRole, error) {
func (c TUFClient) getTargetsFile(role data.DelegationRole, ci tuf.ConsistentInfo) ([]data.DelegationRole, error) {
    logrus.Debugf("Loading %s...", role.Name)
    tgs := &data.SignedTargets{}

@@ -187,8 +198,8 @@ func (c Client) getTargetsFile(role data.DelegationRole, ci tuf.ConsistentInfo)
    return tgs.GetValidDelegations(role), nil
}

func (c *Client) tryLoadCacheThenRemote(consistentInfo tuf.ConsistentInfo) ([]byte, error) {
    cachedTS, err := c.cache.GetMeta(consistentInfo.RoleName, consistentInfo.Length())
func (c *TUFClient) tryLoadCacheThenRemote(consistentInfo tuf.ConsistentInfo) ([]byte, error) {
    cachedTS, err := c.cache.GetSized(consistentInfo.RoleName, consistentInfo.Length())
    if err != nil {
        logrus.Debugf("no %s in cache, must download", consistentInfo.RoleName)
        return c.tryLoadRemote(consistentInfo, nil)
@@ -203,9 +214,9 @@ func (c *Client) tryLoadCacheThenRemote(consistentInfo tuf.ConsistentInfo) ([]by
    return c.tryLoadRemote(consistentInfo, cachedTS)
}

func (c *Client) tryLoadRemote(consistentInfo tuf.ConsistentInfo, old []byte) ([]byte, error) {
func (c *TUFClient) tryLoadRemote(consistentInfo tuf.ConsistentInfo, old []byte) ([]byte, error) {
    consistentName := consistentInfo.ConsistentName()
    raw, err := c.remote.GetMeta(consistentName, consistentInfo.Length())
    raw, err := c.remote.GetSized(consistentName, consistentInfo.Length())
    if err != nil {
        logrus.Debugf("error downloading %s: %s", consistentName, err)
        return old, err
@@ -216,13 +227,12 @@ func (c *Client) tryLoadRemote(consistentInfo tuf.ConsistentInfo, old []byte) ([
    // will be 1
    c.oldBuilder.Load(consistentInfo.RoleName, old, 1, true)
    minVersion := c.oldBuilder.GetLoadedVersion(consistentInfo.RoleName)

    if err := c.newBuilder.Load(consistentInfo.RoleName, raw, minVersion, false); err != nil {
        logrus.Debugf("downloaded %s is invalid: %s", consistentName, err)
        return raw, err
    }
    logrus.Debugf("successfully verified downloaded %s", consistentName)
    if err := c.cache.SetMeta(consistentInfo.RoleName, raw); err != nil {
    if err := c.cache.Set(consistentInfo.RoleName, raw); err != nil {
        logrus.Debugf("Unable to write %s to cache: %s", consistentInfo.RoleName, err)
    }
    return raw, nil
vendor/src/github.com/docker/notary/client/witness.go (vendored, new file, 69 lines)
@@ -0,0 +1,69 @@
package client

import (
    "path/filepath"

    "github.com/docker/notary/client/changelist"
    "github.com/docker/notary/tuf"
    "github.com/docker/notary/tuf/data"
)

// Witness creates change objects to witness (i.e. re-sign) the given
// roles on the next publish. One change is created per role
func (r *NotaryRepository) Witness(roles ...string) ([]string, error) {
    cl, err := changelist.NewFileChangelist(filepath.Join(r.tufRepoPath, "changelist"))
    if err != nil {
        return nil, err
    }
    defer cl.Close()

    successful := make([]string, 0, len(roles))
    for _, role := range roles {
        // scope is role
        c := changelist.NewTUFChange(
            changelist.ActionUpdate,
            role,
            changelist.TypeWitness,
            "",
            nil,
        )
        err = cl.Add(c)
        if err != nil {
            break
        }
        successful = append(successful, role)
    }
    return successful, err
}

func witnessTargets(repo *tuf.Repo, invalid *tuf.Repo, role string) error {
    if r, ok := repo.Targets[role]; ok {
        // role is already valid, mark for re-signing/updating
        r.Dirty = true
        return nil
    }

    if roleObj, err := repo.GetDelegationRole(role); err == nil && invalid != nil {
        // A role with a threshold > len(keys) is technically invalid, but we let it build in the builder because
        // we want to be able to download the role (which may still have targets on it), add more keys, and then
        // witness the role, thus bringing it back to valid. However, if no keys have been added before witnessing,
        // then it is still an invalid role, and can't be witnessed because nothing can bring it back to valid.
        if roleObj.Threshold > len(roleObj.Keys) {
            return data.ErrInvalidRole{
                Role:   role,
                Reason: "role does not specify enough valid signing keys to meet its required threshold",
            }
        }
        if r, ok := invalid.Targets[role]; ok {
            // role is recognized but invalid, move to valid data and mark for re-signing
            repo.Targets[role] = r
            r.Dirty = true
            return nil
        }
    }
    // role isn't recognized, even as invalid
    return data.ErrInvalidRole{
        Role:   role,
        Reason: "this role is not known",
    }
}
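A hypothetical caller of the new Witness API, assuming an initialized *NotaryRepository from this package and its existing Publish method; it queues a witness change for one delegation, then publishes so the role is re-signed:

// witnessAndPublish queues a witness change for a delegation, then
// publishes so the role's metadata is re-signed on the server side.
func witnessAndPublish(repo *NotaryRepository) error {
    witnessed, err := repo.Witness("targets/releases")
    if err != nil {
        return err
    }
    fmt.Printf("queued for re-signing on next publish: %v\n", witnessed)
    return repo.Publish()
}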
@@ -3,16 +3,20 @@ codecov:
  # 2 builds on circleci, 1 jenkins build
  after_n_builds: 3
coverage:
  range: "50...100"
  status:
    # project will give us the diff in the total code coverage between a commit
    # and its parent
    project:
      default:
        target: auto
        threshold: "0.05%"
    # patch would give us the code coverage of the diff only
    patch: false
    # changes tells us if there are unexpected code coverage changes in other files
    # which were not changed by the diff
    changes: false
ignore: # ignore testutils for coverage
  - "tuf/testutils/*"
comment: off
vendor/src/github.com/docker/notary/const.go (vendored, 8 lines changed)
@@ -1,8 +1,6 @@
package notary

import (
    "time"
)
import "time"

// application wide constants
const (
@@ -34,6 +32,8 @@ const (
    RootKeysSubdir = "root_keys"
    // NonRootKeysSubdir is the subdirectory under PrivDir where non-root private keys are stored
    NonRootKeysSubdir = "tuf_keys"
    // KeyExtension is the file extension to use for private key files
    KeyExtension = "key"

    // Day is a duration of one day
    Day = 24 * time.Hour
@@ -56,6 +56,8 @@ const (
    MemoryBackend    = "memory"
    SQLiteBackend    = "sqlite3"
    RethinkDBBackend = "rethinkdb"

    DefaultImportRole = "delegation"
)

// NotaryDefaultExpiries is the construct used to configure the default expiry times of
vendor/src/github.com/docker/notary/const_nowindows.go (vendored, new file, 16 lines)
@@ -0,0 +1,16 @@
// +build !windows

package notary

import (
    "os"
    "syscall"
)

// NotarySupportedSignals contains the signals we would like to capture:
// - SIGUSR1, indicates a increment of the log level.
// - SIGUSR2, indicates a decrement of the log level.
var NotarySupportedSignals = []os.Signal{
    syscall.SIGUSR1,
    syscall.SIGUSR2,
}
vendor/src/github.com/docker/notary/const_windows.go (vendored, new file, 8 lines)
@@ -0,0 +1,8 @@
// +build windows

package notary

import "os"

// NotarySupportedSignals does not contain any signals, because SIGUSR1/2 are not supported on windows
var NotarySupportedSignals = []os.Signal{}
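A minimal sketch of how a daemon could consume NotarySupportedSignals on either platform; trapSignals is an invented helper name, and on Windows the slice is empty so nothing is registered:

import (
    "os"
    "os/signal"

    "github.com/Sirupsen/logrus"
    "github.com/docker/notary"
)

func trapSignals() {
    c := make(chan os.Signal, 1)
    if len(notary.NotarySupportedSignals) > 0 {
        // non-Windows builds carry SIGUSR1/SIGUSR2; on Windows the
        // slice is empty and nothing is trapped
        signal.Notify(c, notary.NotarySupportedSignals...)
    }
    go func() {
        for sig := range c {
            logrus.Infof("received %v: adjusting the log level", sig)
        }
    }()
}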
vendor/src/github.com/docker/notary/coverpkg.sh (vendored, deleted, 10 lines)
@@ -1,10 +0,0 @@
#!/usr/bin/env bash

# Given a subpackage and the containing package, figures out which packages
# need to be passed to `go test -coverpkg`: this includes all of the
# subpackage's dependencies within the containing package, as well as the
# subpackage itself.

DEPENDENCIES="$(go list -f $'{{range $f := .Deps}}{{$f}}\n{{end}}' ${1} | grep ${2} | grep -v ${2}/vendor)"

echo "${1} ${DEPENDENCIES}" | xargs echo -n | tr ' ' ','
@@ -7,8 +7,8 @@ import (
    "fmt"
    "time"

    "github.com/docker/notary/trustmanager"
    "github.com/docker/notary/tuf/data"
    "github.com/docker/notary/tuf/utils"
)

// GenerateCertificate generates an X509 Certificate from a template, given a GUN and validity interval
@@ -22,7 +22,7 @@ func GenerateCertificate(rootKey data.PrivateKey, gun string, startTime, endTime
}

func generateCertificate(signer crypto.Signer, gun string, startTime, endTime time.Time) (*x509.Certificate, error) {
    template, err := trustmanager.NewCertificate(gun, startTime, endTime)
    template, err := utils.NewCertificate(gun, startTime, endTime)
    if err != nil {
        return nil, fmt.Errorf("failed to create the certificate template for: %s (%v)", gun, err)
    }
@@ -4,13 +4,24 @@ import (
    "crypto/rand"
    "fmt"

    "crypto/x509"
    "encoding/pem"
    "errors"
    "github.com/Sirupsen/logrus"
    "github.com/docker/notary"
    "github.com/docker/notary/trustmanager"
    "github.com/docker/notary/tuf/data"
    "github.com/docker/notary/tuf/utils"
)

const (
    rsaKeySize = 2048 // Used for snapshots and targets keys
var (
    // ErrNoValidPrivateKey is returned if a key being imported doesn't
    // look like a private key
    ErrNoValidPrivateKey = errors.New("no valid private key found")

    // ErrRootKeyNotEncrypted is returned if a root key being imported is
    // unencrypted
    ErrRootKeyNotEncrypted = errors.New("only encrypted root keys may be imported")
)

// CryptoService implements Sign and Create, holding a specific GUN and keystore to
@@ -31,17 +42,17 @@ func (cs *CryptoService) Create(role, gun, algorithm string) (data.PublicKey, er

    switch algorithm {
    case data.RSAKey:
        privKey, err = trustmanager.GenerateRSAKey(rand.Reader, rsaKeySize)
        privKey, err = utils.GenerateRSAKey(rand.Reader, notary.MinRSABitSize)
        if err != nil {
            return nil, fmt.Errorf("failed to generate RSA key: %v", err)
        }
    case data.ECDSAKey:
        privKey, err = trustmanager.GenerateECDSAKey(rand.Reader)
        privKey, err = utils.GenerateECDSAKey(rand.Reader)
        if err != nil {
            return nil, fmt.Errorf("failed to generate EC key: %v", err)
        }
    case data.ED25519Key:
        privKey, err = trustmanager.GenerateED25519Key(rand.Reader)
        privKey, err = utils.GenerateED25519Key(rand.Reader)
        if err != nil {
            return nil, fmt.Errorf("failed to generate ED25519 key: %v", err)
        }
@@ -153,3 +164,18 @@ func (cs *CryptoService) ListAllKeys() map[string]string {
    }
    return res
}

// CheckRootKeyIsEncrypted makes sure the root key is encrypted. We have
// internal assumptions that depend on this.
func CheckRootKeyIsEncrypted(pemBytes []byte) error {
    block, _ := pem.Decode(pemBytes)
    if block == nil {
        return ErrNoValidPrivateKey
    }

    if !x509.IsEncryptedPEMBlock(block) {
        return ErrRootKeyNotEncrypted
    }

    return nil
}
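A small usage sketch for CheckRootKeyIsEncrypted, which now lives in crypto_service.go; the file path and the helper name here are hypothetical:

import (
    "io/ioutil"

    "github.com/docker/notary/cryptoservice"
)

// validateRootKeyFile rejects root keys that are stored unencrypted.
func validateRootKeyFile(path string) error {
    pemBytes, err := ioutil.ReadFile(path)
    if err != nil {
        return err
    }
    // fails with ErrNoValidPrivateKey or ErrRootKeyNotEncrypted
    return cryptoservice.CheckRootKeyIsEncrypted(pemBytes)
}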
@@ -1,313 +0,0 @@
package cryptoservice

import (
    "archive/zip"
    "crypto/x509"
    "encoding/pem"
    "errors"
    "io"
    "io/ioutil"
    "os"
    "path/filepath"
    "strings"

    "github.com/docker/notary/passphrase"
    "github.com/docker/notary/trustmanager"
)

const zipMadeByUNIX = 3 << 8

var (
    // ErrNoValidPrivateKey is returned if a key being imported doesn't
    // look like a private key
    ErrNoValidPrivateKey = errors.New("no valid private key found")

    // ErrRootKeyNotEncrypted is returned if a root key being imported is
    // unencrypted
    ErrRootKeyNotEncrypted = errors.New("only encrypted root keys may be imported")

    // ErrNoKeysFoundForGUN is returned if no keys are found for the
    // specified GUN during export
    ErrNoKeysFoundForGUN = errors.New("no keys found for specified GUN")
)

// ExportKey exports the specified private key to an io.Writer in PEM format.
// The key's existing encryption is preserved.
func (cs *CryptoService) ExportKey(dest io.Writer, keyID, role string) error {
    var (
        pemBytes []byte
        err      error
    )

    for _, ks := range cs.keyStores {
        pemBytes, err = ks.ExportKey(keyID)
        if err != nil {
            continue
        }
    }
    if err != nil {
        return err
    }

    nBytes, err := dest.Write(pemBytes)
    if err != nil {
        return err
    }
    if nBytes != len(pemBytes) {
        return errors.New("Unable to finish writing exported key.")
    }
    return nil
}

// ExportKeyReencrypt exports the specified private key to an io.Writer in
// PEM format. The key is reencrypted with a new passphrase.
func (cs *CryptoService) ExportKeyReencrypt(dest io.Writer, keyID string, newPassphraseRetriever passphrase.Retriever) error {
    privateKey, _, err := cs.GetPrivateKey(keyID)
    if err != nil {
        return err
    }

    keyInfo, err := cs.GetKeyInfo(keyID)
    if err != nil {
        return err
    }

    // Create temporary keystore to use as a staging area
    tempBaseDir, err := ioutil.TempDir("", "notary-key-export-")
    defer os.RemoveAll(tempBaseDir)

    tempKeyStore, err := trustmanager.NewKeyFileStore(tempBaseDir, newPassphraseRetriever)
    if err != nil {
        return err
    }

    err = tempKeyStore.AddKey(keyInfo, privateKey)
    if err != nil {
        return err
    }

    pemBytes, err := tempKeyStore.ExportKey(keyID)
    if err != nil {
        return err
    }
    nBytes, err := dest.Write(pemBytes)
    if err != nil {
        return err
    }
    if nBytes != len(pemBytes) {
        return errors.New("Unable to finish writing exported key.")
    }
    return nil
}

// ExportAllKeys exports all keys to an io.Writer in zip format.
// newPassphraseRetriever will be used to obtain passphrases to use to encrypt the existing keys.
func (cs *CryptoService) ExportAllKeys(dest io.Writer, newPassphraseRetriever passphrase.Retriever) error {
    tempBaseDir, err := ioutil.TempDir("", "notary-key-export-")
    defer os.RemoveAll(tempBaseDir)

    // Create temporary keystore to use as a staging area
    tempKeyStore, err := trustmanager.NewKeyFileStore(tempBaseDir, newPassphraseRetriever)
    if err != nil {
        return err
    }

    for _, ks := range cs.keyStores {
        if err := moveKeys(ks, tempKeyStore); err != nil {
            return err
        }
    }

    zipWriter := zip.NewWriter(dest)

    if err := addKeysToArchive(zipWriter, tempKeyStore); err != nil {
        return err
    }

    zipWriter.Close()

    return nil
}

// ImportKeysZip imports keys from a zip file provided as an zip.Reader. The
// keys in the root_keys directory are left encrypted, but the other keys are
// decrypted with the specified passphrase.
func (cs *CryptoService) ImportKeysZip(zipReader zip.Reader, retriever passphrase.Retriever) error {
    // Temporarily store the keys in maps, so we can bail early if there's
    // an error (for example, wrong passphrase), without leaving the key
    // store in an inconsistent state
    newKeys := make(map[string][]byte)

    // Iterate through the files in the archive. Don't add the keys
    for _, f := range zipReader.File {
        fNameTrimmed := strings.TrimSuffix(f.Name, filepath.Ext(f.Name))
        rc, err := f.Open()
        if err != nil {
            return err
        }
        defer rc.Close()

        fileBytes, err := ioutil.ReadAll(rc)
        if err != nil {
            return nil
        }

        // Note that using / as a separator is okay here - the zip
        // package guarantees that the separator will be /
        if fNameTrimmed[len(fNameTrimmed)-5:] == "_root" {
            if err = CheckRootKeyIsEncrypted(fileBytes); err != nil {
                return err
            }
        }
        newKeys[fNameTrimmed] = fileBytes
    }

    for keyName, pemBytes := range newKeys {
        // Get the key role information as well as its data.PrivateKey representation
        _, keyInfo, err := trustmanager.KeyInfoFromPEM(pemBytes, keyName)
        if err != nil {
            return err
        }
        privKey, err := trustmanager.ParsePEMPrivateKey(pemBytes, "")
        if err != nil {
            privKey, _, err = trustmanager.GetPasswdDecryptBytes(retriever, pemBytes, "", "imported "+keyInfo.Role)
            if err != nil {
                return err
            }
        }
        // Add the key to our cryptoservice, will add to the first successful keystore
        if err = cs.AddKey(keyInfo.Role, keyInfo.Gun, privKey); err != nil {
            return err
        }
    }

    return nil
}

// ExportKeysByGUN exports all keys associated with a specified GUN to an
// io.Writer in zip format. passphraseRetriever is used to select new passphrases to use to
// encrypt the keys.
func (cs *CryptoService) ExportKeysByGUN(dest io.Writer, gun string, passphraseRetriever passphrase.Retriever) error {
    tempBaseDir, err := ioutil.TempDir("", "notary-key-export-")
    defer os.RemoveAll(tempBaseDir)

    // Create temporary keystore to use as a staging area
    tempKeyStore, err := trustmanager.NewKeyFileStore(tempBaseDir, passphraseRetriever)
    if err != nil {
        return err
    }

    for _, ks := range cs.keyStores {
        if err := moveKeysByGUN(ks, tempKeyStore, gun); err != nil {
            return err
        }
    }

    zipWriter := zip.NewWriter(dest)

    if len(tempKeyStore.ListKeys()) == 0 {
        return ErrNoKeysFoundForGUN
    }

    if err := addKeysToArchive(zipWriter, tempKeyStore); err != nil {
        return err
    }

    zipWriter.Close()

    return nil
}

func moveKeysByGUN(oldKeyStore, newKeyStore trustmanager.KeyStore, gun string) error {
    for keyID, keyInfo := range oldKeyStore.ListKeys() {
        // Skip keys that aren't associated with this GUN
        if keyInfo.Gun != gun {
            continue
        }

        privKey, _, err := oldKeyStore.GetKey(keyID)
        if err != nil {
            return err
        }

        err = newKeyStore.AddKey(keyInfo, privKey)
        if err != nil {
            return err
        }
    }

    return nil
}

func moveKeys(oldKeyStore, newKeyStore trustmanager.KeyStore) error {
    for keyID, keyInfo := range oldKeyStore.ListKeys() {
        privateKey, _, err := oldKeyStore.GetKey(keyID)
        if err != nil {
            return err
        }

        err = newKeyStore.AddKey(keyInfo, privateKey)

        if err != nil {
            return err
        }
    }

    return nil
}

func addKeysToArchive(zipWriter *zip.Writer, newKeyStore *trustmanager.KeyFileStore) error {
    for _, relKeyPath := range newKeyStore.ListFiles() {
        fullKeyPath, err := newKeyStore.GetPath(relKeyPath)
        if err != nil {
            return err
        }

        fi, err := os.Lstat(fullKeyPath)
        if err != nil {
            return err
        }

        infoHeader, err := zip.FileInfoHeader(fi)
        if err != nil {
            return err
        }

        relPath, err := filepath.Rel(newKeyStore.BaseDir(), fullKeyPath)
        if err != nil {
            return err
        }
        infoHeader.Name = relPath

        zipFileEntryWriter, err := zipWriter.CreateHeader(infoHeader)
        if err != nil {
            return err
        }

        fileContents, err := ioutil.ReadFile(fullKeyPath)
        if err != nil {
            return err
        }

        if _, err = zipFileEntryWriter.Write(fileContents); err != nil {
            return err
        }
    }

    return nil
}

// CheckRootKeyIsEncrypted makes sure the root key is encrypted. We have
// internal assumptions that depend on this.
func CheckRootKeyIsEncrypted(pemBytes []byte) error {
    block, _ := pem.Decode(pemBytes)
    if block == nil {
        return ErrNoValidPrivateKey
    }

    if !x509.IsEncryptedPEMBlock(block) {
        return ErrRootKeyNotEncrypted
    }

    return nil
}
vendor/src/github.com/docker/notary/development.mysql.yml (vendored, new file, 60 lines)
@@ -0,0 +1,60 @@
version: "2"
services:
  server:
    build:
      context: .
      dockerfile: server.Dockerfile
    networks:
      mdb:
      sig:
      srv:
        aliases:
          - notary-server
    entrypoint: /usr/bin/env sh
    command: -c "./migrations/migrate.sh && notary-server -config=fixtures/server-config.json"
    depends_on:
      - mysql
      - signer
  signer:
    build:
      context: .
      dockerfile: signer.Dockerfile
    networks:
      mdb:
      sig:
        aliases:
          - notarysigner
    entrypoint: /usr/bin/env sh
    command: -c "./migrations/migrate.sh && notary-signer -config=fixtures/signer-config.json"
    depends_on:
      - mysql
  mysql:
    networks:
      - mdb
    volumes:
      - ./notarymysql/docker-entrypoint-initdb.d:/docker-entrypoint-initdb.d
    image: mariadb:10.1.10
    environment:
      - TERM=dumb
      - MYSQL_ALLOW_EMPTY_PASSWORD="true"
    command: mysqld --innodb_file_per_table
  client:
    build:
      context: .
      dockerfile: Dockerfile
    env_file: buildscripts/env.list
    command: buildscripts/testclient.py
    volumes:
      - ./test_output:/test_output
    networks:
      - mdb
      - srv
    depends_on:
      - server
networks:
  mdb:
    external: false
  sig:
    external: false
  srv:
    external: false
@@ -11,8 +11,6 @@ services:
    links:
      - rdb-proxy:rdb-proxy.rdb
      - signer
    environment:
      - SERVICE_NAME=notary_server
    ports:
      - "8080"
      - "4443:4443"
@@ -32,14 +30,12 @@ services:
          - notarysigner
    links:
      - rdb-proxy:rdb-proxy.rdb
    environment:
      - SERVICE_NAME=notary_signer
    entrypoint: /usr/bin/env sh
    command: -c "sh migrations/rethink_migrate.sh && notary-signer -config=fixtures/signer-config.rethink.json"
    depends_on:
      - rdb-proxy
  rdb-01:
    image: jlhawn/rethinkdb:2.3.0
    image: jlhawn/rethinkdb:2.3.4
    volumes:
      - ./fixtures/rethinkdb:/tls
      - rdb-01-data:/var/data
@@ -51,7 +47,7 @@ services:
      - rdb-01.rdb
    command: "--bind all --no-http-admin --server-name rdb_01 --canonical-address rdb-01.rdb --directory /var/data/rethinkdb --join rdb.rdb --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
  rdb-02:
    image: jlhawn/rethinkdb:2.3.0
    image: jlhawn/rethinkdb:2.3.4
    volumes:
      - ./fixtures/rethinkdb:/tls
      - rdb-02-data:/var/data
@@ -63,7 +59,7 @@ services:
      - rdb-02.rdb
    command: "--bind all --no-http-admin --server-name rdb_02 --canonical-address rdb-02.rdb --directory /var/data/rethinkdb --join rdb.rdb --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
  rdb-03:
    image: jlhawn/rethinkdb:2.3.0
    image: jlhawn/rethinkdb:2.3.4
    volumes:
      - ./fixtures/rethinkdb:/tls
      - rdb-03-data:/var/data
@@ -75,7 +71,7 @@ services:
      - rdb-03.rdb
    command: "--bind all --no-http-admin --server-name rdb_03 --canonical-address rdb-03.rdb --directory /var/data/rethinkdb --join rdb.rdb --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
  rdb-proxy:
    image: jlhawn/rethinkdb:2.3.0
    image: jlhawn/rethinkdb:2.3.4
    ports:
      - "8080:8080"
    volumes:
@@ -91,16 +87,17 @@ services:
      - rdb-02
      - rdb-03
  client:
    build:
      context: .
      dockerfile: Dockerfile
    volumes:
      - ./test_output:/test_output
    networks:
      - rdb
    build:
      context: .
      dockerfile: Dockerfile
    env_file: buildscripts/env.list
    links:
      - server:notary-server
    command: buildscripts/testclient.sh
    command: buildscripts/testclient.py
volumes:
  rdb-01-data:
    external: false
@@ -1,36 +0,0 @@
server:
  build: .
  dockerfile: server.Dockerfile
  links:
    - mysql
    - signer
    - signer:notarysigner
  environment:
    - SERVICE_NAME=notary_server
  entrypoint: /usr/bin/env sh
  command: -c "./migrations/migrate.sh && notary-server -config=fixtures/server-config.json"
signer:
  build: .
  dockerfile: signer.Dockerfile
  links:
    - mysql
  environment:
    - SERVICE_NAME=notary_signer
  entrypoint: /usr/bin/env sh
  command: -c "./migrations/migrate.sh && notary-signer -config=fixtures/signer-config.json"
mysql:
  volumes:
    - ./notarymysql/docker-entrypoint-initdb.d:/docker-entrypoint-initdb.d
  image: mariadb:10.1.10
  environment:
    - TERM=dumb
    - MYSQL_ALLOW_EMPTY_PASSWORD="true"
  command: mysqld --innodb_file_per_table
client:
  volumes:
    - ./test_output:/test_output
  build: .
  dockerfile: Dockerfile
  links:
    - server:notary-server
  command: buildscripts/testclient.sh
@@ -11,10 +11,7 @@ services:
    links:
      - rdb-proxy:rdb-proxy.rdb
      - signer
    environment:
      - SERVICE_NAME=notary_server
    ports:
      - "8080"
      - "4443:4443"
    entrypoint: /usr/bin/env sh
    command: -c "sh migrations/rethink_migrate.sh && notary-server -config=fixtures/server-config.rethink.json"
@@ -32,50 +29,47 @@ services:
          - notarysigner
    links:
      - rdb-proxy:rdb-proxy.rdb
    environment:
      - SERVICE_NAME=notary_signer
    entrypoint: /usr/bin/env sh
    command: -c "sh migrations/rethink_migrate.sh && notary-signer -config=fixtures/signer-config.rethink.json"
    depends_on:
      - rdb-proxy
  rdb-01:
    image: jlhawn/rethinkdb:2.3.0
    image: jlhawn/rethinkdb:2.3.4
    volumes:
      - ./fixtures/rethinkdb:/tls
      - rdb-01-data:/var/data
    networks:
      rdb:
        aliases:
          - rdb
          - rdb.rdb
          - rdb-01.rdb
    command: "--bind all --no-http-admin --server-name rdb_01 --canonical-address rdb-01.rdb --directory /var/data/rethinkdb --join rdb.rdb --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
    command: "--bind all --no-http-admin --server-name rdb_01 --canonical-address rdb-01.rdb --directory /var/data/rethinkdb --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
  rdb-02:
    image: jlhawn/rethinkdb:2.3.0
    image: jlhawn/rethinkdb:2.3.4
    volumes:
      - ./fixtures/rethinkdb:/tls
      - rdb-02-data:/var/data
    networks:
      rdb:
        aliases:
          - rdb
          - rdb.rdb
          - rdb-02.rdb
    command: "--bind all --no-http-admin --server-name rdb_02 --canonical-address rdb-02.rdb --directory /var/data/rethinkdb --join rdb.rdb --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
    command: "--bind all --no-http-admin --server-name rdb_02 --canonical-address rdb-02.rdb --directory /var/data/rethinkdb --join rdb-01 --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
    depends_on:
      - rdb-01
  rdb-03:
    image: jlhawn/rethinkdb:2.3.0
    image: jlhawn/rethinkdb:2.3.4
    volumes:
      - ./fixtures/rethinkdb:/tls
      - rdb-03-data:/var/data
    networks:
      rdb:
        aliases:
          - rdb
          - rdb.rdb
          - rdb-03.rdb
    command: "--bind all --no-http-admin --server-name rdb_03 --canonical-address rdb-03.rdb --directory /var/data/rethinkdb --join rdb.rdb --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
    command: "--bind all --no-http-admin --server-name rdb_03 --canonical-address rdb-03.rdb --directory /var/data/rethinkdb --join rdb-02 --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
    depends_on:
      - rdb-01
      - rdb-02
  rdb-proxy:
    image: jlhawn/rethinkdb:2.3.0
    image: jlhawn/rethinkdb:2.3.4
    ports:
      - "8080:8080"
    volumes:
@@ -85,7 +79,7 @@ services:
        aliases:
          - rdb-proxy
          - rdb-proxy.rdp
    command: "proxy --bind all --join rdb.rdb --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
    command: "proxy --bind all --join rdb-03 --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
    depends_on:
      - rdb-01
      - rdb-02
@@ -1,34 +1,49 @@
version: "2"
services:
  server:
    build: .
    build:
      context: .
      dockerfile: server.Dockerfile
    links:
      - mysql
      - signer
      - signer:notarysigner
    environment:
      - SERVICE_NAME=notary_server
    networks:
      - mdb
      - sig
    ports:
      - "8080"
      - "4443:4443"
    entrypoint: /usr/bin/env sh
    command: -c "./migrations/migrate.sh && notary-server -config=fixtures/server-config.json"
  signer:
    build: .
    dockerfile: signer.Dockerfile
    links:
    depends_on:
      - mysql
    environment:
      - SERVICE_NAME=notary_signer
      - signer
  signer:
    build:
      context: .
      dockerfile: signer.Dockerfile
    networks:
      mdb:
      sig:
        aliases:
          - notarysigner
    entrypoint: /usr/bin/env sh
    command: -c "./migrations/migrate.sh && notary-signer -config=fixtures/signer-config.json"
    depends_on:
      - mysql
  mysql:
    networks:
      - mdb
    volumes:
      - ./notarymysql/docker-entrypoint-initdb.d:/docker-entrypoint-initdb.d
      - notary_data:/var/lib/mysql
    image: mariadb:10.1.10
    ports:
      - "3306:3306"
    environment:
      - TERM=dumb
      - MYSQL_ALLOW_EMPTY_PASSWORD="true"
    command: mysqld --innodb_file_per_table
volumes:
  notary_data:
    external: false
networks:
  mdb:
    external: false
  sig:
    external: false
vendor/src/github.com/docker/notary/notary.go (vendored, new file, 7 lines)
@@ -0,0 +1,7 @@
package notary

// PassRetriever is a callback function that should retrieve a passphrase
// for a given named key. If it should be treated as new passphrase (e.g. with
// confirmation), createNew will be true. Attempts is passed in so that implementers
// decide how many chances to give to a human, for example.
type PassRetriever func(keyName, alias string, createNew bool, attempts int) (passphrase string, giveup bool, err error)
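A minimal non-interactive PassRetriever sketch that satisfies this signature, assuming the passphrase is supplied via an environment variable (the variable name is invented); it gives up after three attempts, mirroring the prompt-based retriever:

import (
    "errors"
    "os"
)

// envRetriever satisfies notary.PassRetriever without prompting a human.
func envRetriever(keyName, alias string, createNew bool, attempts int) (string, bool, error) {
    if attempts > 3 {
        return "", true, errors.New("too many attempts")
    }
    pass := os.Getenv("NOTARY_KEY_PASSPHRASE") // invented variable name
    if pass == "" {
        return "", true, errors.New("no passphrase available")
    }
    return pass, false, nil
}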
@@ -8,19 +8,13 @@ import (
    "fmt"
    "io"
    "os"
    "path/filepath"
    "strings"

    "path/filepath"

    "github.com/docker/docker/pkg/term"
    "github.com/docker/notary"
)

// Retriever is a callback function that should retrieve a passphrase
// for a given named key. If it should be treated as new passphrase (e.g. with
// confirmation), createNew will be true. Attempts is passed in so that implementers
// decide how many chances to give to a human, for example.
type Retriever func(keyName, alias string, createNew bool, attempts int) (passphrase string, giveup bool, err error)

const (
    idBytesToDisplay = 7
    tufRootAlias     = "root"
@@ -46,72 +40,70 @@ var (
    // ErrTooManyAttempts is returned if the maximum number of passphrase
    // entry attempts is reached.
    ErrTooManyAttempts = errors.New("Too many attempts")

    // ErrNoInput is returned if we do not have a valid input method for passphrases
    ErrNoInput = errors.New("Please either use environment variables or STDIN with a terminal to provide key passphrases")
)

// PromptRetriever returns a new Retriever which will provide a prompt on stdin
// and stdout to retrieve a passphrase. The passphrase will be cached such that
// and stdout to retrieve a passphrase. stdin will be checked if it is a terminal,
// else the PromptRetriever will error when attempting to retrieve a passphrase.
// Upon successful passphrase retrievals, the passphrase will be cached such that
// subsequent prompts will produce the same passphrase.
func PromptRetriever() Retriever {
func PromptRetriever() notary.PassRetriever {
    if !term.IsTerminal(os.Stdin.Fd()) {
        return func(string, string, bool, int) (string, bool, error) {
            return "", false, ErrNoInput
        }
    }
    return PromptRetrieverWithInOut(os.Stdin, os.Stdout, nil)
}

// PromptRetrieverWithInOut returns a new Retriever which will provide a
// prompt using the given in and out readers. The passphrase will be cached
// such that subsequent prompts will produce the same passphrase.
// aliasMap can be used to specify display names for TUF key aliases. If aliasMap
// is nil, a sensible default will be used.
func PromptRetrieverWithInOut(in io.Reader, out io.Writer, aliasMap map[string]string) Retriever {
    userEnteredTargetsSnapshotsPass := false
    targetsSnapshotsPass := ""
    userEnteredRootsPass := false
    rootsPass := ""

    return func(keyName string, alias string, createNew bool, numAttempts int) (string, bool, error) {
        if alias == tufRootAlias && createNew && numAttempts == 0 {
            fmt.Fprintln(out, tufRootKeyGenerationWarning)
        }
        if numAttempts > 0 {
            if !createNew {
                fmt.Fprintln(out, "Passphrase incorrect. Please retry.")
            }
type boundRetriever struct {
    in              io.Reader
    out             io.Writer
    aliasMap        map[string]string
    passphraseCache map[string]string
}

func (br *boundRetriever) getPassphrase(keyName, alias string, createNew bool, numAttempts int) (string, bool, error) {
    if numAttempts == 0 {
        if alias == tufRootAlias && createNew {
            fmt.Fprintln(br.out, tufRootKeyGenerationWarning)
        }

        if pass, ok := br.passphraseCache[alias]; ok {
            return pass, false, nil
        }
    } else if !createNew { // per `if`, numAttempts > 0 if we're at this `else`
        if numAttempts > 3 {
            return "", true, ErrTooManyAttempts
        }
        fmt.Fprintln(br.out, "Passphrase incorrect. Please retry.")
    }

    // passphrase not cached and we're not aborting, get passphrase from user!
    return br.requestPassphrase(keyName, alias, createNew, numAttempts)
}

func (br *boundRetriever) requestPassphrase(keyName, alias string, createNew bool, numAttempts int) (string, bool, error) {
    // Figure out if we should display a different string for this alias
    displayAlias := alias
    if aliasMap != nil {
        if val, ok := aliasMap[alias]; ok {
    if val, ok := br.aliasMap[alias]; ok {
        displayAlias = val
    }

    }

    // First, check if we have a password cached for this alias.
    if numAttempts == 0 {
        if userEnteredTargetsSnapshotsPass && (alias == tufSnapshotAlias || alias == tufTargetsAlias) {
            return targetsSnapshotsPass, false, nil
        }
        if userEnteredRootsPass && (alias == "root") {
            return rootsPass, false, nil
        }
    }

    if numAttempts > 3 && !createNew {
        return "", true, ErrTooManyAttempts
    }

    // If typing on the terminal, we do not want the terminal to echo the
    // password that is typed (so it doesn't display)
    if term.IsTerminal(0) {
        state, err := term.SaveState(0)
    if term.IsTerminal(os.Stdin.Fd()) {
        state, err := term.SaveState(os.Stdin.Fd())
        if err != nil {
            return "", false, err
        }
        term.DisableEcho(0, state)
        defer term.RestoreTerminal(0, state)
        term.DisableEcho(os.Stdin.Fd(), state)
        defer term.RestoreTerminal(os.Stdin.Fd(), state)
    }

    stdin := bufio.NewReader(in)

    indexOfLastSeparator := strings.LastIndex(keyName, string(filepath.Separator))
    if indexOfLastSeparator == -1 {
        indexOfLastSeparator = 0
@@ -133,68 +125,80 @@ func PromptRetrieverWithInOut(in io.Reader, out io.Writer, aliasMap map[string]s
        withID = ""
    }

    if createNew {
        fmt.Fprintf(out, "Enter passphrase for new %s key%s: ", displayAlias, withID)
    } else if displayAlias == "yubikey" {
        fmt.Fprintf(out, "Enter the %s for the attached Yubikey: ", keyName)
    } else {
        fmt.Fprintf(out, "Enter passphrase for %s key%s: ", displayAlias, withID)
    switch {
    case createNew:
        fmt.Fprintf(br.out, "Enter passphrase for new %s key%s: ", displayAlias, withID)
    case displayAlias == "yubikey":
        fmt.Fprintf(br.out, "Enter the %s for the attached Yubikey: ", keyName)
    default:
        fmt.Fprintf(br.out, "Enter passphrase for %s key%s: ", displayAlias, withID)
    }

    stdin := bufio.NewReader(br.in)
    passphrase, err := stdin.ReadBytes('\n')
    fmt.Fprintln(out)
    fmt.Fprintln(br.out)
    if err != nil {
        return "", false, err
    }

    retPass := strings.TrimSpace(string(passphrase))

    if !createNew {
        if alias == tufSnapshotAlias || alias == tufTargetsAlias {
            userEnteredTargetsSnapshotsPass = true
            targetsSnapshotsPass = retPass
    if createNew {
        err = br.verifyAndConfirmPassword(stdin, retPass, displayAlias, withID)
        if err != nil {
            return "", false, err
        }
        if alias == tufRootAlias {
            userEnteredRootsPass = true
            rootsPass = retPass
        }
    }

    br.cachePassword(alias, retPass)

    return retPass, false, nil
}

func (br *boundRetriever) verifyAndConfirmPassword(stdin *bufio.Reader, retPass, displayAlias, withID string) error {
    if len(retPass) < 8 {
        fmt.Fprintln(out, "Passphrase is too short. Please use a password manager to generate and store a good random passphrase.")
        return "", false, ErrTooShort
        fmt.Fprintln(br.out, "Passphrase is too short. Please use a password manager to generate and store a good random passphrase.")
        return ErrTooShort
    }

    fmt.Fprintf(out, "Repeat passphrase for new %s key%s: ", displayAlias, withID)
    fmt.Fprintf(br.out, "Repeat passphrase for new %s key%s: ", displayAlias, withID)
    confirmation, err := stdin.ReadBytes('\n')
    fmt.Fprintln(out)
    fmt.Fprintln(br.out)
    if err != nil {
        return "", false, err
        return err
    }
    confirmationStr := strings.TrimSpace(string(confirmation))

    if retPass != confirmationStr {
        fmt.Fprintln(out, "Passphrases do not match. Please retry.")
        return "", false, ErrDontMatch
        fmt.Fprintln(br.out, "Passphrases do not match. Please retry.")
        return ErrDontMatch
    }
    return nil
}

    if alias == tufSnapshotAlias || alias == tufTargetsAlias {
        userEnteredTargetsSnapshotsPass = true
        targetsSnapshotsPass = retPass
    }
    if alias == tufRootAlias {
        userEnteredRootsPass = true
        rootsPass = retPass
func (br *boundRetriever) cachePassword(alias, retPass string) {
    br.passphraseCache[alias] = retPass
}

    return retPass, false, nil
// PromptRetrieverWithInOut returns a new Retriever which will provide a
// prompt using the given in and out readers. The passphrase will be cached
// such that subsequent prompts will produce the same passphrase.
// aliasMap can be used to specify display names for TUF key aliases. If aliasMap
|
||||
// is nil, a sensible default will be used.
|
||||
func PromptRetrieverWithInOut(in io.Reader, out io.Writer, aliasMap map[string]string) notary.PassRetriever {
|
||||
bound := &boundRetriever{
|
||||
in: in,
|
||||
out: out,
|
||||
aliasMap: aliasMap,
|
||||
passphraseCache: make(map[string]string),
|
||||
}
|
||||
|
||||
return bound.getPassphrase
|
||||
}
|
||||
|
||||
// ConstantRetriever returns a new Retriever which will return a constant string
|
||||
// as a passphrase.
|
||||
func ConstantRetriever(constantPassphrase string) Retriever {
|
||||
func ConstantRetriever(constantPassphrase string) notary.PassRetriever {
|
||||
return func(k, a string, c bool, n int) (string, bool, error) {
|
||||
return constantPassphrase, false, nil
|
||||
}
|
||||
|
|
|
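For orientation, here is a minimal sketch of how a caller might drive the refactored retriever above; the key name and alias strings are illustrative, and the caching shown is the behavior implemented by boundRetriever:

    package main

    import (
        "fmt"
        "os"

        "github.com/docker/notary/passphrase"
    )

    func main() {
        // nil aliasMap selects the package's default display names
        retriever := passphrase.PromptRetrieverWithInOut(os.Stdin, os.Stdout, nil)

        // first attempt (numAttempts == 0) for the "targets" alias prompts the user
        pass, giveup, err := retriever("example/key/path", "targets", false, 0)
        if err != nil || giveup {
            fmt.Fprintln(os.Stderr, "no passphrase obtained")
            return
        }
        // a second call for the same alias is served from the passphrase cache
        cached, _, _ := retriever("example/key/path", "targets", false, 0)
        fmt.Println(pass == cached) // true
    }
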
@@ -1,4 +1,4 @@
FROM golang:1.6.1-alpine
FROM golang:1.7.1-alpine
MAINTAINER David Lawrence "david.lawrence@docker.com"

RUN apk add --update git gcc libc-dev && rm -rf /var/cache/apk/*

@@ -13,6 +13,7 @@ COPY . /go/src/${NOTARYPKG}

WORKDIR /go/src/${NOTARYPKG}

ENV SERVICE_NAME=notary_server
EXPOSE 4443

# Install notary-server

@@ -1,4 +1,4 @@
FROM golang:1.6.1-alpine
FROM golang:1.7.1-alpine
MAINTAINER David Lawrence "david.lawrence@docker.com"

RUN apk add --update git gcc libc-dev && rm -rf /var/cache/apk/*

@@ -13,11 +13,10 @@ COPY . /go/src/${NOTARYPKG}

WORKDIR /go/src/${NOTARYPKG}

ENV SERVICE_NAME=notary_signer
ENV NOTARY_SIGNER_DEFAULT_ALIAS="timestamp_1"
ENV NOTARY_SIGNER_TIMESTAMP_1="testpassword"

EXPOSE 4444

# Install notary-signer
RUN go install \
    -tags pkcs11 \

@@ -1,6 +1,15 @@
package store
package storage

import "fmt"
import (
    "errors"
    "fmt"
)

var (
    // ErrPathOutsideStore indicates that the returned path would be
    // outside the store
    ErrPathOutsideStore = errors.New("path outside file store")
)

// ErrMetaNotFound indicates we did not find a particular piece
// of metadata in the store
vendor/src/github.com/docker/notary/storage/filestore.go (vendored, new file, 222 lines)
@@ -0,0 +1,222 @@
package storage

import (
    "fmt"
    "io"
    "io/ioutil"
    "os"
    "path/filepath"
    "strings"

    "github.com/docker/notary"
)

// NewFilesystemStore creates a new store in a directory tree
func NewFilesystemStore(baseDir, subDir, extension string) (*FilesystemStore, error) {
    baseDir = filepath.Join(baseDir, subDir)

    return NewFileStore(baseDir, extension, notary.PrivKeyPerms)
}

// NewFileStore creates a fully configurable file store
func NewFileStore(baseDir, fileExt string, perms os.FileMode) (*FilesystemStore, error) {
    baseDir = filepath.Clean(baseDir)
    if err := createDirectory(baseDir, perms); err != nil {
        return nil, err
    }
    if !strings.HasPrefix(fileExt, ".") {
        fileExt = "." + fileExt
    }

    return &FilesystemStore{
        baseDir: baseDir,
        ext:     fileExt,
        perms:   perms,
    }, nil
}

// NewSimpleFileStore is a convenience wrapper to create a world readable,
// owner writeable filestore
func NewSimpleFileStore(baseDir, fileExt string) (*FilesystemStore, error) {
    return NewFileStore(baseDir, fileExt, notary.PubCertPerms)
}

// NewPrivateKeyFileStorage initializes a new filestore for private keys, appending
// the notary.PrivDir to the baseDir.
func NewPrivateKeyFileStorage(baseDir, fileExt string) (*FilesystemStore, error) {
    baseDir = filepath.Join(baseDir, notary.PrivDir)
    return NewFileStore(baseDir, fileExt, notary.PrivKeyPerms)
}

// NewPrivateSimpleFileStore is a wrapper to create an owner readable/writeable
// _only_ filestore
func NewPrivateSimpleFileStore(baseDir, fileExt string) (*FilesystemStore, error) {
    return NewFileStore(baseDir, fileExt, notary.PrivKeyPerms)
}

// FilesystemStore is a store in a locally accessible directory
type FilesystemStore struct {
    baseDir string
    ext     string
    perms   os.FileMode
}

func (f *FilesystemStore) getPath(name string) (string, error) {
    fileName := fmt.Sprintf("%s%s", name, f.ext)
    fullPath := filepath.Join(f.baseDir, fileName)

    if !strings.HasPrefix(fullPath, f.baseDir) {
        return "", ErrPathOutsideStore
    }
    return fullPath, nil
}

// GetSized returns the meta for the given name (a role) up to size bytes
// If size is "NoSizeLimit", this corresponds to "infinite," but we cut off at a
// predefined threshold "notary.MaxDownloadSize". If the file is larger than size
// we return ErrMaliciousServer for consistency with the HTTPStore
func (f *FilesystemStore) GetSized(name string, size int64) ([]byte, error) {
    p, err := f.getPath(name)
    if err != nil {
        return nil, err
    }
    file, err := os.OpenFile(p, os.O_RDONLY, f.perms)
    if err != nil {
        if os.IsNotExist(err) {
            err = ErrMetaNotFound{Resource: name}
        }
        return nil, err
    }
    defer file.Close()

    if size == NoSizeLimit {
        size = notary.MaxDownloadSize
    }

    stat, err := file.Stat()
    if err != nil {
        return nil, err
    }
    if stat.Size() > size {
        return nil, ErrMaliciousServer{}
    }

    l := io.LimitReader(file, size)
    return ioutil.ReadAll(l)
}

// Get returns the meta for the given name.
func (f *FilesystemStore) Get(name string) ([]byte, error) {
    p, err := f.getPath(name)
    if err != nil {
        return nil, err
    }
    meta, err := ioutil.ReadFile(p)
    if err != nil {
        if os.IsNotExist(err) {
            err = ErrMetaNotFound{Resource: name}
        }
        return nil, err
    }
    return meta, nil
}

// SetMulti sets the metadata for multiple roles in one operation
func (f *FilesystemStore) SetMulti(metas map[string][]byte) error {
    for role, blob := range metas {
        err := f.Set(role, blob)
        if err != nil {
            return err
        }
    }
    return nil
}

// Set sets the meta for a single role
func (f *FilesystemStore) Set(name string, meta []byte) error {
    fp, err := f.getPath(name)
    if err != nil {
        return err
    }

    // Ensures the parent directories of the file we are about to write exist
    err = os.MkdirAll(filepath.Dir(fp), f.perms)
    if err != nil {
        return err
    }

    // if something already exists, just delete it and re-write it
    os.RemoveAll(fp)

    // Write the file to disk
    if err = ioutil.WriteFile(fp, meta, f.perms); err != nil {
        return err
    }
    return nil
}

// RemoveAll clears the existing filestore by removing its base directory
func (f *FilesystemStore) RemoveAll() error {
    return os.RemoveAll(f.baseDir)
}

// Remove removes the metadata for a single role - if the metadata doesn't
// exist, no error is returned
func (f *FilesystemStore) Remove(name string) error {
    p, err := f.getPath(name)
    if err != nil {
        return err
    }
    return os.RemoveAll(p) // RemoveAll succeeds if path doesn't exist
}

// Location returns a human readable name for the storage location
func (f FilesystemStore) Location() string {
    return f.baseDir
}

// ListFiles returns a list of all the filenames that can be used with Get*
// to retrieve content from this filestore
func (f FilesystemStore) ListFiles() []string {
    files := make([]string, 0, 0)
    filepath.Walk(f.baseDir, func(fp string, fi os.FileInfo, err error) error {
        // If there are errors, ignore this particular file
        if err != nil {
            return nil
        }
        // Ignore if it is a directory
        if fi.IsDir() {
            return nil
        }

        // If this is a symlink, ignore it
        if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
            return nil
        }

        // Only allow matches that end with our certificate extension (e.g. *.crt)
        matched, _ := filepath.Match("*"+f.ext, fi.Name())

        if matched {
            // Find the relative path for this file relative to the base path.
            fp, err = filepath.Rel(f.baseDir, fp)
            if err != nil {
                return err
            }
            trimmed := strings.TrimSuffix(fp, f.ext)
            files = append(files, trimmed)
        }
        return nil
    })
    return files
}

// createDirectory receives a string of the path to a directory.
// It does not support passing files, so the caller has to remove
// the filename by doing filepath.Dir(full_path_to_file)
func createDirectory(dir string, perms os.FileMode) error {
    // This prevents someone passing /path/to/dir and 'dir' not being created
    // If two '//' exist, MkdirAll deals it with correctly
    dir = dir + "/"
    return os.MkdirAll(dir, perms)
}

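As a quick illustration of the new FilesystemStore above (the path and contents here are made up for the example):

    package main

    import (
        "fmt"

        "github.com/docker/notary/storage"
    )

    func main() {
        // 0700: owner-only, in line with the private-key oriented defaults above
        s, err := storage.NewFileStore("/tmp/tuf-meta", "json", 0700)
        if err != nil {
            panic(err)
        }
        // writes /tmp/tuf-meta/root.json
        if err := s.Set("root", []byte(`{"signed":{}}`)); err != nil {
            panic(err)
        }
        meta, _ := s.Get("root")
        fmt.Println(string(meta), s.ListFiles()) // ... [root]
    }
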
@@ -8,7 +8,7 @@
// If writing your own server, please have a look at
// github.com/docker/distribution/registry/api/errcode

package store
package storage

import (
    "bytes"

@@ -33,6 +33,15 @@ type ErrServerUnavailable struct {
    code int
}

// NetworkError represents any kind of network error when attempting to make a request
type NetworkError struct {
    Wrapped error
}

func (n NetworkError) Error() string {
    return n.Wrapped.Error()
}

func (err ErrServerUnavailable) Error() string {
    if err.code == 401 {
        return fmt.Sprintf("you are not authorized to perform this operation: server returned 401.")

@@ -136,12 +145,12 @@ func translateStatusToError(resp *http.Response, resource string) error {
    }
}

// GetMeta downloads the named meta file with the given size. A short body
// GetSized downloads the named meta file with the given size. A short body
// is acceptable because in the case of timestamp.json, the size is a cap,
// not an exact length.
// If size is "NoSizeLimit", this corresponds to "infinite," but we cut off at a
// predefined threshold "notary.MaxDownloadSize".
func (s HTTPStore) GetMeta(name string, size int64) ([]byte, error) {
func (s HTTPStore) GetSized(name string, size int64) ([]byte, error) {
    url, err := s.buildMetaURL(name)
    if err != nil {
        return nil, err

@@ -152,7 +161,7 @@ func (s HTTPStore) GetMeta(name string, size int64) ([]byte, error) {
    }
    resp, err := s.roundTrip.RoundTrip(req)
    if err != nil {
        return nil, err
        return nil, NetworkError{Wrapped: err}
    }
    defer resp.Body.Close()
    if err := translateStatusToError(resp, name); err != nil {

@@ -174,28 +183,15 @@ func (s HTTPStore) GetMeta(name string, size int64) ([]byte, error) {
    return body, nil
}

// SetMeta uploads a piece of TUF metadata to the server
func (s HTTPStore) SetMeta(name string, blob []byte) error {
    url, err := s.buildMetaURL("")
    if err != nil {
        return err
    }
    req, err := http.NewRequest("POST", url.String(), bytes.NewReader(blob))
    if err != nil {
        return err
    }
    resp, err := s.roundTrip.RoundTrip(req)
    if err != nil {
        return err
    }
    defer resp.Body.Close()
    return translateStatusToError(resp, "POST "+name)
// Set sends a single piece of metadata to the TUF server
func (s HTTPStore) Set(name string, blob []byte) error {
    return s.SetMulti(map[string][]byte{name: blob})
}

// RemoveMeta always fails, because we should never be able to delete metadata
// Remove always fails, because we should never be able to delete metadata
// remotely
func (s HTTPStore) RemoveMeta(name string) error {
    return ErrInvalidOperation{msg: "cannot delete metadata"}
func (s HTTPStore) Remove(name string) error {
    return ErrInvalidOperation{msg: "cannot delete individual metadata files"}
}

// NewMultiPartMetaRequest builds a request with the provided metadata updates

@@ -205,6 +201,9 @@ func NewMultiPartMetaRequest(url string, metas map[string][]byte) (*http.Request
    writer := multipart.NewWriter(body)
    for role, blob := range metas {
        part, err := writer.CreateFormFile("files", role)
        if err != nil {
            return nil, err
        }
        _, err = io.Copy(part, bytes.NewBuffer(blob))
        if err != nil {
            return nil, err

@@ -222,10 +221,10 @@ func NewMultiPartMetaRequest(url string, metas map[string][]byte) (*http.Request
    return req, nil
}

// SetMultiMeta does a single batch upload of multiple pieces of TUF metadata.
// SetMulti does a single batch upload of multiple pieces of TUF metadata.
// This should be preferred for updating a remote server as it enable the server
// to remain consistent, either accepting or rejecting the complete update.
func (s HTTPStore) SetMultiMeta(metas map[string][]byte) error {
func (s HTTPStore) SetMulti(metas map[string][]byte) error {
    url, err := s.buildMetaURL("")
    if err != nil {
        return err

@@ -236,16 +235,29 @@ func (s HTTPStore) SetMultiMeta(metas map[string][]byte) error {
    }
    resp, err := s.roundTrip.RoundTrip(req)
    if err != nil {
        return err
        return NetworkError{Wrapped: err}
    }
    defer resp.Body.Close()
    // if this 404's something is pretty wrong
    return translateStatusToError(resp, "POST metadata endpoint")
}

// RemoveAll in the interface is not supported, admins should use the DeleteHandler endpoint directly to delete remote data for a GUN
// RemoveAll will attempt to delete all TUF metadata for a GUN
func (s HTTPStore) RemoveAll() error {
    return errors.New("remove all functionality not supported for HTTPStore")
    url, err := s.buildMetaURL("")
    if err != nil {
        return err
    }
    req, err := http.NewRequest("DELETE", url.String(), nil)
    if err != nil {
        return err
    }
    resp, err := s.roundTrip.RoundTrip(req)
    if err != nil {
        return NetworkError{Wrapped: err}
    }
    defer resp.Body.Close()
    return translateStatusToError(resp, "DELETE metadata for GUN endpoint")
}

func (s HTTPStore) buildMetaURL(name string) (*url.URL, error) {

@@ -283,7 +295,7 @@ func (s HTTPStore) GetKey(role string) ([]byte, error) {
    }
    resp, err := s.roundTrip.RoundTrip(req)
    if err != nil {
        return nil, err
        return nil, NetworkError{Wrapped: err}
    }
    defer resp.Body.Close()
    if err := translateStatusToError(resp, role+" key"); err != nil {

@@ -295,3 +307,33 @@ func (s HTTPStore) GetKey(role string) ([]byte, error) {
    }
    return body, nil
}

// RotateKey rotates a private key and returns the public component from the remote server
func (s HTTPStore) RotateKey(role string) ([]byte, error) {
    url, err := s.buildKeyURL(role)
    if err != nil {
        return nil, err
    }
    req, err := http.NewRequest("POST", url.String(), nil)
    if err != nil {
        return nil, err
    }
    resp, err := s.roundTrip.RoundTrip(req)
    if err != nil {
        return nil, NetworkError{Wrapped: err}
    }
    defer resp.Body.Close()
    if err := translateStatusToError(resp, role+" key"); err != nil {
        return nil, err
    }
    body, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        return nil, err
    }
    return body, nil
}

// Location returns a human readable name for the storage location
func (s HTTPStore) Location() string {
    return s.baseURL.String()
}

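One consequence of wrapping transport failures in NetworkError is that callers can now tell "could not reach the server" apart from "the server said no"; a minimal sketch, using only the declarations visible in this change:

    package main

    import (
        "fmt"

        "github.com/docker/notary/storage"
    )

    // fetchTimestamp distinguishes transport failures (NetworkError) from
    // server-side errors such as ErrMetaNotFound.
    func fetchTimestamp(remote storage.RemoteStore) ([]byte, error) {
        meta, err := remote.GetSized("timestamp", storage.NoSizeLimit)
        switch err.(type) {
        case nil:
            return meta, nil
        case storage.NetworkError:
            return nil, fmt.Errorf("network failure, retry later: %v", err)
        default:
            return nil, err
        }
    }

    func main() {}
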
@@ -1,4 +1,4 @@
package store
package storage

// NoSizeLimit is represented as -1 for arguments to GetMeta
const NoSizeLimit int64 = -1

@@ -6,21 +6,17 @@ const NoSizeLimit int64 = -1
// MetadataStore must be implemented by anything that intends to interact
// with a store of TUF files
type MetadataStore interface {
    GetMeta(name string, size int64) ([]byte, error)
    SetMeta(name string, blob []byte) error
    SetMultiMeta(map[string][]byte) error
    GetSized(name string, size int64) ([]byte, error)
    Set(name string, blob []byte) error
    SetMulti(map[string][]byte) error
    RemoveAll() error
    RemoveMeta(name string) error
    Remove(name string) error
}

// PublicKeyStore must be implemented by a key service
type PublicKeyStore interface {
    GetKey(role string) ([]byte, error)
}

// LocalStore represents a local TUF sture
type LocalStore interface {
    MetadataStore
    RotateKey(role string) ([]byte, error)
}

// RemoteStore is similar to LocalStore with the added expectation that it should

@@ -29,3 +25,10 @@ type RemoteStore interface {
    MetadataStore
    PublicKeyStore
}

// Bootstrapper is a thing that can set itself up
type Bootstrapper interface {
    // Bootstrap instructs a configured Bootstrapper to perform
    // its setup operations.
    Bootstrap() error
}

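A compile-time assertion is a cheap way to confirm the concrete stores still satisfy the renamed interface methods (GetSized/Set/SetMulti/Remove); this sketch assumes only the declarations visible in this diff:

    package main

    import "github.com/docker/notary/storage"

    // compile-time checks against the renamed MetadataStore methods
    var (
        _ storage.MetadataStore = &storage.MemoryStore{}
        _ storage.RemoteStore   = storage.OfflineStore{}
    )

    func main() {}
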
@@ -1,50 +1,46 @@
package store
package storage

import (
    "crypto/sha256"
    "fmt"

    "github.com/docker/notary"
    "github.com/docker/notary/tuf/data"
    "github.com/docker/notary/tuf/utils"
)

// NewMemoryStore returns a MetadataStore that operates entirely in memory.
// Very useful for testing
func NewMemoryStore(meta map[string][]byte) *MemoryStore {
func NewMemoryStore(initial map[string][]byte) *MemoryStore {
    var consistent = make(map[string][]byte)
    if meta == nil {
        meta = make(map[string][]byte)
    if initial == nil {
        initial = make(map[string][]byte)
    } else {
        // add all seed meta to consistent
        for name, data := range meta {
        for name, data := range initial {
            checksum := sha256.Sum256(data)
            path := utils.ConsistentName(name, checksum[:])
            consistent[path] = data
        }
    }
    return &MemoryStore{
        meta:       meta,
        data:       initial,
        consistent: consistent,
        keys:       make(map[string][]data.PrivateKey),
    }
}

// MemoryStore implements a mock RemoteStore entirely in memory.
// For testing purposes only.
type MemoryStore struct {
    meta       map[string][]byte
    data       map[string][]byte
    consistent map[string][]byte
    keys       map[string][]data.PrivateKey
}

// GetMeta returns up to size bytes of data references by name.
// GetSized returns up to size bytes of data references by name.
// If size is "NoSizeLimit", this corresponds to "infinite," but we cut off at a
// predefined threshold "notary.MaxDownloadSize", as we will always know the
// size for everything but a timestamp and sometimes a root,
// neither of which should be exceptionally large
func (m *MemoryStore) GetMeta(name string, size int64) ([]byte, error) {
    d, ok := m.meta[name]
func (m MemoryStore) GetSized(name string, size int64) ([]byte, error) {
    d, ok := m.data[name]
    if ok {
        if size == NoSizeLimit {
            size = notary.MaxDownloadSize

@@ -64,9 +60,20 @@ func (m *MemoryStore) GetMeta(name string, size int64) ([]byte, error) {
    return nil, ErrMetaNotFound{Resource: name}
}

// SetMeta sets the metadata value for the given name
func (m *MemoryStore) SetMeta(name string, meta []byte) error {
    m.meta[name] = meta
// Get returns the data associated with name
func (m MemoryStore) Get(name string) ([]byte, error) {
    if d, ok := m.data[name]; ok {
        return d, nil
    }
    if d, ok := m.consistent[name]; ok {
        return d, nil
    }
    return nil, ErrMetaNotFound{Resource: name}
}

// Set sets the metadata value for the given name
func (m *MemoryStore) Set(name string, meta []byte) error {
    m.data[name] = meta

    checksum := sha256.Sum256(meta)
    path := utils.ConsistentName(name, checksum[:])

@@ -74,34 +81,44 @@ func (m *MemoryStore) SetMeta(name string, meta []byte) error {
    return nil
}

// SetMultiMeta sets multiple pieces of metadata for multiple names
// SetMulti sets multiple pieces of metadata for multiple names
// in a single operation.
func (m *MemoryStore) SetMultiMeta(metas map[string][]byte) error {
func (m *MemoryStore) SetMulti(metas map[string][]byte) error {
    for role, blob := range metas {
        m.SetMeta(role, blob)
        m.Set(role, blob)
    }
    return nil
}

// RemoveMeta removes the metadata for a single role - if the metadata doesn't
// Remove removes the metadata for a single role - if the metadata doesn't
// exist, no error is returned
func (m *MemoryStore) RemoveMeta(name string) error {
    if meta, ok := m.meta[name]; ok {
func (m *MemoryStore) Remove(name string) error {
    if meta, ok := m.data[name]; ok {
        checksum := sha256.Sum256(meta)
        path := utils.ConsistentName(name, checksum[:])
        delete(m.meta, name)
        delete(m.data, name)
        delete(m.consistent, path)
    }
    return nil
}

// GetKey returns the public key for the given role
func (m *MemoryStore) GetKey(role string) ([]byte, error) {
    return nil, fmt.Errorf("GetKey is not implemented for the MemoryStore")
}

// RemoveAll clears the existing memory store by setting this store as new empty one
func (m *MemoryStore) RemoveAll() error {
    *m = *NewMemoryStore(nil)
    return nil
}

// Location provides a human readable name for the storage location
func (m MemoryStore) Location() string {
    return "memory"
}

// ListFiles returns a list of all files. The names returned should be
// usable with Get directly, with no modification.
func (m *MemoryStore) ListFiles() []string {
    names := make([]string, 0, len(m.data))
    for n := range m.data {
        names = append(names, n)
    }
    return names
}

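The renamed MemoryStore keeps the checksummed ("consistent") copies in sync with the plain names, so both lookups below succeed; a small sketch:

    package main

    import (
        "crypto/sha256"
        "fmt"

        "github.com/docker/notary/storage"
        "github.com/docker/notary/tuf/utils"
    )

    func main() {
        m := storage.NewMemoryStore(nil)
        blob := []byte(`{"signed":{}}`)
        m.Set("root", blob)

        // Set registers the checksummed name too, so both resolve via Get
        sum := sha256.Sum256(blob)
        byPlain, _ := m.Get("root")
        byConsistent, _ := m.Get(utils.ConsistentName("root", sum[:]))
        fmt.Println(string(byPlain) == string(byConsistent)) // true
    }
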
@@ -1,8 +1,4 @@
package store

import (
    "io"
)
package storage

// ErrOffline is used to indicate we are operating offline
type ErrOffline struct{}

@@ -17,23 +13,23 @@ var err = ErrOffline{}
// returns ErrOffline for every operation
type OfflineStore struct{}

// GetMeta returns ErrOffline
func (es OfflineStore) GetMeta(name string, size int64) ([]byte, error) {
// GetSized returns ErrOffline
func (es OfflineStore) GetSized(name string, size int64) ([]byte, error) {
    return nil, err
}

// SetMeta returns ErrOffline
func (es OfflineStore) SetMeta(name string, blob []byte) error {
// Set returns ErrOffline
func (es OfflineStore) Set(name string, blob []byte) error {
    return err
}

// SetMultiMeta returns ErrOffline
func (es OfflineStore) SetMultiMeta(map[string][]byte) error {
// SetMulti returns ErrOffline
func (es OfflineStore) SetMulti(map[string][]byte) error {
    return err
}

// RemoveMeta returns ErrOffline
func (es OfflineStore) RemoveMeta(name string) error {
// Remove returns ErrOffline
func (es OfflineStore) Remove(name string) error {
    return err
}

@@ -42,8 +38,8 @@ func (es OfflineStore) GetKey(role string) ([]byte, error) {
    return nil, err
}

// GetTarget returns ErrOffline
func (es OfflineStore) GetTarget(path string) (io.ReadCloser, error) {
// RotateKey returns ErrOffline
func (es OfflineStore) RotateKey(role string) ([]byte, error) {
    return nil, err
}

@@ -51,3 +47,8 @@ func (es OfflineStore) GetTarget(path string) (io.ReadCloser, error) {
func (es OfflineStore) RemoveAll() error {
    return err
}

// Location returns a human readable name for the storage location
func (es OfflineStore) Location() string {
    return "offline"
}

@@ -1,150 +0,0 @@
package trustmanager

import (
    "fmt"
    "io/ioutil"
    "os"
    "path/filepath"
    "strings"
)

// SimpleFileStore implements FileStore
type SimpleFileStore struct {
    baseDir string
    fileExt string
    perms   os.FileMode
}

// NewFileStore creates a fully configurable file store
func NewFileStore(baseDir, fileExt string, perms os.FileMode) (*SimpleFileStore, error) {
    baseDir = filepath.Clean(baseDir)
    if err := createDirectory(baseDir, perms); err != nil {
        return nil, err
    }
    if !strings.HasPrefix(fileExt, ".") {
        fileExt = "." + fileExt
    }

    return &SimpleFileStore{
        baseDir: baseDir,
        fileExt: fileExt,
        perms:   perms,
    }, nil
}

// NewSimpleFileStore is a convenience wrapper to create a world readable,
// owner writeable filestore
func NewSimpleFileStore(baseDir, fileExt string) (*SimpleFileStore, error) {
    return NewFileStore(baseDir, fileExt, visible)
}

// NewPrivateSimpleFileStore is a wrapper to create an owner readable/writeable
// _only_ filestore
func NewPrivateSimpleFileStore(baseDir, fileExt string) (*SimpleFileStore, error) {
    return NewFileStore(baseDir, fileExt, private)
}

// Add writes data to a file with a given name
func (f *SimpleFileStore) Add(name string, data []byte) error {
    filePath, err := f.GetPath(name)
    if err != nil {
        return err
    }
    createDirectory(filepath.Dir(filePath), f.perms)
    return ioutil.WriteFile(filePath, data, f.perms)
}

// Remove removes a file identified by name
func (f *SimpleFileStore) Remove(name string) error {
    // Attempt to remove
    filePath, err := f.GetPath(name)
    if err != nil {
        return err
    }
    return os.Remove(filePath)
}

// Get returns the data given a file name
func (f *SimpleFileStore) Get(name string) ([]byte, error) {
    filePath, err := f.GetPath(name)
    if err != nil {
        return nil, err
    }
    data, err := ioutil.ReadFile(filePath)
    if err != nil {
        return nil, err
    }

    return data, nil
}

// GetPath returns the full final path of a file with a given name
func (f *SimpleFileStore) GetPath(name string) (string, error) {
    fileName := f.genFileName(name)
    fullPath := filepath.Clean(filepath.Join(f.baseDir, fileName))

    if !strings.HasPrefix(fullPath, f.baseDir) {
        return "", ErrPathOutsideStore
    }
    return fullPath, nil
}

// ListFiles lists all the files inside of a store
func (f *SimpleFileStore) ListFiles() []string {
    return f.list(f.baseDir)
}

// list lists all the files in a directory given a full path. Ignores symlinks.
func (f *SimpleFileStore) list(path string) []string {
    files := make([]string, 0, 0)
    filepath.Walk(path, func(fp string, fi os.FileInfo, err error) error {
        // If there are errors, ignore this particular file
        if err != nil {
            return nil
        }
        // Ignore if it is a directory
        if fi.IsDir() {
            return nil
        }

        // If this is a symlink, ignore it
        if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
            return nil
        }

        // Only allow matches that end with our certificate extension (e.g. *.crt)
        matched, _ := filepath.Match("*"+f.fileExt, fi.Name())

        if matched {
            // Find the relative path for this file relative to the base path.
            fp, err = filepath.Rel(path, fp)
            if err != nil {
                return err
            }
            trimmed := strings.TrimSuffix(fp, f.fileExt)
            files = append(files, trimmed)
        }
        return nil
    })
    return files
}

// genFileName returns the name using the right extension
func (f *SimpleFileStore) genFileName(name string) string {
    return fmt.Sprintf("%s%s", name, f.fileExt)
}

// BaseDir returns the base directory of the filestore
func (f *SimpleFileStore) BaseDir() string {
    return f.baseDir
}

// createDirectory receives a string of the path to a directory.
// It does not support passing files, so the caller has to remove
// the filename by doing filepath.Dir(full_path_to_file)
func createDirectory(dir string, perms os.FileMode) error {
    // This prevents someone passing /path/to/dir and 'dir' not being created
    // If two '//' exist, MkdirAll deals it with correctly
    dir = dir + "/"
    return os.MkdirAll(dir, perms)
}

vendor/src/github.com/docker/notary/trustmanager/interfaces.go (vendored, new file, 82 lines)
@@ -0,0 +1,82 @@
package trustmanager

import (
    "fmt"

    "github.com/docker/notary/tuf/data"
)

// Storage implements the bare bones primitives (no hierarchy)
type Storage interface {
    // Add writes a file to the specified location, returning an error if this
    // is not possible (reasons may include permissions errors). The path is cleaned
    // before being made absolute against the store's base dir.
    Set(fileName string, data []byte) error

    // Remove deletes a file from the store relative to the store's base directory.
    // The path is cleaned before being made absolute to ensure no path traversal
    // outside the base directory is possible.
    Remove(fileName string) error

    // Get returns the file content found at fileName relative to the base directory
    // of the file store. The path is cleaned before being made absolute to ensure
    // path traversal outside the store is not possible. If the file is not found
    // an error to that effect is returned.
    Get(fileName string) ([]byte, error)

    // ListFiles returns a list of paths relative to the base directory of the
    // filestore. Any of these paths must be retrievable via the
    // Storage.Get method.
    ListFiles() []string

    // Location returns a human readable name indicating where the implementer
    // is storing keys
    Location() string
}

// ErrAttemptsExceeded is returned when too many attempts have been made to decrypt a key
type ErrAttemptsExceeded struct{}

// ErrAttemptsExceeded is returned when too many attempts have been made to decrypt a key
func (err ErrAttemptsExceeded) Error() string {
    return "maximum number of passphrase attempts exceeded"
}

// ErrPasswordInvalid is returned when signing fails. It could also mean the signing
// key file was corrupted, but we have no way to distinguish.
type ErrPasswordInvalid struct{}

// ErrPasswordInvalid is returned when signing fails. It could also mean the signing
// key file was corrupted, but we have no way to distinguish.
func (err ErrPasswordInvalid) Error() string {
    return "password invalid, operation has failed."
}

// ErrKeyNotFound is returned when the keystore fails to retrieve a specific key.
type ErrKeyNotFound struct {
    KeyID string
}

// ErrKeyNotFound is returned when the keystore fails to retrieve a specific key.
func (err ErrKeyNotFound) Error() string {
    return fmt.Sprintf("signing key not found: %s", err.KeyID)
}

// KeyStore is a generic interface for private key storage
type KeyStore interface {
    // AddKey adds a key to the KeyStore, and if the key already exists,
    // succeeds. Otherwise, returns an error if it cannot add.
    AddKey(keyInfo KeyInfo, privKey data.PrivateKey) error
    // Should fail with ErrKeyNotFound if the keystore is operating normally
    // and knows that it does not store the requested key.
    GetKey(keyID string) (data.PrivateKey, string, error)
    GetKeyInfo(keyID string) (KeyInfo, error)
    ListKeys() map[string]KeyInfo
    RemoveKey(keyID string) error
    Name() string
}

type cachedKey struct {
    alias string
    key   data.PrivateKey
}

@@ -1,497 +0,0 @@
package trustmanager

import (
    "encoding/pem"
    "fmt"
    "path/filepath"
    "strings"
    "sync"

    "github.com/Sirupsen/logrus"
    "github.com/docker/notary"
    "github.com/docker/notary/passphrase"
    "github.com/docker/notary/tuf/data"
)

type keyInfoMap map[string]KeyInfo

// KeyFileStore persists and manages private keys on disk
type KeyFileStore struct {
    sync.Mutex
    SimpleFileStore
    passphrase.Retriever
    cachedKeys map[string]*cachedKey
    keyInfoMap
}

// KeyMemoryStore manages private keys in memory
type KeyMemoryStore struct {
    sync.Mutex
    MemoryFileStore
    passphrase.Retriever
    cachedKeys map[string]*cachedKey
    keyInfoMap
}

// KeyInfo stores the role, path, and gun for a corresponding private key ID
// It is assumed that each private key ID is unique
type KeyInfo struct {
    Gun  string
    Role string
}

// NewKeyFileStore returns a new KeyFileStore creating a private directory to
// hold the keys.
func NewKeyFileStore(baseDir string, passphraseRetriever passphrase.Retriever) (*KeyFileStore, error) {
    baseDir = filepath.Join(baseDir, notary.PrivDir)
    fileStore, err := NewPrivateSimpleFileStore(baseDir, keyExtension)
    if err != nil {
        return nil, err
    }
    cachedKeys := make(map[string]*cachedKey)
    keyInfoMap := make(keyInfoMap)

    keyStore := &KeyFileStore{SimpleFileStore: *fileStore,
        Retriever:  passphraseRetriever,
        cachedKeys: cachedKeys,
        keyInfoMap: keyInfoMap,
    }

    // Load this keystore's ID --> gun/role map
    keyStore.loadKeyInfo()
    return keyStore, nil
}

func generateKeyInfoMap(s Storage) map[string]KeyInfo {
    keyInfoMap := make(map[string]KeyInfo)
    for _, keyPath := range s.ListFiles() {
        d, err := s.Get(keyPath)
        if err != nil {
            logrus.Error(err)
            continue
        }
        keyID, keyInfo, err := KeyInfoFromPEM(d, keyPath)
        if err != nil {
            logrus.Error(err)
            continue
        }
        keyInfoMap[keyID] = keyInfo
    }
    return keyInfoMap
}

// Attempts to infer the keyID, role, and GUN from the specified key path.
// Note that non-root roles can only be inferred if this is a legacy style filename: KEYID_ROLE.key
func inferKeyInfoFromKeyPath(keyPath string) (string, string, string) {
    var keyID, role, gun string
    keyID = filepath.Base(keyPath)
    underscoreIndex := strings.LastIndex(keyID, "_")

    // This is the legacy KEYID_ROLE filename
    // The keyID is the first part of the keyname
    // The keyRole is the second part of the keyname
    // in a key named abcde_root, abcde is the keyID and root is the KeyAlias
    if underscoreIndex != -1 {
        role = keyID[underscoreIndex+1:]
        keyID = keyID[:underscoreIndex]
    }

    if filepath.HasPrefix(keyPath, notary.RootKeysSubdir+"/") {
        return keyID, data.CanonicalRootRole, ""
    }

    keyPath = strings.TrimPrefix(keyPath, notary.NonRootKeysSubdir+"/")
    gun = getGunFromFullID(keyPath)
    return keyID, role, gun
}

func getGunFromFullID(fullKeyID string) string {
    keyGun := filepath.Dir(fullKeyID)
    // If the gun is empty, Dir will return .
    if keyGun == "." {
        keyGun = ""
    }
    return keyGun
}

func (s *KeyFileStore) loadKeyInfo() {
    s.keyInfoMap = generateKeyInfoMap(s)
}

func (s *KeyMemoryStore) loadKeyInfo() {
    s.keyInfoMap = generateKeyInfoMap(s)
}

// GetKeyInfo returns the corresponding gun and role key info for a keyID
func (s *KeyFileStore) GetKeyInfo(keyID string) (KeyInfo, error) {
    if info, ok := s.keyInfoMap[keyID]; ok {
        return info, nil
    }
    return KeyInfo{}, fmt.Errorf("Could not find info for keyID %s", keyID)
}

// GetKeyInfo returns the corresponding gun and role key info for a keyID
func (s *KeyMemoryStore) GetKeyInfo(keyID string) (KeyInfo, error) {
    if info, ok := s.keyInfoMap[keyID]; ok {
        return info, nil
    }
    return KeyInfo{}, fmt.Errorf("Could not find info for keyID %s", keyID)
}

// Name returns a user friendly name for the location this store
// keeps its data
func (s *KeyFileStore) Name() string {
    return fmt.Sprintf("file (%s)", s.SimpleFileStore.BaseDir())
}

// AddKey stores the contents of a PEM-encoded private key as a PEM block
func (s *KeyFileStore) AddKey(keyInfo KeyInfo, privKey data.PrivateKey) error {
    s.Lock()
    defer s.Unlock()
    if keyInfo.Role == data.CanonicalRootRole || data.IsDelegation(keyInfo.Role) || !data.ValidRole(keyInfo.Role) {
        keyInfo.Gun = ""
    }
    err := addKey(s, s.Retriever, s.cachedKeys, filepath.Join(keyInfo.Gun, privKey.ID()), keyInfo.Role, privKey)
    if err != nil {
        return err
    }
    s.keyInfoMap[privKey.ID()] = keyInfo
    return nil
}

// GetKey returns the PrivateKey given a KeyID
func (s *KeyFileStore) GetKey(name string) (data.PrivateKey, string, error) {
    s.Lock()
    defer s.Unlock()
    // If this is a bare key ID without the gun, prepend the gun so the filestore lookup succeeds
    if keyInfo, ok := s.keyInfoMap[name]; ok {
        name = filepath.Join(keyInfo.Gun, name)
    }
    return getKey(s, s.Retriever, s.cachedKeys, name)
}

// ListKeys returns a list of unique PublicKeys present on the KeyFileStore, by returning a copy of the keyInfoMap
func (s *KeyFileStore) ListKeys() map[string]KeyInfo {
    return copyKeyInfoMap(s.keyInfoMap)
}

// RemoveKey removes the key from the keyfilestore
func (s *KeyFileStore) RemoveKey(keyID string) error {
    s.Lock()
    defer s.Unlock()
    // If this is a bare key ID without the gun, prepend the gun so the filestore lookup succeeds
    if keyInfo, ok := s.keyInfoMap[keyID]; ok {
        keyID = filepath.Join(keyInfo.Gun, keyID)
    }
    err := removeKey(s, s.cachedKeys, keyID)
    if err != nil {
        return err
    }
    // Remove this key from our keyInfo map if we removed from our filesystem
    delete(s.keyInfoMap, filepath.Base(keyID))
    return nil
}

// ExportKey exports the encrypted bytes from the keystore
func (s *KeyFileStore) ExportKey(keyID string) ([]byte, error) {
    if keyInfo, ok := s.keyInfoMap[keyID]; ok {
        keyID = filepath.Join(keyInfo.Gun, keyID)
    }
    keyBytes, _, err := getRawKey(s, keyID)
    if err != nil {
        return nil, err
    }
    return keyBytes, nil
}

// NewKeyMemoryStore returns a new KeyMemoryStore which holds keys in memory
func NewKeyMemoryStore(passphraseRetriever passphrase.Retriever) *KeyMemoryStore {
    memStore := NewMemoryFileStore()
    cachedKeys := make(map[string]*cachedKey)

    keyInfoMap := make(keyInfoMap)

    keyStore := &KeyMemoryStore{MemoryFileStore: *memStore,
        Retriever:  passphraseRetriever,
        cachedKeys: cachedKeys,
        keyInfoMap: keyInfoMap,
    }

    // Load this keystore's ID --> gun/role map
    keyStore.loadKeyInfo()
    return keyStore
}

// Name returns a user friendly name for the location this store
// keeps its data
func (s *KeyMemoryStore) Name() string {
    return "memory"
}

// AddKey stores the contents of a PEM-encoded private key as a PEM block
func (s *KeyMemoryStore) AddKey(keyInfo KeyInfo, privKey data.PrivateKey) error {
    s.Lock()
    defer s.Unlock()
    if keyInfo.Role == data.CanonicalRootRole || data.IsDelegation(keyInfo.Role) || !data.ValidRole(keyInfo.Role) {
        keyInfo.Gun = ""
    }
    err := addKey(s, s.Retriever, s.cachedKeys, filepath.Join(keyInfo.Gun, privKey.ID()), keyInfo.Role, privKey)
    if err != nil {
        return err
    }
    s.keyInfoMap[privKey.ID()] = keyInfo
    return nil
}

// GetKey returns the PrivateKey given a KeyID
func (s *KeyMemoryStore) GetKey(name string) (data.PrivateKey, string, error) {
    s.Lock()
    defer s.Unlock()
    // If this is a bare key ID without the gun, prepend the gun so the filestore lookup succeeds
    if keyInfo, ok := s.keyInfoMap[name]; ok {
        name = filepath.Join(keyInfo.Gun, name)
    }
    return getKey(s, s.Retriever, s.cachedKeys, name)
}

// ListKeys returns a list of unique PublicKeys present on the KeyFileStore, by returning a copy of the keyInfoMap
func (s *KeyMemoryStore) ListKeys() map[string]KeyInfo {
    return copyKeyInfoMap(s.keyInfoMap)
}

// copyKeyInfoMap returns a deep copy of the passed-in keyInfoMap
func copyKeyInfoMap(keyInfoMap map[string]KeyInfo) map[string]KeyInfo {
    copyMap := make(map[string]KeyInfo)
    for keyID, keyInfo := range keyInfoMap {
        copyMap[keyID] = KeyInfo{Role: keyInfo.Role, Gun: keyInfo.Gun}
    }
    return copyMap
}

// RemoveKey removes the key from the keystore
func (s *KeyMemoryStore) RemoveKey(keyID string) error {
    s.Lock()
    defer s.Unlock()
    // If this is a bare key ID without the gun, prepend the gun so the filestore lookup succeeds
    if keyInfo, ok := s.keyInfoMap[keyID]; ok {
        keyID = filepath.Join(keyInfo.Gun, keyID)
    }
    err := removeKey(s, s.cachedKeys, keyID)
    if err != nil {
        return err
    }
    // Remove this key from our keyInfo map if we removed from our filesystem
    delete(s.keyInfoMap, filepath.Base(keyID))
    return nil
}

// ExportKey exports the encrypted bytes from the keystore
func (s *KeyMemoryStore) ExportKey(keyID string) ([]byte, error) {
    keyBytes, _, err := getRawKey(s, keyID)
    if err != nil {
        return nil, err
    }
    return keyBytes, nil
}

// KeyInfoFromPEM attempts to get a keyID and KeyInfo from the filename and PEM bytes of a key
func KeyInfoFromPEM(pemBytes []byte, filename string) (string, KeyInfo, error) {
    keyID, role, gun := inferKeyInfoFromKeyPath(filename)
    if role == "" {
        block, _ := pem.Decode(pemBytes)
        if block == nil {
            return "", KeyInfo{}, fmt.Errorf("could not decode PEM block for key %s", filename)
        }
        if keyRole, ok := block.Headers["role"]; ok {
            role = keyRole
        }
    }
    return keyID, KeyInfo{Gun: gun, Role: role}, nil
}

func addKey(s Storage, passphraseRetriever passphrase.Retriever, cachedKeys map[string]*cachedKey, name, role string, privKey data.PrivateKey) error {

    var (
        chosenPassphrase string
        giveup           bool
        err              error
    )

    for attempts := 0; ; attempts++ {
        chosenPassphrase, giveup, err = passphraseRetriever(name, role, true, attempts)
        if err != nil {
            continue
        }
        if giveup {
            return ErrAttemptsExceeded{}
        }
        if attempts > 10 {
            return ErrAttemptsExceeded{}
        }
        break
    }

    return encryptAndAddKey(s, chosenPassphrase, cachedKeys, name, role, privKey)
}

// getKeyRole finds the role for the given keyID. It attempts to look
// both in the newer format PEM headers, and also in the legacy filename
// format. It returns: the role, whether it was found in the legacy format
// (true == legacy), and an error
func getKeyRole(s Storage, keyID string) (string, bool, error) {
    name := strings.TrimSpace(strings.TrimSuffix(filepath.Base(keyID), filepath.Ext(keyID)))

    for _, file := range s.ListFiles() {
        filename := filepath.Base(file)

        if strings.HasPrefix(filename, name) {
            d, err := s.Get(file)
            if err != nil {
                return "", false, err
            }
            block, _ := pem.Decode(d)
            if block != nil {
                if role, ok := block.Headers["role"]; ok {
                    return role, false, nil
                }
            }

            role := strings.TrimPrefix(filename, name+"_")
            return role, true, nil
        }
    }

    return "", false, ErrKeyNotFound{KeyID: keyID}
}

// GetKey returns the PrivateKey given a KeyID
func getKey(s Storage, passphraseRetriever passphrase.Retriever, cachedKeys map[string]*cachedKey, name string) (data.PrivateKey, string, error) {
    cachedKeyEntry, ok := cachedKeys[name]
    if ok {
        return cachedKeyEntry.key, cachedKeyEntry.alias, nil
    }

    keyBytes, keyAlias, err := getRawKey(s, name)
    if err != nil {
        return nil, "", err
    }

    // See if the key is encrypted. If its encrypted we'll fail to parse the private key
    privKey, err := ParsePEMPrivateKey(keyBytes, "")
    if err != nil {
        privKey, _, err = GetPasswdDecryptBytes(passphraseRetriever, keyBytes, name, string(keyAlias))
        if err != nil {
            return nil, "", err
        }
    }
    cachedKeys[name] = &cachedKey{alias: keyAlias, key: privKey}
    return privKey, keyAlias, nil
}

// RemoveKey removes the key from the keyfilestore
func removeKey(s Storage, cachedKeys map[string]*cachedKey, name string) error {
    role, legacy, err := getKeyRole(s, name)
    if err != nil {
        return err
    }

    delete(cachedKeys, name)

    if legacy {
        name = name + "_" + role
    }

    // being in a subdirectory is for backwards compatibliity
    err = s.Remove(filepath.Join(getSubdir(role), name))
    if err != nil {
        return err
    }
    return nil
}

// Assumes 2 subdirectories, 1 containing root keys and 1 containing tuf keys
func getSubdir(alias string) string {
    if alias == data.CanonicalRootRole {
        return notary.RootKeysSubdir
    }
    return notary.NonRootKeysSubdir
}

// Given a key ID, gets the bytes and alias belonging to that key if the key
// exists
func getRawKey(s Storage, name string) ([]byte, string, error) {
    role, legacy, err := getKeyRole(s, name)
    if err != nil {
        return nil, "", err
    }

    if legacy {
        name = name + "_" + role
    }

    var keyBytes []byte
    keyBytes, err = s.Get(filepath.Join(getSubdir(role), name))
    if err != nil {
        return nil, "", err
    }
    return keyBytes, role, nil
}

// GetPasswdDecryptBytes gets the password to decrypt the given pem bytes.
// Returns the password and private key
func GetPasswdDecryptBytes(passphraseRetriever passphrase.Retriever, pemBytes []byte, name, alias string) (data.PrivateKey, string, error) {
    var (
        passwd  string
        retErr  error
        privKey data.PrivateKey
    )
    for attempts := 0; ; attempts++ {
        var (
            giveup bool
            err    error
        )
        passwd, giveup, err = passphraseRetriever(name, alias, false, attempts)
        // Check if the passphrase retriever got an error or if it is telling us to give up
        if giveup || err != nil {
            return nil, "", ErrPasswordInvalid{}
        }
        if attempts > 10 {
            return nil, "", ErrAttemptsExceeded{}
        }

        // Try to convert PEM encoded bytes back to a PrivateKey using the passphrase
        privKey, err = ParsePEMPrivateKey(pemBytes, passwd)
        if err != nil {
            retErr = ErrPasswordInvalid{}
        } else {
            // We managed to parse the PrivateKey. We've succeeded!
            retErr = nil
            break
        }
    }
    if retErr != nil {
        return nil, "", retErr
    }
    return privKey, passwd, nil
}

func encryptAndAddKey(s Storage, passwd string, cachedKeys map[string]*cachedKey, name, role string, privKey data.PrivateKey) error {

    var (
        pemPrivKey []byte
        err        error
    )

    if passwd != "" {
        pemPrivKey, err = EncryptPrivateKey(privKey, role, passwd)
    } else {
        pemPrivKey, err = KeyToPEM(privKey, role)
    }

    if err != nil {
        return err
    }

    cachedKeys[name] = &cachedKey{alias: role, key: privKey}
    return s.Add(filepath.Join(getSubdir(role), name), pemPrivKey)
}

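The legacy KeyFileStore/KeyMemoryStore pair above is collapsed into a single GenericKeyStore in the next hunk; under that API, constructing an in-memory keystore might look like the sketch below (the passphrase is illustrative, and it assumes GenericKeyStore keeps the KeyStore methods such as Name and ListKeys declared in interfaces.go):

    package main

    import (
        "fmt"

        "github.com/docker/notary/passphrase"
        "github.com/docker/notary/trustmanager"
    )

    func main() {
        // any notary.PassRetriever works; ConstantRetriever avoids prompting
        ks := trustmanager.NewKeyMemoryStore(passphrase.ConstantRetriever("testpassword"))
        fmt.Println(ks.Name(), len(ks.ListKeys())) // fresh store: no keys
    }
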
@ -1,59 +1,325 @@
package trustmanager

import (
    "encoding/pem"
    "fmt"
    "path/filepath"
    "strings"
    "sync"

    "github.com/Sirupsen/logrus"
    "github.com/docker/notary"
    store "github.com/docker/notary/storage"
    "github.com/docker/notary/tuf/data"
    "github.com/docker/notary/tuf/utils"
)

// ErrAttemptsExceeded is returned when too many attempts have been made to decrypt a key
type ErrAttemptsExceeded struct{}
type keyInfoMap map[string]KeyInfo

// ErrAttemptsExceeded is returned when too many attempts have been made to decrypt a key
func (err ErrAttemptsExceeded) Error() string {
    return "maximum number of passphrase attempts exceeded"
// KeyInfo stores the role, path, and gun for a corresponding private key ID
// It is assumed that each private key ID is unique
type KeyInfo struct {
    Gun  string
    Role string
}

// ErrPasswordInvalid is returned when signing fails. It could also mean the signing
// key file was corrupted, but we have no way to distinguish.
type ErrPasswordInvalid struct{}

// ErrPasswordInvalid is returned when signing fails. It could also mean the signing
// key file was corrupted, but we have no way to distinguish.
func (err ErrPasswordInvalid) Error() string {
    return "password invalid, operation has failed."
// GenericKeyStore is a wrapper for Storage instances that provides
// translation between the []byte form and Public/PrivateKey objects
type GenericKeyStore struct {
    store Storage
    sync.Mutex
    notary.PassRetriever
    cachedKeys map[string]*cachedKey
    keyInfoMap
}

// ErrKeyNotFound is returned when the keystore fails to retrieve a specific key.
type ErrKeyNotFound struct {
    KeyID string
// NewKeyFileStore returns a new KeyFileStore creating a private directory to
// hold the keys.
func NewKeyFileStore(baseDir string, p notary.PassRetriever) (*GenericKeyStore, error) {
    fileStore, err := store.NewPrivateKeyFileStorage(baseDir, notary.KeyExtension)
    if err != nil {
        return nil, err
    }
    return NewGenericKeyStore(fileStore, p), nil
}

// ErrKeyNotFound is returned when the keystore fails to retrieve a specific key.
func (err ErrKeyNotFound) Error() string {
    return fmt.Sprintf("signing key not found: %s", err.KeyID)
// NewKeyMemoryStore returns a new KeyMemoryStore which holds keys in memory
func NewKeyMemoryStore(p notary.PassRetriever) *GenericKeyStore {
    memStore := store.NewMemoryStore(nil)
    return NewGenericKeyStore(memStore, p)
}

const (
    keyExtension = "key"
// NewGenericKeyStore creates a GenericKeyStore wrapping the provided
// Storage instance, using the PassRetriever to enc/decrypt keys
func NewGenericKeyStore(s Storage, p notary.PassRetriever) *GenericKeyStore {
    ks := GenericKeyStore{
        store:         s,
        PassRetriever: p,
        cachedKeys:    make(map[string]*cachedKey),
        keyInfoMap:    make(keyInfoMap),
    }
    ks.loadKeyInfo()
    return &ks
}

func generateKeyInfoMap(s Storage) map[string]KeyInfo {
    keyInfoMap := make(map[string]KeyInfo)
    for _, keyPath := range s.ListFiles() {
        d, err := s.Get(keyPath)
        if err != nil {
            logrus.Error(err)
            continue
        }
        keyID, keyInfo, err := KeyInfoFromPEM(d, keyPath)
        if err != nil {
            logrus.Error(err)
            continue
        }
        keyInfoMap[keyID] = keyInfo
    }
    return keyInfoMap
}

// Attempts to infer the keyID, role, and GUN from the specified key path.
// Note that non-root roles can only be inferred if this is a legacy style filename: KEYID_ROLE.key
func inferKeyInfoFromKeyPath(keyPath string) (string, string, string) {
    var keyID, role, gun string
    keyID = filepath.Base(keyPath)
    underscoreIndex := strings.LastIndex(keyID, "_")

    // This is the legacy KEYID_ROLE filename
    // The keyID is the first part of the keyname
    // The keyRole is the second part of the keyname
    // in a key named abcde_root, abcde is the keyID and root is the KeyAlias
    if underscoreIndex != -1 {
        role = keyID[underscoreIndex+1:]
        keyID = keyID[:underscoreIndex]
    }

    if filepath.HasPrefix(keyPath, notary.RootKeysSubdir+"/") {
        return keyID, data.CanonicalRootRole, ""
    }

    keyPath = strings.TrimPrefix(keyPath, notary.NonRootKeysSubdir+"/")
    gun = getGunFromFullID(keyPath)
    return keyID, role, gun
}

func getGunFromFullID(fullKeyID string) string {
    keyGun := filepath.Dir(fullKeyID)
    // If the gun is empty, Dir will return .
    if keyGun == "." {
        keyGun = ""
    }
    return keyGun
}

func (s *GenericKeyStore) loadKeyInfo() {
    s.keyInfoMap = generateKeyInfoMap(s.store)
}

// GetKeyInfo returns the corresponding gun and role key info for a keyID
func (s *GenericKeyStore) GetKeyInfo(keyID string) (KeyInfo, error) {
    if info, ok := s.keyInfoMap[keyID]; ok {
        return info, nil
    }
    return KeyInfo{}, fmt.Errorf("Could not find info for keyID %s", keyID)
}

// AddKey stores the contents of a PEM-encoded private key as a PEM block
func (s *GenericKeyStore) AddKey(keyInfo KeyInfo, privKey data.PrivateKey) error {
    var (
        chosenPassphrase string
        giveup           bool
        err              error
        pemPrivKey       []byte
    )

// KeyStore is a generic interface for private key storage
type KeyStore interface {
    // AddKey adds a key to the KeyStore, and if the key already exists,
    // succeeds. Otherwise, returns an error if it cannot add.
    AddKey(keyInfo KeyInfo, privKey data.PrivateKey) error
    // Should fail with ErrKeyNotFound if the keystore is operating normally
    // and knows that it does not store the requested key.
    GetKey(keyID string) (data.PrivateKey, string, error)
    GetKeyInfo(keyID string) (KeyInfo, error)
    ListKeys() map[string]KeyInfo
    RemoveKey(keyID string) error
    ExportKey(keyID string) ([]byte, error)
    Name() string
    s.Lock()
    defer s.Unlock()
    if keyInfo.Role == data.CanonicalRootRole || data.IsDelegation(keyInfo.Role) || !data.ValidRole(keyInfo.Role) {
        keyInfo.Gun = ""
    }
    name := filepath.Join(keyInfo.Gun, privKey.ID())
    for attempts := 0; ; attempts++ {
        chosenPassphrase, giveup, err = s.PassRetriever(name, keyInfo.Role, true, attempts)
        if err == nil {
            break
        }
        if giveup || attempts > 10 {
            return ErrAttemptsExceeded{}
        }
    }

type cachedKey struct {
    alias string
    key   data.PrivateKey
    if chosenPassphrase != "" {
        pemPrivKey, err = utils.EncryptPrivateKey(privKey, keyInfo.Role, keyInfo.Gun, chosenPassphrase)
    } else {
        pemPrivKey, err = utils.KeyToPEM(privKey, keyInfo.Role)
    }

    if err != nil {
        return err
    }

    s.cachedKeys[name] = &cachedKey{alias: keyInfo.Role, key: privKey}
    err = s.store.Set(filepath.Join(getSubdir(keyInfo.Role), name), pemPrivKey)
    if err != nil {
        return err
    }
    s.keyInfoMap[privKey.ID()] = keyInfo
    return nil
}

// GetKey returns the PrivateKey given a KeyID
func (s *GenericKeyStore) GetKey(name string) (data.PrivateKey, string, error) {
    s.Lock()
    defer s.Unlock()
    cachedKeyEntry, ok := s.cachedKeys[name]
    if ok {
        return cachedKeyEntry.key, cachedKeyEntry.alias, nil
    }

    keyBytes, _, keyAlias, err := getKey(s.store, name)
    if err != nil {
        return nil, "", err
    }

    // See if the key is encrypted. If its encrypted we'll fail to parse the private key
    privKey, err := utils.ParsePEMPrivateKey(keyBytes, "")
    if err != nil {
        privKey, _, err = GetPasswdDecryptBytes(s.PassRetriever, keyBytes, name, string(keyAlias))
        if err != nil {
            return nil, "", err
        }
    }
    s.cachedKeys[name] = &cachedKey{alias: keyAlias, key: privKey}
    return privKey, keyAlias, nil
}

// ListKeys returns a list of unique PublicKeys present on the KeyFileStore, by returning a copy of the keyInfoMap
func (s *GenericKeyStore) ListKeys() map[string]KeyInfo {
    return copyKeyInfoMap(s.keyInfoMap)
}

// RemoveKey removes the key from the keyfilestore
func (s *GenericKeyStore) RemoveKey(keyID string) error {
    s.Lock()
    defer s.Unlock()

    _, filename, _, err := getKey(s.store, keyID)
    switch err.(type) {
    case ErrKeyNotFound, nil:
        break
    default:
        return err
    }

    delete(s.cachedKeys, keyID)

    err = s.store.Remove(filename) // removing a file that doesn't exist doesn't fail
    if err != nil {
        return err
    }

    // Remove this key from our keyInfo map if we removed from our filesystem
    delete(s.keyInfoMap, filepath.Base(keyID))
    return nil
}

// Name returns a user friendly name for the location this store
// keeps its data
func (s *GenericKeyStore) Name() string {
    return s.store.Location()
}

// copyKeyInfoMap returns a deep copy of the passed-in keyInfoMap
func copyKeyInfoMap(keyInfoMap map[string]KeyInfo) map[string]KeyInfo {
    copyMap := make(map[string]KeyInfo)
    for keyID, keyInfo := range keyInfoMap {
        copyMap[keyID] = KeyInfo{Role: keyInfo.Role, Gun: keyInfo.Gun}
    }
    return copyMap
}

// KeyInfoFromPEM attempts to get a keyID and KeyInfo from the filename and PEM bytes of a key
func KeyInfoFromPEM(pemBytes []byte, filename string) (string, KeyInfo, error) {
    keyID, role, gun := inferKeyInfoFromKeyPath(filename)
    if role == "" {
        block, _ := pem.Decode(pemBytes)
        if block == nil {
            return "", KeyInfo{}, fmt.Errorf("could not decode PEM block for key %s", filename)
        }
        if keyRole, ok := block.Headers["role"]; ok {
            role = keyRole
        }
    }
    return keyID, KeyInfo{Gun: gun, Role: role}, nil
}

// getKey finds the key and role for the given keyID. It attempts to
// look both in the newer format PEM headers, and also in the legacy filename
// format. It returns: the key bytes, the filename it was found under, the role,
// and an error
func getKey(s Storage, keyID string) ([]byte, string, string, error) {
    name := strings.TrimSpace(strings.TrimSuffix(filepath.Base(keyID), filepath.Ext(keyID)))

    for _, file := range s.ListFiles() {
        filename := filepath.Base(file)

        if strings.HasPrefix(filename, name) {
            d, err := s.Get(file)
            if err != nil {
                return nil, "", "", err
            }
            block, _ := pem.Decode(d)
            if block != nil {
                if role, ok := block.Headers["role"]; ok {
                    return d, file, role, nil
                }
            }

            role := strings.TrimPrefix(filename, name+"_")
            return d, file, role, nil
        }
    }

    return nil, "", "", ErrKeyNotFound{KeyID: keyID}
}

// Assumes 2 subdirectories, 1 containing root keys and 1 containing TUF keys
func getSubdir(alias string) string {
    if alias == data.CanonicalRootRole {
        return notary.RootKeysSubdir
    }
    return notary.NonRootKeysSubdir
}

// GetPasswdDecryptBytes gets the password to decrypt the given pem bytes.
// Returns the password and private key
func GetPasswdDecryptBytes(passphraseRetriever notary.PassRetriever, pemBytes []byte, name, alias string) (data.PrivateKey, string, error) {
    var (
        passwd  string
        privKey data.PrivateKey
    )
    for attempts := 0; ; attempts++ {
        var (
            giveup bool
            err    error
        )
        if attempts > 10 {
            return nil, "", ErrAttemptsExceeded{}
        }
        passwd, giveup, err = passphraseRetriever(name, alias, false, attempts)
        // Check if the passphrase retriever got an error or if it is telling us to give up
        if giveup || err != nil {
            return nil, "", ErrPasswordInvalid{}
        }

        // Try to convert PEM encoded bytes back to a PrivateKey using the passphrase
        privKey, err = utils.ParsePEMPrivateKey(pemBytes, passwd)
        if err == nil {
            // We managed to parse the PrivateKey. We've succeeded!
            break
        }
    }
    return privKey, passwd, nil
}
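As a usage note (not part of this commit): the new GenericKeyStore above is driven entirely by a notary.PassRetriever, whose call sites in this hunk take (name, alias, createNew, attempts) and return (passphrase, giveup, error). The following is a minimal sketch using only APIs visible in this diff; the fixed passphrase and printed values are illustrative, not the library's recommended pattern:

    package main

    import (
        "crypto/rand"
        "fmt"

        "github.com/docker/notary/trustmanager"
        "github.com/docker/notary/tuf/data"
        "github.com/docker/notary/tuf/utils"
    )

    func main() {
        // A trivial retriever that always supplies the same passphrase and never gives up.
        retriever := func(keyName, alias string, createNew bool, attempts int) (string, bool, error) {
            return "example-passphrase", false, nil
        }

        // Keys are held in memory; NewKeyFileStore would persist them under a base directory instead.
        ks := trustmanager.NewKeyMemoryStore(retriever)

        privKey, err := utils.GenerateED25519Key(rand.Reader)
        if err != nil {
            panic(err)
        }

        // Root keys carry no GUN, per the AddKey logic above.
        if err := ks.AddKey(trustmanager.KeyInfo{Role: data.CanonicalRootRole}, privKey); err != nil {
            panic(err)
        }

        retrieved, role, err := ks.GetKey(privKey.ID())
        fmt.Println(retrieved.ID(), role, err)
    }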
@ -1,67 +0,0 @@
package trustmanager

import (
    "os"
    "sync"
)

// MemoryFileStore is an implementation of Storage that keeps
// the contents in memory.
type MemoryFileStore struct {
    sync.Mutex

    files map[string][]byte
}

// NewMemoryFileStore creates a MemoryFileStore
func NewMemoryFileStore() *MemoryFileStore {
    return &MemoryFileStore{
        files: make(map[string][]byte),
    }
}

// Add writes data to a file with a given name
func (f *MemoryFileStore) Add(name string, data []byte) error {
    f.Lock()
    defer f.Unlock()

    f.files[name] = data
    return nil
}

// Remove removes a file identified by name
func (f *MemoryFileStore) Remove(name string) error {
    f.Lock()
    defer f.Unlock()

    if _, present := f.files[name]; !present {
        return os.ErrNotExist
    }
    delete(f.files, name)

    return nil
}

// Get returns the data given a file name
func (f *MemoryFileStore) Get(name string) ([]byte, error) {
    f.Lock()
    defer f.Unlock()

    fileData, present := f.files[name]
    if !present {
        return nil, os.ErrNotExist
    }

    return fileData, nil
}

// ListFiles lists all the files inside of a store
func (f *MemoryFileStore) ListFiles() []string {
    var list []string

    for name := range f.files {
        list = append(list, name)
    }

    return list
}
@ -1,42 +0,0 @@
package trustmanager

import (
    "errors"

    "github.com/docker/notary"
)

const (
    visible = notary.PubCertPerms
    private = notary.PrivKeyPerms
)

var (
    // ErrPathOutsideStore indicates that the returned path would be
    // outside the store
    ErrPathOutsideStore = errors.New("path outside file store")
)

// Storage implements the bare bones primitives (no hierarchy)
type Storage interface {
    // Add writes a file to the specified location, returning an error if this
    // is not possible (reasons may include permissions errors). The path is cleaned
    // before being made absolute against the store's base dir.
    Add(fileName string, data []byte) error

    // Remove deletes a file from the store relative to the store's base directory.
    // The path is cleaned before being made absolute to ensure no path traversal
    // outside the base directory is possible.
    Remove(fileName string) error

    // Get returns the file content found at fileName relative to the base directory
    // of the file store. The path is cleaned before being made absolute to ensure
    // path traversal outside the store is not possible. If the file is not found
    // an error to that effect is returned.
    Get(fileName string) ([]byte, error)

    // ListFiles returns a list of paths relative to the base directory of the
    // filestore. Any of these paths must be retrievable via the
    // Storage.Get method.
    ListFiles() []string
}
57 vendor/src/github.com/docker/notary/trustmanager/yubikey/import.go vendored Normal file
@ -0,0 +1,57 @@
// +build pkcs11

package yubikey

import (
    "encoding/pem"
    "errors"
    "github.com/docker/notary"
    "github.com/docker/notary/trustmanager"
    "github.com/docker/notary/tuf/utils"
)

// YubiImport is a wrapper around the YubiStore that allows us to import private
// keys to the yubikey
type YubiImport struct {
    dest          *YubiStore
    passRetriever notary.PassRetriever
}

// NewImporter returns a wrapper for the YubiStore provided that enables importing
// keys via the simple Set(string, []byte) interface
func NewImporter(ys *YubiStore, ret notary.PassRetriever) *YubiImport {
    return &YubiImport{
        dest:          ys,
        passRetriever: ret,
    }
}

// Set determines if we are allowed to set the given key on the Yubikey and
// calls through to YubiStore.AddKey if it's valid
func (s *YubiImport) Set(name string, bytes []byte) error {
    block, _ := pem.Decode(bytes)
    if block == nil {
        return errors.New("invalid PEM data, could not parse")
    }
    role, ok := block.Headers["role"]
    if !ok {
        return errors.New("no role found for key")
    }
    ki := trustmanager.KeyInfo{
        // GUN is ignored by YubiStore
        Role: role,
    }
    privKey, err := utils.ParsePEMPrivateKey(bytes, "")
    if err != nil {
        privKey, _, err = trustmanager.GetPasswdDecryptBytes(
            s.passRetriever,
            bytes,
            name,
            ki.Role,
        )
        if err != nil {
            return err
        }
    }
    return s.dest.AddKey(ki, privKey)
}
@ -17,10 +17,11 @@ import (
    "time"

    "github.com/Sirupsen/logrus"
    "github.com/docker/notary/passphrase"
    "github.com/docker/notary"
    "github.com/docker/notary/trustmanager"
    "github.com/docker/notary/tuf/data"
    "github.com/docker/notary/tuf/signed"
    "github.com/docker/notary/tuf/utils"
    "github.com/miekg/pkcs11"
)

@ -132,7 +133,7 @@ type yubiSlot struct {
// YubiPrivateKey represents a private key inside of a yubikey
type YubiPrivateKey struct {
    data.ECDSAPublicKey
    passRetriever passphrase.Retriever
    passRetriever notary.PassRetriever
    slot          []byte
    libLoader     pkcs11LibLoader
}

@ -143,9 +144,9 @@ type yubikeySigner struct {
}

// NewYubiPrivateKey returns a YubiPrivateKey, which implements the data.PrivateKey
// interface except that the private material is inacessible
// interface except that the private material is inaccessible
func NewYubiPrivateKey(slot []byte, pubKey data.ECDSAPublicKey,
    passRetriever passphrase.Retriever) *YubiPrivateKey {
    passRetriever notary.PassRetriever) *YubiPrivateKey {

    return &YubiPrivateKey{
        ECDSAPublicKey: pubKey,

@ -228,7 +229,7 @@ func addECDSAKey(
    session pkcs11.SessionHandle,
    privKey data.PrivateKey,
    pkcs11KeyID []byte,
    passRetriever passphrase.Retriever,
    passRetriever notary.PassRetriever,
    role string,
) error {
    logrus.Debugf("Attempting to add key to yubikey with ID: %s", privKey.ID())

@ -249,7 +250,7 @@ func addECDSAKey(

    // Hard-coded policy: the generated certificate expires in 10 years.
    startTime := time.Now()
    template, err := trustmanager.NewCertificate(role, startTime, startTime.AddDate(10, 0, 0))
    template, err := utils.NewCertificate(role, startTime, startTime.AddDate(10, 0, 0))
    if err != nil {
        return fmt.Errorf("failed to create the certificate template: %v", err)
    }

@ -345,7 +346,7 @@ func getECDSAKey(ctx IPKCS11Ctx, session pkcs11.SessionHandle, pkcs11KeyID []byt
}

// sign returns a signature for a given signature request
func sign(ctx IPKCS11Ctx, session pkcs11.SessionHandle, pkcs11KeyID []byte, passRetriever passphrase.Retriever, payload []byte) ([]byte, error) {
func sign(ctx IPKCS11Ctx, session pkcs11.SessionHandle, pkcs11KeyID []byte, passRetriever notary.PassRetriever, payload []byte) ([]byte, error) {
    err := login(ctx, session, passRetriever, pkcs11.CKU_USER, UserPin)
    if err != nil {
        return nil, fmt.Errorf("error logging in: %v", err)

@ -404,7 +405,7 @@ func sign(ctx IPKCS11Ctx, session pkcs11.SessionHandle, pkcs11KeyID []byte, pass
    return sig[:], nil
}

func yubiRemoveKey(ctx IPKCS11Ctx, session pkcs11.SessionHandle, pkcs11KeyID []byte, passRetriever passphrase.Retriever, keyID string) error {
func yubiRemoveKey(ctx IPKCS11Ctx, session pkcs11.SessionHandle, pkcs11KeyID []byte, passRetriever notary.PassRetriever, keyID string) error {
    err := login(ctx, session, passRetriever, pkcs11.CKU_SO, SOUserPin)
    if err != nil {
        return err

@ -615,7 +616,7 @@ func getNextEmptySlot(ctx IPKCS11Ctx, session pkcs11.SessionHandle) ([]byte, err

// YubiStore is a KeyStore for private keys inside a Yubikey
type YubiStore struct {
    passRetriever passphrase.Retriever
    passRetriever notary.PassRetriever
    keys          map[string]yubiSlot
    backupStore   trustmanager.KeyStore
    libLoader     pkcs11LibLoader

@ -623,7 +624,7 @@ type YubiStore struct {

// NewYubiStore returns a YubiStore, given a backup key store to write any
// generated keys to (usually a KeyFileStore)
func NewYubiStore(backupStore trustmanager.KeyStore, passphraseRetriever passphrase.Retriever) (
func NewYubiStore(backupStore trustmanager.KeyStore, passphraseRetriever notary.PassRetriever) (
    *YubiStore, error) {

    s := &YubiStore{

@ -653,7 +654,7 @@ func (s *YubiStore) ListKeys() map[string]trustmanager.KeyInfo {
    }
    ctx, session, err := SetupHSMEnv(pkcs11Lib, s.libLoader)
    if err != nil {
        logrus.Debugf("Failed to initialize PKCS11 environment: %s", err.Error())
        logrus.Debugf("No yubikey found, using alternative key storage: %s", err.Error())
        return nil
    }
    defer cleanup(ctx, session)

@ -697,7 +698,7 @@ func (s *YubiStore) addKey(keyID, role string, privKey data.PrivateKey) (

    ctx, session, err := SetupHSMEnv(pkcs11Lib, s.libLoader)
    if err != nil {
        logrus.Debugf("Failed to initialize PKCS11 environment: %s", err.Error())
        logrus.Debugf("No yubikey found, using alternative key storage: %s", err.Error())
        return false, err
    }
    defer cleanup(ctx, session)

@ -735,7 +736,7 @@ func (s *YubiStore) addKey(keyID, role string, privKey data.PrivateKey) (
func (s *YubiStore) GetKey(keyID string) (data.PrivateKey, string, error) {
    ctx, session, err := SetupHSMEnv(pkcs11Lib, s.libLoader)
    if err != nil {
        logrus.Debugf("Failed to initialize PKCS11 environment: %s", err.Error())
        logrus.Debugf("No yubikey found, using alternative key storage: %s", err.Error())
        if _, ok := err.(errHSMNotPresent); ok {
            err = trustmanager.ErrKeyNotFound{KeyID: keyID}
        }

@ -770,7 +771,7 @@ func (s *YubiStore) GetKey(keyID string) (data.PrivateKey, string, error) {
func (s *YubiStore) RemoveKey(keyID string) error {
    ctx, session, err := SetupHSMEnv(pkcs11Lib, s.libLoader)
    if err != nil {
        logrus.Debugf("Failed to initialize PKCS11 environment: %s", err.Error())
        logrus.Debugf("No yubikey found, using alternative key storage: %s", err.Error())
        return nil
    }
    defer cleanup(ctx, session)

@ -789,12 +790,6 @@ func (s *YubiStore) RemoveKey(keyID string) error {
    return err
}

// ExportKey doesn't work, because you can't export data from a Yubikey
func (s *YubiStore) ExportKey(keyID string) ([]byte, error) {
    logrus.Debugf("Attempting to export: %s key inside of YubiStore", keyID)
    return nil, errors.New("Keys cannot be exported from a Yubikey.")
}

// GetKeyInfo is not yet implemented
func (s *YubiStore) GetKeyInfo(keyID string) (trustmanager.KeyInfo, error) {
    return trustmanager.KeyInfo{}, fmt.Errorf("Not yet implemented")

@ -874,7 +869,7 @@ func IsAccessible() bool {
    return true
}

func login(ctx IPKCS11Ctx, session pkcs11.SessionHandle, passRetriever passphrase.Retriever, userFlag uint, defaultPassw string) error {
func login(ctx IPKCS11Ctx, session pkcs11.SessionHandle, passRetriever notary.PassRetriever, userFlag uint, defaultPassw string) error {
    // try default password
    err := ctx.Login(session, userFlag, defaultPassw)
    if err == nil {

@ -902,13 +897,12 @@ func login(ctx IPKCS11Ctx, session pkcs11.SessionHandle, passRetriever passphras
            return trustmanager.ErrAttemptsExceeded{}
        }

        // Try to convert PEM encoded bytes back to a PrivateKey using the passphrase
        // attempt to login. Loop if failed
        err = ctx.Login(session, userFlag, passwd)
        if err == nil {
            return nil
        }
    }
    return nil
}

func buildKeyMap(keys map[string]yubiSlot) map[string]trustmanager.KeyInfo {
37 vendor/src/github.com/docker/notary/trustpinning/ca.crt vendored Normal file
@ -0,0 +1,37 @@
-----BEGIN CERTIFICATE-----
MIIGMzCCBBugAwIBAgIBATANBgkqhkiG9w0BAQsFADBfMQswCQYDVQQGEwJVUzEL
MAkGA1UECAwCQ0ExFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDzANBgNVBAoMBkRv
Y2tlcjEaMBgGA1UEAwwRTm90YXJ5IFRlc3RpbmcgQ0EwHhcNMTUwNzE2MDQyNTAz
WhcNMjUwNzEzMDQyNTAzWjBfMRowGAYDVQQDDBFOb3RhcnkgVGVzdGluZyBDQTEL
MAkGA1UEBhMCVVMxFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDzANBgNVBAoMBkRv
Y2tlcjELMAkGA1UECAwCQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC
AQCwVVD4pK7z7pXPpJbaZ1Hg5eRXIcaYtbFPCnN0iqy9HsVEGnEn5BPNSEsuP+m0
5N0qVV7DGb1SjiloLXD1qDDvhXWk+giS9ppqPHPLVPB4bvzsqwDYrtpbqkYvO0YK
0SL3kxPXUFdlkFfgu0xjlczm2PhWG3Jd8aAtspL/L+VfPA13JUaWxSLpui1In8rh
gAyQTK6Q4Of6GbJYTnAHb59UoLXSzB5AfqiUq6L7nEYYKoPflPbRAIWL/UBm0c+H
ocms706PYpmPS2RQv3iOGmnn9hEVp3P6jq7WAevbA4aYGx5EsbVtYABqJBbFWAuw
wTGRYmzn0Mj0eTMge9ztYB2/2sxdTe6uhmFgpUXngDqJI5O9N3zPfvlEImCky3HM
jJoL7g5smqX9o1P+ESLh0VZzhh7IDPzQTXpcPIS/6z0l22QGkK/1N1PaADaUHdLL
vSav3y2BaEmPvf2fkZj8yP5eYgi7Cw5ONhHLDYHFcl9Zm/ywmdxHJETz9nfgXnsW
HNxDqrkCVO46r/u6rSrUt6hr3oddJG8s8Jo06earw6XU3MzM+3giwkK0SSM3uRPq
4AscR1Tv+E31AuOAmjqYQoT29bMIxoSzeljj/YnedwjW45pWyc3JoHaibDwvW9Uo
GSZBVy4hrM/Fa7XCWv1WfHNW1gDwaLYwDnl5jFmRBvcfuQIDAQABo4H5MIH2MIGR
BgNVHSMEgYkwgYaAFHUM1U3E4WyL1nvFd+dPY8f4O2hZoWOkYTBfMQswCQYDVQQG
EwJVUzELMAkGA1UECAwCQ0ExFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDzANBgNV
BAoMBkRvY2tlcjEaMBgGA1UEAwwRTm90YXJ5IFRlc3RpbmcgQ0GCCQDCeDLbemIT
SzASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEF
BQcDATAOBgNVHQ8BAf8EBAMCAUYwHQYDVR0OBBYEFHe48hcBcAp0bUVlTxXeRA4o
E16pMA0GCSqGSIb3DQEBCwUAA4ICAQAWUtAPdUFpwRq+N1SzGUejSikeMGyPZscZ
JBUCmhZoFufgXGbLO5OpcRLaV3Xda0t/5PtdGMSEzczeoZHWknDtw+79OBittPPj
Sh1oFDuPo35R7eP624lUCch/InZCphTaLx9oDLGcaK3ailQ9wjBdKdlBl8KNKIZp
a13aP5rnSm2Jva+tXy/yi3BSds3dGD8ITKZyI/6AFHxGvObrDIBpo4FF/zcWXVDj
paOmxplRtM4Hitm+sXGvfqJe4x5DuOXOnPrT3dHvRT6vSZUoKobxMqmRTOcrOIPa
EeMpOobshORuRntMDYvvgO3D6p6iciDW2Vp9N6rdMdfOWEQN8JVWvB7IxRHk9qKJ
vYOWVbczAt0qpMvXF3PXLjZbUM0knOdUKIEbqP4YUbgdzx6RtgiiY930Aj6tAtce
0fpgNlvjMRpSBuWTlAfNNjG/YhndMz9uI68TMfFpR3PcgVIv30krw/9VzoLi2Dpe
ow6DrGO6oi+DhN78P4jY/O9UczZK2roZL1Oi5P0RIxf23UZC7x1DlcN3nBr4sYSv
rBx4cFTMNpwU+nzsIi4djcFDKmJdEOyjMnkP2v0Lwe7yvK08pZdEu+0zbrq17kue
XpXLc7K68QB15yxzGylU5rRwzmC/YsAVyE4eoGu8PxWxrERvHby4B8YP0vAfOraL
lKmXlK4dTg==
-----END CERTIFICATE-----
@ -5,12 +5,11 @@ import (
    "errors"
    "fmt"
    "strings"
    "time"

    "github.com/Sirupsen/logrus"
    "github.com/docker/notary/trustmanager"
    "github.com/docker/notary/tuf/data"
    "github.com/docker/notary/tuf/signed"
    "github.com/docker/notary/tuf/utils"
)

// ErrValidationFail is returned when there is no valid trusted certificates

@ -98,18 +97,25 @@ func ValidateRoot(prevRoot *data.SignedRoot, root *data.Signed, gun string, trus
    // Retrieve all the leaf and intermediate certificates in root for which the CN matches the GUN
    allLeafCerts, allIntCerts := parseAllCerts(signedRoot)
    certsFromRoot, err := validRootLeafCerts(allLeafCerts, gun, true)
    validIntCerts := validRootIntCerts(allIntCerts)

    if err != nil {
        logrus.Debugf("error retrieving valid leaf certificates for: %s, %v", gun, err)
        return nil, &ErrValidationFail{Reason: "unable to retrieve valid leaf certificates"}
    }

    logrus.Debugf("found %d leaf certs, of which %d are valid leaf certs for %s", len(allLeafCerts), len(certsFromRoot), gun)

    // If we have a previous root, let's try to use it to validate that this new root is valid.
    if prevRoot != nil {
    havePrevRoot := prevRoot != nil
    if havePrevRoot {
        // Retrieve all the trusted certificates from our previous root
        // Note that we do not validate expiries here since our originally trusted root might have expired certs
        allTrustedLeafCerts, allTrustedIntCerts := parseAllCerts(prevRoot)
        trustedLeafCerts, err := validRootLeafCerts(allTrustedLeafCerts, gun, false)
        if err != nil {
            return nil, &ErrValidationFail{Reason: "could not retrieve trusted certs from previous root role data"}
        }

        // Use the certificates we found in the previous root for the GUN to verify its signatures
        // This could potentially be an empty set, in which case we will fail to verify

@ -121,23 +127,30 @@ func ValidateRoot(prevRoot *data.SignedRoot, root *data.Signed, gun string, trus
        if !ok {
            return nil, &ErrValidationFail{Reason: "could not retrieve previous root role data"}
        }

        err = signed.VerifySignatures(
            root, data.BaseRole{Keys: trustmanager.CertsToKeys(trustedLeafCerts, allTrustedIntCerts), Threshold: prevRootRoleData.Threshold})
            root, data.BaseRole{Keys: utils.CertsToKeys(trustedLeafCerts, allTrustedIntCerts), Threshold: prevRootRoleData.Threshold})
        if err != nil {
            logrus.Debugf("failed to verify TUF data for: %s, %v", gun, err)
            return nil, &ErrRootRotationFail{Reason: "failed to validate data with current trusted certificates"}
        }
    } else {
        logrus.Debugf("found no currently valid root certificates for %s, using trust_pinning config to bootstrap trust", gun)
        trustPinCheckFunc, err := NewTrustPinChecker(trustPinning, gun)
        // Clear the IsValid marks we could have received from VerifySignatures
        for i := range root.Signatures {
            root.Signatures[i].IsValid = false
        }
    }

    // Regardless of having a previous root or not, confirm that the new root validates against the trust pinning
    logrus.Debugf("checking root against trust_pinning config for %s", gun)
    trustPinCheckFunc, err := NewTrustPinChecker(trustPinning, gun, !havePrevRoot)
    if err != nil {
        return nil, &ErrValidationFail{Reason: err.Error()}
    }

    validPinnedCerts := map[string]*x509.Certificate{}
    for id, cert := range certsFromRoot {
        if ok := trustPinCheckFunc(cert, allIntCerts[id]); !ok {
        logrus.Debugf("checking trust-pinning for cert: %s", id)
        if ok := trustPinCheckFunc(cert, validIntCerts[id]); !ok {
            logrus.Debugf("trust-pinning check failed for cert: %s", id)
            continue
        }
        validPinnedCerts[id] = cert

@ -146,20 +159,20 @@ func ValidateRoot(prevRoot *data.SignedRoot, root *data.Signed, gun string, trus
        return nil, &ErrValidationFail{Reason: "unable to match any certificates to trust_pinning config"}
    }
    certsFromRoot = validPinnedCerts
    }

    // Validate the integrity of the new root (does it have valid signatures)
    // Note that certsFromRoot is guaranteed to be unchanged only if we had prior cert data for this GUN or enabled TOFUS
    // If we attempted to pin a certain certificate or CA, certsFromRoot could have been pruned accordingly
    err = signed.VerifySignatures(root, data.BaseRole{
        Keys: trustmanager.CertsToKeys(certsFromRoot, allIntCerts), Threshold: rootRole.Threshold})
        Keys: utils.CertsToKeys(certsFromRoot, validIntCerts), Threshold: rootRole.Threshold})
    if err != nil {
        logrus.Debugf("failed to verify TUF data for: %s, %v", gun, err)
        return nil, &ErrValidationFail{Reason: "failed to validate integrity of roots"}
    }

    logrus.Debugf("Root validation succeeded for %s", gun)
    return signedRoot, nil
    logrus.Debugf("root validation succeeded for %s", gun)
    // Call RootFromSigned to make sure we pick up on the IsValid markings from VerifySignatures
    return data.RootFromSigned(root)
}

// validRootLeafCerts returns a list of possibly (if checkExpiry is true) non-expired, non-sha1 certificates

@ -177,17 +190,9 @@ func validRootLeafCerts(allLeafCerts map[string]*x509.Certificate, gun string, c
            continue
        }
        // Make sure the certificate is not expired if checkExpiry is true
        if checkExpiry && time.Now().After(cert.NotAfter) {
            logrus.Debugf("error leaf certificate is expired")
            continue
        }

        // We don't allow root certificates that use SHA1
        if cert.SignatureAlgorithm == x509.SHA1WithRSA ||
            cert.SignatureAlgorithm == x509.DSAWithSHA1 ||
            cert.SignatureAlgorithm == x509.ECDSAWithSHA1 {

            logrus.Debugf("error certificate uses deprecated hashing algorithm (SHA1)")
        // and warn if it hasn't expired yet but is within 6 months of expiry
        if err := utils.ValidateCertificate(cert, checkExpiry); err != nil {
            logrus.Debugf("%s is invalid: %s", id, err.Error())
            continue
        }

@ -204,6 +209,24 @@ func validRootLeafCerts(allLeafCerts map[string]*x509.Certificate, gun string, c
    return validLeafCerts, nil
}

// validRootIntCerts filters the passed in structure of intermediate certificates to only include non-expired, non-sha1 certificates
// Note that this "validity" alone does not imply any measure of trust.
func validRootIntCerts(allIntCerts map[string][]*x509.Certificate) map[string][]*x509.Certificate {
    validIntCerts := make(map[string][]*x509.Certificate)

    // Go through every leaf cert ID, and build its valid intermediate certificate list
    for leafID, intCertList := range allIntCerts {
        for _, intCert := range intCertList {
            if err := utils.ValidateCertificate(intCert, true); err != nil {
                continue
            }
            validIntCerts[leafID] = append(validIntCerts[leafID], intCert)
        }

    }
    return validIntCerts
}

// parseAllCerts returns two maps, one with all of the leafCertificates and one
// with all the intermediate certificates found in signedRoot
func parseAllCerts(signedRoot *data.SignedRoot) (map[string]*x509.Certificate, map[string][]*x509.Certificate) {

@ -233,14 +256,14 @@ func parseAllCerts(signedRoot *data.SignedRoot) (map[string]*x509.Certificate, m

        // Decode all the x509 certificates that were bundled with this
        // Specific root key
        decodedCerts, err := trustmanager.LoadCertBundleFromPEM(key.Public())
        decodedCerts, err := utils.LoadCertBundleFromPEM(key.Public())
        if err != nil {
            logrus.Debugf("error while parsing root certificate with keyID: %s, %v", keyID, err)
            continue
        }

        // Get all non-CA certificates in the decoded certificates
        leafCertList := trustmanager.GetLeafCerts(decodedCerts)
        leafCertList := utils.GetLeafCerts(decodedCerts)

        // If we got no leaf certificates or we got more than one, fail
        if len(leafCertList) != 1 {

@ -260,7 +283,7 @@ func parseAllCerts(signedRoot *data.SignedRoot) (map[string]*x509.Certificate, m
        leafCerts[key.ID()] = leafCert

        // Get all the remainder certificates marked as a CA to be used as intermediates
        intermediateCerts := trustmanager.GetIntermediateCerts(decodedCerts)
        intermediateCerts := utils.GetIntermediateCerts(decodedCerts)
        intCerts[key.ID()] = intermediateCerts
    }
31 vendor/src/github.com/docker/notary/trustpinning/test.crt vendored Normal file
@ -0,0 +1,31 @@
-----BEGIN CERTIFICATE-----
MIIFKzCCAxWgAwIBAgIQRyp9QqcJfd3ayqdjiz8xIDALBgkqhkiG9w0BAQswODEa
MBgGA1UEChMRZG9ja2VyLmNvbS9ub3RhcnkxGjAYBgNVBAMTEWRvY2tlci5jb20v
bm90YXJ5MB4XDTE1MDcxNzA2MzQyM1oXDTE3MDcxNjA2MzQyM1owODEaMBgGA1UE
ChMRZG9ja2VyLmNvbS9ub3RhcnkxGjAYBgNVBAMTEWRvY2tlci5jb20vbm90YXJ5
MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAoQffrzsYnsH8vGf4Jh55
Cj5wrjUGzD/sHkaFHptjJ6ToJGJv5yMAPxzyInu5sIoGLJapnYVBoAU0YgI9qlAc
YA6SxaSwgm6rpvmnl8Qn0qc6ger3inpGaUJylWHuPwWkvcimQAqHZx2dQtL7g6kp
rmKeTWpWoWLw3JoAUZUVhZMd6a22ZL/DvAw+Hrogbz4XeyahFb9IH402zPxN6vga
JEFTF0Ji1jtNg0Mo4pb9SHsMsiw+LZK7SffHVKPxvd21m/biNmwsgExA3U8OOG8p
uygfacys5c8+ZrX+ZFG/cvwKz0k6/QfJU40s6MhXw5C2WttdVmsG9/7rGFYjHoIJ
weDyxgWk7vxKzRJI/un7cagDIaQsKrJQcCHIGFRlpIR5TwX7vl3R7cRncrDRMVvc
VSEG2esxbw7jtzIp/ypnVRxcOny7IypyjKqVeqZ6HgxZtTBVrF1O/aHo2kvlwyRS
Aus4kvh6z3+jzTm9EzfXiPQzY9BEk5gOLxhW9rc6UhlS+pe5lkaN/Hyqy/lPuq89
fMr2rr7lf5WFdFnze6WNYMAaW7dNA4NE0dyD53428ZLXxNVPL4WU66Gac6lynQ8l
r5tPsYIFXzh6FVaRKGQUtW1hz9ecO6Y27Rh2JsyiIxgUqk2ooxE69uN42t+dtqKC
1s8G/7VtY8GDALFLYTnzLvsCAwEAAaM1MDMwDgYDVR0PAQH/BAQDAgCgMBMGA1Ud
JQQMMAoGCCsGAQUFBwMDMAwGA1UdEwEB/wQCMAAwCwYJKoZIhvcNAQELA4ICAQBM
Oll3G/XBz8idiNdNJDWUh+5w3ojmwanrTBdCdqEk1WenaR6DtcflJx6Z3f/mwV4o
b1skOAX1yX5RCahJHUMxMicz/Q38pOVelGPrWnc3TJB+VKjGyHXlQDVkZFb+4+ef
wtj7HngXhHFFDSgjm3EdMndvgDQ7SQb4skOnCNS9iyX7eXxhFBCZmZL+HALKBj2B
yhV4IcBDqmp504t14rx9/Jvty0dG7fY7I51gEQpm4S02JML5xvTm1xfboWIhZODI
swEAO+ekBoFHbS1Q9KMPjIAw3TrCHH8x8XZq5zsYtAC1yZHdCKa26aWdy56A9eHj
O1VxzwmbNyXRenVuBYP+0wr3HVKFG4JJ4ZZpNZzQW/pqEPghCTJIvIueK652ByUc
//sv+nXd5f19LeES9pf0l253NDaFZPb6aegKfquWh8qlQBmUQ2GzaTLbtmNd28M6
W7iL7tkKZe1ZnBz9RKgtPrDjjWGZInjjcOU8EtT4SLq7kCVDmPs5MD8vaAm96JsE
jmLC3Uu/4k7HiDYX0i0mOWkFjZQMdVatcIF5FPSppwsSbW8QidnXt54UtwtFDEPz
lpjs7ybeQE71JXcMZnVIK4bjRXsEFPI98RpIlEdedbSUdYAncLNJRT7HZBMPGSwZ
0PNJuglnlr3srVzdW1dz2xQjdvLwxy6mNUF6rbQBWA==
-----END CERTIFICATE-----
@ -4,7 +4,6 @@ import (
    "crypto/x509"
    "fmt"
    "github.com/Sirupsen/logrus"
    "github.com/docker/notary/trustmanager"
    "github.com/docker/notary/tuf/utils"
    "strings"
)

@ -28,25 +27,29 @@ type trustPinChecker struct {
type CertChecker func(leafCert *x509.Certificate, intCerts []*x509.Certificate) bool

// NewTrustPinChecker returns a new certChecker function from a TrustPinConfig for a GUN
func NewTrustPinChecker(trustPinConfig TrustPinConfig, gun string) (CertChecker, error) {
func NewTrustPinChecker(trustPinConfig TrustPinConfig, gun string, firstBootstrap bool) (CertChecker, error) {
    t := trustPinChecker{gun: gun, config: trustPinConfig}
    // Determine the mode, and if it's even valid
    if pinnedCerts, ok := trustPinConfig.Certs[gun]; ok {
        logrus.Debugf("trust-pinning using Cert IDs")
        t.pinnedCertIDs = pinnedCerts
        return t.certsCheck, nil
    }

    if caFilepath, err := getPinnedCAFilepathByPrefix(gun, trustPinConfig); err == nil {
        logrus.Debugf("trust-pinning using root CA bundle at: %s", caFilepath)

        // Try to add the CA certs from its bundle file to our certificate store,
        // and use it to validate certs in the root.json later
        caCerts, err := trustmanager.LoadCertBundleFromFile(caFilepath)
        caCerts, err := utils.LoadCertBundleFromFile(caFilepath)
        if err != nil {
            return nil, fmt.Errorf("could not load root cert from CA path")
        }
        // Now only consider certificates that are direct children from this CA cert chain
        caRootPool := x509.NewCertPool()
        for _, caCert := range caCerts {
            if err = trustmanager.ValidateCertificate(caCert); err != nil {
            if err = utils.ValidateCertificate(caCert, true); err != nil {
                logrus.Debugf("ignoring root CA certificate with CN %s in bundle: %s", caCert.Subject.CommonName, err)
                continue
            }
            caRootPool.AddCert(caCert)

@ -59,16 +62,18 @@ func NewTrustPinChecker(trustPinConfig TrustPinConfig, gun string) (CertChecker,
        return t.caCheck, nil
    }

    if !trustPinConfig.DisableTOFU {
        return t.tofusCheck, nil
    }
    // If TOFUs is disabled and we don't have any previous trusted root data for this GUN, we error out
    if trustPinConfig.DisableTOFU && firstBootstrap {
        return nil, fmt.Errorf("invalid trust pinning specified")

    }
    return t.tofusCheck, nil
}

func (t trustPinChecker) certsCheck(leafCert *x509.Certificate, intCerts []*x509.Certificate) bool {
    // reconstruct the leaf + intermediate cert chain, which is bundled as {leaf, intermediates...},
    // in order to get the matching id in the root file
    key, err := trustmanager.CertBundleToKey(leafCert, intCerts)
    key, err := utils.CertBundleToKey(leafCert, intCerts)
    if err != nil {
        logrus.Debug("error creating cert bundle: ", err.Error())
        return false

@ -84,9 +89,11 @@ func (t trustPinChecker) caCheck(leafCert *x509.Certificate, intCerts []*x509.Ce
    }
    // Attempt to find a valid certificate chain from the leaf cert to CA root
    // Use this certificate if such a valid chain exists (possibly using intermediates)
    if _, err := leafCert.Verify(x509.VerifyOptions{Roots: t.pinnedCAPool, Intermediates: caIntPool}); err == nil {
    var err error
    if _, err = leafCert.Verify(x509.VerifyOptions{Roots: t.pinnedCAPool, Intermediates: caIntPool}); err == nil {
        return true
    }
    logrus.Debugf("unable to find a valid certificate chain from leaf cert to CA root: %s", err)
    return false
}
@ -1,36 +1,6 @@
# GOTUF

This is still a work in progress but will shortly be a fully compliant
Go implementation of [The Update Framework (TUF)](http://theupdateframework.com/).

## Where's the CLI

This repository provides a library only. The [Notary project](https://github.com/docker/notary)
from Docker should be considered the official CLI to be used with this implementation of TUF.

## TODOs:

- [X] Add Targets to existing repo
- [X] Sign metadata files
- [X] Refactor TufRepo to take care of signing ~~and verification~~
- [ ] Ensure consistent capitalization in naming (TUF\_\_\_ vs Tuf\_\_\_)
- [X] Make caching of metadata files smarter - PR #5
- [ ] ~~Add configuration for CLI commands. Order of configuration priority from most to least: flags, config file, defaults~~ Notary should be the official CLI
- [X] Reasses organization of data types. Possibly consolidate a few things into the data package but break up package into a few more distinct files
- [ ] Comprehensive test cases
- [ ] Delete files no longer in use
- [ ] Fix up errors. Some have to be instantiated, others don't, the inconsistency is annoying.
- [X] Bump version numbers in meta files (could probably be done better)

## Credits

This implementation was originally forked from [flynn/go-tuf](https://github.com/flynn/go-tuf),
however in attempting to add delegations I found I was making such
significant changes that I could not maintain backwards compatibility
without the code becoming overly convoluted.

Some features such as pluggable verifiers have already been merged upstream to flynn/go-tuf
and we are in discussion with [titanous](https://github.com/titanous) about working to merge the 2 implementations.
This implementation was originally forked from [flynn/go-tuf](https://github.com/flynn/go-tuf)

This implementation retains the same 3 Clause BSD license present on
the original flynn implementation.
@ -59,8 +59,9 @@ type RepoBuilder interface {
    Load(roleName string, content []byte, minVersion int, allowExpired bool) error
    GenerateSnapshot(prev *data.SignedSnapshot) ([]byte, int, error)
    GenerateTimestamp(prev *data.SignedTimestamp) ([]byte, int, error)
    Finish() (*Repo, error)
    Finish() (*Repo, *Repo, error)
    BootstrapNewBuilder() RepoBuilder
    BootstrapNewBuilderWithNewTrustpin(trustpin trustpinning.TrustPinConfig) RepoBuilder

    // informative functions
    IsLoaded(roleName string) bool

@ -80,8 +81,11 @@ func (f finishedBuilder) GenerateSnapshot(prev *data.SignedSnapshot) ([]byte, in
func (f finishedBuilder) GenerateTimestamp(prev *data.SignedTimestamp) ([]byte, int, error) {
    return nil, 0, ErrBuildDone
}
func (f finishedBuilder) Finish() (*Repo, error) { return nil, ErrBuildDone }
func (f finishedBuilder) Finish() (*Repo, *Repo, error) { return nil, nil, ErrBuildDone }
func (f finishedBuilder) BootstrapNewBuilder() RepoBuilder { return f }
func (f finishedBuilder) BootstrapNewBuilderWithNewTrustpin(trustpin trustpinning.TrustPinConfig) RepoBuilder {
    return f
}
func (f finishedBuilder) IsLoaded(roleName string) bool { return false }
func (f finishedBuilder) GetLoadedVersion(roleName string) int { return 0 }
func (f finishedBuilder) GetConsistentInfo(roleName string) ConsistentInfo {

@ -90,12 +94,21 @@ func (f finishedBuilder) GetConsistentInfo(roleName string) ConsistentInfo {

// NewRepoBuilder is the only way to get a pre-built RepoBuilder
func NewRepoBuilder(gun string, cs signed.CryptoService, trustpin trustpinning.TrustPinConfig) RepoBuilder {
    return &repoBuilderWrapper{RepoBuilder: &repoBuilder{
        repo: NewRepo(cs),
    return NewBuilderFromRepo(gun, NewRepo(cs), trustpin)
}

// NewBuilderFromRepo allows us to bootstrap a builder given existing repo data.
// YOU PROBABLY SHOULDN'T BE USING THIS OUTSIDE OF TESTING CODE!!!
func NewBuilderFromRepo(gun string, repo *Repo, trustpin trustpinning.TrustPinConfig) RepoBuilder {
    return &repoBuilderWrapper{
        RepoBuilder: &repoBuilder{
            repo:                 repo,
            invalidRoles:         NewRepo(nil),
            gun:                  gun,
            trustpin:             trustpin,
            loadedNotChecksummed: make(map[string][]byte),
        }}
        },
    }
}

// repoBuilderWrapper embeds a repoBuilder, but once Finish is called, swaps

@ -104,7 +117,7 @@ type repoBuilderWrapper struct {
    RepoBuilder
}

func (rbw *repoBuilderWrapper) Finish() (*Repo, error) {
func (rbw *repoBuilderWrapper) Finish() (*Repo, *Repo, error) {
    switch rbw.RepoBuilder.(type) {
    case finishedBuilder:
        return rbw.RepoBuilder.Finish()

@ -118,6 +131,7 @@ func (rbw *repoBuilderWrapper) Finish() (*Repo, error) {
// repoBuilder actually builds a tuf.Repo
type repoBuilder struct {
    repo         *Repo
    invalidRoles *Repo

    // needed for root trust pinning verification
    gun string

@ -136,13 +150,14 @@ type repoBuilder struct {
    nextRootChecksum *data.FileMeta
}

func (rb *repoBuilder) Finish() (*Repo, error) {
    return rb.repo, nil
func (rb *repoBuilder) Finish() (*Repo, *Repo, error) {
    return rb.repo, rb.invalidRoles, nil
}

func (rb *repoBuilder) BootstrapNewBuilder() RepoBuilder {
    return &repoBuilderWrapper{RepoBuilder: &repoBuilder{
        repo:                 NewRepo(rb.repo.cryptoService),
        invalidRoles:         NewRepo(nil),
        gun:                  rb.gun,
        loadedNotChecksummed: make(map[string][]byte),
        trustpin:             rb.trustpin,

@ -152,6 +167,18 @@ func (rb *repoBuilder) BootstrapNewBuilder() RepoBuilder {
    }}
}

func (rb *repoBuilder) BootstrapNewBuilderWithNewTrustpin(trustpin trustpinning.TrustPinConfig) RepoBuilder {
    return &repoBuilderWrapper{RepoBuilder: &repoBuilder{
        repo:                 NewRepo(rb.repo.cryptoService),
        gun:                  rb.gun,
        loadedNotChecksummed: make(map[string][]byte),
        trustpin:             trustpin,

        prevRoot:                 rb.repo.Root,
        bootstrappedRootChecksum: rb.nextRootChecksum,
    }}
}

// IsLoaded returns whether a particular role has already been loaded
func (rb *repoBuilder) IsLoaded(roleName string) bool {
    switch roleName {

@ -338,7 +365,7 @@ func (rb *repoBuilder) GenerateTimestamp(prev *data.SignedTimestamp) ([]byte, in
        return nil, 0, ErrInvalidBuilderInput{msg: "timestamp has already been loaded"}
    }

    // SignTimetamp always serializes the loaded snapshot and signs in the data, so we must always
    // SignTimestamp always serializes the loaded snapshot and signs in the data, so we must always
    // have the snapshot loaded first
    if err := rb.checkPrereqsLoaded([]string{data.CanonicalRootRole, data.CanonicalSnapshotRole}); err != nil {
        return nil, 0, err

@ -411,7 +438,6 @@ func (rb *repoBuilder) loadRoot(content []byte, minVersion int, allowExpired boo
    if err != nil { // this should never happen since the root has been validated
        return err
    }

    rb.repo.Root = signedRoot
    rb.repo.originalRootRole = rootRole
    return nil

@ -524,6 +550,7 @@ func (rb *repoBuilder) loadTargets(content []byte, minVersion int, allowExpired
        }
    }

    signedTargets.Signatures = signedObj.Signatures
    rb.repo.Targets[roleName] = signedTargets
    return nil
}

@ -534,7 +561,8 @@ func (rb *repoBuilder) loadDelegation(roleName string, content []byte, minVersio
        return err
    }

    signedObj, err := rb.bytesToSignedAndValidateSigs(delegationRole.BaseRole, content)
    // bytesToSigned checks checksum
    signedObj, err := rb.bytesToSigned(content, roleName)
    if err != nil {
        return err
    }

@ -545,15 +573,24 @@ func (rb *repoBuilder) loadDelegation(roleName string, content []byte, minVersio
    }

    if err := signed.VerifyVersion(&(signedTargets.Signed.SignedCommon), minVersion); err != nil {
        // don't capture in invalidRoles because the role we received is a rollback
        return err
    }

    // verify signature
    if err := signed.VerifySignatures(signedObj, delegationRole.BaseRole); err != nil {
        rb.invalidRoles.Targets[roleName] = signedTargets
        return err
    }

    if !allowExpired { // check must go at the end because all other validation should pass
        if err := signed.VerifyExpiry(&(signedTargets.Signed.SignedCommon), roleName); err != nil {
            rb.invalidRoles.Targets[roleName] = signedTargets
            return err
        }
    }

    signedTargets.Signatures = signedObj.Signatures
    rb.repo.Targets[roleName] = signedTargets
    return nil
}
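For context on the Finish signature change above: callers now receive both the validated repo and a second repo holding roles that failed signature or expiry checks. A hedged sketch of the new call shape, using only the types visible in this diff (metadata loading elided):

    package example

    import (
        "github.com/docker/notary/trustpinning"
        "github.com/docker/notary/tuf"
        "github.com/docker/notary/tuf/signed"
    )

    // finishExample shows the new two-repo return: the built repo plus the
    // roles captured in invalidRoles during loadDelegation above.
    func finishExample(gun string, cs signed.CryptoService, pin trustpinning.TrustPinConfig) (*tuf.Repo, *tuf.Repo, error) {
        builder := tuf.NewRepoBuilder(gun, cs, pin)
        // ... builder.Load(...) calls for root/targets/snapshot/timestamp would go here ...
        return builder.Finish()
    }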
@ -1,14 +0,0 @@
package client

import (
    "fmt"
)

// ErrCorruptedCache - local data is incorrect
type ErrCorruptedCache struct {
    file string
}

func (e ErrCorruptedCache) Error() string {
    return fmt.Sprintf("cache is corrupted: %s", e.file)
}
@ -42,3 +42,12 @@ func (e ErrMismatchedChecksum) Error() string {
    return fmt.Sprintf("%s checksum for %s did not match: expected %s", e.alg, e.name,
        e.expected)
}

// ErrCertExpired is the error to be returned when a certificate has expired
type ErrCertExpired struct {
    CN string
}

func (e ErrCertExpired) Error() string {
    return fmt.Sprintf("certificate with CN %s is expired", e.CN)
}
|
@ -86,6 +86,31 @@ func IsDelegation(role string) bool {
|
|||
isClean
|
||||
}
|
||||
|
||||
// IsBaseRole checks if the role is a base role
|
||||
func IsBaseRole(role string) bool {
|
||||
for _, baseRole := range BaseRoles {
|
||||
if role == baseRole {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// IsWildDelegation determines if a role represents a valid wildcard delegation
|
||||
// path, i.e. targets/*, targets/foo/*.
|
||||
// The wildcard may only appear as the final part of the delegation and must
|
||||
// be a whole segment, i.e. targets/foo* is not a valid wildcard delegation.
|
||||
func IsWildDelegation(role string) bool {
|
||||
if path.Clean(role) != role {
|
||||
return false
|
||||
}
|
||||
base := path.Dir(role)
|
||||
if !(IsDelegation(base) || base == CanonicalTargetsRole) {
|
||||
return false
|
||||
}
|
||||
return role[len(role)-2:] == "/*"
|
||||
}
|
||||
|
||||
// BaseRole is an internal representation of a root/targets/snapshot/timestamp role, with its public keys included
|
||||
type BaseRole struct {
|
||||
Keys map[string]PublicKey
|
||||
|
|
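A brief illustration of what the new IsWildDelegation above accepts and rejects; this sketch is not part of the commit and the sample role names are arbitrary:

    package example

    import "github.com/docker/notary/tuf/data"

    // wildcardExamples exercises the wildcard rules: the "*" must be the
    // whole final path segment under targets/ or an existing delegation.
    func wildcardExamples() []bool {
        return []bool{
            data.IsWildDelegation("targets/*"),       // true: wildcard is a whole final segment
            data.IsWildDelegation("targets/foo/*"),   // true: base "targets/foo" is a delegation path
            data.IsWildDelegation("targets/foo*"),    // false: wildcard is not its own segment
            data.IsWildDelegation("targets/foo/*/*"), // false: base contains a wildcard itself
        }
    }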
|
@ -107,7 +107,10 @@ func (t *SignedTargets) BuildDelegationRole(roleName string) (DelegationRole, er
|
|||
pubKey, ok := t.Signed.Delegations.Keys[keyID]
|
||||
if !ok {
|
||||
// Couldn't retrieve all keys, so stop walking and return invalid role
|
||||
return DelegationRole{}, ErrInvalidRole{Role: roleName, Reason: "delegation does not exist with all specified keys"}
|
||||
return DelegationRole{}, ErrInvalidRole{
|
||||
Role: roleName,
|
||||
Reason: "role lists unknown key " + keyID + " as a signing key",
|
||||
}
|
||||
}
|
||||
pubKeys[keyID] = pubKey
|
||||
}
|
||||
|
|
|
@ -111,6 +111,7 @@ type Signature struct {
|
|||
KeyID string `json:"keyid"`
|
||||
Method SigAlgorithm `json:"method"`
|
||||
Signature []byte `json:"sig"`
|
||||
IsValid bool `json:"-"`
|
||||
}
|
||||
|
||||
// Files is the map of paths to file meta container in targets and delegations
|
||||
|
@ -161,6 +162,40 @@ func CheckHashes(payload []byte, name string, hashes Hashes) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// CompareMultiHashes verifies that the two Hashes passed in can represent the same data.
|
||||
// This means that both maps must have at least one key defined for which they map, and no conflicts.
|
||||
// Note that we check the intersection of map keys, which adds support for non-default hash algorithms in notary
|
||||
func CompareMultiHashes(hashes1, hashes2 Hashes) error {
|
||||
// First check if the two hash structures are valid
|
||||
if err := CheckValidHashStructures(hashes1); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := CheckValidHashStructures(hashes2); err != nil {
|
||||
return err
|
||||
}
|
||||
// Check if they have at least one matching hash, and no conflicts
|
||||
cnt := 0
|
||||
for hashAlg, hash1 := range hashes1 {
|
||||
|
||||
hash2, ok := hashes2[hashAlg]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
if subtle.ConstantTimeCompare(hash1[:], hash2[:]) == 0 {
|
||||
return fmt.Errorf("mismatched %s checksum", hashAlg)
|
||||
}
|
||||
// If we reached here, we had a match
|
||||
cnt++
|
||||
}
|
||||
|
||||
if cnt == 0 {
|
||||
return fmt.Errorf("at least one matching hash needed")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CheckValidHashStructures returns an error, or nil, depending on whether
|
||||
// the content of the hashes is valid or not.
|
||||
func CheckValidHashStructures(hashes Hashes) error {
|
||||
|
|
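As a hedged illustration of the new CompareMultiHashes semantics (at least one shared algorithm, no conflicting digests) — assuming data.Hashes maps algorithm names to digest bytes, as the hash1[:] comparison above suggests, and that CheckValidHashStructures accepts standard sha256/sha512 entries:

    package example

    import (
        "crypto/sha256"
        "crypto/sha512"
        "fmt"

        "github.com/docker/notary/tuf/data"
    )

    func compareExample(payload []byte) error {
        d256 := sha256.Sum256(payload)
        d512 := sha512.Sum512(payload)

        a := data.Hashes{"sha256": d256[:]}
        b := data.Hashes{"sha256": d256[:], "sha512": d512[:]}

        // Succeeds: sha256 is present on both sides and matches;
        // sha512 appears only in b, so it is simply not compared.
        if err := data.CompareMultiHashes(a, b); err != nil {
            return fmt.Errorf("unexpected mismatch: %v", err)
        }
        return nil
    }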
|
@ -6,6 +6,7 @@ import (
|
|||
|
||||
"github.com/docker/notary/trustmanager"
|
||||
"github.com/docker/notary/tuf/data"
|
||||
"github.com/docker/notary/tuf/utils"
|
||||
)
|
||||
|
||||
type edCryptoKey struct {
|
||||
|
@ -72,7 +73,7 @@ func (e *Ed25519) Create(role, gun, algorithm string) (data.PublicKey, error) {
|
|||
return nil, errors.New("only ED25519 supported by this cryptoservice")
|
||||
}
|
||||
|
||||
private, err := trustmanager.GenerateED25519Key(rand.Reader)
|
||||
private, err := utils.GenerateED25519Key(rand.Reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -95,7 +96,10 @@ func (e *Ed25519) PublicKeys(keyIDs ...string) (map[string]data.PublicKey, error
|
|||
|
||||
// GetKey returns a single public key based on the ID
|
||||
func (e *Ed25519) GetKey(keyID string) data.PublicKey {
|
||||
return data.PublicKeyFromPrivate(e.keys[keyID].privKey)
|
||||
if privKey, _, err := e.GetPrivateKey(keyID); err == nil {
|
||||
return data.PublicKeyFromPrivate(privKey)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetPrivateKey returns a single private key and role if present, based on the ID
|
||||
|
|
|
@ -14,12 +14,17 @@ type ErrInsufficientSignatures struct {
|
|||
}
|
||||
|
||||
func (e ErrInsufficientSignatures) Error() string {
|
||||
candidates := strings.Join(e.MissingKeyIDs, ", ")
|
||||
if e.FoundKeys == 0 {
|
||||
return fmt.Sprintf("signing keys not available, need %d keys out of: %s", e.NeededKeys, candidates)
|
||||
candidates := ""
|
||||
if len(e.MissingKeyIDs) > 0 {
|
||||
candidates = fmt.Sprintf(" (%s)", strings.Join(e.MissingKeyIDs, ", "))
|
||||
}
|
||||
return fmt.Sprintf("not enough signing keys: got %d of %d needed keys, other candidates: %s",
|
||||
e.FoundKeys, e.NeededKeys, candidates)
|
||||
|
||||
if e.FoundKeys == 0 {
|
||||
return fmt.Sprintf("signing keys not available: need %d keys from %d possible keys%s",
|
||||
e.NeededKeys, len(e.MissingKeyIDs), candidates)
|
||||
}
|
||||
return fmt.Sprintf("not enough signing keys: found %d of %d needed keys - %d other possible keys%s",
|
||||
e.FoundKeys, e.NeededKeys, len(e.MissingKeyIDs), candidates)
|
||||
}
|
||||
|
||||
// ErrExpired indicates a piece of metadata has expired
|
||||
|
|
|
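For reference, the reworked messages render like this. A sketch, assuming the error type lives in the tuf/signed package with the fields shown above (the key IDs are hypothetical):

package main

import (
	"fmt"

	"github.com/docker/notary/tuf/signed"
)

func main() {
	err := signed.ErrInsufficientSignatures{
		FoundKeys:     1,
		NeededKeys:    2,
		MissingKeyIDs: []string{"deadbeef", "cafebabe"},
	}
	// not enough signing keys: found 1 of 2 needed keys - 2 other possible keys (deadbeef, cafebabe)
	fmt.Println(err)

	err.FoundKeys = 0
	// signing keys not available: need 2 keys from 2 possible keys (deadbeef, cafebabe)
	fmt.Println(err)
}
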
@@ -100,7 +100,7 @@ func Sign(service CryptoService, s *data.Signed, signingKeys []data.PublicKey,
 			// key is no longer a valid signing key
 			continue
 		}
-		if err := VerifySignature(*s.Signed, sig, k); err != nil {
+		if err := VerifySignature(*s.Signed, &sig, k); err != nil {
 			// signature is no longer valid
 			continue
 		}

@@ -66,7 +66,8 @@ func VerifySignatures(s *data.Signed, roleData data.BaseRole) error {
 	}

 	valid := make(map[string]struct{})
-	for _, sig := range s.Signatures {
+	for i := range s.Signatures {
+		sig := &(s.Signatures[i])
 		logrus.Debug("verifying signature for key ID: ", sig.KeyID)
 		key, ok := roleData.Keys[sig.KeyID]
 		if !ok {

@@ -82,17 +83,20 @@ func VerifySignatures(s *data.Signed, roleData data.BaseRole) error {
 			continue
 		}
 		valid[sig.KeyID] = struct{}{}
-
 	}
 	if len(valid) < roleData.Threshold {
-		return ErrRoleThreshold{}
+		return ErrRoleThreshold{
+			Msg: fmt.Sprintf("valid signatures did not meet threshold for %s", roleData.Name),
+		}
 	}

 	return nil
 }

 // VerifySignature checks a single signature and public key against a payload
-func VerifySignature(msg []byte, sig data.Signature, pk data.PublicKey) error {
+// If the signature is verified, the signature's is valid field will actually
+// be mutated to be equal to the boolean true
+func VerifySignature(msg []byte, sig *data.Signature, pk data.PublicKey) error {
 	// method lookup is consistent due to Unmarshal JSON doing lower case for us.
 	method := sig.Method
 	verifier, ok := Verifiers[method]

@@ -103,5 +107,6 @@ func VerifySignature(msg []byte, sig *data.Signature, pk data.PublicKey) error {
 	if err := verifier.Verify(pk, sig.Signature, msg); err != nil {
 		return fmt.Errorf("signature was invalid\n")
 	}
+	sig.IsValid = true
 	return nil
 }

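The switch from a value-range loop to indexing matters because VerifySignature now mutates the signature it is handed: ranging by value would set IsValid on a copy and the result would be lost. A minimal illustration of the difference:

package main

import (
	"fmt"

	"github.com/docker/notary/tuf/data"
)

func main() {
	sigs := []data.Signature{{KeyID: "k1"}}

	for _, sig := range sigs {
		sig.IsValid = true // mutates a copy; the slice element is unchanged
	}
	fmt.Println(sigs[0].IsValid) // false

	for i := range sigs {
		sig := &(sigs[i])
		sig.IsValid = true // mutates the element in place
	}
	fmt.Println(sigs[0].IsValid) // true
}
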
@@ -1,102 +0,0 @@
-package store
-
-import (
-	"fmt"
-	"github.com/docker/notary"
-	"io/ioutil"
-	"os"
-	"path"
-	"path/filepath"
-)
-
-// NewFilesystemStore creates a new store in a directory tree
-func NewFilesystemStore(baseDir, metaSubDir, metaExtension string) (*FilesystemStore, error) {
-	metaDir := path.Join(baseDir, metaSubDir)
-
-	// Make sure we can create the necessary dirs and they are writable
-	err := os.MkdirAll(metaDir, 0700)
-	if err != nil {
-		return nil, err
-	}
-
-	return &FilesystemStore{
-		baseDir:       baseDir,
-		metaDir:       metaDir,
-		metaExtension: metaExtension,
-	}, nil
-}
-
-// FilesystemStore is a store in a locally accessible directory
-type FilesystemStore struct {
-	baseDir       string
-	metaDir       string
-	metaExtension string
-}
-
-func (f *FilesystemStore) getPath(name string) string {
-	fileName := fmt.Sprintf("%s.%s", name, f.metaExtension)
-	return filepath.Join(f.metaDir, fileName)
-}
-
-// GetMeta returns the meta for the given name (a role) up to size bytes
-// If size is "NoSizeLimit", this corresponds to "infinite," but we cut off at a
-// predefined threshold "notary.MaxDownloadSize".
-func (f *FilesystemStore) GetMeta(name string, size int64) ([]byte, error) {
-	meta, err := ioutil.ReadFile(f.getPath(name))
-	if err != nil {
-		if os.IsNotExist(err) {
-			err = ErrMetaNotFound{Resource: name}
-		}
-		return nil, err
-	}
-	if size == NoSizeLimit {
-		size = notary.MaxDownloadSize
-	}
-	// Only return up to size bytes
-	if int64(len(meta)) < size {
-		return meta, nil
-	}
-	return meta[:size], nil
-}
-
-// SetMultiMeta sets the metadata for multiple roles in one operation
-func (f *FilesystemStore) SetMultiMeta(metas map[string][]byte) error {
-	for role, blob := range metas {
-		err := f.SetMeta(role, blob)
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// SetMeta sets the meta for a single role
-func (f *FilesystemStore) SetMeta(name string, meta []byte) error {
-	fp := f.getPath(name)
-
-	// Ensures the parent directories of the file we are about to write exist
-	err := os.MkdirAll(filepath.Dir(fp), 0700)
-	if err != nil {
-		return err
-	}
-
-	// if something already exists, just delete it and re-write it
-	os.RemoveAll(fp)
-
-	// Write the file to disk
-	if err = ioutil.WriteFile(fp, meta, 0600); err != nil {
-		return err
-	}
-	return nil
-}
-
-// RemoveAll clears the existing filestore by removing its base directory
-func (f *FilesystemStore) RemoveAll() error {
-	return os.RemoveAll(f.baseDir)
-}
-
-// RemoveMeta removes the metadata for a single role - if the metadata doesn't
-// exist, no error is returned
-func (f *FilesystemStore) RemoveMeta(name string) error {
-	return os.RemoveAll(f.getPath(name)) // RemoveAll succeeds if path doesn't exist
-}

vendor/src/github.com/docker/notary/tuf/tuf.go vendored (103 changes)

@@ -77,11 +77,10 @@ type Repo struct {
 // If the Repo will only be used for reading, the CryptoService
 // can be nil.
 func NewRepo(cryptoService signed.CryptoService) *Repo {
-	repo := &Repo{
+	return &Repo{
 		Targets:       make(map[string]*data.SignedTargets),
 		cryptoService: cryptoService,
 	}
-	return repo
 }

 // AddBaseKeys is used to add keys to the role in root.json

@@ -245,6 +244,21 @@ func (tr *Repo) GetDelegationRole(name string) (data.DelegationRole, error) {
 			if err != nil {
 				return err
 			}
+			// Check all public key certificates in the role for expiry
+			// Currently we do not reject expired delegation keys but warn if they might expire soon or have already
+			for keyID, pubKey := range delgRole.Keys {
+				certFromKey, err := utils.LoadCertFromPEM(pubKey.Public())
+				if err != nil {
+					continue
+				}
+				if err := utils.ValidateCertificate(certFromKey, true); err != nil {
+					if _, ok := err.(data.ErrCertExpired); !ok {
+						// do not allow other invalid cert errors
+						return err
+					}
+					logrus.Warnf("error with delegation %s key ID %d: %s", delgRole.Name, keyID, err)
+				}
+			}
 			foundRole = &delgRole
 			return StopWalk{}
 		}

@@ -325,17 +339,16 @@ func delegationUpdateVisitor(roleName string, addKeys data.KeyList, removeKeys,
 			break
 		}
 	}
-	// We didn't find the role earlier, so create it only if we have keys to add
+	// We didn't find the role earlier, so create it.
+	if addKeys == nil {
+		addKeys = data.KeyList{} // initialize to empty list if necessary so calling .IDs() below won't panic
+	}
 	if delgRole == nil {
-		if len(addKeys) > 0 {
 		delgRole, err = data.NewRole(roleName, newThreshold, addKeys.IDs(), addPaths)
 		if err != nil {
 			return err
 		}
-		} else {
-			// If we can't find the role and didn't specify keys to add, this is an error
-			return data.ErrInvalidRole{Role: roleName, Reason: "cannot create new delegation without keys"}
-		}
+
 	}
 	// Add the key IDs to the role and the keys themselves to the parent
 	for _, k := range addKeys {

@@ -345,7 +358,7 @@ func delegationUpdateVisitor(roleName string, addKeys data.KeyList, removeKeys,
 	}
 	// Make sure we have a valid role still
 	if len(delgRole.KeyIDs) < delgRole.Threshold {
-		return data.ErrInvalidRole{Role: roleName, Reason: "insufficient keys to meet threshold"}
+		logrus.Warnf("role %s has fewer keys than its threshold of %d; it will not be usable until keys are added to it", delgRole.Name, delgRole.Threshold)
 	}
 	// NOTE: this closure CANNOT error after this point, as we've committed to editing the SignedTargets metadata in the repo object.
 	// Any errors related to updating this delegation must occur before this point.

@@ -392,12 +405,78 @@ func (tr *Repo) UpdateDelegationKeys(roleName string, addKeys data.KeyList, remo
 	// Walk to the parent of this delegation, since that is where its role metadata exists
 	// We do not have to verify that the walker reached its desired role in this scenario
 	// since we've already done another walk to the parent role in VerifyCanSign, and potentially made a targets file
-	err := tr.WalkTargets("", parent, delegationUpdateVisitor(roleName, addKeys, removeKeys, []string{}, []string{}, false, newThreshold))
+	return tr.WalkTargets("", parent, delegationUpdateVisitor(roleName, addKeys, removeKeys, []string{}, []string{}, false, newThreshold))
 }

+// PurgeDelegationKeys removes the provided canonical key IDs from all delegations
+// present in the subtree rooted at role. The role argument must be provided in a wildcard
+// format, i.e. targets/* would remove the key from all delegations in the repo
+func (tr *Repo) PurgeDelegationKeys(role string, removeKeys []string) error {
+	if !data.IsWildDelegation(role) {
+		return data.ErrInvalidRole{
+			Role:   role,
+			Reason: "only wildcard roles can be used in a purge",
+		}
+	}
+
+	removeIDs := make(map[string]struct{})
+	for _, id := range removeKeys {
+		removeIDs[id] = struct{}{}
+	}
+
+	start := path.Dir(role)
+	tufIDToCanon := make(map[string]string)
+
+	purgeKeys := func(tgt *data.SignedTargets, validRole data.DelegationRole) interface{} {
+		var (
+			deleteCandidates []string
+			err              error
+		)
+		for id, key := range tgt.Signed.Delegations.Keys {
+			var (
+				canonID string
+				ok      bool
+			)
+			if canonID, ok = tufIDToCanon[id]; !ok {
+				canonID, err = utils.CanonicalKeyID(key)
+				if err != nil {
+					return err
+				}
+				tufIDToCanon[id] = canonID
+			}
+			if _, ok := removeIDs[canonID]; ok {
+				deleteCandidates = append(deleteCandidates, id)
+			}
+		}
+		if len(deleteCandidates) == 0 {
+			// none of the interesting keys were present. We're done with this role
+			return nil
+		}
+		// now we know there are changes, check if we'll be able to sign them in
+		if err := tr.VerifyCanSign(validRole.Name); err != nil {
+			logrus.Warnf(
+				"role %s contains keys being purged but you do not have the necessary keys present to sign it; keys will not be purged from %s or its immediate children",
+				validRole.Name,
+				validRole.Name,
+			)
+			return nil
+		}
+		// we know we can sign in the changes, delete the keys
+		for _, id := range deleteCandidates {
+			delete(tgt.Signed.Delegations.Keys, id)
+		}
+		// delete candidate keys from all roles.
+		for _, role := range tgt.Signed.Delegations.Roles {
+			role.RemoveKeys(deleteCandidates)
+			if len(role.KeyIDs) < role.Threshold {
+				logrus.Warnf("role %s has fewer keys than its threshold of %d; it will not be usable until keys are added to it", role.Name, role.Threshold)
+			}
+		}
+		tgt.Dirty = true
+		return nil
+	}
+	return tr.WalkTargets("", start, purgeKeys)
+}
+
 // UpdateDelegationPaths updates the appropriate delegation's paths.
 // It is not allowed to create a new delegation.

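A sketch of the new purge entry point; purgeEverywhere is a hypothetical helper, and repo and ids are assumed to come from a caller that has already loaded a populated repository:

package example

import (
	"github.com/docker/notary/tuf"
)

// purgeEverywhere strips the given canonical key IDs from every delegation
// in the subtree rooted at targets. Note the wildcard: a concrete role name
// like "targets/releases" would be rejected by the validation shown above.
func purgeEverywhere(repo *tuf.Repo, ids []string) error {
	return repo.PurgeDelegationKeys("targets/*", ids)
}
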
@@ -655,7 +734,7 @@ func (tr *Repo) WalkTargets(targetPath, rolePath string, visitTargets walkVisito
 	}

 	// Determine whether to visit this role or not:
-	// If the paths validate against the specified targetPath and the rolePath is empty or is in the subtree
+	// If the paths validate against the specified targetPath and the rolePath is empty or is in the subtree.
 	// Also check if we are choosing to skip visiting this role on this walk (see ListTargets and GetTargetByName priority)
 	if isValidPath(targetPath, role) && isAncestorRole(role.Name, rolePath) && !utils.StrSliceContains(skipRoles, role.Name) {
 		// If we had matching path or role name, visit this target and determine whether or not to keep walking

@@ -948,7 +1027,7 @@ func (tr *Repo) SignTargets(role string, expires time.Time) (*data.Signed, error
 	if _, ok := tr.Targets[role]; !ok {
 		return nil, data.ErrInvalidRole{
 			Role:   role,
-			Reason: "SignTargets called with non-existant targets role",
+			Reason: "SignTargets called with non-existent targets role",
 		}
 	}
 	tr.Targets[role].Signed.Expires = expires

@@ -1,109 +0,0 @@
-package utils
-
-import (
-	"crypto/hmac"
-	"encoding/hex"
-	"errors"
-	"fmt"
-	gopath "path"
-	"path/filepath"
-
-	"github.com/docker/notary/trustmanager"
-	"github.com/docker/notary/tuf/data"
-)
-
-// ErrWrongLength indicates the length was different to that expected
-var ErrWrongLength = errors.New("wrong length")
-
-// ErrWrongHash indicates the hash was different to that expected
-type ErrWrongHash struct {
-	Type     string
-	Expected []byte
-	Actual   []byte
-}
-
-// Error implements error interface
-func (e ErrWrongHash) Error() string {
-	return fmt.Sprintf("wrong %s hash, expected %#x got %#x", e.Type, e.Expected, e.Actual)
-}
-
-// ErrNoCommonHash indicates the metadata did not provide any hashes this
-// client recognizes
-type ErrNoCommonHash struct {
-	Expected data.Hashes
-	Actual   data.Hashes
-}
-
-// Error implements error interface
-func (e ErrNoCommonHash) Error() string {
-	types := func(a data.Hashes) []string {
-		t := make([]string, 0, len(a))
-		for typ := range a {
-			t = append(t, typ)
-		}
-		return t
-	}
-	return fmt.Sprintf("no common hash function, expected one of %s, got %s", types(e.Expected), types(e.Actual))
-}
-
-// ErrUnknownHashAlgorithm - client was asked to use a hash algorithm
-// it is not familiar with
-type ErrUnknownHashAlgorithm struct {
-	Name string
-}
-
-// Error implements error interface
-func (e ErrUnknownHashAlgorithm) Error() string {
-	return fmt.Sprintf("unknown hash algorithm: %s", e.Name)
-}
-
-// PassphraseFunc type for func that request a passphrase
-type PassphraseFunc func(role string, confirm bool) ([]byte, error)
-
-// FileMetaEqual checks whether 2 FileMeta objects are consistent with each other
-func FileMetaEqual(actual data.FileMeta, expected data.FileMeta) error {
-	if actual.Length != expected.Length {
-		return ErrWrongLength
-	}
-	hashChecked := false
-	for typ, hash := range expected.Hashes {
-		if h, ok := actual.Hashes[typ]; ok {
-			hashChecked = true
-			if !hmac.Equal(h, hash) {
-				return ErrWrongHash{typ, hash, h}
-			}
-		}
-	}
-	if !hashChecked {
-		return ErrNoCommonHash{expected.Hashes, actual.Hashes}
-	}
-	return nil
-}
-
-// NormalizeTarget adds a slash, if required, to the front of a target path
-func NormalizeTarget(path string) string {
-	return gopath.Join("/", path)
-}
-
-// HashedPaths prefixes the filename with the known hashes for the file,
-// returning a list of possible consistent paths.
-func HashedPaths(path string, hashes data.Hashes) []string {
-	paths := make([]string, 0, len(hashes))
-	for _, hash := range hashes {
-		hashedPath := filepath.Join(filepath.Dir(path), hex.EncodeToString(hash)+"."+filepath.Base(path))
-		paths = append(paths, hashedPath)
-	}
-	return paths
-}
-
-// CanonicalKeyID returns the ID of the public bytes version of a TUF key.
-// On regular RSA/ECDSA TUF keys, this is just the key ID. On X509 RSA/ECDSA
-// TUF keys, this is the key ID of the public key part of the key in the leaf cert
-func CanonicalKeyID(k data.PublicKey) (string, error) {
-	switch k.Algorithm() {
-	case data.ECDSAx509Key, data.RSAx509Key:
-		return trustmanager.X509PublicKeyID(k)
-	default:
-		return k.ID(), nil
-	}
-}

@@ -1,4 +1,4 @@
-package trustmanager
+package utils

 import (
 	"bytes"

@@ -22,22 +22,16 @@ import (
 	"github.com/docker/notary/tuf/data"
 )

-// CertToPEM is a utility function returns a PEM encoded x509 Certificate
-func CertToPEM(cert *x509.Certificate) []byte {
-	pemCert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})
-
-	return pemCert
+// CanonicalKeyID returns the ID of the public bytes version of a TUF key.
+// On regular RSA/ECDSA TUF keys, this is just the key ID. On X509 RSA/ECDSA
+// TUF keys, this is the key ID of the public key part of the key in the leaf cert
+func CanonicalKeyID(k data.PublicKey) (string, error) {
+	switch k.Algorithm() {
+	case data.ECDSAx509Key, data.RSAx509Key:
+		return X509PublicKeyID(k)
+	default:
+		return k.ID(), nil
+	}
 }

-// CertChainToPEM is a utility function returns a PEM encoded chain of x509 Certificates, in the order they are passed
-func CertChainToPEM(certChain []*x509.Certificate) ([]byte, error) {
-	var pemBytes bytes.Buffer
-	for _, cert := range certChain {
-		if err := pem.Encode(&pemBytes, &pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}); err != nil {
-			return nil, err
-		}
-	}
-	return pemBytes.Bytes(), nil
-}
-
 // LoadCertFromPEM returns the first certificate found in a bunch of bytes or error

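For x509-wrapped keys the canonical ID differs from the raw TUF key ID, which is why PurgeDelegationKeys above keeps a TUF-ID-to-canonical cache. A sketch with a hypothetical helper name, assuming the relocated tuf/utils package shown in this diff:

package example

import (
	"fmt"

	"github.com/docker/notary/tuf/data"
	"github.com/docker/notary/tuf/utils"
)

// showIDs prints both IDs for a key; they only differ when the public key
// bytes actually carry an x509 certificate.
func showIDs(k data.PublicKey) {
	canonical, err := utils.CanonicalKeyID(k)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("tuf ID:      ", k.ID())
	fmt.Println("canonical ID:", canonical)
}
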
@@ -64,6 +58,108 @@ func LoadCertFromPEM(pemBytes []byte) (*x509.Certificate, error) {
 	return nil, errors.New("no certificates found in PEM data")
 }

+// X509PublicKeyID returns a public key ID as a string, given a
+// data.PublicKey that contains an X509 Certificate
+func X509PublicKeyID(certPubKey data.PublicKey) (string, error) {
+	// Note that this only loads the first certificate from the public key
+	cert, err := LoadCertFromPEM(certPubKey.Public())
+	if err != nil {
+		return "", err
+	}
+	pubKeyBytes, err := x509.MarshalPKIXPublicKey(cert.PublicKey)
+	if err != nil {
+		return "", err
+	}
+
+	var key data.PublicKey
+	switch certPubKey.Algorithm() {
+	case data.ECDSAx509Key:
+		key = data.NewECDSAPublicKey(pubKeyBytes)
+	case data.RSAx509Key:
+		key = data.NewRSAPublicKey(pubKeyBytes)
+	}
+
+	return key.ID(), nil
+}
+
+// ParsePEMPrivateKey returns a data.PrivateKey from a PEM encoded private key. It
+// only supports RSA (PKCS#1) and attempts to decrypt using the passphrase, if encrypted.
+func ParsePEMPrivateKey(pemBytes []byte, passphrase string) (data.PrivateKey, error) {
+	block, _ := pem.Decode(pemBytes)
+	if block == nil {
+		return nil, errors.New("no valid private key found")
+	}
+
+	var privKeyBytes []byte
+	var err error
+	if x509.IsEncryptedPEMBlock(block) {
+		privKeyBytes, err = x509.DecryptPEMBlock(block, []byte(passphrase))
+		if err != nil {
+			return nil, errors.New("could not decrypt private key")
+		}
+	} else {
+		privKeyBytes = block.Bytes
+	}
+
+	switch block.Type {
+	case "RSA PRIVATE KEY":
+		rsaPrivKey, err := x509.ParsePKCS1PrivateKey(privKeyBytes)
+		if err != nil {
+			return nil, fmt.Errorf("could not parse DER encoded key: %v", err)
+		}
+
+		tufRSAPrivateKey, err := RSAToPrivateKey(rsaPrivKey)
+		if err != nil {
+			return nil, fmt.Errorf("could not convert rsa.PrivateKey to data.PrivateKey: %v", err)
+		}
+
+		return tufRSAPrivateKey, nil
+	case "EC PRIVATE KEY":
+		ecdsaPrivKey, err := x509.ParseECPrivateKey(privKeyBytes)
+		if err != nil {
+			return nil, fmt.Errorf("could not parse DER encoded private key: %v", err)
+		}
+
+		tufECDSAPrivateKey, err := ECDSAToPrivateKey(ecdsaPrivKey)
+		if err != nil {
+			return nil, fmt.Errorf("could not convert ecdsa.PrivateKey to data.PrivateKey: %v", err)
+		}
+
+		return tufECDSAPrivateKey, nil
+	case "ED25519 PRIVATE KEY":
+		// We serialize ED25519 keys by concatenating the private key
+		// to the public key and encoding with PEM. See the
+		// ED25519ToPrivateKey function.
+		tufECDSAPrivateKey, err := ED25519ToPrivateKey(privKeyBytes)
+		if err != nil {
+			return nil, fmt.Errorf("could not convert ecdsa.PrivateKey to data.PrivateKey: %v", err)
+		}
+
+		return tufECDSAPrivateKey, nil
+
+	default:
+		return nil, fmt.Errorf("unsupported key type %q", block.Type)
+	}
+}
+
+// CertToPEM is a utility function returns a PEM encoded x509 Certificate
+func CertToPEM(cert *x509.Certificate) []byte {
+	pemCert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})
+
+	return pemCert
+}
+
+// CertChainToPEM is a utility function returns a PEM encoded chain of x509 Certificates, in the order they are passed
+func CertChainToPEM(certChain []*x509.Certificate) ([]byte, error) {
+	var pemBytes bytes.Buffer
+	for _, cert := range certChain {
+		if err := pem.Encode(&pemBytes, &pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}); err != nil {
+			return nil, err
+		}
+	}
+	return pemBytes.Bytes(), nil
+}
+
 // LoadCertFromFile loads the first certificate from the file provided. The
 // data is expected to be PEM Encoded and contain one of more certificates
 // with PEM type "CERTIFICATE"

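A sketch of the relocated parser in use; loadKey is a hypothetical helper, and the key bytes are assumed to hold a single RSA, EC, or ED25519 PEM block (the ED25519 form being the concatenated private-plus-public encoding noted above):

package example

import (
	"fmt"

	"github.com/docker/notary/tuf/utils"
)

// loadKey parses a PEM private key, supplying the passphrase only if the
// block turns out to be encrypted.
func loadKey(pemBytes []byte, passphrase string) {
	key, err := utils.ParsePEMPrivateKey(pemBytes, passphrase)
	if err != nil {
		fmt.Println("unusable key:", err)
		return
	}
	fmt.Println("algorithm:", key.Algorithm(), "id:", key.ID())
}
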
@@ -138,66 +234,6 @@ func GetIntermediateCerts(certs []*x509.Certificate) []*x509.Certificate {
 	return intCerts
 }

-// ParsePEMPrivateKey returns a data.PrivateKey from a PEM encoded private key. It
-// only supports RSA (PKCS#1) and attempts to decrypt using the passphrase, if encrypted.
-func ParsePEMPrivateKey(pemBytes []byte, passphrase string) (data.PrivateKey, error) {
-	block, _ := pem.Decode(pemBytes)
-	if block == nil {
-		return nil, errors.New("no valid private key found")
-	}
-
-	var privKeyBytes []byte
-	var err error
-	if x509.IsEncryptedPEMBlock(block) {
-		privKeyBytes, err = x509.DecryptPEMBlock(block, []byte(passphrase))
-		if err != nil {
-			return nil, errors.New("could not decrypt private key")
-		}
-	} else {
-		privKeyBytes = block.Bytes
-	}
-
-	switch block.Type {
-	case "RSA PRIVATE KEY":
-		rsaPrivKey, err := x509.ParsePKCS1PrivateKey(privKeyBytes)
-		if err != nil {
-			return nil, fmt.Errorf("could not parse DER encoded key: %v", err)
-		}
-
-		tufRSAPrivateKey, err := RSAToPrivateKey(rsaPrivKey)
-		if err != nil {
-			return nil, fmt.Errorf("could not convert rsa.PrivateKey to data.PrivateKey: %v", err)
-		}
-
-		return tufRSAPrivateKey, nil
-	case "EC PRIVATE KEY":
-		ecdsaPrivKey, err := x509.ParseECPrivateKey(privKeyBytes)
-		if err != nil {
-			return nil, fmt.Errorf("could not parse DER encoded private key: %v", err)
-		}
-
-		tufECDSAPrivateKey, err := ECDSAToPrivateKey(ecdsaPrivKey)
-		if err != nil {
-			return nil, fmt.Errorf("could not convert ecdsa.PrivateKey to data.PrivateKey: %v", err)
-		}
-
-		return tufECDSAPrivateKey, nil
-	case "ED25519 PRIVATE KEY":
-		// We serialize ED25519 keys by concatenating the private key
-		// to the public key and encoding with PEM. See the
-		// ED25519ToPrivateKey function.
-		tufECDSAPrivateKey, err := ED25519ToPrivateKey(privKeyBytes)
-		if err != nil {
-			return nil, fmt.Errorf("could not convert ecdsa.PrivateKey to data.PrivateKey: %v", err)
-		}
-
-		return tufECDSAPrivateKey, nil
-
-	default:
-		return nil, fmt.Errorf("unsupported key type %q", block.Type)
-	}
-}
-
 // ParsePEMPublicKey returns a data.PublicKey from a PEM encoded public key or certificate.
 func ParsePEMPublicKey(pubKeyBytes []byte) (data.PublicKey, error) {
 	pemBlock, _ := pem.Decode(pubKeyBytes)

|
|||
if err != nil {
|
||||
return nil, fmt.Errorf("could not parse provided certificate: %v", err)
|
||||
}
|
||||
err = ValidateCertificate(cert)
|
||||
err = ValidateCertificate(cert, true)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid certificate: %v", err)
|
||||
}
|
||||
|
@@ -222,16 +258,15 @@ func ParsePEMPublicKey(pubKeyBytes []byte) (data.PublicKey, error) {
 }

 // ValidateCertificate returns an error if the certificate is not valid for notary
-// Currently this is only a time expiry check, and ensuring the public key has a large enough modulus if RSA
-func ValidateCertificate(c *x509.Certificate) error {
+// Currently this is only ensuring the public key has a large enough modulus if RSA,
+// using a non SHA1 signature algorithm, and an optional time expiry check
+func ValidateCertificate(c *x509.Certificate, checkExpiry bool) error {
 	if (c.NotBefore).After(c.NotAfter) {
 		return fmt.Errorf("certificate validity window is invalid")
 	}
-	now := time.Now()
-	tomorrow := now.AddDate(0, 0, 1)
-	// Give one day leeway on creation "before" time, check "after" against today
-	if (tomorrow).Before(c.NotBefore) || now.After(c.NotAfter) {
-		return fmt.Errorf("certificate is expired")
+	// Can't have SHA1 sig algorithm
+	if c.SignatureAlgorithm == x509.SHA1WithRSA || c.SignatureAlgorithm == x509.DSAWithSHA1 || c.SignatureAlgorithm == x509.ECDSAWithSHA1 {
+		return fmt.Errorf("certificate with CN %s uses invalid SHA1 signature algorithm", c.Subject.CommonName)
 	}
 	// If we have an RSA key, make sure it's long enough
 	if c.PublicKeyAlgorithm == x509.RSA {

|
|||
return fmt.Errorf("RSA bit length is too short")
|
||||
}
|
||||
}
|
||||
if checkExpiry {
|
||||
now := time.Now()
|
||||
tomorrow := now.AddDate(0, 0, 1)
|
||||
// Give one day leeway on creation "before" time, check "after" against today
|
||||
if (tomorrow).Before(c.NotBefore) || now.After(c.NotAfter) {
|
||||
return data.ErrCertExpired{CN: c.Subject.CommonName}
|
||||
}
|
||||
// If this certificate is expiring within 6 months, put out a warning
|
||||
if (c.NotAfter).Before(time.Now().AddDate(0, 6, 0)) {
|
||||
logrus.Warnf("certificate with CN %s is near expiry", c.Subject.CommonName)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
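With the new flag, expiry becomes opt-in and surfaces as a typed error, which is what lets GetDelegationRole above tolerate expired delegation certs while still rejecting structural problems. A sketch of that pattern (checkCert is a hypothetical helper):

package example

import (
	"crypto/x509"
	"fmt"

	"github.com/docker/notary/tuf/data"
	"github.com/docker/notary/tuf/utils"
)

// checkCert mirrors the delegation-key handling: structural problems such as
// a SHA1 signature, a short RSA modulus, or a bad validity window are fatal,
// while expiry is downgraded to a warning.
func checkCert(cert *x509.Certificate) error {
	if err := utils.ValidateCertificate(cert, true); err != nil {
		if _, ok := err.(data.ErrCertExpired); ok {
			fmt.Println("warning, expired cert:", err)
			return nil
		}
		return err
	}
	return nil
}
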
@@ -385,7 +432,7 @@ func KeyToPEM(privKey data.PrivateKey, role string) ([]byte, error) {

 // EncryptPrivateKey returns an encrypted PEM key given a Privatekey
 // and a passphrase
-func EncryptPrivateKey(key data.PrivateKey, role, passphrase string) ([]byte, error) {
+func EncryptPrivateKey(key data.PrivateKey, role, gun, passphrase string) ([]byte, error) {
 	bt, err := blockType(key)
 	if err != nil {
 		return nil, err

@@ -408,6 +455,10 @@ func EncryptPrivateKey(key data.PrivateKey, role, passphrase string) ([]byte, er
 	}
 	encryptedPEMBlock.Headers["role"] = role

+	if gun != "" {
+		encryptedPEMBlock.Headers["gun"] = gun
+	}
+
 	return pem.EncodeToMemory(encryptedPEMBlock), nil
 }

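The extra gun argument lands in the encrypted PEM headers, so a key file now records which GUN it belongs to alongside its role; passing "" keeps the old role-only header behavior. A sketch (encryptForGun is a hypothetical helper, and the role, GUN, and passphrase values are made up):

package example

import (
	"fmt"

	"github.com/docker/notary/tuf/data"
	"github.com/docker/notary/tuf/utils"
)

// encryptForGun writes both role and gun headers into the encrypted block.
func encryptForGun(key data.PrivateKey) ([]byte, error) {
	pemBytes, err := utils.EncryptPrivateKey(key, "targets", "docker.com/library/alpine", "correct horse battery staple")
	if err != nil {
		return nil, err
	}
	// The PEM headers now include role: targets and gun: docker.com/library/alpine.
	fmt.Println(string(pemBytes))
	return pemBytes, nil
}
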
@@ -498,27 +549,3 @@ func NewCertificate(gun string, startTime, endTime time.Time) (*x509.Certificate
 		BasicConstraintsValid: true,
 	}, nil
 }
-
-// X509PublicKeyID returns a public key ID as a string, given a
-// data.PublicKey that contains an X509 Certificate
-func X509PublicKeyID(certPubKey data.PublicKey) (string, error) {
-	// Note that this only loads the first certificate from the public key
-	cert, err := LoadCertFromPEM(certPubKey.Public())
-	if err != nil {
-		return "", err
-	}
-	pubKeyBytes, err := x509.MarshalPKIXPublicKey(cert.PublicKey)
-	if err != nil {
-		return "", err
-	}
-
-	var key data.PublicKey
-	switch certPubKey.Algorithm() {
-	case data.ECDSAx509Key:
-		key = data.NewECDSAPublicKey(pubKeyBytes)
-	case data.RSAx509Key:
-		key = data.NewRSAPublicKey(pubKeyBytes)
-	}
-
-	return key.ID(), nil
-}