
Update containerd to v1.0.0

Signed-off-by: Michael Crosby <crosbymichael@gmail.com>
Michael Crosby, 2017-12-05 09:44:42 -05:00
commit 18a53d0374 (parent 4047cede65)
15 changed files with 512 additions and 475 deletions


@ -4,7 +4,7 @@ TOMLV_COMMIT=9baf8a8a9f2ed20a8e54160840c492f937eeaf9a
# When updating RUNC_COMMIT, also update runc in vendor.conf accordingly
RUNC_COMMIT=b2567b37d7b75eb4cf325b77297b140ea686ce8f
-CONTAINERD_COMMIT=cc969fb42f427a68a8cc6870ef47f17304b83962
+CONTAINERD_COMMIT=v1.0.0
TINI_COMMIT=949e6facb77383876aeff8a6944dde66b3089574
LIBNETWORK_COMMIT=7b2b1feb1de4817d522cc372af149ff48d25028e
VNDR_COMMIT=a6e196d8b4b0cbbdc29aebdb20c59ac6926bb384


@ -103,7 +103,7 @@ github.com/googleapis/gax-go da06d194a00e19ce00d9011a13931c3f6f6887c7
google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
# containerd
-github.com/containerd/containerd cc969fb42f427a68a8cc6870ef47f17304b83962
+github.com/containerd/containerd v1.0.0
github.com/containerd/fifo fbfb6a11ec671efbe94ad1c12c2e98773f19e1e6
github.com/containerd/continuity 35d55c5e8dd23b32037d56cf97174aff3efdfa83
github.com/containerd/cgroups 29da22c6171a4316169f9205ab6c49f59b5b852f


@ -13,13 +13,23 @@ containerd is designed to be embedded into a larger system, rather than being us
## Getting Started

-If you are interested in trying out containerd please see our [Getting Started Guide](docs/getting-started.md).
+See our documentation on [containerd.io](containerd.io):
+* [for ops and admins](docs/ops.md)
+* [namespaces](docs/namespaces.md)
+* [client options](docs/client-opts.md)
+
+See how to build containerd from source at [BUILDING](BUILDING.md).
+
+If you are interested in trying out containerd see our example at [Getting Started](docs/getting-started.md).

## Runtime Requirements

Runtime requirements for containerd are very minimal. Most interactions with
the Linux and Windows container feature sets are handled via [runc](https://github.com/opencontainers/runc) and/or
-OS-specific libraries (e.g. [hcsshim](https://github.com/Microsoft/hcsshim) for Microsoft). There are specific features
+OS-specific libraries (e.g. [hcsshim](https://github.com/Microsoft/hcsshim) for Microsoft). The current required version of `runc` is always listed in [RUNC.md](/RUNC.md).
+
+There are specific features
used by containerd core code and snapshotters that will require a minimum kernel
version on Linux. With the understood caveat of distro kernel versioning, a
reasonable starting point for Linux is a minimum 4.x kernel version.
@ -33,9 +43,7 @@ distribution.
To use Linux checkpoint and restore features, you will need `criu` installed on
your system. See more details in [Checkpoint and Restore](#checkpoint-and-restore).

-The current required version of runc is always listed in [RUNC.md](/RUNC.md).
-
-Build requirements for developers are listed in the [Developer Quick-Start](#developer-quick-start) section.
+Build requirements for developers are listed in [BUILDING](BUILDING.md).

## Features
@ -45,7 +53,11 @@ containerd offers a full client package to help you integrate containerd into yo
```go
-import "github.com/containerd/containerd"
+import (
+"github.com/containerd/containerd"
+"github.com/containerd/containerd/cio"
+)

func main() {
client, err := containerd.New("/run/containerd/containerd.sock")
@ -133,7 +145,7 @@ Taking a container object and turning it into a runnable process on a system is
```go
// create a new task
-task, err := redis.NewTask(context, containerd.Stdio)
+task, err := redis.NewTask(context, cio.Stdio)
defer task.Delete(context)

// the task is now running and has a pid that can be use to setup networking
@ -165,37 +177,12 @@ checkpoint := image.Target()
redis, err = client.NewContainer(context, "redis-master", containerd.WithCheckpoint(checkpoint, "redis-rootfs"))
defer container.Delete(context)

-task, err = redis.NewTask(context, containerd.Stdio, containerd.WithTaskCheckpoint(checkpoint))
+task, err = redis.NewTask(context, cio.Stdio, containerd.WithTaskCheckpoint(checkpoint))
defer task.Delete(context)

err := task.Start(context)
```
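The hunks above reflect the 1.0 client split: task I/O helpers moved from the root `containerd` package into `github.com/containerd/containerd/cio`, which is why `containerd.Stdio` becomes `cio.Stdio`. Below is a minimal connection sketch, not part of this commit; the socket path and the "example" namespace are assumptions, and it only verifies the daemon version before the container/task calls shown above.

```go
package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/namespaces"
)

func main() {
	// Connect to the containerd socket (default path assumed here).
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// All client calls are namespaced; "example" is an arbitrary choice.
	ctx := namespaces.WithNamespace(context.Background(), "example")

	// Version is a cheap round trip that confirms the daemon is reachable.
	v, err := client.Version(ctx)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("containerd version: %s (revision %s)", v.Version, v.Revision)
}
```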
-## Developer Quick Start
-
-To build the daemon and `ctr` simple test client, the following build system dependencies are required:
-
-* Go 1.9.x or above
-* Protoc 3.x compiler and headers (download at the [Google protobuf releases page](https://github.com/google/protobuf/releases))
-* Btrfs headers and libraries for your distribution. Note that building the btrfs driver can be disabled via build tag removing this dependency.
-
-For proper results, install the `protoc` release into `/usr/local` on your build system. For example, the following commands will download and install the 3.5.0 release for a 64-bit Linux host:
-
-```
-$ wget -c https://github.com/google/protobuf/releases/download/v3.5.0/protoc-3.5.0-linux-x86_64.zip
-$ sudo unzip protoc-3.5.0-linux-x86_64.zip -d /usr/local
-```
-
-With the required dependencies installed, the `Makefile` target named **binaries** will compile the `ctr` and `containerd` binaries and place them in the `bin/` directory. Using `sudo make install` will place the binaries in `/usr/local/bin`. When making any changes to the gRPC API, `make generate` will use the installed `protoc` compiler to regenerate the API generated code packages.
-
-> *Note*: A build tag is currently available to disable building the btrfs snapshot driver.
-> Adding `BUILDTAGS=no_btrfs` to your environment before calling the **binaries**
-> Makefile target will disable the btrfs driver within the containerd Go build.
-
-Vendoring of external imports uses the [`vndr` tool](https://github.com/LK4D4/vndr) which uses a simple config file, `vendor.conf`, to provide the URL and version or hash details for each vendored import. After modifying `vendor.conf` run the `vndr` tool to update the `vendor/` directory contents. Combining the `vendor.conf` update with the changeset in `vendor/` after running `vndr` should become a single commit for a PR which relies on vendored updates.
-
-Please refer to [RUNC.md](/RUNC.md) for the currently supported version of `runc` that is used by containerd.
### Releases and API Stability

Please see [RELEASES.md](RELEASES.md) for details on versioning and stability


@ -7,6 +7,7 @@ import (
"net/http" "net/http"
"runtime" "runtime"
"strconv" "strconv"
"strings"
"sync" "sync"
"time" "time"
@ -29,7 +30,6 @@ import (
"github.com/containerd/containerd/namespaces" "github.com/containerd/containerd/namespaces"
"github.com/containerd/containerd/platforms" "github.com/containerd/containerd/platforms"
"github.com/containerd/containerd/plugin" "github.com/containerd/containerd/plugin"
"github.com/containerd/containerd/reference"
"github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes"
"github.com/containerd/containerd/remotes/docker" "github.com/containerd/containerd/remotes/docker"
"github.com/containerd/containerd/remotes/docker/schema1" "github.com/containerd/containerd/remotes/docker/schema1"
@ -334,6 +334,14 @@ func (c *Client) Push(ctx context.Context, ref string, desc ocispec.Descriptor,
for i := len(manifestStack) - 1; i >= 0; i-- {
_, err := pushHandler(ctx, manifestStack[i])
if err != nil {
+// TODO(estesp): until we have a more complete method for index push, we need to report
+// missing dependencies in an index/manifest list by sensing the "400 Bad Request"
+// as a marker for this problem
+if (manifestStack[i].MediaType == ocispec.MediaTypeImageIndex ||
+manifestStack[i].MediaType == images.MediaTypeDockerSchema2ManifestList) &&
+errors.Cause(err) != nil && strings.Contains(errors.Cause(err).Error(), "400 Bad Request") {
+return errors.Wrap(err, "manifest list/index references to blobs and/or manifests are missing in your target registry")
+}
return err
}
}
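The added check relies on `github.com/pkg/errors`: `errors.Cause` digs back to the root error so the registry's "400 Bad Request" status text can be matched, and `errors.Wrap` adds the hint without losing that cause. A small illustrative sketch of the same pattern (the `pushIndex` helper is hypothetical, not from this commit):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/pkg/errors"
)

// pushIndex is a stand-in for a manifest-list push that fails.
func pushIndex() error {
	root := fmt.Errorf("unexpected status: 400 Bad Request")
	// Wrapping preserves the root cause while adding context.
	return errors.Wrap(root, "push failed")
}

func main() {
	err := pushIndex()
	// errors.Cause unwraps to the original error, so the status text can be
	// matched even after several layers of wrapping.
	if cause := errors.Cause(err); cause != nil && strings.Contains(cause.Error(), "400 Bad Request") {
		fmt.Println("registry is missing referenced blobs/manifests:", err)
	}
}
```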
@ -494,95 +502,27 @@ func (c *Client) Version(ctx context.Context) (Version, error) {
}, nil
}

-type imageFormat string
-
-const (
-ociImageFormat imageFormat = "oci"
-)
-
type importOpts struct {
-format imageFormat
-refObject string
-labels map[string]string
}

// ImportOpt allows the caller to specify import specific options
type ImportOpt func(c *importOpts) error

-// WithImportLabel sets a label to be associated with an imported image
-func WithImportLabel(key, value string) ImportOpt {
-return func(opts *importOpts) error {
-if opts.labels == nil {
-opts.labels = make(map[string]string)
-}
-opts.labels[key] = value
-return nil
-}
-}
-
-// WithImportLabels associates a set of labels to an imported image
-func WithImportLabels(labels map[string]string) ImportOpt {
-return func(opts *importOpts) error {
-if opts.labels == nil {
-opts.labels = make(map[string]string)
-}
-for k, v := range labels {
-opts.labels[k] = v
-}
-return nil
-}
-}
-
-// WithOCIImportFormat sets the import format for an OCI image format
-func WithOCIImportFormat() ImportOpt {
-return func(c *importOpts) error {
-if c.format != "" {
-return errors.New("format already set")
-}
-c.format = ociImageFormat
-return nil
-}
-}
-
-// WithRefObject specifies the ref object to import.
-// If refObject is empty, it is copied from the ref argument of Import().
-func WithRefObject(refObject string) ImportOpt {
-return func(c *importOpts) error {
-c.refObject = refObject
-return nil
-}
-}
-
-func resolveImportOpt(ref string, opts ...ImportOpt) (importOpts, error) {
+func resolveImportOpt(opts ...ImportOpt) (importOpts, error) {
var iopts importOpts
for _, o := range opts {
if err := o(&iopts); err != nil {
return iopts, err
}
}
-// use OCI as the default format
-if iopts.format == "" {
-iopts.format = ociImageFormat
-}
-// if refObject is not explicitly specified, use the one specified in ref
-if iopts.refObject == "" {
-refSpec, err := reference.Parse(ref)
-if err != nil {
-return iopts, err
-}
-iopts.refObject = refSpec.Object
-}
return iopts, nil
}

// Import imports an image from a Tar stream using reader.
-// OCI format is assumed by default.
-//
-// Note that unreferenced blobs are imported to the content store as well.
-func (c *Client) Import(ctx context.Context, ref string, reader io.Reader, opts ...ImportOpt) (Image, error) {
-iopts, err := resolveImportOpt(ref, opts...)
+// Caller needs to specify importer. Future version may use oci.v1 as the default.
+// Note that unreferrenced blobs may be imported to the content store as well.
+func (c *Client) Import(ctx context.Context, importer images.Importer, reader io.Reader, opts ...ImportOpt) ([]Image, error) {
+_, err := resolveImportOpt(opts...) // unused now
if err != nil {
return nil, err
}
@ -593,58 +533,66 @@ func (c *Client) Import(ctx context.Context, ref string, reader io.Reader, opts
}
defer done()

-switch iopts.format {
-case ociImageFormat:
-return c.importFromOCITar(ctx, ref, reader, iopts)
-default:
-return nil, errors.Errorf("unsupported format: %s", iopts.format)
-}
+imgrecs, err := importer.Import(ctx, c.ContentStore(), reader)
+if err != nil {
+// is.Update() is not called on error
+return nil, err
+}
+
+is := c.ImageService()
+var images []Image
+for _, imgrec := range imgrecs {
+if updated, err := is.Update(ctx, imgrec, "target"); err != nil {
+if !errdefs.IsNotFound(err) {
+return nil, err
+}
+created, err := is.Create(ctx, imgrec)
+if err != nil {
+return nil, err
+}
+imgrec = created
+} else {
+imgrec = updated
+}
+images = append(images, &image{
+client: c,
+i: imgrec,
+})
+}
+return images, nil
}

type exportOpts struct {
-format imageFormat
}

-// ExportOpt allows callers to set export options
+// ExportOpt allows the caller to specify export-specific options
type ExportOpt func(c *exportOpts) error

-// WithOCIExportFormat sets the OCI image format as the export target
-func WithOCIExportFormat() ExportOpt {
-return func(c *exportOpts) error {
-if c.format != "" {
-return errors.New("format already set")
-}
-c.format = ociImageFormat
-return nil
-}
-}
-
-// TODO: add WithMediaTypeTranslation that transforms media types according to the format.
-// e.g. application/vnd.docker.image.rootfs.diff.tar.gzip
-// -> application/vnd.oci.image.layer.v1.tar+gzip
+func resolveExportOpt(opts ...ExportOpt) (exportOpts, error) {
+var eopts exportOpts
+for _, o := range opts {
+if err := o(&eopts); err != nil {
+return eopts, err
+}
+}
+return eopts, nil
+}

// Export exports an image to a Tar stream.
// OCI format is used by default.
// It is up to caller to put "org.opencontainers.image.ref.name" annotation to desc.
-func (c *Client) Export(ctx context.Context, desc ocispec.Descriptor, opts ...ExportOpt) (io.ReadCloser, error) {
-var eopts exportOpts
-for _, o := range opts {
-if err := o(&eopts); err != nil {
-return nil, err
-}
-}
-// use OCI as the default format
-if eopts.format == "" {
-eopts.format = ociImageFormat
-}
+// TODO(AkihiroSuda): support exporting multiple descriptors at once to a single archive stream.
+func (c *Client) Export(ctx context.Context, exporter images.Exporter, desc ocispec.Descriptor, opts ...ExportOpt) (io.ReadCloser, error) {
+_, err := resolveExportOpt(opts...) // unused now
+if err != nil {
+return nil, err
+}
pr, pw := io.Pipe()
-switch eopts.format {
-case ociImageFormat:
go func() {
-pw.CloseWithError(c.exportToOCITar(ctx, desc, pw, eopts))
+pw.CloseWithError(exporter.Export(ctx, c.ContentStore(), desc, pw))
}()
-default:
-return nil, errors.Errorf("unsupported format: %s", eopts.format)
-}
return pr, nil
}
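With this rewrite the format-selection options are gone: callers hand `Import` and `Export` an explicit `images.Importer`/`images.Exporter`, and `Import` now returns a slice of images. A hedged, compile-only sketch of the new call shape (not from this commit; `importer` and `exporter` stand in for concrete implementations such as an OCI tar one):

```go
package example

import (
	"context"
	"fmt"
	"io"
	"os"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/images"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// importAndExport sketches the new call shape; importer and exporter are
// assumed to be concrete images.Importer / images.Exporter implementations.
func importAndExport(ctx context.Context, client *containerd.Client,
	importer images.Importer, exporter images.Exporter,
	archive io.Reader, desc ocispec.Descriptor) error {
	// Import now returns a slice of images rather than a single one.
	imgs, err := client.Import(ctx, importer, archive)
	if err != nil {
		return err
	}
	for _, img := range imgs {
		fmt.Println("imported:", img.Name())
	}

	// Export takes the exporter explicitly and streams the resulting archive.
	rc, err := client.Export(ctx, exporter, desc)
	if err != nil {
		return err
	}
	defer rc.Close()
	_, err = io.Copy(os.Stdout, rc)
	return err
}
```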


@ -1,189 +0,0 @@
package containerd
import (
"archive/tar"
"context"
"encoding/json"
"io"
"sort"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/platforms"
ocispecs "github.com/opencontainers/image-spec/specs-go"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
func (c *Client) exportToOCITar(ctx context.Context, desc ocispec.Descriptor, writer io.Writer, eopts exportOpts) error {
tw := tar.NewWriter(writer)
defer tw.Close()
records := []tarRecord{
ociLayoutFile(""),
ociIndexRecord(desc),
}
cs := c.ContentStore()
algorithms := map[string]struct{}{}
exportHandler := func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
records = append(records, blobRecord(cs, desc))
algorithms[desc.Digest.Algorithm().String()] = struct{}{}
return nil, nil
}
handlers := images.Handlers(
images.ChildrenHandler(cs, platforms.Default()),
images.HandlerFunc(exportHandler),
)
// Walk sequentially since the number of fetchs is likely one and doing in
// parallel requires locking the export handler
if err := images.Walk(ctx, handlers, desc); err != nil {
return err
}
if len(algorithms) > 0 {
records = append(records, directoryRecord("blobs/", 0755))
for alg := range algorithms {
records = append(records, directoryRecord("blobs/"+alg+"/", 0755))
}
}
return writeTar(ctx, tw, records)
}
type tarRecord struct {
Header *tar.Header
CopyTo func(context.Context, io.Writer) (int64, error)
}
func blobRecord(cs content.Store, desc ocispec.Descriptor) tarRecord {
path := "blobs/" + desc.Digest.Algorithm().String() + "/" + desc.Digest.Hex()
return tarRecord{
Header: &tar.Header{
Name: path,
Mode: 0444,
Size: desc.Size,
Typeflag: tar.TypeReg,
},
CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
r, err := cs.ReaderAt(ctx, desc.Digest)
if err != nil {
return 0, err
}
defer r.Close()
// Verify digest
dgstr := desc.Digest.Algorithm().Digester()
n, err := io.Copy(io.MultiWriter(w, dgstr.Hash()), content.NewReader(r))
if err != nil {
return 0, err
}
if dgstr.Digest() != desc.Digest {
return 0, errors.Errorf("unexpected digest %s copied", dgstr.Digest())
}
return n, nil
},
}
}
func directoryRecord(name string, mode int64) tarRecord {
return tarRecord{
Header: &tar.Header{
Name: name,
Mode: mode,
Typeflag: tar.TypeDir,
},
}
}
func ociLayoutFile(version string) tarRecord {
if version == "" {
version = ocispec.ImageLayoutVersion
}
layout := ocispec.ImageLayout{
Version: version,
}
b, err := json.Marshal(layout)
if err != nil {
panic(err)
}
return tarRecord{
Header: &tar.Header{
Name: ocispec.ImageLayoutFile,
Mode: 0444,
Size: int64(len(b)),
Typeflag: tar.TypeReg,
},
CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
n, err := w.Write(b)
return int64(n), err
},
}
}
func ociIndexRecord(manifests ...ocispec.Descriptor) tarRecord {
index := ocispec.Index{
Versioned: ocispecs.Versioned{
SchemaVersion: 2,
},
Manifests: manifests,
}
b, err := json.Marshal(index)
if err != nil {
panic(err)
}
return tarRecord{
Header: &tar.Header{
Name: "index.json",
Mode: 0644,
Size: int64(len(b)),
Typeflag: tar.TypeReg,
},
CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
n, err := w.Write(b)
return int64(n), err
},
}
}
func writeTar(ctx context.Context, tw *tar.Writer, records []tarRecord) error {
sort.Sort(tarRecordsByName(records))
for _, record := range records {
if err := tw.WriteHeader(record.Header); err != nil {
return err
}
if record.CopyTo != nil {
n, err := record.CopyTo(ctx, tw)
if err != nil {
return err
}
if n != record.Header.Size {
return errors.Errorf("unexpected copy size for %s", record.Header.Name)
}
} else if record.Header.Size > 0 {
return errors.Errorf("no content to write to record with non-zero size for %s", record.Header.Name)
}
}
return nil
}
type tarRecordsByName []tarRecord
func (t tarRecordsByName) Len() int {
return len(t)
}
func (t tarRecordsByName) Swap(i, j int) {
t[i], t[j] = t[j], t[i]
}
func (t tarRecordsByName) Less(i, j int) bool {
return t[i].Header.Name < t[j].Header.Name
}


@ -3,7 +3,6 @@ package filters
import (
"fmt"
"io"
-"strconv"

"github.com/containerd/containerd/errdefs"
"github.com/pkg/errors"

@ -134,7 +133,12 @@ func (p *parser) selector() (selector, error) {
return selector{}, err
}

-value, err := p.value()
+var allowAltQuotes bool
+if op == operatorMatches {
+allowAltQuotes = true
+}
+
+value, err := p.value(allowAltQuotes)
if err != nil {
if err == io.EOF {
return selector{}, io.ErrUnexpectedEOF

@ -188,7 +192,7 @@ func (p *parser) field() (string, error) {
case tokenField:
return s, nil
case tokenQuoted:
-return p.unquote(pos, s)
+return p.unquote(pos, s, false)
}

return "", p.mkerr(pos, "expected field or quoted")

@ -213,21 +217,25 @@ func (p *parser) operator() (operator, error) {
return 0, p.mkerr(pos, `expected an operator ("=="|"!="|"~=")`)
}

-func (p *parser) value() (string, error) {
+func (p *parser) value(allowAltQuotes bool) (string, error) {
pos, tok, s := p.scanner.scan()
switch tok {
case tokenValue, tokenField:
return s, nil
case tokenQuoted:
-return p.unquote(pos, s)
+return p.unquote(pos, s, allowAltQuotes)
}

return "", p.mkerr(pos, "expected value or quoted")
}

-func (p *parser) unquote(pos int, s string) (string, error) {
-uq, err := strconv.Unquote(s)
+func (p *parser) unquote(pos int, s string, allowAlts bool) (string, error) {
+if !allowAlts && s[0] != '\'' && s[0] != '"' {
+return "", p.mkerr(pos, "invalid quote encountered")
+}
+
+uq, err := unquote(s)
if err != nil {
return "", p.mkerr(pos, "unquoting failed: %v", err)
}


@ -0,0 +1,237 @@
package filters
import (
"unicode/utf8"
"github.com/pkg/errors"
)
// NOTE(stevvooe): Most of this code in this file is copied from the stdlib
// strconv package and modified to be able to handle quoting with `/` and `|`
// as delimiters. The copyright is held by the Go authors.
var errQuoteSyntax = errors.New("quote syntax error")
// UnquoteChar decodes the first character or byte in the escaped string
// or character literal represented by the string s.
// It returns four values:
//
// 1) value, the decoded Unicode code point or byte value;
// 2) multibyte, a boolean indicating whether the decoded character requires a multibyte UTF-8 representation;
// 3) tail, the remainder of the string after the character; and
// 4) an error that will be nil if the character is syntactically valid.
//
// The second argument, quote, specifies the type of literal being parsed
// and therefore which escaped quote character is permitted.
// If set to a single quote, it permits the sequence \' and disallows unescaped '.
// If set to a double quote, it permits \" and disallows unescaped ".
// If set to zero, it does not permit either escape and allows both quote characters to appear unescaped.
//
// This is from Go strconv package, modified to support `|` and `/` as double
// quotes for use with regular expressions.
func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) {
// easy cases
switch c := s[0]; {
case c == quote && (quote == '\'' || quote == '"' || quote == '/' || quote == '|'):
err = errQuoteSyntax
return
case c >= utf8.RuneSelf:
r, size := utf8.DecodeRuneInString(s)
return r, true, s[size:], nil
case c != '\\':
return rune(s[0]), false, s[1:], nil
}
// hard case: c is backslash
if len(s) <= 1 {
err = errQuoteSyntax
return
}
c := s[1]
s = s[2:]
switch c {
case 'a':
value = '\a'
case 'b':
value = '\b'
case 'f':
value = '\f'
case 'n':
value = '\n'
case 'r':
value = '\r'
case 't':
value = '\t'
case 'v':
value = '\v'
case 'x', 'u', 'U':
n := 0
switch c {
case 'x':
n = 2
case 'u':
n = 4
case 'U':
n = 8
}
var v rune
if len(s) < n {
err = errQuoteSyntax
return
}
for j := 0; j < n; j++ {
x, ok := unhex(s[j])
if !ok {
err = errQuoteSyntax
return
}
v = v<<4 | x
}
s = s[n:]
if c == 'x' {
// single-byte string, possibly not UTF-8
value = v
break
}
if v > utf8.MaxRune {
err = errQuoteSyntax
return
}
value = v
multibyte = true
case '0', '1', '2', '3', '4', '5', '6', '7':
v := rune(c) - '0'
if len(s) < 2 {
err = errQuoteSyntax
return
}
for j := 0; j < 2; j++ { // one digit already; two more
x := rune(s[j]) - '0'
if x < 0 || x > 7 {
err = errQuoteSyntax
return
}
v = (v << 3) | x
}
s = s[2:]
if v > 255 {
err = errQuoteSyntax
return
}
value = v
case '\\':
value = '\\'
case '\'', '"', '|', '/':
if c != quote {
err = errQuoteSyntax
return
}
value = rune(c)
default:
err = errQuoteSyntax
return
}
tail = s
return
}
// unquote interprets s as a single-quoted, double-quoted,
// or backquoted Go string literal, returning the string value
// that s quotes. (If s is single-quoted, it would be a Go
// character literal; Unquote returns the corresponding
// one-character string.)
//
// This is modified from the standard library to support `|` and `/` as quote
// characters for use with regular expressions.
func unquote(s string) (string, error) {
n := len(s)
if n < 2 {
return "", errQuoteSyntax
}
quote := s[0]
if quote != s[n-1] {
return "", errQuoteSyntax
}
s = s[1 : n-1]
if quote == '`' {
if contains(s, '`') {
return "", errQuoteSyntax
}
if contains(s, '\r') {
// -1 because we know there is at least one \r to remove.
buf := make([]byte, 0, len(s)-1)
for i := 0; i < len(s); i++ {
if s[i] != '\r' {
buf = append(buf, s[i])
}
}
return string(buf), nil
}
return s, nil
}
if quote != '"' && quote != '\'' && quote != '|' && quote != '/' {
return "", errQuoteSyntax
}
if contains(s, '\n') {
return "", errQuoteSyntax
}
// Is it trivial? Avoid allocation.
if !contains(s, '\\') && !contains(s, quote) {
switch quote {
case '"', '/', '|': // pipe and slash are treated like double quote
return s, nil
case '\'':
r, size := utf8.DecodeRuneInString(s)
if size == len(s) && (r != utf8.RuneError || size != 1) {
return s, nil
}
}
}
var runeTmp [utf8.UTFMax]byte
buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations.
for len(s) > 0 {
c, multibyte, ss, err := unquoteChar(s, quote)
if err != nil {
return "", err
}
s = ss
if c < utf8.RuneSelf || !multibyte {
buf = append(buf, byte(c))
} else {
n := utf8.EncodeRune(runeTmp[:], c)
buf = append(buf, runeTmp[:n]...)
}
if quote == '\'' && len(s) != 0 {
// single-quoted must be single character
return "", errQuoteSyntax
}
}
return string(buf), nil
}
// contains reports whether the string contains the byte c.
func contains(s string, c byte) bool {
for i := 0; i < len(s); i++ {
if s[i] == c {
return true
}
}
return false
}
func unhex(b byte) (v rune, ok bool) {
c := rune(b)
switch {
case '0' <= c && c <= '9':
return c - '0', true
case 'a' <= c && c <= 'f':
return c - 'a' + 10, true
case 'A' <= c && c <= 'F':
return c - 'A' + 10, true
}
return
}
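Because `unquote` accepts `/` and `|` as quote runes in addition to `'` and `"`, a regular-expression value for the `~=` operator can be quoted without escaping embedded double quotes. A hypothetical in-package test sketch (not part of the commit) of the expected behavior:

```go
package filters

import "testing"

// TestUnquoteAltQuotes is a hypothetical sketch exercising the alternate
// '/' and '|' quoting added for the ~= (matches) operator.
func TestUnquoteAltQuotes(t *testing.T) {
	cases := map[string]string{
		`/foo.*bar/`: `foo.*bar`, // slash-quoted regular expression
		`|foo\|bar|`: `foo|bar`,  // escaped pipe inside pipe quotes
		`"plain"`:    `plain`,    // ordinary double quotes still work
	}
	for in, want := range cases {
		got, err := unquote(in)
		if err != nil {
			t.Fatalf("unquote(%q): %v", in, err)
		}
		if got != want {
			t.Fatalf("unquote(%q) = %q, want %q", in, got, want)
		}
	}
}
```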


@ -87,7 +87,7 @@ func (s *scanner) peek() rune {
return ch
}

-func (s *scanner) scan() (int, token, string) {
+func (s *scanner) scan() (nextp int, tk token, text string) {
var (
ch = s.next()
pos = s.pos

@ -101,6 +101,7 @@ chomp:
s.scanQuoted(ch)
return pos, tokenQuoted, s.input[pos:s.ppos]
case isSeparatorRune(ch):
+s.value = false
return pos, tokenSeparator, s.input[pos:s.ppos]
case isOperatorRune(ch):
s.scanOperator()

@ -241,7 +242,7 @@ func isOperatorRune(r rune) bool {
func isQuoteRune(r rune) bool {
switch r {
-case '"': // maybe add single quoting?
+case '/', '|', '"': // maybe add single quoting?
return true
}


@ -224,6 +224,8 @@ func doubleWalkDiff(ctx context.Context, changeFn ChangeFunc, a, b string) (err
f1, f2 *currentPath
rmdir string
+lastEmittedDir = string(filepath.Separator)
+parents []os.FileInfo
)
g.Go(func() error {
defer close(c1)

@ -258,7 +260,10 @@
continue
}

-var f os.FileInfo
+var (
+f os.FileInfo
+emit = true
+)

k, p := pathChange(f1, f2)
switch k {
case ChangeKindAdd:

@ -294,17 +299,83 @@ func doubleWalkDiff(ctx context.Context, changeFn ChangeFunc, a, b string) (err
f2 = nil
if same {
if !isLinked(f) {
-continue
+emit = false
}
k = ChangeKindUnmodified
}
}
+if emit {
+emittedDir, emitParents := commonParents(lastEmittedDir, p, parents)
+for _, pf := range emitParents {
+p := filepath.Join(emittedDir, pf.Name())
+if err := changeFn(ChangeKindUnmodified, p, pf, nil); err != nil {
+return err
+}
+emittedDir = p
+}
if err := changeFn(k, p, f, nil); err != nil {
return err
}
+if f != nil && f.IsDir() {
+lastEmittedDir = p
+} else {
+lastEmittedDir = emittedDir
+}
+parents = parents[:0]
+} else if f.IsDir() {
+lastEmittedDir, parents = commonParents(lastEmittedDir, p, parents)
+parents = append(parents, f)
+}
}
return nil
})

return g.Wait()
}
func commonParents(base, updated string, dirs []os.FileInfo) (string, []os.FileInfo) {
if basePrefix := makePrefix(base); strings.HasPrefix(updated, basePrefix) {
var (
parents []os.FileInfo
last = base
)
for _, d := range dirs {
next := filepath.Join(last, d.Name())
if strings.HasPrefix(updated, makePrefix(last)) {
parents = append(parents, d)
last = next
} else {
break
}
}
return base, parents
}
baseS := strings.Split(base, string(filepath.Separator))
updatedS := strings.Split(updated, string(filepath.Separator))
commonS := []string{string(filepath.Separator)}
min := len(baseS)
if len(updatedS) < min {
min = len(updatedS)
}
for i := 0; i < min; i++ {
if baseS[i] == updatedS[i] {
commonS = append(commonS, baseS[i])
} else {
break
}
}
return filepath.Join(commonS...), []os.FileInfo{}
}
func makePrefix(d string) string {
if d == "" || d[len(d)-1] != filepath.Separator {
return d + string(filepath.Separator)
}
return d
}


@ -1,5 +1,7 @@
package fs

+import "context"

// Usage of disk information
type Usage struct {
Inodes int64

@ -11,3 +13,10 @@ type Usage struct {
func DiskUsage(roots ...string) (Usage, error) {
return diskUsage(roots...)
}
// DiffUsage counts the numbers of inodes and disk usage in the
// diff between the 2 directories. The first path is intended
// as the base directory and the second as the changed directory.
func DiffUsage(ctx context.Context, a, b string) (Usage, error) {
return diffUsage(ctx, a, b)
}
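`DiffUsage` complements `DiskUsage` by counting only files added or modified between the two trees. A usage sketch (not from this commit; the directory paths are placeholders), assuming the vendored import path `github.com/containerd/containerd/fs`:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/containerd/containerd/fs"
)

func main() {
	// Compare a lower (base) directory against an upper (changed) directory;
	// the paths here are placeholders.
	usage, err := fs.DiffUsage(context.Background(), "/tmp/base", "/tmp/changed")
	if err != nil {
		log.Fatal(err)
	}
	// Only files added or modified in the upper directory are counted.
	fmt.Printf("diff usage: %d inodes, %d bytes\n", usage.Inodes, usage.Size)
}
```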


@ -3,18 +3,20 @@
package fs

import (
+"context"
"os"
"path/filepath"
"syscall"
)

-func diskUsage(roots ...string) (Usage, error) {
type inode struct {
// TODO(stevvooe): Can probably reduce memory usage by not tracking
// device, but we can leave this right for now.
dev, ino uint64
}

+func diskUsage(roots ...string) (Usage, error) {
var (
size int64
inodes = map[inode]struct{}{} // expensive!

@ -45,3 +47,37 @@ func diskUsage(roots ...string) (Usage, error) {
Size: size,
}, nil
}
func diffUsage(ctx context.Context, a, b string) (Usage, error) {
var (
size int64
inodes = map[inode]struct{}{} // expensive!
)
if err := Changes(ctx, a, b, func(kind ChangeKind, _ string, fi os.FileInfo, err error) error {
if err != nil {
return err
}
if kind == ChangeKindAdd || kind == ChangeKindModify {
stat := fi.Sys().(*syscall.Stat_t)
inoKey := inode{dev: uint64(stat.Dev), ino: uint64(stat.Ino)}
if _, ok := inodes[inoKey]; !ok {
inodes[inoKey] = struct{}{}
size += fi.Size()
}
return nil
}
return nil
}); err != nil {
return Usage{}, err
}
return Usage{
Inodes: int64(len(inodes)),
Size: size,
}, nil
}


@ -3,6 +3,7 @@
package fs

import (
+"context"
"os"
"path/filepath"
)

@ -31,3 +32,29 @@ func diskUsage(roots ...string) (Usage, error) {
Size: size,
}, nil
}
func diffUsage(ctx context.Context, a, b string) (Usage, error) {
var (
size int64
)
if err := Changes(ctx, a, b, func(kind ChangeKind, _ string, fi os.FileInfo, err error) error {
if err != nil {
return err
}
if kind == ChangeKindAdd || kind == ChangeKindModify {
size += fi.Size()
return nil
}
return nil
}); err != nil {
return Usage{}, err
}
return Usage{
Size: size,
}, nil
}


@ -0,0 +1,21 @@
package images
import (
"context"
"io"
"github.com/containerd/containerd/content"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
// Importer is the interface for image importer.
type Importer interface {
// Import imports an image from a tar stream.
Import(ctx context.Context, store content.Store, reader io.Reader) ([]Image, error)
}
// Exporter is the interface for image exporter.
type Exporter interface {
// Export exports an image to a tar stream.
Export(ctx context.Context, store content.Store, desc ocispec.Descriptor, writer io.Writer) error
}
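These two interfaces are all a custom archive format has to satisfy to plug into the new `Client.Import`/`Client.Export`. A hypothetical no-op sketch (not part of the commit) just to show the required method sets:

```go
package images

import (
	"context"
	"io"
	"io/ioutil"

	"github.com/containerd/containerd/content"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// The types below are a hypothetical illustration of satisfying the
// Importer and Exporter interfaces; they are not part of this commit.

// nopImporter drains the stream and reports no images.
type nopImporter struct{}

func (nopImporter) Import(ctx context.Context, store content.Store, reader io.Reader) ([]Image, error) {
	_, err := io.Copy(ioutil.Discard, reader)
	return nil, err
}

// nopExporter writes nothing for the given descriptor.
type nopExporter struct{}

func (nopExporter) Export(ctx context.Context, store content.Store, desc ocispec.Descriptor, writer io.Writer) error {
	return nil
}

// Compile-time checks that the stubs implement the interfaces.
var (
	_ Importer = nopImporter{}
	_ Exporter = nopExporter{}
)
```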


@ -1,120 +0,0 @@
package containerd
import (
"archive/tar"
"context"
"encoding/json"
"io"
"io/ioutil"
"strings"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/reference"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
func resolveOCIIndex(idx ocispec.Index, refObject string) (*ocispec.Descriptor, error) {
tag, dgst := reference.SplitObject(refObject)
if tag == "" && dgst == "" {
return nil, errors.Errorf("unexpected object: %q", refObject)
}
for _, m := range idx.Manifests {
if m.Digest == dgst {
return &m, nil
}
annot, ok := m.Annotations[ocispec.AnnotationRefName]
if ok && annot == tag && tag != "" {
return &m, nil
}
}
return nil, errors.Errorf("not found: %q", refObject)
}
func (c *Client) importFromOCITar(ctx context.Context, ref string, reader io.Reader, iopts importOpts) (Image, error) {
tr := tar.NewReader(reader)
store := c.ContentStore()
var desc *ocispec.Descriptor
for {
hdr, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
return nil, err
}
if hdr.Typeflag != tar.TypeReg && hdr.Typeflag != tar.TypeRegA {
continue
}
if hdr.Name == "index.json" {
desc, err = onUntarIndexJSON(tr, iopts.refObject)
if err != nil {
return nil, err
}
continue
}
if strings.HasPrefix(hdr.Name, "blobs/") {
if err := onUntarBlob(ctx, tr, store, hdr.Name, hdr.Size); err != nil {
return nil, err
}
}
}
if desc == nil {
return nil, errors.Errorf("no descriptor found for reference object %q", iopts.refObject)
}
imgrec := images.Image{
Name: ref,
Target: *desc,
Labels: iopts.labels,
}
is := c.ImageService()
if updated, err := is.Update(ctx, imgrec, "target"); err != nil {
if !errdefs.IsNotFound(err) {
return nil, err
}
created, err := is.Create(ctx, imgrec)
if err != nil {
return nil, err
}
imgrec = created
} else {
imgrec = updated
}
img := &image{
client: c,
i: imgrec,
}
return img, nil
}
func onUntarIndexJSON(r io.Reader, refObject string) (*ocispec.Descriptor, error) {
b, err := ioutil.ReadAll(r)
if err != nil {
return nil, err
}
var idx ocispec.Index
if err := json.Unmarshal(b, &idx); err != nil {
return nil, err
}
return resolveOCIIndex(idx, refObject)
}
func onUntarBlob(ctx context.Context, r io.Reader, store content.Store, name string, size int64) error {
// name is like "blobs/sha256/deadbeef"
split := strings.Split(name, "/")
if len(split) != 3 {
return errors.Errorf("unexpected name: %q", name)
}
algo := digest.Algorithm(split[1])
if !algo.Available() {
return errors.Errorf("unsupported algorithm: %s", algo)
}
dgst := digest.NewDigestFromHex(algo.String(), split[2])
return content.WriteBlob(ctx, store, "unknown-"+dgst.String(), r, size, dgst)
}


@ -114,6 +114,7 @@ func fetch(ctx context.Context, ingester content.Ingester, fetcher Fetcher, desc
func commitOpts(desc ocispec.Descriptor, r io.Reader) (io.Reader, []content.Opt) {
var childrenF func(r io.Reader) ([]ocispec.Descriptor, error)

+// TODO(AkihiroSuda): use images/oci.GetChildrenDescriptors?
switch desc.MediaType {
case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
childrenF = func(r io.Reader) ([]ocispec.Descriptor, error) {