Update containerd to v1.0.0
Signed-off-by: Michael Crosby <crosbymichael@gmail.com>
parent 4047cede65 · commit 18a53d0374
15 changed files with 512 additions and 475 deletions
@@ -4,7 +4,7 @@ TOMLV_COMMIT=9baf8a8a9f2ed20a8e54160840c492f937eeaf9a
 # When updating RUNC_COMMIT, also update runc in vendor.conf accordingly
 RUNC_COMMIT=b2567b37d7b75eb4cf325b77297b140ea686ce8f
-CONTAINERD_COMMIT=cc969fb42f427a68a8cc6870ef47f17304b83962
+CONTAINERD_COMMIT=v1.0.0
 TINI_COMMIT=949e6facb77383876aeff8a6944dde66b3089574
 LIBNETWORK_COMMIT=7b2b1feb1de4817d522cc372af149ff48d25028e
 VNDR_COMMIT=a6e196d8b4b0cbbdc29aebdb20c59ac6926bb384

@@ -103,7 +103,7 @@ github.com/googleapis/gax-go da06d194a00e19ce00d9011a13931c3f6f6887c7
 google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944

 # containerd
-github.com/containerd/containerd cc969fb42f427a68a8cc6870ef47f17304b83962
+github.com/containerd/containerd v1.0.0
 github.com/containerd/fifo fbfb6a11ec671efbe94ad1c12c2e98773f19e1e6
 github.com/containerd/continuity 35d55c5e8dd23b32037d56cf97174aff3efdfa83
 github.com/containerd/cgroups 29da22c6171a4316169f9205ab6c49f59b5b852f
53 vendor/github.com/containerd/containerd/README.md generated vendored

@@ -13,13 +13,23 @@ containerd is designed to be embedded into a larger system, rather than being us
 
 ## Getting Started
 
-If you are interested in trying out containerd please see our [Getting Started Guide](docs/getting-started.md).
+See our documentation on [containerd.io](containerd.io):
+* [for ops and admins](docs/ops.md)
+* [namespaces](docs/namespaces.md)
+* [client options](docs/client-opts.md)
+
+See how to build containerd from source at [BUILDING](BUILDING.md).
+
+If you are interested in trying out containerd see our example at [Getting Started](docs/getting-started.md).
+
 
 ## Runtime Requirements
 
 Runtime requirements for containerd are very minimal. Most interactions with
 the Linux and Windows container feature sets are handled via [runc](https://github.com/opencontainers/runc) and/or
-OS-specific libraries (e.g. [hcsshim](https://github.com/Microsoft/hcsshim) for Microsoft). There are specific features
+OS-specific libraries (e.g. [hcsshim](https://github.com/Microsoft/hcsshim) for Microsoft). The current required version of `runc` is always listed in [RUNC.md](/RUNC.md).
+
+There are specific features
 used by containerd core code and snapshotters that will require a minimum kernel
 version on Linux. With the understood caveat of distro kernel versioning, a
 reasonable starting point for Linux is a minimum 4.x kernel version.

@@ -33,9 +43,7 @@ distribution.
 To use Linux checkpoint and restore features, you will need `criu` installed on
 your system. See more details in [Checkpoint and Restore](#checkpoint-and-restore).
 
-The current required version of runc is always listed in [RUNC.md](/RUNC.md).
-
-Build requirements for developers are listed in the [Developer Quick-Start](#developer-quick-start) section.
+Build requirements for developers are listed in [BUILDING](BUILDING.md).
 
 ## Features

@@ -45,7 +53,11 @@ containerd offers a full client package to help you integrate containerd into yo
 
 ```go
-import "github.com/containerd/containerd"
+import (
+	"github.com/containerd/containerd"
+	"github.com/containerd/containerd/cio"
+)
 
 func main() {
 	client, err := containerd.New("/run/containerd/containerd.sock")

@@ -133,7 +145,7 @@ Taking a container object and turning it into a runnable process on a system is
 ```go
 // create a new task
-task, err := redis.NewTask(context, containerd.Stdio)
+task, err := redis.NewTask(context, cio.Stdio)
 defer task.Delete(context)
 
 // the task is now running and has a pid that can be use to setup networking

@@ -165,37 +177,12 @@ checkpoint := image.Target()
 redis, err = client.NewContainer(context, "redis-master", containerd.WithCheckpoint(checkpoint, "redis-rootfs"))
 defer container.Delete(context)
 
-task, err = redis.NewTask(context, containerd.Stdio, containerd.WithTaskCheckpoint(checkpoint))
+task, err = redis.NewTask(context, cio.Stdio, containerd.WithTaskCheckpoint(checkpoint))
 defer task.Delete(context)
 
 err := task.Start(context)
 ```
 
-## Developer Quick Start
-
-To build the daemon and `ctr` simple test client, the following build system dependencies are required:
-
-* Go 1.9.x or above
-* Protoc 3.x compiler and headers (download at the [Google protobuf releases page](https://github.com/google/protobuf/releases))
-* Btrfs headers and libraries for your distribution. Note that building the btrfs driver can be disabled via build tag removing this dependency.
-
-For proper results, install the `protoc` release into `/usr/local` on your build system. For example, the following commands will download and install the 3.5.0 release for a 64-bit Linux host:
-
-```
-$ wget -c https://github.com/google/protobuf/releases/download/v3.5.0/protoc-3.5.0-linux-x86_64.zip
-$ sudo unzip protoc-3.5.0-linux-x86_64.zip -d /usr/local
-```
-
-With the required dependencies installed, the `Makefile` target named **binaries** will compile the `ctr` and `containerd` binaries and place them in the `bin/` directory. Using `sudo make install` will place the binaries in `/usr/local/bin`. When making any changes to the gRPC API, `make generate` will use the installed `protoc` compiler to regenerate the API generated code packages.
-
-> *Note*: A build tag is currently available to disable building the btrfs snapshot driver.
-> Adding `BUILDTAGS=no_btrfs` to your environment before calling the **binaries**
-> Makefile target will disable the btrfs driver within the containerd Go build.
-
-Vendoring of external imports uses the [`vndr` tool](https://github.com/LK4D4/vndr) which uses a simple config file, `vendor.conf`, to provide the URL and version or hash details for each vendored import. After modifying `vendor.conf` run the `vndr` tool to update the `vendor/` directory contents. Combining the `vendor.conf` update with the changeset in `vendor/` after running `vndr` should become a single commit for a PR which relies on vendored updates.
-
-Please refer to [RUNC.md](/RUNC.md) for the currently supported version of `runc` that is used by containerd.
-
 ### Releases and API Stability
 
 Please see [RELEASES.md](RELEASES.md) for details on versioning and stability
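The README hunks above only show fragments of the containerd 1.0 client API. As a hedged illustration of how they fit together, here is a minimal sketch; the socket path, the namespace name, and the use of `Version` as a connectivity check are assumptions made for illustration, not part of this diff.

```go
package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/namespaces"
)

func main() {
	// Connect to the containerd socket, as in the README example above.
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// All client operations are namespaced in containerd 1.0.
	ctx := namespaces.WithNamespace(context.Background(), "example")

	// Ask the daemon for its version as a simple connectivity check.
	version, err := client.Version(ctx)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("connected to containerd %+v", version)
}
```

Task creation with `cio.Stdio` then follows the same pattern as the README hunks above.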
164 vendor/github.com/containerd/containerd/client.go generated vendored

@@ -7,6 +7,7 @@ import (
 	"net/http"
 	"runtime"
 	"strconv"
+	"strings"
 	"sync"
 	"time"
 
@@ -29,7 +30,6 @@ import (
 	"github.com/containerd/containerd/namespaces"
 	"github.com/containerd/containerd/platforms"
 	"github.com/containerd/containerd/plugin"
-	"github.com/containerd/containerd/reference"
 	"github.com/containerd/containerd/remotes"
 	"github.com/containerd/containerd/remotes/docker"
 	"github.com/containerd/containerd/remotes/docker/schema1"

@@ -334,6 +334,14 @@ func (c *Client) Push(ctx context.Context, ref string, desc ocispec.Descriptor,
 	for i := len(manifestStack) - 1; i >= 0; i-- {
 		_, err := pushHandler(ctx, manifestStack[i])
 		if err != nil {
+			// TODO(estesp): until we have a more complete method for index push, we need to report
+			// missing dependencies in an index/manifest list by sensing the "400 Bad Request"
+			// as a marker for this problem
+			if (manifestStack[i].MediaType == ocispec.MediaTypeImageIndex ||
+				manifestStack[i].MediaType == images.MediaTypeDockerSchema2ManifestList) &&
+				errors.Cause(err) != nil && strings.Contains(errors.Cause(err).Error(), "400 Bad Request") {
+				return errors.Wrap(err, "manifest list/index references to blobs and/or manifests are missing in your target registry")
+			}
 			return err
 		}
 	}
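The block added above only changes how a failed push is reported. As a hedged sketch of the call site (continuing the `client`/`ctx` variables from the sketch after the README section; both image references are placeholders):

```go
// Resolve a local image and push it. For a manifest list whose referenced
// manifests are missing from the target registry, the hunk above now wraps
// the underlying "400 Bad Request" error with a descriptive message.
img, err := client.GetImage(ctx, "docker.io/library/redis:latest")
if err != nil {
	log.Fatal(err)
}
if err := client.Push(ctx, "registry.example.com/redis:latest", img.Target()); err != nil {
	log.Fatal(err)
}
```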
@@ -494,95 +502,27 @@ func (c *Client) Version(ctx context.Context) (Version, error) {
 	}, nil
 }
 
-type imageFormat string
-
-const (
-	ociImageFormat imageFormat = "oci"
-)
-
 type importOpts struct {
-	format    imageFormat
-	refObject string
-	labels    map[string]string
 }
 
 // ImportOpt allows the caller to specify import specific options
 type ImportOpt func(c *importOpts) error
 
-// WithImportLabel sets a label to be associated with an imported image
-func WithImportLabel(key, value string) ImportOpt {
-	return func(opts *importOpts) error {
-		if opts.labels == nil {
-			opts.labels = make(map[string]string)
-		}
-
-		opts.labels[key] = value
-		return nil
-	}
-}
-
-// WithImportLabels associates a set of labels to an imported image
-func WithImportLabels(labels map[string]string) ImportOpt {
-	return func(opts *importOpts) error {
-		if opts.labels == nil {
-			opts.labels = make(map[string]string)
-		}
-
-		for k, v := range labels {
-			opts.labels[k] = v
-		}
-		return nil
-	}
-}
-
-// WithOCIImportFormat sets the import format for an OCI image format
-func WithOCIImportFormat() ImportOpt {
-	return func(c *importOpts) error {
-		if c.format != "" {
-			return errors.New("format already set")
-		}
-		c.format = ociImageFormat
-		return nil
-	}
-}
-
-// WithRefObject specifies the ref object to import.
-// If refObject is empty, it is copied from the ref argument of Import().
-func WithRefObject(refObject string) ImportOpt {
-	return func(c *importOpts) error {
-		c.refObject = refObject
-		return nil
-	}
-}
-
-func resolveImportOpt(ref string, opts ...ImportOpt) (importOpts, error) {
+func resolveImportOpt(opts ...ImportOpt) (importOpts, error) {
 	var iopts importOpts
 	for _, o := range opts {
 		if err := o(&iopts); err != nil {
 			return iopts, err
 		}
 	}
-	// use OCI as the default format
-	if iopts.format == "" {
-		iopts.format = ociImageFormat
-	}
-	// if refObject is not explicitly specified, use the one specified in ref
-	if iopts.refObject == "" {
-		refSpec, err := reference.Parse(ref)
-		if err != nil {
-			return iopts, err
-		}
-		iopts.refObject = refSpec.Object
-	}
 	return iopts, nil
 }
 
 // Import imports an image from a Tar stream using reader.
-// OCI format is assumed by default.
-//
-// Note that unreferenced blobs are imported to the content store as well.
-func (c *Client) Import(ctx context.Context, ref string, reader io.Reader, opts ...ImportOpt) (Image, error) {
-	iopts, err := resolveImportOpt(ref, opts...)
+// Caller needs to specify importer. Future version may use oci.v1 as the default.
+// Note that unreferrenced blobs may be imported to the content store as well.
+func (c *Client) Import(ctx context.Context, importer images.Importer, reader io.Reader, opts ...ImportOpt) ([]Image, error) {
+	_, err := resolveImportOpt(opts...) // unused now
 	if err != nil {
 		return nil, err
 	}

@@ -593,58 +533,66 @@ func (c *Client) Import(ctx context.Context, ref string, reader io.Reader, opts
 	}
 	defer done()
 
-	switch iopts.format {
-	case ociImageFormat:
-		return c.importFromOCITar(ctx, ref, reader, iopts)
-	default:
-		return nil, errors.Errorf("unsupported format: %s", iopts.format)
+	imgrecs, err := importer.Import(ctx, c.ContentStore(), reader)
+	if err != nil {
+		// is.Update() is not called on error
+		return nil, err
 	}
+
+	is := c.ImageService()
+	var images []Image
+	for _, imgrec := range imgrecs {
+		if updated, err := is.Update(ctx, imgrec, "target"); err != nil {
+			if !errdefs.IsNotFound(err) {
+				return nil, err
+			}
+
+			created, err := is.Create(ctx, imgrec)
+			if err != nil {
+				return nil, err
+			}
+
+			imgrec = created
+		} else {
+			imgrec = updated
+		}
+
+		images = append(images, &image{
+			client: c,
+			i:      imgrec,
+		})
+	}
+	return images, nil
 }
 
 type exportOpts struct {
-	format imageFormat
 }
 
-// ExportOpt allows callers to set export options
+// ExportOpt allows the caller to specify export-specific options
 type ExportOpt func(c *exportOpts) error
 
-// WithOCIExportFormat sets the OCI image format as the export target
-func WithOCIExportFormat() ExportOpt {
-	return func(c *exportOpts) error {
-		if c.format != "" {
-			return errors.New("format already set")
-		}
-		c.format = ociImageFormat
-		return nil
+func resolveExportOpt(opts ...ExportOpt) (exportOpts, error) {
+	var eopts exportOpts
+	for _, o := range opts {
+		if err := o(&eopts); err != nil {
+			return eopts, err
 		}
 	}
+
+	// TODO: add WithMediaTypeTranslation that transforms media types according to the format.
+	// e.g. application/vnd.docker.image.rootfs.diff.tar.gzip
+	//      -> application/vnd.oci.image.layer.v1.tar+gzip
+	return eopts, nil
 }
 
 // Export exports an image to a Tar stream.
 // OCI format is used by default.
 // It is up to caller to put "org.opencontainers.image.ref.name" annotation to desc.
-func (c *Client) Export(ctx context.Context, desc ocispec.Descriptor, opts ...ExportOpt) (io.ReadCloser, error) {
-	var eopts exportOpts
-	for _, o := range opts {
-		if err := o(&eopts); err != nil {
+// TODO(AkihiroSuda): support exporting multiple descriptors at once to a single archive stream.
+func (c *Client) Export(ctx context.Context, exporter images.Exporter, desc ocispec.Descriptor, opts ...ExportOpt) (io.ReadCloser, error) {
+	_, err := resolveExportOpt(opts...) // unused now
+	if err != nil {
 		return nil, err
 	}
-	}
-	// use OCI as the default format
-	if eopts.format == "" {
-		eopts.format = ociImageFormat
-	}
 	pr, pw := io.Pipe()
-	switch eopts.format {
-	case ociImageFormat:
 	go func() {
-		pw.CloseWithError(c.exportToOCITar(ctx, desc, pw, eopts))
+		pw.CloseWithError(exporter.Export(ctx, c.ContentStore(), desc, pw))
 	}()
-	default:
-		return nil, errors.Errorf("unsupported format: %s", eopts.format)
-	}
 	return pr, nil
 }
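As a hedged illustration of the new signatures above (continuing the `client`/`ctx` sketch from the README section): `myImporter`, `myExporter`, and `tarStream` stand for any `images.Importer`/`images.Exporter` implementation and any `io.Reader`, and are assumptions, not types or values provided by this diff.

```go
// Import now takes an images.Importer and returns a slice of images.
imgs, err := client.Import(ctx, myImporter, tarStream)
if err != nil {
	log.Fatal(err)
}

// Export now takes an images.Exporter plus the descriptor to export.
rc, err := client.Export(ctx, myExporter, imgs[0].Target())
if err != nil {
	log.Fatal(err)
}
defer rc.Close()
```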
189 vendor/github.com/containerd/containerd/export.go generated vendored (file removed)

@@ -1,189 +0,0 @@
package containerd

import (
	"archive/tar"
	"context"
	"encoding/json"
	"io"
	"sort"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/platforms"
	ocispecs "github.com/opencontainers/image-spec/specs-go"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
)

func (c *Client) exportToOCITar(ctx context.Context, desc ocispec.Descriptor, writer io.Writer, eopts exportOpts) error {
	tw := tar.NewWriter(writer)
	defer tw.Close()

	records := []tarRecord{
		ociLayoutFile(""),
		ociIndexRecord(desc),
	}

	cs := c.ContentStore()
	algorithms := map[string]struct{}{}
	exportHandler := func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
		records = append(records, blobRecord(cs, desc))
		algorithms[desc.Digest.Algorithm().String()] = struct{}{}
		return nil, nil
	}

	handlers := images.Handlers(
		images.ChildrenHandler(cs, platforms.Default()),
		images.HandlerFunc(exportHandler),
	)

	// Walk sequentially since the number of fetchs is likely one and doing in
	// parallel requires locking the export handler
	if err := images.Walk(ctx, handlers, desc); err != nil {
		return err
	}

	if len(algorithms) > 0 {
		records = append(records, directoryRecord("blobs/", 0755))
		for alg := range algorithms {
			records = append(records, directoryRecord("blobs/"+alg+"/", 0755))
		}
	}

	return writeTar(ctx, tw, records)
}

type tarRecord struct {
	Header *tar.Header
	CopyTo func(context.Context, io.Writer) (int64, error)
}

func blobRecord(cs content.Store, desc ocispec.Descriptor) tarRecord {
	path := "blobs/" + desc.Digest.Algorithm().String() + "/" + desc.Digest.Hex()
	return tarRecord{
		Header: &tar.Header{
			Name:     path,
			Mode:     0444,
			Size:     desc.Size,
			Typeflag: tar.TypeReg,
		},
		CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
			r, err := cs.ReaderAt(ctx, desc.Digest)
			if err != nil {
				return 0, err
			}
			defer r.Close()

			// Verify digest
			dgstr := desc.Digest.Algorithm().Digester()

			n, err := io.Copy(io.MultiWriter(w, dgstr.Hash()), content.NewReader(r))
			if err != nil {
				return 0, err
			}
			if dgstr.Digest() != desc.Digest {
				return 0, errors.Errorf("unexpected digest %s copied", dgstr.Digest())
			}
			return n, nil
		},
	}
}

func directoryRecord(name string, mode int64) tarRecord {
	return tarRecord{
		Header: &tar.Header{
			Name:     name,
			Mode:     mode,
			Typeflag: tar.TypeDir,
		},
	}
}

func ociLayoutFile(version string) tarRecord {
	if version == "" {
		version = ocispec.ImageLayoutVersion
	}
	layout := ocispec.ImageLayout{
		Version: version,
	}

	b, err := json.Marshal(layout)
	if err != nil {
		panic(err)
	}

	return tarRecord{
		Header: &tar.Header{
			Name:     ocispec.ImageLayoutFile,
			Mode:     0444,
			Size:     int64(len(b)),
			Typeflag: tar.TypeReg,
		},
		CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
			n, err := w.Write(b)
			return int64(n), err
		},
	}

}

func ociIndexRecord(manifests ...ocispec.Descriptor) tarRecord {
	index := ocispec.Index{
		Versioned: ocispecs.Versioned{
			SchemaVersion: 2,
		},
		Manifests: manifests,
	}

	b, err := json.Marshal(index)
	if err != nil {
		panic(err)
	}

	return tarRecord{
		Header: &tar.Header{
			Name:     "index.json",
			Mode:     0644,
			Size:     int64(len(b)),
			Typeflag: tar.TypeReg,
		},
		CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
			n, err := w.Write(b)
			return int64(n), err
		},
	}
}

func writeTar(ctx context.Context, tw *tar.Writer, records []tarRecord) error {
	sort.Sort(tarRecordsByName(records))

	for _, record := range records {
		if err := tw.WriteHeader(record.Header); err != nil {
			return err
		}
		if record.CopyTo != nil {
			n, err := record.CopyTo(ctx, tw)
			if err != nil {
				return err
			}
			if n != record.Header.Size {
				return errors.Errorf("unexpected copy size for %s", record.Header.Name)
			}
		} else if record.Header.Size > 0 {
			return errors.Errorf("no content to write to record with non-zero size for %s", record.Header.Name)
		}
	}
	return nil
}

type tarRecordsByName []tarRecord

func (t tarRecordsByName) Len() int {
	return len(t)
}
func (t tarRecordsByName) Swap(i, j int) {
	t[i], t[j] = t[j], t[i]
}
func (t tarRecordsByName) Less(i, j int) bool {
	return t[i].Header.Name < t[j].Header.Name
}
22 vendor/github.com/containerd/containerd/filters/parser.go generated vendored

@@ -3,7 +3,6 @@ package filters
 import (
 	"fmt"
 	"io"
-	"strconv"
 
 	"github.com/containerd/containerd/errdefs"
 	"github.com/pkg/errors"

@@ -134,7 +133,12 @@ func (p *parser) selector() (selector, error) {
 		return selector{}, err
 	}
 
-	value, err := p.value()
+	var allowAltQuotes bool
+	if op == operatorMatches {
+		allowAltQuotes = true
+	}
+
+	value, err := p.value(allowAltQuotes)
 	if err != nil {
 		if err == io.EOF {
 			return selector{}, io.ErrUnexpectedEOF

@@ -188,7 +192,7 @@ func (p *parser) field() (string, error) {
 	case tokenField:
 		return s, nil
 	case tokenQuoted:
-		return p.unquote(pos, s)
+		return p.unquote(pos, s, false)
 	}
 
 	return "", p.mkerr(pos, "expected field or quoted")

@@ -213,21 +217,25 @@ func (p *parser) operator() (operator, error) {
 	return 0, p.mkerr(pos, `expected an operator ("=="|"!="|"~=")`)
 }
 
-func (p *parser) value() (string, error) {
+func (p *parser) value(allowAltQuotes bool) (string, error) {
 	pos, tok, s := p.scanner.scan()
 
 	switch tok {
 	case tokenValue, tokenField:
 		return s, nil
 	case tokenQuoted:
-		return p.unquote(pos, s)
+		return p.unquote(pos, s, allowAltQuotes)
 	}
 
 	return "", p.mkerr(pos, "expected value or quoted")
 }
 
-func (p *parser) unquote(pos int, s string) (string, error) {
-	uq, err := strconv.Unquote(s)
+func (p *parser) unquote(pos int, s string, allowAlts bool) (string, error) {
+	if !allowAlts && s[0] != '\'' && s[0] != '"' {
+		return "", p.mkerr(pos, "invalid quote encountered")
+	}
+
+	uq, err := unquote(s)
 	if err != nil {
 		return "", p.mkerr(pos, "unquoting failed: %v", err)
 	}
 
237 vendor/github.com/containerd/containerd/filters/quote.go generated vendored Normal file

@@ -0,0 +1,237 @@
package filters

import (
	"unicode/utf8"

	"github.com/pkg/errors"
)

// NOTE(stevvooe): Most of this code in this file is copied from the stdlib
// strconv package and modified to be able to handle quoting with `/` and `|`
// as delimiters. The copyright is held by the Go authors.

var errQuoteSyntax = errors.New("quote syntax error")

// UnquoteChar decodes the first character or byte in the escaped string
// or character literal represented by the string s.
// It returns four values:
//
// 1) value, the decoded Unicode code point or byte value;
// 2) multibyte, a boolean indicating whether the decoded character requires a multibyte UTF-8 representation;
// 3) tail, the remainder of the string after the character; and
// 4) an error that will be nil if the character is syntactically valid.
//
// The second argument, quote, specifies the type of literal being parsed
// and therefore which escaped quote character is permitted.
// If set to a single quote, it permits the sequence \' and disallows unescaped '.
// If set to a double quote, it permits \" and disallows unescaped ".
// If set to zero, it does not permit either escape and allows both quote characters to appear unescaped.
//
// This is from Go strconv package, modified to support `|` and `/` as double
// quotes for use with regular expressions.
func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) {
	// easy cases
	switch c := s[0]; {
	case c == quote && (quote == '\'' || quote == '"' || quote == '/' || quote == '|'):
		err = errQuoteSyntax
		return
	case c >= utf8.RuneSelf:
		r, size := utf8.DecodeRuneInString(s)
		return r, true, s[size:], nil
	case c != '\\':
		return rune(s[0]), false, s[1:], nil
	}

	// hard case: c is backslash
	if len(s) <= 1 {
		err = errQuoteSyntax
		return
	}
	c := s[1]
	s = s[2:]

	switch c {
	case 'a':
		value = '\a'
	case 'b':
		value = '\b'
	case 'f':
		value = '\f'
	case 'n':
		value = '\n'
	case 'r':
		value = '\r'
	case 't':
		value = '\t'
	case 'v':
		value = '\v'
	case 'x', 'u', 'U':
		n := 0
		switch c {
		case 'x':
			n = 2
		case 'u':
			n = 4
		case 'U':
			n = 8
		}
		var v rune
		if len(s) < n {
			err = errQuoteSyntax
			return
		}
		for j := 0; j < n; j++ {
			x, ok := unhex(s[j])
			if !ok {
				err = errQuoteSyntax
				return
			}
			v = v<<4 | x
		}
		s = s[n:]
		if c == 'x' {
			// single-byte string, possibly not UTF-8
			value = v
			break
		}
		if v > utf8.MaxRune {
			err = errQuoteSyntax
			return
		}
		value = v
		multibyte = true
	case '0', '1', '2', '3', '4', '5', '6', '7':
		v := rune(c) - '0'
		if len(s) < 2 {
			err = errQuoteSyntax
			return
		}
		for j := 0; j < 2; j++ { // one digit already; two more
			x := rune(s[j]) - '0'
			if x < 0 || x > 7 {
				err = errQuoteSyntax
				return
			}
			v = (v << 3) | x
		}
		s = s[2:]
		if v > 255 {
			err = errQuoteSyntax
			return
		}
		value = v
	case '\\':
		value = '\\'
	case '\'', '"', '|', '/':
		if c != quote {
			err = errQuoteSyntax
			return
		}
		value = rune(c)
	default:
		err = errQuoteSyntax
		return
	}
	tail = s
	return
}

// unquote interprets s as a single-quoted, double-quoted,
// or backquoted Go string literal, returning the string value
// that s quotes. (If s is single-quoted, it would be a Go
// character literal; Unquote returns the corresponding
// one-character string.)
//
// This is modified from the standard library to support `|` and `/` as quote
// characters for use with regular expressions.
func unquote(s string) (string, error) {
	n := len(s)
	if n < 2 {
		return "", errQuoteSyntax
	}
	quote := s[0]
	if quote != s[n-1] {
		return "", errQuoteSyntax
	}
	s = s[1 : n-1]

	if quote == '`' {
		if contains(s, '`') {
			return "", errQuoteSyntax
		}
		if contains(s, '\r') {
			// -1 because we know there is at least one \r to remove.
			buf := make([]byte, 0, len(s)-1)
			for i := 0; i < len(s); i++ {
				if s[i] != '\r' {
					buf = append(buf, s[i])
				}
			}
			return string(buf), nil
		}
		return s, nil
	}
	if quote != '"' && quote != '\'' && quote != '|' && quote != '/' {
		return "", errQuoteSyntax
	}
	if contains(s, '\n') {
		return "", errQuoteSyntax
	}

	// Is it trivial? Avoid allocation.
	if !contains(s, '\\') && !contains(s, quote) {
		switch quote {
		case '"', '/', '|': // pipe and slash are treated like double quote
			return s, nil
		case '\'':
			r, size := utf8.DecodeRuneInString(s)
			if size == len(s) && (r != utf8.RuneError || size != 1) {
				return s, nil
			}
		}
	}

	var runeTmp [utf8.UTFMax]byte
	buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations.
	for len(s) > 0 {
		c, multibyte, ss, err := unquoteChar(s, quote)
		if err != nil {
			return "", err
		}
		s = ss
		if c < utf8.RuneSelf || !multibyte {
			buf = append(buf, byte(c))
		} else {
			n := utf8.EncodeRune(runeTmp[:], c)
			buf = append(buf, runeTmp[:n]...)
		}
		if quote == '\'' && len(s) != 0 {
			// single-quoted must be single character
			return "", errQuoteSyntax
		}
	}
	return string(buf), nil
}

// contains reports whether the string contains the byte c.
func contains(s string, c byte) bool {
	for i := 0; i < len(s); i++ {
		if s[i] == c {
			return true
		}
	}
	return false
}

func unhex(b byte) (v rune, ok bool) {
	c := rune(b)
	switch {
	case '0' <= c && c <= '9':
		return c - '0', true
	case 'a' <= c && c <= 'f':
		return c - 'a' + 10, true
	case 'A' <= c && c <= 'F':
		return c - 'A' + 10, true
	}
	return
}
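A hedged sketch of what the new quote support enables at the filter level: the `~=` operator can take a regular expression quoted with `/` (or `|`). The filter string below is an assumption chosen for illustration; `filters.Parse` is the package's public entry point.

```go
package main

import (
	"fmt"
	"log"

	"github.com/containerd/containerd/filters"
)

func main() {
	// The /.../ quoting is handled by the vendored quote.go above; it lets a
	// regular expression contain characters a plain token could not.
	f, err := filters.Parse(`name~=/^redis-.*/,labels.env==production`)
	if err != nil {
		log.Fatal(err)
	}
	// f can now be matched against any value that implements filters.Adaptor.
	fmt.Printf("parsed filter: %v\n", f)
}
```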
5 vendor/github.com/containerd/containerd/filters/scanner.go generated vendored

@@ -87,7 +87,7 @@ func (s *scanner) peek() rune {
 	return ch
 }
 
-func (s *scanner) scan() (int, token, string) {
+func (s *scanner) scan() (nextp int, tk token, text string) {
 	var (
 		ch  = s.next()
 		pos = s.pos

@@ -101,6 +101,7 @@ chomp:
 		s.scanQuoted(ch)
 		return pos, tokenQuoted, s.input[pos:s.ppos]
 	case isSeparatorRune(ch):
+		s.value = false
 		return pos, tokenSeparator, s.input[pos:s.ppos]
 	case isOperatorRune(ch):
 		s.scanOperator()

@@ -241,7 +242,7 @@ func isOperatorRune(r rune) bool {
 
 func isQuoteRune(r rune) bool {
 	switch r {
-	case '"': // maybe add single quoting?
+	case '/', '|', '"': // maybe add single quoting?
 		return true
 	}
 
75 vendor/github.com/containerd/containerd/fs/diff.go generated vendored

@@ -224,6 +224,8 @@ func doubleWalkDiff(ctx context.Context, changeFn ChangeFunc, a, b string) (err
 
 		f1, f2 *currentPath
 		rmdir  string
+		lastEmittedDir = string(filepath.Separator)
+		parents        []os.FileInfo
 	)
 	g.Go(func() error {
 		defer close(c1)

@@ -258,7 +260,10 @@ func doubleWalkDiff(ctx context.Context, changeFn ChangeFunc, a, b string) (err
 				continue
 			}
 
-			var f os.FileInfo
+			var (
+				f    os.FileInfo
+				emit = true
+			)
 			k, p := pathChange(f1, f2)
 			switch k {
 			case ChangeKindAdd:

@@ -294,17 +299,83 @@ func doubleWalkDiff(ctx context.Context, changeFn ChangeFunc, a, b string) (err
 				f2 = nil
 				if same {
 					if !isLinked(f) {
-						continue
+						emit = false
 					}
 					k = ChangeKindUnmodified
 				}
 			}
+			if emit {
+				emittedDir, emitParents := commonParents(lastEmittedDir, p, parents)
+				for _, pf := range emitParents {
+					p := filepath.Join(emittedDir, pf.Name())
+					if err := changeFn(ChangeKindUnmodified, p, pf, nil); err != nil {
+						return err
+					}
+					emittedDir = p
+				}
+
 				if err := changeFn(k, p, f, nil); err != nil {
 					return err
 				}
+
+				if f != nil && f.IsDir() {
+					lastEmittedDir = p
+				} else {
+					lastEmittedDir = emittedDir
+				}
+
+				parents = parents[:0]
+			} else if f.IsDir() {
+				lastEmittedDir, parents = commonParents(lastEmittedDir, p, parents)
+				parents = append(parents, f)
+			}
 		}
 		return nil
 	})
 
 	return g.Wait()
 }
+
+func commonParents(base, updated string, dirs []os.FileInfo) (string, []os.FileInfo) {
+	if basePrefix := makePrefix(base); strings.HasPrefix(updated, basePrefix) {
+		var (
+			parents []os.FileInfo
+			last    = base
+		)
+		for _, d := range dirs {
+			next := filepath.Join(last, d.Name())
+			if strings.HasPrefix(updated, makePrefix(last)) {
+				parents = append(parents, d)
+				last = next
+			} else {
+				break
+			}
+		}
+		return base, parents
+	}
+
+	baseS := strings.Split(base, string(filepath.Separator))
+	updatedS := strings.Split(updated, string(filepath.Separator))
+	commonS := []string{string(filepath.Separator)}
+
+	min := len(baseS)
+	if len(updatedS) < min {
+		min = len(updatedS)
+	}
+	for i := 0; i < min; i++ {
+		if baseS[i] == updatedS[i] {
+			commonS = append(commonS, baseS[i])
+		} else {
+			break
+		}
+	}
+
+	return filepath.Join(commonS...), []os.FileInfo{}
+}
+
+func makePrefix(d string) string {
+	if d == "" || d[len(d)-1] != filepath.Separator {
+		return d + string(filepath.Separator)
+	}
+	return d
+}
9 vendor/github.com/containerd/containerd/fs/du.go generated vendored

@@ -1,5 +1,7 @@
 package fs
 
+import "context"
+
 // Usage of disk information
 type Usage struct {
 	Inodes int64

@@ -11,3 +13,10 @@ type Usage struct {
 func DiskUsage(roots ...string) (Usage, error) {
 	return diskUsage(roots...)
 }
+
+// DiffUsage counts the numbers of inodes and disk usage in the
+// diff between the 2 directories. The first path is intended
+// as the base directory and the second as the changed directory.
+func DiffUsage(ctx context.Context, a, b string) (Usage, error) {
+	return diffUsage(ctx, a, b)
+}
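A hedged usage sketch of the two entry points above; the directory paths are placeholders, not values taken from this diff.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/containerd/containerd/fs"
)

func main() {
	// Total usage of one or more directory trees.
	total, err := fs.DiskUsage("/var/lib/containerd")
	if err != nil {
		log.Fatal(err)
	}

	// Usage of only what changed between a base and an upper directory,
	// per the DiffUsage addition above.
	diff, err := fs.DiffUsage(context.Background(), "/tmp/base", "/tmp/upper")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("total: %d bytes (%d inodes), diff: %d bytes\n", total.Size, total.Inodes, diff.Size)
}
```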
38 vendor/github.com/containerd/containerd/fs/du_unix.go generated vendored

@@ -3,18 +3,20 @@
 package fs
 
 import (
+	"context"
 	"os"
 	"path/filepath"
 	"syscall"
 )
 
-func diskUsage(roots ...string) (Usage, error) {
 type inode struct {
 	// TODO(stevvooe): Can probably reduce memory usage by not tracking
 	// device, but we can leave this right for now.
 	dev, ino uint64
 }
 
+func diskUsage(roots ...string) (Usage, error) {
+
 	var (
 		size   int64
 		inodes = map[inode]struct{}{} // expensive!

@@ -45,3 +47,37 @@ func diskUsage(roots ...string) (Usage, error) {
 		Size:   size,
 	}, nil
 }
+
+func diffUsage(ctx context.Context, a, b string) (Usage, error) {
+	var (
+		size   int64
+		inodes = map[inode]struct{}{} // expensive!
+	)
+
+	if err := Changes(ctx, a, b, func(kind ChangeKind, _ string, fi os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		if kind == ChangeKindAdd || kind == ChangeKindModify {
+			stat := fi.Sys().(*syscall.Stat_t)
+
+			inoKey := inode{dev: uint64(stat.Dev), ino: uint64(stat.Ino)}
+			if _, ok := inodes[inoKey]; !ok {
+				inodes[inoKey] = struct{}{}
+				size += fi.Size()
+			}
+
+			return nil
+
+		}
+		return nil
+	}); err != nil {
+		return Usage{}, err
+	}
+
+	return Usage{
+		Inodes: int64(len(inodes)),
+		Size:   size,
+	}, nil
+}
27 vendor/github.com/containerd/containerd/fs/du_windows.go generated vendored

@@ -3,6 +3,7 @@
 package fs
 
 import (
+	"context"
 	"os"
 	"path/filepath"
 )

@@ -31,3 +32,29 @@ func diskUsage(roots ...string) (Usage, error) {
 		Size: size,
 	}, nil
 }
+
+func diffUsage(ctx context.Context, a, b string) (Usage, error) {
+	var (
+		size int64
+	)
+
+	if err := Changes(ctx, a, b, func(kind ChangeKind, _ string, fi os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		if kind == ChangeKindAdd || kind == ChangeKindModify {
+			size += fi.Size()
+
+			return nil
+
+		}
+		return nil
+	}); err != nil {
+		return Usage{}, err
+	}
+
+	return Usage{
+		Size: size,
+	}, nil
+}
21 vendor/github.com/containerd/containerd/images/importexport.go generated vendored Normal file

@@ -0,0 +1,21 @@
package images

import (
	"context"
	"io"

	"github.com/containerd/containerd/content"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// Importer is the interface for image importer.
type Importer interface {
	// Import imports an image from a tar stream.
	Import(ctx context.Context, store content.Store, reader io.Reader) ([]Image, error)
}

// Exporter is the interface for image exporter.
type Exporter interface {
	// Export exports an image to a tar stream.
	Export(ctx context.Context, store content.Store, desc ocispec.Descriptor, writer io.Writer) error
}
vendor/github.com/containerd/containerd/import.go
generated
vendored
120
vendor/github.com/containerd/containerd/import.go
generated
vendored
|
@ -1,120 +0,0 @@
|
|||
package containerd
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/containerd/content"
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/images"
|
||||
"github.com/containerd/containerd/reference"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func resolveOCIIndex(idx ocispec.Index, refObject string) (*ocispec.Descriptor, error) {
|
||||
tag, dgst := reference.SplitObject(refObject)
|
||||
if tag == "" && dgst == "" {
|
||||
return nil, errors.Errorf("unexpected object: %q", refObject)
|
||||
}
|
||||
for _, m := range idx.Manifests {
|
||||
if m.Digest == dgst {
|
||||
return &m, nil
|
||||
}
|
||||
annot, ok := m.Annotations[ocispec.AnnotationRefName]
|
||||
if ok && annot == tag && tag != "" {
|
||||
return &m, nil
|
||||
}
|
||||
}
|
||||
return nil, errors.Errorf("not found: %q", refObject)
|
||||
}
|
||||
|
||||
func (c *Client) importFromOCITar(ctx context.Context, ref string, reader io.Reader, iopts importOpts) (Image, error) {
|
||||
tr := tar.NewReader(reader)
|
||||
store := c.ContentStore()
|
||||
var desc *ocispec.Descriptor
|
||||
for {
|
||||
hdr, err := tr.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if hdr.Typeflag != tar.TypeReg && hdr.Typeflag != tar.TypeRegA {
|
||||
continue
|
||||
}
|
||||
if hdr.Name == "index.json" {
|
||||
desc, err = onUntarIndexJSON(tr, iopts.refObject)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(hdr.Name, "blobs/") {
|
||||
if err := onUntarBlob(ctx, tr, store, hdr.Name, hdr.Size); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
if desc == nil {
|
||||
return nil, errors.Errorf("no descriptor found for reference object %q", iopts.refObject)
|
||||
}
|
||||
imgrec := images.Image{
|
||||
Name: ref,
|
||||
Target: *desc,
|
||||
Labels: iopts.labels,
|
||||
}
|
||||
is := c.ImageService()
|
||||
if updated, err := is.Update(ctx, imgrec, "target"); err != nil {
|
||||
if !errdefs.IsNotFound(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
created, err := is.Create(ctx, imgrec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
imgrec = created
|
||||
} else {
|
||||
imgrec = updated
|
||||
}
|
||||
|
||||
img := &image{
|
||||
client: c,
|
||||
i: imgrec,
|
||||
}
|
||||
return img, nil
|
||||
}
|
||||
|
||||
func onUntarIndexJSON(r io.Reader, refObject string) (*ocispec.Descriptor, error) {
|
||||
b, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var idx ocispec.Index
|
||||
if err := json.Unmarshal(b, &idx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resolveOCIIndex(idx, refObject)
|
||||
}
|
||||
|
||||
func onUntarBlob(ctx context.Context, r io.Reader, store content.Store, name string, size int64) error {
|
||||
// name is like "blobs/sha256/deadbeef"
|
||||
split := strings.Split(name, "/")
|
||||
if len(split) != 3 {
|
||||
return errors.Errorf("unexpected name: %q", name)
|
||||
}
|
||||
algo := digest.Algorithm(split[1])
|
||||
if !algo.Available() {
|
||||
return errors.Errorf("unsupported algorithm: %s", algo)
|
||||
}
|
||||
dgst := digest.NewDigestFromHex(algo.String(), split[2])
|
||||
return content.WriteBlob(ctx, store, "unknown-"+dgst.String(), r, size, dgst)
|
||||
}
|
1 vendor/github.com/containerd/containerd/remotes/handlers.go generated vendored

@@ -114,6 +114,7 @@ func fetch(ctx context.Context, ingester content.Ingester, fetcher Fetcher, desc
 func commitOpts(desc ocispec.Descriptor, r io.Reader) (io.Reader, []content.Opt) {
 	var childrenF func(r io.Reader) ([]ocispec.Descriptor, error)
 
+	// TODO(AkihiroSuda): use images/oci.GetChildrenDescriptors?
 	switch desc.MediaType {
 	case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
 		childrenF = func(r io.Reader) ([]ocispec.Descriptor, error) {