Commit b6aae9919f in https://github.com/moby/moby.git (parent: c12f09bf99)

vendor: update buildkit to b3028967a

Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>

78 changed files with 1964 additions and 572 deletions
@@ -11,6 +11,7 @@ import (
 	"github.com/containerd/containerd/snapshots"
 	"github.com/docker/docker/daemon/graphdriver"
 	"github.com/docker/docker/layer"
+	"github.com/docker/docker/pkg/idtools"
 	"github.com/moby/buildkit/identity"
 	"github.com/moby/buildkit/snapshot"
 	digest "github.com/opencontainers/go-digest"

@@ -73,6 +74,10 @@ func NewSnapshotter(opt Opt) (snapshot.SnapshotterBase, error) {
 	return s, nil
 }
 
+func (s *snapshotter) IdentityMapping() *idtools.IdentityMapping {
+	return nil
+}
+
 func (s *snapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) error {
 	origParent := parent
 	if parent != "" {

@@ -469,3 +474,7 @@ func (m *mountable) Release() error {
 	m.mounts = nil
 	return m.release()
 }
+
+func (m *mountable) IdentityMapping() *idtools.IdentityMapping {
+	return nil
+}
@@ -25,7 +25,6 @@ import (
 	"github.com/moby/buildkit/control"
 	"github.com/moby/buildkit/identity"
 	"github.com/moby/buildkit/session"
-	"github.com/moby/buildkit/solver/llbsolver"
 	"github.com/moby/buildkit/util/entitlements"
 	"github.com/moby/buildkit/util/resolver"
 	"github.com/moby/buildkit/util/tracing"

@@ -64,10 +63,6 @@ var cacheFields = map[string]bool{
 	"immutable": false,
 }
 
-func init() {
-	llbsolver.AllowNetworkHostUnstable = true
-}
-
 // Opt is option struct required for creating the builder
 type Opt struct {
 	SessionManager *session.Manager
@@ -30,6 +30,7 @@ import (
 	"github.com/moby/buildkit/snapshot/blobmapping"
 	"github.com/moby/buildkit/solver/bboltcachestorage"
 	"github.com/moby/buildkit/util/binfmt_misc"
+	"github.com/moby/buildkit/util/entitlements"
 	"github.com/moby/buildkit/worker"
 	specs "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"

@@ -189,6 +190,10 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
 		ResolveCacheExporterFuncs: map[string]remotecache.ResolveCacheExporterFunc{
 			"inline": inlineremotecache.ResolveCacheExporterFunc(),
 		},
+		Entitlements: []string{
+			string(entitlements.EntitlementNetworkHost),
+			// string(entitlements.EntitlementSecurityInsecure),
+		},
 	})
 }
 
@@ -25,6 +25,7 @@ import (
 	"github.com/moby/buildkit/executor"
 	"github.com/moby/buildkit/exporter"
 	localexporter "github.com/moby/buildkit/exporter/local"
+	tarexporter "github.com/moby/buildkit/exporter/tar"
 	"github.com/moby/buildkit/frontend"
 	gw "github.com/moby/buildkit/frontend/gateway/client"
 	"github.com/moby/buildkit/session"

@@ -213,6 +214,10 @@ func (w *Worker) Exporter(name string, sm *session.Manager) (exporter.Exporter,
 		return localexporter.New(localexporter.Opt{
 			SessionManager: sm,
 		})
+	case client.ExporterTar:
+		return tarexporter.New(tarexporter.Opt{
+			SessionManager: sm,
+		})
 	default:
 		return nil, errors.Errorf("exporter %q could not be found", name)
 	}
@@ -27,8 +27,8 @@ github.com/imdario/mergo v0.3.6
 golang.org/x/sync 1d60e4601c6fd243af51cc01ddf169918a5407ca
 
 # buildkit
-github.com/moby/buildkit e9aca5bef87e19173b99d8668db0338dcaaa5f33
-github.com/tonistiigi/fsutil 1bdbf124ad494a771e99e0cdcd16326375f8b2c9
+github.com/moby/buildkit b3028967ae6259c9a31c1a1deeccd30fe3469cce
+github.com/tonistiigi/fsutil 3bbb99cdbd76619ab717299830c60f6f2a533a6b
 github.com/grpc-ecosystem/grpc-opentracing 8e809c8a86450a29b90dcc9efbf062d0fe6d9746
 github.com/opentracing/opentracing-go 1361b9cd60be79c4c3a7fa9841b3c132e40066a7
 github.com/google/shlex 6f45313302b9c56850fc17f99e40caebce98c716

@@ -120,7 +120,7 @@ github.com/googleapis/gax-go v2.0.0
 google.golang.org/genproto 694d95ba50e67b2e363f3483057db5d4910c18f9
 
 # containerd
-github.com/containerd/containerd a15b6e2097c48b632dbdc63254bad4c62b69e709
+github.com/containerd/containerd ceba56893a76f22cf0126c46d835c80fb3833408
 github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c
 github.com/containerd/continuity 004b46473808b3e7a4a3049c20e4376c91eb966d
 github.com/containerd/cgroups 4994991857f9b0ae8dc439551e8bebdbb4bf66c1
72  vendor/github.com/containerd/containerd/cio/io.go (generated, vendored)

@@ -20,8 +20,8 @@ import (
 	"context"
 	"fmt"
 	"io"
+	"net/url"
 	"os"
-	"path/filepath"
 	"sync"
 
 	"github.com/containerd/containerd/defaults"

@@ -222,46 +222,76 @@ type DirectIO struct {
 	cio
 }
 
-var _ IO = &DirectIO{}
+var (
+	_ IO = &DirectIO{}
+	_ IO = &logURI{}
+)
 
-// LogFile creates a file on disk that logs the task's STDOUT,STDERR.
-// If the log file already exists, the logs will be appended to the file.
-func LogFile(path string) Creator {
+// LogURI provides the raw logging URI
+func LogURI(uri *url.URL) Creator {
 	return func(_ string) (IO, error) {
-		if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
-			return nil, err
-		}
-		f, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
-		if err != nil {
-			return nil, err
-		}
-		f.Close()
-		return &logIO{
+		return &logURI{
 			config: Config{
-				Stdout: path,
-				Stderr: path,
+				Stdout: uri.String(),
+				Stderr: uri.String(),
 			},
 		}, nil
 	}
 }
 
-type logIO struct {
+// BinaryIO forwards container STDOUT|STDERR directly to a logging binary
+func BinaryIO(binary string, args map[string]string) Creator {
+	return func(_ string) (IO, error) {
+		uri := &url.URL{
+			Scheme: "binary",
+			Host:   binary,
+		}
+		for k, v := range args {
+			uri.Query().Set(k, v)
+		}
+		return &logURI{
+			config: Config{
+				Stdout: uri.String(),
+				Stderr: uri.String(),
+			},
+		}, nil
+	}
+}
+
+// LogFile creates a file on disk that logs the task's STDOUT,STDERR.
+// If the log file already exists, the logs will be appended to the file.
+func LogFile(path string) Creator {
+	return func(_ string) (IO, error) {
+		uri := &url.URL{
+			Scheme: "file",
+			Host:   path,
+		}
+		return &logURI{
+			config: Config{
+				Stdout: uri.String(),
+				Stderr: uri.String(),
+			},
+		}, nil
+	}
+}
+
+type logURI struct {
 	config Config
 }
 
-func (l *logIO) Config() Config {
+func (l *logURI) Config() Config {
 	return l.config
 }
 
-func (l *logIO) Cancel() {
+func (l *logURI) Cancel() {
 
 }
 
-func (l *logIO) Wait() {
+func (l *logURI) Wait() {
 
 }
 
-func (l *logIO) Close() error {
+func (l *logURI) Close() error {
 	return nil
 }
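The cio/io.go hunk above replaces the file-backed `logIO` type with a URI-based `logURI`, adds `LogURI` and `BinaryIO` creators, and rewrites `LogFile` to emit a `file://` URI instead of opening the file itself. A minimal sketch of how a caller might pick between the three creators; the file path, binary path, and "tag" argument are placeholders, not values from this diff:

```go
package example

import (
	"net/url"

	"github.com/containerd/containerd/cio"
)

// chooseCreator returns a cio.Creator built from the creators added in this hunk.
func chooseCreator(mode string) (cio.Creator, error) {
	switch mode {
	case "file":
		// STDOUT/STDERR appended to a file, expressed internally as a file:// URI.
		return cio.LogFile("/var/log/ctr/app.log"), nil
	case "binary":
		// STDOUT/STDERR forwarded to an external logging binary; args become URI query parameters.
		return cio.BinaryIO("/usr/local/bin/my-logger", map[string]string{"tag": "app"}), nil
	default:
		// A raw logging URI passed through unchanged.
		u, err := url.Parse("binary:///usr/local/bin/my-logger?tag=app")
		if err != nil {
			return nil, err
		}
		return cio.LogURI(u), nil
	}
}
```

The returned `cio.Creator` would then be handed to `container.NewTask(ctx, creator)` as before; only the way STDIO is described changes.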
4  vendor/github.com/containerd/containerd/client.go (generated, vendored)

@@ -300,6 +300,10 @@ type RemoteContext struct {
 
 	// MaxConcurrentDownloads is the max concurrent content downloads for each pull.
 	MaxConcurrentDownloads int
+
+	// AppendDistributionSourceLabel allows fetcher to add distribute source
+	// label for each blob content, which doesn't work for legacy schema1.
+	AppendDistributionSourceLabel bool
 }
 
 func defaultRemoteContext() *RemoteContext {
9  vendor/github.com/containerd/containerd/client_opts.go (generated, vendored)

@@ -194,3 +194,12 @@ func WithMaxConcurrentDownloads(max int) RemoteOpt {
 		return nil
 	}
 }
+
+// WithAppendDistributionSourceLabel allows fetcher to add distribute source
+// label for each blob content, which doesn't work for legacy schema1.
+func WithAppendDistributionSourceLabel() RemoteOpt {
+	return func(_ *Client, c *RemoteContext) error {
+		c.AppendDistributionSourceLabel = true
+		return nil
+	}
+}
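The new `WithAppendDistributionSourceLabel` option toggles the `AppendDistributionSourceLabel` field added to `RemoteContext` above, so each fetched blob is labelled with the registry it came from (see the new `remotes/docker/handler.go` further down in this diff). A hedged usage sketch; the socket path, namespace, and image reference are placeholders, not values from the commit:

```go
package example

import (
	"context"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/namespaces"
)

// pullWithSourceLabel pulls an image while recording the distribution source of
// every blob as a "containerd.io/distribution.source.<registry>" label.
func pullWithSourceLabel(ctx context.Context) (containerd.Image, error) {
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		return nil, err
	}
	defer client.Close()

	ctx = namespaces.WithNamespace(ctx, "example")
	return client.Pull(ctx, "docker.io/library/alpine:latest",
		containerd.WithPullUnpack,
		containerd.WithMaxConcurrentDownloads(3),
		containerd.WithAppendDistributionSourceLabel(),
	)
}
```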
24  vendor/github.com/containerd/containerd/contrib/nvidia/nvidia.go (generated, vendored)

@@ -29,7 +29,8 @@ import (
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 )
 
-const nvidiaCLI = "nvidia-container-cli"
+// NvidiaCLI is the path to the Nvidia helper binary
+const NvidiaCLI = "nvidia-container-cli"
 
 // Capability specifies capabilities for the gpu inside the container
 // Detailed explanation of options can be found:

@@ -51,13 +52,16 @@ const (
 	Display Capability = "display"
 )
 
-var allCaps = []Capability{
-	Compute,
-	Compat32,
-	Graphics,
-	Utility,
-	Video,
-	Display,
+// AllCaps returns the complete list of supported Nvidia capabilties.
+func AllCaps() []Capability {
+	return []Capability{
+		Compute,
+		Compat32,
+		Graphics,
+		Utility,
+		Video,
+		Display,
+	}
 }
 
 // WithGPUs adds NVIDIA gpu support to a container

@@ -76,7 +80,7 @@ func WithGPUs(opts ...Opts) oci.SpecOpts {
 		}
 		c.OCIHookPath = path
 	}
-	nvidiaPath, err := exec.LookPath(nvidiaCLI)
+	nvidiaPath, err := exec.LookPath(NvidiaCLI)
 	if err != nil {
 		return err
 	}

@@ -166,7 +170,7 @@ func WithAllDevices(c *config) error {
 
 // WithAllCapabilities adds all capabilities to the container for the gpus
 func WithAllCapabilities(c *config) error {
-	c.Capabilities = allCaps
+	c.Capabilities = AllCaps()
 	return nil
 }
 
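This hunk exports the helper binary name as `NvidiaCLI` and replaces the internal `allCaps` slice with an exported `AllCaps()` function. A small sketch of how a caller might combine the exported pieces when building an OCI spec; it uses only the options visible above and is an illustration, not code from the commit:

```go
package example

import (
	"github.com/containerd/containerd/contrib/nvidia"
	"github.com/containerd/containerd/oci"
)

// gpuSpecOpts exposes all GPUs with every capability reported by AllCaps().
func gpuSpecOpts() []oci.SpecOpts {
	_ = nvidia.AllCaps() // the full capability list is now inspectable by callers
	return []oci.SpecOpts{
		// WithAllDevices and WithAllCapabilities are the package options shown in the hunk.
		nvidia.WithGPUs(nvidia.WithAllDevices, nvidia.WithAllCapabilities),
	}
}
```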
2  vendor/github.com/containerd/containerd/contrib/seccomp/seccomp_default.go (generated, vendored)

@@ -161,6 +161,7 @@ func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp {
 				"ioctl",
 				"io_destroy",
 				"io_getevents",
+				"io_pgetevents",
 				"ioprio_get",
 				"ioprio_set",
 				"io_setup",

@@ -319,6 +320,7 @@ func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp {
 				"stat64",
 				"statfs",
 				"statfs64",
+				"statx",
 				"symlink",
 				"symlinkat",
 				"sync",
21  vendor/github.com/containerd/containerd/import.go (generated, vendored)

@@ -25,14 +25,16 @@ import (
 	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/images"
 	"github.com/containerd/containerd/images/archive"
+	"github.com/containerd/containerd/platforms"
 	digest "github.com/opencontainers/go-digest"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 )
 
 type importOpts struct {
-	indexName string
-	imageRefT func(string) string
-	dgstRefT  func(digest.Digest) string
+	indexName    string
+	imageRefT    func(string) string
+	dgstRefT     func(digest.Digest) string
+	allPlatforms bool
 }
 
 // ImportOpt allows the caller to specify import specific options

@@ -64,6 +66,14 @@ func WithIndexName(name string) ImportOpt {
 	}
 }
 
+// WithAllPlatforms is used to import content for all platforms.
+func WithAllPlatforms(allPlatforms bool) ImportOpt {
+	return func(c *importOpts) error {
+		c.allPlatforms = allPlatforms
+		return nil
+	}
+}
+
 // Import imports an image from a Tar stream using reader.
 // Caller needs to specify importer. Future version may use oci.v1 as the default.
 // Note that unreferrenced blobs may be imported to the content store as well.

@@ -98,6 +108,10 @@ func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt
 			Target: index,
 		})
 	}
+	var platformMatcher = platforms.All
+	if !iopts.allPlatforms {
+		platformMatcher = platforms.Default()
+	}
 
 	var handler images.HandlerFunc = func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
 		// Only save images at top level

@@ -141,6 +155,7 @@ func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt
 		return idx.Manifests, nil
 	}
 
+	handler = images.FilterPlatforms(handler, platformMatcher)
 	handler = images.SetChildrenLabels(cs, handler)
 	if err := images.Walk(ctx, handler, index); err != nil {
 		return nil, err
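The import path gains a `WithAllPlatforms` option: by default the rewritten `Client.Import` walks the index with `platforms.Default()`, and only with this option does it keep every platform. A rough usage sketch; the tarball path and namespace are placeholders, and the `[]images.Image` return type is assumed from the containerd client API of this era:

```go
package example

import (
	"context"
	"os"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/namespaces"
)

// importAllPlatforms imports an image tarball without filtering the index
// down to the default platform.
func importAllPlatforms(ctx context.Context, client *containerd.Client) ([]images.Image, error) {
	f, err := os.Open("/tmp/images.tar")
	if err != nil {
		return nil, err
	}
	defer f.Close()

	ctx = namespaces.WithNamespace(ctx, "example")
	// Without WithAllPlatforms(true), only platforms.Default() content is walked.
	return client.Import(ctx, f, containerd.WithAllPlatforms(true))
}
```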
63  vendor/github.com/containerd/containerd/oci/spec_opts.go (generated, vendored)
|
@ -33,7 +33,7 @@ import (
|
|||
"github.com/containerd/containerd/namespaces"
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/containerd/continuity/fs"
|
||||
"github.com/opencontainers/image-spec/specs-go/v1"
|
||||
v1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/opencontainers/runc/libcontainer/user"
|
||||
specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/pkg/errors"
|
||||
|
@ -741,9 +741,11 @@ func WithCapabilities(caps []string) SpecOpts {
|
|||
}
|
||||
|
||||
// WithAllCapabilities sets all linux capabilities for the process
|
||||
var WithAllCapabilities = WithCapabilities(getAllCapabilities())
|
||||
var WithAllCapabilities = WithCapabilities(GetAllCapabilities())
|
||||
|
||||
func getAllCapabilities() []string {
|
||||
// GetAllCapabilities returns all caps up to CAP_LAST_CAP
|
||||
// or CAP_BLOCK_SUSPEND on RHEL6
|
||||
func GetAllCapabilities() []string {
|
||||
last := capability.CAP_LAST_CAP
|
||||
// hack for RHEL6 which has no /proc/sys/kernel/cap_last_cap
|
||||
if last == capability.Cap(63) {
|
||||
|
@ -759,6 +761,61 @@ func getAllCapabilities() []string {
|
|||
return caps
|
||||
}
|
||||
|
||||
func capsContain(caps []string, s string) bool {
|
||||
for _, c := range caps {
|
||||
if c == s {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func removeCap(caps *[]string, s string) {
|
||||
for i, c := range *caps {
|
||||
if c == s {
|
||||
*caps = append((*caps)[:i], (*caps)[i+1:]...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// WithAddedCapabilities adds the provided capabilities
|
||||
func WithAddedCapabilities(caps []string) SpecOpts {
|
||||
return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
|
||||
setCapabilities(s)
|
||||
for _, c := range caps {
|
||||
for _, cl := range []*[]string{
|
||||
&s.Process.Capabilities.Bounding,
|
||||
&s.Process.Capabilities.Effective,
|
||||
&s.Process.Capabilities.Permitted,
|
||||
&s.Process.Capabilities.Inheritable,
|
||||
} {
|
||||
if !capsContain(*cl, c) {
|
||||
*cl = append(*cl, c)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithDroppedCapabilities removes the provided capabilities
|
||||
func WithDroppedCapabilities(caps []string) SpecOpts {
|
||||
return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
|
||||
setCapabilities(s)
|
||||
for _, c := range caps {
|
||||
for _, cl := range []*[]string{
|
||||
&s.Process.Capabilities.Bounding,
|
||||
&s.Process.Capabilities.Effective,
|
||||
&s.Process.Capabilities.Permitted,
|
||||
&s.Process.Capabilities.Inheritable,
|
||||
} {
|
||||
removeCap(cl, c)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithAmbientCapabilities set the Linux ambient capabilities for the process
|
||||
// Ambient capabilities should only be set for non-root users or the caller should
|
||||
// understand how these capabilities are used and set
|
||||
|
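The spec_opts.go hunk above renames `getAllCapabilities` to the exported `GetAllCapabilities` and adds `WithAddedCapabilities` and `WithDroppedCapabilities`, which add or remove individual capabilities from the bounding, effective, permitted, and inheritable sets rather than replacing the whole set the way `WithCapabilities` does. A brief sketch of how the new helpers compose; the capability names are illustrative:

```go
package example

import (
	"github.com/containerd/containerd/oci"
)

// tunedCapabilities adjusts individual capabilities on top of whatever the
// spec already contains, instead of overwriting the full set.
func tunedCapabilities() []oci.SpecOpts {
	return []oci.SpecOpts{
		oci.WithAddedCapabilities([]string{"CAP_NET_ADMIN"}),
		oci.WithDroppedCapabilities([]string{"CAP_SYS_ADMIN", "CAP_SYS_PTRACE"}),
	}
}
```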
|
22  vendor/github.com/containerd/containerd/pull.go (generated, vendored)
|
@ -112,8 +112,9 @@ func (c *Client) fetch(ctx context.Context, rCtx *RemoteContext, ref string, lim
|
|||
childrenHandler := images.ChildrenHandler(store)
|
||||
// Set any children labels for that content
|
||||
childrenHandler = images.SetChildrenLabels(store, childrenHandler)
|
||||
// Filter children by platforms
|
||||
childrenHandler = images.FilterPlatforms(childrenHandler, rCtx.PlatformMatcher)
|
||||
// Filter manifests by platforms but allow to handle manifest
|
||||
// and configuration for not-target platforms
|
||||
childrenHandler = remotes.FilterManifestByPlatformHandler(childrenHandler, rCtx.PlatformMatcher)
|
||||
// Sort and limit manifests if a finite number is needed
|
||||
if limit > 0 {
|
||||
childrenHandler = images.LimitManifests(childrenHandler, rCtx.PlatformMatcher, limit)
|
||||
|
@ -130,11 +131,23 @@ func (c *Client) fetch(ctx context.Context, rCtx *RemoteContext, ref string, lim
|
|||
},
|
||||
)
|
||||
|
||||
handler = images.Handlers(append(rCtx.BaseHandlers,
|
||||
handlers := append(rCtx.BaseHandlers,
|
||||
remotes.FetchHandler(store, fetcher),
|
||||
convertibleHandler,
|
||||
childrenHandler,
|
||||
)...)
|
||||
)
|
||||
|
||||
// append distribution source label to blob data
|
||||
if rCtx.AppendDistributionSourceLabel {
|
||||
appendDistSrcLabelHandler, err := docker.AppendDistributionSourceLabel(store, ref)
|
||||
if err != nil {
|
||||
return images.Image{}, err
|
||||
}
|
||||
|
||||
handlers = append(handlers, appendDistSrcLabelHandler)
|
||||
}
|
||||
|
||||
handler = images.Handlers(handlers...)
|
||||
|
||||
converterFunc = func(ctx context.Context, desc ocispec.Descriptor) (ocispec.Descriptor, error) {
|
||||
return docker.ConvertManifest(ctx, store, desc)
|
||||
|
@ -148,6 +161,7 @@ func (c *Client) fetch(ctx context.Context, rCtx *RemoteContext, ref string, lim
|
|||
if rCtx.MaxConcurrentDownloads > 0 {
|
||||
limiter = semaphore.NewWeighted(int64(rCtx.MaxConcurrentDownloads))
|
||||
}
|
||||
|
||||
if err := images.Dispatch(ctx, handler, limiter, desc); err != nil {
|
||||
return images.Image{}, err
|
||||
}
|
||||
|
|
112  vendor/github.com/containerd/containerd/remotes/docker/handler.go (generated, vendored, new file)
|
@ -0,0 +1,112 @@
|
|||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package docker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/containerd/content"
|
||||
"github.com/containerd/containerd/images"
|
||||
"github.com/containerd/containerd/labels"
|
||||
"github.com/containerd/containerd/log"
|
||||
"github.com/containerd/containerd/reference"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
var (
|
||||
// labelDistributionSource describes the source blob comes from.
|
||||
labelDistributionSource = "containerd.io/distribution.source"
|
||||
)
|
||||
|
||||
// AppendDistributionSourceLabel updates the label of blob with distribution source.
|
||||
func AppendDistributionSourceLabel(manager content.Manager, ref string) (images.HandlerFunc, error) {
|
||||
refspec, err := reference.Parse(ref)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
u, err := url.Parse("dummy://" + refspec.Locator)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
source, repo := u.Hostname(), strings.TrimPrefix(u.Path, "/")
|
||||
return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
|
||||
info, err := manager.Info(ctx, desc.Digest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
key := distributionSourceLabelKey(source)
|
||||
|
||||
originLabel := ""
|
||||
if info.Labels != nil {
|
||||
originLabel = info.Labels[key]
|
||||
}
|
||||
value := appendDistributionSourceLabel(originLabel, repo)
|
||||
|
||||
// The repo name has been limited under 256 and the distribution
|
||||
// label might hit the limitation of label size, when blob data
|
||||
// is used as the very, very common layer.
|
||||
if err := labels.Validate(key, value); err != nil {
|
||||
log.G(ctx).Warnf("skip to append distribution label: %s", err)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
info = content.Info{
|
||||
Digest: desc.Digest,
|
||||
Labels: map[string]string{
|
||||
key: value,
|
||||
},
|
||||
}
|
||||
_, err = manager.Update(ctx, info, fmt.Sprintf("labels.%s", key))
|
||||
return nil, err
|
||||
}, nil
|
||||
}
|
||||
|
||||
func appendDistributionSourceLabel(originLabel, repo string) string {
|
||||
repos := []string{}
|
||||
if originLabel != "" {
|
||||
repos = strings.Split(originLabel, ",")
|
||||
}
|
||||
repos = append(repos, repo)
|
||||
|
||||
// use emtpy string to present duplicate items
|
||||
for i := 1; i < len(repos); i++ {
|
||||
tmp, j := repos[i], i-1
|
||||
for ; j >= 0 && repos[j] >= tmp; j-- {
|
||||
if repos[j] == tmp {
|
||||
tmp = ""
|
||||
}
|
||||
repos[j+1] = repos[j]
|
||||
}
|
||||
repos[j+1] = tmp
|
||||
}
|
||||
|
||||
i := 0
|
||||
for ; i < len(repos) && repos[i] == ""; i++ {
|
||||
}
|
||||
|
||||
return strings.Join(repos[i:], ",")
|
||||
}
|
||||
|
||||
func distributionSourceLabelKey(source string) string {
|
||||
return fmt.Sprintf("%s.%s", labelDistributionSource, source)
|
||||
}
|
35  vendor/github.com/containerd/containerd/remotes/handlers.go (generated, vendored)
|
@ -206,3 +206,38 @@ func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, pr
|
|||
|
||||
return nil
|
||||
}
|
||||
|
||||
// FilterManifestByPlatformHandler allows Handler to handle non-target
|
||||
// platform's manifest and configuration data.
|
||||
func FilterManifestByPlatformHandler(f images.HandlerFunc, m platforms.Matcher) images.HandlerFunc {
|
||||
return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
|
||||
children, err := f(ctx, desc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// no platform information
|
||||
if desc.Platform == nil || m == nil {
|
||||
return children, nil
|
||||
}
|
||||
|
||||
var descs []ocispec.Descriptor
|
||||
switch desc.MediaType {
|
||||
case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
|
||||
if m.Match(*desc.Platform) {
|
||||
descs = children
|
||||
} else {
|
||||
for _, child := range children {
|
||||
if child.MediaType == images.MediaTypeDockerSchema2Config ||
|
||||
child.MediaType == ocispec.MediaTypeImageConfig {
|
||||
|
||||
descs = append(descs, child)
|
||||
}
|
||||
}
|
||||
}
|
||||
default:
|
||||
descs = children
|
||||
}
|
||||
return descs, nil
|
||||
}
|
||||
}
|
||||
|
|
2  vendor/github.com/containerd/containerd/runtime/proc/proc.go (generated, vendored)

@@ -72,7 +72,7 @@ type Process interface {
 // platform implementations
 type Platform interface {
 	CopyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string,
-		wg, cwg *sync.WaitGroup) (console.Console, error)
+		wg *sync.WaitGroup) (console.Console, error)
 	ShutdownConsole(ctx context.Context, console console.Console) error
 	Close() error
 }
27  vendor/github.com/containerd/containerd/runtime/v1/linux/bundle.go (generated, vendored)
|
@ -20,6 +20,7 @@ package linux
|
|||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
@ -103,7 +104,7 @@ func ShimLocal(c *Config, exchange *exchange.Exchange) ShimOpt {
|
|||
// ShimConnect is a ShimOpt for connecting to an existing remote shim
|
||||
func ShimConnect(c *Config, onClose func()) ShimOpt {
|
||||
return func(b *bundle, ns string, ropts *runctypes.RuncOptions) (shim.Config, client.Opt) {
|
||||
return b.shimConfig(ns, c, ropts), client.WithConnect(b.shimAddress(ns), onClose)
|
||||
return b.shimConfig(ns, c, ropts), client.WithConnect(b.decideShimAddress(ns), onClose)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -127,10 +128,32 @@ func (b *bundle) Delete() error {
|
|||
return errors.Wrapf(err, "Failed to remove both bundle and workdir locations: %v", err2)
|
||||
}
|
||||
|
||||
func (b *bundle) shimAddress(namespace string) string {
|
||||
func (b *bundle) legacyShimAddress(namespace string) string {
|
||||
return filepath.Join(string(filepath.Separator), "containerd-shim", namespace, b.id, "shim.sock")
|
||||
}
|
||||
|
||||
func (b *bundle) shimAddress(namespace string) string {
|
||||
d := sha256.Sum256([]byte(filepath.Join(namespace, b.id)))
|
||||
return filepath.Join(string(filepath.Separator), "containerd-shim", fmt.Sprintf("%x.sock", d))
|
||||
}
|
||||
|
||||
func (b *bundle) loadAddress() (string, error) {
|
||||
addressPath := filepath.Join(b.path, "address")
|
||||
data, err := ioutil.ReadFile(addressPath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(data), nil
|
||||
}
|
||||
|
||||
func (b *bundle) decideShimAddress(namespace string) string {
|
||||
address, err := b.loadAddress()
|
||||
if err != nil {
|
||||
return b.legacyShimAddress(namespace)
|
||||
}
|
||||
return address
|
||||
}
|
||||
|
||||
func (b *bundle) shimConfig(namespace string, c *Config, runcOptions *runctypes.RuncOptions) shim.Config {
|
||||
var (
|
||||
criuPath string
|
||||
|
|
48  vendor/github.com/containerd/containerd/runtime/v1/linux/proc/exec.go (generated, vendored)
|
@ -46,7 +46,7 @@ type execProcess struct {
|
|||
mu sync.Mutex
|
||||
id string
|
||||
console console.Console
|
||||
io runc.IO
|
||||
io *processIO
|
||||
status int
|
||||
exited time.Time
|
||||
pid *safePid
|
||||
|
@ -172,29 +172,30 @@ func (e *execProcess) start(ctx context.Context) (err error) {
|
|||
// access e.pid until it is updated.
|
||||
e.pid.Lock()
|
||||
defer e.pid.Unlock()
|
||||
|
||||
var (
|
||||
socket *runc.Socket
|
||||
pidfile = filepath.Join(e.path, fmt.Sprintf("%s.pid", e.id))
|
||||
pio *processIO
|
||||
pidFile = newExecPidFile(e.path, e.id)
|
||||
)
|
||||
if e.stdio.Terminal {
|
||||
if socket, err = runc.NewTempConsoleSocket(); err != nil {
|
||||
return errors.Wrap(err, "failed to create runc console socket")
|
||||
}
|
||||
defer socket.Close()
|
||||
} else if e.stdio.IsNull() {
|
||||
if e.io, err = runc.NewNullIO(); err != nil {
|
||||
return errors.Wrap(err, "creating new NULL IO")
|
||||
}
|
||||
} else {
|
||||
if e.io, err = runc.NewPipeIO(e.parent.IoUID, e.parent.IoGID, withConditionalIO(e.stdio)); err != nil {
|
||||
return errors.Wrap(err, "failed to create runc io pipes")
|
||||
if pio, err = createIO(ctx, e.id, e.parent.IoUID, e.parent.IoGID, e.stdio); err != nil {
|
||||
return errors.Wrap(err, "failed to create init process I/O")
|
||||
}
|
||||
e.io = pio
|
||||
}
|
||||
opts := &runc.ExecOpts{
|
||||
PidFile: pidfile,
|
||||
IO: e.io,
|
||||
PidFile: pidFile.Path(),
|
||||
Detach: true,
|
||||
}
|
||||
if pio != nil {
|
||||
opts.IO = pio.IO()
|
||||
}
|
||||
if socket != nil {
|
||||
opts.ConsoleSocket = socket
|
||||
}
|
||||
|
@ -203,14 +204,10 @@ func (e *execProcess) start(ctx context.Context) (err error) {
|
|||
return e.parent.runtimeError(err, "OCI runtime exec failed")
|
||||
}
|
||||
if e.stdio.Stdin != "" {
|
||||
sc, err := fifo.OpenFifo(context.Background(), e.stdio.Stdin, syscall.O_WRONLY|syscall.O_NONBLOCK, 0)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to open stdin fifo %s", e.stdio.Stdin)
|
||||
if err := e.openStdin(e.stdio.Stdin); err != nil {
|
||||
return err
|
||||
}
|
||||
e.closers = append(e.closers, sc)
|
||||
e.stdin = sc
|
||||
}
|
||||
var copyWaitGroup sync.WaitGroup
|
||||
ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
|
||||
defer cancel()
|
||||
if socket != nil {
|
||||
|
@ -218,16 +215,15 @@ func (e *execProcess) start(ctx context.Context) (err error) {
|
|||
if err != nil {
|
||||
return errors.Wrap(err, "failed to retrieve console master")
|
||||
}
|
||||
if e.console, err = e.parent.Platform.CopyConsole(ctx, console, e.stdio.Stdin, e.stdio.Stdout, e.stdio.Stderr, &e.wg, ©WaitGroup); err != nil {
|
||||
if e.console, err = e.parent.Platform.CopyConsole(ctx, console, e.stdio.Stdin, e.stdio.Stdout, e.stdio.Stderr, &e.wg); err != nil {
|
||||
return errors.Wrap(err, "failed to start console copy")
|
||||
}
|
||||
} else if !e.stdio.IsNull() {
|
||||
if err := copyPipes(ctx, e.io, e.stdio.Stdin, e.stdio.Stdout, e.stdio.Stderr, &e.wg, ©WaitGroup); err != nil {
|
||||
} else {
|
||||
if err := pio.Copy(ctx, &e.wg); err != nil {
|
||||
return errors.Wrap(err, "failed to start io pipe copy")
|
||||
}
|
||||
}
|
||||
copyWaitGroup.Wait()
|
||||
pid, err := runc.ReadPidFile(opts.PidFile)
|
||||
pid, err := pidFile.Read()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to retrieve OCI runtime exec pid")
|
||||
}
|
||||
|
@ -235,6 +231,16 @@ func (e *execProcess) start(ctx context.Context) (err error) {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (e *execProcess) openStdin(path string) error {
|
||||
sc, err := fifo.OpenFifo(context.Background(), path, syscall.O_WRONLY|syscall.O_NONBLOCK, 0)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to open stdin fifo %s", path)
|
||||
}
|
||||
e.stdin = sc
|
||||
e.closers = append(e.closers, sc)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *execProcess) Status(ctx context.Context) (string, error) {
|
||||
s, err := e.parent.Status(ctx)
|
||||
if err != nil {
|
||||
|
|
93  vendor/github.com/containerd/containerd/runtime/v1/linux/proc/init.go (generated, vendored)
|
@ -41,9 +41,6 @@ import (
|
|||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// InitPidFile name of the file that contains the init pid
|
||||
const InitPidFile = "init.pid"
|
||||
|
||||
// Init represents an initial process for a container
|
||||
type Init struct {
|
||||
wg sync.WaitGroup
|
||||
|
@ -63,7 +60,7 @@ type Init struct {
|
|||
Bundle string
|
||||
console console.Console
|
||||
Platform proc.Platform
|
||||
io runc.IO
|
||||
io *processIO
|
||||
runtime *runc.Runc
|
||||
status int
|
||||
exited time.Time
|
||||
|
@ -111,49 +108,33 @@ func New(id string, runtime *runc.Runc, stdio proc.Stdio) *Init {
|
|||
// Create the process with the provided config
|
||||
func (p *Init) Create(ctx context.Context, r *CreateConfig) error {
|
||||
var (
|
||||
err error
|
||||
socket *runc.Socket
|
||||
err error
|
||||
socket *runc.Socket
|
||||
pio *processIO
|
||||
pidFile = newPidFile(p.Bundle)
|
||||
)
|
||||
if r.Terminal {
|
||||
if socket, err = runc.NewTempConsoleSocket(); err != nil {
|
||||
return errors.Wrap(err, "failed to create OCI runtime console socket")
|
||||
}
|
||||
defer socket.Close()
|
||||
} else if hasNoIO(r) {
|
||||
if p.io, err = runc.NewNullIO(); err != nil {
|
||||
return errors.Wrap(err, "creating new NULL IO")
|
||||
}
|
||||
} else {
|
||||
if p.io, err = runc.NewPipeIO(p.IoUID, p.IoGID, withConditionalIO(p.stdio)); err != nil {
|
||||
return errors.Wrap(err, "failed to create OCI runtime io pipes")
|
||||
if pio, err = createIO(ctx, p.id, p.IoUID, p.IoGID, p.stdio); err != nil {
|
||||
return errors.Wrap(err, "failed to create init process I/O")
|
||||
}
|
||||
p.io = pio
|
||||
}
|
||||
pidFile := filepath.Join(p.Bundle, InitPidFile)
|
||||
if r.Checkpoint != "" {
|
||||
opts := &runc.RestoreOpts{
|
||||
CheckpointOpts: runc.CheckpointOpts{
|
||||
ImagePath: r.Checkpoint,
|
||||
WorkDir: p.CriuWorkPath,
|
||||
ParentPath: r.ParentCheckpoint,
|
||||
},
|
||||
PidFile: pidFile,
|
||||
IO: p.io,
|
||||
NoPivot: p.NoPivotRoot,
|
||||
Detach: true,
|
||||
NoSubreaper: true,
|
||||
}
|
||||
p.initState = &createdCheckpointState{
|
||||
p: p,
|
||||
opts: opts,
|
||||
}
|
||||
return nil
|
||||
return p.createCheckpointedState(r, pidFile)
|
||||
}
|
||||
opts := &runc.CreateOpts{
|
||||
PidFile: pidFile,
|
||||
IO: p.io,
|
||||
PidFile: pidFile.Path(),
|
||||
NoPivot: p.NoPivotRoot,
|
||||
NoNewKeyring: p.NoNewKeyring,
|
||||
}
|
||||
if p.io != nil {
|
||||
opts.IO = p.io.IO()
|
||||
}
|
||||
if socket != nil {
|
||||
opts.ConsoleSocket = socket
|
||||
}
|
||||
|
@ -161,14 +142,10 @@ func (p *Init) Create(ctx context.Context, r *CreateConfig) error {
|
|||
return p.runtimeError(err, "OCI runtime create failed")
|
||||
}
|
||||
if r.Stdin != "" {
|
||||
sc, err := fifo.OpenFifo(context.Background(), r.Stdin, syscall.O_WRONLY|syscall.O_NONBLOCK, 0)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to open stdin fifo %s", r.Stdin)
|
||||
if err := p.openStdin(r.Stdin); err != nil {
|
||||
return err
|
||||
}
|
||||
p.stdin = sc
|
||||
p.closers = append(p.closers, sc)
|
||||
}
|
||||
var copyWaitGroup sync.WaitGroup
|
||||
ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
|
||||
defer cancel()
|
||||
if socket != nil {
|
||||
|
@ -176,19 +153,17 @@ func (p *Init) Create(ctx context.Context, r *CreateConfig) error {
|
|||
if err != nil {
|
||||
return errors.Wrap(err, "failed to retrieve console master")
|
||||
}
|
||||
console, err = p.Platform.CopyConsole(ctx, console, r.Stdin, r.Stdout, r.Stderr, &p.wg, ©WaitGroup)
|
||||
console, err = p.Platform.CopyConsole(ctx, console, r.Stdin, r.Stdout, r.Stderr, &p.wg)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to start console copy")
|
||||
}
|
||||
p.console = console
|
||||
} else if !hasNoIO(r) {
|
||||
if err := copyPipes(ctx, p.io, r.Stdin, r.Stdout, r.Stderr, &p.wg, ©WaitGroup); err != nil {
|
||||
} else {
|
||||
if err := pio.Copy(ctx, &p.wg); err != nil {
|
||||
return errors.Wrap(err, "failed to start io pipe copy")
|
||||
}
|
||||
}
|
||||
|
||||
copyWaitGroup.Wait()
|
||||
pid, err := runc.ReadPidFile(pidFile)
|
||||
pid, err := pidFile.Read()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to retrieve OCI runtime container pid")
|
||||
}
|
||||
|
@ -196,6 +171,36 @@ func (p *Init) Create(ctx context.Context, r *CreateConfig) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (p *Init) openStdin(path string) error {
|
||||
sc, err := fifo.OpenFifo(context.Background(), path, syscall.O_WRONLY|syscall.O_NONBLOCK, 0)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to open stdin fifo %s", path)
|
||||
}
|
||||
p.stdin = sc
|
||||
p.closers = append(p.closers, sc)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Init) createCheckpointedState(r *CreateConfig, pidFile *pidFile) error {
|
||||
opts := &runc.RestoreOpts{
|
||||
CheckpointOpts: runc.CheckpointOpts{
|
||||
ImagePath: r.Checkpoint,
|
||||
WorkDir: p.CriuWorkPath,
|
||||
ParentPath: r.ParentCheckpoint,
|
||||
},
|
||||
PidFile: pidFile.Path(),
|
||||
IO: p.io.IO(),
|
||||
NoPivot: p.NoPivotRoot,
|
||||
Detach: true,
|
||||
NoSubreaper: true,
|
||||
}
|
||||
p.initState = &createdCheckpointState{
|
||||
p: p,
|
||||
opts: opts,
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Wait for the process to exit
|
||||
func (p *Init) Wait() {
|
||||
<-p.waitBlock
|
||||
|
|
17  vendor/github.com/containerd/containerd/runtime/v1/linux/proc/init_state.go (generated, vendored)
|
@ -20,12 +20,9 @@ package proc
|
|||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"github.com/containerd/console"
|
||||
"github.com/containerd/containerd/runtime/proc"
|
||||
"github.com/containerd/fifo"
|
||||
runc "github.com/containerd/go-runc"
|
||||
google_protobuf "github.com/gogo/protobuf/types"
|
||||
"github.com/pkg/errors"
|
||||
|
@ -172,31 +169,25 @@ func (s *createdCheckpointState) Start(ctx context.Context) error {
|
|||
return p.runtimeError(err, "OCI runtime restore failed")
|
||||
}
|
||||
if sio.Stdin != "" {
|
||||
sc, err := fifo.OpenFifo(context.Background(), sio.Stdin, syscall.O_WRONLY|syscall.O_NONBLOCK, 0)
|
||||
if err != nil {
|
||||
if err := p.openStdin(sio.Stdin); err != nil {
|
||||
return errors.Wrapf(err, "failed to open stdin fifo %s", sio.Stdin)
|
||||
}
|
||||
p.stdin = sc
|
||||
p.closers = append(p.closers, sc)
|
||||
}
|
||||
var copyWaitGroup sync.WaitGroup
|
||||
if socket != nil {
|
||||
console, err := socket.ReceiveMaster()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to retrieve console master")
|
||||
}
|
||||
console, err = p.Platform.CopyConsole(ctx, console, sio.Stdin, sio.Stdout, sio.Stderr, &p.wg, ©WaitGroup)
|
||||
console, err = p.Platform.CopyConsole(ctx, console, sio.Stdin, sio.Stdout, sio.Stderr, &p.wg)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to start console copy")
|
||||
}
|
||||
p.console = console
|
||||
} else if !sio.IsNull() {
|
||||
if err := copyPipes(ctx, p.io, sio.Stdin, sio.Stdout, sio.Stderr, &p.wg, ©WaitGroup); err != nil {
|
||||
} else {
|
||||
if err := p.io.Copy(ctx, &p.wg); err != nil {
|
||||
return errors.Wrap(err, "failed to start io pipe copy")
|
||||
}
|
||||
}
|
||||
|
||||
copyWaitGroup.Wait()
|
||||
pid, err := runc.ReadPidFile(s.opts.PidFile)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to retrieve OCI runtime container pid")
|
||||
|
|
215  vendor/github.com/containerd/containerd/runtime/v1/linux/proc/io.go (generated, vendored)
|
@ -22,12 +22,18 @@ import (
|
|||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"github.com/containerd/containerd/namespaces"
|
||||
"github.com/containerd/containerd/runtime/proc"
|
||||
"github.com/containerd/fifo"
|
||||
runc "github.com/containerd/go-runc"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var bufPool = sync.Pool{
|
||||
|
@ -37,6 +43,84 @@ var bufPool = sync.Pool{
|
|||
},
|
||||
}
|
||||
|
||||
type processIO struct {
|
||||
io runc.IO
|
||||
|
||||
uri *url.URL
|
||||
copy bool
|
||||
stdio proc.Stdio
|
||||
}
|
||||
|
||||
func (p *processIO) Close() error {
|
||||
if p.io != nil {
|
||||
return p.io.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *processIO) IO() runc.IO {
|
||||
return p.io
|
||||
}
|
||||
|
||||
func (p *processIO) Copy(ctx context.Context, wg *sync.WaitGroup) error {
|
||||
if !p.copy {
|
||||
return nil
|
||||
}
|
||||
var cwg sync.WaitGroup
|
||||
if err := copyPipes(ctx, p.IO(), p.stdio.Stdin, p.stdio.Stdout, p.stdio.Stderr, wg, &cwg); err != nil {
|
||||
return errors.Wrap(err, "unable to copy pipes")
|
||||
}
|
||||
cwg.Wait()
|
||||
return nil
|
||||
}
|
||||
|
||||
func createIO(ctx context.Context, id string, ioUID, ioGID int, stdio proc.Stdio) (*processIO, error) {
|
||||
pio := &processIO{
|
||||
stdio: stdio,
|
||||
}
|
||||
if stdio.IsNull() {
|
||||
i, err := runc.NewNullIO()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pio.io = i
|
||||
return pio, nil
|
||||
}
|
||||
u, err := url.Parse(stdio.Stdout)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to parse stdout uri")
|
||||
}
|
||||
if u.Scheme == "" {
|
||||
u.Scheme = "fifo"
|
||||
}
|
||||
pio.uri = u
|
||||
switch u.Scheme {
|
||||
case "fifo":
|
||||
pio.copy = true
|
||||
pio.io, err = runc.NewPipeIO(ioUID, ioGID, withConditionalIO(stdio))
|
||||
case "binary":
|
||||
pio.io, err = newBinaryIO(ctx, id, u)
|
||||
case "file":
|
||||
if err := os.MkdirAll(filepath.Dir(u.Host), 0755); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var f *os.File
|
||||
f, err = os.OpenFile(u.Host, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f.Close()
|
||||
pio.copy = true
|
||||
pio.io, err = runc.NewPipeIO(ioUID, ioGID, withConditionalIO(stdio))
|
||||
default:
|
||||
return nil, errors.Errorf("unknown STDIO scheme %s", u.Scheme)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return pio, nil
|
||||
}
|
||||
|
||||
func copyPipes(ctx context.Context, rio runc.IO, stdin, stdout, stderr string, wg, cwg *sync.WaitGroup) error {
|
||||
var sameFile io.WriteCloser
|
||||
for _, i := range []struct {
|
||||
|
@ -143,3 +227,134 @@ func isFifo(path string) (bool, error) {
|
|||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func newBinaryIO(ctx context.Context, id string, uri *url.URL) (runc.IO, error) {
|
||||
ns, err := namespaces.NamespaceRequired(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var args []string
|
||||
for k, vs := range uri.Query() {
|
||||
args = append(args, k)
|
||||
if len(vs) > 0 {
|
||||
args = append(args, vs[0])
|
||||
}
|
||||
}
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
cmd := exec.CommandContext(ctx, uri.Host, args...)
|
||||
cmd.Env = append(cmd.Env,
|
||||
"CONTAINER_ID="+id,
|
||||
"CONTAINER_NAMESPACE="+ns,
|
||||
)
|
||||
out, err := newPipe()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
serr, err := newPipe()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r, w, err := os.Pipe()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cmd.ExtraFiles = append(cmd.ExtraFiles, out.r, serr.r, w)
|
||||
// don't need to register this with the reaper or wait when
|
||||
// running inside a shim
|
||||
if err := cmd.Start(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// close our side of the pipe after start
|
||||
w.Close()
|
||||
// wait for the logging binary to be ready
|
||||
b := make([]byte, 1)
|
||||
if _, err := r.Read(b); err != nil && err != io.EOF {
|
||||
return nil, err
|
||||
}
|
||||
return &binaryIO{
|
||||
cmd: cmd,
|
||||
cancel: cancel,
|
||||
out: out,
|
||||
err: serr,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type binaryIO struct {
|
||||
cmd *exec.Cmd
|
||||
cancel func()
|
||||
out, err *pipe
|
||||
}
|
||||
|
||||
func (b *binaryIO) CloseAfterStart() (err error) {
|
||||
for _, v := range []*pipe{
|
||||
b.out,
|
||||
b.err,
|
||||
} {
|
||||
if v != nil {
|
||||
if cerr := v.r.Close(); err == nil {
|
||||
err = cerr
|
||||
}
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (b *binaryIO) Close() (err error) {
|
||||
b.cancel()
|
||||
for _, v := range []*pipe{
|
||||
b.out,
|
||||
b.err,
|
||||
} {
|
||||
if v != nil {
|
||||
if cerr := v.Close(); err == nil {
|
||||
err = cerr
|
||||
}
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (b *binaryIO) Stdin() io.WriteCloser {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *binaryIO) Stdout() io.ReadCloser {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *binaryIO) Stderr() io.ReadCloser {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *binaryIO) Set(cmd *exec.Cmd) {
|
||||
if b.out != nil {
|
||||
cmd.Stdout = b.out.w
|
||||
}
|
||||
if b.err != nil {
|
||||
cmd.Stderr = b.err.w
|
||||
}
|
||||
}
|
||||
|
||||
func newPipe() (*pipe, error) {
|
||||
r, w, err := os.Pipe()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &pipe{
|
||||
r: r,
|
||||
w: w,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type pipe struct {
|
||||
r *os.File
|
||||
w *os.File
|
||||
}
|
||||
|
||||
func (p *pipe) Close() error {
|
||||
err := p.w.Close()
|
||||
if rerr := p.r.Close(); err == nil {
|
||||
err = rerr
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
|
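The new `newBinaryIO` in proc/io.go above starts the logging binary with `CONTAINER_ID` and `CONTAINER_NAMESPACE` in its environment, hands it the container's STDOUT and STDERR pipes plus a readiness pipe via `cmd.ExtraFiles`, and blocks until that pipe is written to or closed. A speculative sketch of what such a logging binary could look like for the v1 shim; the file-descriptor numbers (3, 4, 5) follow Go's `ExtraFiles` ordering and are an inference from the code above, not a documented containerd contract:

```go
package main

import (
	"bufio"
	"io"
	"log"
	"os"
	"sync"
)

// A minimal logging binary for the v1 shim's "binary" STDIO scheme.
func main() {
	stdout := os.NewFile(3, "stdout") // first ExtraFiles entry
	stderr := os.NewFile(4, "stderr") // second ExtraFiles entry
	ready := os.NewFile(5, "ready")   // readiness pipe held open by the shim

	var wg sync.WaitGroup
	wg.Add(2)
	go forward(&wg, stdout, "stdout")
	go forward(&wg, stderr, "stderr")

	// Closing the readiness pipe unblocks the shim's r.Read with io.EOF,
	// which the code above accepts as "ready".
	ready.Close()
	wg.Wait()
}

func forward(wg *sync.WaitGroup, r io.Reader, name string) {
	defer wg.Done()
	s := bufio.NewScanner(r)
	for s.Scan() {
		log.Printf("[%s] %s", name, s.Text())
	}
}
```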
29  vendor/github.com/containerd/containerd/runtime/v1/linux/proc/utils.go (generated, vendored)
|
@ -20,8 +20,10 @@ package proc
|
|||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
@ -114,6 +116,29 @@ func checkKillError(err error) error {
|
|||
return errors.Wrapf(err, "unknown error after kill")
|
||||
}
|
||||
|
||||
func hasNoIO(r *CreateConfig) bool {
|
||||
return r.Stdin == "" && r.Stdout == "" && r.Stderr == ""
|
||||
// InitPidFile name of the file that contains the init pid
|
||||
const InitPidFile = "init.pid"
|
||||
|
||||
func newPidFile(bundle string) *pidFile {
|
||||
return &pidFile{
|
||||
path: filepath.Join(bundle, InitPidFile),
|
||||
}
|
||||
}
|
||||
|
||||
func newExecPidFile(bundle, id string) *pidFile {
|
||||
return &pidFile{
|
||||
path: filepath.Join(bundle, fmt.Sprintf("%s.pid", id)),
|
||||
}
|
||||
}
|
||||
|
||||
type pidFile struct {
|
||||
path string
|
||||
}
|
||||
|
||||
func (p *pidFile) Path() string {
|
||||
return p.path
|
||||
}
|
||||
|
||||
func (p *pidFile) Read() (int, error) {
|
||||
return runc.ReadPidFile(p.path)
|
||||
}
|
||||
|
|
25  vendor/github.com/containerd/containerd/runtime/v1/shim/client/client.go (generated, vendored)
|
@ -20,10 +20,12 @@ package client
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
@ -107,6 +109,10 @@ func WithStart(binary, address, daemonAddress, cgroup string, debug bool, exitHa
|
|||
"address": address,
|
||||
"debug": debug,
|
||||
}).Infof("shim %s started", binary)
|
||||
|
||||
if err := writeAddress(filepath.Join(config.Path, "address"), address); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
// set shim in cgroup if it is provided
|
||||
if cgroup != "" {
|
||||
if err := setCgroup(cgroup, cmd); err != nil {
|
||||
|
@ -166,6 +172,25 @@ func newCommand(binary, daemonAddress string, debug bool, config shim.Config, so
|
|||
return cmd, nil
|
||||
}
|
||||
|
||||
// writeAddress writes a address file atomically
|
||||
func writeAddress(path, address string) error {
|
||||
path, err := filepath.Abs(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tempPath := filepath.Join(filepath.Dir(path), fmt.Sprintf(".%s", filepath.Base(path)))
|
||||
f, err := os.OpenFile(tempPath, os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, 0666)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = f.WriteString(address)
|
||||
f.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return os.Rename(tempPath, path)
|
||||
}
|
||||
|
||||
func newSocket(address string) (*net.UnixListener, error) {
|
||||
if len(address) > 106 {
|
||||
return nil, errors.Errorf("%q: unix socket path too long (> 106)", address)
|
||||
|
|
4  vendor/github.com/containerd/containerd/runtime/v1/shim/service_linux.go (generated, vendored)
|
@ -31,7 +31,7 @@ type linuxPlatform struct {
|
|||
epoller *console.Epoller
|
||||
}
|
||||
|
||||
func (p *linuxPlatform) CopyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string, wg, cwg *sync.WaitGroup) (console.Console, error) {
|
||||
func (p *linuxPlatform) CopyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string, wg *sync.WaitGroup) (console.Console, error) {
|
||||
if p.epoller == nil {
|
||||
return nil, errors.New("uninitialized epoller")
|
||||
}
|
||||
|
@ -40,6 +40,7 @@ func (p *linuxPlatform) CopyConsole(ctx context.Context, console console.Console
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var cwg sync.WaitGroup
|
||||
|
||||
if stdin != "" {
|
||||
in, err := fifo.OpenFifo(ctx, stdin, syscall.O_RDONLY, 0)
|
||||
|
@ -77,6 +78,7 @@ func (p *linuxPlatform) CopyConsole(ctx context.Context, console console.Console
|
|||
outw.Close()
|
||||
wg.Done()
|
||||
}()
|
||||
cwg.Wait()
|
||||
return epollConsole, nil
|
||||
}
|
||||
|
||||
|
|
4  vendor/github.com/containerd/containerd/runtime/v1/shim/service_unix.go (generated, vendored)
|
@ -31,7 +31,8 @@ import (
|
|||
type unixPlatform struct {
|
||||
}
|
||||
|
||||
func (p *unixPlatform) CopyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string, wg, cwg *sync.WaitGroup) (console.Console, error) {
|
||||
func (p *unixPlatform) CopyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string, wg *sync.WaitGroup) (console.Console, error) {
|
||||
var cwg sync.WaitGroup
|
||||
if stdin != "" {
|
||||
in, err := fifo.OpenFifo(ctx, stdin, syscall.O_RDONLY, 0)
|
||||
if err != nil {
|
||||
|
@ -67,6 +68,7 @@ func (p *unixPlatform) CopyConsole(ctx context.Context, console console.Console,
|
|||
outw.Close()
|
||||
wg.Done()
|
||||
}()
|
||||
cwg.Wait()
|
||||
return console, nil
|
||||
}
|
||||
|
||||
|
|
62  vendor/github.com/containerd/containerd/runtime/v2/README.md (generated, vendored)

@@ -173,6 +173,68 @@ The Runtime v2 supports an async event model. In order for the an upstream calle
 | `runtime.TaskExitEventTopic` | MUST (follow `TaskExecStartedEventTopic`) | When an exec (other than the init exec) exits expected or unexpected |
 | `runtime.TaskDeleteEventTopic` | SHOULD (follow `TaskExitEventTopic` or `TaskExecAddedEventTopic` if never started) | When an exec is removed from a shim |
 
+#### Logging
+
+Shims may support pluggable logging via STDIO URIs.
+Current supported schemes for logging are:
+
+* fifo - Linux
+* binary - Linux & Windows
+* file - Linux & Windows
+* npipe - Windows
+
+Binary logging has the abilty to forward a container's STDIO to an external binary for consumption.
+A sample logging driver that forwards the container's STDOUT and STDERR to `journald` is:
+
+```go
+package main
+
+import (
+	"bufio"
+	"context"
+	"fmt"
+	"io"
+	"sync"
+
+	"github.com/containerd/containerd/runtime/v2/logging"
+	"github.com/coreos/go-systemd/journal"
+)
+
+func main() {
+	logging.Run(log)
+}
+
+func log(ctx context.Context, config *logging.Config, ready func() error) error {
+	// construct any log metadata for the container
+	vars := map[string]string{
+		"SYSLOG_IDENTIFIER": fmt.Sprintf("%s:%s", config.Namespace, config.ID),
+	}
+	var wg sync.WaitGroup
+	wg.Add(2)
+	// forward both stdout and stderr to the journal
+	go copy(&wg, config.Stdout, journal.PriInfo, vars)
+	go copy(&wg, config.Stderr, journal.PriErr, vars)
+
+	// signal that we are ready and setup for the container to be started
+	if err := ready(); err != nil {
+		return err
+	}
+	wg.Wait()
+	return nil
+}
+
+func copy(wg *sync.WaitGroup, r io.Reader, pri journal.Priority, vars map[string]string) {
+	defer wg.Done()
+	s := bufio.NewScanner(r)
+	for s.Scan() {
+		if s.Err() != nil {
+			return
+		}
+		journal.Send(s.Text(), pri, vars)
+	}
+}
+```
+
 ### Other
 
 #### Unsupported rpcs
@ -14,7 +14,7 @@
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
package content
|
||||
package contentserver
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
@ -25,8 +25,6 @@ import (
|
|||
"github.com/containerd/containerd/content"
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/log"
|
||||
"github.com/containerd/containerd/plugin"
|
||||
"github.com/containerd/containerd/services"
|
||||
ptypes "github.com/gogo/protobuf/types"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
|
@ -48,35 +46,8 @@ var bufPool = sync.Pool{
|
|||
},
|
||||
}
|
||||
|
||||
var _ api.ContentServer = &service{}
|
||||
|
||||
func init() {
|
||||
plugin.Register(&plugin.Registration{
|
||||
Type: plugin.GRPCPlugin,
|
||||
ID: "content",
|
||||
Requires: []plugin.Type{
|
||||
plugin.ServicePlugin,
|
||||
},
|
||||
InitFn: func(ic *plugin.InitContext) (interface{}, error) {
|
||||
plugins, err := ic.GetByType(plugin.ServicePlugin)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p, ok := plugins[services.ContentService]
|
||||
if !ok {
|
||||
return nil, errors.New("content store service not found")
|
||||
}
|
||||
cs, err := p.Instance()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewService(cs.(content.Store)), nil
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// NewService returns the content GRPC server
|
||||
func NewService(cs content.Store) api.ContentServer {
|
||||
// New returns the content GRPC server
|
||||
func New(cs content.Store) api.ContentServer {
|
||||
return &service{store: cs}
|
||||
}
|
||||
|
71  vendor/github.com/containerd/containerd/services/content/store.go (generated, vendored)
|
@ -1,71 +0,0 @@
|
|||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package content
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
eventstypes "github.com/containerd/containerd/api/events"
|
||||
"github.com/containerd/containerd/content"
|
||||
"github.com/containerd/containerd/events"
|
||||
"github.com/containerd/containerd/metadata"
|
||||
"github.com/containerd/containerd/plugin"
|
||||
"github.com/containerd/containerd/services"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
// store wraps content.Store with proper event published.
|
||||
type store struct {
|
||||
content.Store
|
||||
publisher events.Publisher
|
||||
}
|
||||
|
||||
func init() {
|
||||
plugin.Register(&plugin.Registration{
|
||||
Type: plugin.ServicePlugin,
|
||||
ID: services.ContentService,
|
||||
Requires: []plugin.Type{
|
||||
plugin.MetadataPlugin,
|
||||
},
|
||||
InitFn: func(ic *plugin.InitContext) (interface{}, error) {
|
||||
m, err := ic.Get(plugin.MetadataPlugin)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s, err := newContentStore(m.(*metadata.DB).ContentStore(), ic.Events)
|
||||
return s, err
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func newContentStore(cs content.Store, publisher events.Publisher) (content.Store, error) {
|
||||
return &store{
|
||||
Store: cs,
|
||||
publisher: publisher,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *store) Delete(ctx context.Context, dgst digest.Digest) error {
|
||||
if err := s.Store.Delete(ctx, dgst); err != nil {
|
||||
return err
|
||||
}
|
||||
// TODO: Consider whether we should return error here.
|
||||
return s.publisher.Publish(ctx, "/content/delete", &eventstypes.ContentDelete{
|
||||
Digest: dgst,
|
||||
})
|
||||
}
|
36 changes: vendor/github.com/containerd/containerd/services/services.go (generated, vendored)
@ -1,36 +0,0 @@
|
|||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package services
|
||||
|
||||
const (
|
||||
// ContentService is id of content service.
|
||||
ContentService = "content-service"
|
||||
// SnapshotsService is id of snapshots service.
|
||||
SnapshotsService = "snapshots-service"
|
||||
// ImagesService is id of images service.
|
||||
ImagesService = "images-service"
|
||||
// ContainersService is id of containers service.
|
||||
ContainersService = "containers-service"
|
||||
// TasksService is id of tasks service.
|
||||
TasksService = "tasks-service"
|
||||
// NamespacesService is id of namespaces service.
|
||||
NamespacesService = "namespaces-service"
|
||||
// LeasesService is id of leases service.
|
||||
LeasesService = "leases-service"
|
||||
// DiffService is id of diff service.
|
||||
DiffService = "diff-service"
|
||||
)
|
8 changes: vendor/github.com/moby/buildkit/README.md (generated, vendored)
@ -184,6 +184,14 @@ The local client will copy the files directly to the client. This is useful if B
buildctl build ... --output type=local,dest=path/to/output-dir
```

Tar exporter is similar to local exporter but transfers the files through a tarball.

```
buildctl build ... --output type=tar,dest=out.tar
buildctl build ... --output type=tar > out.tar
```


##### Exporting built image to Docker

```
|
17 changes: vendor/github.com/moby/buildkit/cache/contenthash/checksum.go (generated, vendored)
@ -10,6 +10,7 @@ import (
|
|||
"path/filepath"
|
||||
"sync"
|
||||
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/pkg/locker"
|
||||
iradix "github.com/hashicorp/go-immutable-radix"
|
||||
"github.com/hashicorp/golang-lru/simplelru"
|
||||
|
@ -51,8 +52,8 @@ func ChecksumWildcard(ctx context.Context, ref cache.ImmutableRef, path string,
|
|||
return getDefaultManager().ChecksumWildcard(ctx, ref, path, followLinks)
|
||||
}
|
||||
|
||||
func GetCacheContext(ctx context.Context, md *metadata.StorageItem) (CacheContext, error) {
|
||||
return getDefaultManager().GetCacheContext(ctx, md)
|
||||
func GetCacheContext(ctx context.Context, md *metadata.StorageItem, idmap *idtools.IdentityMapping) (CacheContext, error) {
|
||||
return getDefaultManager().GetCacheContext(ctx, md, idmap)
|
||||
}
|
||||
|
||||
func SetCacheContext(ctx context.Context, md *metadata.StorageItem, cc CacheContext) error {
|
||||
|
@ -81,7 +82,7 @@ type cacheManager struct {
|
|||
}
|
||||
|
||||
func (cm *cacheManager) Checksum(ctx context.Context, ref cache.ImmutableRef, p string, followLinks bool) (digest.Digest, error) {
|
||||
cc, err := cm.GetCacheContext(ctx, ensureOriginMetadata(ref.Metadata()))
|
||||
cc, err := cm.GetCacheContext(ctx, ensureOriginMetadata(ref.Metadata()), ref.IdentityMapping())
|
||||
if err != nil {
|
||||
return "", nil
|
||||
}
|
||||
|
@ -89,14 +90,14 @@ func (cm *cacheManager) Checksum(ctx context.Context, ref cache.ImmutableRef, p
|
|||
}
|
||||
|
||||
func (cm *cacheManager) ChecksumWildcard(ctx context.Context, ref cache.ImmutableRef, p string, followLinks bool) (digest.Digest, error) {
|
||||
cc, err := cm.GetCacheContext(ctx, ensureOriginMetadata(ref.Metadata()))
|
||||
cc, err := cm.GetCacheContext(ctx, ensureOriginMetadata(ref.Metadata()), ref.IdentityMapping())
|
||||
if err != nil {
|
||||
return "", nil
|
||||
}
|
||||
return cc.ChecksumWildcard(ctx, ref, p, followLinks)
|
||||
}
|
||||
|
||||
func (cm *cacheManager) GetCacheContext(ctx context.Context, md *metadata.StorageItem) (CacheContext, error) {
|
||||
func (cm *cacheManager) GetCacheContext(ctx context.Context, md *metadata.StorageItem, idmap *idtools.IdentityMapping) (CacheContext, error) {
|
||||
cm.locker.Lock(md.ID())
|
||||
cm.lruMu.Lock()
|
||||
v, ok := cm.lru.Get(md.ID())
|
||||
|
@ -106,7 +107,7 @@ func (cm *cacheManager) GetCacheContext(ctx context.Context, md *metadata.Storag
|
|||
v.(*cacheContext).linkMap = map[string][][]byte{}
|
||||
return v.(*cacheContext), nil
|
||||
}
|
||||
cc, err := newCacheContext(md)
|
||||
cc, err := newCacheContext(md, idmap)
|
||||
if err != nil {
|
||||
cm.locker.Unlock(md.ID())
|
||||
return nil, err
|
||||
|
@ -152,6 +153,7 @@ type cacheContext struct {
|
|||
node *iradix.Node
|
||||
dirtyMap map[string]struct{}
|
||||
linkMap map[string][][]byte
|
||||
idmap *idtools.IdentityMapping
|
||||
}
|
||||
|
||||
type mount struct {
|
||||
|
@ -191,12 +193,13 @@ func (m *mount) clean() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func newCacheContext(md *metadata.StorageItem) (*cacheContext, error) {
|
||||
func newCacheContext(md *metadata.StorageItem, idmap *idtools.IdentityMapping) (*cacheContext, error) {
|
||||
cc := &cacheContext{
|
||||
md: md,
|
||||
tree: iradix.New(),
|
||||
dirtyMap: map[string]struct{}{},
|
||||
linkMap: map[string][][]byte{},
|
||||
idmap: idmap,
|
||||
}
|
||||
if err := cc.load(); err != nil {
|
||||
return nil, err
|
||||
|
|
7 changes: vendor/github.com/moby/buildkit/cache/manager.go (generated, vendored)
@ -8,6 +8,7 @@ import (
|
|||
|
||||
"github.com/containerd/containerd/filters"
|
||||
"github.com/containerd/containerd/snapshots"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/moby/buildkit/cache/metadata"
|
||||
"github.com/moby/buildkit/client"
|
||||
"github.com/moby/buildkit/identity"
|
||||
|
@ -34,6 +35,7 @@ type Accessor interface {
|
|||
GetFromSnapshotter(ctx context.Context, id string, opts ...RefOption) (ImmutableRef, error)
|
||||
New(ctx context.Context, s ImmutableRef, opts ...RefOption) (MutableRef, error)
|
||||
GetMutable(ctx context.Context, id string) (MutableRef, error) // Rebase?
|
||||
IdentityMapping() *idtools.IdentityMapping
|
||||
}
|
||||
|
||||
type Controller interface {
|
||||
|
@ -96,6 +98,11 @@ func (cm *cacheManager) init(ctx context.Context) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// IdentityMapping returns the userns remapping used for refs
|
||||
func (cm *cacheManager) IdentityMapping() *idtools.IdentityMapping {
|
||||
return cm.Snapshotter.IdentityMapping()
|
||||
}
|
||||
|
||||
// Close closes the manager and releases the metadata database lock. No other
|
||||
// method should be called after Close.
|
||||
func (cm *cacheManager) Close() error {
|
||||
|
|
6 changes: vendor/github.com/moby/buildkit/cache/refs.go (generated, vendored)
@ -5,6 +5,7 @@ import (
|
|||
"sync"
|
||||
|
||||
"github.com/containerd/containerd/mount"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/moby/buildkit/cache/metadata"
|
||||
"github.com/moby/buildkit/identity"
|
||||
"github.com/moby/buildkit/snapshot"
|
||||
|
@ -20,6 +21,7 @@ type Ref interface {
|
|||
Release(context.Context) error
|
||||
Size(ctx context.Context) (int64, error)
|
||||
Metadata() *metadata.StorageItem
|
||||
IdentityMapping() *idtools.IdentityMapping
|
||||
}
|
||||
|
||||
type ImmutableRef interface {
|
||||
|
@ -83,6 +85,10 @@ func (cr *cacheRecord) isDead() bool {
|
|||
return cr.dead || (cr.equalImmutable != nil && cr.equalImmutable.dead) || (cr.equalMutable != nil && cr.equalMutable.dead)
|
||||
}
|
||||
|
||||
func (cr *cacheRecord) IdentityMapping() *idtools.IdentityMapping {
|
||||
return cr.cm.IdentityMapping()
|
||||
}
|
||||
|
||||
func (cr *cacheRecord) Size(ctx context.Context) (int64, error) {
|
||||
// this expects that usage() is implemented lazily
|
||||
s, err := cr.sizeG.Do(ctx, cr.ID(), func(ctx context.Context) (interface{}, error) {
|
||||
|
|
45 changes: vendor/github.com/moby/buildkit/client/client.go (generated, vendored)
@ -5,9 +5,12 @@ import (
|
|||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
|
||||
controlapi "github.com/moby/buildkit/api/services/control"
|
||||
"github.com/moby/buildkit/client/connhelper"
|
||||
"github.com/moby/buildkit/util/appdefaults"
|
||||
opentracing "github.com/opentracing/opentracing-go"
|
||||
"github.com/pkg/errors"
|
||||
|
@ -23,9 +26,8 @@ type ClientOpt interface{}
|
|||
|
||||
// New returns a new buildkit client. Address can be empty for the system-default address.
|
||||
func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error) {
|
||||
gopts := []grpc.DialOption{
|
||||
grpc.WithDialer(dialer),
|
||||
}
|
||||
gopts := []grpc.DialOption{}
|
||||
needDialer := true
|
||||
needWithInsecure := true
|
||||
for _, o := range opts {
|
||||
if _, ok := o.(*withFailFast); ok {
|
||||
|
@ -44,6 +46,19 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error
|
|||
grpc.WithUnaryInterceptor(otgrpc.OpenTracingClientInterceptor(wt.tracer, otgrpc.LogPayloads())),
|
||||
grpc.WithStreamInterceptor(otgrpc.OpenTracingStreamClientInterceptor(wt.tracer)))
|
||||
}
|
||||
if wd, ok := o.(*withDialer); ok {
|
||||
gopts = append(gopts, grpc.WithDialer(wd.dialer))
|
||||
needDialer = false
|
||||
}
|
||||
}
|
||||
if needDialer {
|
||||
dialFn, err := resolveDialer(address)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// TODO(AkihiroSuda): use WithContextDialer (requires grpc 1.19)
|
||||
// https://github.com/grpc/grpc-go/commit/40cb5618f475e7b9d61aa7920ae4b04ef9bbaf89
|
||||
gopts = append(gopts, grpc.WithDialer(dialFn))
|
||||
}
|
||||
if needWithInsecure {
|
||||
gopts = append(gopts, grpc.WithInsecure())
|
||||
|
@ -75,6 +90,14 @@ func WithFailFast() ClientOpt {
|
|||
return &withFailFast{}
|
||||
}
|
||||
|
||||
type withDialer struct {
|
||||
dialer func(string, time.Duration) (net.Conn, error)
|
||||
}
|
||||
|
||||
func WithDialer(df func(string, time.Duration) (net.Conn, error)) ClientOpt {
|
||||
return &withDialer{dialer: df}
|
||||
}
|
||||
|
||||
type withCredentials struct {
|
||||
ServerName string
|
||||
CACert string
|
||||
|
@ -128,3 +151,19 @@ func WithTracer(t opentracing.Tracer) ClientOpt {
|
|||
type withTracer struct {
|
||||
tracer opentracing.Tracer
|
||||
}
|
||||
|
||||
func resolveDialer(address string) (func(string, time.Duration) (net.Conn, error), error) {
|
||||
ch, err := connhelper.GetConnectionHelper(address)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if ch != nil {
|
||||
f := func(a string, _ time.Duration) (net.Conn, error) {
|
||||
ctx := context.Background()
|
||||
return ch.ContextDialer(ctx, a)
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
// basic dialer
|
||||
return dialer, nil
|
||||
}
|
||||
|
|
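The WithDialer option added above lets a caller supply its own net.Conn dialer when constructing a client. A minimal sketch of how it might be wired up; the unix-socket path and the dialUnix helper are illustrative assumptions, not part of the vendored change:

```go
package main

import (
	"context"
	"log"
	"net"
	"time"

	"github.com/moby/buildkit/client"
)

func main() {
	// Hypothetical custom dialer; the socket path and timeout handling are
	// illustration only.
	dialUnix := func(addr string, timeout time.Duration) (net.Conn, error) {
		return net.DialTimeout("unix", "/run/buildkit/buildkitd.sock", timeout)
	}

	// An empty address falls back to the system default, per the comment on New.
	c, err := client.New(context.Background(), "", client.WithDialer(dialUnix))
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
}
```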
37 changes: vendor/github.com/moby/buildkit/client/connhelper/connhelper.go (generated, vendored, new file)
@ -0,0 +1,37 @@
|
|||
// Package connhelper provides helpers for connecting to a remote daemon host with custom logic.
|
||||
package connhelper
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
var helpers = map[string]func(*url.URL) (*ConnectionHelper, error){}
|
||||
|
||||
// ConnectionHelper allows to connect to a remote host with custom stream provider binary.
|
||||
type ConnectionHelper struct {
|
||||
// ContextDialer can be passed to grpc.WithContextDialer
|
||||
ContextDialer func(ctx context.Context, addr string) (net.Conn, error)
|
||||
}
|
||||
|
||||
// GetConnectionHelper returns BuildKit-specific connection helper for the given URL.
|
||||
// GetConnectionHelper returns nil without error when no helper is registered for the scheme.
|
||||
func GetConnectionHelper(daemonURL string) (*ConnectionHelper, error) {
|
||||
u, err := url.Parse(daemonURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fn, ok := helpers[u.Scheme]
|
||||
if !ok {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return fn(u)
|
||||
}
|
||||
|
||||
// Register registers new connectionhelper for scheme
|
||||
func Register(scheme string, fn func(*url.URL) (*ConnectionHelper, error)) {
|
||||
helpers[scheme] = fn
|
||||
}
|
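connhelper is registration-based: a helper package registers a constructor for its URL scheme at init time, and the client resolves it from the daemon address. A sketch of a hypothetical registration; the "myproto" scheme and its TCP dialing logic are invented for illustration:

```go
package myconnhelper

import (
	"context"
	"net"
	"net/url"

	"github.com/moby/buildkit/client/connhelper"
)

func init() {
	// Register a hypothetical "myproto://" scheme; the dialing logic is a stub
	// that simply opens a TCP connection to the host in the URL.
	connhelper.Register("myproto", func(u *url.URL) (*connhelper.ConnectionHelper, error) {
		return &connhelper.ConnectionHelper{
			ContextDialer: func(ctx context.Context, addr string) (net.Conn, error) {
				var d net.Dialer
				return d.DialContext(ctx, "tcp", u.Host)
			},
		}, nil
	})
}
```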
1 change: vendor/github.com/moby/buildkit/client/exporters.go (generated, vendored)
@ -3,6 +3,7 @@ package client
const (
	ExporterImage  = "image"
	ExporterLocal  = "local"
	ExporterTar    = "tar"
	ExporterOCI    = "oci"
	ExporterDocker = "docker"
)
|
33 changes: vendor/github.com/moby/buildkit/client/llb/exec.go (generated, vendored)
@ -20,6 +20,7 @@ type Meta struct {
|
|||
ProxyEnv *ProxyEnv
|
||||
ExtraHosts []HostIP
|
||||
Network pb.NetMode
|
||||
Security pb.SecurityMode
|
||||
}
|
||||
|
||||
func NewExecOp(root Output, meta Meta, readOnly bool, c Constraints) *ExecOp {
|
||||
|
@ -52,7 +53,7 @@ type mount struct {
|
|||
cacheID string
|
||||
tmpfs bool
|
||||
cacheSharing CacheMountSharingMode
|
||||
// hasOutput bool
|
||||
noOutput bool
|
||||
}
|
||||
|
||||
type ExecOp struct {
|
||||
|
@ -79,6 +80,8 @@ func (e *ExecOp) AddMount(target string, source Output, opt ...MountOption) Outp
|
|||
m.output = source
|
||||
} else if m.tmpfs {
|
||||
m.output = &output{vertex: e, err: errors.Errorf("tmpfs mount for %s can't be used as a parent", target)}
|
||||
} else if m.noOutput {
|
||||
m.output = &output{vertex: e, err: errors.Errorf("mount marked no-output and %s can't be used as a parent", target)}
|
||||
} else {
|
||||
o := &output{vertex: e, getIndex: e.getMountIndexFn(m)}
|
||||
if p := e.constraints.Platform; p != nil {
|
||||
|
@ -166,13 +169,18 @@ func (e *ExecOp) Marshal(c *Constraints) (digest.Digest, []byte, *pb.OpMetadata,
|
|||
}
|
||||
|
||||
peo := &pb.ExecOp{
|
||||
Meta: meta,
|
||||
Network: e.meta.Network,
|
||||
Meta: meta,
|
||||
Network: e.meta.Network,
|
||||
Security: e.meta.Security,
|
||||
}
|
||||
if e.meta.Network != NetModeSandbox {
|
||||
addCap(&e.constraints, pb.CapExecMetaNetwork)
|
||||
}
|
||||
|
||||
if e.meta.Security != SecurityModeInsecure {
|
||||
addCap(&e.constraints, pb.CapExecMetaSecurity)
|
||||
}
|
||||
|
||||
if p := e.meta.ProxyEnv; p != nil {
|
||||
peo.Meta.ProxyEnv = &pb.ProxyEnv{
|
||||
HttpProxy: p.HttpProxy,
|
||||
|
@ -242,7 +250,7 @@ func (e *ExecOp) Marshal(c *Constraints) (digest.Digest, []byte, *pb.OpMetadata,
|
|||
}
|
||||
|
||||
outputIndex := pb.OutputIndex(-1)
|
||||
if !m.readonly && m.cacheID == "" && !m.tmpfs {
|
||||
if !m.noOutput && !m.readonly && m.cacheID == "" && !m.tmpfs {
|
||||
outputIndex = pb.OutputIndex(outIndex)
|
||||
outIndex++
|
||||
}
|
||||
|
@ -338,7 +346,7 @@ func (e *ExecOp) getMountIndexFn(m *mount) func() (pb.OutputIndex, error) {
|
|||
|
||||
i := 0
|
||||
for _, m2 := range e.mounts {
|
||||
if m2.readonly || m2.cacheID != "" {
|
||||
if m2.noOutput || m2.readonly || m2.cacheID != "" {
|
||||
continue
|
||||
}
|
||||
if m == m2 {
|
||||
|
@ -379,6 +387,10 @@ func SourcePath(src string) MountOption {
|
|||
}
|
||||
}
|
||||
|
||||
func ForceNoOutput(m *mount) {
|
||||
m.noOutput = true
|
||||
}
|
||||
|
||||
func AsPersistentCacheDir(id string, sharing CacheMountSharingMode) MountOption {
|
||||
return func(m *mount) {
|
||||
m.cacheID = id
|
||||
|
@ -408,6 +420,12 @@ func Network(n pb.NetMode) RunOption {
|
|||
})
|
||||
}
|
||||
|
||||
func Security(s pb.SecurityMode) RunOption {
|
||||
return runOptionFunc(func(ei *ExecInfo) {
|
||||
ei.State = security(s)(ei.State)
|
||||
})
|
||||
}
|
||||
|
||||
func Shlex(str string) RunOption {
|
||||
return Shlexf(str)
|
||||
}
|
||||
|
@ -623,3 +641,8 @@ const (
|
|||
NetModeHost = pb.NetMode_HOST
|
||||
NetModeNone = pb.NetMode_NONE
|
||||
)
|
||||
|
||||
const (
|
||||
SecurityModeInsecure = pb.SecurityMode_INSECURE
|
||||
SecurityModeSandbox = pb.SecurityMode_SANDBOX
|
||||
)
|
||||
|
|
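With the Security run option and the SecurityModeInsecure/SecurityModeSandbox constants above, an LLB client can request the insecure mode per exec op (still subject to the daemon allowing the corresponding entitlement). A rough sketch, with the image and command chosen only for illustration:

```go
package llbsketch

import (
	"github.com/moby/buildkit/client/llb"
)

// insecureRun builds an exec op that asks for the insecure security mode.
// Image reference and command are placeholders.
func insecureRun() llb.State {
	return llb.Image("docker.io/library/alpine:latest").
		Run(
			llb.Shlex("sh -c 'echo hello'"),
			llb.Security(llb.SecurityModeInsecure),
		).
		Root()
}
```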
16 changes: vendor/github.com/moby/buildkit/client/llb/meta.go (generated, vendored)
@ -21,6 +21,7 @@ var (
|
|||
keyExtraHost = contextKeyT("llb.exec.extrahost")
|
||||
keyPlatform = contextKeyT("llb.platform")
|
||||
keyNetwork = contextKeyT("llb.network")
|
||||
keySecurity = contextKeyT("llb.security")
|
||||
)
|
||||
|
||||
func addEnvf(key, value string, v ...interface{}) StateOption {
|
||||
|
@ -148,7 +149,6 @@ func network(v pb.NetMode) StateOption {
|
|||
return s.WithValue(keyNetwork, v)
|
||||
}
|
||||
}
|
||||
|
||||
func getNetwork(s State) pb.NetMode {
|
||||
v := s.Value(keyNetwork)
|
||||
if v != nil {
|
||||
|
@ -158,6 +158,20 @@ func getNetwork(s State) pb.NetMode {
|
|||
return NetModeSandbox
|
||||
}
|
||||
|
||||
func security(v pb.SecurityMode) StateOption {
|
||||
return func(s State) State {
|
||||
return s.WithValue(keySecurity, v)
|
||||
}
|
||||
}
|
||||
func getSecurity(s State) pb.SecurityMode {
|
||||
v := s.Value(keySecurity)
|
||||
if v != nil {
|
||||
n := v.(pb.SecurityMode)
|
||||
return n
|
||||
}
|
||||
return SecurityModeSandbox
|
||||
}
|
||||
|
||||
type EnvList []KeyValue
|
||||
|
||||
type KeyValue struct {
|
||||
|
|
8 changes: vendor/github.com/moby/buildkit/client/llb/state.go (generated, vendored)
@ -214,6 +214,7 @@ func (s State) Run(ro ...RunOption) ExecState {
|
|||
ProxyEnv: ei.ProxyEnv,
|
||||
ExtraHosts: getExtraHosts(ei.State),
|
||||
Network: getNetwork(ei.State),
|
||||
Security: getSecurity(ei.State),
|
||||
}
|
||||
|
||||
exec := NewExecOp(s.Output(), meta, ei.ReadonlyRootFS, ei.Constraints)
|
||||
|
@ -292,6 +293,13 @@ func (s State) Network(n pb.NetMode) State {
|
|||
func (s State) GetNetwork() pb.NetMode {
|
||||
return getNetwork(s)
|
||||
}
|
||||
func (s State) Security(n pb.SecurityMode) State {
|
||||
return security(n)(s)
|
||||
}
|
||||
|
||||
func (s State) GetSecurity() pb.SecurityMode {
|
||||
return getSecurity(s)
|
||||
}
|
||||
|
||||
func (s State) With(so ...StateOption) State {
|
||||
for _, o := range so {
|
||||
|
|
2 changes: vendor/github.com/moby/buildkit/client/solve.go (generated, vendored)
@ -124,7 +124,7 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
			return nil, errors.New("output directory is required for local exporter")
		}
		s.Allow(filesync.NewFSSyncTargetDir(ex.OutputDir))
	case ExporterOCI, ExporterDocker:
	case ExporterOCI, ExporterDocker, ExporterTar:
		if ex.OutputDir != "" {
			return nil, errors.Errorf("output directory %s is not supported by %s exporter", ex.OutputDir, ex.Type)
		}
|
3 changes: vendor/github.com/moby/buildkit/control/control.go (generated, vendored)
@ -32,6 +32,7 @@ type Opt struct {
	CacheKeyStorage           solver.CacheKeyStorage
	ResolveCacheExporterFuncs map[string]remotecache.ResolveCacheExporterFunc
	ResolveCacheImporterFuncs map[string]remotecache.ResolveCacheImporterFunc
	Entitlements              []string
}

type Controller struct { // TODO: ControlService

@ -48,7 +49,7 @@ func NewController(opt Opt) (*Controller, error) {

	gatewayForwarder := controlgateway.NewGatewayForwarder()

	solver, err := llbsolver.New(opt.WorkerController, opt.Frontends, cache, opt.ResolveCacheImporterFuncs, gatewayForwarder, opt.SessionManager)
	solver, err := llbsolver.New(opt.WorkerController, opt.Frontends, cache, opt.ResolveCacheImporterFuncs, gatewayForwarder, opt.SessionManager, opt.Entitlements)
	if err != nil {
		return nil, errors.Wrap(err, "failed to create solver")
	}
|
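The new Entitlements field on control.Opt is how the embedding daemon declares which entitlements its solver may grant; llbsolver later filters requests against this list (see supportedEntitlements further down). A hedged fragment, assuming the rest of the controller options are assembled elsewhere and using a hypothetical package name:

```go
package buildkitsetup

import (
	"github.com/moby/buildkit/control"
	"github.com/moby/buildkit/util/entitlements"
)

// allowEntitlements fills in the new Entitlements field; the remaining
// fields of opt are assumed to be populated by the caller.
func allowEntitlements(opt control.Opt) control.Opt {
	opt.Entitlements = []string{
		string(entitlements.EntitlementNetworkHost),
		string(entitlements.EntitlementSecurityInsecure),
	}
	return opt
}
```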
1 change: vendor/github.com/moby/buildkit/executor/executor.go (generated, vendored)
@ -18,6 +18,7 @@ type Meta struct {
	ReadonlyRootFS bool
	ExtraHosts     []HostIP
	NetMode        pb.NetMode
	SecurityMode   pb.SecurityMode
}

type Mount struct {
|
41 changes: vendor/github.com/moby/buildkit/executor/oci/spec_unix.go (generated, vendored)
@ -8,14 +8,19 @@ import (
|
|||
"sync"
|
||||
|
||||
"github.com/containerd/containerd/containers"
|
||||
"github.com/containerd/containerd/contrib/seccomp"
|
||||
"github.com/containerd/containerd/mount"
|
||||
"github.com/containerd/containerd/namespaces"
|
||||
"github.com/containerd/containerd/oci"
|
||||
"github.com/containerd/continuity/fs"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/mitchellh/hashstructure"
|
||||
"github.com/moby/buildkit/executor"
|
||||
"github.com/moby/buildkit/snapshot"
|
||||
"github.com/moby/buildkit/solver/pb"
|
||||
"github.com/moby/buildkit/util/entitlements"
|
||||
"github.com/moby/buildkit/util/network"
|
||||
"github.com/moby/buildkit/util/system"
|
||||
specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
@ -36,7 +41,7 @@ const (
|
|||
|
||||
// GenerateSpec generates spec using containerd functionality.
|
||||
// opts are ignored for s.Process, s.Hostname, and s.Mounts .
|
||||
func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mount, id, resolvConf, hostsFile string, namespace network.Namespace, processMode ProcessMode, opts ...oci.SpecOpts) (*specs.Spec, func(), error) {
|
||||
func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mount, id, resolvConf, hostsFile string, namespace network.Namespace, processMode ProcessMode, idmap *idtools.IdentityMapping, opts ...oci.SpecOpts) (*specs.Spec, func(), error) {
|
||||
c := &containers.Container{
|
||||
ID: id,
|
||||
}
|
||||
|
@ -44,6 +49,11 @@ func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mou
|
|||
if !ok {
|
||||
ctx = namespaces.WithNamespace(ctx, "buildkit")
|
||||
}
|
||||
if meta.SecurityMode == pb.SecurityMode_INSECURE {
|
||||
opts = append(opts, entitlements.WithInsecureSpec())
|
||||
} else if system.SeccompSupported() && meta.SecurityMode == pb.SecurityMode_SANDBOX {
|
||||
opts = append(opts, seccomp.WithDefaultProfile())
|
||||
}
|
||||
|
||||
switch processMode {
|
||||
case NoProcessSandbox:
|
||||
|
@ -85,7 +95,22 @@ func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mou
|
|||
Options: []string{"ro", "nosuid", "noexec", "nodev"},
|
||||
})
|
||||
|
||||
// TODO: User
|
||||
if meta.SecurityMode == pb.SecurityMode_INSECURE {
|
||||
//make sysfs rw mount for insecure mode.
|
||||
for _, m := range s.Mounts {
|
||||
if m.Type == "sysfs" {
|
||||
m.Options = []string{"nosuid", "noexec", "nodev", "rw"}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if idmap != nil {
|
||||
s.Linux.Namespaces = append(s.Linux.Namespaces, specs.LinuxNamespace{
|
||||
Type: specs.UserNamespace,
|
||||
})
|
||||
s.Linux.UIDMappings = specMapping(idmap.UIDs())
|
||||
s.Linux.GIDMappings = specMapping(idmap.GIDs())
|
||||
}
|
||||
|
||||
sm := &submounts{}
|
||||
|
||||
|
@ -210,3 +235,15 @@ func sub(m mount.Mount, subPath string) (mount.Mount, error) {
|
|||
m.Source = src
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func specMapping(s []idtools.IDMap) []specs.LinuxIDMapping {
|
||||
var ids []specs.LinuxIDMapping
|
||||
for _, item := range s {
|
||||
ids = append(ids, specs.LinuxIDMapping{
|
||||
HostID: uint32(item.HostID),
|
||||
ContainerID: uint32(item.ContainerID),
|
||||
Size: uint32(item.Size),
|
||||
})
|
||||
}
|
||||
return ids
|
||||
}
|
||||
|
|
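GenerateSpec now threads an idtools.IdentityMapping into the OCI spec: when a mapping is present it adds a user namespace and converts the Docker-style ID maps via specMapping. The conversion can be sketched standalone; the sample uidMaps value below is purely illustrative:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/idtools"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

// toSpecMapping mirrors the specMapping helper from the vendored change: it
// converts docker/pkg/idtools ID maps into OCI runtime-spec ID mappings.
func toSpecMapping(s []idtools.IDMap) []specs.LinuxIDMapping {
	var ids []specs.LinuxIDMapping
	for _, item := range s {
		ids = append(ids, specs.LinuxIDMapping{
			HostID:      uint32(item.HostID),
			ContainerID: uint32(item.ContainerID),
			Size:        uint32(item.Size),
		})
	}
	return ids
}

func main() {
	// Illustrative mapping: container root maps to host UID 100000.
	uidMaps := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
	fmt.Println(toSpecMapping(uidMaps))
}
```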
24 changes: vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go (generated, vendored)
@ -13,11 +13,11 @@ import (
|
|||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/containerd/contrib/seccomp"
|
||||
"github.com/containerd/containerd/mount"
|
||||
containerdoci "github.com/containerd/containerd/oci"
|
||||
"github.com/containerd/continuity/fs"
|
||||
runc "github.com/containerd/go-runc"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/moby/buildkit/cache"
|
||||
"github.com/moby/buildkit/executor"
|
||||
"github.com/moby/buildkit/executor/oci"
|
||||
|
@ -25,7 +25,6 @@ import (
|
|||
"github.com/moby/buildkit/solver/pb"
|
||||
"github.com/moby/buildkit/util/network"
|
||||
rootlessspecconv "github.com/moby/buildkit/util/rootless/specconv"
|
||||
"github.com/moby/buildkit/util/system"
|
||||
specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
@ -40,7 +39,8 @@ type Opt struct {
|
|||
// DefaultCgroupParent is the cgroup-parent name for executor
|
||||
DefaultCgroupParent string
|
||||
// ProcessMode
|
||||
ProcessMode oci.ProcessMode
|
||||
ProcessMode oci.ProcessMode
|
||||
IdentityMapping *idtools.IdentityMapping
|
||||
}
|
||||
|
||||
var defaultCommandCandidates = []string{"buildkit-runc", "runc"}
|
||||
|
@ -53,6 +53,7 @@ type runcExecutor struct {
|
|||
rootless bool
|
||||
networkProviders map[pb.NetMode]network.Provider
|
||||
processMode oci.ProcessMode
|
||||
idmap *idtools.IdentityMapping
|
||||
}
|
||||
|
||||
func New(opt Opt, networkProviders map[pb.NetMode]network.Provider) (executor.Executor, error) {
|
||||
|
@ -109,6 +110,7 @@ func New(opt Opt, networkProviders map[pb.NetMode]network.Provider) (executor.Ex
|
|||
rootless: opt.Rootless,
|
||||
networkProviders: networkProviders,
|
||||
processMode: opt.ProcessMode,
|
||||
idmap: opt.IdentityMapping,
|
||||
}
|
||||
return w, nil
|
||||
}
|
||||
|
@ -159,8 +161,14 @@ func (w *runcExecutor) Exec(ctx context.Context, meta executor.Meta, root cache.
|
|||
return err
|
||||
}
|
||||
defer os.RemoveAll(bundle)
|
||||
|
||||
identity := idtools.Identity{}
|
||||
if w.idmap != nil {
|
||||
identity = w.idmap.RootPair()
|
||||
}
|
||||
|
||||
rootFSPath := filepath.Join(bundle, "rootfs")
|
||||
if err := os.Mkdir(rootFSPath, 0700); err != nil {
|
||||
if err := idtools.MkdirAllAndChown(rootFSPath, 0700, identity); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := mount.All(rootMount, rootFSPath); err != nil {
|
||||
|
@ -180,9 +188,7 @@ func (w *runcExecutor) Exec(ctx context.Context, meta executor.Meta, root cache.
|
|||
defer f.Close()
|
||||
|
||||
opts := []containerdoci.SpecOpts{oci.WithUIDGID(uid, gid, sgids)}
|
||||
if system.SeccompSupported() {
|
||||
opts = append(opts, seccomp.WithDefaultProfile())
|
||||
}
|
||||
|
||||
if meta.ReadonlyRootFS {
|
||||
opts = append(opts, containerdoci.WithRootFSReadonly())
|
||||
}
|
||||
|
@ -197,7 +203,7 @@ func (w *runcExecutor) Exec(ctx context.Context, meta executor.Meta, root cache.
|
|||
}
|
||||
opts = append(opts, containerdoci.WithCgroup(cgroupsPath))
|
||||
}
|
||||
spec, cleanup, err := oci.GenerateSpec(ctx, meta, mounts, id, resolvConf, hostsFile, namespace, w.processMode, opts...)
|
||||
spec, cleanup, err := oci.GenerateSpec(ctx, meta, mounts, id, resolvConf, hostsFile, namespace, w.processMode, w.idmap, opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -212,7 +218,7 @@ func (w *runcExecutor) Exec(ctx context.Context, meta executor.Meta, root cache.
|
|||
if err != nil {
|
||||
return errors.Wrapf(err, "working dir %s points to invalid target", newp)
|
||||
}
|
||||
if err := os.MkdirAll(newp, 0755); err != nil {
|
||||
if err := idtools.MkdirAllAndChown(newp, 0755, identity); err != nil {
|
||||
return errors.Wrapf(err, "failed to create working directory %s", newp)
|
||||
}
|
||||
|
||||
|
|
31 changes: vendor/github.com/moby/buildkit/exporter/local/export.go (generated, vendored)
@ -7,6 +7,7 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/moby/buildkit/cache"
|
||||
"github.com/moby/buildkit/exporter"
|
||||
"github.com/moby/buildkit/session"
|
||||
|
@ -68,6 +69,7 @@ func (e *localExporterInstance) Export(ctx context.Context, inp exporter.Source)
|
|||
return func() error {
|
||||
var src string
|
||||
var err error
|
||||
var idmap *idtools.IdentityMapping
|
||||
if ref == nil {
|
||||
src, err = ioutil.TempDir("", "buildkit")
|
||||
if err != nil {
|
||||
|
@ -86,17 +88,40 @@ func (e *localExporterInstance) Export(ctx context.Context, inp exporter.Source)
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
idmap = mount.IdentityMapping()
|
||||
|
||||
defer lm.Unmount()
|
||||
}
|
||||
|
||||
fs := fsutil.NewFS(src, nil)
|
||||
walkOpt := &fsutil.WalkOpt{}
|
||||
|
||||
if idmap != nil {
|
||||
walkOpt.Map = func(p string, st *fstypes.Stat) bool {
|
||||
uid, gid, err := idmap.ToContainer(idtools.Identity{
|
||||
UID: int(st.Uid),
|
||||
GID: int(st.Gid),
|
||||
})
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
st.Uid = uint32(uid)
|
||||
st.Gid = uint32(gid)
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
fs := fsutil.NewFS(src, walkOpt)
|
||||
lbl := "copying files"
|
||||
if isMap {
|
||||
lbl += " " + k
|
||||
fs = fsutil.SubDirFS(fs, fstypes.Stat{
|
||||
fs, err = fsutil.SubDirFS([]fsutil.Dir{{FS: fs, Stat: fstypes.Stat{
|
||||
Mode: uint32(os.ModeDir | 0755),
|
||||
Path: strings.Replace(k, "/", "_", -1),
|
||||
})
|
||||
}}})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
progress := newProgressHandler(ctx, lbl)
|
||||
|
|
177 changes: vendor/github.com/moby/buildkit/exporter/tar/export.go (generated, vendored, new file)
@ -0,0 +1,177 @@
|
|||
package local
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/moby/buildkit/cache"
|
||||
"github.com/moby/buildkit/exporter"
|
||||
"github.com/moby/buildkit/session"
|
||||
"github.com/moby/buildkit/session/filesync"
|
||||
"github.com/moby/buildkit/snapshot"
|
||||
"github.com/moby/buildkit/util/progress"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/tonistiigi/fsutil"
|
||||
fstypes "github.com/tonistiigi/fsutil/types"
|
||||
)
|
||||
|
||||
type Opt struct {
|
||||
SessionManager *session.Manager
|
||||
}
|
||||
|
||||
type localExporter struct {
|
||||
opt Opt
|
||||
// session manager
|
||||
}
|
||||
|
||||
func New(opt Opt) (exporter.Exporter, error) {
|
||||
le := &localExporter{opt: opt}
|
||||
return le, nil
|
||||
}
|
||||
|
||||
func (e *localExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) {
|
||||
id := session.FromContext(ctx)
|
||||
if id == "" {
|
||||
return nil, errors.New("could not access local files without session")
|
||||
}
|
||||
|
||||
timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
caller, err := e.opt.SessionManager.Get(timeoutCtx, id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
li := &localExporterInstance{localExporter: e, caller: caller}
|
||||
return li, nil
|
||||
}
|
||||
|
||||
type localExporterInstance struct {
|
||||
*localExporter
|
||||
caller session.Caller
|
||||
}
|
||||
|
||||
func (e *localExporterInstance) Name() string {
|
||||
return "exporting to client"
|
||||
}
|
||||
|
||||
func (e *localExporterInstance) Export(ctx context.Context, inp exporter.Source) (map[string]string, error) {
|
||||
var defers []func()
|
||||
|
||||
defer func() {
|
||||
for i := len(defers) - 1; i >= 0; i-- {
|
||||
defers[i]()
|
||||
}
|
||||
}()
|
||||
|
||||
getDir := func(ctx context.Context, k string, ref cache.ImmutableRef) (*fsutil.Dir, error) {
|
||||
var src string
|
||||
var err error
|
||||
var idmap *idtools.IdentityMapping
|
||||
if ref == nil {
|
||||
src, err = ioutil.TempDir("", "buildkit")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defers = append(defers, func() { os.RemoveAll(src) })
|
||||
} else {
|
||||
mount, err := ref.Mount(ctx, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
lm := snapshot.LocalMounter(mount)
|
||||
|
||||
src, err = lm.Mount()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
idmap = mount.IdentityMapping()
|
||||
|
||||
defers = append(defers, func() { lm.Unmount() })
|
||||
}
|
||||
|
||||
walkOpt := &fsutil.WalkOpt{}
|
||||
|
||||
if idmap != nil {
|
||||
walkOpt.Map = func(p string, st *fstypes.Stat) bool {
|
||||
uid, gid, err := idmap.ToContainer(idtools.Identity{
|
||||
UID: int(st.Uid),
|
||||
GID: int(st.Gid),
|
||||
})
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
st.Uid = uint32(uid)
|
||||
st.Gid = uint32(gid)
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return &fsutil.Dir{
|
||||
FS: fsutil.NewFS(src, walkOpt),
|
||||
Stat: fstypes.Stat{
|
||||
Mode: uint32(os.ModeDir | 0755),
|
||||
Path: strings.Replace(k, "/", "_", -1),
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
var fs fsutil.FS
|
||||
|
||||
if len(inp.Refs) > 0 {
|
||||
dirs := make([]fsutil.Dir, 0, len(inp.Refs))
|
||||
for k, ref := range inp.Refs {
|
||||
d, err := getDir(ctx, k, ref)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dirs = append(dirs, *d)
|
||||
}
|
||||
var err error
|
||||
fs, err = fsutil.SubDirFS(dirs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
d, err := getDir(ctx, "", inp.Ref)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fs = d.FS
|
||||
}
|
||||
|
||||
w, err := filesync.CopyFileWriter(ctx, e.caller)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
report := oneOffProgress(ctx, "sending tarball")
|
||||
if err := fsutil.WriteTar(ctx, fs, w); err != nil {
|
||||
w.Close()
|
||||
return nil, report(err)
|
||||
}
|
||||
return nil, report(w.Close())
|
||||
}
|
||||
|
||||
func oneOffProgress(ctx context.Context, id string) func(err error) error {
|
||||
pw, _, _ := progress.FromContext(ctx)
|
||||
now := time.Now()
|
||||
st := progress.Status{
|
||||
Started: &now,
|
||||
}
|
||||
pw.Write(id, st)
|
||||
return func(err error) error {
|
||||
// TODO: set error on status
|
||||
now := time.Now()
|
||||
st.Completed = &now
|
||||
pw.Write(id, st)
|
||||
pw.Close()
|
||||
return err
|
||||
}
|
||||
}
|
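The tar exporter above builds an fsutil.FS (nesting multiple refs with SubDirFS when needed) and streams it with fsutil.WriteTar back to the client session. A stripped-down sketch of that write path; the source directory and output file are placeholders:

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/tonistiigi/fsutil"
)

func main() {
	// Walk a local directory and stream it as a tarball, the same way the
	// tar exporter streams the solve result back over the session.
	fs := fsutil.NewFS("/tmp/buildkit-out", &fsutil.WalkOpt{})

	f, err := os.Create("/tmp/out.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	if err := fsutil.WriteTar(context.Background(), fs, f); err != nil {
		log.Fatal(err)
	}
}
```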
30 changes: vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go (generated, vendored)
@ -113,7 +113,7 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) {
|
|||
name := "load build definition from " + filename
|
||||
|
||||
src := llb.Local(localNameDockerfile,
|
||||
llb.FollowPaths([]string{filename}),
|
||||
llb.FollowPaths([]string{filename, filename + ".dockerignore"}),
|
||||
llb.SessionID(c.BuildOpts().SessionID),
|
||||
llb.SharedKeyHint(localNameDockerfile),
|
||||
dockerfile2llb.WithInternalName(name),
|
||||
|
@ -175,6 +175,8 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) {
|
|||
|
||||
eg, ctx2 := errgroup.WithContext(ctx)
|
||||
var dtDockerfile []byte
|
||||
var dtDockerignore []byte
|
||||
var dtDockerignoreDefault []byte
|
||||
eg.Go(func() error {
|
||||
res, err := c.Solve(ctx2, client.SolveRequest{
|
||||
Definition: def.ToPB(),
|
||||
|
@ -194,6 +196,13 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) {
|
|||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to read dockerfile")
|
||||
}
|
||||
|
||||
dt, err := ref.ReadFile(ctx2, client.ReadRequest{
|
||||
Filename: filename + ".dockerignore",
|
||||
})
|
||||
if err == nil {
|
||||
dtDockerignore = dt
|
||||
}
|
||||
return nil
|
||||
})
|
||||
var excludes []string
|
||||
|
@ -223,14 +232,11 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dtDockerignore, err := ref.ReadFile(ctx2, client.ReadRequest{
|
||||
dtDockerignoreDefault, err = ref.ReadFile(ctx2, client.ReadRequest{
|
||||
Filename: dockerignoreFilename,
|
||||
})
|
||||
if err == nil {
|
||||
excludes, err = dockerignore.ReadAll(bytes.NewBuffer(dtDockerignore))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to parse dockerignore")
|
||||
}
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
@ -240,6 +246,16 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) {
|
|||
return nil, err
|
||||
}
|
||||
|
||||
if dtDockerignore == nil {
|
||||
dtDockerignore = dtDockerignoreDefault
|
||||
}
|
||||
if dtDockerignore != nil {
|
||||
excludes, err = dockerignore.ReadAll(bytes.NewBuffer(dtDockerignore))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to parse dockerignore")
|
||||
}
|
||||
}
|
||||
|
||||
if _, ok := opts["cmdline"]; !ok {
|
||||
ref, cmdline, ok := dockerfile2llb.DetectSyntax(bytes.NewBuffer(dtDockerfile))
|
||||
if ok {
|
||||
|
|
10 changes: vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go (generated, vendored)
@ -623,8 +623,10 @@ func dispatchRun(d *dispatchState, c *instructions.RunCommand, proxy *llb.ProxyE
|
|||
env := d.state.Env()
|
||||
opt := []llb.RunOption{llb.Args(args)}
|
||||
for _, arg := range d.buildArgs {
|
||||
env = append(env, fmt.Sprintf("%s=%s", arg.Key, arg.ValueString()))
|
||||
opt = append(opt, llb.AddEnv(arg.Key, arg.ValueString()))
|
||||
if arg.Value != nil {
|
||||
env = append(env, fmt.Sprintf("%s=%s", arg.Key, arg.ValueString()))
|
||||
opt = append(opt, llb.AddEnv(arg.Key, arg.ValueString()))
|
||||
}
|
||||
}
|
||||
opt = append(opt, dfCmd(c))
|
||||
if d.ignoreCache {
|
||||
|
@ -1066,7 +1068,9 @@ func toEnvMap(args []instructions.KeyValuePairOptional, env []string) map[string
|
|||
if _, ok := m[arg.Key]; ok {
|
||||
continue
|
||||
}
|
||||
m[arg.Key] = arg.ValueString()
|
||||
if arg.Value != nil {
|
||||
m[arg.Key] = arg.ValueString()
|
||||
}
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
|
|
@ -75,6 +75,8 @@ func dispatchRunMounts(d *dispatchState, c *instructions.RunCommand, sources []*
|
|||
}
|
||||
if mount.ReadOnly {
|
||||
mountOpts = append(mountOpts, llb.Readonly)
|
||||
} else if mount.Type == instructions.MountTypeBind {
|
||||
mountOpts = append(mountOpts, llb.ForceNoOutput)
|
||||
}
|
||||
if mount.Type == instructions.MountTypeCache {
|
||||
sharing := llb.CacheMountShared
|
||||
|
|
|
@ -221,7 +221,7 @@ func parseMount(value string) (*Mount, error) {
|
|||
}
|
||||
|
||||
if roAuto {
|
||||
if m.Type == MountTypeCache {
|
||||
if m.Type == MountTypeCache || m.Type == MountTypeTmpfs {
|
||||
m.ReadOnly = false
|
||||
} else {
|
||||
m.ReadOnly = true
|
||||
|
|
4 changes: vendor/github.com/moby/buildkit/session/content/attachable.go (generated, vendored)
@ -6,7 +6,7 @@ import (
	api "github.com/containerd/containerd/api/services/content/v1"
	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/errdefs"
	contentservice "github.com/containerd/containerd/services/content"
	"github.com/containerd/containerd/services/content/contentserver"
	"github.com/moby/buildkit/session"
	digest "github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"

@ -120,7 +120,7 @@ type attachable struct {
// A key of the store map is an ID string that is used for choosing underlying store.
func NewAttachable(stores map[string]content.Store) session.Attachable {
	store := &attachableContentStore{stores: stores}
	service := contentservice.NewService(store)
	service := contentserver.New(store)
	a := attachable{
		service: service,
	}
|
3 changes: vendor/github.com/moby/buildkit/session/filesync/diffcopy.go (generated, vendored)
@ -57,7 +57,7 @@ func (wc *streamWriterCloser) Close() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func recvDiffCopy(ds grpc.Stream, dest string, cu CacheUpdater, progress progressCb) error {
|
||||
func recvDiffCopy(ds grpc.Stream, dest string, cu CacheUpdater, progress progressCb, filter func(string, *fstypes.Stat) bool) error {
|
||||
st := time.Now()
|
||||
defer func() {
|
||||
logrus.Debugf("diffcopy took: %v", time.Since(st))
|
||||
|
@ -73,6 +73,7 @@ func recvDiffCopy(ds grpc.Stream, dest string, cu CacheUpdater, progress progres
|
|||
NotifyHashed: cf,
|
||||
ContentHasher: ch,
|
||||
ProgressCb: progress,
|
||||
Filter: fsutil.FilterFunc(filter),
|
||||
})
|
||||
}
|
||||
|
||||
|
|
5 changes: vendor/github.com/moby/buildkit/session/filesync/filesync.go (generated, vendored)
@ -129,7 +129,7 @@ type progressCb func(int, bool)
|
|||
type protocol struct {
|
||||
name string
|
||||
sendFn func(stream grpc.Stream, fs fsutil.FS, progress progressCb) error
|
||||
recvFn func(stream grpc.Stream, destDir string, cu CacheUpdater, progress progressCb) error
|
||||
recvFn func(stream grpc.Stream, destDir string, cu CacheUpdater, progress progressCb, mapFunc func(string, *fstypes.Stat) bool) error
|
||||
}
|
||||
|
||||
func isProtoSupported(p string) bool {
|
||||
|
@ -158,6 +158,7 @@ type FSSendRequestOpt struct {
|
|||
DestDir string
|
||||
CacheUpdater CacheUpdater
|
||||
ProgressCb func(int, bool)
|
||||
Filter func(string, *fstypes.Stat) bool
|
||||
}
|
||||
|
||||
// CacheUpdater is an object capable of sending notifications for the cache hash changes
|
||||
|
@ -225,7 +226,7 @@ func FSSync(ctx context.Context, c session.Caller, opt FSSendRequestOpt) error {
|
|||
panic(fmt.Sprintf("invalid protocol: %q", pr.name))
|
||||
}
|
||||
|
||||
return pr.recvFn(stream, opt.DestDir, opt.CacheUpdater, opt.ProgressCb)
|
||||
return pr.recvFn(stream, opt.DestDir, opt.CacheUpdater, opt.ProgressCb, opt.Filter)
|
||||
}
|
||||
|
||||
// NewFSSyncTargetDir allows writing into a directory
|
||||
|
|
20 changes: vendor/github.com/moby/buildkit/snapshot/snapshotter.go (generated, vendored)
@ -6,6 +6,7 @@ import (
|
|||
|
||||
"github.com/containerd/containerd/mount"
|
||||
"github.com/containerd/containerd/snapshots"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
|
@ -13,6 +14,7 @@ type Mountable interface {
|
|||
// ID() string
|
||||
Mount() ([]mount.Mount, error)
|
||||
Release() error
|
||||
IdentityMapping() *idtools.IdentityMapping
|
||||
}
|
||||
|
||||
type SnapshotterBase interface {
|
||||
|
@ -27,6 +29,7 @@ type SnapshotterBase interface {
|
|||
Remove(ctx context.Context, key string) error
|
||||
Walk(ctx context.Context, fn func(context.Context, snapshots.Info) error) error
|
||||
Close() error
|
||||
IdentityMapping() *idtools.IdentityMapping
|
||||
}
|
||||
|
||||
// Snapshotter defines interface that any snapshot implementation should satisfy
|
||||
|
@ -40,12 +43,13 @@ type Blobmapper interface {
|
|||
SetBlob(ctx context.Context, key string, diffID, blob digest.Digest) error
|
||||
}
|
||||
|
||||
func FromContainerdSnapshotter(s snapshots.Snapshotter) SnapshotterBase {
|
||||
return &fromContainerd{Snapshotter: s}
|
||||
func FromContainerdSnapshotter(s snapshots.Snapshotter, idmap *idtools.IdentityMapping) SnapshotterBase {
|
||||
return &fromContainerd{Snapshotter: s, idmap: idmap}
|
||||
}
|
||||
|
||||
type fromContainerd struct {
|
||||
snapshots.Snapshotter
|
||||
idmap *idtools.IdentityMapping
|
||||
}
|
||||
|
||||
func (s *fromContainerd) Mounts(ctx context.Context, key string) (Mountable, error) {
|
||||
|
@ -53,7 +57,7 @@ func (s *fromContainerd) Mounts(ctx context.Context, key string) (Mountable, err
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &staticMountable{mounts}, nil
|
||||
return &staticMountable{mounts, s.idmap}, nil
|
||||
}
|
||||
func (s *fromContainerd) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) error {
|
||||
_, err := s.Snapshotter.Prepare(ctx, key, parent, opts...)
|
||||
|
@ -64,11 +68,15 @@ func (s *fromContainerd) View(ctx context.Context, key, parent string, opts ...s
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &staticMountable{mounts}, nil
|
||||
return &staticMountable{mounts, s.idmap}, nil
|
||||
}
|
||||
func (s *fromContainerd) IdentityMapping() *idtools.IdentityMapping {
|
||||
return s.idmap
|
||||
}
|
||||
|
||||
type staticMountable struct {
|
||||
mounts []mount.Mount
|
||||
idmap *idtools.IdentityMapping
|
||||
}
|
||||
|
||||
func (m *staticMountable) Mount() ([]mount.Mount, error) {
|
||||
|
@ -79,6 +87,10 @@ func (cm *staticMountable) Release() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (cm *staticMountable) IdentityMapping() *idtools.IdentityMapping {
|
||||
return cm.idmap
|
||||
}
|
||||
|
||||
// NewContainerdSnapshotter converts snapshotter to containerd snapshotter
|
||||
func NewContainerdSnapshotter(s Snapshotter) (snapshots.Snapshotter, func() error) {
|
||||
cs := &containerdSnapshotter{Snapshotter: s}
|
||||
|
|
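FromContainerdSnapshotter now carries the identity mapping so that every Mountable can report it through IdentityMapping(). A sketch of wrapping a containerd snapshotter; the containerd socket, the overlayfs snapshotter name, and the nil mapping (no userns remap) are assumptions for illustration:

```go
package main

import (
	"log"

	"github.com/containerd/containerd"
	"github.com/moby/buildkit/snapshot"
)

func main() {
	ctrd, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer ctrd.Close()

	// Wrap the native overlayfs snapshotter; passing nil means no
	// user-namespace remapping is in effect.
	sb := snapshot.FromContainerdSnapshotter(ctrd.SnapshotService("overlayfs"), nil)
	_ = sb.IdentityMapping() // nil here, by construction
}
```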
47 changes: vendor/github.com/moby/buildkit/solver/llbsolver/file/backend.go (generated, vendored)
@ -10,6 +10,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/containerd/continuity/fs"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/moby/buildkit/snapshot"
|
||||
"github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes"
|
||||
"github.com/moby/buildkit/solver/pb"
|
||||
|
@ -25,12 +26,35 @@ func timestampToTime(ts int64) *time.Time {
|
|||
return &tm
|
||||
}
|
||||
|
||||
func mkdir(ctx context.Context, d string, action pb.FileActionMkDir, user *copy.ChownOpt) error {
|
||||
func mapUser(user *copy.ChownOpt, idmap *idtools.IdentityMapping) (*copy.ChownOpt, error) {
|
||||
if idmap == nil {
|
||||
return user, nil
|
||||
}
|
||||
if user == nil {
|
||||
identity := idmap.RootPair()
|
||||
return ©.ChownOpt{Uid: identity.UID, Gid: identity.GID}, nil
|
||||
}
|
||||
identity, err := idmap.ToHost(idtools.Identity{
|
||||
UID: user.Uid,
|
||||
GID: user.Gid,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ©.ChownOpt{Uid: identity.UID, Gid: identity.GID}, nil
|
||||
}
|
||||
|
||||
func mkdir(ctx context.Context, d string, action pb.FileActionMkDir, user *copy.ChownOpt, idmap *idtools.IdentityMapping) error {
|
||||
p, err := fs.RootPath(d, filepath.Join(filepath.Join("/", action.Path)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
user, err = mapUser(user, idmap)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if action.MakeParents {
|
||||
if err := copy.MkdirAll(p, os.FileMode(action.Mode)&0777, user, timestampToTime(action.Timestamp)); err != nil {
|
||||
return err
|
||||
|
@ -53,12 +77,17 @@ func mkdir(ctx context.Context, d string, action pb.FileActionMkDir, user *copy.
|
|||
return nil
|
||||
}
|
||||
|
||||
func mkfile(ctx context.Context, d string, action pb.FileActionMkFile, user *copy.ChownOpt) error {
|
||||
func mkfile(ctx context.Context, d string, action pb.FileActionMkFile, user *copy.ChownOpt, idmap *idtools.IdentityMapping) error {
|
||||
p, err := fs.RootPath(d, filepath.Join(filepath.Join("/", action.Path)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
user, err = mapUser(user, idmap)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := ioutil.WriteFile(p, action.Data, os.FileMode(action.Mode)&0777); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -90,7 +119,7 @@ func rm(ctx context.Context, d string, action pb.FileActionRm) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func docopy(ctx context.Context, src, dest string, action pb.FileActionCopy, u *copy.ChownOpt) error {
|
||||
func docopy(ctx context.Context, src, dest string, action pb.FileActionCopy, u *copy.ChownOpt, idmap *idtools.IdentityMapping) error {
|
||||
srcPath := cleanPath(action.Src)
|
||||
destPath := cleanPath(action.Dest)
|
||||
|
||||
|
@ -109,6 +138,12 @@ func docopy(ctx context.Context, src, dest string, action pb.FileActionCopy, u *
|
|||
return nil
|
||||
}
|
||||
|
||||
// TODO(tonistiigi): this is wrong. fsutil.Copy can't handle non-forced user
|
||||
u, err := mapUser(u, idmap)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
opt := []copy.Opt{
|
||||
func(ci *copy.CopyInfo) {
|
||||
ci.Chown = u
|
||||
|
@ -195,7 +230,7 @@ func (fb *Backend) Mkdir(ctx context.Context, m, user, group fileoptypes.Mount,
|
|||
return err
|
||||
}
|
||||
|
||||
return mkdir(ctx, dir, action, u)
|
||||
return mkdir(ctx, dir, action, u, mnt.m.IdentityMapping())
|
||||
}
|
||||
|
||||
func (fb *Backend) Mkfile(ctx context.Context, m, user, group fileoptypes.Mount, action pb.FileActionMkFile) error {
|
||||
|
@ -216,7 +251,7 @@ func (fb *Backend) Mkfile(ctx context.Context, m, user, group fileoptypes.Mount,
|
|||
return err
|
||||
}
|
||||
|
||||
return mkfile(ctx, dir, action, u)
|
||||
return mkfile(ctx, dir, action, u, mnt.m.IdentityMapping())
|
||||
}
|
||||
func (fb *Backend) Rm(ctx context.Context, m fileoptypes.Mount, action pb.FileActionRm) error {
|
||||
mnt, ok := m.(*Mount)
|
||||
|
@ -262,5 +297,5 @@ func (fb *Backend) Copy(ctx context.Context, m1, m2, user, group fileoptypes.Mou
|
|||
return err
|
||||
}
|
||||
|
||||
return docopy(ctx, src, dest, action, u)
|
||||
return docopy(ctx, src, dest, action, u, mnt2.m.IdentityMapping())
|
||||
}
|
||||
|
|
78 changes: vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec.go (generated, vendored)
@ -17,6 +17,7 @@ import (
|
|||
|
||||
"github.com/containerd/containerd/mount"
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/pkg/locker"
|
||||
"github.com/moby/buildkit/cache"
|
||||
"github.com/moby/buildkit/cache/metadata"
|
||||
|
@ -209,7 +210,7 @@ func (e *execOp) getMountDeps() ([]dep, error) {
|
|||
deps[m.Input].Selectors = append(deps[m.Input].Selectors, sel)
|
||||
}
|
||||
|
||||
if !m.Readonly || m.Dest == pb.RootMount { // exclude read-only rootfs
|
||||
if (!m.Readonly || m.Dest == pb.RootMount) && m.Output != -1 { // exclude read-only rootfs && read-write mounts
|
||||
deps[m.Input].NoContentBasedHash = true
|
||||
}
|
||||
}
|
||||
|
@ -329,30 +330,47 @@ func (e *execOp) getSSHMountable(ctx context.Context, m *pb.Mount) (cache.Mounta
|
|||
return nil, err
|
||||
}
|
||||
|
||||
return &sshMount{mount: m, caller: caller}, nil
|
||||
return &sshMount{mount: m, caller: caller, idmap: e.cm.IdentityMapping()}, nil
|
||||
}
|
||||
|
||||
type sshMount struct {
|
||||
mount *pb.Mount
|
||||
caller session.Caller
|
||||
idmap *idtools.IdentityMapping
|
||||
}
|
||||
|
||||
func (sm *sshMount) Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) {
|
||||
return &sshMountInstance{sm: sm}, nil
|
||||
return &sshMountInstance{sm: sm, idmap: sm.idmap}, nil
|
||||
}
|
||||
|
||||
type sshMountInstance struct {
|
||||
sm *sshMount
|
||||
cleanup func() error
|
||||
idmap *idtools.IdentityMapping
|
||||
}
|
||||
|
||||
func (sm *sshMountInstance) Mount() ([]mount.Mount, error) {
|
||||
ctx, cancel := context.WithCancel(context.TODO())
|
||||
|
||||
uid := int(sm.sm.mount.SSHOpt.Uid)
|
||||
gid := int(sm.sm.mount.SSHOpt.Gid)
|
||||
|
||||
if sm.idmap != nil {
|
||||
identity, err := sm.idmap.ToHost(idtools.Identity{
|
||||
UID: uid,
|
||||
GID: gid,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
uid = identity.UID
|
||||
gid = identity.GID
|
||||
}
|
||||
|
||||
sock, cleanup, err := sshforward.MountSSHSocket(ctx, sm.sm.caller, sshforward.SocketOpt{
|
||||
ID: sm.sm.mount.SSHOpt.ID,
|
||||
UID: int(sm.sm.mount.SSHOpt.Uid),
|
||||
GID: int(sm.sm.mount.SSHOpt.Gid),
|
||||
UID: uid,
|
||||
GID: gid,
|
||||
Mode: int(sm.sm.mount.SSHOpt.Mode & 0777),
|
||||
})
|
||||
if err != nil {
|
||||
|
@ -384,6 +402,10 @@ func (sm *sshMountInstance) Release() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (sm *sshMountInstance) IdentityMapping() *idtools.IdentityMapping {
|
||||
return sm.idmap
|
||||
}
|
||||
|
||||
func (e *execOp) getSecretMountable(ctx context.Context, m *pb.Mount) (cache.Mountable, error) {
|
||||
if m.SecretOpt == nil {
|
||||
return nil, errors.Errorf("invalid sercet mount options")
|
||||
|
@ -416,21 +438,23 @@ func (e *execOp) getSecretMountable(ctx context.Context, m *pb.Mount) (cache.Mou
|
|||
return nil, err
|
||||
}
|
||||
|
||||
return &secretMount{mount: m, data: dt}, nil
|
||||
return &secretMount{mount: m, data: dt, idmap: e.cm.IdentityMapping()}, nil
|
||||
}
|
||||
|
||||
type secretMount struct {
|
||||
mount *pb.Mount
|
||||
data []byte
|
||||
idmap *idtools.IdentityMapping
|
||||
}
|
||||
|
||||
func (sm *secretMount) Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) {
|
||||
return &secretMountInstance{sm: sm}, nil
|
||||
return &secretMountInstance{sm: sm, idmap: sm.idmap}, nil
|
||||
}
|
||||
|
||||
type secretMountInstance struct {
|
||||
sm *secretMount
|
||||
root string
|
||||
sm *secretMount
|
||||
root string
|
||||
idmap *idtools.IdentityMapping
|
||||
}
|
||||
|
||||
func (sm *secretMountInstance) Mount() ([]mount.Mount, error) {
|
||||
|
@ -465,7 +489,22 @@ func (sm *secretMountInstance) Mount() ([]mount.Mount, error) {
|
|||
return nil, err
|
||||
}
|
||||
|
||||
if err := os.Chown(fp, int(sm.sm.mount.SecretOpt.Uid), int(sm.sm.mount.SecretOpt.Gid)); err != nil {
|
||||
uid := int(sm.sm.mount.SecretOpt.Uid)
|
||||
gid := int(sm.sm.mount.SecretOpt.Gid)
|
||||
|
||||
if sm.idmap != nil {
|
||||
identity, err := sm.idmap.ToHost(idtools.Identity{
|
||||
UID: uid,
|
||||
GID: gid,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
uid = identity.UID
|
||||
gid = identity.GID
|
||||
}
|
||||
|
||||
if err := os.Chown(fp, uid, gid); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
@ -490,6 +529,10 @@ func (sm *secretMountInstance) Release() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (sm *secretMountInstance) IdentityMapping() *idtools.IdentityMapping {
|
||||
return sm.idmap
|
||||
}
|
||||
|
||||
func addDefaultEnvvar(env []string, k, v string) []string {
	for _, e := range env {
		if strings.HasPrefix(e, k+"=") {

@@ -585,7 +628,7 @@ func (e *execOp) Exec(ctx context.Context, inputs []solver.Result) ([]solver.Res
		}

	case pb.MountType_TMPFS:
		mountable = newTmpfs()
		mountable = newTmpfs(e.cm.IdentityMapping())

	case pb.MountType_SECRET:
		secretMount, err := e.getSecretMountable(ctx, m)

@@ -653,6 +696,7 @@ func (e *execOp) Exec(ctx context.Context, inputs []solver.Result) ([]solver.Res
		ReadonlyRootFS: readonlyRootFS,
		ExtraHosts:     extraHosts,
		NetMode:        e.op.Network,
		SecurityMode:   e.op.Security,
	}

	if e.op.Meta.ProxyEnv != nil {

@@ -701,19 +745,21 @@ func proxyEnvList(p *pb.ProxyEnv) []string {
	return out
}

func newTmpfs() cache.Mountable {
	return &tmpfs{}
func newTmpfs(idmap *idtools.IdentityMapping) cache.Mountable {
	return &tmpfs{idmap: idmap}
}

type tmpfs struct {
	idmap *idtools.IdentityMapping
}

func (f *tmpfs) Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) {
	return &tmpfsMount{readonly: readonly}, nil
	return &tmpfsMount{readonly: readonly, idmap: f.idmap}, nil
}

type tmpfsMount struct {
	readonly bool
	idmap    *idtools.IdentityMapping
}

func (m *tmpfsMount) Mount() ([]mount.Mount, error) {

@@ -731,6 +777,10 @@ func (m *tmpfsMount) Release() error {
	return nil
}

func (m *tmpfsMount) IdentityMapping() *idtools.IdentityMapping {
	return m.idmap
}

var cacheRefsLocker = locker.New()
var sharedCacheRefs = &cacheRefs{}
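Every mountable touched in this file (ssh, secret, tmpfs) now carries the identity mapping it was created under and exposes it through an IdentityMapping accessor, so consumers of the mounts can remap ownership before touching files. The interface definition itself is not part of this diff; a minimal sketch of a type that follows the same pattern, under that assumption:

// stubMountable mirrors the shape used above: it returns pre-built mounts and
// reports the identity mapping callers should use when chowning files that
// end up inside those mounts.
type stubMountable struct {
	mounts []mount.Mount
	idmap  *idtools.IdentityMapping
}

func (s *stubMountable) Mount() ([]mount.Mount, error)             { return s.mounts, nil }
func (s *stubMountable) Release() error                            { return nil }
func (s *stubMountable) IdentityMapping() *idtools.IdentityMapping { return s.idmap }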
19 vendor/github.com/moby/buildkit/solver/llbsolver/solver.go (generated, vendored)

@@ -45,9 +45,10 @@ type Solver struct {
	platforms        []specs.Platform
	gatewayForwarder *controlgateway.GatewayForwarder
	sm               *session.Manager
	entitlements     []string
}

func New(wc *worker.Controller, f map[string]frontend.Frontend, cache solver.CacheManager, resolveCI map[string]remotecache.ResolveCacheImporterFunc, gatewayForwarder *controlgateway.GatewayForwarder, sm *session.Manager) (*Solver, error) {
func New(wc *worker.Controller, f map[string]frontend.Frontend, cache solver.CacheManager, resolveCI map[string]remotecache.ResolveCacheImporterFunc, gatewayForwarder *controlgateway.GatewayForwarder, sm *session.Manager, ents []string) (*Solver, error) {
	s := &Solver{
		workerController: wc,
		resolveWorker:    defaultResolver(wc),

@@ -55,6 +56,7 @@ func New(wc *worker.Controller, f map[string]frontend.Frontend, cache solver.Cac
		resolveCacheImporterFuncs: resolveCI,
		gatewayForwarder:          gatewayForwarder,
		sm:                        sm,
		entitlements:              ents,
	}

	// executing is currently only allowed on default worker

@@ -101,7 +103,7 @@ func (s *Solver) Solve(ctx context.Context, id string, req frontend.SolveRequest

	defer j.Discard()

	set, err := entitlements.WhiteList(ent, supportedEntitlements())
	set, err := entitlements.WhiteList(ent, supportedEntitlements(s.entitlements))
	if err != nil {
		return nil, err
	}

@@ -343,12 +345,15 @@ func notifyCompleted(ctx context.Context, v *client.Vertex, err error, cached bo
	pw.Write(v.Digest.String(), *v)
}

var AllowNetworkHostUnstable = false // TODO: enable in constructor

func supportedEntitlements() []entitlements.Entitlement {
func supportedEntitlements(ents []string) []entitlements.Entitlement {
	out := []entitlements.Entitlement{} // nil means no filter
	if AllowNetworkHostUnstable {
		out = append(out, entitlements.EntitlementNetworkHost)
	for _, e := range ents {
		if e == string(entitlements.EntitlementNetworkHost) {
			out = append(out, entitlements.EntitlementNetworkHost)
		}
		if e == string(entitlements.EntitlementSecurityInsecure) {
			out = append(out, entitlements.EntitlementSecurityInsecure)
		}
	}
	return out
}
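With this change the allowed entitlements stop being a package-level switch (AllowNetworkHostUnstable) and instead arrive as plain strings from the daemon configuration, get converted into the typed supported set, and are intersected with what the build request asked for. A short sketch of that flow in one place, assuming the request side already holds []entitlements.Entitlement values and that WhiteList returns the entitlements.Set consumed by ValidateEntitlements below, as the Solve path above implies:

// filterEntitlements mirrors the Solve path: daemon-configured strings form
// the supported set, and the per-build request is whitelisted against it.
func filterEntitlements(configured []string, requested []entitlements.Entitlement) (entitlements.Set, error) {
	supported := []entitlements.Entitlement{}
	for _, e := range configured {
		switch e {
		case string(entitlements.EntitlementNetworkHost):
			supported = append(supported, entitlements.EntitlementNetworkHost)
		case string(entitlements.EntitlementSecurityInsecure):
			supported = append(supported, entitlements.EntitlementSecurityInsecure)
		}
	}
	return entitlements.WhiteList(requested, supported)
}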
8 vendor/github.com/moby/buildkit/solver/llbsolver/vertex.go (generated, vendored)

@@ -120,9 +120,10 @@ func ValidateEntitlements(ent entitlements.Set) LoadOpt {
				return errors.Errorf("%s is not allowed", entitlements.EntitlementNetworkHost)
			}
		}
		if op.Exec.Network == pb.NetMode_NONE {
			if !ent.Allowed(entitlements.EntitlementNetworkNone) {
				return errors.Errorf("%s is not allowed", entitlements.EntitlementNetworkNone)

		if op.Exec.Security == pb.SecurityMode_INSECURE {
			if !ent.Allowed(entitlements.EntitlementSecurityInsecure) {
				return errors.Errorf("%s is not allowed", entitlements.EntitlementSecurityInsecure)
			}
		}
	}

@@ -155,6 +156,7 @@ func newVertex(dgst digest.Digest, op *pb.Op, opMeta *pb.OpMetadata, load func(d
			return nil, err
		}
	}

	vtx := &vertex{sys: op, options: opt, digest: dgst, name: llbOpName(op)}
	for _, in := range op.Inputs {
		sub, err := load(in.Digest)
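The loader now rejects an ExecOp that asks for INSECURE (privileged) execution unless the solve was granted the security.insecure entitlement, mirroring the existing network.host gate. A compact sketch of the same check in isolation, built from the pb and entitlements identifiers used in the hunk above (NetMode_HOST is the existing host-network enum value):

// checkExecEntitlements rejects an exec that requests more than the solve was
// granted: host networking and insecure security mode each require their
// entitlement to be present in the allowed set.
func checkExecEntitlements(ent entitlements.Set, exec *pb.ExecOp) error {
	if exec.Network == pb.NetMode_HOST && !ent.Allowed(entitlements.EntitlementNetworkHost) {
		return errors.Errorf("%s is not allowed", entitlements.EntitlementNetworkHost)
	}
	if exec.Security == pb.SecurityMode_INSECURE && !ent.Allowed(entitlements.EntitlementSecurityInsecure) {
		return errors.Errorf("%s is not allowed", entitlements.EntitlementSecurityInsecure)
	}
	return nil
}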
7 vendor/github.com/moby/buildkit/solver/pb/caps.go (generated, vendored)

@@ -33,6 +33,7 @@ const (
	CapExecMetaBase            apicaps.CapID = "exec.meta.base"
	CapExecMetaProxy           apicaps.CapID = "exec.meta.proxyenv"
	CapExecMetaNetwork         apicaps.CapID = "exec.meta.network"
	CapExecMetaSecurity        apicaps.CapID = "exec.meta.security"
	CapExecMetaSetsDefaultPath apicaps.CapID = "exec.meta.setsdefaultpath"
	CapExecMountBind           apicaps.CapID = "exec.mount.bind"
	CapExecMountCache          apicaps.CapID = "exec.mount.cache"

@@ -180,6 +181,12 @@ func init() {
		Status:  apicaps.CapStatusExperimental,
	})

	Caps.Init(apicaps.Cap{
		ID:      CapExecMetaSecurity,
		Enabled: true,
		Status:  apicaps.CapStatusExperimental,
	})

	Caps.Init(apicaps.Cap{
		ID:      CapExecMountBind,
		Enabled: true,
374 vendor/github.com/moby/buildkit/solver/pb/ops.pb.go (generated, vendored)

@@ -54,7 +54,30 @@ func (x NetMode) String() string {
	return proto.EnumName(NetMode_name, int32(x))
}
func (NetMode) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_ops_8d64813b9835ab08, []int{0}
	return fileDescriptor_ops_0b9d2e829935306b, []int{0}
}

type SecurityMode int32

const (
	SecurityMode_SANDBOX  SecurityMode = 0
	SecurityMode_INSECURE SecurityMode = 1
)

var SecurityMode_name = map[int32]string{
	0: "SANDBOX",
	1: "INSECURE",
}
var SecurityMode_value = map[string]int32{
	"SANDBOX":  0,
	"INSECURE": 1,
}

func (x SecurityMode) String() string {
	return proto.EnumName(SecurityMode_name, int32(x))
}
func (SecurityMode) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_ops_0b9d2e829935306b, []int{1}
}

// MountType defines a type of a mount from a supported set
@ -87,7 +110,7 @@ func (x MountType) String() string {
|
|||
return proto.EnumName(MountType_name, int32(x))
|
||||
}
|
||||
func (MountType) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_ops_8d64813b9835ab08, []int{1}
|
||||
return fileDescriptor_ops_0b9d2e829935306b, []int{2}
|
||||
}
|
||||
|
||||
// CacheSharingOpt defines different sharing modes for cache mount
|
||||
|
@ -117,7 +140,7 @@ func (x CacheSharingOpt) String() string {
|
|||
return proto.EnumName(CacheSharingOpt_name, int32(x))
|
||||
}
|
||||
func (CacheSharingOpt) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_ops_8d64813b9835ab08, []int{2}
|
||||
return fileDescriptor_ops_0b9d2e829935306b, []int{3}
|
||||
}
|
||||
|
||||
// Op represents a vertex of the LLB DAG.
|
||||
|
@ -138,7 +161,7 @@ func (m *Op) Reset() { *m = Op{} }
|
|||
func (m *Op) String() string { return proto.CompactTextString(m) }
|
||||
func (*Op) ProtoMessage() {}
|
||||
func (*Op) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_ops_8d64813b9835ab08, []int{0}
|
||||
return fileDescriptor_ops_0b9d2e829935306b, []int{0}
|
||||
}
|
||||
func (m *Op) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
|
@ -368,7 +391,7 @@ func (m *Platform) Reset() { *m = Platform{} }
|
|||
func (m *Platform) String() string { return proto.CompactTextString(m) }
|
||||
func (*Platform) ProtoMessage() {}
|
||||
func (*Platform) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_ops_8d64813b9835ab08, []int{1}
|
||||
return fileDescriptor_ops_0b9d2e829935306b, []int{1}
|
||||
}
|
||||
func (m *Platform) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
|
@ -440,7 +463,7 @@ func (m *Input) Reset() { *m = Input{} }
|
|||
func (m *Input) String() string { return proto.CompactTextString(m) }
|
||||
func (*Input) ProtoMessage() {}
|
||||
func (*Input) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_ops_8d64813b9835ab08, []int{2}
|
||||
return fileDescriptor_ops_0b9d2e829935306b, []int{2}
|
||||
}
|
||||
func (m *Input) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
|
@@ -467,16 +490,17 @@ var xxx_messageInfo_Input proto.InternalMessageInfo

// ExecOp executes a command in a container.
type ExecOp struct {
	Meta    *Meta    `protobuf:"bytes,1,opt,name=meta,proto3" json:"meta,omitempty"`
	Mounts  []*Mount `protobuf:"bytes,2,rep,name=mounts,proto3" json:"mounts,omitempty"`
	Network NetMode  `protobuf:"varint,3,opt,name=network,proto3,enum=pb.NetMode" json:"network,omitempty"`
	Meta     *Meta        `protobuf:"bytes,1,opt,name=meta,proto3" json:"meta,omitempty"`
	Mounts   []*Mount     `protobuf:"bytes,2,rep,name=mounts,proto3" json:"mounts,omitempty"`
	Network  NetMode      `protobuf:"varint,3,opt,name=network,proto3,enum=pb.NetMode" json:"network,omitempty"`
	Security SecurityMode `protobuf:"varint,4,opt,name=security,proto3,enum=pb.SecurityMode" json:"security,omitempty"`
}

func (m *ExecOp) Reset()         { *m = ExecOp{} }
func (m *ExecOp) String() string { return proto.CompactTextString(m) }
func (*ExecOp) ProtoMessage()    {}
func (*ExecOp) Descriptor() ([]byte, []int) {
	return fileDescriptor_ops_8d64813b9835ab08, []int{3}
	return fileDescriptor_ops_0b9d2e829935306b, []int{3}
}
func (m *ExecOp) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)

@@ -522,6 +546,13 @@ func (m *ExecOp) GetNetwork() NetMode {
	return NetMode_UNSET
}

func (m *ExecOp) GetSecurity() SecurityMode {
	if m != nil {
		return m.Security
	}
	return SecurityMode_SANDBOX
}

// Meta is a set of arguments for ExecOp.
// Meta is unrelated to LLB metadata.
// FIXME: rename (ExecContext? ExecArgs?)
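On the wire, Security is just varint field 4 of ExecOp, with SANDBOX as the zero value so older payloads decode unchanged. A small round-trip sketch using the generated message, assuming the usual gogo Marshal helper that accompanies the MarshalTo and Unmarshal methods in this file:

// buildInsecureExec constructs an ExecOp that requests privileged (INSECURE)
// execution and round-trips it through the generated wire format.
func buildInsecureExec() (*pb.ExecOp, error) {
	op := &pb.ExecOp{
		Meta:     &pb.Meta{Args: []string{"/bin/true"}},
		Network:  pb.NetMode_HOST,
		Security: pb.SecurityMode_INSECURE,
	}
	dt, err := op.Marshal()
	if err != nil {
		return nil, err
	}
	var decoded pb.ExecOp
	if err := decoded.Unmarshal(dt); err != nil {
		return nil, err
	}
	return &decoded, nil // decoded.Security == pb.SecurityMode_INSECURE
}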
@ -538,7 +569,7 @@ func (m *Meta) Reset() { *m = Meta{} }
|
|||
func (m *Meta) String() string { return proto.CompactTextString(m) }
|
||||
func (*Meta) ProtoMessage() {}
|
||||
func (*Meta) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_ops_8d64813b9835ab08, []int{4}
|
||||
return fileDescriptor_ops_0b9d2e829935306b, []int{4}
|
||||
}
|
||||
func (m *Meta) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
|
@ -622,7 +653,7 @@ func (m *Mount) Reset() { *m = Mount{} }
|
|||
func (m *Mount) String() string { return proto.CompactTextString(m) }
|
||||
func (*Mount) ProtoMessage() {}
|
||||
func (*Mount) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_ops_8d64813b9835ab08, []int{5}
|
||||
return fileDescriptor_ops_0b9d2e829935306b, []int{5}
|
||||
}
|
||||
func (m *Mount) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
|
@ -708,7 +739,7 @@ func (m *CacheOpt) Reset() { *m = CacheOpt{} }
|
|||
func (m *CacheOpt) String() string { return proto.CompactTextString(m) }
|
||||
func (*CacheOpt) ProtoMessage() {}
|
||||
func (*CacheOpt) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_ops_8d64813b9835ab08, []int{6}
|
||||
return fileDescriptor_ops_0b9d2e829935306b, []int{6}
|
||||
}
|
||||
func (m *CacheOpt) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
|
@ -766,7 +797,7 @@ func (m *SecretOpt) Reset() { *m = SecretOpt{} }
|
|||
func (m *SecretOpt) String() string { return proto.CompactTextString(m) }
|
||||
func (*SecretOpt) ProtoMessage() {}
|
||||
func (*SecretOpt) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_ops_8d64813b9835ab08, []int{7}
|
||||
return fileDescriptor_ops_0b9d2e829935306b, []int{7}
|
||||
}
|
||||
func (m *SecretOpt) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
|
@ -845,7 +876,7 @@ func (m *SSHOpt) Reset() { *m = SSHOpt{} }
|
|||
func (m *SSHOpt) String() string { return proto.CompactTextString(m) }
|
||||
func (*SSHOpt) ProtoMessage() {}
|
||||
func (*SSHOpt) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_ops_8d64813b9835ab08, []int{8}
|
||||
return fileDescriptor_ops_0b9d2e829935306b, []int{8}
|
||||
}
|
||||
func (m *SSHOpt) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
|
@ -918,7 +949,7 @@ func (m *SourceOp) Reset() { *m = SourceOp{} }
|
|||
func (m *SourceOp) String() string { return proto.CompactTextString(m) }
|
||||
func (*SourceOp) ProtoMessage() {}
|
||||
func (*SourceOp) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_ops_8d64813b9835ab08, []int{9}
|
||||
return fileDescriptor_ops_0b9d2e829935306b, []int{9}
|
||||
}
|
||||
func (m *SourceOp) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
|
@ -970,7 +1001,7 @@ func (m *BuildOp) Reset() { *m = BuildOp{} }
|
|||
func (m *BuildOp) String() string { return proto.CompactTextString(m) }
|
||||
func (*BuildOp) ProtoMessage() {}
|
||||
func (*BuildOp) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_ops_8d64813b9835ab08, []int{10}
|
||||
return fileDescriptor_ops_0b9d2e829935306b, []int{10}
|
||||
}
|
||||
func (m *BuildOp) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
|
@ -1025,7 +1056,7 @@ func (m *BuildInput) Reset() { *m = BuildInput{} }
|
|||
func (m *BuildInput) String() string { return proto.CompactTextString(m) }
|
||||
func (*BuildInput) ProtoMessage() {}
|
||||
func (*BuildInput) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_ops_8d64813b9835ab08, []int{11}
|
||||
return fileDescriptor_ops_0b9d2e829935306b, []int{11}
|
||||
}
|
||||
func (m *BuildInput) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
|
@ -1066,7 +1097,7 @@ func (m *OpMetadata) Reset() { *m = OpMetadata{} }
|
|||
func (m *OpMetadata) String() string { return proto.CompactTextString(m) }
|
||||
func (*OpMetadata) ProtoMessage() {}
|
||||
func (*OpMetadata) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_ops_8d64813b9835ab08, []int{12}
|
||||
return fileDescriptor_ops_0b9d2e829935306b, []int{12}
|
||||
}
|
||||
func (m *OpMetadata) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
|
@ -1127,7 +1158,7 @@ func (m *ExportCache) Reset() { *m = ExportCache{} }
|
|||
func (m *ExportCache) String() string { return proto.CompactTextString(m) }
|
||||
func (*ExportCache) ProtoMessage() {}
|
||||
func (*ExportCache) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_ops_8d64813b9835ab08, []int{13}
|
||||
return fileDescriptor_ops_0b9d2e829935306b, []int{13}
|
||||
}
|
||||
func (m *ExportCache) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
|
@ -1170,7 +1201,7 @@ func (m *ProxyEnv) Reset() { *m = ProxyEnv{} }
|
|||
func (m *ProxyEnv) String() string { return proto.CompactTextString(m) }
|
||||
func (*ProxyEnv) ProtoMessage() {}
|
||||
func (*ProxyEnv) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_ops_8d64813b9835ab08, []int{14}
|
||||
return fileDescriptor_ops_0b9d2e829935306b, []int{14}
|
||||
}
|
||||
func (m *ProxyEnv) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
|
@ -1232,7 +1263,7 @@ func (m *WorkerConstraints) Reset() { *m = WorkerConstraints{} }
|
|||
func (m *WorkerConstraints) String() string { return proto.CompactTextString(m) }
|
||||
func (*WorkerConstraints) ProtoMessage() {}
|
||||
func (*WorkerConstraints) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_ops_8d64813b9835ab08, []int{15}
|
||||
return fileDescriptor_ops_0b9d2e829935306b, []int{15}
|
||||
}
|
||||
func (m *WorkerConstraints) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
|
@ -1277,7 +1308,7 @@ func (m *Definition) Reset() { *m = Definition{} }
|
|||
func (m *Definition) String() string { return proto.CompactTextString(m) }
|
||||
func (*Definition) ProtoMessage() {}
|
||||
func (*Definition) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_ops_8d64813b9835ab08, []int{16}
|
||||
return fileDescriptor_ops_0b9d2e829935306b, []int{16}
|
||||
}
|
||||
func (m *Definition) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
|
@ -1325,7 +1356,7 @@ func (m *HostIP) Reset() { *m = HostIP{} }
|
|||
func (m *HostIP) String() string { return proto.CompactTextString(m) }
|
||||
func (*HostIP) ProtoMessage() {}
|
||||
func (*HostIP) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_ops_8d64813b9835ab08, []int{17}
|
||||
return fileDescriptor_ops_0b9d2e829935306b, []int{17}
|
||||
}
|
||||
func (m *HostIP) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
|
@ -1372,7 +1403,7 @@ func (m *FileOp) Reset() { *m = FileOp{} }
|
|||
func (m *FileOp) String() string { return proto.CompactTextString(m) }
|
||||
func (*FileOp) ProtoMessage() {}
|
||||
func (*FileOp) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_ops_8d64813b9835ab08, []int{18}
|
||||
return fileDescriptor_ops_0b9d2e829935306b, []int{18}
|
||||
}
|
||||
func (m *FileOp) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
|
@ -1420,7 +1451,7 @@ func (m *FileAction) Reset() { *m = FileAction{} }
|
|||
func (m *FileAction) String() string { return proto.CompactTextString(m) }
|
||||
func (*FileAction) ProtoMessage() {}
|
||||
func (*FileAction) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_ops_8d64813b9835ab08, []int{19}
|
||||
return fileDescriptor_ops_0b9d2e829935306b, []int{19}
|
||||
}
|
||||
func (m *FileAction) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
|
@ -1645,7 +1676,7 @@ func (m *FileActionCopy) Reset() { *m = FileActionCopy{} }
|
|||
func (m *FileActionCopy) String() string { return proto.CompactTextString(m) }
|
||||
func (*FileActionCopy) ProtoMessage() {}
|
||||
func (*FileActionCopy) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_ops_8d64813b9835ab08, []int{20}
|
||||
return fileDescriptor_ops_0b9d2e829935306b, []int{20}
|
||||
}
|
||||
func (m *FileActionCopy) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
|
@ -1764,7 +1795,7 @@ func (m *FileActionMkFile) Reset() { *m = FileActionMkFile{} }
|
|||
func (m *FileActionMkFile) String() string { return proto.CompactTextString(m) }
|
||||
func (*FileActionMkFile) ProtoMessage() {}
|
||||
func (*FileActionMkFile) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_ops_8d64813b9835ab08, []int{21}
|
||||
return fileDescriptor_ops_0b9d2e829935306b, []int{21}
|
||||
}
|
||||
func (m *FileActionMkFile) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
|
@ -1841,7 +1872,7 @@ func (m *FileActionMkDir) Reset() { *m = FileActionMkDir{} }
|
|||
func (m *FileActionMkDir) String() string { return proto.CompactTextString(m) }
|
||||
func (*FileActionMkDir) ProtoMessage() {}
|
||||
func (*FileActionMkDir) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_ops_8d64813b9835ab08, []int{22}
|
||||
return fileDescriptor_ops_0b9d2e829935306b, []int{22}
|
||||
}
|
||||
func (m *FileActionMkDir) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
|
@ -1914,7 +1945,7 @@ func (m *FileActionRm) Reset() { *m = FileActionRm{} }
|
|||
func (m *FileActionRm) String() string { return proto.CompactTextString(m) }
|
||||
func (*FileActionRm) ProtoMessage() {}
|
||||
func (*FileActionRm) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_ops_8d64813b9835ab08, []int{23}
|
||||
return fileDescriptor_ops_0b9d2e829935306b, []int{23}
|
||||
}
|
||||
func (m *FileActionRm) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
|
@ -1969,7 +2000,7 @@ func (m *ChownOpt) Reset() { *m = ChownOpt{} }
|
|||
func (m *ChownOpt) String() string { return proto.CompactTextString(m) }
|
||||
func (*ChownOpt) ProtoMessage() {}
|
||||
func (*ChownOpt) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_ops_8d64813b9835ab08, []int{24}
|
||||
return fileDescriptor_ops_0b9d2e829935306b, []int{24}
|
||||
}
|
||||
func (m *ChownOpt) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
|
@ -2019,7 +2050,7 @@ func (m *UserOpt) Reset() { *m = UserOpt{} }
|
|||
func (m *UserOpt) String() string { return proto.CompactTextString(m) }
|
||||
func (*UserOpt) ProtoMessage() {}
|
||||
func (*UserOpt) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_ops_8d64813b9835ab08, []int{25}
|
||||
return fileDescriptor_ops_0b9d2e829935306b, []int{25}
|
||||
}
|
||||
func (m *UserOpt) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
|
@ -2159,7 +2190,7 @@ func (m *NamedUserOpt) Reset() { *m = NamedUserOpt{} }
|
|||
func (m *NamedUserOpt) String() string { return proto.CompactTextString(m) }
|
||||
func (*NamedUserOpt) ProtoMessage() {}
|
||||
func (*NamedUserOpt) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_ops_8d64813b9835ab08, []int{26}
|
||||
return fileDescriptor_ops_0b9d2e829935306b, []int{26}
|
||||
}
|
||||
func (m *NamedUserOpt) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
|
@ -2226,6 +2257,7 @@ func init() {
|
|||
proto.RegisterType((*UserOpt)(nil), "pb.UserOpt")
|
||||
proto.RegisterType((*NamedUserOpt)(nil), "pb.NamedUserOpt")
|
||||
proto.RegisterEnum("pb.NetMode", NetMode_name, NetMode_value)
|
||||
proto.RegisterEnum("pb.SecurityMode", SecurityMode_name, SecurityMode_value)
|
||||
proto.RegisterEnum("pb.MountType", MountType_name, MountType_value)
|
||||
proto.RegisterEnum("pb.CacheSharingOpt", CacheSharingOpt_name, CacheSharingOpt_value)
|
||||
}
|
||||
|
@ -2470,6 +2502,11 @@ func (m *ExecOp) MarshalTo(dAtA []byte) (int, error) {
|
|||
i++
|
||||
i = encodeVarintOps(dAtA, i, uint64(m.Network))
|
||||
}
|
||||
if m.Security != 0 {
|
||||
dAtA[i] = 0x20
|
||||
i++
|
||||
i = encodeVarintOps(dAtA, i, uint64(m.Security))
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
|
@ -3852,6 +3889,9 @@ func (m *ExecOp) Size() (n int) {
|
|||
if m.Network != 0 {
|
||||
n += 1 + sovOps(uint64(m.Network))
|
||||
}
|
||||
if m.Security != 0 {
|
||||
n += 1 + sovOps(uint64(m.Security))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
|
@ -5156,6 +5196,25 @@ func (m *ExecOp) Unmarshal(dAtA []byte) error {
|
|||
break
|
||||
}
|
||||
}
|
||||
case 4:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Security", wireType)
|
||||
}
|
||||
m.Security = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowOps
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Security |= (SecurityMode(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipOps(dAtA[iNdEx:])
|
||||
|
@ -9171,129 +9230,132 @@ var (
|
|||
ErrIntOverflowOps = fmt.Errorf("proto: integer overflow")
|
||||
)
|
||||
|
||||
func init() { proto.RegisterFile("ops.proto", fileDescriptor_ops_8d64813b9835ab08) }
|
||||
func init() { proto.RegisterFile("ops.proto", fileDescriptor_ops_0b9d2e829935306b) }
|
||||
|
||||
var fileDescriptor_ops_8d64813b9835ab08 = []byte{
|
||||
// 1924 bytes of a gzipped FileDescriptorProto
|
||||
var fileDescriptor_ops_0b9d2e829935306b = []byte{
|
||||
// 1978 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x5f, 0x6f, 0x1b, 0xc7,
|
||||
0x11, 0x17, 0x8f, 0x7f, 0x6f, 0x28, 0xc9, 0xec, 0xc6, 0x49, 0x59, 0xd5, 0x95, 0x94, 0x4b, 0x1a,
|
||||
0x30, 0xb2, 0x4d, 0x01, 0x0a, 0x90, 0x04, 0x79, 0x28, 0x2a, 0xfe, 0x31, 0xc4, 0x24, 0x16, 0x85,
|
||||
0xa5, 0xed, 0x3c, 0x1a, 0xc7, 0xbb, 0x25, 0x75, 0x20, 0xef, 0xf6, 0xb0, 0xb7, 0xb4, 0xc4, 0x97,
|
||||
0x3e, 0xf8, 0x13, 0x04, 0x28, 0xd0, 0xb7, 0x3e, 0xf4, 0xa5, 0x40, 0x3f, 0x44, 0xdf, 0xf3, 0x18,
|
||||
0x14, 0x7d, 0x48, 0xfb, 0x90, 0x16, 0xf6, 0x17, 0x29, 0x66, 0x77, 0x8f, 0x77, 0xa4, 0x15, 0xd8,
|
||||
0x46, 0x8b, 0x3e, 0x71, 0x76, 0xe6, 0xb7, 0xb3, 0xb3, 0x33, 0xb3, 0x33, 0x73, 0x04, 0x9b, 0xc7,
|
||||
0x49, 0x3b, 0x16, 0x5c, 0x72, 0x62, 0xc5, 0xe3, 0xbd, 0xfb, 0xd3, 0x40, 0x5e, 0x2e, 0xc6, 0x6d,
|
||||
0x8f, 0x87, 0xc7, 0x53, 0x3e, 0xe5, 0xc7, 0x4a, 0x34, 0x5e, 0x4c, 0xd4, 0x4a, 0x2d, 0x14, 0xa5,
|
||||
0xb7, 0x38, 0x7f, 0xb2, 0xc0, 0x1a, 0xc6, 0xe4, 0x7d, 0xa8, 0x04, 0x51, 0xbc, 0x90, 0x49, 0xb3,
|
||||
0x70, 0x58, 0x6c, 0xd5, 0x4f, 0xec, 0x76, 0x3c, 0x6e, 0x0f, 0x90, 0x43, 0x8d, 0x80, 0x1c, 0x42,
|
||||
0x89, 0x5d, 0x33, 0xaf, 0x69, 0x1d, 0x16, 0x5a, 0xf5, 0x13, 0x40, 0x40, 0xff, 0x9a, 0x79, 0xc3,
|
||||
0xf8, 0x6c, 0x8b, 0x2a, 0x09, 0xf9, 0x08, 0x2a, 0x09, 0x5f, 0x08, 0x8f, 0x35, 0x8b, 0x0a, 0xb3,
|
||||
0x8d, 0x98, 0x91, 0xe2, 0x28, 0x94, 0x91, 0xa2, 0xa6, 0x49, 0x30, 0x67, 0xcd, 0x52, 0xa6, 0xe9,
|
||||
0x41, 0x30, 0xd7, 0x18, 0x25, 0x21, 0x1f, 0x40, 0x79, 0xbc, 0x08, 0xe6, 0x7e, 0xb3, 0xac, 0x20,
|
||||
0x75, 0x84, 0x74, 0x90, 0xa1, 0x30, 0x5a, 0x46, 0x5a, 0x50, 0x8b, 0xe7, 0xae, 0x9c, 0x70, 0x11,
|
||||
0x36, 0x21, 0x3b, 0xf0, 0xc2, 0xf0, 0xe8, 0x4a, 0x4a, 0x3e, 0x83, 0xba, 0xc7, 0xa3, 0x44, 0x0a,
|
||||
0x37, 0x88, 0x64, 0xd2, 0xac, 0x2b, 0xf0, 0xbb, 0x08, 0xfe, 0x86, 0x8b, 0x19, 0x13, 0xdd, 0x4c,
|
||||
0x48, 0xf3, 0xc8, 0x4e, 0x09, 0x2c, 0x1e, 0x3b, 0x7f, 0x28, 0x40, 0x2d, 0xd5, 0x4a, 0x1c, 0xd8,
|
||||
0x3e, 0x15, 0xde, 0x65, 0x20, 0x99, 0x27, 0x17, 0x82, 0x35, 0x0b, 0x87, 0x85, 0x96, 0x4d, 0xd7,
|
||||
0x78, 0x64, 0x17, 0xac, 0xe1, 0x48, 0x39, 0xca, 0xa6, 0xd6, 0x70, 0x44, 0x9a, 0x50, 0x7d, 0xe2,
|
||||
0x8a, 0xc0, 0x8d, 0xa4, 0xf2, 0x8c, 0x4d, 0xd3, 0x25, 0xb9, 0x03, 0xf6, 0x70, 0xf4, 0x84, 0x89,
|
||||
0x24, 0xe0, 0x91, 0xf2, 0x87, 0x4d, 0x33, 0x06, 0xd9, 0x07, 0x18, 0x8e, 0x1e, 0x30, 0x17, 0x95,
|
||||
0x26, 0xcd, 0xf2, 0x61, 0xb1, 0x65, 0xd3, 0x1c, 0xc7, 0xf9, 0x1d, 0x94, 0x55, 0x8c, 0xc8, 0x97,
|
||||
0x50, 0xf1, 0x83, 0x29, 0x4b, 0xa4, 0x36, 0xa7, 0x73, 0xf2, 0xdd, 0x8f, 0x07, 0x5b, 0xff, 0xfc,
|
||||
0xf1, 0xe0, 0x28, 0x97, 0x0c, 0x3c, 0x66, 0x91, 0xc7, 0x23, 0xe9, 0x06, 0x11, 0x13, 0xc9, 0xf1,
|
||||
0x94, 0xdf, 0xd7, 0x5b, 0xda, 0x3d, 0xf5, 0x43, 0x8d, 0x06, 0xf2, 0x31, 0x94, 0x83, 0xc8, 0x67,
|
||||
0xd7, 0xca, 0xfe, 0x62, 0xe7, 0x1d, 0xa3, 0xaa, 0x3e, 0x5c, 0xc8, 0x78, 0x21, 0x07, 0x28, 0xa2,
|
||||
0x1a, 0xe1, 0xc4, 0x50, 0xd1, 0x29, 0x40, 0xee, 0x40, 0x29, 0x64, 0xd2, 0x55, 0xc7, 0xd7, 0x4f,
|
||||
0x6a, 0xe8, 0xda, 0x87, 0x4c, 0xba, 0x54, 0x71, 0x31, 0xbb, 0x42, 0xbe, 0x40, 0xd7, 0x5b, 0x59,
|
||||
0x76, 0x3d, 0x44, 0x0e, 0x35, 0x02, 0xf2, 0x6b, 0xa8, 0x46, 0x4c, 0x5e, 0x71, 0x31, 0x53, 0x2e,
|
||||
0xda, 0xd5, 0x31, 0x3f, 0x67, 0xf2, 0x21, 0xf7, 0x19, 0x4d, 0x65, 0xce, 0x5f, 0x0a, 0x50, 0x42,
|
||||
0xc5, 0x84, 0x40, 0xc9, 0x15, 0x53, 0x9d, 0xae, 0x36, 0x55, 0x34, 0x69, 0x40, 0x91, 0x45, 0xcf,
|
||||
0xd4, 0x19, 0x36, 0x45, 0x12, 0x39, 0xde, 0x95, 0x6f, 0x9c, 0x8e, 0x24, 0xee, 0x5b, 0x24, 0x4c,
|
||||
0x18, 0x5f, 0x2b, 0x9a, 0x7c, 0x0c, 0x76, 0x2c, 0xf8, 0xf5, 0xf2, 0x29, 0xee, 0x2e, 0xe7, 0x32,
|
||||
0x09, 0x99, 0xfd, 0xe8, 0x19, 0xad, 0xc5, 0x86, 0x22, 0x47, 0x00, 0xec, 0x5a, 0x0a, 0xf7, 0x8c,
|
||||
0x27, 0x32, 0x69, 0x56, 0xd4, 0x6d, 0x54, 0x02, 0x23, 0x63, 0x70, 0x41, 0x73, 0x52, 0xe7, 0x6f,
|
||||
0x16, 0x94, 0xd5, 0x25, 0x49, 0x0b, 0x5d, 0x1a, 0x2f, 0x74, 0x74, 0x8a, 0x1d, 0x62, 0x5c, 0x0a,
|
||||
0x2a, 0x78, 0x2b, 0x8f, 0x62, 0x20, 0xf7, 0xa0, 0x96, 0xb0, 0x39, 0xf3, 0x24, 0x17, 0x26, 0x7f,
|
||||
0x56, 0x6b, 0x34, 0xdd, 0xc7, 0x10, 0xeb, 0xdb, 0x28, 0x9a, 0xdc, 0x85, 0x0a, 0x57, 0x71, 0x51,
|
||||
0x17, 0xfa, 0x89, 0x68, 0x19, 0x08, 0x2a, 0x17, 0xcc, 0xf5, 0x79, 0x34, 0x5f, 0xaa, 0x6b, 0xd6,
|
||||
0xe8, 0x6a, 0x4d, 0xee, 0x82, 0xad, 0x22, 0xf1, 0x68, 0x19, 0xb3, 0x66, 0x45, 0x45, 0x60, 0x67,
|
||||
0x15, 0x25, 0x64, 0xd2, 0x4c, 0x8e, 0x2f, 0xcf, 0x73, 0xbd, 0x4b, 0x36, 0x8c, 0x65, 0xf3, 0x76,
|
||||
0xe6, 0xaf, 0xae, 0xe1, 0xd1, 0x95, 0x14, 0xd5, 0x26, 0xcc, 0x13, 0x4c, 0x22, 0xf4, 0x5d, 0x05,
|
||||
0x55, 0x6a, 0x47, 0x29, 0x93, 0x66, 0x72, 0xe2, 0x40, 0x65, 0x34, 0x3a, 0x43, 0xe4, 0x7b, 0x59,
|
||||
0x65, 0xd0, 0x1c, 0x6a, 0x24, 0xce, 0x00, 0x6a, 0xe9, 0x31, 0xf8, 0xcc, 0x06, 0x3d, 0xf3, 0x00,
|
||||
0xad, 0x41, 0x8f, 0xdc, 0x87, 0x6a, 0x72, 0xe9, 0x8a, 0x20, 0x9a, 0x2a, 0xdf, 0xed, 0x9e, 0xbc,
|
||||
0xb3, 0xb2, 0x6a, 0xa4, 0xf9, 0xa8, 0x29, 0xc5, 0x38, 0x1c, 0xec, 0x95, 0x19, 0xaf, 0xe8, 0x6a,
|
||||
0x40, 0x71, 0x11, 0xf8, 0x4a, 0xcf, 0x0e, 0x45, 0x12, 0x39, 0xd3, 0x40, 0xe7, 0xd2, 0x0e, 0x45,
|
||||
0x12, 0x03, 0x12, 0x72, 0x5f, 0xd7, 0xb1, 0x1d, 0xaa, 0x68, 0xf4, 0x31, 0x8f, 0x65, 0xc0, 0x23,
|
||||
0x77, 0x9e, 0xfa, 0x38, 0x5d, 0x3b, 0xf3, 0xf4, 0x7e, 0xff, 0x97, 0xd3, 0x7e, 0x5f, 0x80, 0x5a,
|
||||
0x5a, 0x7c, 0xb1, 0x92, 0x04, 0x3e, 0x8b, 0x64, 0x30, 0x09, 0x98, 0x30, 0x07, 0xe7, 0x38, 0xe4,
|
||||
0x3e, 0x94, 0x5d, 0x29, 0x45, 0xfa, 0x40, 0x7f, 0x9e, 0xaf, 0xdc, 0xed, 0x53, 0x94, 0xf4, 0x23,
|
||||
0x29, 0x96, 0x54, 0xa3, 0xf6, 0x3e, 0x07, 0xc8, 0x98, 0x68, 0xeb, 0x8c, 0x2d, 0x8d, 0x56, 0x24,
|
||||
0xc9, 0x6d, 0x28, 0x3f, 0x73, 0xe7, 0x0b, 0x66, 0x72, 0x58, 0x2f, 0xbe, 0xb0, 0x3e, 0x2f, 0x38,
|
||||
0x7f, 0xb5, 0xa0, 0x6a, 0x2a, 0x39, 0xb9, 0x07, 0x55, 0x55, 0xc9, 0x8d, 0x45, 0x37, 0x3f, 0x8c,
|
||||
0x14, 0x42, 0x8e, 0x57, 0x2d, 0x2a, 0x67, 0xa3, 0x51, 0xa5, 0x5b, 0x95, 0xb1, 0x31, 0x6b, 0x58,
|
||||
0x45, 0x9f, 0x4d, 0x4c, 0x2f, 0xda, 0x45, 0x74, 0x8f, 0x4d, 0x82, 0x28, 0x40, 0xff, 0x50, 0x14,
|
||||
0x91, 0x7b, 0xe9, 0xad, 0x4b, 0x4a, 0xe3, 0x7b, 0x79, 0x8d, 0xaf, 0x5e, 0x7a, 0x00, 0xf5, 0xdc,
|
||||
0x31, 0x37, 0xdc, 0xfa, 0xc3, 0xfc, 0xad, 0xcd, 0x91, 0x4a, 0x9d, 0x6e, 0xa4, 0x99, 0x17, 0xfe,
|
||||
0x0b, 0xff, 0x7d, 0x0a, 0x90, 0xa9, 0x7c, 0xf3, 0xc2, 0xe2, 0x3c, 0x2f, 0x02, 0x0c, 0x63, 0x2c,
|
||||
0x9d, 0xbe, 0xab, 0x2a, 0xf2, 0x76, 0x30, 0x8d, 0xb8, 0x60, 0x4f, 0xd5, 0x53, 0x55, 0xfb, 0x6b,
|
||||
0xb4, 0xae, 0x79, 0xea, 0xc5, 0x90, 0x53, 0xa8, 0xfb, 0x2c, 0xf1, 0x44, 0xa0, 0x12, 0xca, 0x38,
|
||||
0xfd, 0x00, 0xef, 0x94, 0xe9, 0x69, 0xf7, 0x32, 0x84, 0xf6, 0x55, 0x7e, 0x0f, 0x39, 0x81, 0x6d,
|
||||
0x76, 0x1d, 0x73, 0x21, 0xcd, 0x29, 0xba, 0xe1, 0xdf, 0xd2, 0xa3, 0x03, 0xf2, 0xd5, 0x49, 0xb4,
|
||||
0xce, 0xb2, 0x05, 0x71, 0xa1, 0xe4, 0xb9, 0xb1, 0xee, 0x76, 0xf5, 0x93, 0xe6, 0xc6, 0x79, 0x5d,
|
||||
0x37, 0xd6, 0x4e, 0xeb, 0x7c, 0x82, 0x77, 0x7d, 0xfe, 0xaf, 0x83, 0xbb, 0xb9, 0x16, 0x17, 0xf2,
|
||||
0xf1, 0xf2, 0x58, 0xe5, 0xcb, 0x2c, 0x90, 0xc7, 0x0b, 0x19, 0xcc, 0x8f, 0xdd, 0x38, 0x40, 0x75,
|
||||
0xb8, 0x71, 0xd0, 0xa3, 0x4a, 0xf5, 0xde, 0x6f, 0xa0, 0xb1, 0x69, 0xf7, 0xdb, 0xc4, 0x60, 0xef,
|
||||
0x33, 0xb0, 0x57, 0x76, 0xbc, 0x6e, 0x63, 0x2d, 0x1f, 0xbc, 0x0f, 0xa0, 0x9e, 0xbb, 0x37, 0x02,
|
||||
0x9f, 0x28, 0xa0, 0xf6, 0xbe, 0x5e, 0x38, 0xcf, 0x71, 0xda, 0x48, 0xfb, 0xcd, 0xaf, 0x00, 0x2e,
|
||||
0xa5, 0x8c, 0x9f, 0xaa, 0x06, 0x64, 0x0e, 0xb1, 0x91, 0xa3, 0x10, 0xe4, 0x00, 0xea, 0xb8, 0x48,
|
||||
0x8c, 0x5c, 0x5b, 0xaa, 0x76, 0x24, 0x1a, 0xf0, 0x4b, 0xb0, 0x27, 0xab, 0xed, 0xba, 0x71, 0xd4,
|
||||
0x26, 0xe9, 0xee, 0x5f, 0x40, 0x2d, 0xe2, 0x46, 0xa6, 0xfb, 0x61, 0x35, 0xe2, 0x4a, 0xe4, 0xdc,
|
||||
0x85, 0x9f, 0xbd, 0x32, 0x1a, 0x91, 0xf7, 0xa0, 0x32, 0x09, 0xe6, 0x52, 0x3d, 0x57, 0x6c, 0xb1,
|
||||
0x66, 0xe5, 0xfc, 0xa3, 0x00, 0x90, 0x3d, 0x2d, 0xf4, 0x08, 0xbe, 0x3b, 0xc4, 0x6c, 0xeb, 0x77,
|
||||
0x36, 0x87, 0x5a, 0x68, 0x22, 0x68, 0xf2, 0xe8, 0xce, 0xfa, 0x73, 0x6c, 0xa7, 0x01, 0xd6, 0xb1,
|
||||
0x3d, 0x31, 0xb1, 0x7d, 0x9b, 0xf1, 0x65, 0x75, 0xc2, 0xde, 0x57, 0xb0, 0xb3, 0xa6, 0xee, 0x0d,
|
||||
0x5f, 0x6a, 0x96, 0x65, 0xf9, 0x90, 0xdd, 0x83, 0x8a, 0x6e, 0xed, 0x58, 0x7f, 0x91, 0x32, 0x6a,
|
||||
0x14, 0xad, 0xea, 0xf8, 0x45, 0x3a, 0xe8, 0x0d, 0x2e, 0x9c, 0x13, 0xa8, 0xe8, 0x49, 0x96, 0xb4,
|
||||
0xa0, 0xea, 0x7a, 0x78, 0xb5, 0xb4, 0x5c, 0xed, 0xa6, 0x63, 0xee, 0xa9, 0x62, 0xd3, 0x54, 0xec,
|
||||
0xfc, 0xdd, 0x02, 0xc8, 0xf8, 0x6f, 0x31, 0x2b, 0x7c, 0x01, 0xbb, 0x09, 0xf3, 0x78, 0xe4, 0xbb,
|
||||
0x62, 0xa9, 0xa4, 0x66, 0x62, 0xbb, 0x69, 0xcb, 0x06, 0x32, 0x37, 0x37, 0x14, 0x5f, 0x3f, 0x37,
|
||||
0xb4, 0xa0, 0xe4, 0xf1, 0x78, 0x69, 0x9e, 0x2f, 0x59, 0xbf, 0x48, 0x97, 0xc7, 0x4b, 0x9c, 0xdb,
|
||||
0x11, 0x41, 0xda, 0x50, 0x09, 0x67, 0x6a, 0xb6, 0xd7, 0x63, 0xd4, 0xed, 0x75, 0xec, 0xc3, 0x19,
|
||||
0xd2, 0xf8, 0x25, 0xa0, 0x51, 0xe4, 0x2e, 0x94, 0xc3, 0x99, 0x1f, 0x08, 0x35, 0x71, 0xd4, 0x75,
|
||||
0xbf, 0xce, 0xc3, 0x7b, 0x81, 0xc0, 0x79, 0x5f, 0x61, 0x88, 0x03, 0x96, 0x08, 0x9b, 0x55, 0x85,
|
||||
0x6c, 0x6c, 0x78, 0x33, 0x3c, 0xdb, 0xa2, 0x96, 0x08, 0x3b, 0x35, 0xa8, 0x68, 0xbf, 0x3a, 0x7f,
|
||||
0x2e, 0xc2, 0xee, 0xba, 0x95, 0x98, 0x07, 0x89, 0xf0, 0xd2, 0x3c, 0x48, 0x84, 0xb7, 0x1a, 0xa9,
|
||||
0xac, 0xdc, 0x48, 0xe5, 0x40, 0x99, 0x5f, 0x45, 0x4c, 0xe4, 0x3f, 0x62, 0xba, 0x97, 0xfc, 0x2a,
|
||||
0xc2, 0xe1, 0x41, 0x8b, 0xd6, 0x7a, 0x71, 0xd9, 0xf4, 0xe2, 0x0f, 0x61, 0x67, 0xc2, 0xe7, 0x73,
|
||||
0x7e, 0x35, 0x5a, 0x86, 0xf3, 0x20, 0x9a, 0x99, 0x86, 0xbc, 0xce, 0x24, 0x2d, 0xb8, 0xe5, 0x07,
|
||||
0x02, 0xcd, 0xe9, 0xf2, 0x48, 0xb2, 0x48, 0x4d, 0x91, 0x88, 0xdb, 0x64, 0x93, 0x2f, 0xe1, 0xd0,
|
||||
0x95, 0x92, 0x85, 0xb1, 0x7c, 0x1c, 0xc5, 0xae, 0x37, 0xeb, 0x71, 0x4f, 0xbd, 0xc7, 0x30, 0x76,
|
||||
0x65, 0x30, 0x0e, 0xe6, 0x81, 0x5c, 0x2a, 0x67, 0xd4, 0xe8, 0x6b, 0x71, 0xe4, 0x23, 0xd8, 0xf5,
|
||||
0x04, 0x73, 0x25, 0xeb, 0xb1, 0x44, 0x5e, 0xb8, 0xf2, 0xb2, 0x59, 0x53, 0x3b, 0x37, 0xb8, 0x78,
|
||||
0x07, 0x17, 0xad, 0xfd, 0x26, 0x98, 0xfb, 0x9e, 0x2b, 0xfc, 0xa6, 0xad, 0xef, 0xb0, 0xc6, 0x24,
|
||||
0x6d, 0x20, 0x8a, 0xd1, 0x0f, 0x63, 0xb9, 0x5c, 0x41, 0x41, 0x41, 0x6f, 0x90, 0xe0, 0x47, 0x8e,
|
||||
0x0c, 0x42, 0x96, 0x48, 0x37, 0x8c, 0xd5, 0xc7, 0x57, 0x91, 0x66, 0x0c, 0xe7, 0xdb, 0x02, 0x34,
|
||||
0x36, 0x53, 0x04, 0x1d, 0x1c, 0xa3, 0x99, 0xe6, 0xb1, 0x21, 0xbd, 0x72, 0xba, 0x95, 0x73, 0x3a,
|
||||
0x06, 0x10, 0xab, 0x0a, 0xc6, 0x6a, 0x9b, 0x2a, 0x3a, 0x0b, 0x60, 0xe9, 0xa7, 0x03, 0xb8, 0x66,
|
||||
0x52, 0x79, 0xd3, 0xa4, 0x3f, 0x16, 0xe0, 0xd6, 0x46, 0x1a, 0xbe, 0xb1, 0x45, 0x87, 0x50, 0x0f,
|
||||
0xdd, 0x19, 0xbb, 0x70, 0x85, 0x0a, 0x6e, 0x51, 0x37, 0xd6, 0x1c, 0xeb, 0x7f, 0x60, 0x5f, 0x04,
|
||||
0xdb, 0xf9, 0xdc, 0xbf, 0xd1, 0xb6, 0x34, 0x94, 0xe7, 0x5c, 0x3e, 0xe0, 0x8b, 0xc8, 0x37, 0xdd,
|
||||
0x68, 0x9d, 0xf9, 0x6a, 0xc0, 0x8b, 0x37, 0x04, 0xdc, 0x39, 0x87, 0x5a, 0x6a, 0x20, 0x39, 0x30,
|
||||
0x1f, 0x50, 0x85, 0xec, 0xcb, 0xfc, 0x71, 0xc2, 0x04, 0xda, 0xae, 0xbf, 0xa6, 0xde, 0x87, 0xf2,
|
||||
0x54, 0xf0, 0x45, 0x6c, 0x6a, 0xeb, 0x1a, 0x42, 0x4b, 0x9c, 0x11, 0x54, 0x0d, 0x87, 0x1c, 0x41,
|
||||
0x65, 0xbc, 0x3c, 0x77, 0x43, 0x66, 0x14, 0xaa, 0x87, 0x8d, 0x6b, 0xdf, 0x20, 0xb0, 0x5a, 0x68,
|
||||
0x04, 0xb9, 0x0d, 0xa5, 0xf1, 0x72, 0xd0, 0xd3, 0x63, 0x32, 0xd6, 0x1c, 0x5c, 0x75, 0x2a, 0xda,
|
||||
0x20, 0xe7, 0x6b, 0xd8, 0xce, 0xef, 0x43, 0xa7, 0x44, 0xa9, 0x5e, 0x9b, 0x2a, 0x3a, 0x2b, 0xae,
|
||||
0xd6, 0x6b, 0x8a, 0xeb, 0x51, 0x0b, 0xaa, 0xe6, 0xe3, 0x93, 0xd8, 0x50, 0x7e, 0x7c, 0x3e, 0xea,
|
||||
0x3f, 0x6a, 0x6c, 0x91, 0x1a, 0x94, 0xce, 0x86, 0xa3, 0x47, 0x8d, 0x02, 0x52, 0xe7, 0xc3, 0xf3,
|
||||
0x7e, 0xc3, 0x3a, 0xfa, 0x2d, 0xd8, 0xab, 0x8f, 0x24, 0x64, 0x77, 0x06, 0xe7, 0xbd, 0xc6, 0x16,
|
||||
0x01, 0xa8, 0x8c, 0xfa, 0x5d, 0xda, 0x47, 0x70, 0x15, 0x8a, 0xa3, 0xd1, 0x59, 0xc3, 0x42, 0x55,
|
||||
0xdd, 0xd3, 0xee, 0x59, 0xbf, 0x51, 0x44, 0xf2, 0xd1, 0xc3, 0x8b, 0x07, 0xa3, 0x46, 0xe9, 0xe8,
|
||||
0x53, 0xb8, 0xb5, 0xf1, 0x91, 0xa2, 0x76, 0x9f, 0x9d, 0xd2, 0x3e, 0x6a, 0xaa, 0x43, 0xf5, 0x82,
|
||||
0x0e, 0x9e, 0x9c, 0x3e, 0xea, 0x37, 0x0a, 0x28, 0xf8, 0x7a, 0xd8, 0xfd, 0xaa, 0xdf, 0x6b, 0x58,
|
||||
0x9d, 0x3b, 0xdf, 0xbd, 0xd8, 0x2f, 0x7c, 0xff, 0x62, 0xbf, 0xf0, 0xc3, 0x8b, 0xfd, 0xc2, 0xbf,
|
||||
0x5f, 0xec, 0x17, 0xbe, 0x7d, 0xb9, 0xbf, 0xf5, 0xfd, 0xcb, 0xfd, 0xad, 0x1f, 0x5e, 0xee, 0x6f,
|
||||
0x8d, 0x2b, 0xea, 0x0f, 0x9e, 0x4f, 0xfe, 0x13, 0x00, 0x00, 0xff, 0xff, 0x9b, 0x24, 0xd0, 0xaa,
|
||||
0x20, 0x12, 0x00, 0x00,
|
||||
0x11, 0xd7, 0x1d, 0xff, 0xde, 0x50, 0x92, 0xd9, 0x8d, 0x93, 0xb2, 0xaa, 0x2b, 0x29, 0x97, 0x34,
|
||||
0x90, 0x65, 0x5b, 0x02, 0x14, 0x20, 0x09, 0xf2, 0x50, 0x54, 0xfc, 0x63, 0x88, 0x49, 0x2c, 0x0a,
|
||||
0x4b, 0xdb, 0xe9, 0x9b, 0x71, 0xbc, 0x5b, 0x52, 0x07, 0xf2, 0x6e, 0x0f, 0x7b, 0x4b, 0x5b, 0x7c,
|
||||
0xe9, 0x83, 0x3f, 0x41, 0x80, 0x02, 0x7d, 0x6b, 0x81, 0xbe, 0x14, 0xe8, 0x87, 0xe8, 0x7b, 0x1e,
|
||||
0x83, 0xa2, 0x0f, 0x69, 0x1f, 0xd2, 0xc2, 0xfe, 0x22, 0xc5, 0xec, 0xee, 0xf1, 0x8e, 0xb4, 0x02,
|
||||
0xdb, 0x68, 0xd1, 0x27, 0xce, 0xcd, 0xfc, 0x76, 0x76, 0x76, 0x66, 0x76, 0x66, 0x96, 0xe0, 0xf0,
|
||||
0x24, 0x3d, 0x4a, 0x04, 0x97, 0x9c, 0xd8, 0xc9, 0x68, 0xe7, 0xde, 0x24, 0x94, 0x97, 0xf3, 0xd1,
|
||||
0x91, 0xcf, 0xa3, 0xe3, 0x09, 0x9f, 0xf0, 0x63, 0x25, 0x1a, 0xcd, 0xc7, 0xea, 0x4b, 0x7d, 0x28,
|
||||
0x4a, 0x2f, 0x71, 0xff, 0x64, 0x83, 0x3d, 0x48, 0xc8, 0xfb, 0x50, 0x0d, 0xe3, 0x64, 0x2e, 0xd3,
|
||||
0x96, 0xb5, 0x5f, 0x3a, 0x68, 0x9c, 0x38, 0x47, 0xc9, 0xe8, 0xa8, 0x8f, 0x1c, 0x6a, 0x04, 0x64,
|
||||
0x1f, 0xca, 0xec, 0x8a, 0xf9, 0x2d, 0x7b, 0xdf, 0x3a, 0x68, 0x9c, 0x00, 0x02, 0x7a, 0x57, 0xcc,
|
||||
0x1f, 0x24, 0x67, 0x1b, 0x54, 0x49, 0xc8, 0x47, 0x50, 0x4d, 0xf9, 0x5c, 0xf8, 0xac, 0x55, 0x52,
|
||||
0x98, 0x4d, 0xc4, 0x0c, 0x15, 0x47, 0xa1, 0x8c, 0x14, 0x35, 0x8d, 0xc3, 0x19, 0x6b, 0x95, 0x73,
|
||||
0x4d, 0xf7, 0xc3, 0x99, 0xc6, 0x28, 0x09, 0xf9, 0x00, 0x2a, 0xa3, 0x79, 0x38, 0x0b, 0x5a, 0x15,
|
||||
0x05, 0x69, 0x20, 0xa4, 0x8d, 0x0c, 0x85, 0xd1, 0x32, 0x72, 0x00, 0xf5, 0x64, 0xe6, 0xc9, 0x31,
|
||||
0x17, 0x51, 0x0b, 0xf2, 0x0d, 0x2f, 0x0c, 0x8f, 0x2e, 0xa5, 0xe4, 0x53, 0x68, 0xf8, 0x3c, 0x4e,
|
||||
0xa5, 0xf0, 0xc2, 0x58, 0xa6, 0xad, 0x86, 0x02, 0xbf, 0x8b, 0xe0, 0xaf, 0xb9, 0x98, 0x32, 0xd1,
|
||||
0xc9, 0x85, 0xb4, 0x88, 0x6c, 0x97, 0xc1, 0xe6, 0x89, 0xfb, 0x7b, 0x0b, 0xea, 0x99, 0x56, 0xe2,
|
||||
0xc2, 0xe6, 0xa9, 0xf0, 0x2f, 0x43, 0xc9, 0x7c, 0x39, 0x17, 0xac, 0x65, 0xed, 0x5b, 0x07, 0x0e,
|
||||
0x5d, 0xe1, 0x91, 0x6d, 0xb0, 0x07, 0x43, 0xe5, 0x28, 0x87, 0xda, 0x83, 0x21, 0x69, 0x41, 0xed,
|
||||
0xb1, 0x27, 0x42, 0x2f, 0x96, 0xca, 0x33, 0x0e, 0xcd, 0x3e, 0xc9, 0x2d, 0x70, 0x06, 0xc3, 0xc7,
|
||||
0x4c, 0xa4, 0x21, 0x8f, 0x95, 0x3f, 0x1c, 0x9a, 0x33, 0xc8, 0x2e, 0xc0, 0x60, 0x78, 0x9f, 0x79,
|
||||
0xa8, 0x34, 0x6d, 0x55, 0xf6, 0x4b, 0x07, 0x0e, 0x2d, 0x70, 0xdc, 0xdf, 0x42, 0x45, 0xc5, 0x88,
|
||||
0x7c, 0x01, 0xd5, 0x20, 0x9c, 0xb0, 0x54, 0x6a, 0x73, 0xda, 0x27, 0xdf, 0xfe, 0xb0, 0xb7, 0xf1,
|
||||
0xcf, 0x1f, 0xf6, 0x0e, 0x0b, 0xc9, 0xc0, 0x13, 0x16, 0xfb, 0x3c, 0x96, 0x5e, 0x18, 0x33, 0x91,
|
||||
0x1e, 0x4f, 0xf8, 0x3d, 0xbd, 0xe4, 0xa8, 0xab, 0x7e, 0xa8, 0xd1, 0x40, 0x6e, 0x43, 0x25, 0x8c,
|
||||
0x03, 0x76, 0xa5, 0xec, 0x2f, 0xb5, 0xdf, 0x31, 0xaa, 0x1a, 0x83, 0xb9, 0x4c, 0xe6, 0xb2, 0x8f,
|
||||
0x22, 0xaa, 0x11, 0xee, 0x1f, 0x2d, 0xa8, 0xea, 0x1c, 0x20, 0xb7, 0xa0, 0x1c, 0x31, 0xe9, 0xa9,
|
||||
0xfd, 0x1b, 0x27, 0x75, 0xf4, 0xed, 0x03, 0x26, 0x3d, 0xaa, 0xb8, 0x98, 0x5e, 0x11, 0x9f, 0xa3,
|
||||
0xef, 0xed, 0x3c, 0xbd, 0x1e, 0x20, 0x87, 0x1a, 0x01, 0xf9, 0x25, 0xd4, 0x62, 0x26, 0x9f, 0x71,
|
||||
0x31, 0x55, 0x3e, 0xda, 0xd6, 0x41, 0x3f, 0x67, 0xf2, 0x01, 0x0f, 0x18, 0xcd, 0x64, 0xe4, 0x2e,
|
||||
0xd4, 0x53, 0xe6, 0xcf, 0x45, 0x28, 0x17, 0xca, 0x5f, 0xdb, 0x27, 0x4d, 0x95, 0x65, 0x86, 0xa7,
|
||||
0xc0, 0x4b, 0x84, 0xfb, 0x17, 0x0b, 0xca, 0x68, 0x06, 0x21, 0x50, 0xf6, 0xc4, 0x44, 0x67, 0xb7,
|
||||
0x43, 0x15, 0x4d, 0x9a, 0x50, 0x62, 0xf1, 0x53, 0x65, 0x91, 0x43, 0x91, 0x44, 0x8e, 0xff, 0x2c,
|
||||
0x30, 0x31, 0x42, 0x12, 0xd7, 0xcd, 0x53, 0x26, 0x4c, 0x68, 0x14, 0x4d, 0x6e, 0x83, 0x93, 0x08,
|
||||
0x7e, 0xb5, 0x78, 0x82, 0xab, 0x2b, 0x85, 0xc4, 0x43, 0x66, 0x2f, 0x7e, 0x4a, 0xeb, 0x89, 0xa1,
|
||||
0xc8, 0x21, 0x00, 0xbb, 0x92, 0xc2, 0x3b, 0xe3, 0xa9, 0x4c, 0x5b, 0x55, 0x75, 0x76, 0x95, 0xef,
|
||||
0xc8, 0xe8, 0x5f, 0xd0, 0x82, 0xd4, 0xfd, 0x9b, 0x0d, 0x15, 0xe5, 0x12, 0x72, 0x80, 0x11, 0x48,
|
||||
0xe6, 0x3a, 0x98, 0xa5, 0x36, 0x31, 0x11, 0x00, 0x15, 0xeb, 0x65, 0x00, 0x30, 0xee, 0x3b, 0xe8,
|
||||
0x8d, 0x19, 0xf3, 0x25, 0x17, 0x26, 0xdd, 0x96, 0xdf, 0x68, 0x7a, 0x80, 0x19, 0xa1, 0x4f, 0xa3,
|
||||
0x68, 0x72, 0x07, 0xaa, 0x5c, 0x85, 0x51, 0x1d, 0xe8, 0x47, 0x82, 0x6b, 0x20, 0xa8, 0x5c, 0x30,
|
||||
0x2f, 0xe0, 0xf1, 0x6c, 0xa1, 0x8e, 0x59, 0xa7, 0xcb, 0x6f, 0x72, 0x07, 0x1c, 0x15, 0xb7, 0x87,
|
||||
0x8b, 0x84, 0xb5, 0xaa, 0x2a, 0x0e, 0x5b, 0xcb, 0x98, 0x22, 0x93, 0xe6, 0x72, 0xbc, 0xa8, 0xbe,
|
||||
0xe7, 0x5f, 0xb2, 0x41, 0x22, 0x5b, 0x37, 0x73, 0x7f, 0x75, 0x0c, 0x8f, 0x2e, 0xa5, 0xa8, 0x36,
|
||||
0x65, 0xbe, 0x60, 0x12, 0xa1, 0xef, 0x2a, 0xe8, 0x96, 0x09, 0xaf, 0x66, 0xd2, 0x5c, 0x4e, 0x5c,
|
||||
0xa8, 0x0e, 0x87, 0x67, 0x88, 0x7c, 0x2f, 0x2f, 0x24, 0x9a, 0x43, 0x8d, 0xc4, 0xed, 0x43, 0x3d,
|
||||
0xdb, 0x06, 0x6f, 0x65, 0xbf, 0x6b, 0xee, 0xab, 0xdd, 0xef, 0x92, 0x7b, 0x50, 0x4b, 0x2f, 0x3d,
|
||||
0x11, 0xc6, 0x13, 0xe5, 0xbb, 0xed, 0x93, 0x77, 0x96, 0x56, 0x0d, 0x35, 0x1f, 0x35, 0x65, 0x18,
|
||||
0x97, 0x83, 0xb3, 0x34, 0xe3, 0x15, 0x5d, 0x4d, 0x28, 0xcd, 0xc3, 0x40, 0xe9, 0xd9, 0xa2, 0x48,
|
||||
0x22, 0x67, 0x12, 0xea, 0x5c, 0xda, 0xa2, 0x48, 0x62, 0x40, 0x22, 0x1e, 0xe8, 0xb2, 0xb7, 0x45,
|
||||
0x15, 0x8d, 0x3e, 0xe6, 0x89, 0x0c, 0x79, 0xec, 0xcd, 0x32, 0x1f, 0x67, 0xdf, 0xee, 0x2c, 0x3b,
|
||||
0xdf, 0xff, 0x65, 0xb7, 0xdf, 0x59, 0x50, 0xcf, 0x6a, 0x35, 0x16, 0x9e, 0x30, 0x60, 0xb1, 0x0c,
|
||||
0xc7, 0x21, 0x13, 0x66, 0xe3, 0x02, 0x87, 0xdc, 0x83, 0x8a, 0x27, 0xa5, 0xc8, 0xae, 0xf3, 0x4f,
|
||||
0x8b, 0x85, 0xfe, 0xe8, 0x14, 0x25, 0xbd, 0x58, 0x8a, 0x05, 0xd5, 0xa8, 0x9d, 0xcf, 0x00, 0x72,
|
||||
0x26, 0xda, 0x3a, 0x65, 0x0b, 0xa3, 0x15, 0x49, 0x72, 0x13, 0x2a, 0x4f, 0xbd, 0xd9, 0x9c, 0x99,
|
||||
0x1c, 0xd6, 0x1f, 0x9f, 0xdb, 0x9f, 0x59, 0xee, 0x5f, 0x6d, 0xa8, 0x99, 0xc2, 0x4f, 0xee, 0x42,
|
||||
0x4d, 0x15, 0x7e, 0x63, 0xd1, 0xf5, 0x17, 0x23, 0x83, 0x90, 0xe3, 0x65, 0x47, 0x2b, 0xd8, 0x68,
|
||||
0x54, 0xe9, 0xce, 0x66, 0x6c, 0xcc, 0xfb, 0x5b, 0x29, 0x60, 0x63, 0xd3, 0xba, 0xb6, 0x11, 0xdd,
|
||||
0x65, 0xe3, 0x30, 0x0e, 0xd1, 0x3f, 0x14, 0x45, 0xe4, 0x6e, 0x76, 0xea, 0xb2, 0xd2, 0xf8, 0x5e,
|
||||
0x51, 0xe3, 0xab, 0x87, 0xee, 0x43, 0xa3, 0xb0, 0xcd, 0x35, 0xa7, 0xfe, 0xb0, 0x78, 0x6a, 0xb3,
|
||||
0xa5, 0x52, 0xa7, 0xfb, 0x6e, 0xee, 0x85, 0xff, 0xc2, 0x7f, 0x9f, 0x00, 0xe4, 0x2a, 0xdf, 0xbc,
|
||||
0xb0, 0xb8, 0xcf, 0x4b, 0x00, 0x83, 0x04, 0x4b, 0x67, 0xe0, 0xa9, 0xfa, 0xbd, 0x19, 0x4e, 0x62,
|
||||
0x2e, 0xd8, 0x13, 0x75, 0x55, 0xd5, 0xfa, 0x3a, 0x6d, 0x68, 0x9e, 0xba, 0x31, 0xe4, 0x14, 0x1a,
|
||||
0x01, 0x4b, 0x7d, 0x11, 0xaa, 0x84, 0x32, 0x4e, 0xdf, 0xc3, 0x33, 0xe5, 0x7a, 0x8e, 0xba, 0x39,
|
||||
0x42, 0xfb, 0xaa, 0xb8, 0x86, 0x9c, 0xc0, 0x26, 0xbb, 0x4a, 0xb8, 0x90, 0x66, 0x17, 0x3d, 0x1f,
|
||||
0xdc, 0xd0, 0x93, 0x06, 0xf2, 0xd5, 0x4e, 0xb4, 0xc1, 0xf2, 0x0f, 0xe2, 0x41, 0xd9, 0xf7, 0x12,
|
||||
0xdd, 0x1c, 0x1b, 0x27, 0xad, 0xb5, 0xfd, 0x3a, 0x5e, 0xa2, 0x9d, 0xd6, 0xfe, 0x18, 0xcf, 0xfa,
|
||||
0xfc, 0x5f, 0x7b, 0x77, 0x0a, 0x1d, 0x31, 0xe2, 0xa3, 0xc5, 0xb1, 0xca, 0x97, 0x69, 0x28, 0x8f,
|
||||
0xe7, 0x32, 0x9c, 0x1d, 0x7b, 0x49, 0x88, 0xea, 0x70, 0x61, 0xbf, 0x4b, 0x95, 0xea, 0x9d, 0x5f,
|
||||
0x41, 0x73, 0xdd, 0xee, 0xb7, 0x89, 0xc1, 0xce, 0xa7, 0xe0, 0x2c, 0xed, 0x78, 0xdd, 0xc2, 0x7a,
|
||||
0x31, 0x78, 0x1f, 0x40, 0xa3, 0x70, 0x6e, 0x04, 0x3e, 0x56, 0x40, 0xed, 0x7d, 0xfd, 0xe1, 0x3e,
|
||||
0xc7, 0xe1, 0x24, 0xeb, 0x37, 0xbf, 0x00, 0xb8, 0x94, 0x32, 0x79, 0xa2, 0x1a, 0x90, 0xd9, 0xc4,
|
||||
0x41, 0x8e, 0x42, 0x90, 0x3d, 0x68, 0xe0, 0x47, 0x6a, 0xe4, 0xda, 0x52, 0xb5, 0x22, 0xd5, 0x80,
|
||||
0x9f, 0x83, 0x33, 0x5e, 0x2e, 0xd7, 0x8d, 0xa3, 0x3e, 0xce, 0x56, 0xff, 0x0c, 0xea, 0x31, 0x37,
|
||||
0x32, 0xdd, 0x0f, 0x6b, 0x31, 0x57, 0x22, 0xf7, 0x0e, 0xfc, 0xe4, 0x95, 0x49, 0x8a, 0xbc, 0x07,
|
||||
0xd5, 0x71, 0x38, 0x93, 0xea, 0xba, 0x62, 0x8b, 0x35, 0x5f, 0xee, 0x3f, 0x2c, 0x80, 0xfc, 0x6a,
|
||||
0xa1, 0x47, 0xf0, 0xde, 0x21, 0x66, 0x53, 0xdf, 0xb3, 0x19, 0xd4, 0x23, 0x13, 0x41, 0x93, 0x47,
|
||||
0xb7, 0x56, 0xaf, 0xe3, 0x51, 0x16, 0x60, 0x1d, 0xdb, 0x13, 0x13, 0xdb, 0xb7, 0x99, 0x76, 0x96,
|
||||
0x3b, 0xec, 0x7c, 0x09, 0x5b, 0x2b, 0xea, 0xde, 0xf0, 0xa6, 0xe6, 0x59, 0x56, 0x0c, 0xd9, 0x5d,
|
||||
0xa8, 0xea, 0xd6, 0x8e, 0xf5, 0x17, 0x29, 0xa3, 0x46, 0xd1, 0xaa, 0x8e, 0x5f, 0x64, 0x73, 0x61,
|
||||
0xff, 0xc2, 0x3d, 0x81, 0xaa, 0x1e, 0x7c, 0xc9, 0x01, 0xd4, 0x3c, 0x1f, 0x8f, 0x96, 0x95, 0xab,
|
||||
0xed, 0x6c, 0x2a, 0x3e, 0x55, 0x6c, 0x9a, 0x89, 0xdd, 0xbf, 0xdb, 0x00, 0x39, 0xff, 0x2d, 0x66,
|
||||
0x85, 0xcf, 0x61, 0x3b, 0x65, 0x3e, 0x8f, 0x03, 0x4f, 0x2c, 0x94, 0xd4, 0x0c, 0x78, 0xd7, 0x2d,
|
||||
0x59, 0x43, 0x16, 0xe6, 0x86, 0xd2, 0xeb, 0xe7, 0x86, 0x03, 0x28, 0xfb, 0x3c, 0x59, 0x98, 0xeb,
|
||||
0x4b, 0x56, 0x0f, 0xd2, 0xe1, 0xc9, 0x02, 0xc7, 0x7c, 0x44, 0x90, 0x23, 0xa8, 0x46, 0x53, 0xf5,
|
||||
0x14, 0xd0, 0x63, 0xd4, 0xcd, 0x55, 0xec, 0x83, 0x29, 0xd2, 0xf8, 0x70, 0xd0, 0x28, 0x72, 0x07,
|
||||
0x2a, 0xd1, 0x34, 0x08, 0x85, 0x9a, 0x38, 0x1a, 0xba, 0x5f, 0x17, 0xe1, 0xdd, 0x50, 0xe0, 0xf3,
|
||||
0x40, 0x61, 0x88, 0x0b, 0xb6, 0x88, 0x5a, 0x35, 0x85, 0x6c, 0xae, 0x79, 0x33, 0x3a, 0xdb, 0xa0,
|
||||
0xb6, 0x88, 0xda, 0x75, 0xa8, 0x6a, 0xbf, 0xba, 0x7f, 0x2e, 0xc1, 0xf6, 0xaa, 0x95, 0x98, 0x07,
|
||||
0xa9, 0xf0, 0xb3, 0x3c, 0x48, 0x85, 0xbf, 0x1c, 0xa9, 0xec, 0xc2, 0x48, 0xe5, 0x42, 0x85, 0x3f,
|
||||
0x8b, 0x99, 0x28, 0xbe, 0x79, 0x3a, 0x97, 0xfc, 0x59, 0x8c, 0xc3, 0x83, 0x16, 0xad, 0xf4, 0xe2,
|
||||
0x8a, 0xe9, 0xc5, 0x1f, 0xc2, 0xd6, 0x98, 0xcf, 0x66, 0xfc, 0xd9, 0x70, 0x11, 0xcd, 0xc2, 0x78,
|
||||
0x6a, 0x1a, 0xf2, 0x2a, 0x93, 0x1c, 0xc0, 0x8d, 0x20, 0x14, 0x68, 0x4e, 0x87, 0xc7, 0x92, 0xc5,
|
||||
0x6a, 0x8a, 0x44, 0xdc, 0x3a, 0x9b, 0x7c, 0x01, 0xfb, 0x9e, 0x94, 0x2c, 0x4a, 0xe4, 0xa3, 0x38,
|
||||
0xf1, 0xfc, 0x69, 0x97, 0xfb, 0xea, 0x3e, 0x46, 0x89, 0x27, 0xc3, 0x51, 0x38, 0xc3, 0x81, 0xb9,
|
||||
0xa6, 0x96, 0xbe, 0x16, 0x47, 0x3e, 0x82, 0x6d, 0x5f, 0x30, 0x4f, 0xb2, 0x2e, 0x4b, 0xe5, 0x85,
|
||||
0x27, 0x2f, 0x5b, 0x75, 0xb5, 0x72, 0x8d, 0x8b, 0x67, 0xf0, 0xd0, 0xda, 0xaf, 0xc3, 0x59, 0xe0,
|
||||
0x7b, 0x22, 0x68, 0x39, 0xfa, 0x0c, 0x2b, 0x4c, 0x72, 0x04, 0x44, 0x31, 0x7a, 0x51, 0x22, 0x17,
|
||||
0x4b, 0x28, 0x28, 0xe8, 0x35, 0x12, 0x7c, 0x13, 0xc9, 0x30, 0x62, 0xa9, 0xf4, 0xa2, 0x44, 0xbd,
|
||||
0xd5, 0x4a, 0x34, 0x67, 0xb8, 0xdf, 0x58, 0xd0, 0x5c, 0x4f, 0x11, 0x74, 0x70, 0x82, 0x66, 0x9a,
|
||||
0xcb, 0x86, 0xf4, 0xd2, 0xe9, 0x76, 0xc1, 0xe9, 0x18, 0x40, 0xac, 0x2a, 0x18, 0xab, 0x4d, 0xaa,
|
||||
0xe8, 0x3c, 0x80, 0xe5, 0x1f, 0x0f, 0xe0, 0x8a, 0x49, 0x95, 0x75, 0x93, 0xfe, 0x60, 0xc1, 0x8d,
|
||||
0xb5, 0x34, 0x7c, 0x63, 0x8b, 0xf6, 0xa1, 0x11, 0x79, 0x53, 0x76, 0xe1, 0x09, 0x15, 0xdc, 0x92,
|
||||
0x6e, 0xac, 0x05, 0xd6, 0xff, 0xc0, 0xbe, 0x18, 0x36, 0x8b, 0xb9, 0x7f, 0xad, 0x6d, 0x59, 0x28,
|
||||
0xcf, 0xb9, 0xbc, 0xcf, 0xe7, 0x71, 0x60, 0xba, 0xd1, 0x2a, 0xf3, 0xd5, 0x80, 0x97, 0xae, 0x09,
|
||||
0xb8, 0x7b, 0x0e, 0xf5, 0xcc, 0x40, 0xb2, 0x67, 0x1e, 0x50, 0x56, 0xfe, 0x90, 0x7f, 0x94, 0x32,
|
||||
0x81, 0xb6, 0xeb, 0xd7, 0xd4, 0xfb, 0x50, 0x99, 0x08, 0x3e, 0x4f, 0x4c, 0x6d, 0x5d, 0x41, 0x68,
|
||||
0x89, 0x3b, 0x84, 0x9a, 0xe1, 0x90, 0x43, 0xa8, 0x8e, 0x16, 0xe7, 0x5e, 0xc4, 0x8c, 0x42, 0x75,
|
||||
0xb1, 0xf1, 0x3b, 0x30, 0x08, 0xac, 0x16, 0x1a, 0x41, 0x6e, 0x42, 0x79, 0xb4, 0xe8, 0x77, 0xf5,
|
||||
0x98, 0x8c, 0x35, 0x07, 0xbf, 0xda, 0x55, 0x6d, 0x90, 0xfb, 0x15, 0x6c, 0x16, 0xd7, 0xa1, 0x53,
|
||||
0xe2, 0x4c, 0xaf, 0x43, 0x15, 0x9d, 0x17, 0x57, 0xfb, 0x35, 0xc5, 0xf5, 0xf0, 0x00, 0x6a, 0xe6,
|
||||
0xa9, 0x4a, 0x1c, 0xa8, 0x3c, 0x3a, 0x1f, 0xf6, 0x1e, 0x36, 0x37, 0x48, 0x1d, 0xca, 0x67, 0x83,
|
||||
0xe1, 0xc3, 0xa6, 0x85, 0xd4, 0xf9, 0xe0, 0xbc, 0xd7, 0xb4, 0x0f, 0x6f, 0xc3, 0x66, 0xf1, 0xb1,
|
||||
0x4a, 0x1a, 0x50, 0x1b, 0x9e, 0x9e, 0x77, 0xdb, 0x83, 0xdf, 0x34, 0x37, 0xc8, 0x26, 0xd4, 0xfb,
|
||||
0xe7, 0xc3, 0x5e, 0xe7, 0x11, 0xed, 0x35, 0xad, 0xc3, 0x5f, 0x83, 0xb3, 0x7c, 0x4f, 0xa1, 0x86,
|
||||
0x76, 0xff, 0xbc, 0xdb, 0xdc, 0x20, 0x00, 0xd5, 0x61, 0xaf, 0x43, 0x7b, 0xa8, 0xb7, 0x06, 0xa5,
|
||||
0xe1, 0xf0, 0xac, 0x69, 0xe3, 0xae, 0x9d, 0xd3, 0xce, 0x59, 0xaf, 0x59, 0x42, 0xf2, 0xe1, 0x83,
|
||||
0x8b, 0xfb, 0xc3, 0x66, 0xf9, 0xf0, 0x13, 0xb8, 0xb1, 0xf6, 0x9e, 0x51, 0xab, 0xcf, 0x4e, 0x69,
|
||||
0x0f, 0x35, 0x35, 0xa0, 0x76, 0x41, 0xfb, 0x8f, 0x4f, 0x1f, 0xf6, 0x9a, 0x16, 0x0a, 0xbe, 0x1a,
|
||||
0x74, 0xbe, 0xec, 0x75, 0x9b, 0x76, 0xfb, 0xd6, 0xb7, 0x2f, 0x76, 0xad, 0xef, 0x5e, 0xec, 0x5a,
|
||||
0xdf, 0xbf, 0xd8, 0xb5, 0xfe, 0xfd, 0x62, 0xd7, 0xfa, 0xe6, 0xe5, 0xee, 0xc6, 0x77, 0x2f, 0x77,
|
||||
0x37, 0xbe, 0x7f, 0xb9, 0xbb, 0x31, 0xaa, 0xaa, 0xbf, 0x8e, 0x3e, 0xfe, 0x4f, 0x00, 0x00, 0x00,
|
||||
0xff, 0xff, 0x87, 0x95, 0x80, 0x20, 0x7a, 0x12, 0x00, 0x00,
|
||||
}
|
||||
|
6 vendor/github.com/moby/buildkit/solver/pb/ops.proto (generated, vendored)

@@ -44,6 +44,7 @@ message ExecOp {
	Meta meta = 1;
	repeated Mount mounts = 2;
	NetMode network = 3;
	SecurityMode security = 4;
}

// Meta is a set of arguments for ExecOp.

@@ -64,6 +65,11 @@ enum NetMode {
	NONE = 2;
}

enum SecurityMode {
	SANDBOX = 0;
	INSECURE = 1; // privileged mode
}

// Mount specifies how to mount an input Op as a filesystem.
message Mount {
	int64 input = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false];
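Because SANDBOX is the zero value, an LLB definition that never mentions the field keeps today's sandboxed behavior, and only clients that explicitly opt in send INSECURE. A tiny Go sketch for mapping a user-facing flag onto the generated enum via the SecurityMode_value table (the flag handling and the strings import are assumptions, not taken from this diff):

// parseSecurityMode maps a CLI-style value ("sandbox", "insecure") onto the
// proto enum; anything unknown falls back to the SANDBOX default.
func parseSecurityMode(s string) pb.SecurityMode {
	if v, ok := pb.SecurityMode_value[strings.ToUpper(s)]; ok {
		return pb.SecurityMode(v)
	}
	return pb.SecurityMode_SANDBOX
}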
10 vendor/github.com/moby/buildkit/source/git/gitsource.go (generated, vendored)

@@ -335,6 +335,16 @@ func (gs *gitSourceHandler) Snapshot(ctx context.Context) (out cache.ImmutableRe
		return nil, errors.Wrapf(err, "failed to update submodules for %s", gs.src.Remote)
	}

	if idmap := mount.IdentityMapping(); idmap != nil {
		u := idmap.RootPair()
		err := filepath.Walk(gitDir, func(p string, f os.FileInfo, err error) error {
			return os.Lchown(p, u.UID, u.GID)
		})
		if err != nil {
			return nil, errors.Wrap(err, "failed to remap git checkout")
		}
	}

	lm.Unmount()
	lm = nil
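A fresh git checkout is written by processes running as real root, so under a userns-remapped daemon the tree has to be re-owned to whatever root maps to before the snapshot is committed. The same walk as above, pulled out into a reusable helper (the name is illustrative; the idtools, os, and filepath calls are the ones the hunk already uses):

// chownToMappedRoot re-owns every entry under dir to the host identity that
// container root maps to; Lchown is used so symlink targets are not followed.
// A nil mapping means no remapping is configured and the tree is left alone.
func chownToMappedRoot(dir string, idmap *idtools.IdentityMapping) error {
	if idmap == nil {
		return nil
	}
	root := idmap.RootPair()
	return filepath.Walk(dir, func(p string, _ os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		return os.Lchown(p, root.UID, root.GID)
	})
}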
19 vendor/github.com/moby/buildkit/source/http/httpsource.go (generated, vendored)

@@ -15,6 +15,7 @@ import (
	"strings"
	"time"

	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/locker"
	"github.com/moby/buildkit/cache"
	"github.com/moby/buildkit/cache/metadata"

@@ -278,8 +279,22 @@ func (hs *httpSourceHandler) save(ctx context.Context, resp *http.Response) (ref
	}
	f = nil

	if hs.src.UID != 0 || hs.src.GID != 0 {
		if err := os.Chown(fp, hs.src.UID, hs.src.GID); err != nil {
	uid := hs.src.UID
	gid := hs.src.GID
	if idmap := mount.IdentityMapping(); idmap != nil {
		identity, err := idmap.ToHost(idtools.Identity{
			UID: int(uid),
			GID: int(gid),
		})
		if err != nil {
			return nil, "", err
		}
		uid = identity.UID
		gid = identity.GID
	}

	if gid != 0 || uid != 0 {
		if err := os.Chown(fp, uid, gid); err != nil {
			return nil, "", err
		}
	}
22 vendor/github.com/moby/buildkit/source/local/local.go (generated, vendored)

@@ -6,6 +6,7 @@ import (
	"fmt"
	"time"

	"github.com/docker/docker/pkg/idtools"
	"github.com/moby/buildkit/cache"
	"github.com/moby/buildkit/cache/contenthash"
	"github.com/moby/buildkit/cache/metadata"

@@ -19,6 +20,7 @@ import (
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"github.com/tonistiigi/fsutil"
	fstypes "github.com/tonistiigi/fsutil/types"
	bolt "go.etcd.io/bbolt"
	"golang.org/x/time/rate"
	"google.golang.org/grpc/codes"

@@ -153,7 +155,7 @@ func (ls *localSourceHandler) Snapshot(ctx context.Context) (out cache.Immutable
		}
	}()

	cc, err := contenthash.GetCacheContext(ctx, mutable.Metadata())
	cc, err := contenthash.GetCacheContext(ctx, mutable.Metadata(), mount.IdentityMapping())
	if err != nil {
		return nil, err
	}

@@ -165,10 +167,25 @@ func (ls *localSourceHandler) Snapshot(ctx context.Context) (out cache.Immutable
		FollowPaths:      ls.src.FollowPaths,
		OverrideExcludes: false,
		DestDir:          dest,
		CacheUpdater:     &cacheUpdater{cc},
		CacheUpdater:     &cacheUpdater{cc, mount.IdentityMapping()},
		ProgressCb:       newProgressHandler(ctx, "transferring "+ls.src.Name+":"),
	}

	if idmap := mount.IdentityMapping(); idmap != nil {
		opt.Filter = func(p string, stat *fstypes.Stat) bool {
			identity, err := idmap.ToHost(idtools.Identity{
				UID: int(stat.Uid),
				GID: int(stat.Gid),
			})
			if err != nil {
				return false
			}
			stat.Uid = uint32(identity.UID)
			stat.Gid = uint32(identity.GID)
			return true
		}
	}

	if err := filesync.FSSync(ctx, caller, opt); err != nil {
		if status.Code(err) == codes.NotFound {
			return nil, errors.Errorf("local source %s not enabled from the client", ls.src.Name)

@@ -245,6 +262,7 @@ func newProgressHandler(ctx context.Context, id string) func(int, bool) {

type cacheUpdater struct {
	contenthash.CacheContext
	idmap *idtools.IdentityMapping
}

func (cu *cacheUpdater) MarkSupported(bool) {
4 vendor/github.com/moby/buildkit/util/binfmt_misc/amd64_check.go (generated, vendored)

@@ -2,6 +2,6 @@

package binfmt_misc

func amd64Supported() bool {
	return check(Binaryamd64) == nil
func amd64Supported() error {
	return check(Binaryamd64)
}
4 vendor/github.com/moby/buildkit/util/binfmt_misc/amd64_check_amd64.go (generated, vendored)

@@ -2,6 +2,6 @@

package binfmt_misc

func amd64Supported() bool {
	return true
func amd64Supported() error {
	return nil
}
4 vendor/github.com/moby/buildkit/util/binfmt_misc/arm64_check.go (generated, vendored)

@@ -2,6 +2,6 @@

package binfmt_misc

func arm64Supported() bool {
	return check(Binaryarm64) == nil
func arm64Supported() error {
	return check(Binaryarm64)
}
4 vendor/github.com/moby/buildkit/util/binfmt_misc/arm64_check_arm64.go (generated, vendored)

@@ -2,6 +2,6 @@

package binfmt_misc

func arm64Supported() bool {
	return true
func arm64Supported() error {
	return nil
}
4 vendor/github.com/moby/buildkit/util/binfmt_misc/arm_check.go (generated, vendored)

@@ -2,6 +2,6 @@

package binfmt_misc

func armSupported() bool {
	return check(Binaryarm) == nil
func armSupported() error {
	return check(Binaryarm)
}
4 vendor/github.com/moby/buildkit/util/binfmt_misc/arm_check_arm.go (generated, vendored)

@@ -2,6 +2,6 @@

package binfmt_misc

func armSupported() bool {
	return true
func armSupported() error {
	return nil
}
5 vendor/github.com/moby/buildkit/util/binfmt_misc/check.go (generated, vendored)

@@ -35,5 +35,8 @@ func check(bin string) error {
	}
	f.Close()

	return exec.Command(pp).Run()
	cmd := exec.Command("/check")
	withChroot(cmd, tmpdir)
	err = cmd.Run()
	return err
}
14 vendor/github.com/moby/buildkit/util/binfmt_misc/check_unix.go (generated, vendored, Normal file)

@@ -0,0 +1,14 @@
// +build !windows

package binfmt_misc

import (
	"os/exec"
	"syscall"
)

func withChroot(cmd *exec.Cmd, dir string) {
	cmd.SysProcAttr = &syscall.SysProcAttr{
		Chroot: dir,
	}
}
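The probe now copies a tiny pre-built binary for the target architecture into a temporary directory and runs it from inside a chroot of that directory, with withChroot supplying the unix implementation above and a no-op on Windows. Condensed into one place, assuming a tmpdir that already contains the probe binary at /check, as check() arranges:

// runEmulationProbe executes the architecture probe from a chroot; a nil
// error means the kernel could run the foreign binary (natively or through
// a registered binfmt_misc handler).
func runEmulationProbe(tmpdir string) error {
	cmd := exec.Command("/check") // path resolved inside the chroot
	withChroot(cmd, tmpdir)       // sets SysProcAttr.Chroot on unix, no-op on windows
	return cmd.Run()
}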
10 vendor/github.com/moby/buildkit/util/binfmt_misc/check_windows.go generated vendored Normal file

@@ -0,0 +1,10 @@
// +build windows

package binfmt_misc

import (
    "os/exec"
)

func withChroot(cmd *exec.Cmd, dir string) {
}
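On Unix the probe binary is now confined by running it under a chroot of the temporary directory instead of executing it by absolute path. A minimal sketch of that pattern, independent of buildkit; the directory path and the runProbe helper name are illustrative, and the snippet only builds on Unix because SysProcAttr.Chroot is not available on Windows.

package main

import (
    "log"
    "os/exec"
    "syscall"
)

// runProbe executes the binary at /check inside dir, using dir as the chroot
// so the probe cannot see the rest of the host filesystem.
func runProbe(dir string) error {
    cmd := exec.Command("/check")
    cmd.SysProcAttr = &syscall.SysProcAttr{Chroot: dir}
    return cmd.Run()
}

func main() {
    if err := runProbe("/tmp/binfmt-probe"); err != nil {
        log.Println("probe failed:", err)
    }
}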
45 vendor/github.com/moby/buildkit/util/binfmt_misc/detect.go generated vendored

@@ -5,6 +5,7 @@ import (
    "sync"

    "github.com/containerd/containerd/platforms"
    "github.com/sirupsen/logrus"
)

var once sync.Once

@@ -14,19 +15,53 @@ func SupportedPlatforms() []string {
    once.Do(func() {
        def := platforms.DefaultString()
        arr = append(arr, def)

        if p := "linux/amd64"; def != p && amd64Supported() {
        if p := "linux/amd64"; def != p && amd64Supported() == nil {
            arr = append(arr, p)
        }
        if p := "linux/arm64"; def != p && arm64Supported() {
        if p := "linux/arm64"; def != p && arm64Supported() == nil {
            arr = append(arr, p)
        }
        if !strings.HasPrefix(def, "linux/arm/") && armSupported() {
        if !strings.HasPrefix(def, "linux/arm/") && armSupported() == nil {
            arr = append(arr, "linux/arm/v7", "linux/arm/v6")
        } else if def == "linux/arm/v7" {
            arr = append(arr, "linux/arm/v6")
        }
    })

    return arr
}

//WarnIfUnsupported validates the platforms and show warning message if there is,
//the end user could fix the issue based on those warning, and thus no need to drop
//the platform from the candidates.
func WarnIfUnsupported(pfs []string) {
    def := platforms.DefaultString()
    for _, p := range pfs {
        if p != def {
            if p == "linux/amd64" {
                if err := amd64Supported(); err != nil {
                    printPlatfromWarning(p, err)
                }
            }
            if p == "linux/arm64" {
                if err := arm64Supported(); err != nil {
                    printPlatfromWarning(p, err)
                }
            }
            if strings.HasPrefix(p, "linux/arm/v6") || strings.HasPrefix(p, "linux/arm/v7") {
                if err := armSupported(); err != nil {
                    printPlatfromWarning(p, err)
                }
            }
        }
    }
}

func printPlatfromWarning(p string, err error) {
    if strings.Contains(err.Error(), "exec format error") {
        logrus.Warnf("platform %s cannot pass the validation, kernel support for miscellaneous binary may have not enabled.", p)
    } else if strings.Contains(err.Error(), "no such file or directory") {
        logrus.Warnf("platforms %s cannot pass the validation, '-F' flag might have not set for 'binfmt_misc'.", p)
    } else {
        logrus.Warnf("platforms %s cannot pass the validation: %s", p, err.Error())
    }
}
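With the per-architecture checks now returning errors, WarnIfUnsupported can surface why a requested platform fails emulation instead of silently dropping it. A minimal sketch of how a caller might combine the two exported helpers above; the requested platform list is illustrative.

package main

import (
    "fmt"

    "github.com/moby/buildkit/util/binfmt_misc"
)

func main() {
    // Platforms the user asked to build for; warn about unusable ones but keep them.
    requested := []string{"linux/amd64", "linux/arm64"}
    binfmt_misc.WarnIfUnsupported(requested)

    // SupportedPlatforms probes the available emulators once and caches the result.
    fmt.Println(binfmt_misc.SupportedPlatforms())
}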
24 vendor/github.com/moby/buildkit/util/entitlements/entitlements.go generated vendored

@@ -1,26 +1,19 @@
package entitlements

import "github.com/pkg/errors"
import (
    "github.com/pkg/errors"
)

type Entitlement string

const (
    EntitlementSecurityConfined   Entitlement = "security.confined"
    EntitlementSecurityUnconfined Entitlement = "security.unconfined" // unimplemented
    EntitlementNetworkHost        Entitlement = "network.host"
    EntitlementNetworkNone        Entitlement = "network.none"
    EntitlementSecurityInsecure   Entitlement = "security.insecure"
    EntitlementNetworkHost        Entitlement = "network.host"
)

var all = map[Entitlement]struct{}{
    EntitlementSecurityConfined:   {},
    EntitlementSecurityUnconfined: {},
    EntitlementNetworkHost:        {},
    EntitlementNetworkNone:        {},
}

var defaults = map[Entitlement]struct{}{
    EntitlementSecurityConfined: {},
    EntitlementNetworkNone:      {},
    EntitlementSecurityInsecure: {},
    EntitlementNetworkHost:      {},
}

func Parse(s string) (Entitlement, error) {

@@ -56,9 +49,6 @@ func WhiteList(allowed, supported []Entitlement) (Set, error) {
        m[e] = struct{}{}
    }

    for e := range defaults {
        m[e] = struct{}{}
    }
    return Set(m), nil
}
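The entitlement names are now reduced to security.insecure and network.host, and WhiteList no longer mixes in implicit defaults: only what the daemon explicitly supports gets through. A minimal sketch of building a whitelist from user-supplied strings, assuming only the Parse and WhiteList signatures shown in this hunk; the allowed values and error handling are illustrative.

package main

import (
    "log"

    "github.com/moby/buildkit/util/entitlements"
)

func main() {
    // Entitlements the daemon is willing to grant.
    supported := []entitlements.Entitlement{entitlements.EntitlementNetworkHost}

    // Entitlements requested by the user, as strings from config or flags.
    var allowed []entitlements.Entitlement
    for _, s := range []string{"network.host"} {
        e, err := entitlements.Parse(s)
        if err != nil {
            log.Fatal(err)
        }
        allowed = append(allowed, e)
    }

    set, err := entitlements.WhiteList(allowed, supported)
    if err != nil {
        log.Fatal(err) // e.g. a requested entitlement the daemon does not support
    }
    _ = set // consulted later when a build asks for an entitlement
}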
67 vendor/github.com/moby/buildkit/util/entitlements/security_linux.go generated vendored Normal file

@@ -0,0 +1,67 @@
package entitlements

import (
    "context"

    "github.com/containerd/containerd/containers"
    "github.com/containerd/containerd/oci"
    specs "github.com/opencontainers/runtime-spec/specs-go"
)

// WithInsecureSpec sets spec with All capability.
func WithInsecureSpec() oci.SpecOpts {
    return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {
        addCaps := []string{
            "CAP_FSETID",
            "CAP_KILL",
            "CAP_FOWNER",
            "CAP_MKNOD",
            "CAP_CHOWN",
            "CAP_DAC_OVERRIDE",
            "CAP_NET_RAW",
            "CAP_SETGID",
            "CAP_SETUID",
            "CAP_SETPCAP",
            "CAP_SETFCAP",
            "CAP_NET_BIND_SERVICE",
            "CAP_SYS_CHROOT",
            "CAP_AUDIT_WRITE",
            "CAP_MAC_ADMIN",
            "CAP_MAC_OVERRIDE",
            "CAP_DAC_READ_SEARCH",
            "CAP_SYS_PTRACE",
            "CAP_SYS_MODULE",
            "CAP_SYSLOG",
            "CAP_SYS_RAWIO",
            "CAP_SYS_ADMIN",
            "CAP_LINUX_IMMUTABLE",
            "CAP_SYS_BOOT",
            "CAP_SYS_NICE",
            "CAP_SYS_PACCT",
            "CAP_SYS_TTY_CONFIG",
            "CAP_SYS_TIME",
            "CAP_WAKE_ALARM",
            "CAP_AUDIT_READ",
            "CAP_AUDIT_CONTROL",
            "CAP_SYS_RESOURCE",
            "CAP_BLOCK_SUSPEND",
            "CAP_IPC_LOCK",
            "CAP_IPC_OWNER",
            "CAP_LEASE",
            "CAP_NET_ADMIN",
            "CAP_NET_BROADCAST",
        }
        for _, cap := range addCaps {
            s.Process.Capabilities.Bounding = append(s.Process.Capabilities.Bounding, cap)
            s.Process.Capabilities.Ambient = append(s.Process.Capabilities.Ambient, cap)
            s.Process.Capabilities.Effective = append(s.Process.Capabilities.Effective, cap)
            s.Process.Capabilities.Inheritable = append(s.Process.Capabilities.Inheritable, cap)
            s.Process.Capabilities.Permitted = append(s.Process.Capabilities.Permitted, cap)
        }
        s.Linux.ReadonlyPaths = []string{}
        s.Linux.MaskedPaths = []string{}
        s.Process.ApparmorProfile = ""

        return nil
    }
}
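WithInsecureSpec backs the security.insecure entitlement: it widens every capability set, clears the read-only and masked paths, and drops the AppArmor profile. A minimal sketch of applying the returned SpecOpts to a runtime spec, assuming it is acceptable to pass nil for the client and container since the closure above ignores them; the pre-populated spec fields are illustrative and this file is Linux-only.

package main

import (
    "context"
    "fmt"

    "github.com/moby/buildkit/util/entitlements"
    specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
    // The spec must already have Process.Capabilities and Linux populated,
    // since the SpecOpts mutates them in place.
    s := &specs.Spec{
        Process: &specs.Process{Capabilities: &specs.LinuxCapabilities{}},
        Linux:   &specs.Linux{},
    }
    if err := entitlements.WithInsecureSpec()(context.Background(), nil, nil, s); err != nil {
        fmt.Println(err)
        return
    }
    fmt.Println(len(s.Process.Capabilities.Bounding), "capabilities in bounding set")
}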
90 vendor/github.com/tonistiigi/fsutil/fs.go generated vendored

@@ -3,9 +3,11 @@ package fsutil
import (
    "context"
    "io"
    "io/ioutil"
    "os"
    "path"
    "path/filepath"
    "sort"
    "strings"

    "github.com/pkg/errors"

@@ -37,36 +39,80 @@ func (fs *fs) Open(p string) (io.ReadCloser, error) {
    return os.Open(filepath.Join(fs.root, p))
}

func SubDirFS(fs FS, stat types.Stat) FS {
    return &subDirFS{fs: fs, stat: stat}
type Dir struct {
    Stat types.Stat
    FS   FS
}

func SubDirFS(dirs []Dir) (FS, error) {
    sort.Slice(dirs, func(i, j int) bool {
        return dirs[i].Stat.Path < dirs[j].Stat.Path
    })
    m := map[string]Dir{}
    for _, d := range dirs {
        if path.Base(d.Stat.Path) != d.Stat.Path {
            return nil, errors.Errorf("subdir %s must be single file", d.Stat.Path)
        }
        if _, ok := m[d.Stat.Path]; ok {
            return nil, errors.Errorf("invalid path %s", d.Stat.Path)
        }
        m[d.Stat.Path] = d
    }
    return &subDirFS{m: m, dirs: dirs}, nil
}

type subDirFS struct {
    fs   FS
    stat types.Stat
    m    map[string]Dir
    dirs []Dir
}

func (fs *subDirFS) Walk(ctx context.Context, fn filepath.WalkFunc) error {
    main := &StatInfo{Stat: &fs.stat}
    if !main.IsDir() {
        return errors.Errorf("fs subdir not mode directory")
    }
    if main.Name() != fs.stat.Path {
        return errors.Errorf("subdir path must be single file")
    }
    if err := fn(fs.stat.Path, main, nil); err != nil {
        return err
    }
    return fs.fs.Walk(ctx, func(p string, fi os.FileInfo, err error) error {
        stat, ok := fi.Sys().(*types.Stat)
        if !ok {
            return errors.Wrapf(err, "invalid fileinfo without stat info: %s", p)
    for _, d := range fs.dirs {
        fi := &StatInfo{Stat: &d.Stat}
        if !fi.IsDir() {
            return errors.Errorf("fs subdir %s not mode directory", d.Stat.Path)
        }
        stat.Path = path.Join(fs.stat.Path, stat.Path)
        return fn(filepath.Join(fs.stat.Path, p), &StatInfo{stat}, nil)
    })
        if err := fn(d.Stat.Path, fi, nil); err != nil {
            return err
        }
        if err := d.FS.Walk(ctx, func(p string, fi os.FileInfo, err error) error {
            stat, ok := fi.Sys().(*types.Stat)
            if !ok {
                return errors.Wrapf(err, "invalid fileinfo without stat info: %s", p)
            }
            stat.Path = path.Join(d.Stat.Path, stat.Path)
            if stat.Linkname != "" {
                if fi.Mode()&os.ModeSymlink != 0 {
                    if strings.HasPrefix(stat.Linkname, "/") {
                        stat.Linkname = path.Join("/"+d.Stat.Path, stat.Linkname)
                    }
                } else {
                    stat.Linkname = path.Join(d.Stat.Path, stat.Linkname)
                }
            }
            return fn(filepath.Join(d.Stat.Path, p), &StatInfo{stat}, nil)
        }); err != nil {
            return err
        }
    }
    return nil
}

func (fs *subDirFS) Open(p string) (io.ReadCloser, error) {
    return fs.fs.Open(strings.TrimPrefix(p, fs.stat.Path+"/"))
    parts := strings.SplitN(filepath.Clean(p), string(filepath.Separator), 2)
    if len(parts) == 0 {
        return ioutil.NopCloser(&emptyReader{}), nil
    }
    d, ok := fs.m[parts[0]]
    if !ok {
        return nil, os.ErrNotExist
    }
    return d.FS.Open(parts[1])
}

type emptyReader struct {
}

func (*emptyReader) Read([]byte) (int, error) {
    return 0, io.EOF
}
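SubDirFS now composes several sources into one virtual filesystem, each mounted under a single-segment top-level directory, with symlink targets and walked paths rewritten to include the prefix. A minimal sketch of composing two sources this way; the fsutil.NewFS(root, *WalkOpt) constructor, the source paths, and the mode bits are assumptions based on this revision of fsutil rather than anything shown in the diff.

package main

import (
    "context"
    "log"
    "os"
    "path/filepath"

    "github.com/tonistiigi/fsutil"
    "github.com/tonistiigi/fsutil/types"
)

func main() {
    // Helper to mount a local root under a single-segment name.
    dir := func(name, root string) fsutil.Dir {
        return fsutil.Dir{
            Stat: types.Stat{Path: name, Mode: uint32(os.ModeDir | 0755)},
            FS:   fsutil.NewFS(root, &fsutil.WalkOpt{}),
        }
    }

    combined, err := fsutil.SubDirFS([]fsutil.Dir{
        dir("context", "/tmp/ctx"),
        dir("dockerfile", "/tmp/df"),
    })
    if err != nil {
        log.Fatal(err)
    }

    // Every walked path is now prefixed with its Dir name, e.g. context/main.go.
    err = combined.Walk(context.Background(), func(p string, fi os.FileInfo, err error) error {
        log.Println(filepath.ToSlash(p))
        return err
    })
    if err != nil {
        log.Fatal(err)
    }
}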
72 vendor/github.com/tonistiigi/fsutil/tarwriter.go generated vendored Normal file

@@ -0,0 +1,72 @@
package fsutil

import (
    "archive/tar"
    "context"
    "io"
    "os"
    "path/filepath"
    "strings"

    "github.com/pkg/errors"
    "github.com/tonistiigi/fsutil/types"
)

func WriteTar(ctx context.Context, fs FS, w io.Writer) error {
    tw := tar.NewWriter(w)
    err := fs.Walk(ctx, func(path string, fi os.FileInfo, err error) error {
        stat, ok := fi.Sys().(*types.Stat)
        if !ok {
            return errors.Wrapf(err, "invalid fileinfo without stat info: %s", path)
        }
        hdr, err := tar.FileInfoHeader(fi, stat.Linkname)
        if err != nil {
            return err
        }

        name := filepath.ToSlash(path)
        if fi.IsDir() && !strings.HasSuffix(name, "/") {
            name += "/"
        }
        hdr.Name = name

        hdr.Uid = int(stat.Uid)
        hdr.Gid = int(stat.Gid)
        hdr.Devmajor = stat.Devmajor
        hdr.Devminor = stat.Devminor
        hdr.Linkname = stat.Linkname
        if hdr.Linkname != "" {
            hdr.Size = 0
            hdr.Typeflag = tar.TypeLink
        }

        if len(stat.Xattrs) > 0 {
            hdr.PAXRecords = map[string]string{}
        }
        for k, v := range stat.Xattrs {
            hdr.PAXRecords["SCHILY.xattr."+k] = string(v)
        }

        if err := tw.WriteHeader(hdr); err != nil {
            return errors.Wrap(err, "failed to write file header")
        }

        if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 && hdr.Linkname == "" {
            rc, err := fs.Open(path)
            if err != nil {
                return err
            }
            if _, err := io.Copy(tw, rc); err != nil {
                return err
            }
            if err := rc.Close(); err != nil {
                return err
            }
        }
        return nil
    })
    if err != nil {
        return err
    }
    return tw.Close()
}
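WriteTar walks an FS and streams it as a tar archive, mapping ownership, device numbers, links, and xattrs from the fsutil Stat onto the tar headers, which is what the new tar exporter builds on. A minimal sketch of streaming a local directory into a tar file with this helper; fsutil.NewFS(root, *WalkOpt) is assumed to be the constructor at this revision, and the paths are illustrative.

package main

import (
    "context"
    "log"
    "os"

    "github.com/tonistiigi/fsutil"
)

func main() {
    out, err := os.Create("/tmp/context.tar")
    if err != nil {
        log.Fatal(err)
    }
    defer out.Close()

    // Wrap the source directory as an fsutil.FS, then stream it as tar.
    srcFS := fsutil.NewFS("/tmp/build-context", &fsutil.WalkOpt{})
    if err := fsutil.WriteTar(context.Background(), srcFS, out); err != nil {
        log.Fatal(err)
    }
}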