// +build linux

package plugin

import (
	"archive/tar"
	"compress/gzip"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"os"
	"path"
	"path/filepath"
	"strings"

	"github.com/Sirupsen/logrus"
	"github.com/docker/distribution/manifest/schema2"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/distribution"
	progressutils "github.com/docker/docker/distribution/utils"
	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/image"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/chrootarchive"
	"github.com/docker/docker/pkg/pools"
	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/plugin/v2"
	"github.com/docker/docker/reference"
	"github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
	"golang.org/x/net/context"
)

// Disable deactivates a plugin. This means resources (volumes, networks) can't use the plugin.
func (pm *Manager) Disable(refOrID string, config *types.PluginDisableConfig) error {
	p, err := pm.config.Store.GetV2Plugin(refOrID)
	if err != nil {
		return err
	}
	pm.mu.RLock()
	c := pm.cMap[p]
	pm.mu.RUnlock()

	if !config.ForceDisable && p.GetRefCount() > 0 {
		return fmt.Errorf("plugin %s is in use", p.Name())
	}

	if err := pm.disable(p, c); err != nil {
		return err
	}
	pm.config.LogPluginEvent(p.GetID(), refOrID, "disable")
	return nil
}

// Enable activates a plugin, which implies that it is ready to be used by containers.
func (pm *Manager) Enable(refOrID string, config *types.PluginEnableConfig) error {
	p, err := pm.config.Store.GetV2Plugin(refOrID)
	if err != nil {
		return err
	}

	c := &controller{timeoutInSecs: config.Timeout}
	if err := pm.enable(p, c, false); err != nil {
		return err
	}
	pm.config.LogPluginEvent(p.GetID(), refOrID, "enable")
	return nil
}

// Inspect examines a plugin config
func (pm *Manager) Inspect(refOrID string) (tp *types.Plugin, err error) {
	p, err := pm.config.Store.GetV2Plugin(refOrID)
	if err != nil {
		return nil, err
	}

	return &p.PluginObj, nil
}

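// pull fetches a plugin from a registry via the distribution package, wiring
// pull progress to outStream when one is provided.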
func (pm *Manager) pull(ctx context.Context, ref reference.Named, config *distribution.ImagePullConfig, outStream io.Writer) error {
	if outStream != nil {
		// Include a buffer so that slow client connections don't affect
		// transfer performance.
		progressChan := make(chan progress.Progress, 100)

		writesDone := make(chan struct{})

		defer func() {
			close(progressChan)
			<-writesDone
		}()

		var cancelFunc context.CancelFunc
		ctx, cancelFunc = context.WithCancel(ctx)

		go func() {
			progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan)
			close(writesDone)
		}()

		config.ProgressOutput = progress.ChanOutput(progressChan)
	} else {
		config.ProgressOutput = progress.DiscardOutput()
	}
	return distribution.Pull(ctx, ref, config)
}

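// tempConfigStore holds a single pulled plugin config blob and its digest in
// memory; it backs the config-only pull performed by Privileges.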
type tempConfigStore struct {
	config       []byte
	configDigest digest.Digest
}

func (s *tempConfigStore) Put(c []byte) (digest.Digest, error) {
	dgst := digest.FromBytes(c)

	s.config = c
	s.configDigest = dgst

	return dgst, nil
}

func (s *tempConfigStore) Get(d digest.Digest) ([]byte, error) {
	if d != s.configDigest {
		return nil, fmt.Errorf("digest not found")
	}
	return s.config, nil
}

func (s *tempConfigStore) RootFSFromConfig(c []byte) (*image.RootFS, error) {
	return configToRootFS(c)
}

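// computePrivileges derives the privileges a plugin requires from its config:
// non-default network types, host mounts, host devices, device creation, and
// additional capabilities.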
func computePrivileges(c types.PluginConfig) (types.PluginPrivileges, error) {
	var privileges types.PluginPrivileges
	if c.Network.Type != "null" && c.Network.Type != "bridge" && c.Network.Type != "" {
		privileges = append(privileges, types.PluginPrivilege{
			Name:        "network",
			Description: "permissions to access a network",
			Value:       []string{c.Network.Type},
		})
	}
	for _, mount := range c.Mounts {
		if mount.Source != nil {
			privileges = append(privileges, types.PluginPrivilege{
				Name:        "mount",
				Description: "host path to mount",
				Value:       []string{*mount.Source},
			})
		}
	}
	for _, device := range c.Linux.Devices {
		if device.Path != nil {
			privileges = append(privileges, types.PluginPrivilege{
				Name:        "device",
				Description: "host device to access",
				Value:       []string{*device.Path},
			})
		}
	}
	if c.Linux.DeviceCreation {
		privileges = append(privileges, types.PluginPrivilege{
			Name:        "device-creation",
			Description: "allow creating devices inside plugin",
			Value:       []string{"true"},
		})
	}
	if len(c.Linux.Capabilities) > 0 {
		privileges = append(privileges, types.PluginPrivilege{
			Name:        "capabilities",
			Description: "list of additional capabilities required",
			Value:       c.Linux.Capabilities,
		})
	}

	return privileges, nil
}

// Privileges pulls a plugin config and computes the privileges required to install it.
func (pm *Manager) Privileges(ctx context.Context, ref reference.Named, metaHeader http.Header, authConfig *types.AuthConfig) (types.PluginPrivileges, error) {
	// create image store instance
	cs := &tempConfigStore{}

	// DownloadManager is not set because only the configuration is pulled.
	pluginPullConfig := &distribution.ImagePullConfig{
		Config: distribution.Config{
			MetaHeaders:      metaHeader,
			AuthConfig:       authConfig,
			RegistryService:  pm.config.RegistryService,
			ImageEventLogger: func(string, string, string) {},
			ImageStore:       cs,
		},
		Schema2Types: distribution.PluginTypes,
	}

	if err := pm.pull(ctx, ref, pluginPullConfig, nil); err != nil {
		return nil, err
	}

	if cs.config == nil {
		return nil, errors.New("no configuration pulled")
	}
	var config types.PluginConfig
	if err := json.Unmarshal(cs.config, &config); err != nil {
		return nil, err
	}

	return computePrivileges(config)
}

// Pull pulls a plugin, checks if the correct privileges are provided, and installs the plugin.
func (pm *Manager) Pull(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer) (err error) {
	pm.muGC.RLock()
	defer pm.muGC.RUnlock()

	// revalidate because Pull is public
	nameref, err := reference.ParseNamed(name)
	if err != nil {
		return errors.Wrapf(err, "failed to parse %q", name)
	}
	name = reference.WithDefaultTag(nameref).String()

	if err := pm.config.Store.validateName(name); err != nil {
		return err
	}

	tmpRootFSDir, err := ioutil.TempDir(pm.tmpDir(), ".rootfs")
	if err != nil {
		return errors.Wrap(err, "failed to create temp directory")
	}
	defer os.RemoveAll(tmpRootFSDir)

	dm := &downloadManager{
		tmpDir:    tmpRootFSDir,
		blobStore: pm.blobStore,
	}

	pluginPullConfig := &distribution.ImagePullConfig{
		Config: distribution.Config{
			MetaHeaders:      metaHeader,
			AuthConfig:       authConfig,
			RegistryService:  pm.config.RegistryService,
			ImageEventLogger: pm.config.LogPluginEvent,
			ImageStore:       dm,
		},
		DownloadManager: dm, // todo: reevaluate if possible to substitute distribution/xfer dependencies instead
		Schema2Types:    distribution.PluginTypes,
	}

	err = pm.pull(ctx, ref, pluginPullConfig, outStream)
	if err != nil {
		go pm.GC()
		return err
	}

	if _, err := pm.createPlugin(name, dm.configDigest, dm.blobs, tmpRootFSDir, &privileges); err != nil {
		return err
	}

	return nil
}

// List displays the list of plugins and associated metadata.
func (pm *Manager) List() ([]types.Plugin, error) {
	plugins := pm.config.Store.GetAll()
	out := make([]types.Plugin, 0, len(plugins))
	for _, p := range plugins {
		out = append(out, p.PluginObj)
	}
	return out, nil
}

// Push pushes a plugin to the store.
func (pm *Manager) Push(ctx context.Context, name string, metaHeader http.Header, authConfig *types.AuthConfig, outStream io.Writer) error {
	p, err := pm.config.Store.GetV2Plugin(name)
	if err != nil {
		return err
	}

	ref, err := reference.ParseNamed(p.Name())
	if err != nil {
		return errors.Wrapf(err, "plugin has invalid name %v for push", p.Name())
	}

	var po progress.Output
	if outStream != nil {
		// Include a buffer so that slow client connections don't affect
		// transfer performance.
		progressChan := make(chan progress.Progress, 100)

		writesDone := make(chan struct{})

		defer func() {
			close(progressChan)
			<-writesDone
		}()

		var cancelFunc context.CancelFunc
		ctx, cancelFunc = context.WithCancel(ctx)

		go func() {
			progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan)
			close(writesDone)
		}()

		po = progress.ChanOutput(progressChan)
	} else {
		po = progress.DiscardOutput()
	}

	// TODO: replace these with manager
	is := &pluginConfigStore{
		pm:     pm,
		plugin: p,
	}
	ls := &pluginLayerProvider{
		pm:     pm,
		plugin: p,
	}
	rs := &pluginReference{
		name:     ref,
		pluginID: p.Config,
	}

	uploadManager := xfer.NewLayerUploadManager(3)

	imagePushConfig := &distribution.ImagePushConfig{
		Config: distribution.Config{
			MetaHeaders:      metaHeader,
			AuthConfig:       authConfig,
			ProgressOutput:   po,
			RegistryService:  pm.config.RegistryService,
			ReferenceStore:   rs,
			ImageEventLogger: pm.config.LogPluginEvent,
			ImageStore:       is,
			RequireSchema2:   true,
		},
		ConfigMediaType: schema2.MediaTypePluginConfig,
		LayerStore:      ls,
		UploadManager:   uploadManager,
	}

	return distribution.Push(ctx, ref, imagePushConfig)
}

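// pluginReference is a minimal read-only reference store exposing a single
// plugin name/ID pair to the push code; tag and digest mutations are no-ops.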
type pluginReference struct {
	name     reference.Named
	pluginID digest.Digest
}

func (r *pluginReference) References(id digest.Digest) []reference.Named {
	if r.pluginID != id {
		return nil
	}
	return []reference.Named{r.name}
}

func (r *pluginReference) ReferencesByName(ref reference.Named) []reference.Association {
	return []reference.Association{
		{
			Ref: r.name,
			ID:  r.pluginID,
		},
	}
}

func (r *pluginReference) Get(ref reference.Named) (digest.Digest, error) {
	if r.name.String() != ref.String() {
		return digest.Digest(""), reference.ErrDoesNotExist
	}
	return r.pluginID, nil
}

func (r *pluginReference) AddTag(ref reference.Named, id digest.Digest, force bool) error {
	// Read only, ignore
	return nil
}

func (r *pluginReference) AddDigest(ref reference.Canonical, id digest.Digest, force bool) error {
	// Read only, ignore
	return nil
}

func (r *pluginReference) Delete(ref reference.Named) (bool, error) {
	// Read only, ignore
	return false, nil
}

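// pluginConfigStore serves a plugin's config blob from the manager's blob store
// during push; storing new configs through it is not supported.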
type pluginConfigStore struct {
	pm     *Manager
	plugin *v2.Plugin
}

func (s *pluginConfigStore) Put([]byte) (digest.Digest, error) {
	return digest.Digest(""), errors.New("cannot store config on push")
}

func (s *pluginConfigStore) Get(d digest.Digest) ([]byte, error) {
	if s.plugin.Config != d {
		return nil, errors.New("plugin not found")
	}
	rwc, err := s.pm.blobStore.Get(d)
	if err != nil {
		return nil, err
	}
	defer rwc.Close()
	return ioutil.ReadAll(rwc)
}

func (s *pluginConfigStore) RootFSFromConfig(c []byte) (*image.RootFS, error) {
	return configToRootFS(c)
}

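// pluginLayerProvider resolves a plugin's layers by chain ID so that the push
// code can upload them from the blob store.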
type pluginLayerProvider struct {
	pm     *Manager
	plugin *v2.Plugin
}

func (p *pluginLayerProvider) Get(id layer.ChainID) (distribution.PushLayer, error) {
	rootFS := rootFSFromPlugin(p.plugin.PluginObj.Config.Rootfs)
	var i int
	for i = 1; i <= len(rootFS.DiffIDs); i++ {
		if layer.CreateChainID(rootFS.DiffIDs[:i]) == id {
			break
		}
	}
	if i > len(rootFS.DiffIDs) {
		return nil, errors.New("layer not found")
	}
	return &pluginLayer{
		pm:      p.pm,
		diffIDs: rootFS.DiffIDs[:i],
		blobs:   p.plugin.Blobsums[:i],
	}, nil
}

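// pluginLayer is a single layer of a plugin's root filesystem, backed by the
// manager's blob store.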
type pluginLayer struct {
	pm      *Manager
	diffIDs []layer.DiffID
	blobs   []digest.Digest
}

func (l *pluginLayer) ChainID() layer.ChainID {
	return layer.CreateChainID(l.diffIDs)
}

func (l *pluginLayer) DiffID() layer.DiffID {
	return l.diffIDs[len(l.diffIDs)-1]
}

func (l *pluginLayer) Parent() distribution.PushLayer {
	if len(l.diffIDs) == 1 {
		return nil
	}
	return &pluginLayer{
		pm:      l.pm,
		diffIDs: l.diffIDs[:len(l.diffIDs)-1],
		blobs:   l.blobs[:len(l.diffIDs)-1],
	}
}

func (l *pluginLayer) Open() (io.ReadCloser, error) {
	return l.pm.blobStore.Get(l.blobs[len(l.diffIDs)-1])
}

func (l *pluginLayer) Size() (int64, error) {
	return l.pm.blobStore.Size(l.blobs[len(l.diffIDs)-1])
}

func (l *pluginLayer) MediaType() string {
	return schema2.MediaTypeLayer
}

func (l *pluginLayer) Release() {
	// Nothing needs to be released, no references held
}

// Remove deletes the plugin's root directory.
func (pm *Manager) Remove(name string, config *types.PluginRmConfig) error {
	p, err := pm.config.Store.GetV2Plugin(name)
	pm.mu.RLock()
	c := pm.cMap[p]
	pm.mu.RUnlock()

	if err != nil {
		return err
	}

	if !config.ForceRemove {
		if p.GetRefCount() > 0 {
			return fmt.Errorf("plugin %s is in use", p.Name())
		}
		if p.IsEnabled() {
			return fmt.Errorf("plugin %s is enabled", p.Name())
		}
	}

	if p.IsEnabled() {
		if err := pm.disable(p, c); err != nil {
			logrus.Errorf("failed to disable plugin '%s': %s", p.Name(), err)
		}
	}

	defer func() {
		go pm.GC()
	}()

	id := p.GetID()
	pm.config.Store.Remove(p)
	pluginDir := filepath.Join(pm.config.Root, id)
	if err := os.RemoveAll(pluginDir); err != nil {
		logrus.Warnf("unable to remove %q from plugin remove: %v", pluginDir, err)
	}
	pm.config.LogPluginEvent(id, name, "remove")
	return nil
}

// Set sets plugin args
func (pm *Manager) Set(name string, args []string) error {
	p, err := pm.config.Store.GetV2Plugin(name)
	if err != nil {
		return err
	}
	if err := p.Set(args); err != nil {
		return err
	}
	return pm.save(p)
}

// CreateFromContext creates a plugin from the given tar context, which contains
// both the rootfs and the config.json, and a repo name with an optional tag.
func (pm *Manager) CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *types.PluginCreateOptions) (err error) {
	pm.muGC.RLock()
	defer pm.muGC.RUnlock()

	ref, err := reference.ParseNamed(options.RepoName)
	if err != nil {
		return errors.Wrapf(err, "failed to parse reference %v", options.RepoName)
	}
	if _, ok := ref.(reference.Canonical); ok {
		return errors.Errorf("canonical references are not permitted")
	}
	name := reference.WithDefaultTag(ref).String()

	if err := pm.config.Store.validateName(name); err != nil { // fast check, real check is in createPlugin()
		return err
	}

	tmpRootFSDir, err := ioutil.TempDir(pm.tmpDir(), ".rootfs")
	defer os.RemoveAll(tmpRootFSDir)
	if err != nil {
		return errors.Wrap(err, "failed to create temp directory")
	}
	var configJSON []byte
	rootFS := splitConfigRootFSFromTar(tarCtx, &configJSON)

	rootFSBlob, err := pm.blobStore.New()
	if err != nil {
		return err
	}
	defer rootFSBlob.Close()
	gzw := gzip.NewWriter(rootFSBlob)
	layerDigester := digest.Canonical.Digester()
	rootFSReader := io.TeeReader(rootFS, io.MultiWriter(gzw, layerDigester.Hash()))

	if err := chrootarchive.Untar(rootFSReader, tmpRootFSDir, nil); err != nil {
		return err
	}
	if err := rootFS.Close(); err != nil {
		return err
	}

	if configJSON == nil {
		return errors.New("config not found")
	}

	if err := gzw.Close(); err != nil {
		return errors.Wrap(err, "error closing gzip writer")
	}

	var config types.PluginConfig
	if err := json.Unmarshal(configJSON, &config); err != nil {
		return errors.Wrap(err, "failed to parse config")
	}

	if err := pm.validateConfig(config); err != nil {
		return err
	}

	pm.mu.Lock()
	defer pm.mu.Unlock()

	rootFSBlobsum, err := rootFSBlob.Commit()
	if err != nil {
		return err
	}
	defer func() {
		if err != nil {
			go pm.GC()
		}
	}()

	config.Rootfs = &types.PluginConfigRootfs{
		Type:    "layers",
		DiffIds: []string{layerDigester.Digest().String()},
	}

	configBlob, err := pm.blobStore.New()
	if err != nil {
		return err
	}
	defer configBlob.Close()
	if err := json.NewEncoder(configBlob).Encode(config); err != nil {
		return errors.Wrap(err, "error encoding json config")
	}
	configBlobsum, err := configBlob.Commit()
	if err != nil {
		return err
	}

	p, err := pm.createPlugin(name, configBlobsum, []digest.Digest{rootFSBlobsum}, tmpRootFSDir, nil)
	if err != nil {
		return err
	}

	pm.config.LogPluginEvent(p.PluginObj.ID, name, "create")

	return nil
}

func (pm *Manager) validateConfig(config types.PluginConfig) error {
	return nil // TODO:
}

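// splitConfigRootFSFromTar reads the plugin build context, captures the config
// file into config, and returns a tar stream containing only the rootfs entries
// with the rootfs/ prefix stripped.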
func splitConfigRootFSFromTar(in io.ReadCloser, config *[]byte) io.ReadCloser {
	pr, pw := io.Pipe()
	go func() {
		tarReader := tar.NewReader(in)
		tarWriter := tar.NewWriter(pw)
		defer in.Close()

		hasRootFS := false

		for {
			hdr, err := tarReader.Next()
			if err == io.EOF {
				if !hasRootFS {
					pw.CloseWithError(errors.Wrap(err, "no rootfs found"))
					return
				}
				// Signals end of archive.
				tarWriter.Close()
				pw.Close()
				return
			}
			if err != nil {
				pw.CloseWithError(errors.Wrap(err, "failed to read from tar"))
				return
			}

			content := io.Reader(tarReader)
			name := path.Clean(hdr.Name)
			if path.IsAbs(name) {
				name = name[1:]
			}
			if name == configFileName {
				dt, err := ioutil.ReadAll(content)
				if err != nil {
					pw.CloseWithError(errors.Wrapf(err, "failed to read %s", configFileName))
					return
				}
				*config = dt
			}
			if parts := strings.Split(name, "/"); len(parts) != 0 && parts[0] == rootFSFileName {
				hdr.Name = path.Clean(path.Join(parts[1:]...))
				if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(strings.ToLower(hdr.Linkname), rootFSFileName+"/") {
					hdr.Linkname = hdr.Linkname[len(rootFSFileName)+1:]
				}
				if err := tarWriter.WriteHeader(hdr); err != nil {
					pw.CloseWithError(errors.Wrap(err, "error writing tar header"))
					return
				}
				if _, err := pools.Copy(tarWriter, content); err != nil {
					pw.CloseWithError(errors.Wrap(err, "error copying tar data"))
					return
				}
				hasRootFS = true
			} else {
				io.Copy(ioutil.Discard, content)
			}
		}
	}()
	return pr
}