package plugin // import "github.com/docker/docker/plugin"

import (
	"archive/tar"
	"compress/gzip"
	"context"
	"encoding/json"
	"io"
	"io/ioutil"
	"net/http"
	"os"
	"path"
	"path/filepath"
	"runtime"
	"strings"

	"github.com/docker/distribution/manifest/schema2"
	"github.com/docker/distribution/reference"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/distribution"
	progressutils "github.com/docker/docker/distribution/utils"
	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/dockerversion"
	"github.com/docker/docker/errdefs"
	"github.com/docker/docker/image"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/authorization"
	"github.com/docker/docker/pkg/chrootarchive"
	"github.com/docker/docker/pkg/mount"
	"github.com/docker/docker/pkg/pools"
	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/system"
	"github.com/docker/docker/plugin/v2"
	refstore "github.com/docker/docker/reference"
	"github.com/opencontainers/go-digest"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

var acceptedPluginFilterTags = map[string]bool{
	"enabled":    true,
	"capability": true,
}

// Disable deactivates a plugin. This means resources (volumes, networks) can't use it.
func (pm *Manager) Disable(refOrID string, config *types.PluginDisableConfig) error {
	p, err := pm.config.Store.GetV2Plugin(refOrID)
	if err != nil {
		return err
	}
	pm.mu.RLock()
	c := pm.cMap[p]
	pm.mu.RUnlock()

	if !config.ForceDisable && p.GetRefCount() > 0 {
		return errors.WithStack(inUseError(p.Name()))
	}

	for _, typ := range p.GetTypes() {
		if typ.Capability == authorization.AuthZApiImplements {
			pm.config.AuthzMiddleware.RemovePlugin(p.Name())
		}
	}

	if err := pm.disable(p, c); err != nil {
		return err
	}
	pm.publisher.Publish(EventDisable{Plugin: p.PluginObj})
	pm.config.LogPluginEvent(p.GetID(), refOrID, "disable")
	return nil
}

// Enable activates a plugin, which implies that it is ready to be used by containers.
func (pm *Manager) Enable(refOrID string, config *types.PluginEnableConfig) error {
	p, err := pm.config.Store.GetV2Plugin(refOrID)
	if err != nil {
		return err
	}

	c := &controller{timeoutInSecs: config.Timeout}
	if err := pm.enable(p, c, false); err != nil {
		return err
	}
	pm.publisher.Publish(EventEnable{Plugin: p.PluginObj})
	pm.config.LogPluginEvent(p.GetID(), refOrID, "enable")
	return nil
}

// Inspect examines a plugin config
func (pm *Manager) Inspect(refOrID string) (tp *types.Plugin, err error) {
	p, err := pm.config.Store.GetV2Plugin(refOrID)
	if err != nil {
		return nil, err
	}

	return &p.PluginObj, nil
}

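// pull is a helper around distribution.Pull. When outStream is non-nil, pull
// progress is routed through a buffered channel to the stream so that slow
// client connections do not stall the transfer; otherwise progress is discarded.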
func (pm *Manager) pull(ctx context.Context, ref reference.Named, config *distribution.ImagePullConfig, outStream io.Writer) error {
	if outStream != nil {
		// Include a buffer so that slow client connections don't affect
		// transfer performance.
		progressChan := make(chan progress.Progress, 100)

		writesDone := make(chan struct{})

		defer func() {
			close(progressChan)
			<-writesDone
		}()

		var cancelFunc context.CancelFunc
		ctx, cancelFunc = context.WithCancel(ctx)

		go func() {
			progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan)
			close(writesDone)
		}()

		config.ProgressOutput = progress.ChanOutput(progressChan)
	} else {
		config.ProgressOutput = progress.DiscardOutput()
	}
	return distribution.Pull(ctx, ref, config)
}

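// tempConfigStore is a minimal in-memory image store used when only the plugin
// config is pulled (see Privileges); it holds a single config blob and its digest.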
type tempConfigStore struct {
	config       []byte
	configDigest digest.Digest
}

func (s *tempConfigStore) Put(c []byte) (digest.Digest, error) {
	dgst := digest.FromBytes(c)

	s.config = c
	s.configDigest = dgst

	return dgst, nil
}

func (s *tempConfigStore) Get(d digest.Digest) ([]byte, error) {
	if d != s.configDigest {
		return nil, errNotFound("digest not found")
	}
	return s.config, nil
}

func (s *tempConfigStore) RootFSFromConfig(c []byte) (*image.RootFS, error) {
	return configToRootFS(c)
}

func (s *tempConfigStore) PlatformFromConfig(c []byte) (*specs.Platform, error) {
	// TODO: LCOW/Plugins. This will need revisiting. For now use the runtime OS
	return &specs.Platform{OS: runtime.GOOS}, nil
}

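// computePrivileges inspects a plugin config and returns the list of privileges
// (network access, host ipc/pid namespaces, mounts, devices, and capabilities)
// that must be granted before the plugin is installed.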
func computePrivileges(c types.PluginConfig) types.PluginPrivileges {
	var privileges types.PluginPrivileges
	if c.Network.Type != "null" && c.Network.Type != "bridge" && c.Network.Type != "" {
		privileges = append(privileges, types.PluginPrivilege{
			Name:        "network",
			Description: "permissions to access a network",
			Value:       []string{c.Network.Type},
		})
	}
	if c.IpcHost {
		privileges = append(privileges, types.PluginPrivilege{
			Name:        "host ipc namespace",
			Description: "allow access to host ipc namespace",
			Value:       []string{"true"},
		})
	}
	if c.PidHost {
		privileges = append(privileges, types.PluginPrivilege{
			Name:        "host pid namespace",
			Description: "allow access to host pid namespace",
			Value:       []string{"true"},
		})
	}
	for _, mount := range c.Mounts {
		if mount.Source != nil {
			privileges = append(privileges, types.PluginPrivilege{
				Name:        "mount",
				Description: "host path to mount",
				Value:       []string{*mount.Source},
			})
		}
	}
	for _, device := range c.Linux.Devices {
		if device.Path != nil {
			privileges = append(privileges, types.PluginPrivilege{
				Name:        "device",
				Description: "host device to access",
				Value:       []string{*device.Path},
			})
		}
	}
	if c.Linux.AllowAllDevices {
		privileges = append(privileges, types.PluginPrivilege{
			Name:        "allow-all-devices",
			Description: "allow 'rwm' access to all devices",
			Value:       []string{"true"},
		})
	}
	if len(c.Linux.Capabilities) > 0 {
		privileges = append(privileges, types.PluginPrivilege{
			Name:        "capabilities",
			Description: "list of additional capabilities required",
			Value:       c.Linux.Capabilities,
		})
	}

	return privileges
}

// Privileges pulls a plugin config and computes the privileges required to install it.
func (pm *Manager) Privileges(ctx context.Context, ref reference.Named, metaHeader http.Header, authConfig *types.AuthConfig) (types.PluginPrivileges, error) {
	// create image store instance
	cs := &tempConfigStore{}

	// DownloadManager not defined because only pulling configuration.
	pluginPullConfig := &distribution.ImagePullConfig{
		Config: distribution.Config{
			MetaHeaders:      metaHeader,
			AuthConfig:       authConfig,
			RegistryService:  pm.config.RegistryService,
			ImageEventLogger: func(string, string, string) {},
			ImageStore:       cs,
		},
		Schema2Types: distribution.PluginTypes,
	}

	if err := pm.pull(ctx, ref, pluginPullConfig, nil); err != nil {
		return nil, err
	}

	if cs.config == nil {
		return nil, errors.New("no configuration pulled")
	}
	var config types.PluginConfig
	if err := json.Unmarshal(cs.config, &config); err != nil {
		return nil, errdefs.System(err)
	}

	return computePrivileges(config), nil
}

// Upgrade upgrades a plugin
func (pm *Manager) Upgrade(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer) (err error) {
	p, err := pm.config.Store.GetV2Plugin(name)
	if err != nil {
		return err
	}

	if p.IsEnabled() {
		return errors.Wrap(enabledError(p.Name()), "plugin must be disabled before upgrading")
	}

	pm.muGC.RLock()
	defer pm.muGC.RUnlock()

	// revalidate because Pull is public
	if _, err := reference.ParseNormalizedNamed(name); err != nil {
		return errors.Wrapf(errdefs.InvalidParameter(err), "failed to parse %q", name)
	}

	tmpRootFSDir, err := ioutil.TempDir(pm.tmpDir(), ".rootfs")
	if err != nil {
		return errors.Wrap(errdefs.System(err), "error preparing upgrade")
	}
	defer os.RemoveAll(tmpRootFSDir)

	dm := &downloadManager{
		tmpDir:    tmpRootFSDir,
		blobStore: pm.blobStore,
	}

	pluginPullConfig := &distribution.ImagePullConfig{
		Config: distribution.Config{
			MetaHeaders:      metaHeader,
			AuthConfig:       authConfig,
			RegistryService:  pm.config.RegistryService,
			ImageEventLogger: pm.config.LogPluginEvent,
			ImageStore:       dm,
		},
		DownloadManager: dm, // todo: reevaluate if possible to substitute distribution/xfer dependencies instead
		Schema2Types:    distribution.PluginTypes,
	}

	err = pm.pull(ctx, ref, pluginPullConfig, outStream)
	if err != nil {
		go pm.GC()
		return err
	}

	if err := pm.upgradePlugin(p, dm.configDigest, dm.blobs, tmpRootFSDir, &privileges); err != nil {
		return err
	}
	p.PluginObj.PluginReference = ref.String()
	return nil
}

// Pull pulls a plugin, checks that the correct privileges have been provided, and installs the plugin.
func (pm *Manager) Pull(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer, opts ...CreateOpt) (err error) {
	pm.muGC.RLock()
	defer pm.muGC.RUnlock()

	// revalidate because Pull is public
	nameref, err := reference.ParseNormalizedNamed(name)
	if err != nil {
		return errors.Wrapf(errdefs.InvalidParameter(err), "failed to parse %q", name)
	}
	name = reference.FamiliarString(reference.TagNameOnly(nameref))

	if err := pm.config.Store.validateName(name); err != nil {
		return errdefs.InvalidParameter(err)
	}

	tmpRootFSDir, err := ioutil.TempDir(pm.tmpDir(), ".rootfs")
	if err != nil {
		return errors.Wrap(errdefs.System(err), "error preparing pull")
	}
	defer os.RemoveAll(tmpRootFSDir)

	dm := &downloadManager{
		tmpDir:    tmpRootFSDir,
		blobStore: pm.blobStore,
	}

	pluginPullConfig := &distribution.ImagePullConfig{
		Config: distribution.Config{
			MetaHeaders:      metaHeader,
			AuthConfig:       authConfig,
			RegistryService:  pm.config.RegistryService,
			ImageEventLogger: pm.config.LogPluginEvent,
			ImageStore:       dm,
		},
		DownloadManager: dm, // todo: reevaluate if possible to substitute distribution/xfer dependencies instead
		Schema2Types:    distribution.PluginTypes,
	}

	err = pm.pull(ctx, ref, pluginPullConfig, outStream)
	if err != nil {
		go pm.GC()
		return err
	}

	refOpt := func(p *v2.Plugin) {
		p.PluginObj.PluginReference = ref.String()
	}
	optsList := make([]CreateOpt, 0, len(opts)+1)
	optsList = append(optsList, opts...)
	optsList = append(optsList, refOpt)

	p, err := pm.createPlugin(name, dm.configDigest, dm.blobs, tmpRootFSDir, &privileges, optsList...)
	if err != nil {
		return err
	}

	pm.publisher.Publish(EventCreate{Plugin: p.PluginObj})
	return nil
}

// List displays the list of plugins and associated metadata.
func (pm *Manager) List(pluginFilters filters.Args) ([]types.Plugin, error) {
	if err := pluginFilters.Validate(acceptedPluginFilterTags); err != nil {
		return nil, err
	}

	enabledOnly := false
	disabledOnly := false
	if pluginFilters.Contains("enabled") {
		if pluginFilters.ExactMatch("enabled", "true") {
			enabledOnly = true
		} else if pluginFilters.ExactMatch("enabled", "false") {
			disabledOnly = true
		} else {
			return nil, invalidFilter{"enabled", pluginFilters.Get("enabled")}
		}
	}

	plugins := pm.config.Store.GetAll()
	out := make([]types.Plugin, 0, len(plugins))

next:
	for _, p := range plugins {
		if enabledOnly && !p.PluginObj.Enabled {
			continue
		}
		if disabledOnly && p.PluginObj.Enabled {
			continue
		}
		if pluginFilters.Contains("capability") {
			for _, f := range p.GetTypes() {
				if !pluginFilters.Match("capability", f.Capability) {
					continue next
				}
			}
		}
		out = append(out, p.PluginObj)
	}
	return out, nil
}

// Push pushes a plugin to the registry.
func (pm *Manager) Push(ctx context.Context, name string, metaHeader http.Header, authConfig *types.AuthConfig, outStream io.Writer) error {
	p, err := pm.config.Store.GetV2Plugin(name)
	if err != nil {
		return err
	}

	ref, err := reference.ParseNormalizedNamed(p.Name())
	if err != nil {
		return errors.Wrapf(err, "plugin has invalid name %v for push", p.Name())
	}

	var po progress.Output
	if outStream != nil {
		// Include a buffer so that slow client connections don't affect
		// transfer performance.
		progressChan := make(chan progress.Progress, 100)

		writesDone := make(chan struct{})

		defer func() {
			close(progressChan)
			<-writesDone
		}()

		var cancelFunc context.CancelFunc
		ctx, cancelFunc = context.WithCancel(ctx)

		go func() {
			progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan)
			close(writesDone)
		}()

		po = progress.ChanOutput(progressChan)
	} else {
		po = progress.DiscardOutput()
	}

	// TODO: replace these with manager
	is := &pluginConfigStore{
		pm:     pm,
		plugin: p,
	}
	lss := make(map[string]distribution.PushLayerProvider)
	lss[runtime.GOOS] = &pluginLayerProvider{
		pm:     pm,
		plugin: p,
	}
	rs := &pluginReference{
		name:     ref,
		pluginID: p.Config,
	}

	uploadManager := xfer.NewLayerUploadManager(3)

	imagePushConfig := &distribution.ImagePushConfig{
		Config: distribution.Config{
			MetaHeaders:      metaHeader,
			AuthConfig:       authConfig,
			ProgressOutput:   po,
			RegistryService:  pm.config.RegistryService,
			ReferenceStore:   rs,
			ImageEventLogger: pm.config.LogPluginEvent,
			ImageStore:       is,
			RequireSchema2:   true,
		},
		ConfigMediaType: schema2.MediaTypePluginConfig,
		LayerStores:     lss,
		UploadManager:   uploadManager,
	}

	return distribution.Push(ctx, ref, imagePushConfig)
}

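// pluginReference is a read-only reference store used during push; it maps the
// plugin's name to its config digest.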
type pluginReference struct {
	name     reference.Named
	pluginID digest.Digest
}

func (r *pluginReference) References(id digest.Digest) []reference.Named {
	if r.pluginID != id {
		return nil
	}
	return []reference.Named{r.name}
}

func (r *pluginReference) ReferencesByName(ref reference.Named) []refstore.Association {
	return []refstore.Association{
		{
			Ref: r.name,
			ID:  r.pluginID,
		},
	}
}

func (r *pluginReference) Get(ref reference.Named) (digest.Digest, error) {
	if r.name.String() != ref.String() {
		return digest.Digest(""), refstore.ErrDoesNotExist
	}
	return r.pluginID, nil
}

func (r *pluginReference) AddTag(ref reference.Named, id digest.Digest, force bool) error {
	// Read only, ignore
	return nil
}

func (r *pluginReference) AddDigest(ref reference.Canonical, id digest.Digest, force bool) error {
	// Read only, ignore
	return nil
}

func (r *pluginReference) Delete(ref reference.Named) (bool, error) {
	// Read only, ignore
	return false, nil
}

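// pluginConfigStore serves the plugin's config blob from the manager's blob
// store during push; it is read-only and rejects writes.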
type pluginConfigStore struct {
	pm     *Manager
	plugin *v2.Plugin
}

func (s *pluginConfigStore) Put([]byte) (digest.Digest, error) {
	return digest.Digest(""), errors.New("cannot store config on push")
}

func (s *pluginConfigStore) Get(d digest.Digest) ([]byte, error) {
	if s.plugin.Config != d {
		return nil, errors.New("plugin not found")
	}
	rwc, err := s.pm.blobStore.Get(d)
	if err != nil {
		return nil, err
	}
	defer rwc.Close()
	return ioutil.ReadAll(rwc)
}

func (s *pluginConfigStore) RootFSFromConfig(c []byte) (*image.RootFS, error) {
	return configToRootFS(c)
}

func (s *pluginConfigStore) PlatformFromConfig(c []byte) (*specs.Platform, error) {
	// TODO: LCOW/Plugins. This will need revisiting. For now use the runtime OS
	return &specs.Platform{OS: runtime.GOOS}, nil
}

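// pluginLayerProvider resolves a layer chain ID to the plugin's rootfs blobs so
// the push code can upload them.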
type pluginLayerProvider struct {
	pm     *Manager
	plugin *v2.Plugin
}

func (p *pluginLayerProvider) Get(id layer.ChainID) (distribution.PushLayer, error) {
	rootFS := rootFSFromPlugin(p.plugin.PluginObj.Config.Rootfs)
	var i int
	for i = 1; i <= len(rootFS.DiffIDs); i++ {
		if layer.CreateChainID(rootFS.DiffIDs[:i]) == id {
			break
		}
	}
	if i > len(rootFS.DiffIDs) {
		return nil, errors.New("layer not found")
	}
	return &pluginLayer{
		pm:      p.pm,
		diffIDs: rootFS.DiffIDs[:i],
		blobs:   p.plugin.Blobsums[:i],
	}, nil
}

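// pluginLayer exposes a plugin rootfs blob as a distribution.PushLayer backed
// by the manager's blob store.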
type pluginLayer struct {
	pm      *Manager
	diffIDs []layer.DiffID
	blobs   []digest.Digest
}

func (l *pluginLayer) ChainID() layer.ChainID {
	return layer.CreateChainID(l.diffIDs)
}

func (l *pluginLayer) DiffID() layer.DiffID {
	return l.diffIDs[len(l.diffIDs)-1]
}

func (l *pluginLayer) Parent() distribution.PushLayer {
	if len(l.diffIDs) == 1 {
		return nil
	}
	return &pluginLayer{
		pm:      l.pm,
		diffIDs: l.diffIDs[:len(l.diffIDs)-1],
		blobs:   l.blobs[:len(l.diffIDs)-1],
	}
}

func (l *pluginLayer) Open() (io.ReadCloser, error) {
	return l.pm.blobStore.Get(l.blobs[len(l.diffIDs)-1])
}

func (l *pluginLayer) Size() (int64, error) {
	return l.pm.blobStore.Size(l.blobs[len(l.diffIDs)-1])
}

func (l *pluginLayer) MediaType() string {
	return schema2.MediaTypeLayer
}

func (l *pluginLayer) Release() {
	// Nothing needs to be released, no references held
}

// Remove deletes the plugin's root directory.
func (pm *Manager) Remove(name string, config *types.PluginRmConfig) error {
	p, err := pm.config.Store.GetV2Plugin(name)
	pm.mu.RLock()
	c := pm.cMap[p]
	pm.mu.RUnlock()

	if err != nil {
		return err
	}

	if !config.ForceRemove {
		if p.GetRefCount() > 0 {
			return inUseError(p.Name())
		}
		if p.IsEnabled() {
			return enabledError(p.Name())
		}
	}

	if p.IsEnabled() {
		if err := pm.disable(p, c); err != nil {
			logrus.Errorf("failed to disable plugin '%s': %s", p.Name(), err)
		}
	}

	defer func() {
		go pm.GC()
	}()

	id := p.GetID()
	pluginDir := filepath.Join(pm.config.Root, id)

	if err := mount.RecursiveUnmount(pluginDir); err != nil {
		return errors.Wrap(err, "error unmounting plugin data")
	}

	if err := atomicRemoveAll(pluginDir); err != nil {
		return err
	}

	pm.config.Store.Remove(p)
	pm.config.LogPluginEvent(id, name, "remove")
	pm.publisher.Publish(EventRemove{Plugin: p.PluginObj})
	return nil
}

// Set sets plugin args
func (pm *Manager) Set(name string, args []string) error {
	p, err := pm.config.Store.GetV2Plugin(name)
	if err != nil {
		return err
	}
	if err := p.Set(args); err != nil {
		return err
	}
	return pm.save(p)
}

// CreateFromContext creates a plugin from the given build context (a tar stream
// containing both the rootfs and config.json) and a repo name with an optional tag.
func (pm *Manager) CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *types.PluginCreateOptions) (err error) {
	pm.muGC.RLock()
	defer pm.muGC.RUnlock()

	ref, err := reference.ParseNormalizedNamed(options.RepoName)
	if err != nil {
		return errors.Wrapf(err, "failed to parse reference %v", options.RepoName)
	}
	if _, ok := ref.(reference.Canonical); ok {
		return errors.Errorf("canonical references are not permitted")
	}
	name := reference.FamiliarString(reference.TagNameOnly(ref))

	if err := pm.config.Store.validateName(name); err != nil { // fast check, real check is in createPlugin()
		return err
	}

	tmpRootFSDir, err := ioutil.TempDir(pm.tmpDir(), ".rootfs")
	if err != nil {
		return errors.Wrap(err, "failed to create temp directory")
	}
	defer os.RemoveAll(tmpRootFSDir)

	var configJSON []byte
	rootFS := splitConfigRootFSFromTar(tarCtx, &configJSON)

	rootFSBlob, err := pm.blobStore.New()
	if err != nil {
		return err
	}
	defer rootFSBlob.Close()
	gzw := gzip.NewWriter(rootFSBlob)
	layerDigester := digest.Canonical.Digester()
	rootFSReader := io.TeeReader(rootFS, io.MultiWriter(gzw, layerDigester.Hash()))

	if err := chrootarchive.Untar(rootFSReader, tmpRootFSDir, nil); err != nil {
		return err
	}
	if err := rootFS.Close(); err != nil {
		return err
	}

	if configJSON == nil {
		return errors.New("config not found")
	}

	if err := gzw.Close(); err != nil {
		return errors.Wrap(err, "error closing gzip writer")
	}

	var config types.PluginConfig
	if err := json.Unmarshal(configJSON, &config); err != nil {
		return errors.Wrap(err, "failed to parse config")
	}

	if err := pm.validateConfig(config); err != nil {
		return err
	}

	pm.mu.Lock()
	defer pm.mu.Unlock()

	rootFSBlobsum, err := rootFSBlob.Commit()
	if err != nil {
		return err
	}
	defer func() {
		if err != nil {
			go pm.GC()
		}
	}()

	config.Rootfs = &types.PluginConfigRootfs{
		Type:    "layers",
		DiffIds: []string{layerDigester.Digest().String()},
	}

	config.DockerVersion = dockerversion.Version

	configBlob, err := pm.blobStore.New()
	if err != nil {
		return err
	}
	defer configBlob.Close()
	if err := json.NewEncoder(configBlob).Encode(config); err != nil {
		return errors.Wrap(err, "error encoding json config")
	}
	configBlobsum, err := configBlob.Commit()
	if err != nil {
		return err
	}

	p, err := pm.createPlugin(name, configBlobsum, []digest.Digest{rootFSBlobsum}, tmpRootFSDir, nil)
	if err != nil {
		return err
	}
	p.PluginObj.PluginReference = name

	pm.publisher.Publish(EventCreate{Plugin: p.PluginObj})
	pm.config.LogPluginEvent(p.PluginObj.ID, name, "create")

	return nil
}

func (pm *Manager) validateConfig(config types.PluginConfig) error {
	return nil // TODO:
}

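// splitConfigRootFSFromTar streams the plugin build context: it captures the
// contents of config.json into config and returns a tar stream containing only
// the entries under the rootfs directory, re-rooted to the top of the archive.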
func splitConfigRootFSFromTar(in io.ReadCloser, config *[]byte) io.ReadCloser {
	pr, pw := io.Pipe()
	go func() {
		tarReader := tar.NewReader(in)
		tarWriter := tar.NewWriter(pw)
		defer in.Close()

		hasRootFS := false

		for {
			hdr, err := tarReader.Next()
			if err == io.EOF {
				if !hasRootFS {
					pw.CloseWithError(errors.Wrap(err, "no rootfs found"))
					return
				}
				// Signals end of archive.
				tarWriter.Close()
				pw.Close()
				return
			}
			if err != nil {
				pw.CloseWithError(errors.Wrap(err, "failed to read from tar"))
				return
			}

			content := io.Reader(tarReader)
			name := path.Clean(hdr.Name)
			if path.IsAbs(name) {
				name = name[1:]
			}
			if name == configFileName {
				dt, err := ioutil.ReadAll(content)
				if err != nil {
					pw.CloseWithError(errors.Wrapf(err, "failed to read %s", configFileName))
					return
				}
				*config = dt
			}
			if parts := strings.Split(name, "/"); len(parts) != 0 && parts[0] == rootFSFileName {
				hdr.Name = path.Clean(path.Join(parts[1:]...))
				if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(strings.ToLower(hdr.Linkname), rootFSFileName+"/") {
					hdr.Linkname = hdr.Linkname[len(rootFSFileName)+1:]
				}
				if err := tarWriter.WriteHeader(hdr); err != nil {
					pw.CloseWithError(errors.Wrap(err, "error writing tar header"))
					return
				}
				if _, err := pools.Copy(tarWriter, content); err != nil {
					pw.CloseWithError(errors.Wrap(err, "error copying tar data"))
					return
				}
				hasRootFS = true
			} else {
				io.Copy(ioutil.Discard, content)
			}
		}
	}()
	return pr
}

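// atomicRemoveAll removes dir by first renaming it to dir+"-removing" and then
// deleting the renamed directory, so an interrupted removal leaves a clearly
// marked directory instead of a partially deleted plugin directory.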
func atomicRemoveAll(dir string) error {
	renamed := dir + "-removing"

	err := os.Rename(dir, renamed)
	switch {
	case os.IsNotExist(err), err == nil:
		// even if `dir` doesn't exist, we can still try and remove `renamed`
	case os.IsExist(err):
		// Some previous remove failed, check if the origin dir exists
		if e := system.EnsureRemoveAll(renamed); e != nil {
			return errors.Wrap(err, "rename target already exists and could not be removed")
		}
		if _, err := os.Stat(dir); os.IsNotExist(err) {
			// origin doesn't exist, nothing left to do
			return nil
		}

		// attempt to rename again
		if err := os.Rename(dir, renamed); err != nil {
			return errors.Wrap(err, "failed to rename dir for atomic removal")
		}
	default:
		return errors.Wrap(err, "failed to rename dir for atomic removal")
	}

	if err := system.EnsureRemoveAll(renamed); err != nil {
		os.Rename(renamed, dir)
		return err
	}
	return nil
}