
Implement content addressability for plugins

Move plugins to shared distribution stack with images.

Create an immutable plugin config that matches schema2 requirements.

Ensure the data being pushed is the same as the data that was pulled or created.

Store distribution artifacts in a blobstore.

Run init layer setup for every plugin start.

Fix breakouts from unsafe file accesses.

Add support for `docker plugin install --alias`

Use normalized references for default names to avoid collisions when using default hosts/tags.

Refactor the plugin manager to support the change, including removing the singleton manager and adding a manager config struct.

Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
Signed-off-by: Derek McGowan <derek@mcgstyle.net>
Tonis Tiigi 2016-12-12 15:05:53 -08:00
parent d1dfc1a5ef
commit 3d86b0c79b
61 changed files with 1709 additions and 1382 deletions

View file

@ -5,6 +5,7 @@ import (
"net/http"
enginetypes "github.com/docker/docker/api/types"
"github.com/docker/docker/reference"
"golang.org/x/net/context"
)
@ -13,11 +14,11 @@ type Backend interface {
Disable(name string, config *enginetypes.PluginDisableConfig) error
Enable(name string, config *enginetypes.PluginEnableConfig) error
List() ([]enginetypes.Plugin, error)
Inspect(name string) (enginetypes.Plugin, error)
Inspect(name string) (*enginetypes.Plugin, error)
Remove(name string, config *enginetypes.PluginRmConfig) error
Set(name string, args []string) error
Privileges(name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig) (enginetypes.PluginPrivileges, error)
Pull(name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges) error
Push(name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig) error
CreateFromContext(ctx context.Context, tarCtx io.Reader, options *enginetypes.PluginCreateOptions) error
Privileges(ctx context.Context, ref reference.Named, metaHeaders http.Header, authConfig *enginetypes.AuthConfig) (enginetypes.PluginPrivileges, error)
Pull(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer) error
Push(ctx context.Context, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, outStream io.Writer) error
CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *enginetypes.PluginCreateOptions) error
}
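
For readability, this is roughly how the `Backend` interface reads once the hunk above is applied (reassembled from the unchanged and added lines; the `io` and `context` imports are implied by the new parameters):

```go
type Backend interface {
	Disable(name string, config *enginetypes.PluginDisableConfig) error
	Enable(name string, config *enginetypes.PluginEnableConfig) error
	List() ([]enginetypes.Plugin, error)
	Inspect(name string) (*enginetypes.Plugin, error)
	Remove(name string, config *enginetypes.PluginRmConfig) error
	Set(name string, args []string) error
	Privileges(ctx context.Context, ref reference.Named, metaHeaders http.Header, authConfig *enginetypes.AuthConfig) (enginetypes.PluginPrivileges, error)
	Pull(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer) error
	Push(ctx context.Context, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, outStream io.Writer) error
	CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *enginetypes.PluginCreateOptions) error
}
```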

View file

@ -30,8 +30,8 @@ func (r *pluginRouter) initRoutes() {
router.NewDeleteRoute("/plugins/{name:.*}", r.removePlugin),
router.NewPostRoute("/plugins/{name:.*}/enable", r.enablePlugin), // PATCH?
router.NewPostRoute("/plugins/{name:.*}/disable", r.disablePlugin),
router.NewPostRoute("/plugins/pull", r.pullPlugin),
router.NewPostRoute("/plugins/{name:.*}/push", r.pushPlugin),
router.Cancellable(router.NewPostRoute("/plugins/pull", r.pullPlugin)),
router.Cancellable(router.NewPostRoute("/plugins/{name:.*}/push", r.pushPlugin)),
router.NewPostRoute("/plugins/{name:.*}/set", r.setPlugin),
router.NewPostRoute("/plugins/create", r.createPlugin),
}

View file

@ -7,8 +7,13 @@ import (
"strconv"
"strings"
distreference "github.com/docker/distribution/reference"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/streamformatter"
"github.com/docker/docker/reference"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
@ -34,6 +39,48 @@ func parseHeaders(headers http.Header) (map[string][]string, *types.AuthConfig)
return metaHeaders, authConfig
}
// parseRemoteRef parses the remote reference into a reference.Named
// returning the tag associated with the reference. In the case the
// given reference string includes both digest and tag, the returned
// reference will have the digest without the tag, but the tag will
// be returned.
func parseRemoteRef(remote string) (reference.Named, string, error) {
// Parse remote reference, supporting remotes with name and tag
// NOTE: Using distribution reference to handle references
// containing both a name and digest
remoteRef, err := distreference.ParseNamed(remote)
if err != nil {
return nil, "", err
}
var tag string
if t, ok := remoteRef.(distreference.Tagged); ok {
tag = t.Tag()
}
// Convert distribution reference to docker reference
// TODO: remove when docker reference changes reconciled upstream
ref, err := reference.WithName(remoteRef.Name())
if err != nil {
return nil, "", err
}
if d, ok := remoteRef.(distreference.Digested); ok {
ref, err = reference.WithDigest(ref, d.Digest())
if err != nil {
return nil, "", err
}
} else if tag != "" {
ref, err = reference.WithTag(ref, tag)
if err != nil {
return nil, "", err
}
} else {
ref = reference.WithDefaultTag(ref)
}
return ref, tag, nil
}
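
To make the normalization above concrete, here is a sketch of a table-driven test one might add in the same package (with `testing` imported). The input references are invented and the expectations assume the default tag resolves to `latest`; this is not part of the commit itself:

```go
func TestParseRemoteRef(t *testing.T) {
	for _, tc := range []struct {
		remote  string
		wantRef string
		wantTag string
	}{
		// invented examples; expectations assume the default tag is "latest"
		{"example/plugin", "example/plugin:latest", ""},
		{"example/plugin:1.0", "example/plugin:1.0", "1.0"},
	} {
		ref, tag, err := parseRemoteRef(tc.remote)
		if err != nil {
			t.Fatalf("parseRemoteRef(%q): %v", tc.remote, err)
		}
		if ref.String() != tc.wantRef || tag != tc.wantTag {
			t.Errorf("parseRemoteRef(%q) = (%s, %q), want (%s, %q)",
				tc.remote, ref.String(), tag, tc.wantRef, tc.wantTag)
		}
	}
}
```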
func (pr *pluginRouter) getPrivileges(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
@ -41,7 +88,12 @@ func (pr *pluginRouter) getPrivileges(ctx context.Context, w http.ResponseWriter
metaHeaders, authConfig := parseHeaders(r.Header)
privileges, err := pr.backend.Privileges(r.FormValue("name"), metaHeaders, authConfig)
ref, _, err := parseRemoteRef(r.FormValue("remote"))
if err != nil {
return err
}
privileges, err := pr.backend.Privileges(ctx, ref, metaHeaders, authConfig)
if err != nil {
return err
}
@ -50,20 +102,66 @@ func (pr *pluginRouter) getPrivileges(ctx context.Context, w http.ResponseWriter
func (pr *pluginRouter) pullPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
return errors.Wrap(err, "failed to parse form")
}
var privileges types.PluginPrivileges
if err := json.NewDecoder(r.Body).Decode(&privileges); err != nil {
return err
dec := json.NewDecoder(r.Body)
if err := dec.Decode(&privileges); err != nil {
return errors.Wrap(err, "failed to parse privileges")
}
if dec.More() {
return errors.New("invalid privileges")
}
metaHeaders, authConfig := parseHeaders(r.Header)
if err := pr.backend.Pull(r.FormValue("name"), metaHeaders, authConfig, privileges); err != nil {
ref, tag, err := parseRemoteRef(r.FormValue("remote"))
if err != nil {
return err
}
w.WriteHeader(http.StatusCreated)
name := r.FormValue("name")
if name == "" {
if _, ok := ref.(reference.Canonical); ok {
trimmed := reference.TrimNamed(ref)
if tag != "" {
nt, err := reference.WithTag(trimmed, tag)
if err != nil {
return err
}
name = nt.String()
} else {
name = reference.WithDefaultTag(trimmed).String()
}
} else {
name = ref.String()
}
} else {
localRef, err := reference.ParseNamed(name)
if err != nil {
return err
}
if _, ok := localRef.(reference.Canonical); ok {
return errors.New("cannot use digest in plugin tag")
}
if distreference.IsNameOnly(localRef) {
// TODO: log change in name to out stream
name = reference.WithDefaultTag(localRef).String()
}
}
w.Header().Set("Docker-Plugin-Name", name)
w.Header().Set("Content-Type", "application/json")
output := ioutils.NewWriteFlusher(w)
if err := pr.backend.Pull(ctx, ref, name, metaHeaders, authConfig, privileges, output); err != nil {
if !output.Flushed() {
return err
}
output.Write(streamformatter.NewJSONStreamFormatter().FormatError(err))
}
return nil
}
@ -125,12 +223,21 @@ func (pr *pluginRouter) removePlugin(ctx context.Context, w http.ResponseWriter,
func (pr *pluginRouter) pushPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
return errors.Wrap(err, "failed to parse form")
}
metaHeaders, authConfig := parseHeaders(r.Header)
return pr.backend.Push(vars["name"], metaHeaders, authConfig)
w.Header().Set("Content-Type", "application/json")
output := ioutils.NewWriteFlusher(w)
if err := pr.backend.Push(ctx, vars["name"], metaHeaders, authConfig, output); err != nil {
if !output.Flushed() {
return err
}
output.Write(streamformatter.NewJSONStreamFormatter().FormatError(err))
}
return nil
}
func (pr *pluginRouter) setPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {

View file

@ -1347,16 +1347,13 @@ definitions:
Plugin:
description: "A plugin for the Engine API"
type: "object"
required: [Settings, Enabled, Config, Name, Tag]
required: [Settings, Enabled, Config, Name]
properties:
Id:
type: "string"
Name:
type: "string"
x-nullable: false
Tag:
type: "string"
x-nullable: false
Enabled:
description: "True when the plugin is running. False when the plugin is not running, only installed."
type: "boolean"
@ -1392,7 +1389,7 @@ definitions:
- Documentation
- Interface
- Entrypoint
- Workdir
- WorkDir
- Network
- Linux
- PropagatedMount
@ -1423,7 +1420,7 @@ definitions:
type: "array"
items:
type: "string"
Workdir:
WorkDir:
type: "string"
x-nullable: false
User:
@ -1490,6 +1487,15 @@ definitions:
type: "array"
items:
type: "string"
rootfs:
type: "object"
properties:
type:
type: "string"
diff_ids:
type: "array"
items:
type: "string"
example:
Id: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078"
Name: "tiborvass/no-remove"
@ -1528,7 +1534,7 @@ definitions:
Entrypoint:
- "plugin-no-remove"
- "/data"
Workdir: ""
WorkDir: ""
User: {}
Network:
Type: "host"
@ -6397,7 +6403,7 @@ paths:
Entrypoint:
- "plugin-no-remove"
- "/data"
Workdir: ""
WorkDir: ""
User: {}
Network:
Type: "host"
@ -6503,14 +6509,22 @@ paths:
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "name"
- name: "remote"
in: "query"
description: |
The plugin to install.
Remote reference for plugin to install.
The `:latest` tag is optional, and is used as the default if omitted.
required: true
type: "string"
- name: "name"
in: "query"
description: |
Local name for the pulled plugin.
The `:latest` tag is optional, and is used as the default if omitted.
required: false
type: "string"
- name: "X-Registry-Auth"
in: "header"
description: "A base64-encoded auth configuration to use when pulling a plugin from a registry. [See the authentication section for details.](#section/Authentication)"

View file

@ -350,6 +350,7 @@ type PluginInstallOptions struct {
Disabled bool
AcceptAllPermissions bool
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
RemoteRef string // RemoteRef is the plugin name on the registry
PrivilegeFunc RequestPrivilegeFunc
AcceptPermissionsFunc func(PluginPrivileges) (bool, error)
Args []string

View file

@ -25,10 +25,6 @@ type Plugin struct {
// settings
// Required: true
Settings PluginSettings `json:"Settings"`
// tag
// Required: true
Tag string `json:"Tag"`
}
// PluginConfig The config of a plugin.
@ -78,9 +74,12 @@ type PluginConfig struct {
// user
User PluginConfigUser `json:"User,omitempty"`
// workdir
// work dir
// Required: true
Workdir string `json:"Workdir"`
WorkDir string `json:"WorkDir"`
// rootfs
Rootfs *PluginConfigRootfs `json:"rootfs,omitempty"`
}
// PluginConfigArgs plugin config args
@ -143,6 +142,17 @@ type PluginConfigNetwork struct {
Type string `json:"Type"`
}
// PluginConfigRootfs plugin config rootfs
// swagger:model PluginConfigRootfs
type PluginConfigRootfs struct {
// diff ids
DiffIds []string `json:"diff_ids"`
// type
Type string `json:"type,omitempty"`
}
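
As a hedged illustration of the new content-addressable fields, a config might be populated roughly like this; only the fields added in this change are shown, the `"layers"` rootfs type mirrors the schema2 image-config convention, and the digest is a placeholder:

```go
package main

import "github.com/docker/docker/api/types"

// exampleConfig shows only the new WorkDir and Rootfs fields; real plugin
// configs carry the other required fields as well.
func exampleConfig() types.PluginConfig {
	return types.PluginConfig{
		WorkDir: "",
		Rootfs: &types.PluginConfigRootfs{
			Type: "layers", // assumed, following the schema2 convention
			DiffIds: []string{
				// placeholder digest; one entry per layer diff
				"sha256:0000000000000000000000000000000000000000000000000000000000000000",
			},
		},
	}
}
```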
// PluginConfigUser plugin config user
// swagger:model PluginConfigUser
type PluginConfigUser struct {

View file

@ -64,8 +64,8 @@ func newCreateCommand(dockerCli *command.DockerCli) *cobra.Command {
options := pluginCreateOptions{}
cmd := &cobra.Command{
Use: "create [OPTIONS] PLUGIN[:tag] PATH-TO-ROOTFS(rootfs + config.json)",
Short: "Create a plugin from a rootfs and config",
Use: "create [OPTIONS] PLUGIN PLUGIN-DATA-DIR",
Short: "Create a plugin from a rootfs and configuration. Plugin data directory must contain config.json and rootfs directory.",
Args: cli.RequiresMinArgs(2),
RunE: func(cmd *cobra.Command, args []string) error {
options.repoName = args[0]

View file

@ -6,7 +6,6 @@ import (
"github.com/docker/docker/api/types"
"github.com/docker/docker/cli"
"github.com/docker/docker/cli/command"
"github.com/docker/docker/reference"
"github.com/spf13/cobra"
"golang.org/x/net/context"
)
@ -29,18 +28,7 @@ func newDisableCommand(dockerCli *command.DockerCli) *cobra.Command {
}
func runDisable(dockerCli *command.DockerCli, name string, force bool) error {
named, err := reference.ParseNamed(name) // FIXME: validate
if err != nil {
return err
}
if reference.IsNameOnly(named) {
named = reference.WithDefaultTag(named)
}
ref, ok := named.(reference.NamedTagged)
if !ok {
return fmt.Errorf("invalid name: %s", named.String())
}
if err := dockerCli.Client().PluginDisable(context.Background(), ref.String(), types.PluginDisableOptions{Force: force}); err != nil {
if err := dockerCli.Client().PluginDisable(context.Background(), name, types.PluginDisableOptions{Force: force}); err != nil {
return err
}
fmt.Fprintln(dockerCli.Out(), name)

View file

@ -6,7 +6,6 @@ import (
"github.com/docker/docker/api/types"
"github.com/docker/docker/cli"
"github.com/docker/docker/cli/command"
"github.com/docker/docker/reference"
"github.com/spf13/cobra"
"golang.org/x/net/context"
)
@ -36,23 +35,11 @@ func newEnableCommand(dockerCli *command.DockerCli) *cobra.Command {
func runEnable(dockerCli *command.DockerCli, opts *enableOpts) error {
name := opts.name
named, err := reference.ParseNamed(name) // FIXME: validate
if err != nil {
return err
}
if reference.IsNameOnly(named) {
named = reference.WithDefaultTag(named)
}
ref, ok := named.(reference.NamedTagged)
if !ok {
return fmt.Errorf("invalid name: %s", named.String())
}
if opts.timeout < 0 {
return fmt.Errorf("negative timeout %d is invalid", opts.timeout)
}
if err := dockerCli.Client().PluginEnable(context.Background(), ref.String(), types.PluginEnableOptions{Timeout: opts.timeout}); err != nil {
if err := dockerCli.Client().PluginEnable(context.Background(), name, types.PluginEnableOptions{Timeout: opts.timeout}); err != nil {
return err
}
fmt.Fprintln(dockerCli.Out(), name)

View file

@ -2,12 +2,16 @@ package plugin
import (
"bufio"
"errors"
"fmt"
"strings"
distreference "github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
registrytypes "github.com/docker/docker/api/types/registry"
"github.com/docker/docker/cli"
"github.com/docker/docker/cli/command"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/reference"
"github.com/docker/docker/registry"
"github.com/spf13/cobra"
@ -16,6 +20,7 @@ import (
type pluginOptions struct {
name string
alias string
grantPerms bool
disable bool
args []string
@ -39,41 +44,67 @@ func newInstallCommand(dockerCli *command.DockerCli) *cobra.Command {
flags := cmd.Flags()
flags.BoolVar(&options.grantPerms, "grant-all-permissions", false, "Grant all permissions necessary to run the plugin")
flags.BoolVar(&options.disable, "disable", false, "Do not enable the plugin on install")
flags.StringVar(&options.alias, "alias", "", "Local name for plugin")
return cmd
}
func getRepoIndexFromUnnormalizedRef(ref distreference.Named) (*registrytypes.IndexInfo, error) {
named, err := reference.ParseNamed(ref.Name())
if err != nil {
return nil, err
}
repoInfo, err := registry.ParseRepositoryInfo(named)
if err != nil {
return nil, err
}
return repoInfo.Index, nil
}
func runInstall(dockerCli *command.DockerCli, opts pluginOptions) error {
named, err := reference.ParseNamed(opts.name) // FIXME: validate
// Parse name using distribution reference package to support name
// containing both tag and digest. Names with both tag and digest
// will be treated by the daemon as a pull by digest with
// an alias for the tag (if no alias is provided).
ref, err := distreference.ParseNamed(opts.name)
if err != nil {
return err
}
if reference.IsNameOnly(named) {
named = reference.WithDefaultTag(named)
alias := ""
if opts.alias != "" {
aref, err := reference.ParseNamed(opts.alias)
if err != nil {
return err
}
aref = reference.WithDefaultTag(aref)
if _, ok := aref.(reference.NamedTagged); !ok {
return fmt.Errorf("invalid name: %s", opts.alias)
}
alias = aref.String()
}
ref, ok := named.(reference.NamedTagged)
if !ok {
return fmt.Errorf("invalid name: %s", named.String())
index, err := getRepoIndexFromUnnormalizedRef(ref)
if err != nil {
return err
}
ctx := context.Background()
repoInfo, err := registry.ParseRepositoryInfo(named)
if err != nil {
return err
}
authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index)
authConfig := command.ResolveAuthConfig(ctx, dockerCli, index)
encodedAuth, err := command.EncodeAuthToBase64(authConfig)
if err != nil {
return err
}
registryAuthFunc := command.RegistryAuthenticationPrivilegedFunc(dockerCli, repoInfo.Index, "plugin install")
registryAuthFunc := command.RegistryAuthenticationPrivilegedFunc(dockerCli, index, "plugin install")
options := types.PluginInstallOptions{
RegistryAuth: encodedAuth,
RemoteRef: ref.String(),
Disabled: opts.disable,
AcceptAllPermissions: opts.grantPerms,
AcceptPermissionsFunc: acceptPrivileges(dockerCli, opts.name),
@ -81,10 +112,19 @@ func runInstall(dockerCli *command.DockerCli, opts pluginOptions) error {
PrivilegeFunc: registryAuthFunc,
Args: opts.args,
}
if err := dockerCli.Client().PluginInstall(ctx, ref.String(), options); err != nil {
responseBody, err := dockerCli.Client().PluginInstall(ctx, alias, options)
if err != nil {
if strings.Contains(err.Error(), "target is image") {
return errors.New(err.Error() + " - Use `docker image pull`")
}
return err
}
fmt.Fprintln(dockerCli.Out(), opts.name)
defer responseBody.Close()
if err := jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil); err != nil {
return err
}
fmt.Fprintf(dockerCli.Out(), "Installed plugin %s\n", opts.name) // todo: return proper values from the API for this result
return nil
}

View file

@ -44,7 +44,7 @@ func runList(dockerCli *command.DockerCli, opts listOptions) error {
}
w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0)
fmt.Fprintf(w, "ID \tNAME \tTAG \tDESCRIPTION\tENABLED")
fmt.Fprintf(w, "ID \tNAME \tDESCRIPTION\tENABLED")
fmt.Fprintf(w, "\n")
for _, p := range plugins {
@ -56,7 +56,7 @@ func runList(dockerCli *command.DockerCli, opts listOptions) error {
desc = stringutils.Ellipsis(desc, 45)
}
fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%v\n", id, p.Name, p.Tag, desc, p.Enabled)
fmt.Fprintf(w, "%s\t%s\t%s\t%v\n", id, p.Name, desc, p.Enabled)
}
w.Flush()
return nil

View file

@ -7,6 +7,7 @@ import (
"github.com/docker/docker/cli"
"github.com/docker/docker/cli/command"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/reference"
"github.com/docker/docker/registry"
"github.com/spf13/cobra"
@ -49,5 +50,10 @@ func runPush(dockerCli *command.DockerCli, name string) error {
if err != nil {
return err
}
return dockerCli.Client().PluginPush(ctx, ref.String(), encodedAuth)
responseBody, err := dockerCli.Client().PluginPush(ctx, ref.String(), encodedAuth)
if err != nil {
return err
}
defer responseBody.Close()
return jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil)
}

View file

@ -6,7 +6,6 @@ import (
"github.com/docker/docker/api/types"
"github.com/docker/docker/cli"
"github.com/docker/docker/cli/command"
"github.com/docker/docker/reference"
"github.com/spf13/cobra"
"golang.org/x/net/context"
)
@ -41,21 +40,8 @@ func runRemove(dockerCli *command.DockerCli, opts *rmOptions) error {
var errs cli.Errors
for _, name := range opts.plugins {
named, err := reference.ParseNamed(name) // FIXME: validate
if err != nil {
errs = append(errs, err)
continue
}
if reference.IsNameOnly(named) {
named = reference.WithDefaultTag(named)
}
ref, ok := named.(reference.NamedTagged)
if !ok {
errs = append(errs, fmt.Errorf("invalid name: %s", named.String()))
continue
}
// TODO: pass names to api instead of making multiple api calls
if err := dockerCli.Client().PluginRemove(ctx, ref.String(), types.PluginRemoveOptions{Force: opts.force}); err != nil {
if err := dockerCli.Client().PluginRemove(ctx, name, types.PluginRemoveOptions{Force: opts.force}); err != nil {
errs = append(errs, err)
continue
}

View file

@ -1,13 +1,10 @@
package plugin
import (
"fmt"
"golang.org/x/net/context"
"github.com/docker/docker/cli"
"github.com/docker/docker/cli/command"
"github.com/docker/docker/reference"
"github.com/spf13/cobra"
)
@ -17,24 +14,9 @@ func newSetCommand(dockerCli *command.DockerCli) *cobra.Command {
Short: "Change settings for a plugin",
Args: cli.RequiresMinArgs(2),
RunE: func(cmd *cobra.Command, args []string) error {
return runSet(dockerCli, args[0], args[1:])
return dockerCli.Client().PluginSet(context.Background(), args[0], args[1:])
},
}
return cmd
}
func runSet(dockerCli *command.DockerCli, name string, args []string) error {
named, err := reference.ParseNamed(name) // FIXME: validate
if err != nil {
return err
}
if reference.IsNameOnly(named) {
named = reference.WithDefaultTag(named)
}
ref, ok := named.(reference.NamedTagged)
if !ok {
return fmt.Errorf("invalid name: %s", named.String())
}
return dockerCli.Client().PluginSet(context.Background(), ref.String(), args)
}

View file

@ -111,8 +111,8 @@ type PluginAPIClient interface {
PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error
PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error
PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error
PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) error
PluginPush(ctx context.Context, name string, registryAuth string) error
PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error)
PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error)
PluginSet(ctx context.Context, name string, args []string) error
PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error)
PluginCreate(ctx context.Context, createContext io.Reader, options types.PluginCreateOptions) error

View file

@ -2,73 +2,96 @@ package client
import (
"encoding/json"
"io"
"net/http"
"net/url"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
// PluginInstall installs a plugin
func (cli *Client) PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (err error) {
// FIXME(vdemeester) name is a ref, we might want to parse/validate it here.
func (cli *Client) PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) {
query := url.Values{}
query.Set("name", name)
if _, err := reference.ParseNamed(options.RemoteRef); err != nil {
return nil, errors.Wrap(err, "invalid remote reference")
}
query.Set("remote", options.RemoteRef)
resp, err := cli.tryPluginPrivileges(ctx, query, options.RegistryAuth)
if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil {
// todo: do inspect before to check existing name before checking privileges
newAuthHeader, privilegeErr := options.PrivilegeFunc()
if privilegeErr != nil {
ensureReaderClosed(resp)
return privilegeErr
return nil, privilegeErr
}
options.RegistryAuth = newAuthHeader
resp, err = cli.tryPluginPrivileges(ctx, query, options.RegistryAuth)
}
if err != nil {
ensureReaderClosed(resp)
return err
return nil, err
}
var privileges types.PluginPrivileges
if err := json.NewDecoder(resp.body).Decode(&privileges); err != nil {
ensureReaderClosed(resp)
return err
return nil, err
}
ensureReaderClosed(resp)
if !options.AcceptAllPermissions && options.AcceptPermissionsFunc != nil && len(privileges) > 0 {
accept, err := options.AcceptPermissionsFunc(privileges)
if err != nil {
return err
return nil, err
}
if !accept {
return pluginPermissionDenied{name}
return nil, pluginPermissionDenied{options.RemoteRef}
}
}
_, err = cli.tryPluginPull(ctx, query, privileges, options.RegistryAuth)
// set name for plugin pull, if empty should default to remote reference
query.Set("name", name)
resp, err = cli.tryPluginPull(ctx, query, privileges, options.RegistryAuth)
if err != nil {
return err
return nil, err
}
defer func() {
name = resp.header.Get("Docker-Plugin-Name")
pr, pw := io.Pipe()
go func() { // todo: the client should probably be designed more around the actual api
_, err := io.Copy(pw, resp.body)
if err != nil {
delResp, _ := cli.delete(ctx, "/plugins/"+name, nil, nil)
ensureReaderClosed(delResp)
pw.CloseWithError(err)
return
}
defer func() {
if err != nil {
delResp, _ := cli.delete(ctx, "/plugins/"+name, nil, nil)
ensureReaderClosed(delResp)
}
}()
if len(options.Args) > 0 {
if err := cli.PluginSet(ctx, name, options.Args); err != nil {
pw.CloseWithError(err)
return
}
}
if options.Disabled {
pw.Close()
return
}
err = cli.PluginEnable(ctx, name, types.PluginEnableOptions{Timeout: 0})
pw.CloseWithError(err)
}()
if len(options.Args) > 0 {
if err := cli.PluginSet(ctx, name, options.Args); err != nil {
return err
}
}
if options.Disabled {
return nil
}
return cli.PluginEnable(ctx, name, types.PluginEnableOptions{Timeout: 0})
return pr, nil
}
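
A hedged sketch of how a caller might consume the new streaming install API. The remote reference, the local alias, and the use of `client.NewEnvClient` are illustrative; the CLI renders the same stream through `pkg/jsonmessage` rather than copying it raw:

```go
package main

import (
	"context"
	"io"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	// The first argument is the local name (alias); RemoteRef is what is
	// actually pulled from the registry. Both values are made up.
	rc, err := cli.PluginInstall(context.Background(), "my-plugin:latest", types.PluginInstallOptions{
		RemoteRef:            "example/sample-volume-plugin:latest",
		AcceptAllPermissions: true,
	})
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	// Drain the JSON progress stream; set/enable run in the background and
	// any failure there surfaces as a read error here.
	if _, err := io.Copy(os.Stdout, rc); err != nil {
		panic(err)
	}
}
```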
func (cli *Client) tryPluginPrivileges(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) {

View file

@ -1,13 +1,17 @@
package client
import (
"io"
"golang.org/x/net/context"
)
// PluginPush pushes a plugin to a registry
func (cli *Client) PluginPush(ctx context.Context, name string, registryAuth string) error {
func (cli *Client) PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) {
headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
resp, err := cli.post(ctx, "/plugins/"+name+"/push", nil, nil, headers)
ensureReaderClosed(resp)
return err
if err != nil {
return nil, err
}
return resp.body, nil
}

View file

@ -16,7 +16,7 @@ func TestPluginPushError(t *testing.T) {
client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
}
err := client.PluginPush(context.Background(), "plugin_name", "")
_, err := client.PluginPush(context.Background(), "plugin_name", "")
if err == nil || err.Error() != "Error response from daemon: Server error" {
t.Fatalf("expected a Server Error, got %v", err)
}
@ -44,7 +44,7 @@ func TestPluginPush(t *testing.T) {
}),
}
err := client.PluginPush(context.Background(), "plugin_name", "authtoken")
_, err := client.PluginPush(context.Background(), "plugin_name", "authtoken")
if err != nil {
t.Fatal(err)
}

View file

@ -42,7 +42,6 @@ import (
"github.com/docker/docker/pkg/plugingetter"
"github.com/docker/docker/pkg/signal"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/plugin"
"github.com/docker/docker/registry"
"github.com/docker/docker/runconfig"
"github.com/docker/go-connections/tlsconfig"
@ -471,7 +470,7 @@ func initRouter(s *apiserver.Server, d *daemon.Daemon, c *cluster.Cluster) {
volume.NewRouter(d),
build.NewRouter(dockerfile.NewBuildManager(d)),
swarmrouter.NewRouter(c),
pluginrouter.NewRouter(plugin.GetManager()),
pluginrouter.NewRouter(d.PluginManager()),
}
if d.NetworkControllerEnabled() {

View file

@ -13,6 +13,7 @@ import (
"github.com/docker/docker/api/types/network"
swarmtypes "github.com/docker/docker/api/types/swarm"
clustertypes "github.com/docker/docker/daemon/cluster/provider"
"github.com/docker/docker/plugin"
"github.com/docker/docker/reference"
"github.com/docker/libnetwork"
"github.com/docker/libnetwork/cluster"
@ -54,4 +55,5 @@ type Backend interface {
WaitForDetachment(context.Context, string, string, string, string) error
GetRepository(context.Context, reference.NamedTagged, *types.AuthConfig) (distribution.Repository, bool, error)
LookupImage(name string) (*types.ImageInspect, error)
PluginManager() *plugin.Manager
}

View file

@ -8,7 +8,6 @@ import (
"github.com/docker/docker/api/types/network"
executorpkg "github.com/docker/docker/daemon/cluster/executor"
clustertypes "github.com/docker/docker/daemon/cluster/provider"
"github.com/docker/docker/plugin"
networktypes "github.com/docker/libnetwork/types"
"github.com/docker/swarmkit/agent/exec"
"github.com/docker/swarmkit/agent/secrets"
@ -54,7 +53,7 @@ func (e *executor) Describe(ctx context.Context) (*api.NodeDescription, error) {
addPlugins("Authorization", info.Plugins.Authorization)
// add v2 plugins
v2Plugins, err := plugin.GetManager().List()
v2Plugins, err := e.backend.PluginManager().List()
if err == nil {
for _, plgn := range v2Plugins {
for _, typ := range plgn.Config.Interface.Types {
@ -67,13 +66,9 @@ func (e *executor) Describe(ctx context.Context) (*api.NodeDescription, error) {
} else if typ.Capability == "networkdriver" {
plgnTyp = "Network"
}
plgnName := plgn.Name
if plgn.Tag != "" {
plgnName += ":" + plgn.Tag
}
plugins[api.PluginDescription{
Type: plgnTyp,
Name: plgnName,
Name: plgn.Name,
}] = struct{}{}
}
}

View file

@ -8,7 +8,6 @@ package daemon
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"os"
@ -17,7 +16,6 @@ import (
"runtime"
"strings"
"sync"
"syscall"
"time"
"github.com/Sirupsen/logrus"
@ -28,6 +26,7 @@ import (
"github.com/docker/docker/container"
"github.com/docker/docker/daemon/events"
"github.com/docker/docker/daemon/exec"
"github.com/docker/docker/daemon/initlayer"
"github.com/docker/docker/dockerversion"
"github.com/docker/docker/plugin"
"github.com/docker/libnetwork/cluster"
@ -42,14 +41,11 @@ import (
"github.com/docker/docker/pkg/fileutils"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/plugingetter"
"github.com/docker/docker/pkg/progress"
"github.com/docker/docker/pkg/registrar"
"github.com/docker/docker/pkg/signal"
"github.com/docker/docker/pkg/streamformatter"
"github.com/docker/docker/pkg/sysinfo"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/pkg/truncindex"
pluginstore "github.com/docker/docker/plugin/store"
"github.com/docker/docker/reference"
"github.com/docker/docker/registry"
"github.com/docker/docker/runconfig"
@ -59,6 +55,7 @@ import (
"github.com/docker/libnetwork"
nwconfig "github.com/docker/libnetwork/config"
"github.com/docker/libtrust"
"github.com/pkg/errors"
)
var (
@ -99,7 +96,8 @@ type Daemon struct {
gidMaps []idtools.IDMap
layerStore layer.Store
imageStore image.Store
PluginStore *pluginstore.Store
PluginStore *plugin.Store // todo: remove
pluginManager *plugin.Manager
nameIndex *registrar.Registrar
linkIndex *linkIndex
containerd libcontainerd.Client
@ -554,10 +552,19 @@ func NewDaemon(config *Config, registryService registry.Service, containerdRemot
}
d.RegistryService = registryService
d.PluginStore = pluginstore.NewStore(config.Root)
d.PluginStore = plugin.NewStore(config.Root) // todo: remove
// Plugin system initialization should happen before restore. Do not change order.
if err := d.pluginInit(config, containerdRemote); err != nil {
return nil, err
d.pluginManager, err = plugin.NewManager(plugin.ManagerConfig{
Root: filepath.Join(config.Root, "plugins"),
ExecRoot: "/run/docker/plugins", // possibly needs fixing
Store: d.PluginStore,
Executor: containerdRemote,
RegistryService: registryService,
LiveRestoreEnabled: config.LiveRestoreEnabled,
LogPluginEvent: d.LogPluginEvent, // todo: make private
})
if err != nil {
return nil, errors.Wrap(err, "couldn't create plugin manager")
}
d.layerStore, err = layer.NewStoreFromOptions(layer.StoreOptions{
@ -895,36 +902,6 @@ func (daemon *Daemon) V6Subnets() []net.IPNet {
return subnets
}
func writeDistributionProgress(cancelFunc func(), outStream io.Writer, progressChan <-chan progress.Progress) {
progressOutput := streamformatter.NewJSONStreamFormatter().NewProgressOutput(outStream, false)
operationCancelled := false
for prog := range progressChan {
if err := progressOutput.WriteProgress(prog); err != nil && !operationCancelled {
// don't log broken pipe errors as this is the normal case when a client aborts
if isBrokenPipe(err) {
logrus.Info("Pull session cancelled")
} else {
logrus.Errorf("error writing progress to client: %v", err)
}
cancelFunc()
operationCancelled = true
// Don't return, because we need to continue draining
// progressChan until it's closed to avoid a deadlock.
}
}
}
func isBrokenPipe(e error) bool {
if netErr, ok := e.(*net.OpError); ok {
e = netErr.Err
if sysErr, ok := netErr.Err.(*os.SyscallError); ok {
e = sysErr.Err
}
}
return e == syscall.EPIPE
}
// GraphDriverName returns the name of the graph driver used by the layer.Store
func (daemon *Daemon) GraphDriverName() string {
return daemon.layerStore.DriverName()
@ -956,7 +933,7 @@ func tempDir(rootDir string, rootUID, rootGID int) (string, error) {
func (daemon *Daemon) setupInitLayer(initPath string) error {
rootUID, rootGID := daemon.GetRemappedUIDGID()
return setupInitLayer(initPath, rootUID, rootGID)
return initlayer.Setup(initPath, rootUID, rootGID)
}
func setDefaultMtu(config *Config) {
@ -1270,12 +1247,8 @@ func (daemon *Daemon) SetCluster(cluster Cluster) {
daemon.cluster = cluster
}
func (daemon *Daemon) pluginInit(cfg *Config, remote libcontainerd.Remote) error {
return plugin.Init(cfg.Root, daemon.PluginStore, remote, daemon.RegistryService, cfg.LiveRestoreEnabled, daemon.LogPluginEvent)
}
func (daemon *Daemon) pluginShutdown() {
manager := plugin.GetManager()
manager := daemon.pluginManager
// Check for a valid manager object. In error conditions, daemon init can fail
// and shutdown called, before plugin manager is initialized.
if manager != nil {
@ -1283,6 +1256,11 @@ func (daemon *Daemon) pluginShutdown() {
}
}
// PluginManager returns current pluginManager associated with the daemon
func (daemon *Daemon) PluginManager() *plugin.Manager { // set up before daemon to avoid this method
return daemon.pluginManager
}
// CreateDaemonRoot creates the root for the daemon
func CreateDaemonRoot(config *Config) error {
// get the canonical path to the Docker root directory

View file

@ -96,16 +96,6 @@ func (daemon *Daemon) getLayerInit() func(string) error {
return nil
}
// setupInitLayer populates a directory with mountpoints suitable
// for bind-mounting dockerinit into the container. The mountpoint is simply an
// empty file at /.dockerinit
//
// This extra layer is used by all containers as the top-most ro layer. It protects
// the container from unwanted side-effects on the rw layer.
func setupInitLayer(initLayer string, rootUID, rootGID int) error {
return nil
}
func checkKernel() error {
// solaris can rely upon checkSystem() below, we don't skew kernel versions
return nil

View file

@ -858,63 +858,6 @@ func (daemon *Daemon) getLayerInit() func(string) error {
return daemon.setupInitLayer
}
// setupInitLayer populates a directory with mountpoints suitable
// for bind-mounting things into the container.
//
// This extra layer is used by all containers as the top-most ro layer. It protects
// the container from unwanted side-effects on the rw layer.
func setupInitLayer(initLayer string, rootUID, rootGID int) error {
for pth, typ := range map[string]string{
"/dev/pts": "dir",
"/dev/shm": "dir",
"/proc": "dir",
"/sys": "dir",
"/.dockerenv": "file",
"/etc/resolv.conf": "file",
"/etc/hosts": "file",
"/etc/hostname": "file",
"/dev/console": "file",
"/etc/mtab": "/proc/mounts",
} {
parts := strings.Split(pth, "/")
prev := "/"
for _, p := range parts[1:] {
prev = filepath.Join(prev, p)
syscall.Unlink(filepath.Join(initLayer, prev))
}
if _, err := os.Stat(filepath.Join(initLayer, pth)); err != nil {
if os.IsNotExist(err) {
if err := idtools.MkdirAllNewAs(filepath.Join(initLayer, filepath.Dir(pth)), 0755, rootUID, rootGID); err != nil {
return err
}
switch typ {
case "dir":
if err := idtools.MkdirAllNewAs(filepath.Join(initLayer, pth), 0755, rootUID, rootGID); err != nil {
return err
}
case "file":
f, err := os.OpenFile(filepath.Join(initLayer, pth), os.O_CREATE, 0755)
if err != nil {
return err
}
f.Chown(rootUID, rootGID)
f.Close()
default:
if err := os.Symlink(typ, filepath.Join(initLayer, pth)); err != nil {
return err
}
}
} else {
return err
}
}
}
// Layer is ready to use, if it wasn't before.
return nil
}
// Parse the remapped root (user namespace) option, which can be one of:
// username - valid username from /etc/passwd
// username:groupname - valid username; valid groupname from /etc/group

View file

@ -61,10 +61,6 @@ func getBlkioWriteBpsDevices(config *containertypes.HostConfig) ([]blkiodev.Thro
return nil, nil
}
func setupInitLayer(initLayer string, rootUID, rootGID int) error {
return nil
}
func (daemon *Daemon) getLayerInit() func(string) error {
return nil
}

View file

@ -9,6 +9,7 @@ import (
"github.com/docker/docker/api/types"
"github.com/docker/docker/builder"
"github.com/docker/docker/distribution"
progressutils "github.com/docker/docker/distribution/utils"
"github.com/docker/docker/pkg/progress"
"github.com/docker/docker/reference"
"github.com/docker/docker/registry"
@ -84,7 +85,7 @@ func (daemon *Daemon) pullImageWithReference(ctx context.Context, ref reference.
ctx, cancelFunc := context.WithCancel(ctx)
go func() {
writeDistributionProgress(cancelFunc, outStream, progressChan)
progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan)
close(writesDone)
}()

View file

@ -6,6 +6,7 @@ import (
"github.com/docker/distribution/manifest/schema2"
"github.com/docker/docker/api/types"
"github.com/docker/docker/distribution"
progressutils "github.com/docker/docker/distribution/utils"
"github.com/docker/docker/pkg/progress"
"github.com/docker/docker/reference"
"golang.org/x/net/context"
@ -34,7 +35,7 @@ func (daemon *Daemon) PushImage(ctx context.Context, image, tag string, metaHead
ctx, cancelFunc := context.WithCancel(ctx)
go func() {
writeDistributionProgress(cancelFunc, outStream, progressChan)
progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan)
close(writesDone)
}()

View file

@ -0,0 +1,13 @@
// +build solaris,cgo
package initlayer
// Setup populates a directory with mountpoints suitable
// for bind-mounting dockerinit into the container. The mountpoint is simply an
// empty file at /.dockerinit
//
// This extra layer is used by all containers as the top-most ro layer. It protects
// the container from unwanted side-effects on the rw layer.
func Setup(initLayer string, rootUID, rootGID int) error {
return nil
}

View file

@ -0,0 +1,69 @@
// +build linux freebsd
package initlayer
import (
"os"
"path/filepath"
"strings"
"syscall"
"github.com/docker/docker/pkg/idtools"
)
// Setup populates a directory with mountpoints suitable
// for bind-mounting things into the container.
//
// This extra layer is used by all containers as the top-most ro layer. It protects
// the container from unwanted side-effects on the rw layer.
func Setup(initLayer string, rootUID, rootGID int) error {
for pth, typ := range map[string]string{
"/dev/pts": "dir",
"/dev/shm": "dir",
"/proc": "dir",
"/sys": "dir",
"/.dockerenv": "file",
"/etc/resolv.conf": "file",
"/etc/hosts": "file",
"/etc/hostname": "file",
"/dev/console": "file",
"/etc/mtab": "/proc/mounts",
} {
parts := strings.Split(pth, "/")
prev := "/"
for _, p := range parts[1:] {
prev = filepath.Join(prev, p)
syscall.Unlink(filepath.Join(initLayer, prev))
}
if _, err := os.Stat(filepath.Join(initLayer, pth)); err != nil {
if os.IsNotExist(err) {
if err := idtools.MkdirAllNewAs(filepath.Join(initLayer, filepath.Dir(pth)), 0755, rootUID, rootGID); err != nil {
return err
}
switch typ {
case "dir":
if err := idtools.MkdirAllNewAs(filepath.Join(initLayer, pth), 0755, rootUID, rootGID); err != nil {
return err
}
case "file":
f, err := os.OpenFile(filepath.Join(initLayer, pth), os.O_CREATE, 0755)
if err != nil {
return err
}
f.Chown(rootUID, rootGID)
f.Close()
default:
if err := os.Symlink(typ, filepath.Join(initLayer, pth)); err != nil {
return err
}
}
} else {
return err
}
}
}
// Layer is ready to use, if it wasn't before.
return nil
}
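
A minimal usage sketch, assuming a Linux/FreeBSD build: the temp directory stands in for the real init path, and 0/0 stand in for the remapped root UID/GID the daemon would pass:

```go
package main

import (
	"io/ioutil"
	"log"

	"github.com/docker/docker/daemon/initlayer"
)

func main() {
	// Throwaway directory standing in for a container or plugin init layer.
	dir, err := ioutil.TempDir("", "initlayer-example")
	if err != nil {
		log.Fatal(err)
	}
	if err := initlayer.Setup(dir, 0, 0); err != nil {
		log.Fatal(err)
	}
	log.Println("init layer populated under", dir)
}
```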

View file

@ -0,0 +1,13 @@
// +build windows
package initlayer
// Setup populates a directory with mountpoints suitable
// for bind-mounting dockerinit into the container. The mountpoint is simply an
// empty file at /.dockerinit
//
// This extra layer is used by all containers as the top-most ro layer. It protects
// the container from unwanted side-effects on the rw layer.
func Setup(initLayer string, rootUID, rootGID int) error {
return nil
}

View file

@ -3,6 +3,7 @@ package metadata
import (
"github.com/docker/docker/image/v1"
"github.com/docker/docker/layer"
"github.com/pkg/errors"
)
// V1IDService maps v1 IDs to layers on disk.
@ -24,6 +25,9 @@ func (idserv *V1IDService) namespace() string {
// Get finds a layer by its V1 ID.
func (idserv *V1IDService) Get(v1ID, registry string) (layer.DiffID, error) {
if idserv.store == nil {
return "", errors.New("no v1IDService storage")
}
if err := v1.ValidateID(v1ID); err != nil {
return layer.DiffID(""), err
}
@ -37,6 +41,9 @@ func (idserv *V1IDService) Get(v1ID, registry string) (layer.DiffID, error) {
// Set associates an image with a V1 ID.
func (idserv *V1IDService) Set(v1ID, registry string, id layer.DiffID) error {
if idserv.store == nil {
return nil
}
if err := v1.ValidateID(v1ID); err != nil {
return err
}

View file

@ -5,6 +5,7 @@ import (
"crypto/sha256"
"encoding/hex"
"encoding/json"
"errors"
"github.com/docker/distribution/digest"
"github.com/docker/docker/api/types"
@ -125,6 +126,9 @@ func (serv *v2MetadataService) digestKey(dgst digest.Digest) string {
// GetMetadata finds the metadata associated with a layer DiffID.
func (serv *v2MetadataService) GetMetadata(diffID layer.DiffID) ([]V2Metadata, error) {
if serv.store == nil {
return nil, errors.New("no metadata storage")
}
jsonBytes, err := serv.store.Get(serv.diffIDNamespace(), serv.diffIDKey(diffID))
if err != nil {
return nil, err
@ -140,6 +144,9 @@ func (serv *v2MetadataService) GetMetadata(diffID layer.DiffID) ([]V2Metadata, e
// GetDiffID finds a layer DiffID from a digest.
func (serv *v2MetadataService) GetDiffID(dgst digest.Digest) (layer.DiffID, error) {
if serv.store == nil {
return layer.DiffID(""), errors.New("no metadata storage")
}
diffIDBytes, err := serv.store.Get(serv.digestNamespace(), serv.digestKey(dgst))
if err != nil {
return layer.DiffID(""), err
@ -151,6 +158,12 @@ func (serv *v2MetadataService) GetDiffID(dgst digest.Digest) (layer.DiffID, erro
// Add associates metadata with a layer DiffID. If too many metadata entries are
// present, the oldest one is dropped.
func (serv *v2MetadataService) Add(diffID layer.DiffID, metadata V2Metadata) error {
if serv.store == nil {
// Support a service which has no backend storage, in this case
// an add becomes a no-op.
// TODO: implement in memory storage
return nil
}
oldMetadata, err := serv.GetMetadata(diffID)
if err != nil {
oldMetadata = nil
@ -192,6 +205,12 @@ func (serv *v2MetadataService) TagAndAdd(diffID layer.DiffID, hmacKey []byte, me
// Remove unassociates a metadata entry from a layer DiffID.
func (serv *v2MetadataService) Remove(metadata V2Metadata) error {
if serv.store == nil {
// Support a service which has no backend storage, in this case
// an remove becomes a no-op.
// TODO: implement in memory storage
return nil
}
diffID, err := serv.GetDiffID(metadata.Digest)
if err != nil {
return err

View file

@ -102,11 +102,7 @@ func NewV2Repository(ctx context.Context, repoInfo *registry.RepositoryInfo, end
scope := auth.RepositoryScope{
Repository: repoName,
Actions: actions,
}
// Keep image repositories blank for scope compatibility
if repoInfo.Class != "image" {
scope.Class = repoInfo.Class
Class: repoInfo.Class,
}
creds := registry.NewStaticCredentialStore(authConfig)

View file

@ -0,0 +1,44 @@
package utils
import (
"io"
"net"
"os"
"syscall"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/pkg/progress"
"github.com/docker/docker/pkg/streamformatter"
)
// WriteDistributionProgress is a helper for writing progress from chan to JSON
// stream with an optional cancel function.
func WriteDistributionProgress(cancelFunc func(), outStream io.Writer, progressChan <-chan progress.Progress) {
progressOutput := streamformatter.NewJSONStreamFormatter().NewProgressOutput(outStream, false)
operationCancelled := false
for prog := range progressChan {
if err := progressOutput.WriteProgress(prog); err != nil && !operationCancelled {
// don't log broken pipe errors as this is the normal case when a client aborts
if isBrokenPipe(err) {
logrus.Info("Pull session cancelled")
} else {
logrus.Errorf("error writing progress to client: %v", err)
}
cancelFunc()
operationCancelled = true
// Don't return, because we need to continue draining
// progressChan until it's closed to avoid a deadlock.
}
}
}
func isBrokenPipe(e error) bool {
if netErr, ok := e.(*net.OpError); ok {
e = netErr.Err
if sysErr, ok := netErr.Err.(*os.SyscallError); ok {
e = sysErr.Err
}
}
return e == syscall.EPIPE
}
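
A hedged sketch of the pattern the daemon's pull/push paths (earlier in this commit) use with this helper; the layer ID and action are invented:

```go
package main

import (
	"context"
	"os"

	progressutils "github.com/docker/docker/distribution/utils"
	"github.com/docker/docker/pkg/progress"
)

func main() {
	progressChan := make(chan progress.Progress, 100)
	writesDone := make(chan struct{})
	_, cancel := context.WithCancel(context.Background())

	// Writer side: stream JSON progress to the client, cancelling the
	// operation if the client goes away (broken pipe).
	go func() {
		progressutils.WriteDistributionProgress(cancel, os.Stdout, progressChan)
		close(writesDone)
	}()

	// Producer side: anything accepting a progress.Output can feed the channel.
	out := progress.ChanOutput(progressChan)
	progress.Update(out, "0123456789ab", "Downloading")

	close(progressChan) // lets the writer goroutine drain and exit
	<-writesDone
}
```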

View file

@ -109,93 +109,6 @@ commands and options, see the
## Developing a plugin
Currently, there are no CLI commands available to help you develop a plugin.
This is expected to change in a future release. The manual process for creating
plugins is described in this section.
### Plugin location and files
Plugins are stored in `/var/lib/docker/plugins`. The `plugins.json` file lists
each plugin's configuration, and each plugin is stored in a directory with a
unique identifier.
```bash
# ls -la /var/lib/docker/plugins
total 20
drwx------ 4 root root 4096 Aug 8 18:03 .
drwx--x--x 12 root root 4096 Aug 8 17:53 ..
drwxr-xr-x 3 root root 4096 Aug 8 17:56 cd851ce43a403
-rw------- 1 root root 2107 Aug 8 18:03 plugins.json
```
### Format of plugins.json
The `plugins.json` is an inventory of all installed plugins. This example shows
a `plugins.json` with a single plugin installed.
```json
# cat plugins.json
{
"cd851ce43a403": {
"plugin": {
"Config": {
"Args": {
"Value": null,
"Settable": null,
"Description": "",
"Name": ""
},
"Env": null,
"Devices": null,
"Mounts": null,
"Capabilities": [
"CAP_SYS_ADMIN"
],
"Description": "sshFS plugin for Docker",
"Documentation": "https://docs.docker.com/engine/extend/plugins/",
"Interface": {
"Socket": "sshfs.sock",
"Types": [
"docker.volumedriver/1.0"
]
},
"Entrypoint": [
"/go/bin/docker-volume-sshfs"
],
"Workdir": "",
"User": {},
"Network": {
"Type": "host"
}
},
"Config": {
"Devices": null,
"Args": null,
"Env": [],
"Mounts": []
},
"Active": true,
"Tag": "latest",
"Name": "vieux/sshfs",
"Id": "cd851ce43a403"
}
}
}
```
### Contents of a plugin directory
Each directory within `/var/lib/docker/plugins/` contains a `rootfs` directory
and two JSON files.
```bash
# ls -la /var/lib/docker/plugins/cd851ce43a403
total 12
drwx------ 19 root root 4096 Aug 8 17:56 rootfs
-rw-r--r-- 1 root root 50 Aug 8 17:56 plugin-settings.json
-rw------- 1 root root 347 Aug 8 17:56 config.json
```
#### The rootfs directory
The `rootfs` directory represents the root filesystem of the plugin. In this
example, it was created from a Dockerfile:
@ -206,20 +119,17 @@ plugin's filesystem for docker to communicate with the plugin.
```bash
$ git clone https://github.com/vieux/docker-volume-sshfs
$ cd docker-volume-sshfs
$ docker build -t rootfs .
$ id=$(docker create rootfs true) # id was cd851ce43a403 when the image was created
$ sudo mkdir -p /var/lib/docker/plugins/$id/rootfs
$ sudo docker export "$id" | sudo tar -x -C /var/lib/docker/plugins/$id/rootfs
$ sudo chgrp -R docker /var/lib/docker/plugins/
$ docker build -t rootfsimage .
$ id=$(docker create rootfsimage true) # id was cd851ce43a403 when the image was created
$ sudo mkdir -p myplugin/rootfs
$ sudo docker export "$id" | sudo tar -x -C myplugin/rootfs
$ docker rm -vf "$id"
$ docker rmi rootfs
$ docker rmi rootfsimage
```
#### The config.json and plugin-settings.json files
#### The config.json file
The `config.json` file describes the plugin. The `plugin-settings.json` file
contains runtime parameters and is only required if your plugin has runtime
parameters. [See the Plugins Config reference](config.md).
The `config.json` file describes the plugin. See the [plugins config reference](config.md).
Consider the following `config.json` file.
@ -242,56 +152,15 @@ Consider the following `config.json` file.
This plugin is a volume driver. It requires a `host` network and the
`CAP_SYS_ADMIN` capability. It depends upon the `/go/bin/docker-volume-sshfs`
entrypoint and uses the `/run/docker/plugins/sshfs.sock` socket to communicate
with Docker Engine.
Consider the following `plugin-settings.json` file.
```json
{
"Devices": null,
"Args": null,
"Env": [],
"Mounts": []
}
```
This plugin has no runtime parameters.
Each of these JSON files is included as part of `plugins.json`, as you can see
by looking back at the example above. After a plugin is installed, `config.json`
is read-only, but `plugin-settings.json` is read-write, and includes all runtime
configuration options for the plugin.
with Docker Engine. This plugin has no runtime parameters.
### Creating the plugin
Follow these steps to create a plugin:
A new plugin can be created by running
`docker plugin create <plugin-name> ./path/to/plugin/data` where the plugin
data contains a plugin configuration file `config.json` and a root filesystem
in subdirectory `rootfs`.
1. Choose a name for the plugin. Plugin name uses the same format as images,
for example: `<repo_name>/<name>`.
2. Create a `rootfs` and export it to `/var/lib/docker/plugins/$id/rootfs`
using `docker export`. See [The rootfs directory](#the-rootfs-directory) for
an example of creating a `rootfs`.
3. Create a `config.json` file in `/var/lib/docker/plugins/$id/`.
4. Create a `plugin-settings.json` file if needed.
5. Create or add a section to `/var/lib/docker/plugins/plugins.json`. Use
`<user>/<name>` as “Name” and `$id` as “Id”.
6. Restart the Docker Engine service.
7. Run `docker plugin ls`.
* If your plugin is enabled, you can push it to the
registry.
* If the plugin is not listed or is disabled, something went wrong.
Check the daemon logs for errors.
8. If you are not already logged in, use `docker login` to authenticate against
the registry so that you can push to it.
9. Run `docker plugin push <repo_name>/<name>` to push the plugin.
The plugin can now be used by any user with access to your registry.
After that the plugin `<plugin-name>` will show up in `docker plugin ls`.
Plugins can be pushed to remote registries with
`docker plugin push <plugin-name>`.

View file

@ -16,9 +16,9 @@ keywords: "plugin, create"
# plugin create
```markdown
Usage: docker plugin create [OPTIONS] PLUGIN[:tag] PATH-TO-ROOTFS(rootfs + config.json)
Usage: docker plugin create [OPTIONS] PLUGIN PLUGIN-DATA-DIR
Create a plugin from a rootfs and configuration
Create a plugin from a rootfs and configuration. Plugin data directory must contain config.json and rootfs directory.
Options:
--compress Compress the context using gzip

View file

@ -36,8 +36,7 @@ $ docker plugin inspect tiborvass/no-remove:latest
```JSON
{
"Id": "8c74c978c434745c3ade82f1bc0acf38d04990eaf494fa507c16d9f1daa99c21",
"Name": "tiborvass/no-remove",
"Tag": "latest",
"Name": "tiborvass/no-remove:latest",
"Enabled": true,
"Config": {
"Mounts": [

View file

@ -21,6 +21,7 @@ Usage: docker plugin install [OPTIONS] PLUGIN [KEY=VALUE...]
Install a plugin
Options:
--alias string Local name for plugin
--disable Do not enable the plugin on install
--grant-all-permissions Grant all permissions necessary to run the plugin
--help Print usage

View file

@ -12,10 +12,10 @@ import (
)
var (
authzPluginName = "riyaz/authz-no-volume-plugin"
authzPluginName = "tonistiigi/authz-no-volume-plugin"
authzPluginTag = "latest"
authzPluginNameWithTag = authzPluginName + ":" + authzPluginTag
authzPluginBadManifestName = "riyaz/authz-plugin-bad-manifest"
authzPluginBadManifestName = "tonistiigi/authz-plugin-bad-manifest"
nonexistentAuthzPluginName = "riyaz/nonexistent-authz-plugin"
)

View file

@ -425,20 +425,20 @@ func (s *DockerSuite) TestInspectPlugin(c *check.C) {
out, _, err := dockerCmdWithError("inspect", "--type", "plugin", "--format", "{{.Name}}", pNameWithTag)
c.Assert(err, checker.IsNil)
c.Assert(strings.TrimSpace(out), checker.Equals, pName)
c.Assert(strings.TrimSpace(out), checker.Equals, pNameWithTag)
out, _, err = dockerCmdWithError("inspect", "--format", "{{.Name}}", pNameWithTag)
c.Assert(err, checker.IsNil)
c.Assert(strings.TrimSpace(out), checker.Equals, pName)
c.Assert(strings.TrimSpace(out), checker.Equals, pNameWithTag)
// Even without tag the inspect still work
out, _, err = dockerCmdWithError("inspect", "--type", "plugin", "--format", "{{.Name}}", pName)
out, _, err = dockerCmdWithError("inspect", "--type", "plugin", "--format", "{{.Name}}", pNameWithTag)
c.Assert(err, checker.IsNil)
c.Assert(strings.TrimSpace(out), checker.Equals, pName)
c.Assert(strings.TrimSpace(out), checker.Equals, pNameWithTag)
out, _, err = dockerCmdWithError("inspect", "--format", "{{.Name}}", pName)
out, _, err = dockerCmdWithError("inspect", "--format", "{{.Name}}", pNameWithTag)
c.Assert(err, checker.IsNil)
c.Assert(strings.TrimSpace(out), checker.Equals, pName)
c.Assert(strings.TrimSpace(out), checker.Equals, pNameWithTag)
_, _, err = dockerCmdWithError("plugin", "disable", pNameWithTag)
c.Assert(err, checker.IsNil)

View file

@ -777,7 +777,7 @@ func (s *DockerNetworkSuite) TestDockerPluginV2NetworkDriver(c *check.C) {
testRequires(c, DaemonIsLinux, IsAmd64, Network)
var (
npName = "tiborvass/test-docker-netplugin"
npName = "tonistiigi/test-docker-netplugin"
npTag = "latest"
npNameWithTag = npName + ":" + npTag
)

View file

@ -1,6 +1,8 @@
package main
import (
"fmt"
"github.com/docker/docker/pkg/integration/checker"
"github.com/go-check/check"
@ -12,7 +14,7 @@ import (
var (
pluginProcessName = "sample-volume-plugin"
pName = "tiborvass/sample-volume-plugin"
pName = "tonistiigi/sample-volume-plugin"
pTag = "latest"
pNameWithTag = pName + ":" + pTag
)
@ -139,11 +141,18 @@ func (s *DockerSuite) TestPluginInstallArgs(c *check.C) {
c.Assert(strings.TrimSpace(env), checker.Equals, "[DEBUG=1]")
}
func (s *DockerSuite) TestPluginInstallImage(c *check.C) {
testRequires(c, DaemonIsLinux, IsAmd64, Network)
out, _, err := dockerCmdWithError("plugin", "install", "redis")
func (s *DockerRegistrySuite) TestPluginInstallImage(c *check.C) {
testRequires(c, DaemonIsLinux, IsAmd64)
repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
// tag the image to upload it to the private registry
dockerCmd(c, "tag", "busybox", repoName)
// push the image to the registry
dockerCmd(c, "push", repoName)
out, _, err := dockerCmdWithError("plugin", "install", repoName)
c.Assert(err, checker.NotNil)
c.Assert(out, checker.Contains, "content is not a plugin")
c.Assert(out, checker.Contains, "target is image")
}
func (s *DockerSuite) TestPluginEnableDisableNegative(c *check.C) {
@ -179,6 +188,9 @@ func (s *DockerSuite) TestPluginCreate(c *check.C) {
err = ioutil.WriteFile(filepath.Join(temp, "config.json"), []byte(data), 0644)
c.Assert(err, checker.IsNil)
err = os.MkdirAll(filepath.Join(temp, "rootfs"), 0700)
c.Assert(err, checker.IsNil)
out, _, err := dockerCmdWithError("plugin", "create", name, temp)
c.Assert(err, checker.IsNil)
c.Assert(out, checker.Contains, name)

View file

@ -31,7 +31,7 @@ import (
icmd "github.com/docker/docker/pkg/integration/cmd"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/stringutils"
"github.com/docker/go-units"
units "github.com/docker/go-units"
"github.com/go-check/check"
)
@ -250,11 +250,7 @@ func deleteAllPlugins(c *check.C) {
var errs []string
for _, p := range plugins {
pluginName := p.Name
tag := p.Tag
if tag == "" {
tag = "latest"
}
status, b, err := sockRequest("DELETE", "/plugins/"+pluginName+":"+tag+"?force=1", nil)
status, b, err := sockRequest("DELETE", "/plugins/"+pluginName+"?force=1", nil)
if err != nil {
errs = append(errs, err.Error())
continue

View file

@ -44,6 +44,17 @@ func ChanOutput(progressChan chan<- Progress) Output {
return chanOutput(progressChan)
}
type discardOutput struct{}
func (discardOutput) WriteProgress(Progress) error {
return nil
}
// DiscardOutput returns an Output that discards progress
func DiscardOutput() Output {
return discardOutput{}
}
// Update is a convenience function to write a progress update to the channel.
func Update(out Output, id, action string) {
out.WriteProgress(Progress{ID: id, Action: action})
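
Presumably `DiscardOutput` exists for callers that need a `progress.Output` but have nowhere to send progress; a minimal sketch (the ID and action are invented):

```go
package main

import "github.com/docker/docker/pkg/progress"

func main() {
	out := progress.DiscardOutput()
	// Accepted and silently dropped.
	progress.Update(out, "0123456789ab", "Extracting")
}
```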

View file

@ -3,37 +3,39 @@
package plugin
import (
"bytes"
"archive/tar"
"compress/gzip"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"path"
"path/filepath"
"reflect"
"regexp"
"strings"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/manifest/schema2"
"github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/distribution"
progressutils "github.com/docker/docker/distribution/utils"
"github.com/docker/docker/distribution/xfer"
"github.com/docker/docker/image"
"github.com/docker/docker/layer"
"github.com/docker/docker/pkg/chrootarchive"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/plugin/distribution"
"github.com/docker/docker/pkg/pools"
"github.com/docker/docker/pkg/progress"
"github.com/docker/docker/plugin/v2"
"github.com/docker/docker/reference"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
var (
validFullID = regexp.MustCompile(`^([a-f0-9]{64})$`)
validPartialID = regexp.MustCompile(`^([a-f0-9]{1,64})$`)
)
// Disable deactivates a plugin. This means resources (volumes, networks) can't use it.
func (pm *Manager) Disable(name string, config *types.PluginDisableConfig) error {
p, err := pm.pluginStore.GetByName(name)
func (pm *Manager) Disable(refOrID string, config *types.PluginDisableConfig) error {
p, err := pm.config.Store.GetV2Plugin(refOrID)
if err != nil {
return err
}
@ -48,13 +50,13 @@ func (pm *Manager) Disable(name string, config *types.PluginDisableConfig) error
if err := pm.disable(p, c); err != nil {
return err
}
pm.pluginEventLogger(p.GetID(), name, "disable")
pm.config.LogPluginEvent(p.GetID(), refOrID, "disable")
return nil
}
// Enable activates a plugin, which implies that it is ready to be used by containers.
func (pm *Manager) Enable(name string, config *types.PluginEnableConfig) error {
p, err := pm.pluginStore.GetByName(name)
func (pm *Manager) Enable(refOrID string, config *types.PluginEnableConfig) error {
p, err := pm.config.Store.GetV2Plugin(refOrID)
if err != nil {
return err
}
@ -63,71 +65,74 @@ func (pm *Manager) Enable(name string, config *types.PluginEnableConfig) error {
if err := pm.enable(p, c, false); err != nil {
return err
}
pm.pluginEventLogger(p.GetID(), name, "enable")
pm.config.LogPluginEvent(p.GetID(), refOrID, "enable")
return nil
}
// Inspect examines a plugin config
func (pm *Manager) Inspect(refOrID string) (tp types.Plugin, err error) {
// Match on full ID
if validFullID.MatchString(refOrID) {
p, err := pm.pluginStore.GetByID(refOrID)
if err == nil {
return p.PluginObj, nil
}
}
// Match on full name
if pluginName, err := getPluginName(refOrID); err == nil {
if p, err := pm.pluginStore.GetByName(pluginName); err == nil {
return p.PluginObj, nil
}
}
// Match on partial ID
if validPartialID.MatchString(refOrID) {
p, err := pm.pluginStore.Search(refOrID)
if err == nil {
return p.PluginObj, nil
}
return tp, err
}
return tp, fmt.Errorf("no such plugin name or ID associated with %q", refOrID)
}
func (pm *Manager) pull(name string, metaHeader http.Header, authConfig *types.AuthConfig) (reference.Named, distribution.PullData, error) {
ref, err := distribution.GetRef(name)
if err != nil {
logrus.Debugf("error in distribution.GetRef: %v", err)
return nil, nil, err
}
name = ref.String()
if p, _ := pm.pluginStore.GetByName(name); p != nil {
logrus.Debug("plugin already exists")
return nil, nil, fmt.Errorf("%s exists", name)
}
pd, err := distribution.Pull(ref, pm.registryService, metaHeader, authConfig)
if err != nil {
logrus.Debugf("error in distribution.Pull(): %v", err)
return nil, nil, err
}
return ref, pd, nil
}
func computePrivileges(pd distribution.PullData) (types.PluginPrivileges, error) {
config, err := pd.Config()
func (pm *Manager) Inspect(refOrID string) (tp *types.Plugin, err error) {
p, err := pm.config.Store.GetV2Plugin(refOrID)
if err != nil {
return nil, err
}
var c types.PluginConfig
if err := json.Unmarshal(config, &c); err != nil {
return nil, err
}
return &p.PluginObj, nil
}
func (pm *Manager) pull(ctx context.Context, ref reference.Named, config *distribution.ImagePullConfig, outStream io.Writer) error {
if outStream != nil {
// Include a buffer so that slow client connections don't affect
// transfer performance.
progressChan := make(chan progress.Progress, 100)
writesDone := make(chan struct{})
defer func() {
close(progressChan)
<-writesDone
}()
var cancelFunc context.CancelFunc
ctx, cancelFunc = context.WithCancel(ctx)
go func() {
progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan)
close(writesDone)
}()
config.ProgressOutput = progress.ChanOutput(progressChan)
} else {
config.ProgressOutput = progress.DiscardOutput()
}
return distribution.Pull(ctx, ref, config)
}
type tempConfigStore struct {
config []byte
configDigest digest.Digest
}
func (s *tempConfigStore) Put(c []byte) (digest.Digest, error) {
dgst := digest.FromBytes(c)
s.config = c
s.configDigest = dgst
return dgst, nil
}
func (s *tempConfigStore) Get(d digest.Digest) ([]byte, error) {
if d != s.configDigest {
return nil, digest.ErrDigestNotFound
}
return s.config, nil
}
func (s *tempConfigStore) RootFSFromConfig(c []byte) (*image.RootFS, error) {
return configToRootFS(c)
}
func computePrivileges(c types.PluginConfig) (types.PluginPrivileges, error) {
var privileges types.PluginPrivileges
if c.Network.Type != "null" && c.Network.Type != "bridge" && c.Network.Type != "" {
privileges = append(privileges, types.PluginPrivilege{
@ -173,67 +178,89 @@ func computePrivileges(pd distribution.PullData) (types.PluginPrivileges, error)
}
// Privileges pulls a plugin config and computes the privileges required to install it.
func (pm *Manager) Privileges(name string, metaHeader http.Header, authConfig *types.AuthConfig) (types.PluginPrivileges, error) {
_, pd, err := pm.pull(name, metaHeader, authConfig)
if err != nil {
func (pm *Manager) Privileges(ctx context.Context, ref reference.Named, metaHeader http.Header, authConfig *types.AuthConfig) (types.PluginPrivileges, error) {
// create image store instance
cs := &tempConfigStore{}
// DownloadManager is left unset because only the configuration is pulled.
pluginPullConfig := &distribution.ImagePullConfig{
Config: distribution.Config{
MetaHeaders: metaHeader,
AuthConfig: authConfig,
RegistryService: pm.config.RegistryService,
ImageEventLogger: func(string, string, string) {},
ImageStore: cs,
},
Schema2Types: distribution.PluginTypes,
}
if err := pm.pull(ctx, ref, pluginPullConfig, nil); err != nil {
return nil, err
}
return computePrivileges(pd)
if cs.config == nil {
return nil, errors.New("no configuration pulled")
}
var config types.PluginConfig
if err := json.Unmarshal(cs.config, &config); err != nil {
return nil, err
}
return computePrivileges(config)
}
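For context, this is roughly the shape of the privilege list that computePrivileges produces and that a client echoes back on install. The sketch below is illustrative only: the field values are made up, not the helper's exact output.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types"
)

func main() {
	// Illustrative only: the kind of entry computePrivileges appends when a
	// plugin config asks for a non-default network type. Values are made up.
	privileges := types.PluginPrivileges{
		{
			Name:        "network",
			Description: "permissions to access a network",
			Value:       []string{"host"},
		},
	}
	out, _ := json.MarshalIndent(privileges, "", "  ")
	fmt.Println(string(out))
}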
// Pull pulls a plugin, checks that the correct privileges were provided, and installs the plugin.
func (pm *Manager) Pull(name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges) (err error) {
ref, pd, err := pm.pull(name, metaHeader, authConfig)
func (pm *Manager) Pull(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer) (err error) {
pm.muGC.RLock()
defer pm.muGC.RUnlock()
// revalidate because Pull is public
nameref, err := reference.ParseNamed(name)
if err != nil {
return errors.Wrapf(err, "failed to parse %q", name)
}
name = reference.WithDefaultTag(nameref).String()
if err := pm.config.Store.validateName(name); err != nil {
return err
}
requiredPrivileges, err := computePrivileges(pd)
tmpRootFSDir, err := ioutil.TempDir(pm.tmpDir(), ".rootfs")
defer os.RemoveAll(tmpRootFSDir)
dm := &downloadManager{
tmpDir: tmpRootFSDir,
blobStore: pm.blobStore,
}
pluginPullConfig := &distribution.ImagePullConfig{
Config: distribution.Config{
MetaHeaders: metaHeader,
AuthConfig: authConfig,
RegistryService: pm.config.RegistryService,
ImageEventLogger: pm.config.LogPluginEvent,
ImageStore: dm,
},
DownloadManager: dm, // todo: reevaluate if possible to substitute distribution/xfer dependencies instead
Schema2Types: distribution.PluginTypes,
}
err = pm.pull(ctx, ref, pluginPullConfig, outStream)
if err != nil {
go pm.GC()
return err
}
if !reflect.DeepEqual(privileges, requiredPrivileges) {
return errors.New("incorrect privileges")
}
pluginID := stringid.GenerateNonCryptoID()
pluginDir := filepath.Join(pm.libRoot, pluginID)
if err := os.MkdirAll(pluginDir, 0755); err != nil {
logrus.Debugf("error in MkdirAll: %v", err)
if _, err := pm.createPlugin(name, dm.configDigest, dm.blobs, tmpRootFSDir, &privileges); err != nil {
return err
}
defer func() {
if err != nil {
if delErr := os.RemoveAll(pluginDir); delErr != nil {
logrus.Warnf("unable to remove %q from failed plugin pull: %v", pluginDir, delErr)
}
}
}()
err = distribution.WritePullData(pd, filepath.Join(pm.libRoot, pluginID), true)
if err != nil {
logrus.Debugf("error in distribution.WritePullData(): %v", err)
return err
}
tag := distribution.GetTag(ref)
p := v2.NewPlugin(ref.Name(), pluginID, pm.runRoot, pm.libRoot, tag)
err = p.InitPlugin()
if err != nil {
return err
}
pm.pluginStore.Add(p)
pm.pluginEventLogger(pluginID, ref.String(), "pull")
return nil
}
// List displays the list of plugins and associated metadata.
func (pm *Manager) List() ([]types.Plugin, error) {
plugins := pm.pluginStore.GetAll()
plugins := pm.config.Store.GetAll()
out := make([]types.Plugin, 0, len(plugins))
for _, p := range plugins {
out = append(out, p.PluginObj)
@ -242,38 +269,211 @@ func (pm *Manager) List() ([]types.Plugin, error) {
}
// Push pushes a plugin to the store.
func (pm *Manager) Push(name string, metaHeader http.Header, authConfig *types.AuthConfig) error {
p, err := pm.pluginStore.GetByName(name)
if err != nil {
return err
}
dest := filepath.Join(pm.libRoot, p.GetID())
config, err := ioutil.ReadFile(filepath.Join(dest, "config.json"))
func (pm *Manager) Push(ctx context.Context, name string, metaHeader http.Header, authConfig *types.AuthConfig, outStream io.Writer) error {
p, err := pm.config.Store.GetV2Plugin(name)
if err != nil {
return err
}
var dummy types.Plugin
err = json.Unmarshal(config, &dummy)
ref, err := reference.ParseNamed(p.Name())
if err != nil {
return err
return errors.Wrapf(err, "plugin has invalid name %v for push", p.Name())
}
rootfs, err := archive.Tar(p.Rootfs, archive.Gzip)
if err != nil {
return err
}
defer rootfs.Close()
var po progress.Output
if outStream != nil {
// Include a buffer so that slow client connections don't affect
// transfer performance.
progressChan := make(chan progress.Progress, 100)
_, err = distribution.Push(name, pm.registryService, metaHeader, authConfig, ioutil.NopCloser(bytes.NewReader(config)), rootfs)
// XXX: Ignore returning digest for now.
// Since digest needs to be written to the ProgressWriter.
return err
writesDone := make(chan struct{})
defer func() {
close(progressChan)
<-writesDone
}()
var cancelFunc context.CancelFunc
ctx, cancelFunc = context.WithCancel(ctx)
go func() {
progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan)
close(writesDone)
}()
po = progress.ChanOutput(progressChan)
} else {
po = progress.DiscardOutput()
}
// TODO: replace these with manager
is := &pluginConfigStore{
pm: pm,
plugin: p,
}
ls := &pluginLayerProvider{
pm: pm,
plugin: p,
}
rs := &pluginReference{
name: ref,
pluginID: p.Config,
}
uploadManager := xfer.NewLayerUploadManager(3)
imagePushConfig := &distribution.ImagePushConfig{
Config: distribution.Config{
MetaHeaders: metaHeader,
AuthConfig: authConfig,
ProgressOutput: po,
RegistryService: pm.config.RegistryService,
ReferenceStore: rs,
ImageEventLogger: pm.config.LogPluginEvent,
ImageStore: is,
RequireSchema2: true,
},
ConfigMediaType: schema2.MediaTypePluginConfig,
LayerStore: ls,
UploadManager: uploadManager,
}
return distribution.Push(ctx, ref, imagePushConfig)
}
type pluginReference struct {
name reference.Named
pluginID digest.Digest
}
func (r *pluginReference) References(id digest.Digest) []reference.Named {
if r.pluginID != id {
return nil
}
return []reference.Named{r.name}
}
func (r *pluginReference) ReferencesByName(ref reference.Named) []reference.Association {
return []reference.Association{
{
Ref: r.name,
ID: r.pluginID,
},
}
}
func (r *pluginReference) Get(ref reference.Named) (digest.Digest, error) {
if r.name.String() != ref.String() {
return digest.Digest(""), reference.ErrDoesNotExist
}
return r.pluginID, nil
}
func (r *pluginReference) AddTag(ref reference.Named, id digest.Digest, force bool) error {
// Read only, ignore
return nil
}
func (r *pluginReference) AddDigest(ref reference.Canonical, id digest.Digest, force bool) error {
// Read only, ignore
return nil
}
func (r *pluginReference) Delete(ref reference.Named) (bool, error) {
// Read only, ignore
return false, nil
}
type pluginConfigStore struct {
pm *Manager
plugin *v2.Plugin
}
func (s *pluginConfigStore) Put([]byte) (digest.Digest, error) {
return digest.Digest(""), errors.New("cannot store config on push")
}
func (s *pluginConfigStore) Get(d digest.Digest) ([]byte, error) {
if s.plugin.Config != d {
return nil, errors.New("plugin not found")
}
rwc, err := s.pm.blobStore.Get(d)
if err != nil {
return nil, err
}
defer rwc.Close()
return ioutil.ReadAll(rwc)
}
func (s *pluginConfigStore) RootFSFromConfig(c []byte) (*image.RootFS, error) {
return configToRootFS(c)
}
type pluginLayerProvider struct {
pm *Manager
plugin *v2.Plugin
}
func (p *pluginLayerProvider) Get(id layer.ChainID) (distribution.PushLayer, error) {
rootFS := rootFSFromPlugin(p.plugin.PluginObj.Config.Rootfs)
var i int
for i = 1; i <= len(rootFS.DiffIDs); i++ {
if layer.CreateChainID(rootFS.DiffIDs[:i]) == id {
break
}
}
if i > len(rootFS.DiffIDs) {
return nil, errors.New("layer not found")
}
return &pluginLayer{
pm: p.pm,
diffIDs: rootFS.DiffIDs[:i],
blobs: p.plugin.Blobsums[:i],
}, nil
}
type pluginLayer struct {
pm *Manager
diffIDs []layer.DiffID
blobs []digest.Digest
}
func (l *pluginLayer) ChainID() layer.ChainID {
return layer.CreateChainID(l.diffIDs)
}
func (l *pluginLayer) DiffID() layer.DiffID {
return l.diffIDs[len(l.diffIDs)-1]
}
func (l *pluginLayer) Parent() distribution.PushLayer {
if len(l.diffIDs) == 1 {
return nil
}
return &pluginLayer{
pm: l.pm,
diffIDs: l.diffIDs[:len(l.diffIDs)-1],
blobs: l.blobs[:len(l.diffIDs)-1],
}
}
func (l *pluginLayer) Open() (io.ReadCloser, error) {
return l.pm.blobStore.Get(l.blobs[len(l.diffIDs)-1])
}
func (l *pluginLayer) Size() (int64, error) {
return l.pm.blobStore.Size(l.blobs[len(l.diffIDs)-1])
}
func (l *pluginLayer) MediaType() string {
return schema2.MediaTypeLayer
}
func (l *pluginLayer) Release() {
// Nothing needs to be released, no references held
}
// Remove deletes the plugin's root directory.
func (pm *Manager) Remove(name string, config *types.PluginRmConfig) (err error) {
p, err := pm.pluginStore.GetByName(name)
func (pm *Manager) Remove(name string, config *types.PluginRmConfig) error {
p, err := pm.config.Store.GetV2Plugin(name)
pm.mu.RLock()
c := pm.cMap[p]
pm.mu.RUnlock()
@ -297,95 +497,194 @@ func (pm *Manager) Remove(name string, config *types.PluginRmConfig) (err error)
}
}
id := p.GetID()
pluginDir := filepath.Join(pm.libRoot, id)
defer func() {
if err == nil || config.ForceRemove {
pm.pluginStore.Remove(p)
pm.pluginEventLogger(id, name, "remove")
}
go pm.GC()
}()
if err = os.RemoveAll(pluginDir); err != nil {
return errors.Wrap(err, "failed to remove plugin directory")
id := p.GetID()
pm.config.Store.Remove(p)
pluginDir := filepath.Join(pm.config.Root, id)
if err := os.RemoveAll(pluginDir); err != nil {
logrus.Warnf("unable to remove %q from plugin remove: %v", pluginDir, err)
}
pm.config.LogPluginEvent(id, name, "remove")
return nil
}
// Set sets plugin args
func (pm *Manager) Set(name string, args []string) error {
p, err := pm.pluginStore.GetByName(name)
p, err := pm.config.Store.GetV2Plugin(name)
if err != nil {
return err
}
return p.Set(args)
if err := p.Set(args); err != nil {
return err
}
return pm.save(p)
}
// CreateFromContext creates a plugin from the given tar context, which contains
// both the rootfs and the config.json, and a repo name with an optional tag.
func (pm *Manager) CreateFromContext(ctx context.Context, tarCtx io.Reader, options *types.PluginCreateOptions) error {
repoName := options.RepoName
ref, err := distribution.GetRef(repoName)
func (pm *Manager) CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *types.PluginCreateOptions) (err error) {
pm.muGC.RLock()
defer pm.muGC.RUnlock()
ref, err := reference.ParseNamed(options.RepoName)
if err != nil {
return errors.Wrapf(err, "failed to parse reference %v", options.RepoName)
}
if _, ok := ref.(reference.Canonical); ok {
return errors.Errorf("canonical references are not permitted")
}
name := reference.WithDefaultTag(ref).String()
if err := pm.config.Store.validateName(name); err != nil { // fast check, real check is in createPlugin()
return err
}
tmpRootFSDir, err := ioutil.TempDir(pm.tmpDir(), ".rootfs")
defer os.RemoveAll(tmpRootFSDir)
if err != nil {
return errors.Wrap(err, "failed to create temp directory")
}
var configJSON []byte
rootFS := splitConfigRootFSFromTar(tarCtx, &configJSON)
rootFSBlob, err := pm.blobStore.New()
if err != nil {
return err
}
defer rootFSBlob.Close()
gzw := gzip.NewWriter(rootFSBlob)
layerDigester := digest.Canonical.New()
rootFSReader := io.TeeReader(rootFS, io.MultiWriter(gzw, layerDigester.Hash()))
name := ref.Name()
tag := distribution.GetTag(ref)
pluginID := stringid.GenerateNonCryptoID()
p := v2.NewPlugin(name, pluginID, pm.runRoot, pm.libRoot, tag)
if v, _ := pm.pluginStore.GetByName(p.Name()); v != nil {
return fmt.Errorf("plugin %q already exists", p.Name())
if err := chrootarchive.Untar(rootFSReader, tmpRootFSDir, nil); err != nil {
return err
}
pluginDir := filepath.Join(pm.libRoot, pluginID)
if err := os.MkdirAll(pluginDir, 0755); err != nil {
if err := rootFS.Close(); err != nil {
return err
}
// In case an error happens, remove the created directory.
if err := pm.createFromContext(ctx, tarCtx, pluginDir, repoName, p); err != nil {
if err := os.RemoveAll(pluginDir); err != nil {
logrus.Warnf("unable to remove %q from failed plugin creation: %v", pluginDir, err)
if configJSON == nil {
return errors.New("config not found")
}
if err := gzw.Close(); err != nil {
return errors.Wrap(err, "error closing gzip writer")
}
var config types.PluginConfig
if err := json.Unmarshal(configJSON, &config); err != nil {
return errors.Wrap(err, "failed to parse config")
}
if err := pm.validateConfig(config); err != nil {
return err
}
pm.mu.Lock()
defer pm.mu.Unlock()
rootFSBlobsum, err := rootFSBlob.Commit()
if err != nil {
return err
}
defer func() {
if err != nil {
go pm.GC()
}
return err
}()
config.Rootfs = &types.PluginConfigRootfs{
Type: "layers",
DiffIds: []string{layerDigester.Digest().String()},
}
return nil
}
func (pm *Manager) createFromContext(ctx context.Context, tarCtx io.Reader, pluginDir, repoName string, p *v2.Plugin) error {
if err := chrootarchive.Untar(tarCtx, pluginDir, nil); err != nil {
return err
}
if err := p.InitPlugin(); err != nil {
return err
}
if err := pm.pluginStore.Add(p); err != nil {
return err
}
pm.pluginEventLogger(p.GetID(), repoName, "create")
return nil
}
func getPluginName(name string) (string, error) {
named, err := reference.ParseNamed(name) // FIXME: validate
configBlob, err := pm.blobStore.New()
if err != nil {
return "", err
return err
}
if reference.IsNameOnly(named) {
named = reference.WithDefaultTag(named)
defer configBlob.Close()
if err := json.NewEncoder(configBlob).Encode(config); err != nil {
return errors.Wrap(err, "error encoding json config")
}
ref, ok := named.(reference.NamedTagged)
if !ok {
return "", fmt.Errorf("invalid name: %s", named.String())
configBlobsum, err := configBlob.Commit()
if err != nil {
return err
}
return ref.String(), nil
p, err := pm.createPlugin(name, configBlobsum, []digest.Digest{rootFSBlobsum}, tmpRootFSDir, nil)
if err != nil {
return err
}
pm.config.LogPluginEvent(p.PluginObj.ID, name, "create")
return nil
}
func (pm *Manager) validateConfig(config types.PluginConfig) error {
return nil // TODO:
}
func splitConfigRootFSFromTar(in io.ReadCloser, config *[]byte) io.ReadCloser {
pr, pw := io.Pipe()
go func() {
tarReader := tar.NewReader(in)
tarWriter := tar.NewWriter(pw)
defer in.Close()
hasRootFS := false
for {
hdr, err := tarReader.Next()
if err == io.EOF {
if !hasRootFS {
pw.CloseWithError(errors.Wrap(err, "no rootfs found"))
return
}
// Signals end of archive.
tarWriter.Close()
pw.Close()
return
}
if err != nil {
pw.CloseWithError(errors.Wrap(err, "failed to read from tar"))
return
}
content := io.Reader(tarReader)
name := path.Clean(hdr.Name)
if path.IsAbs(name) {
name = name[1:]
}
if name == configFileName {
dt, err := ioutil.ReadAll(content)
if err != nil {
pw.CloseWithError(errors.Wrapf(err, "failed to read %s", configFileName))
return
}
*config = dt
}
if parts := strings.Split(name, "/"); len(parts) != 0 && parts[0] == rootFSFileName {
hdr.Name = path.Clean(path.Join(parts[1:]...))
if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(strings.ToLower(hdr.Linkname), rootFSFileName+"/") {
hdr.Linkname = hdr.Linkname[len(rootFSFileName)+1:]
}
if err := tarWriter.WriteHeader(hdr); err != nil {
pw.CloseWithError(errors.Wrap(err, "error writing tar header"))
return
}
if _, err := pools.Copy(tarWriter, content); err != nil {
pw.CloseWithError(errors.Wrap(err, "error copying tar data"))
return
}
hasRootFS = true
} else {
io.Copy(ioutil.Discard, content)
}
}
}()
return pr
}
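As a rough sketch of the context layout this function expects — config.json at the root of the archive and the plugin filesystem under rootfs/ — here is a hypothetical helper that builds such a tar; the file names and contents are made up.

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"time"
)

// buildPluginContext is a hypothetical helper that produces a context tar of
// the shape splitConfigRootFSFromTar expects: a config.json entry at the
// archive root and every filesystem entry under the rootfs/ prefix.
func buildPluginContext(configJSON []byte) (*bytes.Buffer, error) {
	buf := &bytes.Buffer{}
	tw := tar.NewWriter(buf)

	// config.json at the root of the archive.
	if err := tw.WriteHeader(&tar.Header{
		Name:    "config.json",
		Mode:    0600,
		Size:    int64(len(configJSON)),
		ModTime: time.Now(),
	}); err != nil {
		return nil, err
	}
	if _, err := tw.Write(configJSON); err != nil {
		return nil, err
	}

	// One made-up file under rootfs/; a real context carries the plugin's
	// whole root filesystem here.
	entry := []byte("#!/bin/sh\nexec sleep 1d\n")
	if err := tw.WriteHeader(&tar.Header{
		Name:    "rootfs/entrypoint.sh",
		Mode:    0755,
		Size:    int64(len(entry)),
		ModTime: time.Now(),
	}); err != nil {
		return nil, err
	}
	if _, err := tw.Write(entry); err != nil {
		return nil, err
	}

	return buf, tw.Close()
}

func main() {
	buf, err := buildPluginContext([]byte(`{"description":"example"}`))
	if err != nil {
		panic(err)
	}
	fmt.Println("context tar size:", buf.Len())
}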

View file

@ -8,6 +8,7 @@ import (
"net/http"
"github.com/docker/docker/api/types"
"github.com/docker/docker/reference"
"golang.org/x/net/context"
)
@ -24,17 +25,17 @@ func (pm *Manager) Enable(name string, config *types.PluginEnableConfig) error {
}
// Inspect examines a plugin config
func (pm *Manager) Inspect(refOrID string) (tp types.Plugin, err error) {
return tp, errNotSupported
func (pm *Manager) Inspect(refOrID string) (tp *types.Plugin, err error) {
return nil, errNotSupported
}
// Privileges pulls a plugin config and computes the privileges required to install it.
func (pm *Manager) Privileges(name string, metaHeaders http.Header, authConfig *types.AuthConfig) (types.PluginPrivileges, error) {
func (pm *Manager) Privileges(ctx context.Context, ref reference.Named, metaHeader http.Header, authConfig *types.AuthConfig) (types.PluginPrivileges, error) {
return nil, errNotSupported
}
// Pull pulls a plugin, checks that the correct privileges were provided, and installs the plugin.
func (pm *Manager) Pull(name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges) error {
func (pm *Manager) Pull(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, out io.Writer) error {
return errNotSupported
}
@ -44,7 +45,7 @@ func (pm *Manager) List() ([]types.Plugin, error) {
}
// Push pushes a plugin to the store.
func (pm *Manager) Push(name string, metaHeader http.Header, authConfig *types.AuthConfig) error {
func (pm *Manager) Push(ctx context.Context, name string, metaHeader http.Header, authConfig *types.AuthConfig, out io.Writer) error {
return errNotSupported
}
@ -60,6 +61,6 @@ func (pm *Manager) Set(name string, args []string) error {
// CreateFromContext creates a plugin from the given tar context, which contains
// both the rootfs and the config.json, and a repo name with an optional tag.
func (pm *Manager) CreateFromContext(ctx context.Context, tarCtx io.Reader, options *types.PluginCreateOptions) error {
func (pm *Manager) CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *types.PluginCreateOptions) error {
return errNotSupported
}

plugin/blobstore.go (new file, 181 lines)
View file

@ -0,0 +1,181 @@
package plugin
import (
"io"
"io/ioutil"
"os"
"path/filepath"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution/digest"
"github.com/docker/docker/distribution/xfer"
"github.com/docker/docker/image"
"github.com/docker/docker/layer"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/progress"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
type blobstore interface {
New() (WriteCommitCloser, error)
Get(dgst digest.Digest) (io.ReadCloser, error)
Size(dgst digest.Digest) (int64, error)
}
type basicBlobStore struct {
path string
}
func newBasicBlobStore(p string) (*basicBlobStore, error) {
tmpdir := filepath.Join(p, "tmp")
if err := os.MkdirAll(tmpdir, 0700); err != nil {
return nil, errors.Wrapf(err, "failed to mkdir %v", p)
}
return &basicBlobStore{path: p}, nil
}
func (b *basicBlobStore) New() (WriteCommitCloser, error) {
f, err := ioutil.TempFile(filepath.Join(b.path, "tmp"), ".insertion")
if err != nil {
return nil, errors.Wrap(err, "failed to create temp file")
}
return newInsertion(f), nil
}
func (b *basicBlobStore) Get(dgst digest.Digest) (io.ReadCloser, error) {
return os.Open(filepath.Join(b.path, string(dgst.Algorithm()), dgst.Hex()))
}
func (b *basicBlobStore) Size(dgst digest.Digest) (int64, error) {
stat, err := os.Stat(filepath.Join(b.path, string(dgst.Algorithm()), dgst.Hex()))
if err != nil {
return 0, err
}
return stat.Size(), nil
}
func (b *basicBlobStore) gc(whitelist map[digest.Digest]struct{}) {
for _, alg := range []string{string(digest.Canonical)} {
items, err := ioutil.ReadDir(filepath.Join(b.path, alg))
if err != nil {
continue
}
for _, fi := range items {
if _, exists := whitelist[digest.Digest(alg+":"+fi.Name())]; !exists {
p := filepath.Join(b.path, alg, fi.Name())
err := os.RemoveAll(p)
logrus.Debugf("cleaned up blob %v: %v", p, err)
}
}
}
}
// WriteCommitCloser defines an object that can be committed to the blob store.
type WriteCommitCloser interface {
io.WriteCloser
Commit() (digest.Digest, error)
}
type insertion struct {
io.Writer
f *os.File
digester digest.Digester
closed bool
}
func newInsertion(tempFile *os.File) *insertion {
digester := digest.Canonical.New()
return &insertion{f: tempFile, digester: digester, Writer: io.MultiWriter(tempFile, digester.Hash())}
}
func (i *insertion) Commit() (digest.Digest, error) {
p := i.f.Name()
d := filepath.Join(filepath.Join(p, "../../"))
i.f.Sync()
defer os.RemoveAll(p)
if err := i.f.Close(); err != nil {
return "", err
}
i.closed = true
dgst := i.digester.Digest()
if err := os.MkdirAll(filepath.Join(d, string(dgst.Algorithm())), 0700); err != nil {
return "", errors.Wrapf(err, "failed to mkdir %v", d)
}
if err := os.Rename(p, filepath.Join(d, string(dgst.Algorithm()), dgst.Hex())); err != nil {
return "", errors.Wrapf(err, "failed to rename %v", p)
}
return dgst, nil
}
func (i *insertion) Close() error {
if i.closed {
return nil
}
defer os.RemoveAll(i.f.Name())
return i.f.Close()
}
type downloadManager struct {
blobStore blobstore
tmpDir string
blobs []digest.Digest
configDigest digest.Digest
}
func (dm *downloadManager) Download(ctx context.Context, initialRootFS image.RootFS, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) {
for _, l := range layers {
b, err := dm.blobStore.New()
if err != nil {
return initialRootFS, nil, err
}
defer b.Close()
rc, _, err := l.Download(ctx, progressOutput)
if err != nil {
return initialRootFS, nil, errors.Wrap(err, "failed to download")
}
defer rc.Close()
r := io.TeeReader(rc, b)
inflatedLayerData, err := archive.DecompressStream(r)
if err != nil {
return initialRootFS, nil, err
}
digester := digest.Canonical.New()
if _, err := archive.ApplyLayer(dm.tmpDir, io.TeeReader(inflatedLayerData, digester.Hash())); err != nil {
return initialRootFS, nil, err
}
initialRootFS.Append(layer.DiffID(digester.Digest()))
d, err := b.Commit()
if err != nil {
return initialRootFS, nil, err
}
dm.blobs = append(dm.blobs, d)
}
return initialRootFS, nil, nil
}
func (dm *downloadManager) Put(dt []byte) (digest.Digest, error) {
b, err := dm.blobStore.New()
if err != nil {
return "", err
}
defer b.Close()
n, err := b.Write(dt)
if err != nil {
return "", err
}
if n != len(dt) {
return "", io.ErrShortWrite
}
d, err := b.Commit()
dm.configDigest = d
return d, err
}
func (dm *downloadManager) Get(d digest.Digest) ([]byte, error) {
return nil, digest.ErrDigestNotFound
}
func (dm *downloadManager) RootFSFromConfig(c []byte) (*image.RootFS, error) {
return configToRootFS(c)
}
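A minimal sketch of the blob store round trip, written as an in-package test because basicBlobStore is unexported; the directory and contents are made up. Bytes are streamed through New(), content-addressed on Commit(), and read back with Get().

package plugin

import (
	"io/ioutil"
	"os"
	"testing"
)

// TestBlobStoreRoundTrip is a sketch only; it is not part of the commit.
func TestBlobStoreRoundTrip(t *testing.T) {
	dir, err := ioutil.TempDir("", "blobstore-sketch")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(dir)

	bs, err := newBasicBlobStore(dir)
	if err != nil {
		t.Fatal(err)
	}

	// Bytes are streamed through the digester as they are written, so Commit
	// returns the content address and moves the temp file into place.
	w, err := bs.New()
	if err != nil {
		t.Fatal(err)
	}
	if _, err := w.Write([]byte("example blob")); err != nil {
		t.Fatal(err)
	}
	dgst, err := w.Commit()
	if err != nil {
		t.Fatal(err)
	}

	// The same digest reads the blob back.
	rc, err := bs.Get(dgst)
	if err != nil {
		t.Fatal(err)
	}
	defer rc.Close()
	if data, err := ioutil.ReadAll(rc); err != nil || string(data) != "example blob" {
		t.Fatalf("unexpected round trip: %q, %v", data, err)
	}
}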

View file

@ -1,7 +1,6 @@
package store
package plugin
import (
"path/filepath"
"sync"
"github.com/docker/docker/pkg/plugins"
@ -16,8 +15,6 @@ type Store struct {
* to the new model. Legacy plugins use Handle() for registering an
* activation callback.*/
handlers map[string][]func(string, *plugins.Client)
nameToID map[string]string
plugindb string
}
// NewStore creates a Store.
@ -25,7 +22,5 @@ func NewStore(libRoot string) *Store {
return &Store{
plugins: make(map[string]*v2.Plugin),
handlers: make(map[string][]func(string, *plugins.Client)),
nameToID: make(map[string]string),
plugindb: filepath.Join(libRoot, "plugins", "plugins.json"),
}
}

View file

@ -1,222 +0,0 @@
package distribution
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution"
"github.com/docker/distribution/manifest/schema2"
"github.com/docker/docker/api/types"
dockerdist "github.com/docker/docker/distribution"
archive "github.com/docker/docker/pkg/chrootarchive"
"github.com/docker/docker/reference"
"github.com/docker/docker/registry"
"golang.org/x/net/context"
)
// PullData is the plugin config and the rootfs
type PullData interface {
Config() ([]byte, error)
Layer() (io.ReadCloser, error)
}
type pullData struct {
repository distribution.Repository
manifest schema2.Manifest
index int
}
func (pd *pullData) Config() ([]byte, error) {
blobs := pd.repository.Blobs(context.Background())
config, err := blobs.Get(context.Background(), pd.manifest.Config.Digest)
if err != nil {
return nil, err
}
// validate
var p types.Plugin
if err := json.Unmarshal(config, &p); err != nil {
return nil, err
}
return config, nil
}
func (pd *pullData) Layer() (io.ReadCloser, error) {
if pd.index >= len(pd.manifest.Layers) {
return nil, io.EOF
}
blobs := pd.repository.Blobs(context.Background())
rsc, err := blobs.Open(context.Background(), pd.manifest.Layers[pd.index].Digest)
if err != nil {
return nil, err
}
pd.index++
return rsc, nil
}
// GetRef returns the distribution reference for a given name.
func GetRef(name string) (reference.Named, error) {
ref, err := reference.ParseNamed(name)
if err != nil {
return nil, err
}
return ref, nil
}
// GetTag returns the tag associated with the given reference name.
func GetTag(ref reference.Named) string {
tag := DefaultTag
if ref, ok := ref.(reference.NamedTagged); ok {
tag = ref.Tag()
}
return tag
}
// Pull downloads the plugin from Store
func Pull(ref reference.Named, rs registry.Service, metaheader http.Header, authConfig *types.AuthConfig) (PullData, error) {
repoInfo, err := rs.ResolveRepository(ref)
if err != nil {
logrus.Debugf("pull.go: error in ResolveRepository: %v", err)
return nil, err
}
repoInfo.Class = "plugin"
if err := dockerdist.ValidateRepoName(repoInfo.Name()); err != nil {
logrus.Debugf("pull.go: error in ValidateRepoName: %v", err)
return nil, err
}
endpoints, err := rs.LookupPullEndpoints(repoInfo.Hostname())
if err != nil {
logrus.Debugf("pull.go: error in LookupPullEndpoints: %v", err)
return nil, err
}
var confirmedV2 bool
var repository distribution.Repository
for _, endpoint := range endpoints {
if confirmedV2 && endpoint.Version == registry.APIVersion1 {
logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL)
continue
}
// TODO: reuse contexts
repository, confirmedV2, err = dockerdist.NewV2Repository(context.Background(), repoInfo, endpoint, metaheader, authConfig, "pull")
if err != nil {
logrus.Debugf("pull.go: error in NewV2Repository: %v", err)
return nil, err
}
if !confirmedV2 {
logrus.Debug("pull.go: !confirmedV2")
return nil, ErrUnsupportedRegistry
}
logrus.Debugf("Trying to pull %s from %s %s", repoInfo.Name(), endpoint.URL, endpoint.Version)
break
}
tag := DefaultTag
if ref, ok := ref.(reference.NamedTagged); ok {
tag = ref.Tag()
}
// tags := repository.Tags(context.Background())
// desc, err := tags.Get(context.Background(), tag)
// if err != nil {
// return nil, err
// }
//
msv, err := repository.Manifests(context.Background())
if err != nil {
logrus.Debugf("pull.go: error in repository.Manifests: %v", err)
return nil, err
}
manifest, err := msv.Get(context.Background(), "", distribution.WithTag(tag))
if err != nil {
logrus.Debugf("pull.go: error in msv.Get(): %v", err)
return nil, dockerdist.TranslatePullError(err, repoInfo)
}
_, pl, err := manifest.Payload()
if err != nil {
logrus.Debugf("pull.go: error in manifest.Payload(): %v", err)
return nil, err
}
var m schema2.Manifest
if err := json.Unmarshal(pl, &m); err != nil {
logrus.Debugf("pull.go: error in json.Unmarshal(): %v", err)
return nil, err
}
if m.Config.MediaType != schema2.MediaTypePluginConfig {
return nil, ErrUnsupportedMediaType
}
pd := &pullData{
repository: repository,
manifest: m,
}
logrus.Debugf("manifest: %s", pl)
return pd, nil
}
// WritePullData extracts manifest and rootfs to the disk.
func WritePullData(pd PullData, dest string, extract bool) error {
config, err := pd.Config()
if err != nil {
return err
}
var p types.Plugin
if err := json.Unmarshal(config, &p); err != nil {
return err
}
logrus.Debugf("plugin: %#v", p)
if err := os.MkdirAll(dest, 0700); err != nil {
return err
}
if extract {
if err := ioutil.WriteFile(filepath.Join(dest, "config.json"), config, 0600); err != nil {
return err
}
if err := os.MkdirAll(filepath.Join(dest, "rootfs"), 0700); err != nil {
return err
}
}
for i := 0; ; i++ {
l, err := pd.Layer()
if err == io.EOF {
break
}
if err != nil {
return err
}
if !extract {
f, err := os.Create(filepath.Join(dest, fmt.Sprintf("layer%d.tar", i)))
if err != nil {
l.Close()
return err
}
io.Copy(f, l)
l.Close()
f.Close()
continue
}
if _, err := archive.ApplyLayer(filepath.Join(dest, "rootfs"), l); err != nil {
return err
}
}
return nil
}

View file

@ -1,134 +0,0 @@
package distribution
import (
"crypto/sha256"
"io"
"net/http"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/manifest/schema2"
"github.com/docker/docker/api/types"
dockerdist "github.com/docker/docker/distribution"
"github.com/docker/docker/reference"
"github.com/docker/docker/registry"
"golang.org/x/net/context"
)
// Push pushes a plugin to a registry.
func Push(name string, rs registry.Service, metaHeader http.Header, authConfig *types.AuthConfig, config io.ReadCloser, layers io.ReadCloser) (digest.Digest, error) {
ref, err := reference.ParseNamed(name)
if err != nil {
return "", err
}
repoInfo, err := rs.ResolveRepository(ref)
if err != nil {
return "", err
}
repoInfo.Class = "plugin"
if err := dockerdist.ValidateRepoName(repoInfo.Name()); err != nil {
return "", err
}
endpoints, err := rs.LookupPushEndpoints(repoInfo.Hostname())
if err != nil {
return "", err
}
var confirmedV2 bool
var repository distribution.Repository
for _, endpoint := range endpoints {
if confirmedV2 && endpoint.Version == registry.APIVersion1 {
logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL)
continue
}
repository, confirmedV2, err = dockerdist.NewV2Repository(context.Background(), repoInfo, endpoint, metaHeader, authConfig, "push", "pull")
if err != nil {
return "", err
}
if !confirmedV2 {
return "", ErrUnsupportedRegistry
}
logrus.Debugf("Trying to push %s to %s %s", repoInfo.Name(), endpoint.URL, endpoint.Version)
// This means that we found an endpoint. and we are ready to push
break
}
// Returns a reference to the repository's blob service.
blobs := repository.Blobs(context.Background())
// Descriptor = {mediaType, size, digest}
var descs []distribution.Descriptor
for i, f := range []io.ReadCloser{config, layers} {
bw, err := blobs.Create(context.Background())
if err != nil {
logrus.Debugf("Error in blobs.Create: %v", err)
return "", err
}
h := sha256.New()
r := io.TeeReader(f, h)
_, err = io.Copy(bw, r)
if err != nil {
f.Close()
logrus.Debugf("Error in io.Copy: %v", err)
return "", err
}
f.Close()
mt := schema2.MediaTypeLayer
if i == 0 {
mt = schema2.MediaTypePluginConfig
}
// Commit completes the write process to the BlobService.
// The descriptor arg to Commit is called the "provisional" descriptor and
// used for validation.
// The returned descriptor should be the one used. Its called the "Canonical"
// descriptor.
desc, err := bw.Commit(context.Background(), distribution.Descriptor{
MediaType: mt,
// XXX: What about the Size?
Digest: digest.NewDigest("sha256", h),
})
if err != nil {
logrus.Debugf("Error in bw.Commit: %v", err)
return "", err
}
// The canonical descriptor is set the mediatype again, just in case.
// Don't touch the digest or the size here.
desc.MediaType = mt
logrus.Debugf("pushed blob: %s %s", desc.MediaType, desc.Digest)
descs = append(descs, desc)
}
// XXX: schema2.Versioned needs a MediaType as well.
// "application/vnd.docker.distribution.manifest.v2+json"
m, err := schema2.FromStruct(schema2.Manifest{Versioned: schema2.SchemaVersion, Config: descs[0], Layers: descs[1:]})
if err != nil {
logrus.Debugf("error in schema2.FromStruct: %v", err)
return "", err
}
msv, err := repository.Manifests(context.Background())
if err != nil {
logrus.Debugf("error in repository.Manifests: %v", err)
return "", err
}
_, pl, err := m.Payload()
if err != nil {
logrus.Debugf("error in m.Payload: %v", err)
return "", err
}
logrus.Debugf("Pushed manifest: %s", pl)
tag := DefaultTag
if tagged, ok := ref.(reference.NamedTagged); ok {
tag = tagged.Tag()
}
return msv.Put(context.Background(), m, distribution.WithTag(tag))
}

View file

@ -1,12 +0,0 @@
package distribution
import "errors"
// ErrUnsupportedRegistry indicates that the registry does not support v2 protocol
var ErrUnsupportedRegistry = errors.New("only V2 repositories are supported for plugin distribution")
// ErrUnsupportedMediaType indicates we are pulling content that's not a plugin
var ErrUnsupportedMediaType = errors.New("content is not a plugin")
// DefaultTag is the default tag for plugins
const DefaultTag = "latest"

View file

@ -3,25 +3,34 @@ package plugin
import (
"encoding/json"
"io"
"io/ioutil"
"os"
"path/filepath"
"reflect"
"regexp"
"strings"
"sync"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution/digest"
"github.com/docker/docker/api/types"
"github.com/docker/docker/image"
"github.com/docker/docker/layer"
"github.com/docker/docker/libcontainerd"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/mount"
"github.com/docker/docker/plugin/store"
"github.com/docker/docker/plugin/v2"
"github.com/docker/docker/reference"
"github.com/docker/docker/registry"
"github.com/pkg/errors"
)
var (
manager *Manager
)
const configFileName = "config.json"
const rootFSFileName = "rootfs"
var validFullID = regexp.MustCompile(`^([a-f0-9]{64})$`)
func (pm *Manager) restorePlugin(p *v2.Plugin) error {
p.Restore(pm.runRoot)
if p.IsEnabled() {
return pm.restore(p)
}
@ -30,17 +39,25 @@ func (pm *Manager) restorePlugin(p *v2.Plugin) error {
type eventLogger func(id, name, action string)
// ManagerConfig defines the configuration needed to start a new manager.
type ManagerConfig struct {
Store *Store // remove
Executor libcontainerd.Remote
RegistryService registry.Service
LiveRestoreEnabled bool // TODO: remove
LogPluginEvent eventLogger
Root string
ExecRoot string
}
// Manager controls the plugin subsystem.
type Manager struct {
libRoot string
runRoot string
pluginStore *store.Store
containerdClient libcontainerd.Client
registryService registry.Service
liveRestore bool
pluginEventLogger eventLogger
mu sync.RWMutex // protects cMap
cMap map[*v2.Plugin]*controller
config ManagerConfig
mu sync.RWMutex // protects cMap
muGC sync.RWMutex // protects blobstore deletions
cMap map[*v2.Plugin]*controller
containerdClient libcontainerd.Client
blobStore *basicBlobStore
}
// controller represents the manager's control on a plugin.
@ -50,36 +67,56 @@ type controller struct {
timeoutInSecs int
}
// GetManager returns the singleton plugin Manager
func GetManager() *Manager {
return manager
// pluginRegistryService ensures that all resolved repositories
// are of the plugin class.
type pluginRegistryService struct {
registry.Service
}
// Init (was NewManager) instantiates the singleton Manager.
// TODO: revert this to NewManager once we get rid of all the singletons.
func Init(root string, ps *store.Store, remote libcontainerd.Remote, rs registry.Service, liveRestore bool, evL eventLogger) (err error) {
if manager != nil {
return nil
func (s pluginRegistryService) ResolveRepository(name reference.Named) (repoInfo *registry.RepositoryInfo, err error) {
repoInfo, err = s.Service.ResolveRepository(name)
if repoInfo != nil {
repoInfo.Class = "plugin"
}
return
}
// NewManager returns a new plugin manager.
func NewManager(config ManagerConfig) (*Manager, error) {
if config.RegistryService != nil {
config.RegistryService = pluginRegistryService{config.RegistryService}
}
manager := &Manager{
config: config,
}
if err := os.MkdirAll(manager.config.Root, 0700); err != nil {
return nil, errors.Wrapf(err, "failed to mkdir %v", manager.config.Root)
}
if err := os.MkdirAll(manager.config.ExecRoot, 0700); err != nil {
return nil, errors.Wrapf(err, "failed to mkdir %v", manager.config.ExecRoot)
}
if err := os.MkdirAll(manager.tmpDir(), 0700); err != nil {
return nil, errors.Wrapf(err, "failed to mkdir %v", manager.tmpDir())
}
var err error
manager.containerdClient, err = config.Executor.Client(manager) // todo: move to another struct
if err != nil {
return nil, errors.Wrap(err, "failed to create containerd client")
}
manager.blobStore, err = newBasicBlobStore(filepath.Join(manager.config.Root, "storage/blobs"))
if err != nil {
return nil, err
}
root = filepath.Join(root, "plugins")
manager = &Manager{
libRoot: root,
runRoot: "/run/docker/plugins",
pluginStore: ps,
registryService: rs,
liveRestore: liveRestore,
pluginEventLogger: evL,
}
if err := os.MkdirAll(manager.runRoot, 0700); err != nil {
return err
}
manager.containerdClient, err = remote.Client(manager)
if err != nil {
return err
}
manager.cMap = make(map[*v2.Plugin]*controller)
return manager.reload()
if err := manager.reload(); err != nil {
return nil, errors.Wrap(err, "failed to restore plugins")
}
return manager, nil
}
func (pm *Manager) tmpDir() string {
return filepath.Join(pm.config.Root, "tmp")
}
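For orientation, a hedged sketch of how a caller such as the daemon might wire up the new ManagerConfig now that the singleton Init/GetManager pair is gone. The package name, paths, and the no-op event logger below are placeholders, not the daemon's real values.

package wiring // hypothetical package name, illustration only

import (
	"github.com/docker/docker/libcontainerd"
	"github.com/docker/docker/plugin"
	"github.com/docker/docker/registry"
)

// newPluginManagerSketch shows the shape of the wiring only; the daemon
// supplies its real root directories, plugin store, containerd remote,
// registry service, and event logger.
func newPluginManagerSketch(store *plugin.Store, remote libcontainerd.Remote, rs registry.Service) (*plugin.Manager, error) {
	return plugin.NewManager(plugin.ManagerConfig{
		Root:               "/var/lib/docker/plugins", // placeholder path
		ExecRoot:           "/run/docker/plugins",     // placeholder path
		Store:              store,
		Executor:           remote,
		RegistryService:    rs,
		LiveRestoreEnabled: false,
		LogPluginEvent:     func(id, name, action string) {}, // no-op placeholder
	})
}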
// StateChanged updates plugin internals using libcontainerd events.
@ -88,7 +125,7 @@ func (pm *Manager) StateChanged(id string, e libcontainerd.StateInfo) error {
switch e.State {
case libcontainerd.StateExit:
p, err := pm.pluginStore.GetByID(id)
p, err := pm.config.Store.GetV2Plugin(id)
if err != nil {
return err
}
@ -102,7 +139,7 @@ func (pm *Manager) StateChanged(id string, e libcontainerd.StateInfo) error {
restart := c.restart
pm.mu.RUnlock()
p.RemoveFromDisk()
os.RemoveAll(filepath.Join(pm.config.ExecRoot, id))
if p.PropagatedMount != "" {
if err := mount.Unmount(p.PropagatedMount); err != nil {
@ -118,37 +155,38 @@ func (pm *Manager) StateChanged(id string, e libcontainerd.StateInfo) error {
return nil
}
// reload is used on daemon restarts to load the manager's state
func (pm *Manager) reload() error {
dt, err := os.Open(filepath.Join(pm.libRoot, "plugins.json"))
func (pm *Manager) reload() error { // todo: restore
dir, err := ioutil.ReadDir(pm.config.Root)
if err != nil {
if os.IsNotExist(err) {
return nil
}
return err
return errors.Wrapf(err, "failed to read %v", pm.config.Root)
}
defer dt.Close()
plugins := make(map[string]*v2.Plugin)
if err := json.NewDecoder(dt).Decode(&plugins); err != nil {
return err
for _, v := range dir {
if validFullID.MatchString(v.Name()) {
p, err := pm.loadPlugin(v.Name())
if err != nil {
return err
}
plugins[p.GetID()] = p
}
}
pm.pluginStore.SetAll(plugins)
var group sync.WaitGroup
group.Add(len(plugins))
pm.config.Store.SetAll(plugins)
var wg sync.WaitGroup
wg.Add(len(plugins))
for _, p := range plugins {
c := &controller{}
c := &controller{} // todo: remove this
pm.cMap[p] = c
go func(p *v2.Plugin) {
defer group.Done()
defer wg.Done()
if err := pm.restorePlugin(p); err != nil {
logrus.Errorf("failed to restore plugin '%s': %s", p.Name(), err)
return
}
if p.Rootfs != "" {
p.Rootfs = filepath.Join(pm.libRoot, p.PluginObj.ID, "rootfs")
p.Rootfs = filepath.Join(pm.config.Root, p.PluginObj.ID, "rootfs")
}
// We should only enable rootfs propagation for certain plugin types that need it.
@ -165,8 +203,8 @@ func (pm *Manager) reload() error {
}
}
pm.pluginStore.Update(p)
requiresManualRestore := !pm.liveRestore && p.IsEnabled()
pm.save(p)
requiresManualRestore := !pm.config.LiveRestoreEnabled && p.IsEnabled()
if requiresManualRestore {
// if liveRestore is not enabled, the plugin will be stopped now so we should enable it
@ -176,10 +214,50 @@ func (pm *Manager) reload() error {
}
}(p)
}
group.Wait()
wg.Wait()
return nil
}
func (pm *Manager) loadPlugin(id string) (*v2.Plugin, error) {
p := filepath.Join(pm.config.Root, id, configFileName)
dt, err := ioutil.ReadFile(p)
if err != nil {
return nil, errors.Wrapf(err, "error reading %v", p)
}
var plugin v2.Plugin
if err := json.Unmarshal(dt, &plugin); err != nil {
return nil, errors.Wrapf(err, "error decoding %v", p)
}
return &plugin, nil
}
func (pm *Manager) save(p *v2.Plugin) error {
pluginJSON, err := json.Marshal(p)
if err != nil {
return errors.Wrap(err, "failed to marshal plugin json")
}
if err := ioutils.AtomicWriteFile(filepath.Join(pm.config.Root, p.GetID(), configFileName), pluginJSON, 0600); err != nil {
return err
}
return nil
}
// GC cleans up unreferenced blobs. It is recommended to run this in a goroutine
func (pm *Manager) GC() {
pm.muGC.Lock()
defer pm.muGC.Unlock()
whitelist := make(map[digest.Digest]struct{})
for _, p := range pm.config.Store.GetAll() {
whitelist[p.Config] = struct{}{}
for _, b := range p.Blobsums {
whitelist[b] = struct{}{}
}
}
pm.blobStore.gc(whitelist)
}
type logHook struct{ id string }
func (logHook) Levels() []logrus.Level {
@ -209,3 +287,32 @@ func attachToLog(id string) func(libcontainerd.IOPipe) error {
return nil
}
}
func validatePrivileges(requiredPrivileges, privileges types.PluginPrivileges) error {
// todo: make a better function that doesn't check order
if !reflect.DeepEqual(privileges, requiredPrivileges) {
return errors.New("incorrect privileges")
}
return nil
}
func configToRootFS(c []byte) (*image.RootFS, error) {
var pluginConfig types.PluginConfig
if err := json.Unmarshal(c, &pluginConfig); err != nil {
return nil, err
}
return rootFSFromPlugin(pluginConfig.Rootfs), nil
}
func rootFSFromPlugin(pluginfs *types.PluginConfigRootfs) *image.RootFS {
rootFS := image.RootFS{
Type: pluginfs.Type,
DiffIDs: make([]layer.DiffID, len(pluginfs.DiffIds)),
}
for i := range pluginfs.DiffIds {
rootFS.DiffIDs[i] = layer.DiffID(pluginfs.DiffIds[i])
}
return &rootFS
}
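To make the mapping concrete, a small sketch of the rootfs section of a plugin config and the image.RootFS it translates to. The JSON fragment and digest are illustrative; the real conversion goes through configToRootFS/rootFSFromPlugin above.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/image"
	"github.com/docker/docker/layer"
)

func main() {
	// Illustrative config fragment; the diff ID digest is made up.
	const configJSON = `{
  "rootfs": {
    "type": "layers",
    "diff_ids": ["sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"]
  }
}`

	var cfg types.PluginConfig
	if err := json.Unmarshal([]byte(configJSON), &cfg); err != nil {
		panic(err)
	}

	// The same mapping rootFSFromPlugin performs: copy the diff IDs into an
	// image.RootFS so layers can be addressed by chain ID during push.
	rootFS := image.RootFS{Type: cfg.Rootfs.Type}
	for _, d := range cfg.Rootfs.DiffIds {
		rootFS.DiffIDs = append(rootFS.DiffIDs, layer.DiffID(d))
	}
	fmt.Println(rootFS.ChainID())
}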

View file

@ -3,26 +3,32 @@
package plugin
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"syscall"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution/digest"
"github.com/docker/docker/api/types"
"github.com/docker/docker/daemon/initlayer"
"github.com/docker/docker/libcontainerd"
"github.com/docker/docker/oci"
"github.com/docker/docker/pkg/mount"
"github.com/docker/docker/pkg/plugins"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/plugin/v2"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
)
func (pm *Manager) enable(p *v2.Plugin, c *controller, force bool) error {
p.Rootfs = filepath.Join(pm.libRoot, p.PluginObj.ID, "rootfs")
p.Rootfs = filepath.Join(pm.config.Root, p.PluginObj.ID, "rootfs")
if p.IsEnabled() && !force {
return fmt.Errorf("plugin %s is already enabled", p.Name())
}
spec, err := p.InitSpec(oci.DefaultSpec())
spec, err := p.InitSpec(pm.config.ExecRoot)
if err != nil {
return err
}
@ -40,6 +46,10 @@ func (pm *Manager) enable(p *v2.Plugin, c *controller, force bool) error {
}
}
if err := initlayer.Setup(filepath.Join(pm.config.Root, p.PluginObj.ID, rootFSFileName), 0, 0); err != nil {
return err
}
if err := pm.containerdClient.Create(p.GetID(), "", "", specs.Spec(*spec), attachToLog(p.GetID())); err != nil {
if p.PropagatedMount != "" {
if err := mount.Unmount(p.PropagatedMount); err != nil {
@ -53,7 +63,7 @@ func (pm *Manager) enable(p *v2.Plugin, c *controller, force bool) error {
}
func (pm *Manager) pluginPostStart(p *v2.Plugin, c *controller) error {
client, err := plugins.NewClientWithTimeout("unix://"+filepath.Join(p.GetRuntimeSourcePath(), p.GetSocket()), nil, c.timeoutInSecs)
client, err := plugins.NewClientWithTimeout("unix://"+filepath.Join(pm.config.ExecRoot, p.GetID(), p.GetSocket()), nil, c.timeoutInSecs)
if err != nil {
c.restart = false
shutdownPlugin(p, c, pm.containerdClient)
@ -61,9 +71,10 @@ func (pm *Manager) pluginPostStart(p *v2.Plugin, c *controller) error {
}
p.SetPClient(client)
pm.pluginStore.SetState(p, true)
pm.pluginStore.CallHandler(p)
return nil
pm.config.Store.SetState(p, true)
pm.config.Store.CallHandler(p)
return pm.save(p)
}
func (pm *Manager) restore(p *v2.Plugin) error {
@ -71,7 +82,7 @@ func (pm *Manager) restore(p *v2.Plugin) error {
return err
}
if pm.liveRestore {
if pm.config.LiveRestoreEnabled {
c := &controller{}
if pids, _ := pm.containerdClient.GetPidsForContainer(p.GetID()); len(pids) == 0 {
// plugin is not running, so follow normal startup procedure
@ -115,19 +126,19 @@ func (pm *Manager) disable(p *v2.Plugin, c *controller) error {
c.restart = false
shutdownPlugin(p, c, pm.containerdClient)
pm.pluginStore.SetState(p, false)
return nil
pm.config.Store.SetState(p, false)
return pm.save(p)
}
// Shutdown stops all plugins and is called during daemon shutdown.
func (pm *Manager) Shutdown() {
plugins := pm.pluginStore.GetAll()
plugins := pm.config.Store.GetAll()
for _, p := range plugins {
pm.mu.RLock()
c := pm.cMap[p]
pm.mu.RUnlock()
if pm.liveRestore && p.IsEnabled() {
if pm.config.LiveRestoreEnabled && p.IsEnabled() {
logrus.Debug("Plugin active when liveRestore is set, skipping shutdown")
continue
}
@ -137,3 +148,69 @@ func (pm *Manager) Shutdown() {
}
}
}
// createPlugin creates a new plugin. Take the lock before calling.
func (pm *Manager) createPlugin(name string, configDigest digest.Digest, blobsums []digest.Digest, rootFSDir string, privileges *types.PluginPrivileges) (p *v2.Plugin, err error) {
if err := pm.config.Store.validateName(name); err != nil { // todo: this check is wrong. remove store
return nil, err
}
configRC, err := pm.blobStore.Get(configDigest)
if err != nil {
return nil, err
}
defer configRC.Close()
var config types.PluginConfig
dec := json.NewDecoder(configRC)
if err := dec.Decode(&config); err != nil {
return nil, errors.Wrapf(err, "failed to parse config")
}
if dec.More() {
return nil, errors.New("invalid config json")
}
requiredPrivileges, err := computePrivileges(config)
if err != nil {
return nil, err
}
if privileges != nil {
if err := validatePrivileges(requiredPrivileges, *privileges); err != nil {
return nil, err
}
}
p = &v2.Plugin{
PluginObj: types.Plugin{
Name: name,
ID: stringid.GenerateRandomID(),
Config: config,
},
Config: configDigest,
Blobsums: blobsums,
}
p.InitEmptySettings()
pdir := filepath.Join(pm.config.Root, p.PluginObj.ID)
if err := os.MkdirAll(pdir, 0700); err != nil {
return nil, errors.Wrapf(err, "failed to mkdir %v", pdir)
}
defer func() {
if err != nil {
os.RemoveAll(pdir)
}
}()
if err := os.Rename(rootFSDir, filepath.Join(pdir, rootFSFileName)); err != nil {
return nil, errors.Wrap(err, "failed to rename rootfs")
}
if err := pm.save(p); err != nil {
return nil, err
}
pm.config.Store.Add(p) // todo: remove
return p, nil
}

View file

@ -1,16 +1,15 @@
package store
package plugin
import (
"encoding/json"
"fmt"
"strings"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/plugingetter"
"github.com/docker/docker/pkg/plugins"
"github.com/docker/docker/plugin/v2"
"github.com/docker/docker/reference"
"github.com/pkg/errors"
)
/* allowV1PluginsFallback determines daemon's support for V1 plugins.
@ -37,33 +36,32 @@ func (name ErrAmbiguous) Error() string {
return fmt.Sprintf("multiple plugins found for %q", string(name))
}
// GetByName retreives a plugin by name.
func (ps *Store) GetByName(name string) (*v2.Plugin, error) {
// GetV2Plugin retrieves a plugin by name, ID, or partial ID.
func (ps *Store) GetV2Plugin(refOrID string) (*v2.Plugin, error) {
ps.RLock()
defer ps.RUnlock()
id, nameOk := ps.nameToID[name]
if !nameOk {
return nil, ErrNotFound(name)
id, err := ps.resolvePluginID(refOrID)
if err != nil {
return nil, err
}
p, idOk := ps.plugins[id]
if !idOk {
return nil, ErrNotFound(id)
return nil, errors.WithStack(ErrNotFound(id))
}
return p, nil
}
// GetByID retreives a plugin by ID.
func (ps *Store) GetByID(id string) (*v2.Plugin, error) {
ps.RLock()
defer ps.RUnlock()
p, idOk := ps.plugins[id]
if !idOk {
return nil, ErrNotFound(id)
// validateName returns an error if the name is already reserved. Always call with the lock held and a full name.
func (ps *Store) validateName(name string) error {
for _, p := range ps.plugins {
if p.Name() == name {
return errors.Errorf("%v already exists", name)
}
}
return p, nil
return nil
}
// GetAll retrieves all plugins.
@ -101,7 +99,6 @@ func (ps *Store) SetState(p *v2.Plugin, state bool) {
defer ps.Unlock()
p.PluginObj.Enabled = state
ps.updatePluginDB()
}
// Add adds a plugin to memory and plugindb.
@ -113,52 +110,17 @@ func (ps *Store) Add(p *v2.Plugin) error {
if v, exist := ps.plugins[p.GetID()]; exist {
return fmt.Errorf("plugin %q has the same ID %s as %q", p.Name(), p.GetID(), v.Name())
}
// Since both Pull() and CreateFromContext() calls GetByName() before any plugin
// to search for collision (to fail fast), it is unlikely the following check
// will return an error.
// However, in case two CreateFromContext() are called at the same time,
// there is still a remote possibility that a collision might happen.
// For that reason we still perform the collision check below as it is protected
// by ps.Lock() and ps.Unlock() above.
if _, exist := ps.nameToID[p.Name()]; exist {
return fmt.Errorf("plugin %q already exists", p.Name())
}
ps.plugins[p.GetID()] = p
ps.nameToID[p.Name()] = p.GetID()
ps.updatePluginDB()
return nil
}
// Update updates a plugin to memory and plugindb.
func (ps *Store) Update(p *v2.Plugin) {
ps.Lock()
defer ps.Unlock()
ps.plugins[p.GetID()] = p
ps.nameToID[p.Name()] = p.GetID()
ps.updatePluginDB()
}
// Remove removes a plugin from memory and plugindb.
func (ps *Store) Remove(p *v2.Plugin) {
ps.Lock()
delete(ps.plugins, p.GetID())
delete(ps.nameToID, p.Name())
ps.updatePluginDB()
ps.Unlock()
}
// Callers are expected to hold the store lock.
func (ps *Store) updatePluginDB() error {
jsonData, err := json.Marshal(ps.plugins)
if err != nil {
logrus.Debugf("Error in json.Marshal: %v", err)
return err
}
ioutils.AtomicWriteFile(ps.plugindb, jsonData, 0600)
return nil
}
// Get returns an enabled plugin matching the given name and capability.
func (ps *Store) Get(name, capability string, mode int) (plugingetter.CompatPlugin, error) {
var (
@ -168,18 +130,7 @@ func (ps *Store) Get(name, capability string, mode int) (plugingetter.CompatPlug
// Lookup using new model.
if ps != nil {
fullName := name
if named, err := reference.ParseNamed(fullName); err == nil { // FIXME: validate
if reference.IsNameOnly(named) {
named = reference.WithDefaultTag(named)
}
ref, ok := named.(reference.NamedTagged)
if !ok {
return nil, fmt.Errorf("invalid name: %s", named.String())
}
fullName = ref.String()
}
p, err = ps.GetByName(fullName)
p, err = ps.GetV2Plugin(name)
if err == nil {
p.AddRefCount(mode)
if p.IsEnabled() {
@ -187,9 +138,9 @@ func (ps *Store) Get(name, capability string, mode int) (plugingetter.CompatPlug
}
// Plugin was found but it is disabled, so we should not fall back to legacy plugins
// but we should error out right away
return nil, ErrNotFound(fullName)
return nil, ErrNotFound(name)
}
if _, ok := err.(ErrNotFound); !ok {
if _, ok := errors.Cause(err).(ErrNotFound); !ok {
return nil, err
}
}
@ -271,24 +222,42 @@ func (ps *Store) CallHandler(p *v2.Plugin) {
}
}
// Search retreives a plugin by ID Prefix
// If no plugin is found, then ErrNotFound is returned
// If multiple plugins are found, then ErrAmbiguous is returned
func (ps *Store) Search(partialID string) (*v2.Plugin, error) {
ps.RLock()
func (ps *Store) resolvePluginID(idOrName string) (string, error) {
ps.RLock() // todo: fix
defer ps.RUnlock()
if validFullID.MatchString(idOrName) {
return idOrName, nil
}
ref, err := reference.ParseNamed(idOrName)
if err != nil {
return "", errors.Wrapf(err, "failed to parse %v", idOrName)
}
if _, ok := ref.(reference.Canonical); ok {
logrus.Warnf("canonical references cannot be resolved: %v", ref.String())
return "", errors.WithStack(ErrNotFound(idOrName))
}
fullRef := reference.WithDefaultTag(ref)
for _, p := range ps.plugins {
if p.PluginObj.Name == fullRef.String() {
return p.PluginObj.ID, nil
}
}
var found *v2.Plugin
for id, p := range ps.plugins {
if strings.HasPrefix(id, partialID) {
for id, p := range ps.plugins { // this can be optimized
if strings.HasPrefix(id, idOrName) {
if found != nil {
return nil, ErrAmbiguous(partialID)
return "", errors.WithStack(ErrAmbiguous(idOrName))
}
found = p
}
}
if found == nil {
return nil, ErrNotFound(partialID)
return "", errors.WithStack(ErrNotFound(idOrName))
}
return found, nil
return found.PluginObj.ID, nil
}
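resolvePluginID treats anything that is not a full 64-character ID as a reference: it rejects canonical (digest-pinned) references, applies the default tag, and only then compares against stored names, falling back to an ID-prefix search. A sketch of the normalization step, assuming the vendored github.com/docker/docker/reference helpers used above; the exact normalized string depends on that package:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/reference"
)

func normalize(name string) (string, error) {
	ref, err := reference.ParseNamed(name)
	if err != nil {
		return "", err
	}
	if _, ok := ref.(reference.Canonical); ok {
		// Digest-pinned references are not resolvable plugin names here.
		return "", fmt.Errorf("canonical reference %s cannot be resolved", ref.String())
	}
	// Adds the default tag when none was given.
	return reference.WithDefaultTag(ref).String(), nil
}

func main() {
	full, err := normalize("sample/plugin")
	fmt.Println(full, err) // e.g. "sample/plugin:latest" <nil>
}
```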

View file

@ -1,4 +1,4 @@
package store
package plugin
import (
"testing"
@ -8,8 +8,7 @@ import (
)
func TestFilterByCapNeg(t *testing.T) {
p := v2.NewPlugin("test", "1234567890", "/run/docker", "/var/lib/docker/plugins", "latest")
p := v2.Plugin{PluginObj: types.Plugin{Name: "test:latest"}}
iType := types.PluginInterfaceType{"volumedriver", "docker", "1.0"}
i := types.PluginConfigInterface{"plugins.sock", []types.PluginInterfaceType{iType}}
p.PluginObj.Config.Interface = i
@ -21,7 +20,7 @@ func TestFilterByCapNeg(t *testing.T) {
}
func TestFilterByCapPos(t *testing.T) {
p := v2.NewPlugin("test", "1234567890", "/run/docker", "/var/lib/docker/plugins", "latest")
p := v2.Plugin{PluginObj: types.Plugin{Name: "test:latest"}}
iType := types.PluginInterfaceType{"volumedriver", "docker", "1.0"}
i := types.PluginConfigInterface{"plugins.sock", []types.PluginInterfaceType{iType}}

View file

@ -1,32 +1,27 @@
package v2
import (
"encoding/json"
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"sync"
"github.com/docker/distribution/digest"
"github.com/docker/docker/api/types"
"github.com/docker/docker/oci"
"github.com/docker/docker/pkg/plugingetter"
"github.com/docker/docker/pkg/plugins"
"github.com/docker/docker/pkg/system"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
// Plugin represents an individual plugin.
type Plugin struct {
mu sync.RWMutex
PluginObj types.Plugin `json:"plugin"`
pClient *plugins.Client
runtimeSourcePath string
refCount int
LibRoot string // TODO: make private
PropagatedMount string // TODO: make private
Rootfs string // TODO: make private
mu sync.RWMutex
PluginObj types.Plugin `json:"plugin"` // todo: embed struct
pClient *plugins.Client
refCount int
PropagatedMount string // TODO: make private
Rootfs string // TODO: make private
Config digest.Digest
Blobsums []digest.Digest
}
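The new Config and Blobsums fields are what make the plugin content-addressable: Config holds the digest of the canonical config JSON and Blobsums the digests of the rootfs layer blobs, mirroring the schema2 layout used for images. A minimal sketch of how such a digest is derived, assuming the github.com/docker/distribution/digest package imported above (the config struct here is a stand-in):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/distribution/digest"
)

// toyConfig stands in for the immutable plugin config document.
type toyConfig struct {
	Description string   `json:"description"`
	Entrypoint  []string `json:"entrypoint"`
}

func main() {
	raw, err := json.Marshal(toyConfig{
		Description: "sample plugin",
		Entrypoint:  []string{"/plugin"},
	})
	if err != nil {
		panic(err)
	}
	// digest.FromBytes hashes the exact serialized bytes, so the same
	// config document always maps to the same content address.
	dgst := digest.FromBytes(raw)
	fmt.Println(dgst) // sha256:...
}
```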
const defaultPluginRuntimeDestination = "/run/docker/plugins"
@ -40,33 +35,6 @@ func (e ErrInadequateCapability) Error() string {
return fmt.Sprintf("plugin does not provide %q capability", e.cap)
}
func newPluginObj(name, id, tag string) types.Plugin {
return types.Plugin{Name: name, ID: id, Tag: tag}
}
// NewPlugin creates a plugin.
func NewPlugin(name, id, runRoot, libRoot, tag string) *Plugin {
return &Plugin{
PluginObj: newPluginObj(name, id, tag),
runtimeSourcePath: filepath.Join(runRoot, id),
LibRoot: libRoot,
}
}
// Restore restores the plugin
func (p *Plugin) Restore(runRoot string) {
p.runtimeSourcePath = filepath.Join(runRoot, p.GetID())
}
// GetRuntimeSourcePath gets the Source (host) path of the plugin socket
// This path gets bind mounted into the plugin.
func (p *Plugin) GetRuntimeSourcePath() string {
p.mu.RLock()
defer p.mu.RUnlock()
return p.runtimeSourcePath
}
// BasePath returns the path to which all paths returned by the plugin are relative to.
// For Plugin objects this returns the host path of the plugin container's rootfs.
func (p *Plugin) BasePath() string {
@ -96,12 +64,7 @@ func (p *Plugin) IsV1() bool {
// Name returns the plugin name.
func (p *Plugin) Name() string {
name := p.PluginObj.Name
if len(p.PluginObj.Tag) > 0 {
// TODO: this feels hacky, maybe we should be storing the distribution reference rather than splitting these
name += ":" + p.PluginObj.Tag
}
return name
return p.PluginObj.Name
}
// FilterByCap query the plugin for a given capability.
@ -115,23 +78,8 @@ func (p *Plugin) FilterByCap(capability string) (*Plugin, error) {
return nil, ErrInadequateCapability{capability}
}
// RemoveFromDisk deletes the plugin's runtime files from disk.
func (p *Plugin) RemoveFromDisk() error {
return os.RemoveAll(p.runtimeSourcePath)
}
// InitPlugin populates the plugin object from the plugin config file.
func (p *Plugin) InitPlugin() error {
dt, err := os.Open(filepath.Join(p.LibRoot, p.PluginObj.ID, "config.json"))
if err != nil {
return err
}
err = json.NewDecoder(dt).Decode(&p.PluginObj.Config)
dt.Close()
if err != nil {
return err
}
// InitEmptySettings initializes empty settings for a plugin.
func (p *Plugin) InitEmptySettings() {
p.PluginObj.Settings.Mounts = make([]types.PluginMount, len(p.PluginObj.Config.Mounts))
copy(p.PluginObj.Settings.Mounts, p.PluginObj.Config.Mounts)
p.PluginObj.Settings.Devices = make([]types.PluginDevice, len(p.PluginObj.Config.Linux.Devices))
@ -144,18 +92,6 @@ func (p *Plugin) InitPlugin() error {
}
p.PluginObj.Settings.Args = make([]string, len(p.PluginObj.Config.Args.Value))
copy(p.PluginObj.Settings.Args, p.PluginObj.Config.Args.Value)
return p.writeSettings()
}
func (p *Plugin) writeSettings() error {
f, err := os.Create(filepath.Join(p.LibRoot, p.PluginObj.ID, "plugin-settings.json"))
if err != nil {
return err
}
err = json.NewEncoder(f).Encode(&p.PluginObj.Settings)
f.Close()
return err
}
// Set is used to pass arguments to the plugin.
@ -243,8 +179,7 @@ next:
return fmt.Errorf("setting %q not found in the plugin configuration", s.name)
}
// update the settings on disk
return p.writeSettings()
return nil
}
// IsEnabled returns the active state of the plugin.
@ -307,107 +242,3 @@ func (p *Plugin) Acquire() {
func (p *Plugin) Release() {
p.AddRefCount(plugingetter.RELEASE)
}
// InitSpec creates an OCI spec from the plugin's config.
func (p *Plugin) InitSpec(s specs.Spec) (*specs.Spec, error) {
s.Root = specs.Root{
Path: p.Rootfs,
Readonly: false, // TODO: all plugins should be readonly? settable in config?
}
userMounts := make(map[string]struct{}, len(p.PluginObj.Settings.Mounts))
for _, m := range p.PluginObj.Settings.Mounts {
userMounts[m.Destination] = struct{}{}
}
if err := os.MkdirAll(p.runtimeSourcePath, 0755); err != nil {
return nil, err
}
mounts := append(p.PluginObj.Config.Mounts, types.PluginMount{
Source: &p.runtimeSourcePath,
Destination: defaultPluginRuntimeDestination,
Type: "bind",
Options: []string{"rbind", "rshared"},
})
if p.PluginObj.Config.Network.Type != "" {
// TODO: if net == bridge, use libnetwork controller to create a new plugin-specific bridge, bind mount /etc/hosts and /etc/resolv.conf look at the docker code (allocateNetwork, initialize)
if p.PluginObj.Config.Network.Type == "host" {
oci.RemoveNamespace(&s, specs.NamespaceType("network"))
}
etcHosts := "/etc/hosts"
resolvConf := "/etc/resolv.conf"
mounts = append(mounts,
types.PluginMount{
Source: &etcHosts,
Destination: etcHosts,
Type: "bind",
Options: []string{"rbind", "ro"},
},
types.PluginMount{
Source: &resolvConf,
Destination: resolvConf,
Type: "bind",
Options: []string{"rbind", "ro"},
})
}
for _, mnt := range mounts {
m := specs.Mount{
Destination: mnt.Destination,
Type: mnt.Type,
Options: mnt.Options,
}
if mnt.Source == nil {
return nil, errors.New("mount source is not specified")
}
m.Source = *mnt.Source
s.Mounts = append(s.Mounts, m)
}
for i, m := range s.Mounts {
if strings.HasPrefix(m.Destination, "/dev/") {
if _, ok := userMounts[m.Destination]; ok {
s.Mounts = append(s.Mounts[:i], s.Mounts[i+1:]...)
}
}
}
if p.PluginObj.Config.PropagatedMount != "" {
p.PropagatedMount = filepath.Join(p.Rootfs, p.PluginObj.Config.PropagatedMount)
s.Linux.RootfsPropagation = "rshared"
}
if p.PluginObj.Config.Linux.DeviceCreation {
rwm := "rwm"
s.Linux.Resources.Devices = []specs.DeviceCgroup{{Allow: true, Access: &rwm}}
}
for _, dev := range p.PluginObj.Settings.Devices {
path := *dev.Path
d, dPermissions, err := oci.DevicesFromPath(path, path, "rwm")
if err != nil {
return nil, err
}
s.Linux.Devices = append(s.Linux.Devices, d...)
s.Linux.Resources.Devices = append(s.Linux.Resources.Devices, dPermissions...)
}
envs := make([]string, 1, len(p.PluginObj.Settings.Env)+1)
envs[0] = "PATH=" + system.DefaultPathEnv
envs = append(envs, p.PluginObj.Settings.Env...)
args := append(p.PluginObj.Config.Entrypoint, p.PluginObj.Settings.Args...)
cwd := p.PluginObj.Config.Workdir
if len(cwd) == 0 {
cwd = "/"
}
s.Process.Terminal = false
s.Process.Args = args
s.Process.Cwd = cwd
s.Process.Env = envs
s.Process.Capabilities = append(s.Process.Capabilities, p.PluginObj.Config.Linux.Capabilities...)
return &s, nil
}

121 plugin/v2/plugin_linux.go Normal file
View file

@ -0,0 +1,121 @@
// +build linux
package v2
import (
"errors"
"os"
"path/filepath"
"strings"
"github.com/docker/docker/api/types"
"github.com/docker/docker/oci"
"github.com/docker/docker/pkg/system"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
// InitSpec creates an OCI spec from the plugin's config.
func (p *Plugin) InitSpec(execRoot string) (*specs.Spec, error) {
s := oci.DefaultSpec()
s.Root = specs.Root{
Path: p.Rootfs,
Readonly: false, // TODO: all plugins should be readonly? settable in config?
}
userMounts := make(map[string]struct{}, len(p.PluginObj.Settings.Mounts))
for _, m := range p.PluginObj.Settings.Mounts {
userMounts[m.Destination] = struct{}{}
}
execRoot = filepath.Join(execRoot, p.PluginObj.ID)
if err := os.MkdirAll(execRoot, 0700); err != nil {
return nil, err
}
mounts := append(p.PluginObj.Config.Mounts, types.PluginMount{
Source: &execRoot,
Destination: defaultPluginRuntimeDestination,
Type: "bind",
Options: []string{"rbind", "rshared"},
})
if p.PluginObj.Config.Network.Type != "" {
// TODO: if net == bridge, use libnetwork controller to create a new plugin-specific bridge, bind mount /etc/hosts and /etc/resolv.conf look at the docker code (allocateNetwork, initialize)
if p.PluginObj.Config.Network.Type == "host" {
oci.RemoveNamespace(&s, specs.NamespaceType("network"))
}
etcHosts := "/etc/hosts"
resolvConf := "/etc/resolv.conf"
mounts = append(mounts,
types.PluginMount{
Source: &etcHosts,
Destination: etcHosts,
Type: "bind",
Options: []string{"rbind", "ro"},
},
types.PluginMount{
Source: &resolvConf,
Destination: resolvConf,
Type: "bind",
Options: []string{"rbind", "ro"},
})
}
for _, mnt := range mounts {
m := specs.Mount{
Destination: mnt.Destination,
Type: mnt.Type,
Options: mnt.Options,
}
if mnt.Source == nil {
return nil, errors.New("mount source is not specified")
}
m.Source = *mnt.Source
s.Mounts = append(s.Mounts, m)
}
for i, m := range s.Mounts {
if strings.HasPrefix(m.Destination, "/dev/") {
if _, ok := userMounts[m.Destination]; ok {
s.Mounts = append(s.Mounts[:i], s.Mounts[i+1:]...)
}
}
}
if p.PluginObj.Config.PropagatedMount != "" {
p.PropagatedMount = filepath.Join(p.Rootfs, p.PluginObj.Config.PropagatedMount)
s.Linux.RootfsPropagation = "rshared"
}
if p.PluginObj.Config.Linux.DeviceCreation {
rwm := "rwm"
s.Linux.Resources.Devices = []specs.DeviceCgroup{{Allow: true, Access: &rwm}}
}
for _, dev := range p.PluginObj.Settings.Devices {
path := *dev.Path
d, dPermissions, err := oci.DevicesFromPath(path, path, "rwm")
if err != nil {
return nil, err
}
s.Linux.Devices = append(s.Linux.Devices, d...)
s.Linux.Resources.Devices = append(s.Linux.Resources.Devices, dPermissions...)
}
envs := make([]string, 1, len(p.PluginObj.Settings.Env)+1)
envs[0] = "PATH=" + system.DefaultPathEnv
envs = append(envs, p.PluginObj.Settings.Env...)
args := append(p.PluginObj.Config.Entrypoint, p.PluginObj.Settings.Args...)
cwd := p.PluginObj.Config.WorkDir
if len(cwd) == 0 {
cwd = "/"
}
s.Process.Terminal = false
s.Process.Args = args
s.Process.Cwd = cwd
s.Process.Env = envs
s.Process.Capabilities = append(s.Process.Capabilities, p.PluginObj.Config.Linux.Capabilities...)
return &s, nil
}
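InitSpec is only compiled on Linux and builds the runtime spec from the merged config and settings. A hypothetical call site, shown only to illustrate the shape of the API; the real wiring and the actual execRoot value live in the plugin manager:

```go
package main

import (
	"log"

	"github.com/docker/docker/plugin/v2"
)

// startPlugin is an illustrative sketch, not the manager's code.
func startPlugin(p *v2.Plugin) error {
	// execRoot is where the per-plugin runtime directory (the socket
	// bind-mount source) gets created; "/run/docker" is an assumption.
	spec, err := p.InitSpec("/run/docker")
	if err != nil {
		return err
	}
	_ = spec // hand the spec off to the container runtime here
	return nil
}

func main() {
	log.Println("sketch only; requires a populated *v2.Plugin")
}
```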

View file

@ -0,0 +1,14 @@
// +build !linux
package v2
import (
"errors"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
// InitSpec creates an OCI spec from the plugin's config.
func (p *Plugin) InitSpec(execRoot string) (*specs.Spec, error) {
return nil, errors.New("not supported")
}

View file

@ -111,23 +111,25 @@ func lookup(name string, mode int) (volume.Driver, error) {
if ok {
return ext, nil
}
if drivers.plugingetter != nil {
p, err := drivers.plugingetter.Get(name, extName, mode)
if err != nil {
return nil, fmt.Errorf("Error looking up volume plugin %s: %v", name, err)
}
p, err := drivers.plugingetter.Get(name, extName, mode)
if err != nil {
return nil, fmt.Errorf("Error looking up volume plugin %s: %v", name, err)
}
d := NewVolumeDriver(p.Name(), p.BasePath(), p.Client())
if err := validateDriver(d); err != nil {
return nil, err
}
d := NewVolumeDriver(p.Name(), p.BasePath(), p.Client())
if err := validateDriver(d); err != nil {
return nil, err
if p.IsV1() {
drivers.Lock()
drivers.extensions[name] = d
drivers.Unlock()
}
return d, nil
}
if p.IsV1() {
drivers.Lock()
drivers.extensions[name] = d
drivers.Unlock()
}
return d, nil
return nil, fmt.Errorf("Error looking up volume plugin %s", name)
}
func validateDriver(vd volume.Driver) error {
@ -179,9 +181,13 @@ func GetDriverList() []string {
// GetAllDrivers lists all the registered drivers
func GetAllDrivers() ([]volume.Driver, error) {
plugins, err := drivers.plugingetter.GetAllByCap(extName)
if err != nil {
return nil, fmt.Errorf("error listing plugins: %v", err)
var plugins []getter.CompatPlugin
if drivers.plugingetter != nil {
var err error
plugins, err = drivers.plugingetter.GetAllByCap(extName)
if err != nil {
return nil, fmt.Errorf("error listing plugins: %v", err)
}
}
var ds []volume.Driver

View file

@ -3,14 +3,10 @@ package volumedrivers
import (
"testing"
pluginstore "github.com/docker/docker/plugin/store"
volumetestutils "github.com/docker/docker/volume/testutils"
)
func TestGetDriver(t *testing.T) {
pluginStore := pluginstore.NewStore("/var/lib/docker")
RegisterPluginGetter(pluginStore)
_, err := GetDriver("missing")
if err == nil {
t.Fatal("Expected error, was nil")

View file

@ -7,15 +7,11 @@ import (
"strings"
"testing"
pluginstore "github.com/docker/docker/plugin/store"
"github.com/docker/docker/volume/drivers"
volumetestutils "github.com/docker/docker/volume/testutils"
)
func TestCreate(t *testing.T) {
pluginStore := pluginstore.NewStore("/var/lib/docker")
volumedrivers.RegisterPluginGetter(pluginStore)
volumedrivers.Register(volumetestutils.NewFakeDriver("fake"), "fake")
defer volumedrivers.Unregister("fake")
dir, err := ioutil.TempDir("", "test-create")