
Merge pull request #28674 from vieux/1.13.0-rc2-cherrypicks

1.13.0-rc2 cherry-picks: part 3
Victor Vieux 2016-11-22 18:39:06 -08:00 committed by GitHub
commit 44fe761b35
163 changed files with 2555 additions and 7677 deletions


@ -25,16 +25,15 @@
FROM debian:jessie
# allow replacing httpredir or deb mirror
ARG APT_MIRROR=deb.debian.org
RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list
# Add zfs ppa
RUN apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys E871F18B51E0147C77796AC81196BA81F6B0FC61 \
|| apt-key adv --keyserver hkp://pgp.mit.edu:80 --recv-keys E871F18B51E0147C77796AC81196BA81F6B0FC61
RUN echo deb http://ppa.launchpad.net/zfs-native/stable/ubuntu trusty main > /etc/apt/sources.list.d/zfs.list
# Allow replacing httpredir mirror
ARG APT_MIRROR=httpredir.debian.org
RUN sed -i s/httpredir.debian.org/$APT_MIRROR/g /etc/apt/sources.list
# Packaged dependencies
RUN apt-get update && apt-get install -y \
apparmor \


@ -17,6 +17,10 @@
FROM armhf/debian:jessie
# allow replacing httpredir or deb mirror
ARG APT_MIRROR=deb.debian.org
RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list
# Packaged dependencies
RUN apt-get update && apt-get install -y \
apparmor \


@ -7,6 +7,10 @@
FROM debian:jessie
# allow replacing httpredir or deb mirror
ARG APT_MIRROR=deb.debian.org
RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list
# Compile and runtime deps
# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#build-dependencies
# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#runtime-dependencies


@ -1,5 +1,42 @@
This directory contains code pertaining to the Docker API:
# Working on the Engine API
- Used by the docker client when communicating with the docker daemon
The Engine API is an HTTP API used by the command-line client to communicate with the daemon. It can also be used by third-party software to control the daemon.
- Used by third party tools wishing to interface with the docker daemon
It consists of various components in this repository:
- `api/swagger.yaml` A Swagger definition of the API.
- `api/types/` Types shared by both the client and server, representing various objects, options, responses, etc. Most are written manually, but some are automatically generated from the Swagger definition. See [#27919](https://github.com/docker/docker/issues/27919) for progress on this.
- `cli/` The command-line client.
- `client/` The Go client used by the command-line client. It can also be used by third-party Go programs.
- `daemon/` The daemon, which serves the API.
## Swagger definition
The API is defined by the [Swagger](http://swagger.io/specification/) definition in `api/swagger.yaml`. This definition can be used to:
1. Automatically generate documentation.
2. Automatically generate the Go server and client. (A work-in-progress.)
3. Provide a machine readable version of the API for introspecting what it can do, automatically generating clients for other languages, etc.
## Updating the API documentation
The API documentation is generated entirely from `api/swagger.yaml`. If you make updates to the API, you'll need to edit this file to represent the change in the documentation.
The file is split into two main sections:
- `definitions`, which defines re-usable objects used in requests and responses
- `paths`, which defines the API endpoints (and some inline objects which don't need to be reusable)
To make an edit, first look for the endpoint you want to edit under `paths`, then make the required edits. Endpoints may reference reusable objects with `$ref`, which can be found in the `definitions` section.
There is hopefully enough example material in the file for you to copy a similar pattern from elsewhere in the file (e.g. adding new fields or endpoints), but for the full reference, see the [Swagger specification](http://swagger.io/specification/).
`swagger.yaml` is validated by `hack/validate/swagger` to ensure it is a valid Swagger definition. This is useful for when you are making edits to ensure you are doing the right thing.
## Viewing the API documentation
When you make edits to `swagger.yaml`, you may want to check the generated API documentation to ensure it renders correctly.
All the documentation generation is done in the documentation repository, [docker/docker.github.io](https://github.com/docker/docker.github.io). The Swagger definition is vendored periodically into this repository, but you can manually copy over the Swagger definition to test changes.
Copy `api/swagger.yaml` in this repository to `engine/api/[VERSION_NUMBER]/swagger.yaml` in the documentation repository, overwriting what is already there. Then, run `docker-compose up` in the documentation repository and browse to [http://localhost:4000/engine/api/](http://localhost:4000/engine/api/) when it finishes rendering.
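
As a point of reference for the `client/` package described above, here is a minimal, hedged sketch of a third-party Go program using it (it assumes a daemon reachable through the standard `DOCKER_HOST`/`DOCKER_API_VERSION` environment variables):

```
package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

func main() {
	// NewEnvClient configures the client from DOCKER_HOST, DOCKER_API_VERSION, etc.
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}

	// List running containers - the same call the CLI makes for `docker ps`.
	containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{})
	if err != nil {
		panic(err)
	}
	for _, c := range containers {
		fmt.Println(c.ID, c.Image)
	}
}
```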


@ -11,7 +11,7 @@ import (
// Backend for Plugin
type Backend interface {
Disable(name string) error
Enable(name string) error
Enable(name string, config *enginetypes.PluginEnableConfig) error
List() ([]enginetypes.Plugin, error)
Inspect(name string) (enginetypes.Plugin, error)
Remove(name string, config *enginetypes.PluginRmConfig) error


@ -4,6 +4,7 @@ import (
"encoding/base64"
"encoding/json"
"net/http"
"strconv"
"strings"
"github.com/docker/docker/api/server/httputils"
@ -56,7 +57,18 @@ func (pr *pluginRouter) createPlugin(ctx context.Context, w http.ResponseWriter,
}
func (pr *pluginRouter) enablePlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
return pr.backend.Enable(vars["name"])
if err := httputils.ParseForm(r); err != nil {
return err
}
name := vars["name"]
timeout, err := strconv.Atoi(r.Form.Get("timeout"))
if err != nil {
return err
}
config := &types.PluginEnableConfig{Timeout: timeout}
return pr.backend.Enable(name, config)
}
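
A hedged sketch of driving this new enable-with-timeout path from the Go client side (the plugin name and timeout value are illustrative; the client-side `PluginEnableOptions` counterpart appears later in this changeset):

```
package main

import (
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}

	// Timeout is forwarded as the `timeout` query parameter parsed by the handler above.
	opts := types.PluginEnableOptions{Timeout: 30}
	if err := cli.PluginEnable(context.Background(), "example/sample-plugin:latest", opts); err != nil {
		panic(err)
	}
}
```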
func (pr *pluginRouter) disablePlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {


@ -44,7 +44,7 @@ func (sr *swarmRouter) initRoutes() {
router.NewGetRoute("/tasks", sr.getTasks),
router.NewGetRoute("/tasks/{id}", sr.getTask),
router.NewGetRoute("/secrets", sr.getSecrets),
router.NewPostRoute("/secrets", sr.createSecret),
router.NewPostRoute("/secrets/create", sr.createSecret),
router.NewDeleteRoute("/secrets/{id}", sr.removeSecret),
router.NewGetRoute("/secrets/{id}", sr.getSecret),
router.NewPostRoute("/secrets/{id}/update", sr.updateSecret),


@ -237,10 +237,6 @@ func (sr *swarmRouter) getServiceLogs(ctx context.Context, w http.ResponseWriter
OutStream: w,
}
if !logsConfig.Follow {
return fmt.Errorf("Bad parameters: Only follow mode is currently supported")
}
if logsConfig.Details {
return fmt.Errorf("Bad parameters: details is not currently supported")
}


@ -16,7 +16,6 @@ import (
"github.com/docker/docker/api/types/registry"
timetypes "github.com/docker/docker/api/types/time"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/api/types/versions/v1p24"
"github.com/docker/docker/pkg/ioutils"
"golang.org/x/net/context"
)
@ -42,16 +41,24 @@ func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *ht
if versions.LessThan(httputils.VersionFromContext(ctx), "1.25") {
// TODO: handle this conversion in engine-api
oldInfo := &v1p24.Info{
InfoBase: info.InfoBase,
type oldInfo struct {
*types.Info
ExecutionDriver string
}
old := &oldInfo{
Info: info,
ExecutionDriver: "<not supported>",
}
for _, s := range info.SecurityOptions {
if s.Key == "Name" {
oldInfo.SecurityOptions = append(oldInfo.SecurityOptions, s.Value)
}
nameOnlySecurityOptions := []string{}
kvSecOpts, err := types.DecodeSecurityOptions(old.SecurityOptions)
if err != nil {
return err
}
return httputils.WriteJSON(w, http.StatusOK, oldInfo)
for _, s := range kvSecOpts {
nameOnlySecurityOptions = append(nameOnlySecurityOptions, s.Name)
}
old.SecurityOptions = nameOnlySecurityOptions
return httputils.WriteJSON(w, http.StatusOK, old)
}
return httputils.WriteJSON(w, http.StatusOK, info)
}


@ -10,18 +10,18 @@ consumes:
- "text/plain"
basePath: "/v1.25"
info:
title: "Docker Remote API"
title: "Docker Engine API"
version: "1.25"
x-logo:
url: "https://docs.docker.com/images/logo-docker-main.png"
description: |
The Docker API is an HTTP REST API served by Docker Engine. It is the API the Docker client uses to communicate with the Engine, so everything the Docker client can do can be done with the API.
The Engine API is an HTTP API served by Docker Engine. It is the API the Docker client uses to communicate with the Engine, so everything the Docker client can do can be done with the API.
Most of the client's commands map directly to API endpoints (e.g. `docker ps` is `GET /containers/json`). The notable exception is running containers, which consists of several API calls. [There is example of using `curl` to run a container in the SDK documentation.](#TODO)
Most of the client's commands map directly to API endpoints (e.g. `docker ps` is `GET /containers/json`). The notable exception is running containers, which consists of several API calls.
# Errors
The Remote API uses standard HTTP status codes to indicate the success or failure of the API call. The body of the response will be JSON in the following format:
The API uses standard HTTP status codes to indicate the success or failure of the API call. The body of the response will be JSON in the following format:
```
{
@ -31,12 +31,28 @@ info:
# Versioning
The API is usually changed in each release of Docker. If you want to write a client that doesn't break when connecting to newer Docker releases, you can lock to a specific API version.
The API is usually changed in each release of Docker, so API calls are versioned to ensure that clients don't break.
For Docker 1.13, the API version is 1.25. To lock to this version, you prefix the URL with `/v1.25`. For example, calling `/info` is the same as calling `/v1.25/info`.
For Docker Engine 1.13, the API version is 1.25. To lock to this version, you prefix the URL with `/v1.25`. For example, calling `/info` is the same as calling `/v1.25/info`.
Engine releases in the near future should support this version of the API, so your client will continue to work even if it is talking to a newer Engine.
In previous versions of Docker, it was possible to access the API without providing a version. This behaviour is now deprecated and will be removed in a future version of Docker.
The API uses an open schema model, which means the server may add extra properties to responses. Likewise, the server will ignore any extra query parameters and request body properties. When you write clients, you need to ignore additional properties in responses to ensure they do not break when talking to newer Docker daemons.
This documentation is for version 1.25 of the API, which was introduced with Docker 1.13. Use this table to find documentation for previous versions of the API:
Docker version | API version | Changes
----------------|-------------|---------
1.12.x | [1.24](/engine/api/v1.24/) | [API changes](/engine/api/version-history/#v1-24-api-changes)
1.11.x | [1.23](/engine/api/v1.23/) | [API changes](/engine/api/version-history/#v1-23-api-changes)
1.10.x | [1.22](/engine/api/v1.22/) | [API changes](/engine/api/version-history/#v1-22-api-changes)
1.9.x | [1.21](/engine/api/v1.21/) | [API changes](/engine/api/version-history/#v1-21-api-changes)
1.8.x | [1.20](/engine/api/v1.20/) | [API changes](/engine/api/version-history/#v1-20-api-changes)
1.7.x | [1.19](/engine/api/v1.19/) | [API changes](/engine/api/version-history/#v1-19-api-changes)
1.6.x | [1.18](/engine/api/v1.18/) | [API changes](/engine/api/version-history/#v1-18-api-changes)
# Authentication
Authentication for registries is handled client side. The client has to send authentication details to various endpoints that need to communicate with registries, such as `POST /images/(name)/push`. These are sent as `X-Registry-Auth` header as a Base64 encoded (JSON) string with the following structure:
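
To illustrate the version pinning described above, a minimal Go sketch that locks the client to API version 1.25 (the socket path is an assumption):

```
package main

import (
	"fmt"

	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

func main() {
	// Pin the client to API version 1.25 so every request is prefixed with /v1.25.
	cli, err := client.NewClient("unix:///var/run/docker.sock", "1.25", nil, nil)
	if err != nil {
		panic(err)
	}

	info, err := cli.Info(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println("daemon ID:", info.ID)
}
```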
@ -543,7 +559,8 @@ definitions:
type: "string"
Tmpfs:
type: "object"
description: "List of tmpfs mounts for this container."
description: |
A map of container directories which should be replaced by tmpfs mounts, and their corresponding mount options. For example: `{ "/run": "rw,noexec,nosuid,size=65536k" }`.
additionalProperties:
type: "string"
UTSMode:
@ -1262,7 +1279,7 @@ definitions:
x-nullable: false
Plugin:
description: "A plugin for the Remote API"
description: "A plugin for the Engine API"
type: "object"
required: [Settings, Enabled, Config, Name, Tag]
properties:
@ -1311,9 +1328,8 @@ definitions:
- Entrypoint
- Workdir
- Network
- Capabilities
- Linux
- Mounts
- Devices
- Env
- Args
properties:
@ -1361,18 +1377,26 @@ definitions:
Type:
x-nullable: false
type: "string"
Capabilities:
type: "array"
items:
type: "string"
Linux:
type: "object"
x-nullable: false
required: [Capabilities, DeviceCreation, Devices]
properties:
Capabilities:
type: "array"
items:
type: "string"
DeviceCreation:
type: "boolean"
x-nullable: false
Devices:
type: "array"
items:
$ref: "#/definitions/PluginDevice"
Mounts:
type: "array"
items:
$ref: "#/definitions/PluginMount"
Devices:
type: "array"
items:
$ref: "#/definitions/PluginDevice"
Env:
type: "array"
items:
@ -1684,6 +1708,13 @@ definitions:
type: "object"
additionalProperties:
type: "string"
EncryptionConfig:
description: "Parameters related to encryption-at-rest."
type: "object"
properties:
AutoLockManagers:
description: "If set, generate a key and use it to lock data stored on the managers."
type: "boolean"
TaskDefaults:
description: "Defaults for creating tasks in this cluster."
type: "object"
@ -1717,6 +1748,8 @@ definitions:
JoinTokens:
Worker: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx"
Manager: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2"
EncryptionConfig:
AutoLockManagers: false
# The Swarm information for `GET /info`. It is the same as `GET /swarm`, but
# without `JoinTokens`.
ClusterInfo:
@ -1787,6 +1820,25 @@ definitions:
description: "Amount of time to wait for the container to terminate before forcefully killing it."
type: "integer"
format: "int64"
DNSConfig:
description: "Specification for DNS related configurations in resolver configuration file (`resolv.conf`)."
type: "object"
properties:
Nameservers:
description: "The IP addresses of the name servers."
type: "array"
items:
type: "string"
Search:
description: "A search list for host-name lookup."
type: "array"
items:
type: "string"
Options:
description: "A list of internal resolver variables to be modified (e.g., `debug`, `ndots:3`, etc.)."
type: "array"
items:
type: "string"
Resources:
description: "Resource requirements which apply to each individual container created as part of the service."
type: "object"
@ -2296,7 +2348,41 @@ definitions:
type: "array"
items:
$ref: "#/definitions/Mount"
SecretSpec:
type: "object"
properties:
Name:
description: "User-defined name of the secret."
type: "string"
Labels:
description: "User-defined key/value metadata."
type: "object"
additionalProperties:
type: "string"
Data:
description: "Base64-url-safe-encoded secret data"
type: "array"
items:
type: "string"
Secret:
type: "object"
properties:
ID:
type: "string"
Version:
type: "object"
properties:
Index:
type: "integer"
format: "int64"
CreatedAt:
type: "string"
format: "dateTime"
UpdatedAt:
type: "string"
format: "dateTime"
Spec:
$ref: "#/definitions/ServiceSpec"
paths:
/containers/json:
get:
@ -3651,7 +3737,7 @@ paths:
Either the `stream` or `logs` parameter must be `true` for this endpoint to do anything.
See [the documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) for more details.
See [the documentation for the `docker attach` command](/engine/reference/commandline/attach/) for more details.
### Hijacking
@ -4152,11 +4238,15 @@ paths:
default: false
- name: "filters"
in: "query"
description: "A JSON encoded value of the filters (a `map[string][]string`) to process on the containers list"
type: "string"
- name: "filter"
in: "query"
description: "Only return images with the specified name."
description: |
A JSON encoded value of the filters (a `map[string][]string`) to process on the images list.
Available filters:
- `dangling=true`
- `label=key` or `label="key=value"` of an image label
- `before`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`)
- `since`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`)
- `reference`=(`<image-name>[:<tag>]`)
type: "string"
- name: "digests"
in: "query"
@ -4171,7 +4261,7 @@ paths:
description: |
Build an image from a tar archive with a `Dockerfile` in it.
The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/).
The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](/engine/reference/builder/).
The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output.
@ -4255,7 +4345,7 @@ paths:
type: "integer"
- name: "buildargs"
in: "query"
description: "JSON map of string pairs for build-time variables. Users pass these values at build-time. Docker uses the buildargs as the environment context for commands run via the `Dockerfile` RUN instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for passing secret values. [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg)"
description: "JSON map of string pairs for build-time variables. Users pass these values at build-time. Docker uses the buildargs as the environment context for commands run via the `Dockerfile` RUN instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for passing secret values. [Read more about the buildargs instruction.](/engine/reference/builder/#arg)"
type: "integer"
- name: "shmsize"
in: "query"
@ -6307,7 +6397,7 @@ paths:
summary: "Install a plugin"
operationId: "PostPluginsPull"
description: |
Pulls and installs a plugin. After the plugin is installed, it can be enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginEnable).
Pulls and installs a plugin. After the plugin is installed, it can be enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable).
produces:
- "application/json"
responses:
@ -6430,6 +6520,11 @@ paths:
description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted."
required: true
type: "string"
- name: "timeout"
in: "query"
description: "Set the HTTP client timeout (in seconds)"
type: "integer"
default: 0
tags:
- "Plugins"
/plugins/{name}/disable:
@ -6478,6 +6573,31 @@ paths:
format: "binary"
tags:
- "Plugins"
/plugins/{name}/push:
post:
summary: "Push a plugin"
operationId: "PluginPush"
description: |
Push a plugin to the registry.
parameters:
- name: "name"
in: "path"
description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted."
required: true
type: "string"
responses:
200:
description: "no error"
404:
description: "plugin not installed"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
tags:
- "Plugins"
/plugins/{name}/set:
post:
summary: "Configure a plugin"
@ -6533,9 +6653,10 @@ paths:
Available filters:
- `id=<node id>`
- `label=<engine label>`
- `membership=`(`accepted`|`pending`)`
- `name=<node name>`
- `membership=`(`pending`|`accepted`|`rejected`)`
- `role=`(`worker`|`manager`)`
- `role=`(`manager`|`worker`)`
type: "string"
tags:
- "Nodes"
@ -6559,7 +6680,7 @@ paths:
parameters:
- name: "id"
in: "path"
description: "The ID of the node"
description: "The ID or name of the node"
type: "string"
required: true
tags:
@ -6581,7 +6702,7 @@ paths:
parameters:
- name: "id"
in: "path"
description: "The ID of the node"
description: "The ID or name of the node"
type: "string"
required: true
- name: "force"
@ -6661,6 +6782,8 @@ paths:
SnapshotInterval: 10000
ElectionTick: 3
TaskDefaults: {}
EncryptionConfig:
AutoLockManagers: false
Name: "default"
JoinTokens:
Worker: "SWMTKN-1-1h8aps2yszaiqmz2l3oc5392pgk8e49qhx2aj3nyv0ui0hez2a-6qmn92w6bu3jdvnglku58u11a"
@ -6728,6 +6851,8 @@ paths:
Raft: {}
Dispatcher: {}
CAConfig: {}
EncryptionConfig:
AutoLockManagers: false
tags:
- "Swarm"
/swarm/join:
@ -6840,6 +6965,63 @@ paths:
description: "Rotate the manager join token."
type: "boolean"
default: false
- name: "rotateManagerUnlockKey"
in: "query"
description: "Rotate the manager unlock key."
type: "boolean"
default: false
tags:
- "Swarm"
/swarm/unlockkey:
get:
summary: "Get the unlock key"
operationId: "SwarmUnlockkey"
consumes:
- "application/json"
responses:
200:
description: "no error"
schema:
type: "object"
properties:
UnlockKey:
description: "The swarm's unlock key."
type: "string"
example:
UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
tags:
- "Swarm"
/swarm/unlock:
post:
summary: "Unlock a locked manager"
operationId: "SwarmUnlock"
consumes:
- "application/json"
produces:
- "application/json"
parameters:
- name: "body"
in: "body"
required: true
schema:
type: "object"
properties:
UnlockKey:
description: "The swarm's unlock key."
type: "string"
example:
UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8"
responses:
200:
description: "no error"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
tags:
- "Swarm"
/services:
@ -6864,8 +7046,9 @@ paths:
description: |
A JSON encoded value of the filters (a `map[string][]string`) to process on the services list. Available filters:
- `id=<node id>`
- `name=<node name>`
- `id=<service id>`
- `name=<service name>`
- `label=<service label>`
tags:
- "Services"
/services/create:
@ -6927,6 +7110,10 @@ paths:
Labels:
com.example.something: "something-value"
User: "33"
DNSConfig:
Nameservers: ["8.8.8.8"]
Search: ["example.org"]
Options: ["timeout:3"]
LogDriver:
Name: "json-file"
Options:
@ -7083,7 +7270,85 @@ paths:
description: "A base64-encoded auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)"
type: "string"
tags: [Service]
tags: ["Services"]
/services/{id}/logs:
get:
summary: "Get service logs"
description: |
Get `stdout` and `stderr` logs from a service.
**Note**: This endpoint works only for services with the `json-file` or `journald` logging drivers.
operationId: "ServiceLogs"
produces:
- "application/vnd.docker.raw-stream"
- "application/json"
responses:
101:
description: "logs returned as a stream"
schema:
type: "string"
format: "binary"
200:
description: "logs returned as a string in response body"
schema:
type: "string"
404:
description: "no such container"
schema:
$ref: "#/definitions/ErrorResponse"
examples:
application/json:
message: "No such container: c2ada9df5af8"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
required: true
description: "ID or name of the container"
type: "string"
- name: "details"
in: "query"
description: "Show extra details provided to logs."
type: "boolean"
default: false
- name: "follow"
in: "query"
description: |
Return the logs as a stream.
This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/PostContainerAttach).
type: "boolean"
default: false
- name: "stdout"
in: "query"
description: "Return logs from `stdout`"
type: "boolean"
default: false
- name: "stderr"
in: "query"
description: "Return logs from `stderr`"
type: "boolean"
default: false
- name: "since"
in: "query"
description: "Only return logs since this time, as a UNIX timestamp"
type: "integer"
default: 0
- name: "timestamps"
in: "query"
description: "Add timestamps to every log line"
type: "boolean"
default: false
- name: "tail"
in: "query"
description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines."
type: "string"
default: "all"
tags:
- "Services"
/tasks:
get:
summary: "List tasks"
@ -7255,3 +7520,146 @@ paths:
type: "string"
tags:
- "Tasks"
/secrets:
get:
summary: "List secrets"
operationId: "SecretList"
produces:
- "application/json"
responses:
200:
description: "no error"
schema:
type: "array"
items:
$ref: "#/definitions/Secret"
example:
- ID: "ktnbjxoalbkvbvedmg1urrz8h"
Version:
Index: 11
CreatedAt: "2016-11-05T01:20:17.327670065Z"
UpdatedAt: "2016-11-05T01:20:17.327670065Z"
Spec:
Name: "app-dev.crt"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "filters"
in: "query"
type: "string"
description: |
A JSON encoded value of the filters (a `map[string][]string`) to process on the secrets list. Available filters:
- `names=<secret name>`
tags:
- "Secrets"
/secrets/create:
post:
summary: "Create a secret"
operationId: "SecretCreate"
consumes:
- "application/json"
produces:
- "application/json"
responses:
201:
description: "no error"
schema:
type: "object"
properties:
ID:
description: "The ID of the created secret."
type: "string"
example:
ID: "ktnbjxoalbkvbvedmg1urrz8h"
406:
description: "server error or node is not part of a swarm"
schema:
$ref: "#/definitions/ErrorResponse"
409:
description: "name conflicts with an existing object"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "body"
in: "body"
schema:
allOf:
- $ref: "#/definitions/SecretSpec"
- type: "object"
example:
Name: "app-key.crt"
Labels:
foo: "bar"
Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg=="
tags:
- "Secrets"
/secrets/{id}:
get:
summary: "Inspect a secret"
operationId: "SecretsInspect"
produces:
- "application/json"
responses:
200:
description: "no error"
schema:
$ref: "#/definitions/Secret"
example:
ID: "ktnbjxoalbkvbvedmg1urrz8h"
Version:
Index: 11
CreatedAt: "2016-11-05T01:20:17.327670065Z"
UpdatedAt: "2016-11-05T01:20:17.327670065Z"
Spec:
Name: "app-dev.crt"
404:
description: "secret not found"
schema:
$ref: "#/definitions/ErrorResponse"
406:
description: "node is not part of a swarm"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
required: true
type: "string"
description: "ID of the secret"
tags:
- "Secrets"
delete:
summary: "Delete a secret"
operationId: "SecretsDelete"
produces:
- "application/json"
responses:
204:
description: "no error"
404:
description: "secret not found"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "id"
in: "path"
required: true
type: "string"
description: "ID of the secret"
tags:
- "Secrets"


@ -332,6 +332,11 @@ type PluginRemoveOptions struct {
Force bool
}
// PluginEnableOptions holds parameters to enable plugins.
type PluginEnableOptions struct {
Timeout int
}
// PluginInstallOptions holds parameters to install a plugin.
type PluginInstallOptions struct {
Disabled bool
@ -351,7 +356,7 @@ type SecretRequestOption struct {
Mode os.FileMode
}
// SwarmUnlockKeyResponse contains the response for Remote API:
// SwarmUnlockKeyResponse contains the response for Engine API:
// GET /swarm/unlockkey
type SwarmUnlockKeyResponse struct {
// UnlockKey is the unlock key in ASCII-armored format.


@ -59,3 +59,8 @@ type ExecConfig struct {
type PluginRmConfig struct {
ForceRemove bool
}
// PluginEnableConfig holds arguments for the plugin enable
type PluginEnableConfig struct {
Timeout int
}


@ -34,29 +34,29 @@ type HealthConfig struct {
// All fields added to this struct must be marked `omitempty` to keep getting
// predictable hashes from the old `v1Compatibility` configuration.
type Config struct {
Hostname string // Hostname
Domainname string // Domainname
User string // User that will run the command(s) inside the container, also support user:group
AttachStdin bool // Attach the standard input, makes possible user interaction
AttachStdout bool // Attach the standard output
AttachStderr bool // Attach the standard error
ExposedPorts map[nat.Port]struct{} `json:",omitempty"` // List of exposed ports
Tty bool // Attach standard streams to a tty, including stdin if it is not closed.
OpenStdin bool // Open stdin
StdinOnce bool // If true, close stdin after the 1 attached client disconnects.
Env []string // List of environment variable to set in the container
Cmd strslice.StrSlice // Command to run when starting the container
Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy
ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific)
Image string // Name of the image as it was passed by the operator (e.g. could be symbolic)
Volumes map[string]struct{} // List of volumes (mounts) used for the container
WorkingDir string // Current directory (PWD) in the command will be launched
Entrypoint strslice.StrSlice // Entrypoint to run when starting the container
NetworkDisabled bool `json:",omitempty"` // Is network disabled
MacAddress string `json:",omitempty"` // Mac Address of the container
OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile
Labels map[string]string // List of labels set to this container
StopSignal string `json:",omitempty"` // Signal to stop a container
StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container
Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
Hostname string // Hostname
Domainname string // Domainname
User string // User that will run the command(s) inside the container, also support user:group
AttachStdin bool // Attach the standard input, makes possible user interaction
AttachStdout bool // Attach the standard output
AttachStderr bool // Attach the standard error
ExposedPorts nat.PortSet `json:",omitempty"` // List of exposed ports
Tty bool // Attach standard streams to a tty, including stdin if it is not closed.
OpenStdin bool // Open stdin
StdinOnce bool // If true, close stdin after the 1 attached client disconnects.
Env []string // List of environment variable to set in the container
Cmd strslice.StrSlice // Command to run when starting the container
Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy
ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific)
Image string // Name of the image as it was passed by the operator (e.g. could be symbolic)
Volumes map[string]struct{} // List of volumes (mounts) used for the container
WorkingDir string // Current directory (PWD) in the command will be launched
Entrypoint strslice.StrSlice // Entrypoint to run when starting the container
NetworkDisabled bool `json:",omitempty"` // Is network disabled
MacAddress string `json:",omitempty"` // Mac Address of the container
OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile
Labels map[string]string // List of labels set to this container
StopSignal string `json:",omitempty"` // Signal to stop a container
StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container
Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
}


@ -1,14 +0,0 @@
package container
import "os"
// ContainerSecret represents a secret in a container. This gets realized
// in the container tmpfs
type ContainerSecret struct {
Name string
Target string
Data []byte
UID string
GID string
Mode os.FileMode
}


@ -3,7 +3,7 @@ package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// Plugin A plugin for the Remote API
// Plugin A plugin for the Engine API
// swagger:model Plugin
type Plugin struct {
@ -39,18 +39,10 @@ type PluginConfig struct {
// Required: true
Args PluginConfigArgs `json:"Args"`
// capabilities
// Required: true
Capabilities []string `json:"Capabilities"`
// description
// Required: true
Description string `json:"Description"`
// devices
// Required: true
Devices []PluginDevice `json:"Devices"`
// documentation
// Required: true
Documentation string `json:"Documentation"`
@ -67,6 +59,10 @@ type PluginConfig struct {
// Required: true
Interface PluginConfigInterface `json:"Interface"`
// linux
// Required: true
Linux PluginConfigLinux `json:"Linux"`
// mounts
// Required: true
Mounts []PluginMount `json:"Mounts"`
@ -117,6 +113,23 @@ type PluginConfigInterface struct {
Types []PluginInterfaceType `json:"Types"`
}
// PluginConfigLinux plugin config linux
// swagger:model PluginConfigLinux
type PluginConfigLinux struct {
// capabilities
// Required: true
Capabilities []string `json:"Capabilities"`
// device creation
// Required: true
DeviceCreation bool `json:"DeviceCreation"`
// devices
// Required: true
Devices []PluginDevice `json:"Devices"`
}
// PluginConfigNetwork plugin config network
// swagger:model PluginConfigNetwork
type PluginConfigNetwork struct {


@ -5,7 +5,7 @@ import (
"fmt"
)
// PluginsListResponse contains the response for the remote API
// PluginsListResponse contains the response for the Engine API
type PluginsListResponse []*Plugin
const (


@ -6,9 +6,7 @@ import "os"
type Secret struct {
ID string
Meta
Spec SecretSpec
Digest string
SecretSize int64
Spec SecretSpec
}
// SecretSpec represents a secret specification from a secret in swarm
@ -27,7 +25,7 @@ type SecretReferenceFileTarget struct {
// SecretReference is a reference to a secret in swarm
type SecretReference struct {
File *SecretReferenceFileTarget
SecretID string
SecretName string
Target *SecretReferenceFileTarget
}


@ -1,8 +1,11 @@
package types
import (
"errors"
"fmt"
"io"
"os"
"strings"
"time"
"github.com/docker/docker/api/types/container"
@ -14,14 +17,14 @@ import (
"github.com/docker/go-connections/nat"
)
// ContainerChange contains response of Remote API:
// ContainerChange contains response of Engine API:
// GET "/containers/{name:.*}/changes"
type ContainerChange struct {
Kind int
Path string
}
// ImageHistory contains response of Remote API:
// ImageHistory contains response of Engine API:
// GET "/images/{name:.*}/history"
type ImageHistory struct {
ID string `json:"Id"`
@ -32,7 +35,7 @@ type ImageHistory struct {
Comment string
}
// ImageDelete contains response of Remote API:
// ImageDelete contains response of Engine API:
// DELETE "/images/{name:.*}"
type ImageDelete struct {
Untagged string `json:",omitempty"`
@ -53,7 +56,7 @@ type RootFS struct {
BaseLayer string `json:",omitempty"`
}
// ImageInspect contains response of Remote API:
// ImageInspect contains response of Engine API:
// GET "/images/{name:.*}/json"
type ImageInspect struct {
ID string `json:"Id"`
@ -76,7 +79,7 @@ type ImageInspect struct {
RootFS RootFS
}
// Container contains response of Remote API:
// Container contains response of Engine API:
// GET "/containers/json"
type Container struct {
ID string `json:"Id"`
@ -98,7 +101,7 @@ type Container struct {
Mounts []MountPoint
}
// CopyConfig contains request body of Remote API:
// CopyConfig contains request body of Engine API:
// POST "/containers/"+containerID+"/copy"
type CopyConfig struct {
Resource string
@ -115,28 +118,28 @@ type ContainerPathStat struct {
LinkTarget string `json:"linkTarget"`
}
// ContainerStats contains response of Remote API:
// ContainerStats contains response of Engine API:
// GET "/stats"
type ContainerStats struct {
Body io.ReadCloser `json:"body"`
OSType string `json:"ostype"`
}
// ContainerProcessList contains response of Remote API:
// ContainerProcessList contains response of Engine API:
// GET "/containers/{name:.*}/top"
type ContainerProcessList struct {
Processes [][]string
Titles []string
}
// Ping contains response of Remote API:
// Ping contains response of Engine API:
// GET "/_ping"
type Ping struct {
APIVersion string
Experimental bool
}
// Version contains response of Remote API:
// Version contains response of Engine API:
// GET "/version"
type Version struct {
Version string
@ -158,9 +161,9 @@ type Commit struct {
Expected string
}
// InfoBase contains the base response of Remote API:
// Info contains response of Engine API:
// GET "/info"
type InfoBase struct {
type Info struct {
ID string
Containers int
ContainersRunning int
@ -219,18 +222,49 @@ type InfoBase struct {
ContainerdCommit Commit
RuncCommit Commit
InitCommit Commit
SecurityOptions []string
}
// SecurityOpt holds key/value pair about a security option
type SecurityOpt struct {
// KeyValue holds a key/value pair
type KeyValue struct {
Key, Value string
}
// Info contains response of Remote API:
// GET "/info"
type Info struct {
*InfoBase
SecurityOptions []SecurityOpt
// SecurityOpt contains the name and options of a security option
type SecurityOpt struct {
Name string
Options []KeyValue
}
// DecodeSecurityOptions decodes a security options string slice to a type safe
// SecurityOpt
func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) {
so := []SecurityOpt{}
for _, opt := range opts {
// support output from a < 1.13 docker daemon
if !strings.Contains(opt, "=") {
so = append(so, SecurityOpt{Name: opt})
continue
}
secopt := SecurityOpt{}
split := strings.Split(opt, ",")
for _, s := range split {
kv := strings.SplitN(s, "=", 2)
if len(kv) != 2 {
return nil, fmt.Errorf("invalid security option %q", s)
}
if kv[0] == "" || kv[1] == "" {
return nil, errors.New("invalid empty security option")
}
if kv[0] == "name" {
secopt.Name = kv[1]
continue
}
secopt.Options = append(secopt.Options, KeyValue{Key: kv[0], Value: kv[1]})
}
so = append(so, secopt)
}
return so, nil
}
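// Illustrative usage, not part of this change: given option strings as reported by the
// daemon, DecodeSecurityOptions([]string{"name=seccomp,profile=default", "apparmor"})
// yields SecurityOpt{Name: "seccomp", Options: []KeyValue{{Key: "profile", Value: "default"}}}
// followed by SecurityOpt{Name: "apparmor"} (the bare form sent by pre-1.13 daemons).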
// PluginsInfo is a temp struct holding Plugins name
@ -305,7 +339,7 @@ type ContainerNode struct {
Labels map[string]string
}
// ContainerJSONBase contains response of Remote API:
// ContainerJSONBase contains response of Engine API:
// GET "/containers/{name:.*}/json"
type ContainerJSONBase struct {
ID string `json:"Id"`
@ -466,7 +500,7 @@ type Runtime struct {
Args []string `json:"runtimeArgs,omitempty"`
}
// DiskUsage contains response of Remote API:
// DiskUsage contains response of Engine API:
// GET "/system/df"
type DiskUsage struct {
LayersSize int64
@ -475,49 +509,49 @@ type DiskUsage struct {
Volumes []*Volume
}
// ImagesPruneConfig contains the configuration for Remote API:
// ImagesPruneConfig contains the configuration for Engine API:
// POST "/images/prune"
type ImagesPruneConfig struct {
DanglingOnly bool
}
// ContainersPruneConfig contains the configuration for Remote API:
// ContainersPruneConfig contains the configuration for Engine API:
// POST "/images/prune"
type ContainersPruneConfig struct {
}
// VolumesPruneConfig contains the configuration for Remote API:
// VolumesPruneConfig contains the configuration for Engine API:
// POST "/images/prune"
type VolumesPruneConfig struct {
}
// NetworksPruneConfig contains the configuration for Remote API:
// NetworksPruneConfig contains the configuration for Engine API:
// POST "/networks/prune"
type NetworksPruneConfig struct {
}
// ContainersPruneReport contains the response for Remote API:
// ContainersPruneReport contains the response for Engine API:
// POST "/containers/prune"
type ContainersPruneReport struct {
ContainersDeleted []string
SpaceReclaimed uint64
}
// VolumesPruneReport contains the response for Remote API:
// VolumesPruneReport contains the response for Engine API:
// POST "/volumes/prune"
type VolumesPruneReport struct {
VolumesDeleted []string
SpaceReclaimed uint64
}
// ImagesPruneReport contains the response for Remote API:
// ImagesPruneReport contains the response for Engine API:
// POST "/images/prune"
type ImagesPruneReport struct {
ImagesDeleted []ImageDelete
SpaceReclaimed uint64
}
// NetworksPruneReport contains the response for Remote API:
// NetworksPruneReport contains the response for Engine API:
// POST "/networks/prune"
type NetworksPruneReport struct {
NetworksDeleted []string


@ -1,11 +0,0 @@
// Package v1p24 provides specific API types for the API version 1, patch 24.
package v1p24
import "github.com/docker/docker/api/types"
// Info is a backcompatibility struct for the API 1.24
type Info struct {
*types.InfoBase
ExecutionDriver string
SecurityOptions []string
}


@ -170,7 +170,7 @@ func getExecExitCode(ctx context.Context, client apiclient.ContainerAPIClient, e
resp, err := client.ContainerExecInspect(ctx, execID)
if err != nil {
// If we can't connect, then the daemon probably died.
if err != apiclient.ErrConnectionFailed {
if !apiclient.IsErrConnectionFailed(err) {
return false, -1, err
}
return false, -1, nil


@ -9,6 +9,7 @@ import (
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/events"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/cli/command"
clientapi "github.com/docker/docker/client"
)
@ -19,11 +20,21 @@ func waitExitOrRemoved(dockerCli *command.DockerCli, ctx context.Context, contai
panic("Internal Error: waitExitOrRemoved needs a containerID as parameter")
}
var removeErr error
statusChan := make(chan int)
exitCode := 125
eventProcessor := func(e events.Message) bool {
// Get events via Events API
f := filters.NewArgs()
f.Add("type", "container")
f.Add("container", containerID)
options := types.EventsOptions{
Filters: f,
}
eventCtx, cancel := context.WithCancel(ctx)
eventq, errq := dockerCli.Client().Events(eventCtx, options)
eventProcessor := func(e events.Message) bool {
stopProcessing := false
switch e.Status {
case "die":
@ -37,6 +48,18 @@ func waitExitOrRemoved(dockerCli *command.DockerCli, ctx context.Context, contai
}
if !waitRemove {
stopProcessing = true
} else {
// If we are talking to an older daemon, `AutoRemove` is not supported.
// We need to fall back to the old behavior, which is client-side removal
if versions.LessThan(dockerCli.Client().ClientVersion(), "1.25") {
go func() {
removeErr = dockerCli.Client().ContainerRemove(ctx, containerID, types.ContainerRemoveOptions{RemoveVolumes: true})
if removeErr != nil {
logrus.Errorf("error removing container: %v", removeErr)
cancel() // cancel the event Q
}
}()
}
}
case "detach":
exitCode = 0
@ -44,39 +67,27 @@ func waitExitOrRemoved(dockerCli *command.DockerCli, ctx context.Context, contai
case "destroy":
stopProcessing = true
}
if stopProcessing {
statusChan <- exitCode
return true
}
return false
return stopProcessing
}
// Get events via Events API
f := filters.NewArgs()
f.Add("type", "container")
f.Add("container", containerID)
options := types.EventsOptions{
Filters: f,
}
eventCtx, cancel := context.WithCancel(ctx)
eventq, errq := dockerCli.Client().Events(eventCtx, options)
go func() {
defer cancel()
defer func() {
statusChan <- exitCode // must always send an exit code or the caller will block
cancel()
}()
for {
select {
case <-eventCtx.Done():
if removeErr != nil {
return
}
case evt := <-eventq:
if eventProcessor(evt) {
return
}
case err := <-errq:
logrus.Errorf("error getting events from daemon: %v", err)
statusChan <- exitCode
return
}
}
@ -91,7 +102,7 @@ func getExitCode(dockerCli *command.DockerCli, ctx context.Context, containerID
c, err := dockerCli.Client().ContainerInspect(ctx, containerID)
if err != nil {
// If we can't connect, then the daemon probably died.
if err != clientapi.ErrConnectionFailed {
if !clientapi.IsErrConnectionFailed(err) {
return false, -1, err
}
return false, -1, nil


@ -3,6 +3,7 @@ package plugin
import (
"fmt"
"github.com/docker/docker/api/types"
"github.com/docker/docker/cli"
"github.com/docker/docker/cli/command"
"github.com/docker/docker/reference"
@ -10,20 +11,32 @@ import (
"golang.org/x/net/context"
)
type enableOpts struct {
timeout int
name string
}
func newEnableCommand(dockerCli *command.DockerCli) *cobra.Command {
var opts enableOpts
cmd := &cobra.Command{
Use: "enable PLUGIN",
Short: "Enable a plugin",
Args: cli.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
return runEnable(dockerCli, args[0])
opts.name = args[0]
return runEnable(dockerCli, &opts)
},
}
flags := cmd.Flags()
flags.IntVar(&opts.timeout, "timeout", 0, "HTTP client timeout (in seconds)")
return cmd
}
func runEnable(dockerCli *command.DockerCli, name string) error {
func runEnable(dockerCli *command.DockerCli, opts *enableOpts) error {
name := opts.name
named, err := reference.ParseNamed(name) // FIXME: validate
if err != nil {
return err
@ -35,7 +48,11 @@ func runEnable(dockerCli *command.DockerCli, name string) error {
if !ok {
return fmt.Errorf("invalid name: %s", named.String())
}
if err := dockerCli.Client().PluginEnable(context.Background(), ref.String()); err != nil {
if opts.timeout < 0 {
return fmt.Errorf("negative timeout %d is invalid", opts.timeout)
}
if err := dockerCli.Client().PluginEnable(context.Background(), ref.String(), types.PluginEnableOptions{Timeout: opts.timeout}); err != nil {
return err
}
fmt.Fprintln(dockerCli.Out(), name)


@ -50,15 +50,14 @@ func runSecretList(dockerCli *command.DockerCli, opts listOptions) error {
fmt.Fprintf(w, "%s\n", s.ID)
}
} else {
fmt.Fprintf(w, "ID\tNAME\tCREATED\tUPDATED\tSIZE")
fmt.Fprintf(w, "ID\tNAME\tCREATED\tUPDATED")
fmt.Fprintf(w, "\n")
for _, s := range secrets {
created := units.HumanDuration(time.Now().UTC().Sub(s.Meta.CreatedAt)) + " ago"
updated := units.HumanDuration(time.Now().UTC().Sub(s.Meta.UpdatedAt)) + " ago"
size := units.HumanSizeWithPrecision(float64(s.SecretSize), 3)
fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", s.ID, s.Spec.Annotations.Name, created, updated, size)
fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", s.ID, s.Spec.Annotations.Name, created, updated)
}
}


@ -5,6 +5,7 @@ import (
"io"
"text/tabwriter"
distreference "github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/swarm"
@ -127,6 +128,16 @@ func printTable(out io.Writer, services []swarm.Service, running, tasksNoShutdow
mode = "global"
replicas = fmt.Sprintf("%d/%d", running[service.ID], tasksNoShutdown[service.ID])
}
image := service.Spec.TaskTemplate.ContainerSpec.Image
ref, err := distreference.ParseNamed(image)
if err == nil {
// update image string for display
namedTagged, ok := ref.(distreference.NamedTagged)
if ok {
image = namedTagged.Name() + ":" + namedTagged.Tag()
}
}
fmt.Fprintf(
writer,
listItemFmt,
@ -134,7 +145,7 @@ func printTable(out io.Writer, services []swarm.Service, running, tasksNoShutdow
service.Spec.Name,
mode,
replicas,
service.Spec.TaskTemplate.ContainerSpec.Image)
image)
}
}


@ -575,14 +575,14 @@ func addServiceFlags(cmd *cobra.Command, opts *serviceOptions) {
flags.Var(&opts.resources.limitMemBytes, flagLimitMemory, "Limit Memory")
flags.Var(&opts.resources.resCPU, flagReserveCPU, "Reserve CPUs")
flags.Var(&opts.resources.resMemBytes, flagReserveMemory, "Reserve Memory")
flags.Var(&opts.stopGrace, flagStopGracePeriod, "Time to wait before force killing a container")
flags.Var(&opts.stopGrace, flagStopGracePeriod, "Time to wait before force killing a container (ns|us|ms|s|m|h)")
flags.Var(&opts.replicas, flagReplicas, "Number of tasks")
flags.StringVar(&opts.restartPolicy.condition, flagRestartCondition, "", "Restart when condition is met (none, on-failure, or any)")
flags.Var(&opts.restartPolicy.delay, flagRestartDelay, "Delay between restart attempts")
flags.Var(&opts.restartPolicy.delay, flagRestartDelay, "Delay between restart attempts (ns|us|ms|s|m|h)")
flags.Var(&opts.restartPolicy.maxAttempts, flagRestartMaxAttempts, "Maximum number of restarts before giving up")
flags.Var(&opts.restartPolicy.window, flagRestartWindow, "Window used to evaluate the restart policy")
flags.Var(&opts.restartPolicy.window, flagRestartWindow, "Window used to evaluate the restart policy (ns|us|ms|s|m|h)")
flags.Uint64Var(&opts.update.parallelism, flagUpdateParallelism, 1, "Maximum number of tasks updated simultaneously (0 to update all at once)")
flags.DurationVar(&opts.update.delay, flagUpdateDelay, time.Duration(0), "Delay between updates (ns|us|ms|s|m|h) (default 0s)")
@ -598,8 +598,8 @@ func addServiceFlags(cmd *cobra.Command, opts *serviceOptions) {
flags.Var(&opts.logDriver.opts, flagLogOpt, "Logging driver options")
flags.StringVar(&opts.healthcheck.cmd, flagHealthCmd, "", "Command to run to check health")
flags.Var(&opts.healthcheck.interval, flagHealthInterval, "Time between running the check")
flags.Var(&opts.healthcheck.timeout, flagHealthTimeout, "Maximum time to allow one check to run")
flags.Var(&opts.healthcheck.interval, flagHealthInterval, "Time between running the check (ns|us|ms|s|m|h)")
flags.Var(&opts.healthcheck.timeout, flagHealthTimeout, "Maximum time to allow one check to run (ns|us|ms|s|m|h)")
flags.IntVar(&opts.healthcheck.retries, flagHealthRetries, 0, "Consecutive failures needed to report unhealthy")
flags.BoolVar(&opts.healthcheck.noHealthcheck, flagNoHealthcheck, false, "Disable any container-specified HEALTHCHECK")


@ -17,19 +17,19 @@ func parseSecrets(client client.APIClient, requestedSecrets []*types.SecretReque
ctx := context.Background()
for _, secret := range requestedSecrets {
if _, exists := secretRefs[secret.Target]; exists {
return nil, fmt.Errorf("duplicate secret target for %s not allowed", secret.Source)
}
secretRef := &swarmtypes.SecretReference{
SecretName: secret.Source,
Target: &swarmtypes.SecretReferenceFileTarget{
File: &swarmtypes.SecretReferenceFileTarget{
Name: secret.Target,
UID: secret.UID,
GID: secret.GID,
Mode: secret.Mode,
},
SecretName: secret.Source,
}
if _, exists := secretRefs[secret.Target]; exists {
return nil, fmt.Errorf("duplicate secret target for %s not allowed", secret.Source)
}
secretRefs[secret.Target] = secretRef
}


@ -13,7 +13,7 @@ func NewStackCommand(dockerCli *command.DockerCli) *cobra.Command {
Short: "Manage Docker stacks",
Args: cli.NoArgs,
RunE: dockerCli.ShowHelp,
Tags: map[string]string{"experimental": "", "version": "1.25"},
Tags: map[string]string{"version": "1.25"},
}
cmd.AddCommand(
newDeployCommand(dockerCli),
@ -30,5 +30,6 @@ func NewTopLevelDeployCommand(dockerCli *command.DockerCli) *cobra.Command {
cmd := newDeployCommand(dockerCli)
// Remove the aliases at the top level
cmd.Aliases = []string{}
cmd.Tags = map[string]string{"experimental": "", "version": "1.25"}
return cmd
}


@ -6,6 +6,7 @@ import (
"os"
"sort"
"strings"
"time"
"github.com/spf13/cobra"
"golang.org/x/net/context"
@ -13,6 +14,7 @@ import (
"github.com/aanand/compose-file/loader"
composetypes "github.com/aanand/compose-file/types"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/mount"
networktypes "github.com/docker/docker/api/types/network"
"github.com/docker/docker/api/types/swarm"
@ -47,7 +49,6 @@ func newDeployCommand(dockerCli *command.DockerCli) *cobra.Command {
opts.namespace = args[0]
return runDeploy(dockerCli, opts)
},
Tags: map[string]string{"experimental": "", "version": "1.25"},
}
flags := cmd.Flags()
@ -250,9 +251,13 @@ func convertServiceNetworks(
nets := []swarm.NetworkAttachmentConfig{}
for networkName, network := range networks {
var aliases []string
if network != nil {
aliases = network.Aliases
}
nets = append(nets, swarm.NetworkAttachmentConfig{
Target: namespace.scope(networkName),
Aliases: append(network.Aliases, name),
Aliases: append(aliases, name),
})
}
return nets
@ -492,6 +497,11 @@ func convertService(
return swarm.ServiceSpec{}, err
}
healthcheck, err := convertHealthcheck(service.HealthCheck)
if err != nil {
return swarm.ServiceSpec{}, err
}
serviceSpec := swarm.ServiceSpec{
Annotations: swarm.Annotations{
Name: name,
@ -504,6 +514,7 @@ func convertService(
Args: service.Command,
Hostname: service.Hostname,
Hosts: convertExtraHosts(service.ExtraHosts),
Healthcheck: healthcheck,
Env: convertEnvironment(service.Environment),
Labels: getStackLabels(namespace.name, service.Labels),
Dir: service.WorkingDir,
@ -536,6 +547,47 @@ func convertExtraHosts(extraHosts map[string]string) []string {
return hosts
}
func convertHealthcheck(healthcheck *composetypes.HealthCheckConfig) (*container.HealthConfig, error) {
if healthcheck == nil {
return nil, nil
}
var (
err error
timeout, interval time.Duration
retries int
)
if healthcheck.Disable {
if len(healthcheck.Test) != 0 {
return nil, fmt.Errorf("command and disable key can't be set at the same time")
}
return &container.HealthConfig{
Test: []string{"NONE"},
}, nil
}
if healthcheck.Timeout != "" {
timeout, err = time.ParseDuration(healthcheck.Timeout)
if err != nil {
return nil, err
}
}
if healthcheck.Interval != "" {
interval, err = time.ParseDuration(healthcheck.Interval)
if err != nil {
return nil, err
}
}
if healthcheck.Retries != nil {
retries = int(*healthcheck.Retries)
}
return &container.HealthConfig{
Test: healthcheck.Test,
Timeout: timeout,
Interval: interval,
Retries: retries,
}, nil
}
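// Illustrative mapping, not part of this change: a compose healthcheck such as
// {Test: []string{"CMD", "curl", "-f", "http://localhost"}, Interval: "30s", Timeout: "10s", Retries: &three}
// converts to &container.HealthConfig{Test: ..., Interval: 30 * time.Second, Timeout: 10 * time.Second, Retries: 3},
// while disable: true combined with a test command is rejected above.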
func convertRestartPolicy(restart string, source *composetypes.RestartPolicy) (*swarm.RestartPolicy, error) {
// TODO: log if restart is being ignored
if source == nil {
@ -571,8 +623,12 @@ func convertUpdateConfig(source *composetypes.UpdateConfig) *swarm.UpdateConfig
if source == nil {
return nil
}
parallel := uint64(1)
if source.Parallelism != nil {
parallel = *source.Parallelism
}
return &swarm.UpdateConfig{
Parallelism: source.Parallelism,
Parallelism: parallel,
Delay: source.Delay,
FailureAction: source.FailureAction,
Monitor: source.Monitor,


@ -15,6 +15,7 @@ func addComposefileFlag(opt *string, flags *pflag.FlagSet) {
func addBundlefileFlag(opt *string, flags *pflag.FlagSet) {
flags.StringVar(opt, "bundle-file", "", "Path to a Distributed Application Bundle file")
flags.SetAnnotation("bundle-file", "experimental", nil)
}
func addRegistryAuthFlag(opt *bool, flags *pflag.FlagSet) {


@ -172,16 +172,21 @@ func prettyPrintInfo(dockerCli *command.DockerCli, info types.Info) error {
fmt.Fprintf(dockerCli.Out(), "\n")
}
if len(info.SecurityOptions) != 0 {
kvs, err := types.DecodeSecurityOptions(info.SecurityOptions)
if err != nil {
return err
}
fmt.Fprintf(dockerCli.Out(), "Security Options:\n")
for _, o := range info.SecurityOptions {
switch o.Key {
case "Name":
fmt.Fprintf(dockerCli.Out(), " %s\n", o.Value)
case "Profile":
if o.Value != "default" {
fmt.Fprintf(dockerCli.Err(), " WARNING: You're not using the default seccomp profile\n")
for _, so := range kvs {
fmt.Fprintf(dockerCli.Out(), " %s\n", so.Name)
for _, o := range so.Options {
switch o.Key {
case "profile":
if o.Value != "default" {
fmt.Fprintf(dockerCli.Err(), " WARNING: You're not using the default seccomp profile\n")
}
fmt.Fprintf(dockerCli.Out(), " Profile: %s\n", o.Value)
}
fmt.Fprintf(dockerCli.Out(), " %s: %s\n", o.Key, o.Value)
}
}
}


@ -10,6 +10,7 @@ import (
"golang.org/x/net/context"
distreference "github.com/docker/distribution/reference"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/cli/command"
"github.com/docker/docker/cli/command/idresolver"
@ -118,11 +119,23 @@ func print(out io.Writer, ctx context.Context, tasks []swarm.Task, resolver *idr
taskErr = fmt.Sprintf("\"%s\"", taskErr)
}
image := task.Spec.ContainerSpec.Image
if !noTrunc {
ref, err := distreference.ParseNamed(image)
if err == nil {
// update image string for display
namedTagged, ok := ref.(distreference.NamedTagged)
if ok {
image = namedTagged.Name() + ":" + namedTagged.Tag()
}
}
}
fmt.Fprintf(
out,
psTaskItemFmt,
indentedName,
task.Spec.ContainerSpec.Image,
image,
nodeValue,
command.PrettyPrint(task.DesiredState),
command.PrettyPrint(task.Status.State),

View file

@ -1,4 +1,4 @@
## Go client for the Docker Remote API
# Go client for the Docker Engine API
The `docker` command uses this package to communicate with the daemon. It can also be used by your own Go applications to do anything the command-line interface does: running containers, pulling images, managing swarms, etc.
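A minimal sketch of using the package, assuming the 1.13-era `client.NewEnvClient` constructor and `ContainerList` method, might look like this:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

func main() {
	// NewEnvClient reads connection settings such as DOCKER_HOST and
	// DOCKER_API_VERSION from the environment.
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}

	// List running containers, similar to `docker ps`.
	containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{})
	if err != nil {
		panic(err)
	}

	for _, c := range containers {
		fmt.Printf("%s %s\n", c.ID[:10], c.Image)
	}
}
```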

View file

@ -1,12 +1,12 @@
/*
Package client is a Go client for the Docker Remote API.
Package client is a Go client for the Docker Engine API.
The "docker" command uses this package to communicate with the daemon. It can also
be used by your own Go applications to do anything the command-line interface does:
running containers, pulling images, managing swarms, etc.
For more information about the Remote API, see the documentation:
https://docs.docker.com/engine/reference/api/docker_remote_api/
For more information about the Engine API, see the documentation:
https://docs.docker.com/engine/reference/api/
Usage
@ -122,7 +122,7 @@ func NewEnvClient() (*Client, error) {
if err != nil {
return cli, err
}
if version != "" {
if os.Getenv("DOCKER_API_VERSION") != "" {
cli.manualOverride = true
}
return cli, nil

View file

@ -1,18 +1,34 @@
package client
import (
"errors"
"fmt"
"github.com/docker/docker/api/types/versions"
"github.com/pkg/errors"
)
// ErrConnectionFailed is an error raised when the connection between the client and the server failed.
var ErrConnectionFailed = errors.New("Cannot connect to the Docker daemon. Is the docker daemon running on this host?")
// errConnectionFailed implements an error returned when connection failed.
type errConnectionFailed struct {
host string
}
// Error returns a string representation of an errConnectionFailed
func (err errConnectionFailed) Error() string {
if err.host == "" {
return "Cannot connect to the Docker daemon. Is the docker daemon running on this host?"
}
return fmt.Sprintf("Cannot connect to the Docker daemon at %s. Is the docker daemon running?", err.host)
}
// IsErrConnectionFailed returns true if the error is caused by connection failed.
func IsErrConnectionFailed(err error) bool {
_, ok := errors.Cause(err).(errConnectionFailed)
return ok
}
// ErrorConnectionFailed returns an error with host in the error message when connection to docker daemon failed.
func ErrorConnectionFailed(host string) error {
return fmt.Errorf("Cannot connect to the Docker daemon at %s. Is the docker daemon running?", host)
return errConnectionFailed{host: host}
}
type notFound interface {

View file

@ -46,10 +46,8 @@ func TestInfo(t *testing.T) {
return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL)
}
info := &types.Info{
InfoBase: &types.InfoBase{
ID: "daemonID",
Containers: 3,
},
ID: "daemonID",
Containers: 3,
}
b, err := json.Marshal(info)
if err != nil {

View file

@ -109,7 +109,7 @@ type NodeAPIClient interface {
type PluginAPIClient interface {
PluginList(ctx context.Context) (types.PluginsListResponse, error)
PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error
PluginEnable(ctx context.Context, name string) error
PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error
PluginDisable(ctx context.Context, name string) error
PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) error
PluginPush(ctx context.Context, name string, registryAuth string) error

View file

@ -1,12 +1,19 @@
package client
import (
"net/url"
"strconv"
"github.com/docker/docker/api/types"
"golang.org/x/net/context"
)
// PluginEnable enables a plugin
func (cli *Client) PluginEnable(ctx context.Context, name string) error {
resp, err := cli.post(ctx, "/plugins/"+name+"/enable", nil, nil, nil)
func (cli *Client) PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error {
query := url.Values{}
query.Set("timeout", strconv.Itoa(options.Timeout))
resp, err := cli.post(ctx, "/plugins/"+name+"/enable", query, nil, nil)
ensureReaderClosed(resp)
return err
}

View file

@ -8,6 +8,7 @@ import (
"strings"
"testing"
"github.com/docker/docker/api/types"
"golang.org/x/net/context"
)
@ -16,7 +17,7 @@ func TestPluginEnableError(t *testing.T) {
client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
}
err := client.PluginEnable(context.Background(), "plugin_name")
err := client.PluginEnable(context.Background(), "plugin_name", types.PluginEnableOptions{})
if err == nil || err.Error() != "Error response from daemon: Server error" {
t.Fatalf("expected a Server Error, got %v", err)
}
@ -40,7 +41,7 @@ func TestPluginEnable(t *testing.T) {
}),
}
err := client.PluginEnable(context.Background(), "plugin_name")
err := client.PluginEnable(context.Background(), "plugin_name", types.PluginEnableOptions{})
if err != nil {
t.Fatal(err)
}

View file

@ -62,7 +62,7 @@ func (cli *Client) PluginInstall(ctx context.Context, name string, options types
return nil
}
return cli.PluginEnable(ctx, name)
return cli.PluginEnable(ctx, name, types.PluginEnableOptions{Timeout: 0})
}
func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) {

View file

@ -13,7 +13,7 @@ func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (t
var headers map[string][]string
var response types.SecretCreateResponse
resp, err := cli.post(ctx, "/secrets", nil, secret, headers)
resp, err := cli.post(ctx, "/secrets/create", nil, secret, headers)
if err != nil {
return response, err
}

View file

@ -25,7 +25,7 @@ func TestSecretCreateError(t *testing.T) {
}
func TestSecretCreate(t *testing.T) {
expectedURL := "/secrets"
expectedURL := "/secrets/create"
client := &Client{
client: newMockClient(func(req *http.Request) (*http.Response, error) {
if !strings.HasPrefix(req.URL.Path, expectedURL) {
@ -41,7 +41,7 @@ func TestSecretCreate(t *testing.T) {
return nil, err
}
return &http.Response{
StatusCode: http.StatusOK,
StatusCode: http.StatusCreated,
Body: ioutil.NopCloser(bytes.NewReader(b)),
}, nil
}),

View file

@ -19,6 +19,7 @@ import (
containertypes "github.com/docker/docker/api/types/container"
mounttypes "github.com/docker/docker/api/types/mount"
networktypes "github.com/docker/docker/api/types/network"
swarmtypes "github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/container/stream"
"github.com/docker/docker/daemon/exec"
"github.com/docker/docker/daemon/logger"
@ -41,6 +42,7 @@ import (
"github.com/docker/libnetwork/netlabel"
"github.com/docker/libnetwork/options"
"github.com/docker/libnetwork/types"
agentexec "github.com/docker/swarmkit/agent/exec"
"github.com/opencontainers/runc/libcontainer/label"
)
@ -68,7 +70,7 @@ func (DetachError) Error() string {
type CommonContainer struct {
StreamConfig *stream.Config
// embed for Container to support states directly.
*State `json:"State"` // Needed for remote api version <= 1.11
*State `json:"State"` // Needed for Engine API version <= 1.11
Root string `json:"-"` // Path to the "home" of the container, including metadata.
BaseFS string `json:"-"` // Path to the graphdriver mountpoint
RWLayer layer.RWLayer `json:"-"`
@ -90,9 +92,10 @@ type CommonContainer struct {
HasBeenStartedBefore bool
HasBeenManuallyStopped bool // used for unless-stopped restart policy
MountPoints map[string]*volume.MountPoint
HostConfig *containertypes.HostConfig `json:"-"` // do not serialize the host config in the json, otherwise we'll make the container unportable
ExecCommands *exec.Store `json:"-"`
Secrets []*containertypes.ContainerSecret `json:"-"` // do not serialize
HostConfig *containertypes.HostConfig `json:"-"` // do not serialize the host config in the json, otherwise we'll make the container unportable
ExecCommands *exec.Store `json:"-"`
SecretStore agentexec.SecretGetter `json:"-"`
SecretReferences []*swarmtypes.SecretReference
// logDriver for closing
LogDriver logger.Logger `json:"-"`
LogCopier *logger.Copier `json:"-"`

View file

@ -258,7 +258,7 @@ func (container *Container) IpcMounts() []Mount {
// SecretMount returns the mount for the secret path
func (container *Container) SecretMount() *Mount {
if len(container.Secrets) > 0 {
if len(container.SecretReferences) > 0 {
return &Mount{
Source: container.SecretMountPath(),
Destination: containerSecretMountPath,

View file

@ -0,0 +1,10 @@
#!/bin/bash
set -e
cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
set -x
./generate.sh
for d in */; do
docker build -t "dockercore/builder-deb:$(basename "$d")" "$d"
done

View file

@ -0,0 +1,118 @@
#!/bin/bash
set -e
# This file is used to auto-generate Dockerfiles for making debs via 'make deb'
#
# usage: ./generate.sh [versions]
# ie: ./generate.sh
# to update all Dockerfiles in this directory
# or: ./generate.sh ubuntu-trusty
# to only update ubuntu-trusty/Dockerfile
# or: ./generate.sh ubuntu-newversion
# to create a new folder and a Dockerfile within it
#
# Note: non-LTS versions are not guaranteed to work.
cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
versions=( "$@" )
if [ ${#versions[@]} -eq 0 ]; then
versions=( */ )
fi
versions=( "${versions[@]%/}" )
for version in "${versions[@]}"; do
echo "${versions[@]}"
distro="${version%-*}"
suite="${version##*-}"
from="aarch64/${distro}:${suite}"
mkdir -p "$version"
echo "$version -> FROM $from"
cat > "$version/Dockerfile" <<-EOF
#
# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/aarch64/generate.sh"!
#
FROM $from
EOF
dockerBuildTags='apparmor pkcs11 selinux'
runcBuildTags='apparmor selinux'
# this list is sorted alphabetically; please keep it that way
packages=(
apparmor # for apparmor_parser for testing the profile
bash-completion # for bash-completion debhelper integration
btrfs-tools # for "btrfs/ioctl.h" (and "version.h" if possible)
build-essential # "essential for building Debian packages"
cmake # tini dep
curl ca-certificates # for downloading Go
debhelper # for easy ".deb" building
dh-apparmor # for apparmor debhelper
dh-systemd # for systemd debhelper integration
git # for "git commit" info in "docker -v"
libapparmor-dev # for "sys/apparmor.h"
libdevmapper-dev # for "libdevmapper.h"
libltdl-dev # for pkcs11 "ltdl.h"
libsqlite3-dev # for "sqlite3.h"
pkg-config # for detecting things like libsystemd-journal dynamically
vim-common # tini dep
)
case "$suite" in
trusty)
packages+=( libsystemd-journal-dev )
# aarch64 doesn't have an official downloadable binary for go.
# And gccgo for trusty only includes a Go 1.2 implementation, which
# is too old to build the current Go source; fortunately, trusty's
# golang-1.6-go package can be used as a bootstrap.
packages+=( golang-1.6-go )
;;
xenial)
packages+=( libsystemd-dev )
packages+=( golang-go libseccomp-dev)
dockerBuildTags="$dockerBuildTags seccomp"
runcBuildTags="$runcBuildTags seccomp"
;;
*)
echo "Unsupported distro:" $distro:$suite
rm -fr "$version"
exit 1
;;
esac
# update and install packages
echo "RUN apt-get update && apt-get install -y ${packages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile"
echo >> "$version/Dockerfile"
case "$suite" in
trusty)
echo 'RUN update-alternatives --install /usr/bin/go go /usr/lib/go-1.6/bin/go 100' >> "$version/Dockerfile"
echo >> "$version/Dockerfile"
;;
*)
;;
esac
echo "# Install Go" >> "$version/Dockerfile"
echo "# aarch64 doesn't have official go binaries, so use the version of go installed from" >> "$version/Dockerfile"
echo "# the image to build go from source." >> "$version/Dockerfile"
awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile.aarch64 >> "$version/Dockerfile"
echo 'RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \' >> "$version/Dockerfile"
echo ' && cd /usr/src/go/src \' >> "$version/Dockerfile"
echo ' && GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash' >> "$version/Dockerfile"
echo >> "$version/Dockerfile"
echo 'ENV PATH $PATH:/usr/src/go/bin' >> "$version/Dockerfile"
echo >> "$version/Dockerfile"
echo "ENV AUTO_GOPATH 1" >> "$version/Dockerfile"
echo >> "$version/Dockerfile"
echo "ENV DOCKER_BUILDTAGS $dockerBuildTags" >> "$version/Dockerfile"
echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile"
done

View file

@ -0,0 +1,24 @@
#
# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/aarch64/generate.sh"!
#
FROM aarch64/ubuntu:trusty
RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common libsystemd-journal-dev golang-1.6-go --no-install-recommends && rm -rf /var/lib/apt/lists/*
RUN update-alternatives --install /usr/bin/go go /usr/lib/go-1.6/bin/go 100
# Install Go
# aarch64 doesn't have official go binaries, so use the version of go installed from
# the image to build go from source.
ENV GO_VERSION 1.7.3
RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \
&& cd /usr/src/go/src \
&& GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash
ENV PATH $PATH:/usr/src/go/bin
ENV AUTO_GOPATH 1
ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux
ENV RUNC_BUILDTAGS apparmor selinux

View file

@ -0,0 +1,22 @@
#
# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/aarch64/generate.sh"!
#
FROM aarch64/ubuntu:xenial
RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common libsystemd-dev golang-go libseccomp-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
# Install Go
# aarch64 doesn't have official go binaries, so use the version of go installed from
# the image to build go from source.
ENV GO_VERSION 1.7.3
RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \
&& cd /usr/src/go/src \
&& GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash
ENV PATH $PATH:/usr/src/go/bin
ENV AUTO_GOPATH 1
ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux seccomp
ENV RUNC_BUILDTAGS apparmor selinux seccomp

View file

@ -1,15 +1,20 @@
#
# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"!
#
FROM armhf/debian:jessie
# allow replacing httpredir mirror
ARG APT_MIRROR=httpredir.debian.org
RUN sed -i s/httpredir.debian.org/$APT_MIRROR/g /etc/apt/sources.list
# allow replacing httpredir or deb mirror
ARG APT_MIRROR=deb.debian.org
RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list
RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev libsystemd-journal-dev vim-common --no-install-recommends && rm -rf /var/lib/apt/lists/*
RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
ENV GO_VERSION 1.7.3
RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local
ENV PATH $PATH:/usr/local/go/bin
ENV AUTO_GOPATH 1
ENV DOCKER_BUILDTAGS apparmor selinux
ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux
ENV RUNC_BUILDTAGS apparmor selinux

View file

@ -0,0 +1,158 @@
#!/bin/bash
set -e
# usage: ./generate.sh [versions]
# ie: ./generate.sh
# to update all Dockerfiles in this directory
# or: ./generate.sh debian-jessie
# to only update debian-jessie/Dockerfile
# or: ./generate.sh debian-newversion
# to create a new folder and a Dockerfile within it
cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
versions=( "$@" )
if [ ${#versions[@]} -eq 0 ]; then
versions=( */ )
fi
versions=( "${versions[@]%/}" )
for version in "${versions[@]}"; do
distro="${version%-*}"
suite="${version##*-}"
from="${distro}:${suite}"
case "$from" in
raspbian:jessie)
from="resin/rpi-raspbian:jessie"
;;
*)
from="armhf/$from"
;;
esac
mkdir -p "$version"
echo "$version -> FROM $from"
cat > "$version/Dockerfile" <<-EOF
#
# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"!
#
FROM $from
EOF
echo >> "$version/Dockerfile"
if [[ "$distro" = "debian" || "$distro" = "raspbian" ]]; then
cat >> "$version/Dockerfile" <<-'EOF'
# allow replacing httpredir or deb mirror
ARG APT_MIRROR=deb.debian.org
RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list
EOF
if [ "$suite" = "wheezy" ]; then
cat >> "$version/Dockerfile" <<-'EOF'
RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list.d/backports.list
EOF
fi
echo "" >> "$version/Dockerfile"
fi
extraBuildTags='pkcs11'
runcBuildTags=
# this list is sorted alphabetically; please keep it that way
packages=(
apparmor # for apparmor_parser for testing the profile
bash-completion # for bash-completion debhelper integration
btrfs-tools # for "btrfs/ioctl.h" (and "version.h" if possible)
build-essential # "essential for building Debian packages"
cmake # tini dep
curl ca-certificates # for downloading Go
debhelper # for easy ".deb" building
dh-apparmor # for apparmor debhelper
dh-systemd # for systemd debhelper integration
git # for "git commit" info in "docker -v"
libapparmor-dev # for "sys/apparmor.h"
libdevmapper-dev # for "libdevmapper.h"
libltdl-dev # for pkcs11 "ltdl.h"
libseccomp-dev # for "seccomp.h" & "libseccomp.so"
libsqlite3-dev # for "sqlite3.h"
pkg-config # for detecting things like libsystemd-journal dynamically
vim-common # tini dep
)
# packaging for "sd-journal.h" and libraries varies
case "$suite" in
precise|wheezy) ;;
jessie|trusty) packages+=( libsystemd-journal-dev );;
*) packages+=( libsystemd-dev );;
esac
# debian wheezy & ubuntu precise do not have the right libseccomp libs
# debian jessie & ubuntu trusty have a libseccomp < 2.2.1 :(
case "$suite" in
precise|wheezy|jessie|trusty)
packages=( "${packages[@]/libseccomp-dev}" )
runcBuildTags="apparmor selinux"
;;
*)
extraBuildTags+=' seccomp'
runcBuildTags="apparmor seccomp selinux"
;;
esac
if [ "$suite" = 'precise' ]; then
# precise has a few package issues
# - dh-systemd doesn't exist at all
packages=( "${packages[@]/dh-systemd}" )
# - libdevmapper-dev is missing critical structs (too old)
packages=( "${packages[@]/libdevmapper-dev}" )
extraBuildTags+=' exclude_graphdriver_devicemapper'
# - btrfs-tools is missing "ioctl.h" (too old), so it's useless
# (since kernels on precise are old too, just skip btrfs entirely)
packages=( "${packages[@]/btrfs-tools}" )
extraBuildTags+=' exclude_graphdriver_btrfs'
fi
if [ "$suite" = 'wheezy' ]; then
# pull a couple packages from backports explicitly
# (build failures otherwise)
backportsPackages=( btrfs-tools )
for pkg in "${backportsPackages[@]}"; do
packages=( "${packages[@]/$pkg}" )
done
echo "RUN apt-get update && apt-get install -y -t $suite-backports ${backportsPackages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile"
fi
echo "RUN apt-get update && apt-get install -y ${packages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile"
echo >> "$version/Dockerfile"
awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile >> "$version/Dockerfile"
if [ "$distro" == 'raspbian' ];
then
cat <<EOF >> "$version/Dockerfile"
# GOARM is the ARM architecture version which is unrelated to the above Golang version
ENV GOARM 6
EOF
fi
echo 'RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile"
echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile"
echo >> "$version/Dockerfile"
echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile"
echo >> "$version/Dockerfile"
# print build tags in alphabetical order
buildTags=$( echo "apparmor selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' )
echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile"
echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile"
done

View file

@ -1,10 +1,14 @@
#
# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"!
#
FROM resin/rpi-raspbian:jessie
# allow replacing httpredir mirror
ARG APT_MIRROR=httpredir.debian.org
RUN sed -i s/httpredir.debian.org/$APT_MIRROR/g /etc/apt/sources.list
# allow replacing httpredir or deb mirror
ARG APT_MIRROR=deb.debian.org
RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list
RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev libsystemd-journal-dev vim-common --no-install-recommends && rm -rf /var/lib/apt/lists/*
RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
ENV GO_VERSION 1.7.3
# GOARM is the ARM architecture version which is unrelated to the above Golang version
@ -13,5 +17,6 @@ RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar
ENV PATH $PATH:/usr/local/go/bin
ENV AUTO_GOPATH 1
ENV DOCKER_BUILDTAGS apparmor selinux
ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux
ENV RUNC_BUILDTAGS apparmor selinux

View file

@ -1,6 +1,10 @@
#
# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"!
#
FROM armhf/ubuntu:trusty
RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config libsystemd-journal-dev vim-common --no-install-recommends && rm -rf /var/lib/apt/lists/*
RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
ENV GO_VERSION 1.7.3
RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local

View file

@ -0,0 +1,16 @@
#
# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"!
#
FROM armhf/ubuntu:xenial
RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libseccomp-dev libsqlite3-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
ENV GO_VERSION 1.7.3
RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local
ENV PATH $PATH:/usr/local/go/bin
ENV AUTO_GOPATH 1
ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux
ENV RUNC_BUILDTAGS apparmor seccomp selinux

View file

@ -0,0 +1,16 @@
#
# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"!
#
FROM armhf/ubuntu:yakkety
RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libseccomp-dev libsqlite3-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
ENV GO_VERSION 1.7.3
RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local
ENV PATH $PATH:/usr/local/go/bin
ENV AUTO_GOPATH 1
ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux
ENV RUNC_BUILDTAGS apparmor seccomp selinux

View file

@ -1708,6 +1708,7 @@ _docker_daemon() {
--shutdown-timeout
--storage-driver -s
--storage-opt
--userland-proxy-path
--userns-remap
"
@ -1754,7 +1755,7 @@ _docker_daemon() {
__docker_nospace
return
;;
--config-file|--containerd|--init-path|--pidfile|-p|--tlscacert|--tlscert|--tlskey)
--config-file|--containerd|--init-path|--pidfile|-p|--tlscacert|--tlscert|--tlskey|--userland-proxy-path)
_filedir
return
;;
@ -1913,6 +1914,7 @@ _docker_image_build() {
--label
--memory -m
--memory-swap
--network
--shm-size
--tag -t
--ulimit
@ -1949,6 +1951,20 @@ _docker_image_build() {
__docker_complete_isolation
return
;;
--network)
case "$cur" in
container:*)
__docker_complete_containers_all --cur "${cur#*:}"
;;
*)
COMPREPLY=( $( compgen -W "$(__docker_plugins --type Network) $(__docker_networks) container:" -- "$cur") )
if [ "${COMPREPLY[*]}" = "container:" ] ; then
__docker_nospace
fi
;;
esac
return
;;
--tag|-t)
__docker_complete_image_repos_and_tags
return
@ -2041,7 +2057,7 @@ _docker_image_list() {
_docker_image_ls() {
local key=$(__docker_map_key_of_current_option '--filter|-f')
case "$key" in
before)
before|since|reference)
cur="${cur##*=}"
__docker_complete_images
return
@ -2053,16 +2069,11 @@ _docker_image_ls() {
label)
return
;;
since)
cur="${cur##*=}"
__docker_complete_images
return
;;
esac
case "$prev" in
--filter|-f)
COMPREPLY=( $( compgen -S = -W "before dangling label since" -- "$cur" ) )
COMPREPLY=( $( compgen -S = -W "before dangling label reference since" -- "$cur" ) )
__docker_nospace
return
;;

View file

@ -43,7 +43,7 @@ function __fish_print_docker_repositories --description 'Print a list of docker
end
# common options
complete -c docker -f -n '__fish_docker_no_subcommand' -l api-cors-header -d "Set CORS headers in the remote API. Default is cors disabled"
complete -c docker -f -n '__fish_docker_no_subcommand' -l api-cors-header -d "Set CORS headers in the Engine API. Default is cors disabled"
complete -c docker -f -n '__fish_docker_no_subcommand' -s b -l bridge -d 'Attach containers to a pre-existing network bridge'
complete -c docker -f -n '__fish_docker_no_subcommand' -l bip -d "Use this CIDR notation address for the network bridge's IP, not compatible with -b"
complete -c docker -f -n '__fish_docker_no_subcommand' -s D -l debug -d 'Enable debug mode'

View file

@ -399,11 +399,11 @@ __docker_complete_images_filters() {
declare -a boolean_opts opts
boolean_opts=('true' 'false')
opts=('before' 'dangling' 'label' 'since')
opts=('before' 'dangling' 'label' 'reference' 'since')
if compset -P '*='; then
case "${${words[-1]%=*}#*=}" in
(before|since)
(before|reference|since)
__docker_complete_images && ret=0
;;
(dangling)
@ -2200,7 +2200,7 @@ __docker_subcommand() {
_arguments $(__docker_arguments) \
$opts_help \
"($help)*--add-runtime=[Register an additional OCI compatible runtime]:runtime:__docker_complete_runtimes" \
"($help)--api-cors-header=[CORS headers in the remote API]:CORS headers: " \
"($help)--api-cors-header=[CORS headers in the Engine API]:CORS headers: " \
"($help)*--authorization-plugin=[Authorization plugins to load]" \
"($help -b --bridge)"{-b=,--bridge=}"[Attach containers to a network bridge]:bridge:_net_interfaces" \
"($help)--bip=[Network bridge IP]:IP address: " \
@ -2257,7 +2257,8 @@ __docker_subcommand() {
"($help)--tlskey=[Path to TLS key file]:Key file:_files -g \"*.(pem|key)\"" \
"($help)--tlsverify[Use TLS and verify the remote]" \
"($help)--userns-remap=[User/Group setting for user namespaces]:user\:group:->users-groups" \
"($help)--userland-proxy[Use userland proxy for loopback traffic]" && ret=0
"($help)--userland-proxy[Use userland proxy for loopback traffic]" \
"($help)--userland-proxy-path=[Path to the userland proxy binary]:binary:_files" && ret=0
case $state in
(cluster-store)

View file

@ -18,7 +18,7 @@ meaning you can use Vagrant to control Docker containers.
* [docker-provider](https://github.com/fgrehm/docker-provider)
* [vagrant-shell](https://github.com/destructuring/vagrant-shell)
## Setting up Vagrant-docker with the Remote API
## Setting up Vagrant-docker with the Engine API
The initial Docker upstart script will not work because it listens only on `127.0.0.1`, which is not accessible from the host machine. Instead, we need to change the script so the daemon listens on `0.0.0.0`. To do this, modify `/etc/init/docker.conf` to look like this:

View file

@ -595,6 +595,11 @@ func (c *Cluster) UnlockSwarm(req types.UnlockRequest) error {
return err
}
}
if c.node != nil || c.locked != true {
c.RUnlock()
return errors.New("swarm is not locked")
}
c.RUnlock()
key, err := encryption.ParseHumanReadableKey(req.UnlockKey)
@ -603,11 +608,6 @@ func (c *Cluster) UnlockSwarm(req types.UnlockRequest) error {
}
c.Lock()
if c.node != nil || c.locked != true {
c.Unlock()
return errors.New("swarm is not locked")
}
config := *c.lastNodeConfig
config.lockKey = key
n, err := c.startNewNode(config)
@ -1278,7 +1278,7 @@ func (c *Cluster) ServiceLogs(ctx context.Context, input string, config *backend
ServiceIDs: []string{service.ID},
},
Options: &swarmapi.LogSubscriptionOptions{
Follow: true,
Follow: config.Follow,
},
})
if err != nil {

View file

@ -82,18 +82,22 @@ func containerSpecFromGRPC(c *swarmapi.ContainerSpec) types.ContainerSpec {
func secretReferencesToGRPC(sr []*types.SecretReference) []*swarmapi.SecretReference {
refs := make([]*swarmapi.SecretReference, 0, len(sr))
for _, s := range sr {
refs = append(refs, &swarmapi.SecretReference{
ref := &swarmapi.SecretReference{
SecretID: s.SecretID,
SecretName: s.SecretName,
Target: &swarmapi.SecretReference_File{
}
if s.File != nil {
ref.Target = &swarmapi.SecretReference_File{
File: &swarmapi.SecretReference_FileTarget{
Name: s.Target.Name,
UID: s.Target.UID,
GID: s.Target.GID,
Mode: s.Target.Mode,
Name: s.File.Name,
UID: s.File.UID,
GID: s.File.GID,
Mode: s.File.Mode,
},
},
})
}
}
refs = append(refs, ref)
}
return refs
@ -108,14 +112,14 @@ func secretReferencesFromGRPC(sr []*swarmapi.SecretReference) []*types.SecretRef
continue
}
refs = append(refs, &types.SecretReference{
SecretID: s.SecretID,
SecretName: s.SecretName,
Target: &types.SecretReferenceFileTarget{
File: &types.SecretReferenceFileTarget{
Name: target.Name,
UID: target.UID,
GID: target.GID,
Mode: target.Mode,
},
SecretID: s.SecretID,
SecretName: s.SecretName,
})
}

View file

@ -9,9 +9,7 @@ import (
// SecretFromGRPC converts a grpc Secret to a Secret.
func SecretFromGRPC(s *swarmapi.Secret) swarmtypes.Secret {
secret := swarmtypes.Secret{
ID: s.ID,
Digest: s.Digest,
SecretSize: s.SecretSize,
ID: s.ID,
Spec: swarmtypes.SecretSpec{
Annotations: swarmtypes.Annotations{
Name: s.Spec.Annotations.Name,
@ -39,3 +37,28 @@ func SecretSpecToGRPC(s swarmtypes.SecretSpec) swarmapi.SecretSpec {
Data: s.Data,
}
}
// SecretReferencesFromGRPC converts a slice of grpc SecretReference to SecretReference
func SecretReferencesFromGRPC(s []*swarmapi.SecretReference) []*swarmtypes.SecretReference {
refs := []*swarmtypes.SecretReference{}
for _, r := range s {
ref := &swarmtypes.SecretReference{
SecretID: r.SecretID,
SecretName: r.SecretName,
}
if t, ok := r.Target.(*swarmapi.SecretReference_File); ok {
ref.File = &swarmtypes.SecretReferenceFileTarget{
Name: t.File.Name,
UID: t.File.UID,
GID: t.File.GID,
Mode: t.File.Mode,
}
}
refs = append(refs, ref)
}
return refs
}

View file

@ -11,11 +11,13 @@ import (
"github.com/docker/docker/api/types/events"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/network"
swarmtypes "github.com/docker/docker/api/types/swarm"
clustertypes "github.com/docker/docker/daemon/cluster/provider"
"github.com/docker/docker/reference"
"github.com/docker/libnetwork"
"github.com/docker/libnetwork/cluster"
networktypes "github.com/docker/libnetwork/types"
"github.com/docker/swarmkit/agent/exec"
"golang.org/x/net/context"
)
@ -38,7 +40,8 @@ type Backend interface {
ContainerWaitWithContext(ctx context.Context, name string) error
ContainerRm(name string, config *types.ContainerRmConfig) error
ContainerKill(name string, sig uint64) error
SetContainerSecrets(name string, secrets []*container.ContainerSecret) error
SetContainerSecretStore(name string, store exec.SecretGetter) error
SetContainerSecretReferences(name string, refs []*swarmtypes.SecretReference) error
SystemInfo() (*types.Info, error)
VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error)
Containers(config *types.ContainerListOptions) ([]*types.Container, error)

View file

@ -16,6 +16,7 @@ import (
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/events"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/daemon/cluster/convert"
executorpkg "github.com/docker/docker/daemon/cluster/executor"
"github.com/docker/docker/reference"
"github.com/docker/libnetwork"
@ -237,33 +238,14 @@ func (c *containerAdapter) create(ctx context.Context) error {
if container == nil {
return fmt.Errorf("unable to get container from task spec")
}
secrets := make([]*containertypes.ContainerSecret, 0, len(container.Secrets))
for _, s := range container.Secrets {
sec := c.secrets.Get(s.SecretID)
if sec == nil {
logrus.Warnf("unable to get secret %s from provider", s.SecretID)
continue
}
name := sec.Spec.Annotations.Name
target := s.GetFile()
if target == nil {
logrus.Warnf("secret target was not a file: secret=%s", s.SecretID)
continue
}
secrets = append(secrets, &containertypes.ContainerSecret{
Name: name,
Target: target.Name,
Data: sec.Spec.Data,
UID: target.UID,
GID: target.GID,
Mode: target.Mode,
})
}
// configure secrets
if err := c.backend.SetContainerSecrets(cr.ID, secrets); err != nil {
if err := c.backend.SetContainerSecretStore(cr.ID, c.secrets); err != nil {
return err
}
refs := convert.SecretReferencesFromGRPC(container.Secrets)
if err := c.backend.SetContainerSecretReferences(cr.ID, refs); err != nil {
return err
}
@ -437,7 +419,11 @@ func (c *containerAdapter) logs(ctx context.Context, options api.LogSubscription
}
chStarted := make(chan struct{})
go c.backend.ContainerLogs(ctx, c.container.name(), apiOptions, chStarted)
go func() {
defer writer.Close()
c.backend.ContainerLogs(ctx, c.container.name(), apiOptions, chStarted)
}()
return reader, nil
}

View file

@ -14,6 +14,7 @@ import (
enginecontainer "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/events"
"github.com/docker/docker/api/types/filters"
enginemount "github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/network"
volumetypes "github.com/docker/docker/api/types/volume"
clustertypes "github.com/docker/docker/daemon/cluster/provider"
@ -191,7 +192,6 @@ func (c *containerConfig) config() *enginecontainer.Config {
Hostname: c.spec().Hostname,
WorkingDir: c.spec().Dir,
Image: c.image(),
Volumes: c.volumes(),
ExposedPorts: c.exposedPorts(),
Healthcheck: c.healthcheck(),
}
@ -243,49 +243,79 @@ func (c *containerConfig) labels() map[string]string {
return labels
}
// volumes gets placed into the Volumes field on the containerConfig.
func (c *containerConfig) volumes() map[string]struct{} {
r := make(map[string]struct{})
// Volumes *only* creates anonymous volumes. The rest is mixed in with
// binds, which aren't actually binds. Basically, any volume that
// results in a single component must be added here.
//
// This is reverse engineered from the behavior of the Engine API.
func (c *containerConfig) mounts() []enginemount.Mount {
var r []enginemount.Mount
for _, mount := range c.spec().Mounts {
if mount.Type == api.MountTypeVolume && mount.Source == "" {
r[mount.Target] = struct{}{}
}
r = append(r, convertMount(mount))
}
return r
}
func (c *containerConfig) tmpfs() map[string]string {
r := make(map[string]string)
for _, spec := range c.spec().Mounts {
if spec.Type != api.MountTypeTmpfs {
continue
}
r[spec.Target] = getMountMask(&spec)
func convertMount(m api.Mount) enginemount.Mount {
mount := enginemount.Mount{
Source: m.Source,
Target: m.Target,
ReadOnly: m.ReadOnly,
}
return r
}
switch m.Type {
case api.MountTypeBind:
mount.Type = enginemount.TypeBind
case api.MountTypeVolume:
mount.Type = enginemount.TypeVolume
case api.MountTypeTmpfs:
mount.Type = enginemount.TypeTmpfs
}
func (c *containerConfig) binds() []string {
var r []string
for _, mount := range c.spec().Mounts {
if mount.Type == api.MountTypeBind || (mount.Type == api.MountTypeVolume && mount.Source != "") {
spec := fmt.Sprintf("%s:%s", mount.Source, mount.Target)
mask := getMountMask(&mount)
if mask != "" {
spec = fmt.Sprintf("%s:%s", spec, mask)
if m.BindOptions != nil {
mount.BindOptions = &enginemount.BindOptions{}
switch m.BindOptions.Propagation {
case api.MountPropagationRPrivate:
mount.BindOptions.Propagation = enginemount.PropagationRPrivate
case api.MountPropagationPrivate:
mount.BindOptions.Propagation = enginemount.PropagationPrivate
case api.MountPropagationRSlave:
mount.BindOptions.Propagation = enginemount.PropagationRSlave
case api.MountPropagationSlave:
mount.BindOptions.Propagation = enginemount.PropagationSlave
case api.MountPropagationRShared:
mount.BindOptions.Propagation = enginemount.PropagationRShared
case api.MountPropagationShared:
mount.BindOptions.Propagation = enginemount.PropagationShared
}
}
if m.VolumeOptions != nil {
mount.VolumeOptions = &enginemount.VolumeOptions{
NoCopy: m.VolumeOptions.NoCopy,
}
if m.VolumeOptions.Labels != nil {
mount.VolumeOptions.Labels = make(map[string]string, len(m.VolumeOptions.Labels))
for k, v := range m.VolumeOptions.Labels {
mount.VolumeOptions.Labels[k] = v
}
}
if m.VolumeOptions.DriverConfig != nil {
mount.VolumeOptions.DriverConfig = &enginemount.Driver{
Name: m.VolumeOptions.DriverConfig.Name,
}
if m.VolumeOptions.DriverConfig.Options != nil {
mount.VolumeOptions.DriverConfig.Options = make(map[string]string, len(m.VolumeOptions.DriverConfig.Options))
for k, v := range m.VolumeOptions.DriverConfig.Options {
mount.VolumeOptions.DriverConfig.Options[k] = v
}
}
r = append(r, spec)
}
}
return r
if m.TmpfsOptions != nil {
mount.TmpfsOptions = &enginemount.TmpfsOptions{
SizeBytes: m.TmpfsOptions.SizeBytes,
Mode: m.TmpfsOptions.Mode,
}
}
return mount
}
func (c *containerConfig) healthcheck() *enginecontainer.HealthConfig {
@ -303,88 +333,12 @@ func (c *containerConfig) healthcheck() *enginecontainer.HealthConfig {
}
}
func getMountMask(m *api.Mount) string {
var maskOpts []string
if m.ReadOnly {
maskOpts = append(maskOpts, "ro")
}
switch m.Type {
case api.MountTypeVolume:
if m.VolumeOptions != nil && m.VolumeOptions.NoCopy {
maskOpts = append(maskOpts, "nocopy")
}
case api.MountTypeBind:
if m.BindOptions == nil {
break
}
switch m.BindOptions.Propagation {
case api.MountPropagationPrivate:
maskOpts = append(maskOpts, "private")
case api.MountPropagationRPrivate:
maskOpts = append(maskOpts, "rprivate")
case api.MountPropagationShared:
maskOpts = append(maskOpts, "shared")
case api.MountPropagationRShared:
maskOpts = append(maskOpts, "rshared")
case api.MountPropagationSlave:
maskOpts = append(maskOpts, "slave")
case api.MountPropagationRSlave:
maskOpts = append(maskOpts, "rslave")
}
case api.MountTypeTmpfs:
if m.TmpfsOptions == nil {
break
}
if m.TmpfsOptions.Mode != 0 {
maskOpts = append(maskOpts, fmt.Sprintf("mode=%o", m.TmpfsOptions.Mode))
}
if m.TmpfsOptions.SizeBytes != 0 {
// calculate suffix here, making this linux specific, but that is
// okay, since the API is that way anyway.
// we do this by finding the suffix that divides evenly into the
// value, returning the value itself, with no suffix, if it fails.
//
// For the most part, we don't enforce any semantic to this values.
// The operating system will usually align this and enforce minimum
// and maximums.
var (
size = m.TmpfsOptions.SizeBytes
suffix string
)
for _, r := range []struct {
suffix string
divisor int64
}{
{"g", 1 << 30},
{"m", 1 << 20},
{"k", 1 << 10},
} {
if size%r.divisor == 0 {
size = size / r.divisor
suffix = r.suffix
break
}
}
maskOpts = append(maskOpts, fmt.Sprintf("size=%d%s", size, suffix))
}
}
return strings.Join(maskOpts, ",")
}
func (c *containerConfig) hostConfig() *enginecontainer.HostConfig {
hc := &enginecontainer.HostConfig{
Resources: c.resources(),
Binds: c.binds(),
Tmpfs: c.tmpfs(),
GroupAdd: c.spec().Groups,
PortBindings: c.portBindings(),
Mounts: c.mounts(),
}
if c.spec().DNSConfig != nil {

View file

@ -185,7 +185,7 @@ func (config *Config) InstallCommonFlags(flags *pflag.FlagSet) {
flags.StringVar(&config.ClusterAdvertise, "cluster-advertise", "", "Address or interface name to advertise")
flags.StringVar(&config.ClusterStore, "cluster-store", "", "URL of the distributed storage backend")
flags.Var(opts.NewNamedMapOpts("cluster-store-opts", config.ClusterOpts, nil), "cluster-store-opt", "Set cluster store options")
flags.StringVar(&config.CorsHeaders, "api-cors-header", "", "Set CORS headers in the remote API")
flags.StringVar(&config.CorsHeaders, "api-cors-header", "", "Set CORS headers in the Engine API")
flags.IntVar(&maxConcurrentDownloads, "max-concurrent-downloads", defaultMaxConcurrentDownloads, "Set the max concurrent downloads for each pull")
flags.IntVar(&maxConcurrentUploads, "max-concurrent-uploads", defaultMaxConcurrentUploads, "Set the max concurrent uploads for each push")
flags.IntVar(&config.ShutdownTimeout, "shutdown-timeout", defaultShutdownTimeout, "Set the default shutdown timeout")

View file

@ -77,7 +77,7 @@ func (config *Config) InstallFlags(flags *pflag.FlagSet) {
flags.StringVar(&config.bridgeConfig.FixedCIDRv6, "fixed-cidr-v6", "", "IPv6 subnet for fixed IPs")
flags.BoolVar(&config.bridgeConfig.EnableUserlandProxy, "userland-proxy", true, "Use userland proxy for loopback traffic")
flags.StringVar(&config.bridgeConfig.UserlandProxyPath, "userland-proxy-path", "", "Path to the userland proxy binary")
flags.BoolVar(&config.EnableCors, "api-enable-cors", false, "Enable CORS headers in the remote API, this is deprecated by --api-cors-header")
flags.BoolVar(&config.EnableCors, "api-enable-cors", false, "Enable CORS headers in the Engine API, this is deprecated by --api-cors-header")
flags.MarkDeprecated("api-enable-cors", "Please use --api-cors-header")
flags.StringVar(&config.CgroupParent, "cgroup-parent", "", "Set parent cgroup for all containers")
flags.StringVar(&config.RemappedRoot, "userns-remap", "", "User/Group setting for user namespaces")

View file

@ -8,13 +8,11 @@ import (
"os"
"path/filepath"
"strconv"
"strings"
"syscall"
"time"
"github.com/Sirupsen/logrus"
"github.com/cloudflare/cfssl/log"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/container"
"github.com/docker/docker/daemon/links"
"github.com/docker/docker/pkg/idtools"
@ -22,16 +20,10 @@ import (
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/runconfig"
"github.com/docker/libnetwork"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runc/libcontainer/devices"
"github.com/opencontainers/runc/libcontainer/label"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
)
func u32Ptr(i int64) *uint32 { u := uint32(i); return &u }
func fmPtr(i int64) *os.FileMode { fm := os.FileMode(i); return &fm }
func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) {
var env []string
children := daemon.children(container)
@ -145,7 +137,7 @@ func (daemon *Daemon) setupIpcDirs(c *container.Container) error {
}
func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) {
if len(c.Secrets) == 0 {
if len(c.SecretReferences) == 0 {
return nil
}
@ -174,8 +166,17 @@ func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) {
return errors.Wrap(err, "unable to setup secret mount")
}
for _, s := range c.Secrets {
targetPath := filepath.Clean(s.Target)
for _, s := range c.SecretReferences {
if c.SecretStore == nil {
return fmt.Errorf("secret store is not initialized")
}
// TODO (ehazlett): use type switch when more are supported
if s.File == nil {
return fmt.Errorf("secret target type is not a file target")
}
targetPath := filepath.Clean(s.File.Name)
// ensure that the target is a filename only; no paths allowed
if targetPath != filepath.Base(targetPath) {
return fmt.Errorf("error creating secret: secret must not be a path")
@ -187,18 +188,22 @@ func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) {
}
logrus.WithFields(logrus.Fields{
"name": s.Name,
"name": s.File.Name,
"path": fPath,
}).Debug("injecting secret")
if err := ioutil.WriteFile(fPath, s.Data, s.Mode); err != nil {
secret := c.SecretStore.Get(s.SecretID)
if secret == nil {
return fmt.Errorf("unable to get secret from secret store")
}
if err := ioutil.WriteFile(fPath, secret.Spec.Data, s.File.Mode); err != nil {
return errors.Wrap(err, "error injecting secret")
}
uid, err := strconv.Atoi(s.UID)
uid, err := strconv.Atoi(s.File.UID)
if err != nil {
return err
}
gid, err := strconv.Atoi(s.GID)
gid, err := strconv.Atoi(s.File.GID)
if err != nil {
return err
}
@ -234,78 +239,6 @@ func killProcessDirectly(container *container.Container) error {
return nil
}
func specDevice(d *configs.Device) specs.Device {
return specs.Device{
Type: string(d.Type),
Path: d.Path,
Major: d.Major,
Minor: d.Minor,
FileMode: fmPtr(int64(d.FileMode)),
UID: u32Ptr(int64(d.Uid)),
GID: u32Ptr(int64(d.Gid)),
}
}
func specDeviceCgroup(d *configs.Device) specs.DeviceCgroup {
t := string(d.Type)
return specs.DeviceCgroup{
Allow: true,
Type: &t,
Major: &d.Major,
Minor: &d.Minor,
Access: &d.Permissions,
}
}
func getDevicesFromPath(deviceMapping containertypes.DeviceMapping) (devs []specs.Device, devPermissions []specs.DeviceCgroup, err error) {
resolvedPathOnHost := deviceMapping.PathOnHost
// check if it is a symbolic link
if src, e := os.Lstat(deviceMapping.PathOnHost); e == nil && src.Mode()&os.ModeSymlink == os.ModeSymlink {
if linkedPathOnHost, e := filepath.EvalSymlinks(deviceMapping.PathOnHost); e == nil {
resolvedPathOnHost = linkedPathOnHost
}
}
device, err := devices.DeviceFromPath(resolvedPathOnHost, deviceMapping.CgroupPermissions)
// if there was no error, return the device
if err == nil {
device.Path = deviceMapping.PathInContainer
return append(devs, specDevice(device)), append(devPermissions, specDeviceCgroup(device)), nil
}
// if the device is not a device node
// try to see if it's a directory holding many devices
if err == devices.ErrNotADevice {
// check if it is a directory
if src, e := os.Stat(resolvedPathOnHost); e == nil && src.IsDir() {
// mount the internal devices recursively
filepath.Walk(resolvedPathOnHost, func(dpath string, f os.FileInfo, e error) error {
childDevice, e := devices.DeviceFromPath(dpath, deviceMapping.CgroupPermissions)
if e != nil {
// ignore the device
return nil
}
// add the device to userSpecified devices
childDevice.Path = strings.Replace(dpath, resolvedPathOnHost, deviceMapping.PathInContainer, 1)
devs = append(devs, specDevice(childDevice))
devPermissions = append(devPermissions, specDeviceCgroup(childDevice))
return nil
})
}
}
if len(devs) > 0 {
return devs, devPermissions, nil
}
return devs, devPermissions, fmt.Errorf("error gathering device information while adding custom device %q: %s", deviceMapping.PathOnHost, err)
}
func detachMounted(path string) error {
return syscall.Unmount(path, syscall.MNT_DETACH)
}

View file

@ -221,7 +221,7 @@ func (daemon *Daemon) setRWLayer(container *container.Container) error {
}
// VolumeCreate creates a volume with the specified name, driver, and opts
// This is called directly from the remote API
// This is called directly from the Engine API
func (daemon *Daemon) VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error) {
if name == "" {
name = stringid.GenerateNonCryptoID()

View file

@ -24,6 +24,7 @@ import (
"github.com/docker/libnetwork/netlabel"
"github.com/docker/libnetwork/options"
blkiodev "github.com/opencontainers/runc/libcontainer/configs"
"golang.org/x/sys/windows"
)
const (
@ -230,6 +231,11 @@ func checkSystem() error {
if osv.Build < 14393 {
return fmt.Errorf("The docker daemon requires build 14393 or later of Windows Server 2016 or Windows 10")
}
vmcompute := windows.NewLazySystemDLL("vmcompute.dll")
if vmcompute.Load() != nil {
return fmt.Errorf("Failed to load vmcompute.dll. Ensure that the Containers role is installed.")
}
return nil
}

View file

@ -140,7 +140,7 @@ func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemo
// VolumeRm removes the volume with the given name.
// If the volume is referenced by a container it is not removed
// This is called directly from the remote API
// This is called directly from the Engine API
func (daemon *Daemon) VolumeRm(name string, force bool) error {
err := daemon.volumeRm(name)
if err == nil || force {

View file

@ -27,7 +27,7 @@ const termProcessTimeout = 10
func (d *Daemon) registerExecCommand(container *container.Container, config *exec.Config) {
// Storing execs in container in order to kill them gracefully whenever the container is stopped or removed.
container.ExecCommands.Add(config.ID, config)
// Storing execs in daemon for easy access via remote API.
// Storing execs in daemon for easy access via Engine API.
d.execCommands.Add(config.ID, config)
}

View file

@ -1,6 +1,7 @@
package daemon
import (
"fmt"
"os"
"runtime"
"sync/atomic"
@ -69,29 +70,26 @@ func (daemon *Daemon) SystemInfo() (*types.Info, error) {
}
})
securityOptions := []types.SecurityOpt{}
securityOptions := []string{}
if sysInfo.AppArmor {
securityOptions = append(securityOptions, types.SecurityOpt{Key: "Name", Value: "apparmor"})
securityOptions = append(securityOptions, "name=apparmor")
}
if sysInfo.Seccomp && supportsSeccomp {
profile := daemon.seccompProfilePath
if profile == "" {
profile = "default"
}
securityOptions = append(securityOptions,
types.SecurityOpt{Key: "Name", Value: "seccomp"},
types.SecurityOpt{Key: "Profile", Value: profile},
)
securityOptions = append(securityOptions, fmt.Sprintf("name=seccomp,profile=%s", profile))
}
if selinuxEnabled() {
securityOptions = append(securityOptions, types.SecurityOpt{Key: "Name", Value: "selinux"})
securityOptions = append(securityOptions, "name=selinux")
}
uid, gid := daemon.GetRemappedUIDGID()
if uid != 0 || gid != 0 {
securityOptions = append(securityOptions, types.SecurityOpt{Key: "Name", Value: "userns"})
securityOptions = append(securityOptions, "name=userns")
}
v := &types.InfoBase{
v := &types.Info{
ID: daemon.ID,
Containers: int(cRunning + cPaused + cStopped),
ContainersRunning: int(cRunning),
@ -129,6 +127,7 @@ func (daemon *Daemon) SystemInfo() (*types.Info, error) {
HTTPSProxy: sockets.GetProxyEnv("https_proxy"),
NoProxy: sockets.GetProxyEnv("no_proxy"),
LiveRestoreEnabled: daemon.configStore.LiveRestoreEnabled,
SecurityOptions: securityOptions,
Isolation: daemon.defaultIsolation,
}
@ -143,12 +142,7 @@ func (daemon *Daemon) SystemInfo() (*types.Info, error) {
}
v.Name = hostname
i := &types.Info{
InfoBase: v,
SecurityOptions: securityOptions,
}
return i, nil
return v, nil
}
// SystemVersion returns version information about the daemon.

View file

@ -14,7 +14,7 @@ import (
)
// FillPlatformInfo fills the platform related info.
func (daemon *Daemon) FillPlatformInfo(v *types.InfoBase, sysInfo *sysinfo.SysInfo) {
func (daemon *Daemon) FillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo) {
v.MemoryLimit = sysInfo.MemoryLimit
v.SwapLimit = sysInfo.SwapLimit
v.KernelMemory = sysInfo.KernelMemory

View file

@ -6,5 +6,5 @@ import (
)
// FillPlatformInfo fills the platform related info.
func (daemon *Daemon) FillPlatformInfo(v *types.InfoBase, sysInfo *sysinfo.SysInfo) {
func (daemon *Daemon) FillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo) {
}

View file

@ -88,7 +88,7 @@ func setDevices(s *specs.Spec, c *container.Container) error {
return err
}
for _, d := range hostDevices {
devs = append(devs, specDevice(d))
devs = append(devs, oci.Device(d))
}
rwm := "rwm"
devPermissions = []specs.DeviceCgroup{
@ -99,7 +99,7 @@ func setDevices(s *specs.Spec, c *container.Container) error {
}
} else {
for _, deviceMapping := range c.HostConfig.Devices {
d, dPermissions, err := getDevicesFromPath(deviceMapping)
d, dPermissions, err := oci.DevicesFromPath(deviceMapping.PathOnHost, deviceMapping.PathInContainer, deviceMapping.CgroupPermissions)
if err != nil {
return err
}
@ -221,18 +221,6 @@ func setCapabilities(s *specs.Spec, c *container.Container) error {
return nil
}
func delNamespace(s *specs.Spec, nsType specs.NamespaceType) {
idx := -1
for i, n := range s.Linux.Namespaces {
if n.Type == nsType {
idx = i
}
}
if idx >= 0 {
s.Linux.Namespaces = append(s.Linux.Namespaces[:idx], s.Linux.Namespaces[idx+1:]...)
}
}
func setNamespaces(daemon *Daemon, s *specs.Spec, c *container.Container) error {
userNS := false
// user
@ -283,7 +271,7 @@ func setNamespaces(daemon *Daemon, s *specs.Spec, c *container.Container) error
setNamespace(s, nsUser)
}
} else if c.HostConfig.IpcMode.IsHost() {
delNamespace(s, specs.NamespaceType("ipc"))
oci.RemoveNamespace(s, specs.NamespaceType("ipc"))
} else {
ns := specs.Namespace{Type: "ipc"}
setNamespace(s, ns)
@ -304,14 +292,14 @@ func setNamespaces(daemon *Daemon, s *specs.Spec, c *container.Container) error
setNamespace(s, nsUser)
}
} else if c.HostConfig.PidMode.IsHost() {
delNamespace(s, specs.NamespaceType("pid"))
oci.RemoveNamespace(s, specs.NamespaceType("pid"))
} else {
ns := specs.Namespace{Type: "pid"}
setNamespace(s, ns)
}
// uts
if c.HostConfig.UTSMode.IsHost() {
delNamespace(s, specs.NamespaceType("uts"))
oci.RemoveNamespace(s, specs.NamespaceType("uts"))
s.Hostname = ""
}

View file

@ -2,12 +2,25 @@ package daemon
import (
"github.com/Sirupsen/logrus"
containertypes "github.com/docker/docker/api/types/container"
swarmtypes "github.com/docker/docker/api/types/swarm"
"github.com/docker/swarmkit/agent/exec"
)
// SetContainerSecrets sets the container secrets needed
func (daemon *Daemon) SetContainerSecrets(name string, secrets []*containertypes.ContainerSecret) error {
if !secretsSupported() && len(secrets) > 0 {
// SetContainerSecretStore sets the secret store backend for the container
func (daemon *Daemon) SetContainerSecretStore(name string, store exec.SecretGetter) error {
c, err := daemon.GetContainer(name)
if err != nil {
return err
}
c.SecretStore = store
return nil
}
// SetContainerSecretReferences sets the container secret references needed
func (daemon *Daemon) SetContainerSecretReferences(name string, refs []*swarmtypes.SecretReference) error {
if !secretsSupported() && len(refs) > 0 {
logrus.Warn("secrets are not supported on this platform")
return nil
}
@ -17,7 +30,7 @@ func (daemon *Daemon) SetContainerSecrets(name string, secrets []*containertypes
return err
}
c.Secrets = secrets
c.SecretReferences = refs
return nil
}

View file

@ -23,7 +23,7 @@ func (daemon *Daemon) ContainerStats(ctx context.Context, prefixOrName string, c
if runtime.GOOS == "solaris" {
return fmt.Errorf("%+v does not support stats", runtime.GOOS)
}
// Remote API version (used for backwards compatibility)
// Engine API version (used for backwards compatibility)
apiVersion := config.Version
container, err := daemon.GetContainer(prefixOrName)
@ -31,8 +31,8 @@ func (daemon *Daemon) ContainerStats(ctx context.Context, prefixOrName string, c
return err
}
// If the container is not running and requires no stream, return an empty stats.
if !container.IsRunning() && !config.Stream {
// If the container is either not running or restarting and requires no stream, return an empty stats.
if (!container.IsRunning() || container.IsRestarting()) && !config.Stream {
return json.NewEncoder(config.OutStream).Encode(&types.Stats{})
}

View file

@ -26,7 +26,7 @@ var (
type mounts []container.Mount
// volumeToAPIType converts a volume.Volume to the type used by the remote API
// volumeToAPIType converts a volume.Volume to the type used by the Engine API
func volumeToAPIType(v volume.Volume) *types.Volume {
tv := &types.Volume{
Name: v.Name(),

View file

@ -140,6 +140,7 @@ func (daemon *Daemon) verifyVolumesInfo(container *container.Container) error {
if err != nil {
return errors.Wrap(err, "could not open container config")
}
defer f.Close()
var cv volumes
if err := json.NewDecoder(f).Decode(&cv); err != nil {
return errors.Wrap(err, "could not decode container config")

View file

@ -1,7 +1,10 @@
---
title: "Remote API v1.18"
title: "Engine API v1.18"
description: "API Documentation for Docker"
keywords: "API, Docker, rcli, REST, documentation"
redirect_from:
- /engine/reference/api/docker_remote_api_v1.18/
- /reference/api/docker_remote_api_v1.18/
---
<!-- This file is maintained within the docker/docker Github
@ -13,11 +16,10 @@ keywords: "API, Docker, rcli, REST, documentation"
will be rejected.
-->
# Docker Remote API v1.18
# Docker Engine API v1.18
# 1. Brief introduction
- The Remote API has replaced `rcli`.
- The daemon listens on `unix:///var/run/docker.sock` but you can
[Bind Docker to another host/port or a Unix socket](../commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket).
- The API tends to be REST, but for some complex commands, like `attach`
@ -2148,7 +2150,7 @@ This might change in the future.
## 3.3 CORS Requests
To set cross origin requests to the remote api please give values to
To set cross origin requests to the Engine API please give values to
`--api-cors-header` when running Docker in daemon mode. Set * (asterisk) allows all,
default or blank means CORS disabled

View file

@ -1,7 +1,10 @@
---
title: "Remote API v1.19"
title: "Engine API v1.19"
description: "API Documentation for Docker"
keywords: "API, Docker, rcli, REST, documentation"
redirect_from:
- /engine/reference/api/docker_remote_api_v1.19/
- /reference/api/docker_remote_api_v1.19/
---
<!-- This file is maintained within the docker/docker Github
@ -13,11 +16,10 @@ keywords: "API, Docker, rcli, REST, documentation"
will be rejected.
-->
# Docker Remote API v1.19
# Docker Engine API v1.19
## 1. Brief introduction
- The Remote API has replaced `rcli`.
- The daemon listens on `unix:///var/run/docker.sock` but you can
[Bind Docker to another host/port or a Unix socket](../commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket).
- The API tends to be REST. However, for some complex commands, like `attach`
@ -2230,7 +2232,7 @@ from **200 OK** to **101 UPGRADED** and resends the same headers.
## 3.3 CORS Requests
To set cross origin requests to the remote api please give values to
To set cross origin requests to the Engine API please give values to
`--api-cors-header` when running Docker in daemon mode. Set * (asterisk) allows all,
default or blank means CORS disabled

View file

@ -1,7 +1,10 @@
---
title: "Remote API v1.20"
title: "Engine API v1.20"
description: "API Documentation for Docker"
keywords: "API, Docker, rcli, REST, documentation"
redirect_from:
- /engine/reference/api/docker_remote_api_v1.20/
- /reference/api/docker_remote_api_v1.20/
---
<!-- This file is maintained within the docker/docker Github
@ -13,11 +16,10 @@ keywords: "API, Docker, rcli, REST, documentation"
will be rejected.
-->
# Docker Remote API v1.20
# Docker Engine API v1.20
# 1. Brief introduction
- The Remote API has replaced `rcli`.
- The daemon listens on `unix:///var/run/docker.sock` but you can
[Bind Docker to another host/port or a Unix socket](../commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket).
- The API tends to be REST. However, for some complex commands, like `attach`
@ -2383,7 +2385,7 @@ from **200 OK** to **101 UPGRADED** and resends the same headers.
## 3.3 CORS Requests
To set cross origin requests to the remote api please give values to
To set cross origin requests to the Engine API please give values to
`--api-cors-header` when running Docker in daemon mode. Set * (asterisk) allows all,
default or blank means CORS disabled

View file

@ -1,7 +1,10 @@
---
title: "Remote API v1.21"
title: "Engine API v1.21"
description: "API Documentation for Docker"
keywords: "API, Docker, rcli, REST, documentation"
redirect_from:
- /engine/reference/api/docker_remote_api_v1.21/
- /reference/api/docker_remote_api_v1.21/
---
<!-- This file is maintained within the docker/docker Github
@ -13,11 +16,10 @@ keywords: "API, Docker, rcli, REST, documentation"
will be rejected.
-->
# Docker Remote API v1.21
# Docker Engine API v1.21
## 1. Brief introduction
- The Remote API has replaced `rcli`.
- The daemon listens on `unix:///var/run/docker.sock` but you can
[Bind Docker to another host/port or a Unix socket](../commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket).
- The API tends to be REST. However, for some complex commands, like `attach`
@ -2961,7 +2963,7 @@ from **200 OK** to **101 UPGRADED** and resends the same headers.
## 3.3 CORS Requests
To set cross origin requests to the remote api please give values to
To set cross origin requests to the Engine API please give values to
`--api-cors-header` when running Docker in daemon mode. Set * (asterisk) allows all,
default or blank means CORS disabled

View file

@ -1,7 +1,10 @@
---
title: "Remote API v1.22"
title: "Engine API v1.22"
description: "API Documentation for Docker"
keywords: "API, Docker, rcli, REST, documentation"
redirect_from:
- /engine/reference/api/docker_remote_api_v1.22/
- /reference/api/docker_remote_api_v1.22/
---
<!-- This file is maintained within the docker/docker Github
@ -13,11 +16,10 @@ keywords: "API, Docker, rcli, REST, documentation"
will be rejected.
-->
# Docker Remote API v1.22
# Docker Engine API v1.22
# 1. Brief introduction
- The Remote API has replaced `rcli`.
- The daemon listens on `unix:///var/run/docker.sock` but you can
[Bind Docker to another host/port or a Unix socket](../commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket).
- The API tends to be REST. However, for some complex commands, like `attach`
@ -3299,7 +3301,7 @@ from **200 OK** to **101 UPGRADED** and resends the same headers.
## 3.3 CORS Requests
To set cross origin requests to the remote api please give values to
To set cross origin requests to the Engine API please give values to
`--api-cors-header` when running Docker in daemon mode. Set * (asterisk) allows all,
default or blank means CORS disabled

View file

@ -1,7 +1,10 @@
---
title: "Remote API v1.23"
title: "Engine API v1.23"
description: "API Documentation for Docker"
keywords: "API, Docker, rcli, REST, documentation"
redirect_from:
- /engine/reference/api/docker_remote_api_v1.23/
- /reference/api/docker_remote_api_v1.23/
---
<!-- This file is maintained within the docker/docker Github
@ -13,11 +16,10 @@ keywords: "API, Docker, rcli, REST, documentation"
will be rejected.
-->
# Docker Remote API v1.23
# Docker Engine API v1.23
## 1. Brief introduction
- The Remote API has replaced `rcli`.
- The daemon listens on `unix:///var/run/docker.sock` but you can
[Bind Docker to another host/port or a Unix socket](../commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket).
- The API tends to be REST. However, for some complex commands, like `attach`
@ -3416,7 +3418,7 @@ from **200 OK** to **101 UPGRADED** and resends the same headers.
## 3.3 CORS Requests
To set cross origin requests to the remote api please give values to
To set cross origin requests to the Engine API please give values to
`--api-cors-header` when running Docker in daemon mode. Set * (asterisk) allows all,
default or blank means CORS disabled

View file

@ -1,7 +1,10 @@
---
title: "Remote API v1.24"
title: "Engine API v1.24"
description: "API Documentation for Docker"
keywords: "API, Docker, rcli, REST, documentation"
redirect_from:
- /engine/reference/api/docker_remote_api_v1.24/
- /reference/api/docker_remote_api_v1.24/
---
<!-- This file is maintained within the docker/docker Github
@ -13,11 +16,10 @@ keywords: "API, Docker, rcli, REST, documentation"
will be rejected.
-->
# Docker Remote API v1.24
# Docker Engine API v1.24
# 1. Brief introduction
- The Remote API has replaced `rcli`.
- The daemon listens on `unix:///var/run/docker.sock` but you can
[Bind Docker to another host/port or a Unix socket](../commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket).
- The API tends to be REST. However, for some complex commands, like `attach`
@ -26,7 +28,7 @@ keywords: "API, Docker, rcli, REST, documentation"
# 2. Errors
The Remote API uses standard HTTP status codes to indicate the success or failure of the API call. The body of the response will be JSON in the following format:
The Engine API uses standard HTTP status codes to indicate the success or failure of the API call. The body of the response will be JSON in the following format:
{
"message": "page not found"
@ -5274,7 +5276,7 @@ from **200 OK** to **101 UPGRADED** and resends the same headers.
## 4.3 CORS Requests
To set cross origin requests to the remote api please give values to
To set cross origin requests to the Engine API please give values to
`--api-cors-header` when running Docker in daemon mode. Set * (asterisk) allows all,
default or blank means CORS disabled

View file

@ -1,6 +1,6 @@
---
title: "Remote API"
description: "API Documentation for Docker"
title: "Engine API version history"
description: "Documentation of changes that have been made to Engine API."
keywords: "API, Docker, rcli, REST, documentation"
---
@ -13,133 +13,9 @@ keywords: "API, Docker, rcli, REST, documentation"
will be rejected.
-->
# Docker Remote API
## v1.25 API changes
Docker's Remote API uses an open schema model. In this model, unknown
properties in incoming messages are ignored. Client applications need to take
this behavior into account to ensure they do not break when talking to newer
Docker daemons.
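
As an aside (illustrative, not part of this diff), the open-schema behaviour comes for free in Go clients because `encoding/json` silently drops unknown fields; a minimal sketch with a hypothetical payload:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Only the fields this client knows about are declared; anything extra a
// newer daemon returns is simply ignored during decoding.
type versionInfo struct {
	Version    string `json:"Version"`
	APIVersion string `json:"ApiVersion"`
}

func main() {
	// Hypothetical response from a newer daemon that includes an extra field.
	payload := []byte(`{"Version":"1.13.0","ApiVersion":"1.25","MinAPIVersion":"1.12"}`)

	var v versionInfo
	if err := json.Unmarshal(payload, &v); err != nil {
		panic(err)
	}
	fmt.Println(v.Version, v.APIVersion) // MinAPIVersion is dropped, not an error
}
```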
The API tends to be REST, but for some complex commands, like attach or pull,
the HTTP connection is hijacked to transport STDOUT, STDIN, and STDERR.
By default the Docker daemon listens on `unix:///var/run/docker.sock` and the
client must have `root` access to interact with the daemon. If a group named
`docker` exists on your system, `docker` applies ownership of the socket to the
group.
To connect to the Docker daemon with cURL you need to use cURL 7.40 or
later, as these versions have the `--unix-socket` flag available. To
run `curl` against the daemon on the default socket, use the
following:
When using cURL 7.50 or later:
```console
$ curl --unix-socket /var/run/docker.sock http://localhost/v1.25/containers/json
```
When using cURL 7.40, `localhost` must be omitted:
```console
$ curl --unix-socket /var/run/docker.sock http://v1.25/containers/json
```
If you have bound the Docker daemon to a different socket path or TCP
port, you would reference that in your cURL rather than the
default.
## Versioning
It is required to supply a version to API calls. This is done by prefixing
the URL with the version number.
The current version of the API is 1.25, so to call the `/info` endpoint, you
would send a request to the URL `/v1.25/info`. To call, for example, version
1.24 of the API instead, you would request `/v1.24/info`.
If a newer daemon is installed, new properties may be returned even when
calling older versions of the API.
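
To make the version prefix concrete, here is a small Go sketch (illustrative only, not from this changeset) that calls the versioned `/info` endpoint over the default Unix socket using nothing but the standard library:

```go
package main

import (
	"context"
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
)

func main() {
	// Route every request through the daemon's default Unix socket;
	// the host in the URL is just a placeholder required by net/http.
	client := &http.Client{
		Transport: &http.Transport{
			DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
				return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
			},
		},
	}

	// The /v1.25/ prefix pins the request to API version 1.25.
	resp, err := client.Get("http://localhost/v1.25/info")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}
```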
In previous versions of Docker, it was possible to access the API without
providing a version. This behaviour is now deprecated and will be removed in a
future version of Docker.
Use the table below to find the API version for a Docker version:
Docker version | API version | Changes
----------------|------------------------------------|------------------------------------------------------
1.13.x | [1.25](docker_remote_api_v1.25.md) | [API changes](docker_remote_api.md#v1-25-api-changes)
1.12.x | [1.24](docker_remote_api_v1.24.md) | [API changes](docker_remote_api.md#v1-24-api-changes)
1.11.x | [1.23](docker_remote_api_v1.23.md) | [API changes](docker_remote_api.md#v1-23-api-changes)
1.10.x | [1.22](docker_remote_api_v1.22.md) | [API changes](docker_remote_api.md#v1-22-api-changes)
1.9.x | [1.21](docker_remote_api_v1.21.md) | [API changes](docker_remote_api.md#v1-21-api-changes)
1.8.x | [1.20](docker_remote_api_v1.20.md) | [API changes](docker_remote_api.md#v1-20-api-changes)
1.7.x | [1.19](docker_remote_api_v1.19.md) | [API changes](docker_remote_api.md#v1-19-api-changes)
1.6.x | [1.18](docker_remote_api_v1.18.md) | [API changes](docker_remote_api.md#v1-18-api-changes)
Refer to the [GitHub repository](
https://github.com/docker/docker/tree/master/docs/reference/api) for
older releases.
## Authentication
Authentication configuration is handled client side, so the
client has to send the `authConfig` as a `POST` in `/images/(name)/push`. The
`authConfig`, set as the `X-Registry-Auth` header, is currently a Base64 encoded
(JSON) string with the following structure:
```JSON
{"username": "string", "password": "string", "email": "string",
"serveraddress" : "string", "auth": ""}
```
Callers should leave the `auth` field empty. The `serveraddress` is a domain or IP
without a protocol. Throughout this structure, double quotes are required.
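
A small Go sketch (illustrative, not from this changeset) of producing the `X-Registry-Auth` header value described above:

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

type authConfig struct {
	Username      string `json:"username"`
	Password      string `json:"password"`
	Email         string `json:"email"`
	ServerAddress string `json:"serveraddress"`
	Auth          string `json:"auth"` // left empty, as the text above requires
}

func main() {
	// Hypothetical credentials; serveraddress is a domain/IP without protocol.
	cfg := authConfig{
		Username:      "janedoe",
		Password:      "secret",
		Email:         "jane.doe@example.com",
		ServerAddress: "registry.example.com",
	}

	raw, err := json.Marshal(cfg)
	if err != nil {
		panic(err)
	}

	// The JSON document is Base64-encoded and sent as the X-Registry-Auth header.
	// URL-safe Base64 is assumed here; the spec above only says "Base64 encoded".
	fmt.Println("X-Registry-Auth:", base64.URLEncoding.EncodeToString(raw))
}
```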
## Using Docker Machine with the API
If you are using `docker-machine`, the Docker daemon is on a host that
uses an encrypted TCP socket using TLS. This means, for Docker Machine users,
you need to add extra parameters to `curl` or `wget` when making test
API requests, for example:
```
curl --insecure \
--cert $DOCKER_CERT_PATH/cert.pem \
--key $DOCKER_CERT_PATH/key.pem \
https://YOUR_VM_IP:2376/v1.25/images/json
wget --no-check-certificate --certificate=$DOCKER_CERT_PATH/cert.pem \
--private-key=$DOCKER_CERT_PATH/key.pem \
https://YOUR_VM_IP:2376/v1.25/images/json -O - -q
```
## Docker Events
The following diagram depicts the container states accessible through the API.
![States](images/event_state.png)
Some container-related events are not affected by container state, so they are not included in this diagram. These events are:
* **export** emitted by `docker export`
* **exec_create** emitted by `docker exec`
* **exec_start** emitted by `docker exec` after **exec_create**
* **detach** emitted when client is detached from container process
* **exec_detach** emitted when client is detached from exec process
Running `docker rmi` emits an **untag** event when removing an image name. The `rmi` command may also emit **delete** events when images are deleted by ID directly or by deleting the last tag referring to the image.
> **Acknowledgment**: This diagram and the accompanying text were used with the permission of Matt Good and Gilder Labs. See Matt's original blog post [Docker Events Explained](https://gliderlabs.com/blog/2015/04/14/docker-events-explained/).
## Version history
This section lists each version from latest to oldest. Each listing includes a link to the full documentation set and the changes relevant in that release.
### v1.25 API changes
[Docker Remote API v1.25](docker_remote_api_v1.25.md) documentation
[Docker Engine API v1.25](v1.25.md) documentation
* The API version is now required in all API calls. Instead of just requesting, for example, the URL `/containers/json`, you must now request `/v1.25/containers/json`.
* `GET /version` now returns `MinAPIVersion`.
@ -198,9 +74,9 @@ This section lists each version from latest to oldest. Each listing includes a
* `GET /images/json` now support a `reference` filter.
### v1.24 API changes
## v1.24 API changes
[Docker Remote API v1.24](docker_remote_api_v1.24.md) documentation
[Docker Engine API v1.24](v1.24.md) documentation
* `POST /containers/create` now takes `StorageOpt` field.
* `GET /info` now returns `SecurityOptions` field, showing if `apparmor`, `seccomp`, or `selinux` is supported.
@ -230,9 +106,9 @@ This section lists each version from latest to oldest. Each listing includes a
* `POST /containers/create/` `HostConfig.PidMode` field now accepts `container:<name|id>`,
to have the container join the PID namespace of an existing container.
### v1.23 API changes
## v1.23 API changes
[Docker Remote API v1.23](docker_remote_api_v1.23.md) documentation
[Docker Engine API v1.23](v1.23.md) documentation
* `GET /containers/json` returns the state of the container, one of `created`, `restarting`, `running`, `paused`, `exited` or `dead`.
* `GET /containers/json` returns the mount points for the container.
@ -252,9 +128,9 @@ This section lists each version from latest to oldest. Each listing includes a
* `GET /containers/(id or name)/logs` now accepts a `details` query parameter to stream the extra attributes that were provided to the containers `LogOpts`, such as environment variables and labels, with the logs.
* `POST /images/load` now returns progress information as a JSON stream, and has a `quiet` query parameter to suppress progress details.
### v1.22 API changes
## v1.22 API changes
[Docker Remote API v1.22](docker_remote_api_v1.22.md) documentation
[Docker Engine API v1.22](v1.22.md) documentation
* `POST /container/(name)/update` updates the resources of a container.
* `GET /containers/json` supports filter `isolation` on Windows.
@ -286,9 +162,9 @@ This section lists each version from latest to oldest. Each listing includes a
* `GET /info` can now return a `SystemStatus` field useful for returning additional information about applications
that are built on top of engine.
### v1.21 API changes
## v1.21 API changes
[Docker Remote API v1.21](docker_remote_api_v1.21.md) documentation
[Docker Engine API v1.21](v1.21.md) documentation
* `GET /volumes` lists volumes from all volume drivers.
* `POST /volumes/create` to create a volume.
@ -321,9 +197,9 @@ This section lists each version from latest to oldest. Each listing includes a
badness heuristic. This heuristic selects which processes the OOM killer kills
under out-of-memory conditions.
### v1.20 API changes
## v1.20 API changes
[Docker Remote API v1.20](docker_remote_api_v1.20.md) documentation
[Docker Engine API v1.20](v1.20.md) documentation
* `GET /containers/(id)/archive` get an archive of filesystem content from a container.
* `PUT /containers/(id)/archive` upload an archive of content to be extracted to
@ -333,9 +209,9 @@ endpoint which can be used to download files and directories from a container.
* The `hostConfig` option now accepts the field `GroupAdd`, which specifies a
list of additional groups that the container process will run as.
### v1.19 API changes
## v1.19 API changes
[Docker Remote API v1.19](docker_remote_api_v1.19.md) documentation
[Docker Engine API v1.19](v1.19.md) documentation
* When the daemon detects a version mismatch with the client, usually when
the client is newer than the daemon, an HTTP 400 is now returned instead
@ -349,9 +225,9 @@ end point now returns the new boolean fields `CpuCfsPeriod`, `CpuCfsQuota`, and
* The `hostConfig` option now accepts the fields `CpuPeriod` and `CpuQuota`
* `POST /build` accepts `cpuperiod` and `cpuquota` options
### v1.18 API changes
## v1.18 API changes
[Docker Remote API v1.18](docker_remote_api_v1.18.md) documentation
[Docker Engine API v1.18](v1.18.md) documentation
* `GET /version` now returns `Os`, `Arch` and `KernelVersion`.
* `POST /containers/create` and `POST /containers/(id)/start`allow you to set ulimit settings for use in the container.

View file

@ -242,7 +242,18 @@ of the `--changes` flag that allows to pass `Dockerfile` commands.
### Interacting with V1 registries
Version 1.9 adds a flag (`--disable-legacy-registry=false`) which prevents the docker daemon from `pull`, `push`, and `login` operations against v1 registries. Though disabled by default, this signals the intent to deprecate the v1 protocol.
**Disabled By Default In Release: v1.14**
**Target For Removal In Release: v1.17**
Version 1.9 adds a flag (`--disable-legacy-registry=false`) which prevents the
docker daemon from `pull`, `push`, and `login` operations against v1
registries. Though enabled by default, this signals the intent to deprecate
the v1 protocol.
Support for the v1 protocol to the public registry was removed in 1.13. Any
mirror configurations using v1 should be updated to use a
[v2 registry mirror](https://docs.docker.com/registry/recipes/mirror/).
### Docker Content Trust ENV passphrase variables name change
**Deprecated In Release: [v1.9.0](https://github.com/docker/docker/releases/tag/v1.9.0)**

View file

@ -16,6 +16,7 @@ keywords: "API, Usage, plugins, documentation, developer"
will be rejected.
-->
# Plugin Config Version 0 of Plugin V2
This document outlines the format of the V0 plugin configuration. The plugin
@ -85,10 +86,6 @@ Config provides the base accessible fields for working with V0 plugin format
- **host**
- **none**
- **`capabilities`** *array*
capabilities of the plugin (*Linux only*), see list [`here`](https://github.com/opencontainers/runc/blob/master/libcontainer/SPEC.md#security)
- **`mounts`** *PluginMount array*
mount of the plugin, struct consisting of the following fields, see [`MOUNTS`](https://github.com/opencontainers/runtime-spec/blob/master/config.md#mounts)
@ -117,22 +114,6 @@ Config provides the base accessible fields for working with V0 plugin format
options of the mount.
- **`devices`** *PluginDevice array*
device of the plugin, (*Linux only*), struct consisting of the following fields, see [`DEVICES`](https://github.com/opencontainers/runtime-spec/blob/master/config-linux.md#devices)
- **`name`** *string*
name of the device.
- **`description`** *string*
description of the device.
- **`path`** *string*
path of the device.
- **`env`** *PluginEnv array*
env of the plugin, struct consisting of the following fields
@ -165,6 +146,27 @@ Config provides the base accessible fields for working with V0 plugin format
values of the args.
- **`linux`** *PluginLinux*
- **`capabilities`** *string array*
capabilities of the plugin (*Linux only*), see list [`here`](https://github.com/opencontainers/runc/blob/master/libcontainer/SPEC.md#security)
- **`devices`** *PluginDevice array*
device of the plugin, (*Linux only*), struct consisting of the following fields, see [`DEVICES`](https://github.com/opencontainers/runtime-spec/blob/master/config-linux.md#devices)
- **`name`** *string*
name of the device.
- **`description`** *string*
description of the device.
- **`path`** *string*
path of the device.
## Example Config

View file

@ -27,6 +27,9 @@ volume drivers, but more plugin driver types will be available in future release
For information about the legacy plugin system available in Docker Engine 1.12
and earlier, see [Understand legacy Docker Engine plugins](legacy_plugins.md).
> **Note**: Docker Engine managed plugins are currently not supported
on Windows daemons.
## Installing and using a plugin
Plugins are distributed as Docker images and can be hosted on Docker Hub or on

View file

@ -22,7 +22,7 @@ refer to [Docker Engine plugin system](index.md).
Docker's out-of-the-box authorization model is all or nothing. Any user with
permission to access the Docker daemon can run any Docker client command. The
same is true for callers using Docker's remote API to contact the daemon. If you
same is true for callers using Docker's Engine API to contact the daemon. If you
require greater access control, you can create authorization plugins and add
them to your Docker daemon configuration. Using an authorization plugin, a
Docker administrator can configure granular access policies for managing access
@ -69,7 +69,7 @@ can be ordered. Each request to the daemon passes in order through the chain.
Only when all the plugins grant access to the resource, is the access granted.
When an HTTP request is made to the Docker daemon through the CLI or via the
remote API, the authentication subsystem passes the request to the installed
Engine API, the authentication subsystem passes the request to the installed
authentication plugin(s). The request contains the user (caller) and command
context. The plugin is responsible for deciding whether to allow or deny the
request.
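
Purely as a conceptual sketch (the names below are illustrative and not the actual plugin wire protocol), an authorization decision boils down to a function over the caller and request context:

```go
package main

import "strings"

// request is a simplified stand-in for the user and command context an
// authorization plugin receives with each daemon request.
type request struct {
	User       string
	Method     string
	RequestURI string
}

// decision mirrors the allow/deny-with-message outcome a plugin returns.
type decision struct {
	Allow bool
	Msg   string
}

// authorize denies container creation for non-admin callers and allows
// everything else. Real plugins apply site-specific policy instead.
func authorize(req request) decision {
	if req.Method == "POST" &&
		strings.Contains(req.RequestURI, "/containers/create") &&
		req.User != "admin" {
		return decision{Allow: false, Msg: "only admin may create containers"}
	}
	return decision{Allow: true}
}

func main() {
	_ = authorize(request{User: "alice", Method: "POST", RequestURI: "/v1.25/containers/create"})
}
```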

View file

@ -1,22 +0,0 @@
---
published: false
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
This directory holds the authoritative specifications of APIs defined and implemented by Docker. Currently this includes:
* The remote API by which a docker node can be queried over HTTP
* The registry API by which a docker node can download and upload
images for storage and sharing
* The index search API by which a docker node can search the public
index for images to download
* The docker.io OAuth and accounts API which 3rd party services can
use to access account information

Binary file not shown (image removed; was 38 KiB).

View file

@ -1,20 +0,0 @@
---
published: false
title: "Docker Hub API"
description: "API Documentation for the Docker Hub API"
keywords: "API, Docker, index, REST, documentation, Docker Hub, registry"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# Docker Hub API
This API is deprecated as of 1.7. To view the old version, see the [Docker Hub
API](https://docs.docker.com/v1.7/docker/reference/api/docker-io_api/) in the 1.7 documentation.

View file

@ -1,281 +0,0 @@
---
title: "docker.io accounts API"
description: "API Documentation for docker.io accounts."
keywords: "API, Docker, accounts, REST, documentation"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# docker.io accounts API
## Get a single user
`GET /api/v1.1/users/:username/`
Get profile info for the specified user.
Parameters:
- **username** username of the user whose profile info is being
requested.
Request Headers:
- **Authorization** required authentication credentials of
either type HTTP Basic or OAuth Bearer Token.
Status Codes:
- **200** success, user data returned.
- **401** authentication error.
- **403** permission error, authenticated user must be the user
whose data is being requested, OAuth access tokens must have
`profile_read` scope.
- **404** the specified username does not exist.
**Example request**:
GET /api/v1.1/users/janedoe/ HTTP/1.1
Host: www.docker.io
Accept: application/json
Authorization: Basic dXNlcm5hbWU6cGFzc3dvcmQ=
**Example response**:
HTTP/1.1 200 OK
Content-Type: application/json
{
"id": 2,
"username": "janedoe",
"url": "https://www.docker.io/api/v1.1/users/janedoe/",
"date_joined": "2014-02-12T17:58:01.431312Z",
"type": "User",
"full_name": "Jane Doe",
"location": "San Francisco, CA",
"company": "Success, Inc.",
"profile_url": "https://docker.io/",
"gravatar_url": "https://secure.gravatar.com/avatar/0212b397124be4acd4e7dea9aa357.jpg?s=80&r=g&d=mm"
"email": "jane.doe@example.com",
"is_active": true
}
## Update a single user
`PATCH /api/v1.1/users/:username/`
Update profile info for the specified user.
Parameters:
- **username** username of the user whose profile info is being
updated.
Json Parameters:
- **full_name** (*string*) (optional) the new name of the user.
- **location** (*string*) (optional) the new location.
- **company** (*string*) (optional) the new company of the user.
- **profile_url** (*string*) (optional) the new profile url.
- **gravatar_email** (*string*) (optional) the new Gravatar
email address.
Request Headers:
- **Authorization** required authentication credentials of
either type HTTP Basic or OAuth Bearer Token.
- **Content-Type** MIME Type of post data. JSON, url-encoded
form data, etc.
Status Codes:
- **200** success, user data updated.
- **400** post data validation error.
- **401** authentication error.
- **403** permission error, authenticated user must be the user
whose data is being updated, OAuth access tokens must have
`profile_write` scope.
- **404** the specified username does not exist.
**Example request**:
PATCH /api/v1.1/users/janedoe/ HTTP/1.1
Host: www.docker.io
Accept: application/json
Authorization: Basic dXNlcm5hbWU6cGFzc3dvcmQ=
{
"location": "Private Island",
"profile_url": "http://janedoe.com/",
"company": "Retired",
}
**Example response**:
HTTP/1.1 200 OK
Content-Type: application/json
{
"id": 2,
"username": "janedoe",
"url": "https://www.docker.io/api/v1.1/users/janedoe/",
"date_joined": "2014-02-12T17:58:01.431312Z",
"type": "User",
"full_name": "Jane Doe",
"location": "Private Island",
"company": "Retired",
"profile_url": "http://janedoe.com/",
"gravatar_url": "https://secure.gravatar.com/avatar/0212b397124be4acd4e7dea9aa357.jpg?s=80&r=g&d=mm"
"email": "jane.doe@example.com",
"is_active": true
}
## List email addresses for a user
`GET /api/v1.1/users/:username/emails/`
List email info for the specified user.
Parameters:
- **username** username of the user whose profile info is being
updated.
Request Headers:
- **Authorization** required authentication credentials of
either type HTTP Basic or OAuth Bearer Token
Status Codes:
- **200** success, user data updated.
- **401** authentication error.
- **403** permission error, authenticated user must be the user
whose data is being requested, OAuth access tokens must have
`email_read` scope.
- **404** the specified username does not exist.
**Example request**:
GET /api/v1.1/users/janedoe/emails/ HTTP/1.1
Host: www.docker.io
Accept: application/json
Authorization: Bearer zAy0BxC1wDv2EuF3tGs4HrI6qJp6KoL7nM
**Example response**:
HTTP/1.1 200 OK
Content-Type: application/json
[
{
"email": "jane.doe@example.com",
"verified": true,
"primary": true
}
]
## Add email address for a user
`POST /api/v1.1/users/:username/emails/`
Add a new email address to the specified user's account. The email
address must be verified separately, a confirmation email is not
automatically sent.
Json Parameters:
- **email** (*string*) email address to be added.
Request Headers:
- **Authorization** required authentication credentials of
either type HTTP Basic or OAuth Bearer Token.
- **Content-Type** MIME Type of post data. JSON, url-encoded
form data, etc.
Status Codes:
- **201** success, new email added.
- **400** data validation error.
- **401** authentication error.
- **403** permission error, authenticated user must be the user
whose data is being requested, OAuth access tokens must have
`email_write` scope.
- **404** the specified username does not exist.
**Example request**:
POST /api/v1.1/users/janedoe/emails/ HTTP/1.1
Host: www.docker.io
Accept: application/json
Content-Type: application/json
Authorization: Bearer zAy0BxC1wDv2EuF3tGs4HrI6qJp6KoL7nM
{
"email": "jane.doe+other@example.com"
}
**Example response**:
HTTP/1.1 201 Created
Content-Type: application/json
{
"email": "jane.doe+other@example.com",
"verified": false,
"primary": false
}
## Delete email address for a user
`DELETE /api/v1.1/users/:username/emails/`
Delete an email address from the specified user's account. You
cannot delete a user's primary email address.
Json Parameters:
- **email** (*string*) email address to be deleted.
Request Headers:
- **Authorization** required authentication credentials of
either type HTTP Basic or OAuth Bearer Token.
- **Content-Type** MIME Type of post data. JSON, url-encoded
form data, etc.
Status Codes:
- **204** success, email address removed.
- **400** validation error.
- **401** authentication error.
- **403** permission error, authenticated user must be the user
whose data is being requested, OAuth access tokens must have
`email_write` scope.
- **404** the specified username or email address does not
exist.
**Example request**:
DELETE /api/v1.1/users/janedoe/emails/ HTTP/1.1
Host: www.docker.io
Accept: application/json
Content-Type: application/json
Authorization: Bearer zAy0BxC1wDv2EuF3tGs4HrI6qJp6KoL7nM
{
"email": "jane.doe+other@example.com"
}
**Example response**:
HTTP/1.1 204 NO CONTENT
Content-Length: 0

File diff suppressed because it is too large.

View file

@ -1,23 +0,0 @@
---
published: false
title: "The Docker Hub and the Registry v1"
description: "Documentation for docker Registry and Registry API"
keywords: "docker, registry, api, hub"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# The Docker Hub and the Registry v1
This API is deprecated as of 1.7. To view the old version, see the [go
here](https://docs.docker.com/v1.7/docker/reference/api/hub_registry_spec/) in
the 1.7 documentation. If you want an overview of the current features in
Docker Hub or other image management features see the [image management
overview](https://docs.docker.com/engine/userguide/eng-image/image_management/) in the current documentation set.

File diff suppressed because one or more lines are too long

Binary file not shown (image removed; was 76 KiB).

View file

@ -1,20 +0,0 @@
---
title: "API Reference"
description: "Reference"
keywords: "Engine"
identifier: "engine_remoteapi"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# API Reference
* [Docker Remote API](docker_remote_api.md)
* [Docker Remote API client libraries](remote_api_client_libraries.md)

View file

@ -1,138 +0,0 @@
---
title: "Remote API client libraries"
description: "Various client libraries available to use with the Docker remote API"
keywords: "API, Docker, index, registry, REST, documentation, clients, C#, Erlang, Go, Groovy, Java, JavaScript, Perl, PHP, Python, Ruby, Rust, Scala"
---
<!-- This file is maintained within the docker/docker Github
repository at https://github.com/docker/docker/. Make all
pull requests against that repo. If you see this file in
another repository, consider it read-only there, as it will
periodically be overwritten by the definitive file. Pull
requests which include edits to this file in other repositories
will be rejected.
-->
# Docker Remote API client libraries
These libraries make it easier to build applications on top of the Docker
Remote API with various programming languages. They have not been tested by the
Docker maintainers for compatibility, so if you run into any issues, file them
with the library maintainers.
<table border="1" class="docutils">
<colgroup>
<col width="29%">
<col width="23%">
<col width="48%">
</colgroup>
<thead valign="bottom">
<tr>
<th class="head">Language/Framework</th>
<th class="head">Name</th>
<th class="head">Repository</th>
</tr>
</thead>
<tbody valign = "top">
<tr>
<td>C#</td>
<td>Docker.DotNet</td>
<td><a class="reference external" href="https://github.com/ahmetalpbalkan/Docker.DotNet">https://github.com/ahmetalpbalkan/Docker.DotNet</a></td>
</tr>
<tr>
<td>C++</td>
<td>lasote/docker_client</td>
<td><a class="reference external" href="https://github.com/lasote/docker_client">https://github.com/lasote/docker_client</a></td>
</tr>
<tr>
<td>Erlang</td>
<td>erldocker</td>
<td><a class="reference external" href="https://github.com/proger/erldocker">https://github.com/proger/erldocker</a></td>
</tr>
<tr>
<td>Dart</td>
<td>bwu_docker</td>
<td><a class="reference external" href="https://github.com/bwu-dart/bwu_docker">https://github.com/bwu-dart/bwu_docker</a></td>
</tr>
<tr>
<td>Go</td>
<td>Docker Go client</td>
<td><a class="reference external" href="https://godoc.org/github.com/docker/docker/client">https://godoc.org/github.com/docker/docker/client</a></td>
</tr>
<tr>
<td>Gradle</td>
<td>gradle-docker-plugin</td>
<td><a class="reference external" href="https://github.com/gesellix/gradle-docker-plugin">https://github.com/gesellix/gradle-docker-plugin</a></td>
</tr>
<tr>
<td>Groovy</td>
<td>docker-client</td>
<td><a class="reference external" href="https://github.com/gesellix/docker-client">https://github.com/gesellix/docker-client</a></td>
</tr>
<tr>
<td>Haskell</td>
<td>docker-hs</td>
<td><a class="reference external" href="https://github.com/denibertovic/docker-hs">https://github.com/denibertovic/docker-hs</a></td>
</tr>
<tr>
<td>HTML (Web Components)</td>
<td>docker-elements</td>
<td><a class="reference external" href="https://github.com/kapalhq/docker-elements">https://github.com/kapalhq/docker-elements</a></td>
</tr>
<tr>
<td>Java</td>
<td>docker-java</td>
<td><a class="reference external" href="https://github.com/docker-java/docker-java">https://github.com/docker-java/docker-java</a></td>
</tr>
<tr>
<td>Java</td>
<td>docker-client</td>
<td><a class="reference external" href="https://github.com/spotify/docker-client">https://github.com/spotify/docker-client</a></td>
</tr>
<tr>
<td>NodeJS</td>
<td>dockerode</td>
<td><a class="reference external" href="https://github.com/apocas/dockerode">https://github.com/apocas/dockerode</a></td>
</tr>
<tr>
<td>Perl</td>
<td>Eixo::Docker</td>
<td><a class="reference external" href="https://github.com/alambike/eixo-docker">https://github.com/alambike/eixo-docker</a></td>
</tr>
<tr>
<td>PHP</td>
<td>Docker-PHP</td>
<td><a class="reference external" href="https://github.com/docker-php/docker-php">https://github.com/docker-php/docker-php</a></td>
</tr>
<tr>
<td>Python</td>
<td>docker-py</td>
<td><a class="reference external" href="https://github.com/docker/docker-py">https://github.com/docker/docker-py</a></td>
</tr>
<tr>
<td>Ruby</td>
<td>docker-api</td>
<td><a class="reference external" href="https://github.com/swipely/docker-api">https://github.com/swipely/docker-api</a></td>
</tr>
<tr>
<td>Rust</td>
<td>docker-rust</td>
<td><a class="reference external" href="https://github.com/abh1nav/docker-rust">https://github.com/abh1nav/docker-rust</a></td>
</tr>
<tr>
<td>Rust</td>
<td>shiplift</td>
<td><a class="reference external" href="https://github.com/softprops/shiplift">https://github.com/softprops/shiplift</a></td>
</tr>
<tr>
<td>Scala</td>
<td>tugboat</td>
<td><a class="reference external" href="https://github.com/softprops/tugboat">https://github.com/softprops/tugboat</a></td>
</tr>
<tr>
<td>Scala</td>
<td>reactive-docker</td>
<td><a class="reference external" href="https://github.com/almoehi/reactive-docker">https://github.com/almoehi/reactive-docker</a></td>
</tr>
</tbody>
</table>

Some files were not shown because too many files have changed in this diff.