Merge pull request #37701 from dperny/add-swarmkit-sysctl-support

Add support for sysctl options in services

Commit 9f296d1e6f
8 changed files with 149 additions and 4 deletions
@@ -182,8 +182,17 @@ func (sr *swarmRouter) createService(ctx context.Context, w http.ResponseWriter,
 	encodedAuth := r.Header.Get("X-Registry-Auth")
 	cliVersion := r.Header.Get("version")
 	queryRegistry := false
-	if cliVersion != "" && versions.LessThan(cliVersion, "1.30") {
-		queryRegistry = true
+	if cliVersion != "" {
+		if versions.LessThan(cliVersion, "1.30") {
+			queryRegistry = true
+		}
+		if versions.LessThan(cliVersion, "1.39") {
+			if service.TaskTemplate.ContainerSpec != nil {
+				// Sysctls for docker swarm services weren't supported before
+				// API version 1.39
+				service.TaskTemplate.ContainerSpec.Sysctls = nil
+			}
+		}
 	}

 	resp, err := sr.backend.CreateService(service, encodedAuth, queryRegistry)
@@ -216,8 +225,17 @@ func (sr *swarmRouter) updateService(ctx context.Context, w http.ResponseWriter,
 	flags.Rollback = r.URL.Query().Get("rollback")
 	cliVersion := r.Header.Get("version")
 	queryRegistry := false
-	if cliVersion != "" && versions.LessThan(cliVersion, "1.30") {
-		queryRegistry = true
+	if cliVersion != "" {
+		if versions.LessThan(cliVersion, "1.30") {
+			queryRegistry = true
+		}
+		if versions.LessThan(cliVersion, "1.39") {
+			if service.TaskTemplate.ContainerSpec != nil {
+				// Sysctls for docker swarm services weren't supported before
+				// API version 1.39
+				service.TaskTemplate.ContainerSpec.Sysctls = nil
+			}
+		}
 	}

 	resp, err := sr.backend.UpdateService(vars["id"], version, service, flags, queryRegistry)
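Both handlers apply the same downgrade rule: when the calling client negotiated an API version older than 1.39, any Sysctls in the submitted ContainerSpec are dropped before the spec reaches the backend. As a rough illustration only (this helper is hypothetical and not part of the change), the same versions check can be made on the client side so a caller fails loudly rather than having the field silently discarded:

package example

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/api/types/versions"
	"github.com/docker/docker/client"
)

// setServiceSysctls is a hypothetical client-side guard: it refuses to set
// Sysctls when the negotiated daemon API predates 1.39, where the daemon
// would strip the field as the handlers above show.
func setServiceSysctls(ctx context.Context, cli *client.Client, spec *swarm.ServiceSpec, sysctls map[string]string) error {
	// Negotiate down to the daemon's API version before checking it.
	cli.NegotiateAPIVersion(ctx)
	if versions.LessThan(cli.ClientVersion(), "1.39") {
		return fmt.Errorf("daemon API %s does not support service sysctls (requires 1.39 or newer)", cli.ClientVersion())
	}
	if spec.TaskTemplate.ContainerSpec == nil {
		spec.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{}
	}
	spec.TaskTemplate.ContainerSpec.Sysctls = sysctls
	return nil
}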
@@ -2750,6 +2750,18 @@ definitions:
           description: "Run an init inside the container that forwards signals and reaps processes. This field is omitted if empty, and the default (as configured on the daemon) is used."
           type: "boolean"
           x-nullable: true
+        Sysctls:
+          description: |
+            Set kernel namespaced parameters (sysctls) in the container.
+            The Sysctls option on services accepts the same sysctls as
+            are supported on containers. Note that while the same sysctls are
+            supported, no guarantees or checks are made about their
+            suitability for a clustered environment, and it's up to the user
+            to determine whether a given sysctl will work properly in a
+            Service.
+          type: "object"
+          additionalProperties:
+            type: "string"
       NetworkAttachmentSpec:
         description: |
           Read-only spec type for non-swarm containers attached to swarm overlay
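The swagger text above maps onto the Go API types changed in the next hunk: Sysctls is a plain map of sysctl name to value on the task's ContainerSpec. A minimal sketch of the resulting object shape (the image name and sysctl values are arbitrary examples, not taken from the diff):

package example

import "github.com/docker/docker/api/types/swarm"

// serviceSpecWithSysctls sketches where the new field lives: on the
// ContainerSpec inside the service's task template.
func serviceSpecWithSysctls() swarm.ServiceSpec {
	return swarm.ServiceSpec{
		TaskTemplate: swarm.TaskSpec{
			ContainerSpec: &swarm.ContainerSpec{
				Image: "nginx:alpine",
				Sysctls: map[string]string{
					"net.ipv4.ip_nonlocal_bind": "1",
					"net.core.somaxconn":        "1024",
				},
			},
		},
	}
}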
@@ -71,4 +71,5 @@ type ContainerSpec struct {
 	Secrets   []*SecretReference  `json:",omitempty"`
 	Configs   []*ConfigReference  `json:",omitempty"`
 	Isolation container.Isolation `json:",omitempty"`
+	Sysctls   map[string]string   `json:",omitempty"`
 }
@@ -36,6 +36,7 @@ func containerSpecFromGRPC(c *swarmapi.ContainerSpec) *types.ContainerSpec {
 		Configs:   configReferencesFromGRPC(c.Configs),
 		Isolation: IsolationFromGRPC(c.Isolation),
 		Init:      initFromGRPC(c.Init),
+		Sysctls:   c.Sysctls,
 	}

 	if c.DNSConfig != nil {
@@ -251,6 +252,7 @@ func containerToGRPC(c *types.ContainerSpec) (*swarmapi.ContainerSpec, error) {
 		Configs:   configReferencesToGRPC(c.Configs),
 		Isolation: isolationToGRPC(c.Isolation),
 		Init:      initToGRPC(c.Init),
+		Sysctls:   c.Sysctls,
 	}

 	if c.DNSConfig != nil {
@@ -364,6 +364,7 @@ func (c *containerConfig) hostConfig() *enginecontainer.HostConfig {
 		ReadonlyRootfs: c.spec().ReadOnly,
 		Isolation:      c.isolation(),
 		Init:           c.init(),
+		Sysctls:        c.spec().Sysctls,
 	}

 	if c.spec().DNSConfig != nil {
@@ -30,6 +30,12 @@ keywords: "API, Docker, rcli, REST, documentation"
   on the node.label. The format of the label filter is `node.label=<key>`/`node.label=<key>=<value>`
   to return those with the specified labels, or `node.label!=<key>`/`node.label!=<key>=<value>`
   to return those without the specified labels.
+* `GET /services` now returns `Sysctls` as part of the `ContainerSpec`.
+* `GET /services/{id}` now returns `Sysctls` as part of the `ContainerSpec`.
+* `POST /services/create` now accepts `Sysctls` as part of the `ContainerSpec`.
+* `POST /services/{id}/update` now accepts `Sysctls` as part of the `ContainerSpec`.
+* `GET /tasks` now returns `Sysctls` as part of the `ContainerSpec`.
+* `GET /tasks/{id}` now returns `Sysctls` as part of the `ContainerSpec`.

 ## V1.38 API changes
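To make the listed endpoint changes concrete, here is a hedged end-to-end sketch using the Go client against a daemon that supports API 1.39 or newer; the service name, image, and sysctl value are illustrative and error handling is trimmed to the essentials:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()

	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		log.Fatal(err)
	}
	// Pick up the daemon's API version; Sysctls requires 1.39 or newer.
	cli.NegotiateAPIVersion(ctx)

	spec := swarm.ServiceSpec{
		Annotations: swarm.Annotations{Name: "web"},
		TaskTemplate: swarm.TaskSpec{
			ContainerSpec: &swarm.ContainerSpec{
				Image:   "nginx:alpine",
				Sysctls: map[string]string{"net.ipv4.ip_nonlocal_bind": "1"},
			},
		},
	}

	// POST /services/create now accepts Sysctls in the ContainerSpec.
	created, err := cli.ServiceCreate(ctx, spec, types.ServiceCreateOptions{})
	if err != nil {
		log.Fatal(err)
	}

	// GET /services/{id} now returns the Sysctls as part of the spec.
	service, _, err := cli.ServiceInspectWithRaw(ctx, created.ID, types.ServiceInspectOptions{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(service.Spec.TaskTemplate.ContainerSpec.Sysctls)
}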
@@ -159,6 +159,14 @@ func ServiceWithEndpoint(endpoint *swarmtypes.EndpointSpec) ServiceSpecOpt {
 	}
 }

+// ServiceWithSysctls sets the Sysctls option of the service's ContainerSpec.
+func ServiceWithSysctls(sysctls map[string]string) ServiceSpecOpt {
+	return func(spec *swarmtypes.ServiceSpec) {
+		ensureContainerSpec(spec)
+		spec.TaskTemplate.ContainerSpec.Sysctls = sysctls
+	}
+}
+
 // GetRunningTasks gets the list of running tasks for a service
 func GetRunningTasks(t *testing.T, d *daemon.Daemon, serviceID string) []swarmtypes.Task {
 	t.Helper()
@@ -10,6 +10,7 @@ import (
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/filters"
 	swarmtypes "github.com/docker/docker/api/types/swarm"
+	"github.com/docker/docker/api/types/versions"
 	"github.com/docker/docker/client"
 	"github.com/docker/docker/integration/internal/network"
 	"github.com/docker/docker/integration/internal/swarm"
@@ -17,6 +18,7 @@ import (
 	"gotest.tools/assert"
 	is "gotest.tools/assert/cmp"
 	"gotest.tools/poll"
+	"gotest.tools/skip"
 )

 func TestServiceCreateInit(t *testing.T) {
@@ -309,6 +311,101 @@ func TestCreateServiceConfigFileMode(t *testing.T) {
 	assert.NilError(t, err)
 }

+// TestCreateServiceSysctls tests that a service created with sysctl options in
+// the ContainerSpec correctly applies those options.
+//
+// To test this, we're going to create a service with the sysctl option
+//
+//   {"net.ipv4.ip_nonlocal_bind": "0"}
+//
+// We'll get the service's tasks to get the container ID, and then we'll
+// inspect the container. If the output of the container inspect contains the
+// sysctl option with the correct value, we can assume that the sysctl has been
+// plumbed correctly.
+//
+// Next, we'll remove that service and create a new service with that option
+// set to 1. This means that no matter what the default is, we can be confident
+// that the sysctl option is applying as intended.
+//
+// Additionally, we'll do service and task inspects to verify that the inspect
+// output includes the desired sysctl option.
+//
+// We're using net.ipv4.ip_nonlocal_bind because it's something that I'm fairly
+// confident won't be modified by the container runtime, and won't blow
+// anything up in the test environment.
+func TestCreateServiceSysctls(t *testing.T) {
+	skip.If(
+		t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.39"),
+		"setting service sysctls is unsupported before api v1.39",
+	)
+
+	defer setupTest(t)()
+	d := swarm.NewSwarm(t, testEnv)
+	defer d.Stop(t)
+	client := d.NewClientT(t)
+	defer client.Close()
+
+	ctx := context.Background()
+
+	// run this block twice, so that no matter what the default value of
+	// net.ipv4.ip_nonlocal_bind is, we can verify that setting the sysctl
+	// options works
+	for _, expected := range []string{"0", "1"} {
+
+		// store the map we're going to be using everywhere.
+		expectedSysctls := map[string]string{"net.ipv4.ip_nonlocal_bind": expected}
+
+		// Create the service with the sysctl options
+		var instances uint64 = 1
+		serviceID := swarm.CreateService(t, d,
+			swarm.ServiceWithSysctls(expectedSysctls),
+		)
+
+		// wait for the service to converge to 1 running task as expected
+		poll.WaitOn(t, serviceRunningTasksCount(client, serviceID, instances))
+
+		// we're going to check 3 things:
+		//
+		//   1. Does the container, when inspected, have the sysctl option set?
+		//   2. Does the task have the sysctl in the spec?
+		//   3. Does the service have the sysctl in the spec?
+		//
+		// if all 3 of these things are true, we know that the sysctl has been
+		// plumbed correctly through the engine.
+		//
+		// We don't actually have to get inside the container and check its
+		// logs or anything. If we see the sysctl set on the container inspect,
+		// we know that the sysctl is plumbed correctly. everything below that
+		// level has been tested elsewhere. (thanks @thaJeztah, because an
+		// earlier version of this test had to get container logs and was much
+		// more complex)
+
+		// get all of the tasks of the service, so we can get the container
+		filter := filters.NewArgs()
+		filter.Add("service", serviceID)
+		tasks, err := client.TaskList(ctx, types.TaskListOptions{
+			Filters: filter,
+		})
+		assert.NilError(t, err)
+		assert.Check(t, is.Equal(len(tasks), 1))
+
+		// verify that the container has the sysctl option set
+		ctnr, err := client.ContainerInspect(ctx, tasks[0].Status.ContainerStatus.ContainerID)
+		assert.NilError(t, err)
+		assert.DeepEqual(t, ctnr.HostConfig.Sysctls, expectedSysctls)
+
+		// verify that the task has the sysctl option set in the task object
+		assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.Sysctls, expectedSysctls)
+
+		// verify that the service also has the sysctl set in the spec.
+		service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{})
+		assert.NilError(t, err)
+		assert.DeepEqual(t,
+			service.Spec.TaskTemplate.ContainerSpec.Sysctls, expectedSysctls,
+		)
+	}
+}
+
 func serviceRunningTasksCount(client client.ServiceAPIClient, serviceID string, instances uint64) func(log poll.LogT) poll.Result {
 	return func(log poll.LogT) poll.Result {
 		filter := filters.NewArgs()