package service // import "github.com/docker/docker/integration/service"

import (
	"context"
	"fmt"
	"io/ioutil"
	"strings"
	"testing"
	"time"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	swarmtypes "github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/api/types/versions"
	"github.com/docker/docker/client"
	"github.com/docker/docker/errdefs"
	"github.com/docker/docker/integration/internal/network"
	"github.com/docker/docker/integration/internal/swarm"
	"github.com/docker/docker/testutil/daemon"
	"gotest.tools/v3/assert"
	is "gotest.tools/v3/assert/cmp"
	"gotest.tools/v3/poll"
	"gotest.tools/v3/skip"
)

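// TestServiceCreateInit checks that the init setting on a service's
// ContainerSpec is honoured: when no value is set, the container falls back to
// the daemon's default, and an explicit true/false on the service overrides
// it. The sub-tests run the same checks against a daemon with init disabled
// and one with init enabled.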
func TestServiceCreateInit(t *testing.T) {
	defer setupTest(t)()
	t.Run("daemonInitDisabled", testServiceCreateInit(false))
	t.Run("daemonInitEnabled", testServiceCreateInit(true))
}

func testServiceCreateInit(daemonEnabled bool) func(t *testing.T) {
	return func(t *testing.T) {
		var ops = []daemon.Option{}

		if daemonEnabled {
			ops = append(ops, daemon.WithInit())
		}
		d := swarm.NewSwarm(t, testEnv, ops...)
		defer d.Stop(t)
		client := d.NewClientT(t)
		defer client.Close()

		booleanTrue := true
		booleanFalse := false

		serviceID := swarm.CreateService(t, d)
		poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll)
		i := inspectServiceContainer(t, client, serviceID)
		// HostConfig.Init == nil means that the container delegates to the daemon's configuration.
		assert.Check(t, i.HostConfig.Init == nil)

		serviceID = swarm.CreateService(t, d, swarm.ServiceWithInit(&booleanTrue))
		poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll)
		i = inspectServiceContainer(t, client, serviceID)
		assert.Check(t, is.Equal(true, *i.HostConfig.Init))

		serviceID = swarm.CreateService(t, d, swarm.ServiceWithInit(&booleanFalse))
		poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll)
		i = inspectServiceContainer(t, client, serviceID)
		assert.Check(t, is.Equal(false, *i.HostConfig.Init))
	}
}

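// inspectServiceContainer returns the inspect output of the single container
// backing the given service, found via the com.docker.swarm.service.id label.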
func inspectServiceContainer(t *testing.T, client client.APIClient, serviceID string) types.ContainerJSON {
	t.Helper()
	filter := filters.NewArgs()
	filter.Add("label", fmt.Sprintf("com.docker.swarm.service.id=%s", serviceID))
	containers, err := client.ContainerList(context.Background(), types.ContainerListOptions{Filters: filter})
	assert.NilError(t, err)
	assert.Check(t, is.Len(containers, 1))

	i, err := client.ContainerInspect(context.Background(), containers[0].ID)
	assert.NilError(t, err)
	return i
}

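// TestCreateServiceMultipleTimes creates and removes the same service twice on
// a single overlay network, then verifies that the network itself can be
// removed once the service's tasks are gone.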
func TestCreateServiceMultipleTimes(t *testing.T) {
	skip.If(t, testEnv.DaemonInfo.OSType == "windows")
	defer setupTest(t)()
	d := swarm.NewSwarm(t, testEnv)
	defer d.Stop(t)
	client := d.NewClientT(t)
	defer client.Close()
	ctx := context.Background()

	overlayName := "overlay1_" + t.Name()
	overlayID := network.CreateNoError(ctx, t, client, overlayName,
		network.WithCheckDuplicate(),
		network.WithDriver("overlay"),
	)

	var instances uint64 = 4

	serviceName := "TestService_" + t.Name()
	serviceSpec := []swarm.ServiceSpecOpt{
		swarm.ServiceWithReplicas(instances),
		swarm.ServiceWithName(serviceName),
		swarm.ServiceWithNetwork(overlayName),
	}

	serviceID := swarm.CreateService(t, d, serviceSpec...)
	poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll)

	_, _, err := client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{})
	assert.NilError(t, err)

	err = client.ServiceRemove(context.Background(), serviceID)
	assert.NilError(t, err)

	poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll)

	serviceID2 := swarm.CreateService(t, d, serviceSpec...)
	poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID2, instances), swarm.ServicePoll)

	err = client.ServiceRemove(context.Background(), serviceID2)
	assert.NilError(t, err)

	// Counter-intuitively, we can't just wait until the service reports no
	// tasks: tasks may briefly exist but not show up while they are in the
	// process of being deallocated. To cover that window, we retry the
	// network removal a few times, giving the tasks time to be deallocated.
	poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID2), swarm.ServicePoll)

	for retry := 0; retry < 5; retry++ {
		err = client.NetworkRemove(context.Background(), overlayID)
		// TODO(dperny): using strings.Contains for error checking is awful,
		// but so is the fact that swarm functions don't return errdefs errors.
		// I don't have time at this moment to fix the latter, so I guess I'll
		// go with the former.
		//
		// The full error we're looking for is something like this:
		//
		// Error response from daemon: rpc error: code = FailedPrecondition desc = network %v is in use by task %v
		//
		// The safest way to catch this, I think, is to match on "is in use
		// by", as this is an uninterrupted string that best identifies this
		// error.
		if err == nil || !strings.Contains(err.Error(), "is in use by") {
			// if there is no error, or the error isn't this kind of error,
			// stop retrying; the assertion below will then pass or fail the
			// test accordingly.
			break
		}
	}
	assert.NilError(t, err)

	poll.WaitOn(t, network.IsRemoved(context.Background(), client, overlayID), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second))
}

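// TestCreateServiceConflict verifies that creating a second service with an
// already-used name fails with a conflict error.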
func TestCreateServiceConflict(t *testing.T) {
	skip.If(t, testEnv.DaemonInfo.OSType == "windows")
	defer setupTest(t)()
	d := swarm.NewSwarm(t, testEnv)
	defer d.Stop(t)
	c := d.NewClientT(t)
	defer c.Close()
	ctx := context.Background()

	serviceName := "TestService_" + t.Name()
	serviceSpec := []swarm.ServiceSpecOpt{
		swarm.ServiceWithName(serviceName),
	}

	swarm.CreateService(t, d, serviceSpec...)

	spec := swarm.CreateServiceSpec(t, serviceSpec...)
	_, err := c.ServiceCreate(ctx, spec, types.ServiceCreateOptions{})
	assert.Check(t, errdefs.IsConflict(err))
	assert.ErrorContains(t, err, "service "+serviceName+" already exists")
}

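// TestCreateServiceMaxReplicas creates a service with a max-replicas placement
// limit equal to its replica count and checks that all replicas reach the
// running state.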
func TestCreateServiceMaxReplicas(t *testing.T) {
	defer setupTest(t)()
	d := swarm.NewSwarm(t, testEnv)
	defer d.Stop(t)
	client := d.NewClientT(t)
	defer client.Close()

	var maxReplicas uint64 = 2
	serviceSpec := []swarm.ServiceSpecOpt{
		swarm.ServiceWithReplicas(maxReplicas),
		swarm.ServiceWithMaxReplicas(maxReplicas),
	}

	serviceID := swarm.CreateService(t, d, serviceSpec...)
	poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, maxReplicas), swarm.ServicePoll)

	_, _, err := client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{})
	assert.NilError(t, err)
}

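// TestCreateWithDuplicateNetworkNames creates several networks that share one
// name and checks that a service referencing that name is attached to the
// overlay network rather than to one of the bridge networks.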
func TestCreateWithDuplicateNetworkNames(t *testing.T) {
	skip.If(t, testEnv.DaemonInfo.OSType == "windows")
	defer setupTest(t)()
	d := swarm.NewSwarm(t, testEnv)
	defer d.Stop(t)
	client := d.NewClientT(t)
	defer client.Close()
	ctx := context.Background()

	name := "foo_" + t.Name()
	n1 := network.CreateNoError(ctx, t, client, name, network.WithDriver("bridge"))
	n2 := network.CreateNoError(ctx, t, client, name, network.WithDriver("bridge"))

	// Duplicate of the same name, but with a different driver
	n3 := network.CreateNoError(ctx, t, client, name, network.WithDriver("overlay"))

	// Create a service attached to the duplicated network name
	var instances uint64 = 1
	serviceName := "top_" + t.Name()
	serviceID := swarm.CreateService(t, d,
		swarm.ServiceWithReplicas(instances),
		swarm.ServiceWithName(serviceName),
		swarm.ServiceWithNetwork(name),
	)

	poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll)

	resp, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{})
	assert.NilError(t, err)
	assert.Check(t, is.Equal(n3, resp.Spec.TaskTemplate.Networks[0].Target))

	// Remove the service, and wait for its tasks to be removed
	err = client.ServiceRemove(ctx, serviceID)
	assert.NilError(t, err)
	poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll)

	// Remove networks
	err = client.NetworkRemove(context.Background(), n3)
	assert.NilError(t, err)

	err = client.NetworkRemove(context.Background(), n2)
	assert.NilError(t, err)

	err = client.NetworkRemove(context.Background(), n1)
	assert.NilError(t, err)

	// Make sure the networks have been destroyed.
	poll.WaitOn(t, network.IsRemoved(context.Background(), client, n3), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second))
	poll.WaitOn(t, network.IsRemoved(context.Background(), client, n2), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second))
	poll.WaitOn(t, network.IsRemoved(context.Background(), client, n1), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second))
}

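// TestCreateServiceSecretFileMode creates a service with a secret mounted with
// mode 0777 and checks, via the container's logs, that the file is created
// with those permissions.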
func TestCreateServiceSecretFileMode(t *testing.T) {
	skip.If(t, testEnv.DaemonInfo.OSType == "windows")
	defer setupTest(t)()
	d := swarm.NewSwarm(t, testEnv)
	defer d.Stop(t)
	client := d.NewClientT(t)
	defer client.Close()

	ctx := context.Background()
	secretName := "TestSecret_" + t.Name()
	secretResp, err := client.SecretCreate(ctx, swarmtypes.SecretSpec{
		Annotations: swarmtypes.Annotations{
			Name: secretName,
		},
		Data: []byte("TESTSECRET"),
	})
	assert.NilError(t, err)

	var instances uint64 = 1
	serviceName := "TestService_" + t.Name()
	serviceID := swarm.CreateService(t, d,
		swarm.ServiceWithReplicas(instances),
		swarm.ServiceWithName(serviceName),
		swarm.ServiceWithCommand([]string{"/bin/sh", "-c", "ls -l /etc/secret || /bin/top"}),
		swarm.ServiceWithSecret(&swarmtypes.SecretReference{
			File: &swarmtypes.SecretReferenceFileTarget{
				Name: "/etc/secret",
				UID:  "0",
				GID:  "0",
				Mode: 0777,
			},
			SecretID:   secretResp.ID,
			SecretName: secretName,
		}),
	)

	poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll)

	filter := filters.NewArgs()
	filter.Add("service", serviceID)
	tasks, err := client.TaskList(ctx, types.TaskListOptions{
		Filters: filter,
	})
	assert.NilError(t, err)
	assert.Check(t, is.Equal(len(tasks), 1))

	body, err := client.ContainerLogs(ctx, tasks[0].Status.ContainerStatus.ContainerID, types.ContainerLogsOptions{
		ShowStdout: true,
	})
	assert.NilError(t, err)
	defer body.Close()

	content, err := ioutil.ReadAll(body)
	assert.NilError(t, err)
	assert.Check(t, is.Contains(string(content), "-rwxrwxrwx"))

	err = client.ServiceRemove(ctx, serviceID)
	assert.NilError(t, err)
	poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll)

	err = client.SecretRemove(ctx, secretName)
	assert.NilError(t, err)
}

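// TestCreateServiceConfigFileMode is the config counterpart of
// TestCreateServiceSecretFileMode: it mounts a config with mode 0777 and
// checks the resulting file permissions in the container's logs.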
func TestCreateServiceConfigFileMode(t *testing.T) {
	skip.If(t, testEnv.DaemonInfo.OSType == "windows")
	defer setupTest(t)()
	d := swarm.NewSwarm(t, testEnv)
	defer d.Stop(t)
	client := d.NewClientT(t)
	defer client.Close()

	ctx := context.Background()
	configName := "TestConfig_" + t.Name()
	configResp, err := client.ConfigCreate(ctx, swarmtypes.ConfigSpec{
		Annotations: swarmtypes.Annotations{
			Name: configName,
		},
		Data: []byte("TESTCONFIG"),
	})
	assert.NilError(t, err)

	var instances uint64 = 1
	serviceName := "TestService_" + t.Name()
	serviceID := swarm.CreateService(t, d,
		swarm.ServiceWithName(serviceName),
		swarm.ServiceWithCommand([]string{"/bin/sh", "-c", "ls -l /etc/config || /bin/top"}),
		swarm.ServiceWithReplicas(instances),
		swarm.ServiceWithConfig(&swarmtypes.ConfigReference{
			File: &swarmtypes.ConfigReferenceFileTarget{
				Name: "/etc/config",
				UID:  "0",
				GID:  "0",
				Mode: 0777,
			},
			ConfigID:   configResp.ID,
			ConfigName: configName,
		}),
	)

	poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances))

	filter := filters.NewArgs()
	filter.Add("service", serviceID)
	tasks, err := client.TaskList(ctx, types.TaskListOptions{
		Filters: filter,
	})
	assert.NilError(t, err)
	assert.Check(t, is.Equal(len(tasks), 1))

	body, err := client.ContainerLogs(ctx, tasks[0].Status.ContainerStatus.ContainerID, types.ContainerLogsOptions{
		ShowStdout: true,
	})
	assert.NilError(t, err)
	defer body.Close()

	content, err := ioutil.ReadAll(body)
	assert.NilError(t, err)
	assert.Check(t, is.Contains(string(content), "-rwxrwxrwx"))

	err = client.ServiceRemove(ctx, serviceID)
	assert.NilError(t, err)
	poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID))

	err = client.ConfigRemove(ctx, configName)
	assert.NilError(t, err)
}

// TestCreateServiceSysctls tests that a service created with sysctl options in
// the ContainerSpec correctly applies those options.
//
// To test this, we're going to create a service with the sysctl option
//
// {"net.ipv4.ip_nonlocal_bind": "0"}
//
// We'll get the service's tasks to get the container ID, and then we'll
// inspect the container. If the output of the container inspect contains the
// sysctl option with the correct value, we can assume that the sysctl has been
// plumbed correctly.
//
// Next, we'll remove that service and create a new service with that option
// set to 1. This means that no matter what the default is, we can be confident
// that the sysctl option is applied as intended.
//
// Additionally, we'll do service and task inspects to verify that the inspect
// output includes the desired sysctl option.
//
// We're using net.ipv4.ip_nonlocal_bind because it's something that I'm fairly
// confident won't be modified by the container runtime, and won't blow
// anything up in the test environment.
func TestCreateServiceSysctls(t *testing.T) {
	skip.If(
		t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.40"),
		"setting service sysctls is unsupported before api v1.40",
	)

	defer setupTest(t)()
	d := swarm.NewSwarm(t, testEnv)
	defer d.Stop(t)
	client := d.NewClientT(t)
	defer client.Close()

	ctx := context.Background()

	// run this block twice, so that no matter what the default value of
	// net.ipv4.ip_nonlocal_bind is, we can verify that setting the sysctl
	// option works
	for _, expected := range []string{"0", "1"} {
		// store the map we're going to be using everywhere.
		expectedSysctls := map[string]string{"net.ipv4.ip_nonlocal_bind": expected}

		// Create the service with the sysctl options
		var instances uint64 = 1
		serviceID := swarm.CreateService(t, d,
			swarm.ServiceWithSysctls(expectedSysctls),
		)

		// wait for the service to converge to 1 running task as expected
		poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances))

		// we're going to check 3 things:
		//
		// 1. Does the container, when inspected, have the sysctl option set?
		// 2. Does the task have the sysctl in the spec?
		// 3. Does the service have the sysctl in the spec?
		//
		// if all 3 of these things are true, we know that the sysctl has been
		// plumbed correctly through the engine.
		//
		// We don't actually have to get inside the container and check its
		// logs or anything. If we see the sysctl set on the container inspect,
		// we know that the sysctl is plumbed correctly. Everything below that
		// level has been tested elsewhere. (Thanks @thaJeztah, because an
		// earlier version of this test had to get container logs and was much
		// more complex.)

		// get all of the tasks of the service, so we can get the container
		filter := filters.NewArgs()
		filter.Add("service", serviceID)
		tasks, err := client.TaskList(ctx, types.TaskListOptions{
			Filters: filter,
		})
		assert.NilError(t, err)
		assert.Check(t, is.Equal(len(tasks), 1))

		// verify that the container has the sysctl option set
		ctnr, err := client.ContainerInspect(ctx, tasks[0].Status.ContainerStatus.ContainerID)
		assert.NilError(t, err)
		assert.DeepEqual(t, ctnr.HostConfig.Sysctls, expectedSysctls)

		// verify that the task has the sysctl option set in the task object
		assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.Sysctls, expectedSysctls)

		// verify that the service also has the sysctl set in the spec.
		service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{})
		assert.NilError(t, err)
		assert.DeepEqual(t,
			service.Spec.TaskTemplate.ContainerSpec.Sysctls, expectedSysctls,
		)
	}
}

// TestCreateServiceCapabilities tests that a service created with a
// capabilities option in the ContainerSpec correctly applies that option.
//
// To test this, we're going to create a service with the capabilities option
//
// []string{"CAP_NET_RAW", "CAP_SYS_CHROOT"}
//
// We'll get the service's tasks to get the container ID, and then we'll
// inspect the container. If the output of the container inspect contains the
// capabilities option with the correct value, we can assume that the
// capabilities have been plumbed correctly.
func TestCreateServiceCapabilities(t *testing.T) {
	skip.If(
		t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.41"),
		"setting service capabilities is unsupported before api v1.41",
	)

	defer setupTest(t)()
	d := swarm.NewSwarm(t, testEnv)
	defer d.Stop(t)
	client := d.NewClientT(t)
	defer client.Close()

	ctx := context.Background()

	// store the capabilities list we're going to be using everywhere.
	expectedCapabilities := []string{"CAP_NET_RAW", "CAP_SYS_CHROOT"}

	// Create the service with the capabilities options
	var instances uint64 = 1
	serviceID := swarm.CreateService(t, d,
		swarm.ServiceWithCapabilities(expectedCapabilities),
	)

	// wait for the service to converge to 1 running task as expected
	poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances))

	// we're going to check 3 things:
	//
	// 1. Does the container, when inspected, have the capabilities option set?
	// 2. Does the task have the capabilities in the spec?
	// 3. Does the service have the capabilities in the spec?
	//
	// if all 3 of these things are true, we know that the capabilities have
	// been plumbed correctly through the engine.
	//
	// We don't actually have to get inside the container and check its
	// logs or anything. If we see the capabilities set on the container
	// inspect, we know that the capabilities are plumbed correctly. Everything
	// below that level has been tested elsewhere.

	// get all of the tasks of the service, so we can get the container
	filter := filters.NewArgs()
	filter.Add("service", serviceID)
	tasks, err := client.TaskList(ctx, types.TaskListOptions{
		Filters: filter,
	})
	assert.NilError(t, err)
	assert.Check(t, is.Equal(len(tasks), 1))

	// verify that the container has the capabilities option set
	ctnr, err := client.ContainerInspect(ctx, tasks[0].Status.ContainerStatus.ContainerID)
	assert.NilError(t, err)
	assert.DeepEqual(t, ctnr.HostConfig.Capabilities, expectedCapabilities)

	// verify that the task has the capabilities option set in the task object
	assert.DeepEqual(t, tasks[0].Spec.ContainerSpec.Capabilities, expectedCapabilities)

	// verify that the service also has the capabilities set in the spec.
	service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{})
	assert.NilError(t, err)
	assert.DeepEqual(t,
		service.Spec.TaskTemplate.ContainerSpec.Capabilities, expectedCapabilities,
	)
}