support cluster events

Signed-off-by: Dong Chen <dongluo.chen@docker.com>
Dong Chen 2017-04-02 15:21:56 -07:00
parent 6f6ee6fd04
commit 59d45c384a
10 changed files with 514 additions and 11 deletions


@@ -13,6 +13,12 @@ const (
PluginEventType = "plugin"
// VolumeEventType is the event type that volumes generate
VolumeEventType = "volume"
// ServiceEventType is the event type that services generate
ServiceEventType = "service"
// NodeEventType is the event type that nodes generate
NodeEventType = "node"
// SecretEventType is the event type that secrets generate
SecretEventType = "secret"
)
// Actor describes something that generates events,
@@ -36,6 +42,8 @@ type Message struct {
Type string
Action string
Actor Actor
// Engine events have local scope; cluster events have swarm scope.
Scope string `json:"scope,omitempty"`
Time int64 `json:"time,omitempty"`
TimeNano int64 `json:"timeNano,omitempty"`
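For API consumers, the new Scope field pairs with the scope filter added later in this change. Below is a minimal sketch of a client that streams only swarm-scope events; it assumes the standard Go API client (client.NewEnvClient and cli.Events are not part of this diff).

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func main() {
	// Hypothetical consumer: stream only swarm-scope (cluster) events.
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	f := filters.NewArgs()
	f.Add("scope", "swarm") // the scope filter introduced by this change
	msgs, errs := cli.Events(context.Background(), types.EventsOptions{Filters: f})
	for {
		select {
		case m := <-msgs:
			fmt.Printf("%s %s %s scope=%s\n", m.Type, m.Action, m.Actor.ID, m.Scope)
		case err := <-errs:
			fmt.Println("event stream error:", err)
			return
		}
	}
}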


@@ -1,6 +1,7 @@
package main
import (
"context"
"crypto/tls"
"fmt"
"os"
@@ -44,6 +45,7 @@ import (
"github.com/docker/docker/registry"
"github.com/docker/docker/runconfig"
"github.com/docker/go-connections/tlsconfig"
swarmapi "github.com/docker/swarmkit/api"
"github.com/spf13/pflag"
)
@@ -227,6 +229,10 @@ func (cli *DaemonCli) start(opts daemonOptions) (err error) {
name, _ := os.Hostname()
// Use a buffered channel to pass changes from the store watch API to the daemon
// The buffer lets the store watch API and daemon processing avoid blocking each other
watchStream := make(chan *swarmapi.WatchMessage, 32)
c, err := cluster.New(cluster.Config{
Root: cli.Config.Root,
Name: name,
@@ -234,6 +240,7 @@
NetworkSubnetsProvider: d,
DefaultAdvertiseAddr: cli.Config.SwarmDefaultAdvertiseAddr,
RuntimeRoot: cli.getSwarmRunRoot(),
WatchStream: watchStream,
})
if err != nil {
logrus.Fatalf("Error creating cluster component: %v", err)
@@ -261,6 +268,11 @@ func (cli *DaemonCli) start(opts daemonOptions) (err error) {
initRouter(api, d, c)
// process cluster change notifications
watchCtx, cancel := context.WithCancel(context.Background())
defer cancel()
go d.ProcessClusterNotifications(watchCtx, watchStream)
cli.setupConfigReloadTrap()
// The serve API routine never exits unless an error occurs
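The comments above give the reasoning for the buffered channel: the store watch API and the daemon should be able to make progress independently until 32 messages are queued. A self-contained sketch of the same producer/consumer pattern, using a stand-in type instead of *swarmapi.WatchMessage:

package main

import (
	"context"
	"fmt"
	"time"
)

// watchMessage stands in for *swarmapi.WatchMessage in this sketch.
type watchMessage struct{ seq int }

func main() {
	// Buffered channel: producer and consumer only block once 32 messages queue up.
	stream := make(chan *watchMessage, 32)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Producer: pushes watch messages until the context ends, mirroring watchClusterEvents.
	go func() {
		for i := 0; ; i++ {
			select {
			case <-ctx.Done():
				return
			case stream <- &watchMessage{seq: i}:
			}
			time.Sleep(10 * time.Millisecond)
		}
	}()

	// Consumer: drains the channel, mirroring ProcessClusterNotifications.
	for i := 0; i < 5; i++ {
		select {
		case <-ctx.Done():
			return
		case m := <-stream:
			fmt.Println("received watch message", m.seq)
		}
	}
}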


@@ -105,6 +105,9 @@ type Config struct {
// path to store runtime state, such as the swarm control socket
RuntimeRoot string
// WatchStream is a channel to pass watch API notifications to daemon
WatchStream chan *swarmapi.WatchMessage
}
// Cluster provides capabilities to participate in a cluster as a worker or a
@@ -118,6 +121,7 @@ type Cluster struct {
config Config
configEvent chan lncluster.ConfigEventType // todo: make this array and goroutine safe
attachers map[string]*attacher
watchStream chan *swarmapi.WatchMessage
}
// attacher manages the in-memory attachment state of a container
@@ -151,6 +155,7 @@ func New(config Config) (*Cluster, error) {
configEvent: make(chan lncluster.ConfigEventType, 10),
runtimeRoot: config.RuntimeRoot,
attachers: make(map[string]*attacher),
watchStream: config.WatchStream,
}
return c, nil
}


@@ -159,6 +159,8 @@ func (n *nodeRunner) handleControlSocketChange(ctx context.Context, node *swarmn
} else {
n.controlClient = swarmapi.NewControlClient(conn)
n.logsClient = swarmapi.NewLogsClient(conn)
// push store changes to daemon
go n.watchClusterEvents(ctx, conn)
}
}
n.grpcConn = conn
@@ -167,6 +169,48 @@ func (n *nodeRunner) handleControlSocketChange(ctx context.Context, node *swarmn
}
}
func (n *nodeRunner) watchClusterEvents(ctx context.Context, conn *grpc.ClientConn) {
client := swarmapi.NewWatchClient(conn)
watch, err := client.Watch(ctx, &swarmapi.WatchRequest{
Entries: []*swarmapi.WatchRequest_WatchEntry{
{
Kind: "node",
Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove,
},
{
Kind: "service",
Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove,
},
{
Kind: "network",
Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove,
},
{
Kind: "secret",
Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove,
},
},
IncludeOldObject: true,
})
if err != nil {
logrus.WithError(err).Error("failed to watch cluster store")
return
}
for {
msg, err := watch.Recv()
if err != nil {
// store watch is broken
logrus.WithError(err).Error("failed to receive changes from store watch API")
return
}
select {
case <-ctx.Done():
return
case n.cluster.watchStream <- msg:
}
}
}
func (n *nodeRunner) handleReadyEvent(ctx context.Context, node *swarmnode.Node, ready chan struct{}) {
select {
case <-node.Ready():


@@ -1,14 +1,27 @@
package daemon
import (
"context"
"strconv"
"strings"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/api/types/events"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/container"
daemonevents "github.com/docker/docker/daemon/events"
"github.com/docker/libnetwork"
swarmapi "github.com/docker/swarmkit/api"
gogotypes "github.com/gogo/protobuf/types"
)
var (
clusterEventAction = map[swarmapi.WatchActionKind]string{
swarmapi.WatchActionKindCreate: "create",
swarmapi.WatchActionKindUpdate: "update",
swarmapi.WatchActionKindRemove: "remove",
}
)
// LogContainerEvent generates an event related to a container with only the default attributes.
@@ -130,3 +143,180 @@ func copyAttributes(attributes, labels map[string]string) {
attributes[k] = v
}
}
// ProcessClusterNotifications gets changes from the store and adds them to the event list
func (daemon *Daemon) ProcessClusterNotifications(ctx context.Context, watchStream chan *swarmapi.WatchMessage) {
for {
select {
case <-ctx.Done():
return
case message, ok := <-watchStream:
if !ok {
logrus.Debug("cluster event channel has stopped")
return
}
daemon.generateClusterEvent(message)
}
}
}
func (daemon *Daemon) generateClusterEvent(msg *swarmapi.WatchMessage) {
for _, event := range msg.Events {
if event.Object == nil {
logrus.Errorf("event without object: %v", event)
continue
}
switch v := event.Object.GetObject().(type) {
case *swarmapi.Object_Node:
daemon.logNodeEvent(event.Action, v.Node, event.OldObject.GetNode())
case *swarmapi.Object_Service:
daemon.logServiceEvent(event.Action, v.Service, event.OldObject.GetService())
case *swarmapi.Object_Network:
daemon.logNetworkEvent(event.Action, v.Network, event.OldObject.GetNetwork())
case *swarmapi.Object_Secret:
daemon.logSecretEvent(event.Action, v.Secret, event.OldObject.GetSecret())
default:
logrus.Warnf("unrecognized event: %v", event)
}
}
}
func (daemon *Daemon) logNetworkEvent(action swarmapi.WatchActionKind, net *swarmapi.Network, oldNet *swarmapi.Network) {
attributes := map[string]string{
"name": net.Spec.Annotations.Name,
}
eventTime := eventTimestamp(net.Meta, action)
daemon.logClusterEvent(action, net.ID, "network", attributes, eventTime)
}
func (daemon *Daemon) logSecretEvent(action swarmapi.WatchActionKind, secret *swarmapi.Secret, oldSecret *swarmapi.Secret) {
attributes := map[string]string{
"name": secret.Spec.Annotations.Name,
}
eventTime := eventTimestamp(secret.Meta, action)
daemon.logClusterEvent(action, secret.ID, "secret", attributes, eventTime)
}
func (daemon *Daemon) logNodeEvent(action swarmapi.WatchActionKind, node *swarmapi.Node, oldNode *swarmapi.Node) {
name := node.Spec.Annotations.Name
if name == "" && node.Description != nil {
name = node.Description.Hostname
}
attributes := map[string]string{
"name": name,
}
eventTime := eventTimestamp(node.Meta, action)
// In an update event, display the changes in attributes
if action == swarmapi.WatchActionKindUpdate && oldNode != nil {
if node.Spec.Availability != oldNode.Spec.Availability {
attributes["availability.old"] = strings.ToLower(oldNode.Spec.Availability.String())
attributes["availability.new"] = strings.ToLower(node.Spec.Availability.String())
}
if node.Role != oldNode.Role {
attributes["role.old"] = strings.ToLower(oldNode.Role.String())
attributes["role.new"] = strings.ToLower(node.Role.String())
}
if node.Status.State != oldNode.Status.State {
attributes["state.old"] = strings.ToLower(oldNode.Status.State.String())
attributes["state.new"] = strings.ToLower(node.Status.State.String())
}
// This handles changes within the manager role
if node.ManagerStatus != nil && oldNode.ManagerStatus != nil {
// leader change
if node.ManagerStatus.Leader != oldNode.ManagerStatus.Leader {
if node.ManagerStatus.Leader {
attributes["leader.old"] = "false"
attributes["leader.new"] = "true"
} else {
attributes["leader.old"] = "true"
attributes["leader.new"] = "false"
}
}
if node.ManagerStatus.Reachability != oldNode.ManagerStatus.Reachability {
attributes["reachability.old"] = strings.ToLower(oldNode.ManagerStatus.Reachability.String())
attributes["reachability.new"] = strings.ToLower(node.ManagerStatus.Reachability.String())
}
}
}
daemon.logClusterEvent(action, node.ID, "node", attributes, eventTime)
}
func (daemon *Daemon) logServiceEvent(action swarmapi.WatchActionKind, service *swarmapi.Service, oldService *swarmapi.Service) {
attributes := map[string]string{
"name": service.Spec.Annotations.Name,
}
eventTime := eventTimestamp(service.Meta, action)
if action == swarmapi.WatchActionKindUpdate && oldService != nil {
// check image
if x, ok := service.Spec.Task.GetRuntime().(*swarmapi.TaskSpec_Container); ok {
containerSpec := x.Container
if y, ok := oldService.Spec.Task.GetRuntime().(*swarmapi.TaskSpec_Container); ok {
oldContainerSpec := y.Container
if containerSpec.Image != oldContainerSpec.Image {
attributes["image.old"] = oldContainerSpec.Image
attributes["image.new"] = containerSpec.Image
}
} else {
// This should not happen.
logrus.Errorf("service %s runtime changed from %T to %T", service.Spec.Annotations.Name, oldService.Spec.Task.GetRuntime(), service.Spec.Task.GetRuntime())
}
}
// check replicated count change
if x, ok := service.Spec.GetMode().(*swarmapi.ServiceSpec_Replicated); ok {
replicas := x.Replicated.Replicas
if y, ok := oldService.Spec.GetMode().(*swarmapi.ServiceSpec_Replicated); ok {
oldReplicas := y.Replicated.Replicas
if replicas != oldReplicas {
attributes["replicas.old"] = strconv.FormatUint(oldReplicas, 10)
attributes["replicas.new"] = strconv.FormatUint(replicas, 10)
}
} else {
// This should not happen.
logrus.Errorf("service %s mode changed from %T to %T", service.Spec.Annotations.Name, oldService.Spec.GetMode(), service.Spec.GetMode())
}
}
if service.UpdateStatus != nil {
if oldService.UpdateStatus == nil {
attributes["updatestate.new"] = strings.ToLower(service.UpdateStatus.State.String())
} else if service.UpdateStatus.State != oldService.UpdateStatus.State {
attributes["updatestate.old"] = strings.ToLower(oldService.UpdateStatus.State.String())
attributes["updatestate.new"] = strings.ToLower(service.UpdateStatus.State.String())
}
}
}
daemon.logClusterEvent(action, service.ID, "service", attributes, eventTime)
}
func (daemon *Daemon) logClusterEvent(action swarmapi.WatchActionKind, id, eventType string, attributes map[string]string, eventTime time.Time) {
actor := events.Actor{
ID: id,
Attributes: attributes,
}
jm := events.Message{
Action: clusterEventAction[action],
Type: eventType,
Actor: actor,
Scope: "swarm",
Time: eventTime.UTC().Unix(),
TimeNano: eventTime.UTC().UnixNano(),
}
daemon.EventsService.PublishMessage(jm)
}
func eventTimestamp(meta swarmapi.Meta, action swarmapi.WatchActionKind) time.Time {
var eventTime time.Time
switch action {
case swarmapi.WatchActionKindCreate:
eventTime, _ = gogotypes.TimestampFromProto(meta.CreatedAt)
case swarmapi.WatchActionKindUpdate:
eventTime, _ = gogotypes.TimestampFromProto(meta.UpdatedAt)
case swarmapi.WatchActionKindRemove:
// There is no timestamp from store message for remove operations.
// Use current time.
eventTime = time.Now()
}
return eventTime
}


@@ -9,7 +9,7 @@ import (
)
const (
-eventsLimit = 64
+eventsLimit = 256
bufferSize = 1024
)
@@ -78,15 +78,14 @@ func (e *Events) Evict(l chan interface{}) {
e.pub.Evict(l)
}
-// Log broadcasts event to listeners. Each listener has 100 milliseconds to
-// receive the event or it will be skipped.
+// Log creates a local scope message and publishes it
func (e *Events) Log(action, eventType string, actor eventtypes.Actor) {
eventsCounter.Inc()
now := time.Now().UTC()
jm := eventtypes.Message{
Action: action,
Type: eventType,
Actor: actor,
Scope: "local",
Time: now.Unix(),
TimeNano: now.UnixNano(),
}
@@ -102,6 +101,14 @@ func (e *Events) Log(action, eventType string, actor eventtypes.Actor) {
jm.Status = action
}
e.PublishMessage(jm)
}
// PublishMessage broadcasts the event to listeners. Each listener has 100 milliseconds to
// receive the event or it will be skipped.
func (e *Events) PublishMessage(jm eventtypes.Message) {
eventsCounter.Inc()
e.mu.Lock()
if len(e.events) == cap(e.events) {
// discard oldest event
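A minimal sketch of the new PublishMessage path from the caller's side; the package's existing New and Subscribe helpers are assumed here (their exact signatures are an assumption, not shown in this diff):

package main

import (
	"fmt"
	"time"

	eventtypes "github.com/docker/docker/api/types/events"
	"github.com/docker/docker/daemon/events"
)

func main() {
	e := events.New()
	// Subscribe is assumed to return the buffered history, a live channel,
	// and a cancel function, as in the existing package API.
	_, l, cancel := e.Subscribe()
	defer cancel()

	now := time.Now().UTC()
	// Publish a swarm-scope message directly, the way logClusterEvent does.
	e.PublishMessage(eventtypes.Message{
		Action:   "create",
		Type:     "service",
		Actor:    eventtypes.Actor{ID: "abc123", Attributes: map[string]string{"name": "web"}},
		Scope:    "swarm",
		Time:     now.Unix(),
		TimeNano: now.UnixNano(),
	})

	msg := (<-l).(eventtypes.Message)
	fmt.Println(msg.Type, msg.Action, msg.Scope) // service create swarm
}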


@@ -139,17 +139,17 @@ func TestLogEvents(t *testing.T) {
t.Fatalf("First action is %s, must be action_16", first.Status)
}
last := current[len(current)-1]
-if last.Status != "action_79" {
-t.Fatalf("Last action is %s, must be action_79", last.Status)
+if last.Status != "action_271" {
+t.Fatalf("Last action is %s, must be action_271", last.Status)
}
firstC := msgs[0]
-if firstC.Status != "action_80" {
-t.Fatalf("First action is %s, must be action_80", firstC.Status)
+if firstC.Status != "action_272" {
+t.Fatalf("First action is %s, must be action_272", firstC.Status)
}
lastC := msgs[len(msgs)-1]
-if lastC.Status != "action_89" {
-t.Fatalf("Last action is %s, must be action_89", lastC.Status)
+if lastC.Status != "action_281" {
+t.Fatalf("Last action is %s, must be action_281", lastC.Status)
}
}


@@ -20,6 +20,7 @@ func NewFilter(filter filters.Args) *Filter {
func (ef *Filter) Include(ev events.Message) bool {
return ef.matchEvent(ev) &&
ef.filter.ExactMatch("type", ev.Type) &&
ef.matchScope(ev.Scope) &&
ef.matchDaemon(ev) &&
ef.matchContainer(ev) &&
ef.matchPlugin(ev) &&
@@ -47,6 +48,13 @@ func (ef *Filter) filterContains(field string, values map[string]struct{}) bool
return false
}
func (ef *Filter) matchScope(scope string) bool {
if !ef.filter.Include("scope") {
return true
}
return ef.filter.ExactMatch("scope", scope)
}
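A minimal sketch of how the new scope predicate composes with the exported NewFilter and Include shown above; with only a scope filter set, the other predicates fall through:

package main

import (
	"fmt"

	eventtypes "github.com/docker/docker/api/types/events"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/daemon/events"
)

func main() {
	f := filters.NewArgs()
	f.Add("scope", "swarm")
	ef := events.NewFilter(f)

	cluster := eventtypes.Message{Type: "service", Action: "create", Scope: "swarm"}
	local := eventtypes.Message{Type: "container", Action: "create", Scope: "local"}

	fmt.Println(ef.Include(cluster)) // true: matchScope passes
	fmt.Println(ef.Include(local))   // false: rejected by matchScope
}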
func (ef *Filter) matchLabels(attributes map[string]string) bool {
if !ef.filter.Include("label") {
return true
@@ -74,6 +82,18 @@ func (ef *Filter) matchNetwork(ev events.Message) bool {
return ef.fuzzyMatchName(ev, events.NetworkEventType)
}
func (ef *Filter) matchService(ev events.Message) bool {
return ef.fuzzyMatchName(ev, events.ServiceEventType)
}
func (ef *Filter) matchNode(ev events.Message) bool {
return ef.fuzzyMatchName(ev, events.NodeEventType)
}
func (ef *Filter) matchSecret(ev events.Message) bool {
return ef.fuzzyMatchName(ev, events.SecretEventType)
}
func (ef *Filter) fuzzyMatchName(ev events.Message, eventType string) bool {
return ef.filter.FuzzyMatch(eventType, ev.Actor.ID) ||
ef.filter.FuzzyMatch(eventType, ev.Actor.Attributes["name"])


@@ -119,7 +119,7 @@ func (s *DockerSuite) TestEventsLimit(c *check.C) {
out, _ := dockerCmd(c, "events", "--since=0", "--until", daemonUnixTime(c))
events := strings.Split(out, "\n")
nEvents := len(events) - 1
-c.Assert(nEvents, checker.Equals, 64, check.Commentf("events should be limited to 64, but received %d", nEvents))
+c.Assert(nEvents, checker.Equals, 256, check.Commentf("events should be limited to 256, but received %d", nEvents))
}
func (s *DockerSuite) TestEventsContainerEvents(c *check.C) {


@@ -2001,3 +2001,220 @@ func (s *DockerSwarmSuite) TestSwarmJoinLeave(c *check.C) {
c.Assert(err, checker.IsNil)
}
}
func (s *DockerSwarmSuite) TestSwarmClusterEventsSource(c *check.C) {
d1 := s.AddDaemon(c, true, true)
d2 := s.AddDaemon(c, true, true)
d3 := s.AddDaemon(c, true, false)
// create a network
out, err := d1.Cmd("network", "create", "--attachable", "-d", "overlay", "foo")
c.Assert(err, checker.IsNil, check.Commentf(out))
networkID := strings.TrimSpace(out)
c.Assert(networkID, checker.Not(checker.Equals), "")
until := daemonUnixTime(c)
// d1 is a manager
out, err = d1.Cmd("events", "--since=0", "--until", until, "-f scope=swarm")
c.Assert(err, checker.IsNil, check.Commentf(out))
c.Assert(strings.TrimSpace(out), checker.Contains, "network create "+networkID)
// d2 is a manager
out, err = d2.Cmd("events", "--since=0", "--until", until, "-f scope=swarm")
c.Assert(err, checker.IsNil, check.Commentf(out))
c.Assert(strings.TrimSpace(out), checker.Contains, "network create "+networkID)
// d3 is a worker, not able to get cluster events
out, err = d3.Cmd("events", "--since=0", "--until", until, "-f scope=swarm")
c.Assert(err, checker.IsNil, check.Commentf(out))
c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), "network create ")
}
func (s *DockerSwarmSuite) TestSwarmClusterEventsScope(c *check.C) {
d := s.AddDaemon(c, true, true)
// create a service
out, err := d.Cmd("service", "create", "--name", "test", "--detach=false", "busybox", "top")
c.Assert(err, checker.IsNil, check.Commentf(out))
serviceID := strings.Split(out, "\n")[0]
until := daemonUnixTime(c)
// scope swarm filters cluster events
out, err = d.Cmd("events", "--since=0", "--until", until, "-f scope=swarm")
c.Assert(err, checker.IsNil, check.Commentf(out))
c.Assert(strings.TrimSpace(out), checker.Contains, "service create "+serviceID)
c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), "container create ")
// without scope all events are returned
out, err = d.Cmd("events", "--since=0", "--until", until)
c.Assert(err, checker.IsNil, check.Commentf(out))
c.Assert(strings.TrimSpace(out), checker.Contains, "service create "+serviceID)
c.Assert(strings.TrimSpace(out), checker.Contains, "container create ")
// scope local only shows non-cluster events
out, err = d.Cmd("events", "--since=0", "--until", until, "-f scope=local")
c.Assert(err, checker.IsNil, check.Commentf(out))
c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), "service create ")
c.Assert(strings.TrimSpace(out), checker.Contains, "container create ")
}
func (s *DockerSwarmSuite) TestSwarmClusterEventsType(c *check.C) {
d := s.AddDaemon(c, true, true)
// create a service
out, err := d.Cmd("service", "create", "--name", "test", "--detach=false", "busybox", "top")
c.Assert(err, checker.IsNil, check.Commentf(out))
serviceID := strings.Split(out, "\n")[0]
// create a network
out, err = d.Cmd("network", "create", "--attachable", "-d", "overlay", "foo")
c.Assert(err, checker.IsNil, check.Commentf(out))
networkID := strings.TrimSpace(out)
c.Assert(networkID, checker.Not(checker.Equals), "")
until := daemonUnixTime(c)
// filter by service
out, err = d.Cmd("events", "--since=0", "--until", until, "-f type=service")
c.Assert(err, checker.IsNil, check.Commentf(out))
c.Assert(out, checker.Contains, "service create "+serviceID)
c.Assert(out, checker.Not(checker.Contains), "network create")
// filter by network
out, err = d.Cmd("events", "--since=0", "--until", until, "-f type=network")
c.Assert(err, checker.IsNil, check.Commentf(out))
c.Assert(out, checker.Contains, "network create "+networkID)
c.Assert(out, checker.Not(checker.Contains), "service create")
}
func (s *DockerSwarmSuite) TestSwarmClusterEventsService(c *check.C) {
d := s.AddDaemon(c, true, true)
// create a service
out, err := d.Cmd("service", "create", "--name", "test", "--detach=false", "busybox", "top")
c.Assert(err, checker.IsNil, check.Commentf(out))
serviceID := strings.Split(out, "\n")[0]
t1 := daemonUnixTime(c)
// validate service create event
out, err = d.Cmd("events", "--since=0", "--until", t1, "-f scope=swarm")
c.Assert(err, checker.IsNil, check.Commentf(out))
c.Assert(out, checker.Contains, "service create "+serviceID)
out, err = d.Cmd("service", "update", "--force", "--detach=false", "test")
c.Assert(err, checker.IsNil, check.Commentf(out))
t2 := daemonUnixTime(c)
out, err = d.Cmd("events", "--since", t1, "--until", t2, "-f scope=swarm")
c.Assert(err, checker.IsNil, check.Commentf(out))
c.Assert(out, checker.Contains, "service update "+serviceID)
c.Assert(out, checker.Contains, "updatestate.new=updating")
c.Assert(out, checker.Contains, "updatestate.new=completed, updatestate.old=updating")
// scale service
out, err = d.Cmd("service", "scale", "test=3")
c.Assert(err, checker.IsNil, check.Commentf(out))
t3 := daemonUnixTime(c)
out, err = d.Cmd("events", "--since", t2, "--until", t3, "-f scope=swarm")
c.Assert(err, checker.IsNil, check.Commentf(out))
c.Assert(out, checker.Contains, "service update "+serviceID)
c.Assert(out, checker.Contains, "replicas.new=3, replicas.old=1")
// remove service
out, err = d.Cmd("service", "rm", "test")
c.Assert(err, checker.IsNil, check.Commentf(out))
t4 := daemonUnixTime(c)
out, err = d.Cmd("events", "--since", t3, "--until", t4, "-f scope=swarm")
c.Assert(err, checker.IsNil, check.Commentf(out))
c.Assert(out, checker.Contains, "service remove "+serviceID)
}
func (s *DockerSwarmSuite) TestSwarmClusterEventsNode(c *check.C) {
d1 := s.AddDaemon(c, true, true)
s.AddDaemon(c, true, true)
d3 := s.AddDaemon(c, true, true)
d3ID := d3.NodeID
t1 := daemonUnixTime(c)
out, err := d1.Cmd("events", "--since=0", "--until", t1, "-f scope=swarm")
c.Assert(err, checker.IsNil, check.Commentf(out))
c.Assert(out, checker.Contains, "node create "+d3ID)
out, err = d1.Cmd("node", "update", "--availability=pause", d3ID)
c.Assert(err, checker.IsNil, check.Commentf(out))
t2 := daemonUnixTime(c)
// filter by type
out, err = d1.Cmd("events", "--since", t1, "--until", t2, "-f type=node")
c.Assert(err, checker.IsNil, check.Commentf(out))
c.Assert(out, checker.Contains, "node update "+d3ID)
c.Assert(out, checker.Contains, "availability.new=pause, availability.old=active")
out, err = d1.Cmd("node", "demote", d3ID)
c.Assert(err, checker.IsNil, check.Commentf(out))
t3 := daemonUnixTime(c)
// filter by type and scope
out, err = d1.Cmd("events", "--since", t2, "--until", t3, "-f type=node", "-f scope=swarm")
c.Assert(err, checker.IsNil, check.Commentf(out))
c.Assert(out, checker.Contains, "node update "+d3ID)
out, err = d1.Cmd("node", "rm", "-f", d3ID)
c.Assert(err, checker.IsNil, check.Commentf(out))
t4 := daemonUnixTime(c)
// filter by type and scope
out, err = d1.Cmd("events", "--since", t3, "--until", t4, "-f type=node", "-f scope=swarm")
c.Assert(err, checker.IsNil, check.Commentf(out))
c.Assert(out, checker.Contains, "node remove "+d3ID)
}
func (s *DockerSwarmSuite) TestSwarmClusterEventsNetwork(c *check.C) {
d := s.AddDaemon(c, true, true)
// create a network
out, err := d.Cmd("network", "create", "--attachable", "-d", "overlay", "foo")
c.Assert(err, checker.IsNil, check.Commentf(out))
networkID := strings.TrimSpace(out)
t1 := daemonUnixTime(c)
out, err = d.Cmd("events", "--since=0", "--until", t1, "-f scope=swarm")
c.Assert(err, checker.IsNil, check.Commentf(out))
c.Assert(out, checker.Contains, "network create "+networkID)
// remove network
out, err = d.Cmd("network", "rm", "foo")
c.Assert(err, checker.IsNil, check.Commentf(out))
t2 := daemonUnixTime(c)
// filtered by network
out, err = d.Cmd("events", "--since", t1, "--until", t2, "-f type=network")
c.Assert(err, checker.IsNil, check.Commentf(out))
c.Assert(out, checker.Contains, "network remove "+networkID)
}
func (s *DockerSwarmSuite) TestSwarmClusterEventsSecret(c *check.C) {
d := s.AddDaemon(c, true, true)
testName := "test_secret"
id := d.CreateSecret(c, swarm.SecretSpec{
Annotations: swarm.Annotations{
Name: testName,
},
Data: []byte("TESTINGDATA"),
})
c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id))
t1 := daemonUnixTime(c)
out, err := d.Cmd("events", "--since=0", "--until", t1, "-f scope=swarm")
c.Assert(err, checker.IsNil, check.Commentf(out))
c.Assert(out, checker.Contains, "secret create "+id)
d.DeleteSecret(c, id)
t2 := daemonUnixTime(c)
// filtered by secret
out, err = d.Cmd("events", "--since", t1, "--until", t2, "-f type=secret")
c.Assert(err, checker.IsNil, check.Commentf(out))
c.Assert(out, checker.Contains, "secret remove "+id)
}