mirror of https://github.com/moby/moby.git synced 2022-11-09 12:21:53 -05:00

Let swarmkit handle cluster defaults in swarm init if not specified

This fix tries to address the issue raised in 24958 where previously
`docker swarm init` would automatically fill in all the default values
(instead of letting swarmkit handle the defaults).

This fix updates `swarm init` so that initial values are passed only
when a flag change has been detected.

This fix fixes 24958.

Signed-off-by: Yong Tang <yong.tang.github@outlook.com>
Yong Tang 2016-08-25 21:08:53 -07:00
parent 789e686890
commit eb19c2f080
7 changed files with 75 additions and 81 deletions
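The mechanism behind the change is pflag's Changed check: a spec field is only filled in when the corresponding flag was explicitly passed on the command line, so anything left unset reaches swarmkit as empty and picks up swarmkit's own default. Below is a minimal, self-contained sketch of that pattern; the flag name and the stand-in struct are illustrative, not the real docker CLI types, and only pflag's Changed call is the actual API relied on.

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

// illustrativeSpec stands in for swarm.Spec; a nil pointer means
// "not specified, let swarmkit pick the default".
type illustrativeSpec struct {
	TaskHistoryRetentionLimit *int64
}

func main() {
	flags := pflag.NewFlagSet("init", pflag.ContinueOnError)
	taskHistoryLimit := flags.Int64("task-history-limit", 5, "Task history retention limit")

	// Simulate a user who did not pass the flag at all.
	_ = flags.Parse([]string{})

	spec := illustrativeSpec{}
	// Only copy the value into the spec when the flag was explicitly set.
	if flags.Changed("task-history-limit") {
		spec.TaskHistoryRetentionLimit = taskHistoryLimit
	}

	fmt.Printf("limit specified by user: %v\n", spec.TaskHistoryRetentionLimit != nil)
}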

View file

@@ -39,7 +39,7 @@ type Spec struct {
type OrchestrationConfig struct {
// TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or
// node. If negative, never remove completed or failed tasks.
TaskHistoryRetentionLimit int64 `json:",omitempty"`
TaskHistoryRetentionLimit *int64 `json:",omitempty"`
}
// TaskDefaults parameterizes cluster-level task creation with default values.
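The switch from int64 to *int64 above is what makes "flag not specified" representable at all: nil means swarmkit keeps its own default, while a non-nil pointer to 0 is an explicit request to retain no history. A small standalone illustration (not docker code) of why the pointer is needed:

package main

import "fmt"

func describe(limit *int64) string {
	if limit == nil {
		return "not specified; swarmkit applies its default"
	}
	return fmt.Sprintf("explicitly set to %d", *limit)
}

func main() {
	var unset *int64
	zero := int64(0)
	fmt.Println(describe(unset))  // not specified; swarmkit applies its default
	fmt.Println(describe(&zero)) // explicitly set to 0
}

With a plain int64, both cases would look identical (0), which is exactly the ambiguity this commit removes.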

View file

@@ -59,7 +59,7 @@ func runInit(dockerCli *command.DockerCli, flags *pflag.FlagSet, opts initOption
ListenAddr: opts.listenAddr.String(),
AdvertiseAddr: opts.advertiseAddr,
ForceNewCluster: opts.forceNewCluster,
Spec: opts.swarmOptions.ToSpec(),
Spec: opts.swarmOptions.ToSpec(flags),
}
nodeID, err := client.SwarmInit(ctx, req)

View file

@@ -169,11 +169,20 @@ func addSwarmFlags(flags *pflag.FlagSet, opts *swarmOptions) {
flags.Var(&opts.externalCA, flagExternalCA, "Specifications of one or more certificate signing endpoints")
}
func (opts *swarmOptions) ToSpec() swarm.Spec {
func (opts *swarmOptions) ToSpec(flags *pflag.FlagSet) swarm.Spec {
spec := swarm.Spec{}
spec.Orchestration.TaskHistoryRetentionLimit = opts.taskHistoryLimit
if flags.Changed(flagTaskHistoryLimit) {
spec.Orchestration.TaskHistoryRetentionLimit = &opts.taskHistoryLimit
}
if flags.Changed(flagDispatcherHeartbeat) {
spec.Dispatcher.HeartbeatPeriod = opts.dispatcherHeartbeat
}
if flags.Changed(flagCertExpiry) {
spec.CAConfig.NodeCertExpiry = opts.nodeCertExpiry
}
if flags.Changed(flagExternalCA) {
spec.CAConfig.ExternalCAs = opts.externalCA.Value()
}
return spec
}

View file

@@ -58,7 +58,8 @@ func mergeSwarm(swarm *swarm.Swarm, flags *pflag.FlagSet) error {
spec := &swarm.Spec
if flags.Changed(flagTaskHistoryLimit) {
spec.Orchestration.TaskHistoryRetentionLimit, _ = flags.GetInt64(flagTaskHistoryLimit)
taskHistoryRetentionLimit, _ := flags.GetInt64(flagTaskHistoryLimit)
spec.Orchestration.TaskHistoryRetentionLimit = &taskHistoryRetentionLimit
}
if flags.Changed(flagDispatcherHeartbeat) {
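A side note on the Go idiom in the mergeSwarm change above: the value returned by flags.GetInt64 is copied into a local variable before its address is taken, because Go does not allow taking the address of a function call's result directly. A tiny standalone example (hypothetical helper, not docker code):

package main

import "fmt"

func currentLimit() int64 { return 5 }

func main() {
	// limit := &currentLimit() // does not compile: cannot take the address of a call result
	v := currentLimit() // copy the return value into an addressable local first
	limit := &v
	fmt.Println(*limit)
}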

View file

@@ -107,7 +107,11 @@ func prettyPrintInfo(dockerCli *command.DockerCli, info types.Info) error {
fmt.Fprintf(dockerCli.Out(), " Managers: %d\n", info.Swarm.Managers)
fmt.Fprintf(dockerCli.Out(), " Nodes: %d\n", info.Swarm.Nodes)
fmt.Fprintf(dockerCli.Out(), " Orchestration:\n")
fmt.Fprintf(dockerCli.Out(), " Task History Retention Limit: %d\n", info.Swarm.Cluster.Spec.Orchestration.TaskHistoryRetentionLimit)
taskHistoryRetentionLimit := int64(0)
if info.Swarm.Cluster.Spec.Orchestration.TaskHistoryRetentionLimit != nil {
taskHistoryRetentionLimit = *info.Swarm.Cluster.Spec.Orchestration.TaskHistoryRetentionLimit
}
fmt.Fprintf(dockerCli.Out(), " Task History Retention Limit: %d\n", taskHistoryRetentionLimit)
fmt.Fprintf(dockerCli.Out(), " Raft:\n")
fmt.Fprintf(dockerCli.Out(), " Snapshot Interval: %d\n", info.Swarm.Cluster.Spec.Raft.SnapshotInterval)
fmt.Fprintf(dockerCli.Out(), " Heartbeat Tick: %d\n", info.Swarm.Cluster.Spec.Raft.HeartbeatTick)

View file

@@ -55,26 +55,6 @@ var ErrPendingSwarmExists = fmt.Errorf("This node is processing an existing join
// ErrSwarmJoinTimeoutReached is returned when cluster join could not complete before timeout was reached.
var ErrSwarmJoinTimeoutReached = fmt.Errorf("Timeout was reached before node was joined. The attempt to join the swarm will continue in the background. Use the \"docker info\" command to see the current swarm status of your node.")
// defaultSpec contains some sane defaults if cluster options are missing on init
var defaultSpec = types.Spec{
Raft: types.RaftConfig{
SnapshotInterval: 10000,
KeepOldSnapshots: 0,
LogEntriesForSlowFollowers: 500,
HeartbeatTick: 1,
ElectionTick: 3,
},
CAConfig: types.CAConfig{
NodeCertExpiry: 90 * 24 * time.Hour,
},
Dispatcher: types.DispatcherConfig{
HeartbeatPeriod: 5 * time.Second,
},
Orchestration: types.OrchestrationConfig{
TaskHistoryRetentionLimit: 10,
},
}
type state struct {
// LocalAddr is this machine's local IP or hostname, if specified.
LocalAddr string
@@ -676,7 +656,10 @@ func (c *Cluster) Update(version uint64, spec types.Spec, flags types.UpdateFlag
return err
}
swarmSpec, err := convert.SwarmSpecToGRPC(spec)
// In update, client should provide the complete spec of the swarm, including
// Name and Labels. If a field is specified with 0 or nil, then the default value
// will be used by swarmkit.
clusterSpec, err := convert.SwarmSpecToGRPC(spec)
if err != nil {
return err
}
@@ -685,7 +668,7 @@ func (c *Cluster) Update(version uint64, spec types.Spec, flags types.UpdateFlag
ctx,
&swarmapi.UpdateClusterRequest{
ClusterID: swarm.ID,
Spec: &swarmSpec,
Spec: &clusterSpec,
ClusterVersion: &swarmapi.Version{
Index: version,
},
@@ -1517,32 +1500,6 @@ func validateAndSanitizeInitRequest(req *types.InitRequest) error {
return fmt.Errorf("invalid ListenAddr %q: %v", req.ListenAddr, err)
}
spec := &req.Spec
// provide sane defaults instead of erroring
if spec.Name == "" {
spec.Name = "default"
}
if spec.Raft.SnapshotInterval == 0 {
spec.Raft.SnapshotInterval = defaultSpec.Raft.SnapshotInterval
}
if spec.Raft.LogEntriesForSlowFollowers == 0 {
spec.Raft.LogEntriesForSlowFollowers = defaultSpec.Raft.LogEntriesForSlowFollowers
}
if spec.Raft.ElectionTick == 0 {
spec.Raft.ElectionTick = defaultSpec.Raft.ElectionTick
}
if spec.Raft.HeartbeatTick == 0 {
spec.Raft.HeartbeatTick = defaultSpec.Raft.HeartbeatTick
}
if spec.Dispatcher.HeartbeatPeriod == 0 {
spec.Dispatcher.HeartbeatPeriod = defaultSpec.Dispatcher.HeartbeatPeriod
}
if spec.CAConfig.NodeCertExpiry == 0 {
spec.CAConfig.NodeCertExpiry = defaultSpec.CAConfig.NodeCertExpiry
}
if spec.Orchestration.TaskHistoryRetentionLimit == 0 {
spec.Orchestration.TaskHistoryRetentionLimit = defaultSpec.Orchestration.TaskHistoryRetentionLimit
}
return nil
}
@@ -1599,14 +1556,20 @@ func initClusterSpec(node *node, spec types.Spec) error {
cluster = lcr.Clusters[0]
break
}
newspec, err := convert.SwarmSpecToGRPC(spec)
// In init, we take the initial default values from swarmkit, and merge
// any non-nil and non-zero values from spec into the GRPC spec. This leaves the
// default values alone.
// Note that this is different from Update(), where we expect the
// user to specify the complete spec of the cluster (as they already know
// the existing one and know which fields to update).
clusterSpec, err := convert.MergeSwarmSpecToGRPC(spec, cluster.Spec)
if err != nil {
return fmt.Errorf("error updating cluster settings: %v", err)
}
_, err = client.UpdateCluster(ctx, &swarmapi.UpdateClusterRequest{
ClusterID: cluster.ID,
ClusterVersion: &cluster.Meta.Version,
Spec: &newspec,
Spec: &clusterSpec,
})
if err != nil {
return fmt.Errorf("error updating cluster settings: %v", err)

View file

@@ -17,7 +17,7 @@ func SwarmFromGRPC(c swarmapi.Cluster) types.Swarm {
ID: c.ID,
Spec: types.Spec{
Orchestration: types.OrchestrationConfig{
TaskHistoryRetentionLimit: c.Spec.Orchestration.TaskHistoryRetentionLimit,
TaskHistoryRetentionLimit: &c.Spec.Orchestration.TaskHistoryRetentionLimit,
},
Raft: types.RaftConfig{
SnapshotInterval: c.Spec.Raft.SnapshotInterval,
@@ -61,27 +61,44 @@ func SwarmFromGRPC(c swarmapi.Cluster) types.Swarm {
// SwarmSpecToGRPC converts a Spec to a grpc ClusterSpec.
func SwarmSpecToGRPC(s types.Spec) (swarmapi.ClusterSpec, error) {
spec := swarmapi.ClusterSpec{
Annotations: swarmapi.Annotations{
Name: s.Name,
Labels: s.Labels,
},
Orchestration: swarmapi.OrchestrationConfig{
TaskHistoryRetentionLimit: s.Orchestration.TaskHistoryRetentionLimit,
},
Raft: swarmapi.RaftConfig{
SnapshotInterval: s.Raft.SnapshotInterval,
KeepOldSnapshots: s.Raft.KeepOldSnapshots,
LogEntriesForSlowFollowers: s.Raft.LogEntriesForSlowFollowers,
HeartbeatTick: uint32(s.Raft.HeartbeatTick),
ElectionTick: uint32(s.Raft.ElectionTick),
},
Dispatcher: swarmapi.DispatcherConfig{
HeartbeatPeriod: ptypes.DurationProto(time.Duration(s.Dispatcher.HeartbeatPeriod)),
},
CAConfig: swarmapi.CAConfig{
NodeCertExpiry: ptypes.DurationProto(s.CAConfig.NodeCertExpiry),
},
return MergeSwarmSpecToGRPC(s, swarmapi.ClusterSpec{})
}
// MergeSwarmSpecToGRPC merges a Spec with an initial grpc ClusterSpec
func MergeSwarmSpecToGRPC(s types.Spec, spec swarmapi.ClusterSpec) (swarmapi.ClusterSpec, error) {
// We take the initSpec (either created from scratch, or returned by swarmkit),
// and will only change the value if the one taken from types.Spec is not nil or 0.
// In other words, if the value taken from types.Spec is nil or 0, we will maintain the status quo.
if s.Annotations.Name != "" {
spec.Annotations.Name = s.Annotations.Name
}
if len(s.Annotations.Labels) != 0 {
spec.Annotations.Labels = s.Annotations.Labels
}
if s.Orchestration.TaskHistoryRetentionLimit != nil {
spec.Orchestration.TaskHistoryRetentionLimit = *s.Orchestration.TaskHistoryRetentionLimit
}
if s.Raft.SnapshotInterval != 0 {
spec.Raft.SnapshotInterval = s.Raft.SnapshotInterval
}
if s.Raft.KeepOldSnapshots != 0 {
spec.Raft.KeepOldSnapshots = s.Raft.KeepOldSnapshots
}
if s.Raft.LogEntriesForSlowFollowers != 0 {
spec.Raft.LogEntriesForSlowFollowers = s.Raft.LogEntriesForSlowFollowers
}
if s.Raft.HeartbeatTick != 0 {
spec.Raft.HeartbeatTick = uint32(s.Raft.HeartbeatTick)
}
if s.Raft.ElectionTick != 0 {
spec.Raft.ElectionTick = uint32(s.Raft.ElectionTick)
}
if s.Dispatcher.HeartbeatPeriod != 0 {
spec.Dispatcher.HeartbeatPeriod = ptypes.DurationProto(time.Duration(s.Dispatcher.HeartbeatPeriod))
}
if s.CAConfig.NodeCertExpiry != 0 {
spec.CAConfig.NodeCertExpiry = ptypes.DurationProto(s.CAConfig.NodeCertExpiry)
}
for _, ca := range s.CAConfig.ExternalCAs {