package config // import "github.com/docker/docker/daemon/config"

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net"
	"net/url"
	"os"
	"path/filepath"
	"strings"
	"sync"

	"github.com/containerd/containerd/runtime/v2/shim"
	"github.com/docker/docker/opts"
	"github.com/docker/docker/pkg/authorization"
	"github.com/docker/docker/registry"
	"github.com/imdario/mergo"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"github.com/spf13/pflag"
)

const (
	// DefaultMaxConcurrentDownloads is the default value for
	// the maximum number of downloads that
	// may take place at a time for each pull.
	DefaultMaxConcurrentDownloads = 3
	// DefaultMaxConcurrentUploads is the default value for
	// the maximum number of uploads that
	// may take place at a time for each push.
	DefaultMaxConcurrentUploads = 5
	// DefaultDownloadAttempts is the default value for the
	// maximum number of download attempts for each pull
	// when the connection is lost.
	DefaultDownloadAttempts = 5
	// DefaultShmSize is the default value for container's shm size (64 MiB)
	DefaultShmSize int64 = 64 * 1024 * 1024
	// DefaultNetworkMtu is the default value for network MTU
	DefaultNetworkMtu = 1500
	// DisableNetworkBridge is the default value of the option to disable network bridge
	DisableNetworkBridge = "none"
	// DefaultShutdownTimeout is the default shutdown timeout (in seconds) for
	// the daemon for containers to stop when it is shutting down.
	DefaultShutdownTimeout = 15
	// DefaultInitBinary is the name of the default init binary
	DefaultInitBinary = "docker-init"
	// DefaultRuntimeBinary is the default runtime to be used by
	// containerd if none is specified
	DefaultRuntimeBinary = "runc"
	// DefaultContainersNamespace is the name of the default containerd namespace used for users' containers.
	DefaultContainersNamespace = "moby"
	// DefaultPluginNamespace is the name of the default containerd namespace used for plugins.
	DefaultPluginNamespace = "plugins.moby"
	// LinuxV2RuntimeName is the runtime used to specify the containerd v2 runc shim
	LinuxV2RuntimeName = "io.containerd.runc.v2"
	// SeccompProfileDefault is the built-in default seccomp profile.
	SeccompProfileDefault = "builtin"
	// SeccompProfileUnconfined is a special profile name for seccomp to use an
	// "unconfined" seccomp profile.
	SeccompProfileUnconfined = "unconfined"
)

var builtinRuntimes = map[string]bool{
	StockRuntimeName:   true,
	LinuxV2RuntimeName: true,
}

// flatOptions contains configuration keys
// that MUST NOT be parsed as deep structures.
// Use this to differentiate these options
// from others like the ones in CommonTLSOptions.
var flatOptions = map[string]bool{
	"cluster-store-opts": true,
	"log-opts":           true,
	"runtimes":           true,
	"default-ulimits":    true,
	"features":           true,
	"builder":            true,
}

// skipValidateOptions contains configuration keys
// that will be skipped by findConfigurationConflicts
// during unknown-flag validation.
var skipValidateOptions = map[string]bool{
	"features": true,
	"builder":  true,
	// Corresponding flag has been removed because it was already unusable
	"deprecated-key-path": true,
}

// skipDuplicates contains configuration keys that
// will be skipped when checking for duplicated
// configuration fields defined in both the daemon
// config file and the dockerd cli flags.
// This allows some configurations to be merged
// during parsing.
var skipDuplicates = map[string]bool{
	"runtimes": true,
}

// LogConfig represents the default log configuration.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
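//
// An illustrative daemon.json fragment (values are examples, not defaults):
//
//	{"log-driver": "json-file", "log-opts": {"max-size": "10m", "max-file": "3"}}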
type LogConfig struct {
	Type   string            `json:"log-driver,omitempty"`
	Config map[string]string `json:"log-opts,omitempty"`
}

// commonBridgeConfig stores all the platform-common bridge driver specific
// configuration.
type commonBridgeConfig struct {
	Iface     string `json:"bridge,omitempty"`
	FixedCIDR string `json:"fixed-cidr,omitempty"`
}

// NetworkConfig stores the daemon-wide networking configurations
type NetworkConfig struct {
	// Default address pools for docker networks
	DefaultAddressPools opts.PoolsOpt `json:"default-address-pools,omitempty"`
	// NetworkControlPlaneMTU allows specifying the control-plane MTU; this
	// can be used to optimize network use in some components.
	NetworkControlPlaneMTU int `json:"network-control-plane-mtu,omitempty"`
}

// CommonTLSOptions defines TLS configuration for the daemon server.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type CommonTLSOptions struct {
	CAFile   string `json:"tlscacert,omitempty"`
	CertFile string `json:"tlscert,omitempty"`
	KeyFile  string `json:"tlskey,omitempty"`
}

// DNSConfig defines the DNS configurations.
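//
// An illustrative daemon.json fragment (values are examples, not defaults):
//
//	{"dns": ["10.0.0.2", "10.0.0.3"], "dns-search": ["internal.example.com"]}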
type DNSConfig struct {
	DNS           []string `json:"dns,omitempty"`
	DNSOptions    []string `json:"dns-opts,omitempty"`
	DNSSearch     []string `json:"dns-search,omitempty"`
	HostGatewayIP net.IP   `json:"host-gateway-ip,omitempty"`
}

// CommonConfig defines the configuration of a docker daemon which is
// common across platforms.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type CommonConfig struct {
	AuthzMiddleware       *authorization.Middleware `json:"-"`
	AuthorizationPlugins  []string                  `json:"authorization-plugins,omitempty"` // AuthorizationPlugins holds list of authorization plugins
	AutoRestart           bool                      `json:"-"`
	Context               map[string][]string       `json:"-"`
	DisableBridge         bool                      `json:"-"`
	ExecOptions           []string                  `json:"exec-opts,omitempty"`
	GraphDriver           string                    `json:"storage-driver,omitempty"`
	GraphOptions          []string                  `json:"storage-opts,omitempty"`
	Labels                []string                  `json:"labels,omitempty"`
	Mtu                   int                       `json:"mtu,omitempty"`
	NetworkDiagnosticPort int                       `json:"network-diagnostic-port,omitempty"`
	Pidfile               string                    `json:"pidfile,omitempty"`
	RawLogs               bool                      `json:"raw-logs,omitempty"`
	RootDeprecated        string                    `json:"graph,omitempty"` // Deprecated: use Root instead. TODO(thaJeztah): remove in next release.
	Root                  string                    `json:"data-root,omitempty"`
	ExecRoot              string                    `json:"exec-root,omitempty"`
	SocketGroup           string                    `json:"group,omitempty"`
	CorsHeaders           string                    `json:"api-cors-header,omitempty"`

	// Proxies holds the proxies that are configured for the daemon.
	Proxies `json:"proxies"`

	// TrustKeyPath is used to generate the daemon ID and for signing schema 1 manifests
	// when pushing to a registry which does not support schema 2. This field is marked as
	// deprecated because schema 1 manifests are deprecated in favor of schema 2 and the
	// daemon ID will use a dedicated identifier not shared with exported signatures.
	TrustKeyPath string `json:"deprecated-key-path,omitempty"`

	// LiveRestoreEnabled determines whether we should keep containers
	// alive upon daemon shutdown/start
	LiveRestoreEnabled bool `json:"live-restore,omitempty"`

	// ClusterStore is the storage backend used for the cluster information. It is used by both
	// multihost networking (to store networks and endpoints information) and by the node discovery
	// mechanism.
	// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
	ClusterStore string `json:"cluster-store,omitempty"`

	// ClusterOpts is used to pass options to the discovery package for tuning libkv settings, such
	// as TLS configuration settings.
	// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
	ClusterOpts map[string]string `json:"cluster-store-opts,omitempty"`

	// ClusterAdvertise is the network endpoint that the Engine advertises for the purpose of node
	// discovery. This should be a 'host:port' combination on which that daemon instance is
	// reachable by other hosts.
	// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
	ClusterAdvertise string `json:"cluster-advertise,omitempty"`

	// MaxConcurrentDownloads is the maximum number of downloads that
	// may take place at a time for each pull.
	MaxConcurrentDownloads int `json:"max-concurrent-downloads,omitempty"`

	// MaxConcurrentUploads is the maximum number of uploads that
	// may take place at a time for each push.
	MaxConcurrentUploads int `json:"max-concurrent-uploads,omitempty"`

	// MaxDownloadAttempts is the maximum number of attempts that
	// may take place at a time for each pull.
	MaxDownloadAttempts int `json:"max-download-attempts,omitempty"`

	// ShutdownTimeout is the timeout value (in seconds) the daemon will wait for the container
	// to stop when the daemon is being shut down.
	ShutdownTimeout int `json:"shutdown-timeout,omitempty"`

	Debug     bool     `json:"debug,omitempty"`
	Hosts     []string `json:"hosts,omitempty"`
	LogLevel  string   `json:"log-level,omitempty"`
	TLS       *bool    `json:"tls,omitempty"`
	TLSVerify *bool    `json:"tlsverify,omitempty"`

	// Embedded structs that allow config
	// deserialization without the full struct.
	CommonTLSOptions

	// SwarmDefaultAdvertiseAddr is the default host/IP or network interface
	// to use if a wildcard address is specified in the ListenAddr value
	// given to the /swarm/init endpoint and no advertise address is
	// specified.
	SwarmDefaultAdvertiseAddr string `json:"swarm-default-advertise-addr"`

	// SwarmRaftHeartbeatTick is the number of ticks in time for swarm mode raft quorum heartbeat
	// Typical value is 1
	SwarmRaftHeartbeatTick uint32 `json:"swarm-raft-heartbeat-tick"`

	// SwarmRaftElectionTick is the number of ticks to elapse before followers in the quorum can propose
	// a new round of leader election. Default, recommended value is at least 10X that of Heartbeat tick.
	// Higher values can make the quorum less sensitive to transient faults in the environment, but this also
	// means it takes longer for the managers to detect a down leader.
	SwarmRaftElectionTick uint32 `json:"swarm-raft-election-tick"`

	MetricsAddress string `json:"metrics-addr"`

	DNSConfig
	LogConfig

	BridgeConfig // BridgeConfig holds bridge network specific configuration.
	NetworkConfig
	registry.ServiceOptions

	sync.Mutex

	// FIXME(vdemeester) This part is not that clear and is mainly dependent on cli flags
	// It should probably be handled outside this package.
	ValuesSet map[string]interface{} `json:"-"`

	Experimental bool `json:"experimental"` // Experimental indicates whether experimental features should be exposed or not

	// Exposed node Generic Resources
	// e.g: ["orange=red", "orange=green", "orange=blue", "apple=3"]
	NodeGenericResources []string `json:"node-generic-resources,omitempty"`

	// ContainerdAddr is the address used to connect to containerd if we're
	// not starting it ourselves
	ContainerdAddr string `json:"containerd,omitempty"`

	// CriContainerd determines whether a supervised containerd instance
	// should be configured with the CRI plugin enabled. This allows using
	// Docker's containerd instance directly with a Kubernetes kubelet.
	CriContainerd bool `json:"cri-containerd,omitempty"`

	// Features contains a list of feature key value pairs indicating what features are enabled or disabled.
	// If a certain feature doesn't appear in this list then it's unset (i.e. neither true nor false).
	Features map[string]bool `json:"features,omitempty"`

	Builder BuilderConfig `json:"builder,omitempty"`

	ContainerdNamespace       string `json:"containerd-namespace,omitempty"`
	ContainerdPluginNamespace string `json:"containerd-plugin-namespace,omitempty"`

	DefaultRuntime string `json:"default-runtime,omitempty"`
}

// Proxies holds the proxies that are configured for the daemon.
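//
// An illustrative daemon.json fragment (values are examples, not defaults):
//
//	{
//	    "proxies": {
//	        "http-proxy": "http://proxy.example.com:3128",
//	        "https-proxy": "https://proxy.example.com:3129",
//	        "no-proxy": "localhost,127.0.0.1"
//	    }
//	}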
type Proxies struct {
	HTTPProxy  string `json:"http-proxy,omitempty"`
	HTTPSProxy string `json:"https-proxy,omitempty"`
	NoProxy    string `json:"no-proxy,omitempty"`
}

// IsValueSet returns true if a configuration value
// was explicitly set in the configuration file.
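//
// Usage sketch (illustrative):
//
//	if conf.IsValueSet("max-download-attempts") {
//		// the value was set explicitly in daemon.json
//	}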
func (conf *Config) IsValueSet(name string) bool {
	if conf.ValuesSet == nil {
		return false
	}
	_, ok := conf.ValuesSet[name]
	return ok
}

// New returns a new fully initialized Config struct
func New() *Config {
	return &Config{
		CommonConfig: CommonConfig{
			ShutdownTimeout: DefaultShutdownTimeout,
			LogConfig: LogConfig{
				Config: make(map[string]string),
			},
			MaxConcurrentDownloads: DefaultMaxConcurrentDownloads,
			MaxConcurrentUploads:   DefaultMaxConcurrentUploads,
			MaxDownloadAttempts:    DefaultDownloadAttempts,
			Mtu:                    DefaultNetworkMtu,
			NetworkConfig: NetworkConfig{
				NetworkControlPlaneMTU: DefaultNetworkMtu,
			},
			ContainerdNamespace:       DefaultContainersNamespace,
			ContainerdPluginNamespace: DefaultPluginNamespace,
			DefaultRuntime:            StockRuntimeName,
		},
	}
}

// GetConflictFreeLabels validates Labels for conflicts.
// In swarm, duplicate labels are removed, so only identical
// key-value pairs are accepted here; different values for the
// same key result in an error. If the same key-value pair is
// repeated, only one copy of the label is kept.
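//
// For example (illustrative):
//
//	GetConflictFreeLabels([]string{"foo=bar", "foo=bar", "a=b"}) // []string{"foo=bar", "a=b"} (order not guaranteed)
//	GetConflictFreeLabels([]string{"foo=bar", "foo=baz"})        // error: conflict labels for foo=baz and foo=bar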
func GetConflictFreeLabels(labels []string) ([]string, error) {
	labelMap := map[string]string{}
	for _, label := range labels {
		stringSlice := strings.SplitN(label, "=", 2)
		if len(stringSlice) > 1 {
			// If there is a conflict we will return an error
			if v, ok := labelMap[stringSlice[0]]; ok && v != stringSlice[1] {
				return nil, fmt.Errorf("conflict labels for %s=%s and %s=%s", stringSlice[0], stringSlice[1], stringSlice[0], v)
			}
			labelMap[stringSlice[0]] = stringSlice[1]
		}
	}

	newLabels := []string{}
	for k, v := range labelMap {
		newLabels = append(newLabels, fmt.Sprintf("%s=%s", k, v))
	}
	return newLabels, nil
}

// Reload reads the configuration in the host and reloads the daemon and server.
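//
// Usage sketch (illustrative; the signal wiring below is an assumption about
// the caller, not part of this package):
//
//	c := make(chan os.Signal, 1)
//	signal.Notify(c, syscall.SIGHUP)
//	go func() {
//		for range c {
//			// applyConfig is a hypothetical callback that applies the
//			// reloadable options from the new configuration.
//			if err := config.Reload(configFile, flags, applyConfig); err != nil {
//				logrus.Error(err)
//			}
//		}
//	}()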
func Reload(configFile string, flags *pflag.FlagSet, reload func(*Config)) error {
	logrus.Infof("Got signal to reload configuration, reloading from: %s", configFile)
	newConfig, err := getConflictFreeConfiguration(configFile, flags)
	if err != nil {
		if flags.Changed("config-file") || !os.IsNotExist(err) {
			return errors.Wrapf(err, "unable to configure the Docker daemon with file %s", configFile)
		}
		newConfig = New()
	}

	// Check if duplicate label-keys with different values are found
	newLabels, err := GetConflictFreeLabels(newConfig.Labels)
	if err != nil {
		return err
	}
	newConfig.Labels = newLabels

	// TODO(thaJeztah) This logic is problematic and needs a rewrite;
	// This is validating newConfig before the "reload()" callback is executed.
	// At this point, newConfig may be a partial configuration, to be merged
	// with the existing configuration in the "reload()" callback. Validating
	// this config before it's merged can result in incorrect validation errors.
	//
	// However, the current "reload()" callback we use is DaemonCli.reloadConfig(),
	// which includes a call to Daemon.Reload(), which both performs "merging"
	// and validation, as well as actually updating the daemon configuration.
	// Calling DaemonCli.reloadConfig() *before* validation, could thus lead to
	// a failure in that function (making the reload non-atomic).
	//
	// While *some* errors could always occur when applying/updating the config,
	// we should make it more atomic, and;
	//
	// 1. get (a copy of) the active configuration
	// 2. get the new configuration
	// 3. apply the (reloadable) options from the new configuration
	// 4. validate the merged results
	// 5. apply the new configuration.
	if err := Validate(newConfig); err != nil {
		return errors.Wrap(err, "file configuration validation failed")
	}

	reload(newConfig)
	return nil
}

// boolValue is an interface that boolean value flags implement
// to tell the command line how to make -name equivalent to -name=true.
type boolValue interface {
	IsBoolFlag() bool
}

// MergeDaemonConfigurations reads a configuration file,
// loads the file configuration in an isolated structure,
// and merges the configuration provided from flags on top
// if there are no conflicts.
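//
// Usage sketch (illustrative; flag registration is elided):
//
//	flags := pflag.NewFlagSet("dockerd", pflag.ContinueOnError)
//	flagsConfig := config.New()
//	// ... register daemon flags bound to fields of flagsConfig, then parse ...
//	cfg, err := config.MergeDaemonConfigurations(flagsConfig, flags, "/etc/docker/daemon.json")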
func MergeDaemonConfigurations(flagsConfig *Config, flags *pflag.FlagSet, configFile string) (*Config, error) {
	fileConfig, err := getConflictFreeConfiguration(configFile, flags)
	if err != nil {
		return nil, err
	}

	// merge flags configuration on top of the file configuration
	if err := mergo.Merge(fileConfig, flagsConfig); err != nil {
		return nil, err
	}

	// validate the merged fileConfig and flagsConfig
	if err := Validate(fileConfig); err != nil {
		return nil, errors.Wrap(err, "merged configuration validation from file and command line flags failed")
	}

	return fileConfig, nil
}

// getConflictFreeConfiguration loads the configuration from a JSON file.
// It compares that configuration with the one provided by the flags,
// and returns an error if there are conflicts.
func getConflictFreeConfiguration(configFile string, flags *pflag.FlagSet) (*Config, error) {
	b, err := os.ReadFile(configFile)
	if err != nil {
		return nil, err
	}

	var config Config
	b = bytes.TrimSpace(b)
	if len(b) == 0 {
		// empty config file
		return &config, nil
	}

	if flags != nil {
		var jsonConfig map[string]interface{}
		if err := json.Unmarshal(b, &jsonConfig); err != nil {
			return nil, err
		}

		configSet := configValuesSet(jsonConfig)

		if err := findConfigurationConflicts(configSet, flags); err != nil {
			return nil, err
		}

		// Override flag values to make sure the values set in the config file with nullable values, like `false`,
		// are not overridden by default truthy values from the flags that were not explicitly set.
		// See https://github.com/docker/docker/issues/20289 for an example.
		//
		// TODO: Rewrite configuration logic to avoid same issue with other nullable values, like numbers.
		namedOptions := make(map[string]interface{})
		for key, value := range configSet {
			f := flags.Lookup(key)
			if f == nil { // ignore named flags that don't match
				namedOptions[key] = value
				continue
			}

			if _, ok := f.Value.(boolValue); ok {
				f.Value.Set(fmt.Sprintf("%v", value))
			}
		}
		if len(namedOptions) > 0 {
			// also set defaults for mergeVal flags that are boolValue at the same time.
			flags.VisitAll(func(f *pflag.Flag) {
				if opt, named := f.Value.(opts.NamedOption); named {
					v, set := namedOptions[opt.Name()]
					_, boolean := f.Value.(boolValue)
					if set && boolean {
						f.Value.Set(fmt.Sprintf("%v", v))
					}
				}
			})
		}

		config.ValuesSet = configSet
	}

	if err := json.Unmarshal(b, &config); err != nil {
		return nil, err
	}

	return &config, nil
}

// configValuesSet returns the configuration values explicitly set in the file.
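//
// For example (illustrative), the parsed JSON
//
//	{"proxies": {"http-proxy": "http://proxy.example.com"}, "log-opts": {"max-size": "10m"}}
//
// flattens to {"http-proxy": "http://proxy.example.com", "log-opts": {"max-size": "10m"}}:
// "proxies" is not registered in flatOptions, so its children are lifted to the
// top level, while "log-opts" is a flat option and is kept intact.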
func configValuesSet(config map[string]interface{}) map[string]interface{} {
	flatten := make(map[string]interface{})
	for k, v := range config {
		if m, isMap := v.(map[string]interface{}); isMap && !flatOptions[k] {
			for km, vm := range m {
				flatten[km] = vm
			}
			continue
		}

		flatten[k] = v
	}
	return flatten
}

// findConfigurationConflicts iterates over the provided flags searching for
// duplicated configurations and unknown keys. It returns an error with all the conflicts if
// it finds any.
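//
// For example (illustrative), if daemon.json contains {"debug": true} and the
// daemon was started with --debug=false, the returned error reads:
//
//	the following directives are specified both as a flag and in the
//	configuration file: debug: (from flag: false, from file: true)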
func findConfigurationConflicts(config map[string]interface{}, flags *pflag.FlagSet) error {
	// 1. Search keys from the file that we don't recognize as flags.
	unknownKeys := make(map[string]interface{})
	for key, value := range config {
		if flag := flags.Lookup(key); flag == nil && !skipValidateOptions[key] {
			unknownKeys[key] = value
		}
	}

	// 2. Discard values that implement NamedOption.
	// Their configuration name differs from their flag name, like `labels` and `label`.
	if len(unknownKeys) > 0 {
		unknownNamedConflicts := func(f *pflag.Flag) {
			if namedOption, ok := f.Value.(opts.NamedOption); ok {
				delete(unknownKeys, namedOption.Name())
			}
		}
		flags.VisitAll(unknownNamedConflicts)
	}

	if len(unknownKeys) > 0 {
		var unknown []string
		for key := range unknownKeys {
			unknown = append(unknown, key)
		}
		return fmt.Errorf("the following directives don't match any configuration option: %s", strings.Join(unknown, ", "))
	}

	var conflicts []string
	printConflict := func(name string, flagValue, fileValue interface{}) string {
		switch name {
		case "http-proxy", "https-proxy":
			flagValue = MaskCredentials(flagValue.(string))
			fileValue = MaskCredentials(fileValue.(string))
		}
		return fmt.Sprintf("%s: (from flag: %v, from file: %v)", name, flagValue, fileValue)
	}

	// 3. Search keys that are present as a flag and as a file option.
	duplicatedConflicts := func(f *pflag.Flag) {
		// search option name in the json configuration payload if the value is a named option
		if namedOption, ok := f.Value.(opts.NamedOption); ok {
			if optsValue, ok := config[namedOption.Name()]; ok && !skipDuplicates[namedOption.Name()] {
				conflicts = append(conflicts, printConflict(namedOption.Name(), f.Value.String(), optsValue))
			}
		} else {
			// search flag name in the json configuration payload
			for _, name := range []string{f.Name, f.Shorthand} {
				if value, ok := config[name]; ok && !skipDuplicates[name] {
					conflicts = append(conflicts, printConflict(name, f.Value.String(), value))
					break
				}
			}
		}
	}
	flags.Visit(duplicatedConflicts)

	if len(conflicts) > 0 {
		return fmt.Errorf("the following directives are specified both as a flag and in the configuration file: %s", strings.Join(conflicts, ", "))
	}
	return nil
}

// Validate validates some specific configs,
// such as config.DNS, config.Labels, config.DNSSearch,
// as well as config.MaxConcurrentDownloads, config.MaxConcurrentUploads and config.MaxDownloadAttempts.
func Validate(config *Config) error {
	//nolint:staticcheck // TODO(thaJeztah): remove in next release.
	if config.RootDeprecated != "" {
		return errors.New(`the "graph" config file option is deprecated; use "data-root" instead`)
	}

	// validate log-level
	if config.LogLevel != "" {
		if _, err := logrus.ParseLevel(config.LogLevel); err != nil {
			return fmt.Errorf("invalid logging level: %s", config.LogLevel)
		}
	}

	// validate DNS
	for _, dns := range config.DNS {
		if _, err := opts.ValidateIPAddress(dns); err != nil {
			return err
		}
	}

	// validate DNSSearch
	for _, dnsSearch := range config.DNSSearch {
		if _, err := opts.ValidateDNSSearch(dnsSearch); err != nil {
			return err
		}
	}

	// validate Labels
	for _, label := range config.Labels {
		if _, err := opts.ValidateLabel(label); err != nil {
			return err
		}
	}

	// TODO(thaJeztah) Validations below should not accept "0" to be valid; see Validate() for a more in-depth description of this problem
	if config.Mtu < 0 {
		return fmt.Errorf("invalid default MTU: %d", config.Mtu)
	}
	if config.MaxConcurrentDownloads < 0 {
		return fmt.Errorf("invalid max concurrent downloads: %d", config.MaxConcurrentDownloads)
	}
	if config.MaxConcurrentUploads < 0 {
		return fmt.Errorf("invalid max concurrent uploads: %d", config.MaxConcurrentUploads)
	}
	if config.MaxDownloadAttempts < 0 {
		return fmt.Errorf("invalid max download attempts: %d", config.MaxDownloadAttempts)
	}

	// validate that "default" runtime is not reset
	if runtimes := config.GetAllRuntimes(); len(runtimes) > 0 {
		if _, ok := runtimes[StockRuntimeName]; ok {
			return fmt.Errorf("runtime name '%s' is reserved", StockRuntimeName)
		}
	}

	if _, err := ParseGenericResources(config.NodeGenericResources); err != nil {
		return err
	}

	if defaultRuntime := config.GetDefaultRuntimeName(); defaultRuntime != "" {
		if !builtinRuntimes[defaultRuntime] {
			runtimes := config.GetAllRuntimes()
			if _, ok := runtimes[defaultRuntime]; !ok && !IsPermissibleC8dRuntimeName(defaultRuntime) {
				return fmt.Errorf("specified default runtime '%s' does not exist", defaultRuntime)
			}
		}
	}

	for _, h := range config.Hosts {
		if _, err := opts.ValidateHost(h); err != nil {
			return err
		}
	}

	// validate platform-specific settings
	return config.ValidatePlatformConfig()
}

// GetDefaultRuntimeName returns the current default runtime
func (conf *Config) GetDefaultRuntimeName() string {
	conf.Lock()
	rt := conf.DefaultRuntime
	conf.Unlock()

	return rt
}

// MaskCredentials masks credentials that are in a URL.
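//
// For example (illustrative):
//
//	MaskCredentials("https://user:secret@proxy.example.com") // "https://xxxxx:xxxxx@proxy.example.com"
//	MaskCredentials("https://proxy.example.com")             // unchanged; no user info present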
func MaskCredentials(rawURL string) string {
	parsedURL, err := url.Parse(rawURL)
	if err != nil || parsedURL.User == nil {
		return rawURL
	}
	parsedURL.User = url.UserPassword("xxxxx", "xxxxx")
	return parsedURL.String()
}

// IsPermissibleC8dRuntimeName tests whether name is safe to pass into
// containerd as a runtime name, and whether the name is well-formed.
// It does not check if the runtime is installed.
//
// A runtime name containing slash characters is interpreted by containerd as
// the path to a runtime binary. If we allowed this, anyone with Engine API
// access could get containerd to execute an arbitrary binary as root. Although
// Engine API access is already equivalent to root on the host, the runtime name
// has not historically been a vector to run arbitrary code as root so users are
// not expecting it to become one.
//
// This restriction is not configurable. There are viable workarounds for
// legitimate use cases: administrators and runtime developers can make runtimes
// available for use with Docker by installing them onto PATH following the
// [binary naming convention] for containerd Runtime v2.
//
// [binary naming convention]: https://github.com/containerd/containerd/blob/main/runtime/v2/README.md#binary-naming
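//
// Illustrative examples (behaviour inferred from the validation rule in the
// function body; not an exhaustive specification):
//
//	IsPermissibleC8dRuntimeName("io.containerd.runc.v2") // true: a shim binary "containerd-shim-runc-v2" is looked up on PATH
//	IsPermissibleC8dRuntimeName("runc")                  // false: no '.', so no shim binary name can be derived
//	IsPermissibleC8dRuntimeName("/usr/local/bin/shim")   // false: absolute paths are blocked
//	IsPermissibleC8dRuntimeName("my/runtime.v1")         // false: '/' is not allowed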
func IsPermissibleC8dRuntimeName(name string) bool {
	// containerd uses a rather permissive test to validate runtime names:
	//
	// - Any name for which filepath.IsAbs(name) is true is interpreted as the
	//   absolute path to a shim binary. We want to block this behaviour.
	// - Any name which contains at least one '.' character and no '/' characters
	//   and does not begin with a '.' character is a valid runtime name. The shim
	//   binary name is derived from the final two components of the name and
	//   searched for on the PATH. The name "a.." is technically valid per
	//   containerd's implementation: it would resolve to a binary named
	//   "containerd-shim---".
	//
	// https://github.com/containerd/containerd/blob/11ded166c15f92450958078cd13c6d87131ec563/runtime/v2/manager.go#L297-L317
	// https://github.com/containerd/containerd/blob/11ded166c15f92450958078cd13c6d87131ec563/runtime/v2/shim/util.go#L83-L93
	return !filepath.IsAbs(name) && !strings.ContainsRune(name, '/') && shim.BinaryName(name) != ""
}