moby/moby: daemon/reload.go
package daemon // import "github.com/docker/docker/daemon"

import (
	"encoding/json"
	"fmt"
	"strconv"

	"github.com/docker/docker/daemon/config"
	"github.com/sirupsen/logrus"
)

// Reload reads configuration changes and modifies the
// daemon according to those changes.
// These are the settings that Reload changes:
// - Platform runtime
// - Daemon debug log level
// - Daemon max concurrent downloads
// - Daemon max concurrent uploads
// - Daemon max download attempts
// - Daemon shutdown timeout (in seconds)
// - Cluster discovery (reconfigure and restart)
// - Daemon labels
// - Insecure registries
// - Registry mirrors
// - Daemon live restore
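//
// A reload is typically triggered by sending SIGHUP to the daemon process,
// for example:
//
//	kill -HUP "$(pidof dockerd)"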
func (daemon *Daemon) Reload(conf *config.Config) (err error) {
	daemon.configStore.Lock()

	attributes := map[string]string{}
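
	// On a successful reload, the deferred function below logs the effective
	// configuration (with proxy credentials masked) and emits a "reload"
	// daemon event carrying the collected attributes.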
	defer func() {
		if err == nil {
			jsonString, _ := json.Marshal(&struct {
				*config.Config
				config.Proxies `json:"proxies"`
			}{
				Config: daemon.configStore,
				Proxies: config.Proxies{
					HTTPProxy:  config.MaskCredentials(daemon.configStore.HTTPProxy),
					HTTPSProxy: config.MaskCredentials(daemon.configStore.HTTPSProxy),
					NoProxy:    config.MaskCredentials(daemon.configStore.NoProxy),
				},
			})
			logrus.Infof("Reloaded configuration: %s", jsonString)
		}

		// we're unlocking here, because
		// LogDaemonEventWithAttributes() -> SystemInfo() -> GetAllRuntimes()
		// holds that lock too.
		daemon.configStore.Unlock()

		if err == nil {
			daemon.LogDaemonEventWithAttributes("reload", attributes)
		}
	}()

	if err := daemon.reloadPlatform(conf, attributes); err != nil {
		return err
	}
	daemon.reloadDebug(conf, attributes)
	daemon.reloadMaxConcurrentDownloadsAndUploads(conf, attributes)
	daemon.reloadMaxDownloadAttempts(conf, attributes)
	daemon.reloadShutdownTimeout(conf, attributes)
	daemon.reloadFeatures(conf, attributes)

	if err := daemon.reloadLabels(conf, attributes); err != nil {
		return err
	}
	if err := daemon.reloadAllowNondistributableArtifacts(conf, attributes); err != nil {
		return err
	}
	if err := daemon.reloadInsecureRegistries(conf, attributes); err != nil {
		return err
	}
	if err := daemon.reloadRegistryMirrors(conf, attributes); err != nil {
		return err
	}
	if err := daemon.reloadLiveRestore(conf, attributes); err != nil {
		return err
	}

	return daemon.reloadNetworkDiagnosticPort(conf, attributes)
}

// reloadDebug updates configuration with the debug option
// and updates the passed attributes.
func (daemon *Daemon) reloadDebug(conf *config.Config, attributes map[string]string) {
	// update corresponding configuration
	if conf.IsValueSet("debug") {
		daemon.configStore.Debug = conf.Debug
	}
	// prepare reload event attributes with updatable configurations
	attributes["debug"] = strconv.FormatBool(daemon.configStore.Debug)
}

// reloadMaxConcurrentDownloadsAndUploads updates configuration with max concurrent
// download and upload options and updates the passed attributes.
func (daemon *Daemon) reloadMaxConcurrentDownloadsAndUploads(conf *config.Config, attributes map[string]string) {
	// We always "reset" as the cost is lightweight and easy to maintain.
	daemon.configStore.MaxConcurrentDownloads = config.DefaultMaxConcurrentDownloads
	daemon.configStore.MaxConcurrentUploads = config.DefaultMaxConcurrentUploads

	if conf.IsValueSet("max-concurrent-downloads") && conf.MaxConcurrentDownloads != 0 {
		daemon.configStore.MaxConcurrentDownloads = conf.MaxConcurrentDownloads
	}
	if conf.IsValueSet("max-concurrent-uploads") && conf.MaxConcurrentUploads != 0 {
		daemon.configStore.MaxConcurrentUploads = conf.MaxConcurrentUploads
	}
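
	// Propagate the new limits to the image service so that subsequent
	// pulls and pushes use them.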
	if daemon.imageService != nil {
		daemon.imageService.UpdateConfig(
			daemon.configStore.MaxConcurrentDownloads,
			daemon.configStore.MaxConcurrentUploads,
		)
	}

	// prepare reload event attributes with updatable configurations
	attributes["max-concurrent-downloads"] = strconv.Itoa(daemon.configStore.MaxConcurrentDownloads)
	attributes["max-concurrent-uploads"] = strconv.Itoa(daemon.configStore.MaxConcurrentUploads)
	logrus.Debug("Reset Max Concurrent Downloads: ", attributes["max-concurrent-downloads"])
	logrus.Debug("Reset Max Concurrent Uploads: ", attributes["max-concurrent-uploads"])
}

// reloadMaxDownloadAttempts updates configuration with the max download
// attempts option (the maximum number of attempts for each layer download
// when a connection is lost) and updates the passed attributes.
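//
// For example, with a daemon.json containing (illustrative value):
//
//	{"max-download-attempts": 3}
//
// the daemon gives up on a layer after three failed download attempts.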
func (daemon *Daemon) reloadMaxDownloadAttempts(conf *config.Config, attributes map[string]string) {
	// We always "reset" as the cost is lightweight and easy to maintain.
	daemon.configStore.MaxDownloadAttempts = config.DefaultDownloadAttempts
	if conf.IsValueSet("max-download-attempts") && conf.MaxDownloadAttempts != 0 {
		daemon.configStore.MaxDownloadAttempts = conf.MaxDownloadAttempts
	}

	// prepare reload event attributes with updatable configurations
	attributes["max-download-attempts"] = strconv.Itoa(daemon.configStore.MaxDownloadAttempts)
	logrus.Debug("Reset Max Download Attempts: ", attributes["max-download-attempts"])
}

// reloadShutdownTimeout updates configuration with the daemon shutdown timeout option
// and updates the passed attributes.
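// The shutdown timeout is the number of seconds the daemon waits for running
// containers to stop before it shuts down.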
func (daemon *Daemon) reloadShutdownTimeout(conf *config.Config, attributes map[string]string) {
	// update corresponding configuration
	if conf.IsValueSet("shutdown-timeout") {
		daemon.configStore.ShutdownTimeout = conf.ShutdownTimeout
		logrus.Debugf("Reset Shutdown Timeout: %d", daemon.configStore.ShutdownTimeout)
	}

	// prepare reload event attributes with updatable configurations
	attributes["shutdown-timeout"] = strconv.Itoa(daemon.configStore.ShutdownTimeout)
}

// reloadLabels updates configuration with engine labels
// and updates the passed attributes.
func (daemon *Daemon) reloadLabels(conf *config.Config, attributes map[string]string) error {
	// update corresponding configuration
	if conf.IsValueSet("labels") {
		daemon.configStore.Labels = conf.Labels
	}

	// prepare reload event attributes with updatable configurations
	if daemon.configStore.Labels != nil {
		labels, err := json.Marshal(daemon.configStore.Labels)
		if err != nil {
			return err
		}
		attributes["labels"] = string(labels)
	} else {
		attributes["labels"] = "[]"
	}
	return nil
}

// reloadAllowNondistributableArtifacts updates the configuration with allow-nondistributable-artifacts options
// and updates the passed attributes.
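// Nondistributable artifacts are layers whose license restricts distribution
// (for example, Windows base layers); the option lists registries to which
// such layers may be pushed regardless.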
func (daemon *Daemon) reloadAllowNondistributableArtifacts(conf *config.Config, attributes map[string]string) error {
	// Update corresponding configuration.
	if conf.IsValueSet("allow-nondistributable-artifacts") {
		daemon.configStore.AllowNondistributableArtifacts = conf.AllowNondistributableArtifacts
		if err := daemon.registryService.LoadAllowNondistributableArtifacts(conf.AllowNondistributableArtifacts); err != nil {
			return err
		}
	}

	// Prepare reload event attributes with updatable configurations.
	if daemon.configStore.AllowNondistributableArtifacts != nil {
		v, err := json.Marshal(daemon.configStore.AllowNondistributableArtifacts)
		if err != nil {
			return err
		}
		attributes["allow-nondistributable-artifacts"] = string(v)
	} else {
		attributes["allow-nondistributable-artifacts"] = "[]"
	}
	return nil
}

// reloadInsecureRegistries updates configuration with insecure registry option
// and updates the passed attributes.
func (daemon *Daemon) reloadInsecureRegistries(conf *config.Config, attributes map[string]string) error {
	// update corresponding configuration
	if conf.IsValueSet("insecure-registries") {
		daemon.configStore.InsecureRegistries = conf.InsecureRegistries
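		// Loading replaces the previously configured set of insecure
		// registries in the registry service.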
		if err := daemon.registryService.LoadInsecureRegistries(conf.InsecureRegistries); err != nil {
			return err
		}
	}

	// prepare reload event attributes with updatable configurations
	if daemon.configStore.InsecureRegistries != nil {
		insecureRegistries, err := json.Marshal(daemon.configStore.InsecureRegistries)
		if err != nil {
			return err
		}
		attributes["insecure-registries"] = string(insecureRegistries)
	} else {
		attributes["insecure-registries"] = "[]"
	}
	return nil
}

// reloadRegistryMirrors updates configuration with registry mirror options
// and updates the passed attributes.
func (daemon *Daemon) reloadRegistryMirrors(conf *config.Config, attributes map[string]string) error {
	// update corresponding configuration
	if conf.IsValueSet("registry-mirrors") {
		daemon.configStore.Mirrors = conf.Mirrors
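		// LoadMirrors validates the new mirror URLs and replaces the set of
		// mirrors consulted for Docker Hub pulls.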
		if err := daemon.registryService.LoadMirrors(conf.Mirrors); err != nil {
			return err
		}
	}

	// prepare reload event attributes with updatable configurations
	if daemon.configStore.Mirrors != nil {
		mirrors, err := json.Marshal(daemon.configStore.Mirrors)
		if err != nil {
			return err
		}
		attributes["registry-mirrors"] = string(mirrors)
	} else {
		attributes["registry-mirrors"] = "[]"
	}
	return nil
}

// reloadLiveRestore updates configuration with the live restore option
// and updates the passed attributes.
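// With live restore enabled, containers keep running while the daemon itself
// is stopped or restarted.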
func (daemon *Daemon) reloadLiveRestore(conf *config.Config, attributes map[string]string) error {
	// update corresponding configuration
	if conf.IsValueSet("live-restore") {
		daemon.configStore.LiveRestoreEnabled = conf.LiveRestoreEnabled
	}

	// prepare reload event attributes with updatable configurations
	attributes["live-restore"] = strconv.FormatBool(daemon.configStore.LiveRestoreEnabled)
	return nil
}

// reloadNetworkDiagnosticPort updates the network controller, starting the
// diagnostic server if the configured port is valid.
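// The diagnostic server listens on localhost (127.0.0.1) only.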
func (daemon *Daemon) reloadNetworkDiagnosticPort(conf *config.Config, attributes map[string]string) error {
	if conf == nil || daemon.netController == nil || !conf.IsValueSet("network-diagnostic-port") ||
		conf.NetworkDiagnosticPort < 1 || conf.NetworkDiagnosticPort > 65535 {
		// If there is no config make sure that the diagnostic is off
		if daemon.netController != nil {
			daemon.netController.StopDiagnostic()
		}
		return nil
	}
	// Enable the network diagnostic if the flag is set with a valid port within the range
	logrus.WithFields(logrus.Fields{"port": conf.NetworkDiagnosticPort, "ip": "127.0.0.1"}).Warn("Starting network diagnostic server")
	daemon.netController.StartDiagnostic(conf.NetworkDiagnosticPort)
	return nil
}

// reloadFeatures updates configuration with enabled/disabled features.
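//
// For example, a daemon.json containing:
//
//	{"features": {"buildkit": true}}
//
// would set the corresponding feature flag on reload.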
func (daemon *Daemon) reloadFeatures(conf *config.Config, attributes map[string]string) {
	// update corresponding configuration
	// note that we allow features option to be entirely unset
	daemon.configStore.Features = conf.Features

	// prepare reload event attributes with updatable configurations
	attributes["features"] = fmt.Sprintf("%v", daemon.configStore.Features)
}