mirror of
https://github.com/moby/moby.git
synced 2026-01-11 18:51:37 +00:00
- Move logging out of config.Reload and daemon.Reload itself, as it was not
the right place to know whether it was a "signal" that triggered the reload.
- Use Daemon.Config() to get the new config after reloading. This returns an
immutable copy of the daemon's config, so we can redact fields without having
to use an ad-hoc struct to shadow the underlying fields.
- Use structured logs for logging config reload events.
Before this (plain text):
INFO[2025-02-08T12:13:53.389649297Z] Got signal to reload configuration, reloading from: /etc/docker/daemon.json
INFO[2025-02-08T12:30:34.857691260Z] Reloaded configuration: {"pidfile":"/var/run/docker.pid","data-root":"/var/lib/docker","exec-root":"/var/run/docker","group":"docker","max-concurrent-downloads":3,"max-concurrent-uploads":5,"max-download-attempts":5,"shutdown-timeout":15,"hosts":["unix:///var/run/docker.sock"],"log-level":"info","log-format":"text","swarm-default-advertise-addr":"","swarm-raft-heartbeat-tick":0,"swarm-raft-election-tick":0,"metrics-addr":"","host-gateway-ips":[""],"log-driver":"json-file","mtu":1500,"ip":"0.0.0.0","icc":true,"iptables":true,"ip6tables":true,"ip-forward":true,"ip-masq":true,"userland-proxy":true,"userland-proxy-path":"/usr/local/bin/docker-proxy","default-address-pools":{"Values":null},"network-control-plane-mtu":1500,"experimental":false,"containerd":"/var/run/docker/containerd/containerd.sock","features":{"containerd-snapshotter":false},"builder":{"GC":{},"Entitlements":{}},"containerd-namespace":"moby","containerd-plugin-namespace":"plugins.moby","default-runtime":"runc","runtimes":{"crun":{"path":"/usr/local/bin/crun"}},"seccomp-profile":"builtin","default-shm-size":67108864,"default-ipc-mode":"private","default-cgroupns-mode":"private","resolv-conf":"/etc/resolv.conf","proxies":{}}
Before this (JSON logs):
{"level":"info","msg":"Reloaded configuration: {\"pidfile\":\"/var/run/docker.pid\",\"data-root\":\"/var/lib/docker\",\"exec-root\":\"/var/run/docker\",\"group\":\"docker\",\"max-concurrent-downloads\":3,\"max-concurrent-uploads\":5,\"max-download-attempts\":5,\"shutdown-timeout\":15,\"hosts\":[\"unix:///var/run/docker.sock\"],\"log-level\":\"info\",\"log-format\":\"json\",\"swarm-default-advertise-addr\":\"\",\"swarm-raft-heartbeat-tick\":0,\"swarm-raft-election-tick\":0,\"metrics-addr\":\"\",\"host-gateway-ips\":[\"\"],\"log-driver\":\"json-file\",\"mtu\":1500,\"ip\":\"0.0.0.0\",\"icc\":true,\"iptables\":true,\"ip6tables\":true,\"ip-forward\":true,\"ip-masq\":true,\"userland-proxy\":true,\"userland-proxy-path\":\"/usr/local/bin/docker-proxy\",\"default-address-pools\":{\"Values\":null},\"network-control-plane-mtu\":1500,\"experimental\":false,\"containerd\":\"/var/run/docker/containerd/containerd.sock\",\"features\":{\"containerd-snapshotter\":false},\"builder\":{\"GC\":{},\"Entitlements\":{}},\"containerd-namespace\":\"moby\",\"containerd-plugin-namespace\":\"plugins.moby\",\"default-runtime\":\"runc\",\"runtimes\":{\"crun\":{\"path\":\"/usr/local/bin/crun\"}},\"seccomp-profile\":\"builtin\",\"default-shm-size\":67108864,\"default-ipc-mode\":\"private\",\"default-cgroupns-mode\":\"private\",\"resolv-conf\":\"/etc/resolv.conf\",\"proxies\":{}}","time":"2025-02-08T12:24:38.600761054Z"}
After this (plain text):
INFO[2025-02-08T12:30:34.835953594Z] Got signal to reload configuration config-file=/etc/docker/daemon.json
INFO[2025-02-08T12:30:34.857614135Z] Reloaded configuration config="{\"pidfile\":\"/var/run/docker.pid\",\"data-root\":\"/var/lib/docker\",\"exec-root\":\"/var/run/docker\",\"group\":\"docker\",\"max-concurrent-downloads\":3,\"max-concurrent-uploads\":5,\"max-download-attempts\":5,\"shutdown-timeout\":15,\"hosts\":[\"unix:///var/run/docker.sock\"],\"log-level\":\"info\",\"log-format\":\"text\",\"swarm-default-advertise-addr\":\"\",\"swarm-raft-heartbeat-tick\":0,\"swarm-raft-election-tick\":0,\"metrics-addr\":\"\",\"host-gateway-ips\":[\"\"],\"log-driver\":\"json-file\",\"mtu\":1500,\"ip\":\"0.0.0.0\",\"icc\":true,\"iptables\":true,\"ip6tables\":true,\"ip-forward\":true,\"ip-masq\":true,\"userland-proxy\":true,\"userland-proxy-path\":\"/usr/local/bin/docker-proxy\",\"default-address-pools\":{\"Values\":null},\"network-control-plane-mtu\":1500,\"experimental\":false,\"containerd\":\"/var/run/docker/containerd/containerd.sock\",\"features\":{\"containerd-snapshotter\":false},\"builder\":{\"GC\":{},\"Entitlements\":{}},\"containerd-namespace\":\"moby\",\"containerd-plugin-namespace\":\"plugins.moby\",\"default-runtime\":\"runc\",\"runtimes\":{\"crun\":{\"path\":\"/usr/local/bin/crun\"}},\"seccomp-profile\":\"builtin\",\"default-shm-size\":67108864,\"default-ipc-mode\":\"private\",\"default-cgroupns-mode\":\"private\",\"resolv-conf\":\"/etc/resolv.conf\",\"proxies\":{}}"
After this (JSON logs):
{"config-file":"/etc/docker/daemon.json","level":"info","msg":"Got signal to reload configuration","time":"2025-02-08T12:24:38.589955637Z"}
{"config":"{\"pidfile\":\"/var/run/docker.pid\",\"data-root\":\"/var/lib/docker\",\"exec-root\":\"/var/run/docker\",\"group\":\"docker\",\"max-concurrent-downloads\":3,\"max-concurrent-uploads\":5,\"max-download-attempts\":5,\"shutdown-timeout\":15,\"hosts\":[\"unix:///var/run/docker.sock\"],\"log-level\":\"info\",\"log-format\":\"json\",\"swarm-default-advertise-addr\":\"\",\"swarm-raft-heartbeat-tick\":0,\"swarm-raft-election-tick\":0,\"metrics-addr\":\"\",\"host-gateway-ips\":[\"\"],\"log-driver\":\"json-file\",\"mtu\":1500,\"ip\":\"0.0.0.0\",\"icc\":true,\"iptables\":true,\"ip6tables\":true,\"ip-forward\":true,\"ip-masq\":true,\"userland-proxy\":true,\"userland-proxy-path\":\"/usr/local/bin/docker-proxy\",\"default-address-pools\":{\"Values\":null},\"network-control-plane-mtu\":1500,\"experimental\":false,\"containerd\":\"/var/run/docker/containerd/containerd.sock\",\"features\":{\"containerd-snapshotter\":false},\"builder\":{\"GC\":{},\"Entitlements\":{}},\"containerd-namespace\":\"moby\",\"containerd-plugin-namespace\":\"plugins.moby\",\"default-runtime\":\"runc\",\"runtimes\":{\"crun\":{\"path\":\"/usr/local/bin/crun\"}},\"seccomp-profile\":\"builtin\",\"default-shm-size\":67108864,\"default-ipc-mode\":\"private\",\"default-cgroupns-mode\":\"private\",\"resolv-conf\":\"/etc/resolv.conf\",\"proxies\":{}}","level":"info","msg":"Reloaded configuration","time":"2025-02-08T12:24:38.600736179Z"}
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
282 lines
10 KiB
Go
282 lines
10 KiB
Go
package daemon // import "github.com/docker/docker/daemon"
|
|
|
|
import (
|
|
"context"
|
|
"encoding/json"
|
|
"fmt"
|
|
"strconv"
|
|
|
|
"github.com/containerd/log"
|
|
"github.com/docker/docker/api/types/events"
|
|
"github.com/hashicorp/go-multierror"
|
|
"github.com/mitchellh/copystructure"
|
|
|
|
"github.com/docker/docker/daemon/config"
|
|
)
|
|
|
|
// reloadTxn is used to defer side effects of a config reload.
|
|
type reloadTxn struct {
|
|
onCommit, onRollback []func() error
|
|
}
|
|
|
|
// OnCommit defers a function to be called when a config reload is being finalized.
|
|
// The error returned from cb is purely informational.
|
|
func (tx *reloadTxn) OnCommit(cb func() error) {
|
|
tx.onCommit = append(tx.onCommit, cb)
|
|
}
|
|
|
|
// OnRollback defers a function to be called when a config reload is aborted.
|
|
// The error returned from cb is purely informational.
|
|
func (tx *reloadTxn) OnRollback(cb func() error) {
|
|
tx.onCommit = append(tx.onRollback, cb)
|
|
}
|
|
|
|
func (tx *reloadTxn) run(cbs []func() error) error {
|
|
tx.onCommit = nil
|
|
tx.onRollback = nil
|
|
|
|
var res *multierror.Error
|
|
for _, cb := range cbs {
|
|
res = multierror.Append(res, cb())
|
|
}
|
|
return res.ErrorOrNil()
|
|
}
|
|
|
|
// Commit calls all functions registered with OnCommit.
|
|
// Any errors returned by the functions are collated into a
|
|
// *github.com/hashicorp/go-multierror.Error value.
|
|
func (tx *reloadTxn) Commit() error {
|
|
return tx.run(tx.onCommit)
|
|
}
|
|
|
|
// Rollback calls all functions registered with OnRollback.
|
|
// Any errors returned by the functions are collated into a
|
|
// *github.com/hashicorp/go-multierror.Error value.
|
|
func (tx *reloadTxn) Rollback() error {
|
|
return tx.run(tx.onRollback)
|
|
}
|
|
|
|
// Reload modifies the live daemon configuration from conf.
// conf is assumed to be a validated configuration.
//
// These are the settings that Reload changes:
// - Platform runtime
// - Daemon debug log level
// - Daemon max concurrent downloads
// - Daemon max concurrent uploads
// - Daemon max download attempts
// - Daemon shutdown timeout (in seconds)
// - Cluster discovery (reconfigure and restart)
// - Daemon labels
// - Insecure registries
// - Registry mirrors
// - Daemon live restore
func (daemon *Daemon) Reload(conf *config.Config) error {
	// Serialize concurrent reloads; the two-phase protocol below is not safe
	// to run concurrently against the same daemon state.
	daemon.configReload.Lock()
	defer daemon.configReload.Unlock()
	// Deep-copy the current config so the reload functions can mutate a
	// scratch copy without affecting the live configuration until the swap.
	copied, err := copystructure.Copy(daemon.config().Config)
	if err != nil {
		return err
	}
	newCfg := &configStore{
		Config: copied.(config.Config),
	}

	// Collects the effective values to attach to the "reload" daemon event.
	attributes := map[string]string{}

	// Ideally reloading should be transactional: the reload either completes
	// successfully, or the daemon config and state are left untouched. We use a
	// two-phase commit protocol to achieve this. Any fallible reload operation is
	// split into two phases. The first phase performs all the fallible operations
	// and mutates the newCfg copy. The second phase atomically swaps newCfg into
	// the live daemon configuration and executes any commit functions the first
	// phase registered to apply the side effects. If any first-phase returns an
	// error, the reload transaction is rolled back by discarding newCfg and
	// executing any registered rollback functions.

	var txn reloadTxn
	for _, reload := range []func(txn *reloadTxn, newCfg *configStore, conf *config.Config, attributes map[string]string) error{
		// TODO(thaJeztah): most of these are defined as method, but don't use the daemon receiver; consider making them regular functions.
		daemon.reloadPlatform,
		daemon.reloadDebug,
		daemon.reloadMaxConcurrentDownloadsAndUploads,
		daemon.reloadMaxDownloadAttempts,
		daemon.reloadShutdownTimeout,
		daemon.reloadFeatures,
		daemon.reloadLabels,
		daemon.reloadRegistryConfig,
		daemon.reloadLiveRestore,
		daemon.reloadNetworkDiagnosticPort,
	} {
		if err := reload(&txn, newCfg, conf, attributes); err != nil {
			// First phase failed: discard newCfg and undo any staged side
			// effects. Both the original error and any rollback errors are
			// reported to the caller.
			if rollbackErr := txn.Rollback(); rollbackErr != nil {
				return multierror.Append(nil, err, rollbackErr)
			}
			return err
		}
	}

	// Second phase: atomically publish the new config, then apply the
	// deferred side effects registered by the first phase.
	daemon.configStore.Store(newCfg)
	daemon.LogDaemonEventWithAttributes(events.ActionReload, attributes)
	return txn.Commit()
}
|
|
|
|
// marshalAttributeSlice renders v as a JSON array string for use as a
// reload-event attribute value. A nil slice is rendered as "[]" instead of
// the "null" that json.Marshal would produce.
func marshalAttributeSlice(v []string) string {
	if v == nil {
		return "[]"
	}
	encoded, err := json.Marshal(v)
	if err != nil {
		panic(err) // Should never happen as the input type is fixed.
	}
	return string(encoded)
}
|
|
|
|
// reloadDebug updates configuration with Debug option
|
|
// and updates the passed attributes
|
|
func (daemon *Daemon) reloadDebug(txn *reloadTxn, newCfg *configStore, conf *config.Config, attributes map[string]string) error {
|
|
// update corresponding configuration
|
|
if conf.IsValueSet("debug") {
|
|
newCfg.Debug = conf.Debug
|
|
}
|
|
// prepare reload event attributes with updatable configurations
|
|
attributes["debug"] = strconv.FormatBool(newCfg.Debug)
|
|
return nil
|
|
}
|
|
|
|
// reloadMaxConcurrentDownloadsAndUploads updates configuration with max concurrent
|
|
// download and upload options and updates the passed attributes
|
|
func (daemon *Daemon) reloadMaxConcurrentDownloadsAndUploads(txn *reloadTxn, newCfg *configStore, conf *config.Config, attributes map[string]string) error {
|
|
// We always "reset" as the cost is lightweight and easy to maintain.
|
|
newCfg.MaxConcurrentDownloads = config.DefaultMaxConcurrentDownloads
|
|
newCfg.MaxConcurrentUploads = config.DefaultMaxConcurrentUploads
|
|
|
|
if conf.IsValueSet("max-concurrent-downloads") && conf.MaxConcurrentDownloads != 0 {
|
|
newCfg.MaxConcurrentDownloads = conf.MaxConcurrentDownloads
|
|
}
|
|
if conf.IsValueSet("max-concurrent-uploads") && conf.MaxConcurrentUploads != 0 {
|
|
newCfg.MaxConcurrentUploads = conf.MaxConcurrentUploads
|
|
}
|
|
txn.OnCommit(func() error {
|
|
if daemon.imageService != nil {
|
|
daemon.imageService.UpdateConfig(
|
|
newCfg.MaxConcurrentDownloads,
|
|
newCfg.MaxConcurrentUploads,
|
|
)
|
|
}
|
|
return nil
|
|
})
|
|
|
|
// prepare reload event attributes with updatable configurations
|
|
attributes["max-concurrent-downloads"] = strconv.Itoa(newCfg.MaxConcurrentDownloads)
|
|
attributes["max-concurrent-uploads"] = strconv.Itoa(newCfg.MaxConcurrentUploads)
|
|
log.G(context.TODO()).Debug("Reset Max Concurrent Downloads: ", attributes["max-concurrent-downloads"])
|
|
log.G(context.TODO()).Debug("Reset Max Concurrent Uploads: ", attributes["max-concurrent-uploads"])
|
|
return nil
|
|
}
|
|
|
|
// reloadMaxDownloadAttempts updates configuration with max concurrent
|
|
// download attempts when a connection is lost and updates the passed attributes
|
|
func (daemon *Daemon) reloadMaxDownloadAttempts(txn *reloadTxn, newCfg *configStore, conf *config.Config, attributes map[string]string) error {
|
|
// We always "reset" as the cost is lightweight and easy to maintain.
|
|
newCfg.MaxDownloadAttempts = config.DefaultDownloadAttempts
|
|
if conf.IsValueSet("max-download-attempts") && conf.MaxDownloadAttempts != 0 {
|
|
newCfg.MaxDownloadAttempts = conf.MaxDownloadAttempts
|
|
}
|
|
|
|
// prepare reload event attributes with updatable configurations
|
|
attributes["max-download-attempts"] = strconv.Itoa(newCfg.MaxDownloadAttempts)
|
|
log.G(context.TODO()).Debug("Reset Max Download Attempts: ", attributes["max-download-attempts"])
|
|
return nil
|
|
}
|
|
|
|
// reloadShutdownTimeout updates configuration with daemon shutdown timeout option
|
|
// and updates the passed attributes
|
|
func (daemon *Daemon) reloadShutdownTimeout(txn *reloadTxn, newCfg *configStore, conf *config.Config, attributes map[string]string) error {
|
|
// update corresponding configuration
|
|
if conf.IsValueSet("shutdown-timeout") {
|
|
newCfg.ShutdownTimeout = conf.ShutdownTimeout
|
|
log.G(context.TODO()).Debugf("Reset Shutdown Timeout: %d", newCfg.ShutdownTimeout)
|
|
}
|
|
|
|
// prepare reload event attributes with updatable configurations
|
|
attributes["shutdown-timeout"] = strconv.Itoa(newCfg.ShutdownTimeout)
|
|
return nil
|
|
}
|
|
|
|
// reloadLabels updates configuration with engine labels
|
|
// and updates the passed attributes
|
|
func (daemon *Daemon) reloadLabels(txn *reloadTxn, newCfg *configStore, conf *config.Config, attributes map[string]string) error {
|
|
// update corresponding configuration
|
|
if conf.IsValueSet("labels") {
|
|
newCfg.Labels = conf.Labels
|
|
}
|
|
|
|
// prepare reload event attributes with updatable configurations
|
|
attributes["labels"] = marshalAttributeSlice(newCfg.Labels)
|
|
return nil
|
|
}
|
|
|
|
// reloadRegistryConfig updates the configuration with registry options
|
|
// and updates the passed attributes.
|
|
func (daemon *Daemon) reloadRegistryConfig(txn *reloadTxn, newCfg *configStore, conf *config.Config, attributes map[string]string) error {
|
|
if conf.IsValueSet("insecure-registries") {
|
|
newCfg.ServiceOptions.InsecureRegistries = conf.InsecureRegistries
|
|
}
|
|
if conf.IsValueSet("registry-mirrors") {
|
|
newCfg.ServiceOptions.Mirrors = conf.Mirrors
|
|
}
|
|
|
|
commit, err := daemon.registryService.ReplaceConfig(newCfg.ServiceOptions)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
txn.OnCommit(func() error { commit(); return nil })
|
|
|
|
attributes["insecure-registries"] = marshalAttributeSlice(newCfg.ServiceOptions.InsecureRegistries)
|
|
attributes["registry-mirrors"] = marshalAttributeSlice(newCfg.ServiceOptions.Mirrors)
|
|
|
|
return nil
|
|
}
|
|
|
|
// reloadLiveRestore updates configuration with live restore option
|
|
// and updates the passed attributes
|
|
func (daemon *Daemon) reloadLiveRestore(txn *reloadTxn, newCfg *configStore, conf *config.Config, attributes map[string]string) error {
|
|
// update corresponding configuration
|
|
if conf.IsValueSet("live-restore") {
|
|
newCfg.LiveRestoreEnabled = conf.LiveRestoreEnabled
|
|
}
|
|
|
|
// prepare reload event attributes with updatable configurations
|
|
attributes["live-restore"] = strconv.FormatBool(newCfg.LiveRestoreEnabled)
|
|
return nil
|
|
}
|
|
|
|
// reloadNetworkDiagnosticPort updates the network controller starting the diagnostic if the config is valid
|
|
func (daemon *Daemon) reloadNetworkDiagnosticPort(txn *reloadTxn, newCfg *configStore, conf *config.Config, attributes map[string]string) error {
|
|
txn.OnCommit(func() error {
|
|
if conf == nil || daemon.netController == nil || !conf.IsValueSet("network-diagnostic-port") || conf.NetworkDiagnosticPort == 0 {
|
|
// If there is no config make sure that the diagnostic is off
|
|
if daemon.netController != nil {
|
|
daemon.netController.StopDiagnostic()
|
|
}
|
|
return nil
|
|
}
|
|
// Enable the network diagnostic if the flag is set with a valid port within the range
|
|
daemon.netController.StartDiagnostic(conf.NetworkDiagnosticPort)
|
|
return nil
|
|
})
|
|
return nil
|
|
}
|
|
|
|
// reloadFeatures updates configuration with enabled/disabled features
|
|
func (daemon *Daemon) reloadFeatures(txn *reloadTxn, newCfg *configStore, conf *config.Config, attributes map[string]string) error {
|
|
// update corresponding configuration
|
|
// note that we allow features option to be entirely unset
|
|
newCfg.Features = conf.Features
|
|
|
|
// prepare reload event attributes with updatable configurations
|
|
attributes["features"] = fmt.Sprintf("%v", newCfg.Features)
|
|
return nil
|
|
}
|