The Container.State struct holds the container's state, and most of
its fields are expected to change dynamically. Some of these state changes
are explicit, for example, setting the container to be "stopped". Other
state changes are implicit, for example due to the container's process
exiting or being "OOM"-killed by the kernel.
The distinction between explicit ("desired") state changes and observed
("actual") state is sometimes vague; for some properties, we clearly
separated them. For example, if a user requested the container to be
stopped or restarted, we store that on the Container object itself:

    HasBeenManuallyStopped   bool // used for unless-stopped restart policy
    HasBeenManuallyRestarted bool `json:"-"` // used to distinguish restart caused by restart policy from the manual one
Other properties are more ambiguous, such as "HasBeenStartedBefore" and
"RestartCount", which are stored on the Container (and persisted to
disk), but may be more related to "actual" state, and likely should
not be persisted:

    RestartCount         int
    HasBeenStartedBefore bool
Given that (per the above) concurrency must be taken into account, most
changes to the `container.State` struct should be protected; here's where
things get blurry. While the `State` type provides various accessor methods,
only some of them take concurrency into account; for example, [State.IsRunning]
and [State.GetPID] acquire a lock, whereas [State.ExitCodeValue] does not.
Even the (commonly used) [State.StateString] has no locking at all.

The way this is handled is error-prone; [container.State] contains a mutex,
and it's exported. Given that it's embedded in the [container.Container]
struct, it's also exposed as an exported mutex for the container. The
assumption here is that by "merging" the two, the caller is expected to
acquire a lock when either the container _or_ its state must be mutated.
However, because some methods on `container.State` handle their own locking,
consumers must be deeply familiar with the internals, especially if changes
to both the `Container` AND the `Container.State` must be made. This is
amplified further because some (exported!) methods, such as
[container.SetRunning], mutate multiple fields but don't acquire a lock
(and so expect the caller to hold one), whereas their (also exported)
counterparts (e.g. [State.IsRunning]) do.
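To make this concrete, below is a small, self-contained sketch. It does not
use the real moby types; the `State`/`Container` pair and their fields are a
simplified model of the pattern described above, showing that mutation
requires the caller to hold the shared mutex, while querying through a
self-locking accessor must be done without holding it:

```go
package main

import (
	"fmt"
	"sync"
)

// State is a simplified model of daemon/container.State: the mutex is
// embedded and exported, and only some methods take care of locking.
type State struct {
	sync.Mutex
	Running bool
	Pid     int
}

// IsRunning locks internally (mirroring the behavior described for the
// real State.IsRunning).
func (s *State) IsRunning() bool {
	s.Lock()
	defer s.Unlock()
	return s.Running
}

// SetRunning mutates multiple fields but does NOT lock; the caller is
// expected to hold the mutex (mirroring the behavior described for the
// real State.SetRunning).
func (s *State) SetRunning(pid int) {
	s.Running = true
	s.Pid = pid
}

// Container embeds State, so ctr.Lock() and ctr.State.Lock() refer to the
// same mutex.
type Container struct {
	State
	Name string
}

func main() {
	ctr := &Container{Name: "example"}

	// Mutation: hold the shared lock around the non-locking setter.
	ctr.Lock()
	ctr.State.SetRunning(4242)
	ctr.Unlock()

	// Query: the self-locking getter must be called WITHOUT holding the
	// lock; calling it with the lock held deadlocks on a plain sync.Mutex.
	fmt.Println(ctr.Name, "running:", ctr.State.IsRunning())
}
```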
It should be clear from the above that this needs some architectural
changes: a clearer separation between "desired" and "actual" state (opening
the potential to update the container's config without manually touching
its `State`), possibly a method to obtain a read-only copy of the current
state (for those querying state), and reviewing which fields belong where
(and should be persisted to disk, or only remain in memory).
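Purely to illustrate the "read-only copy" idea (hypothetical; not part of
this PR or of the current code base), and reusing the model types from the
sketch above, such a snapshot could look roughly like this:

```go
// StateSnapshot is a hypothetical, plain-value copy of a few State fields;
// which fields it would carry is illustrative only.
type StateSnapshot struct {
	Running bool
	Pid     int
}

// Snapshot is a hypothetical accessor: it copies the fields while holding
// the lock, so read-only consumers never observe a half-updated State and
// never need to touch the mutex themselves.
func (s *State) Snapshot() StateSnapshot {
	s.Lock()
	defer s.Unlock()
	return StateSnapshot{
		Running: s.Running,
		Pid:     s.Pid,
	}
}
```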
This PR preserves the status quo; it makes no structural changes, other
than exposing where we access the container's state. Where previously the
State fields and methods were referred to as "part of the container"
(e.g. `ctr.IsRunning()` or `ctr.Running`), we now explicitly reference
the embedded `State` (`ctr.State.IsRunning`, `ctr.State.Running`).
The exception (for now) is the mutex, which is still referenced through
the embedded struct (`ctr.Lock()` instead of `ctr.State.Lock()`); this is
(mostly) by design, as the lock is meant to protect the container and
everything in it (including its `State`).
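Using the model types from the first sketch, the convention after this
change reads roughly as follows (illustrative only; see the diff for the
real call sites):

```go
// pidIfRunning reads raw State fields, so it takes the container's lock
// itself; the State is referenced explicitly (ctr.State.Running) rather
// than through promotion (ctr.Running), while locking still goes through
// the container.
func pidIfRunning(ctr *Container) (int, bool) {
	ctr.Lock()
	defer ctr.Unlock()
	if !ctr.State.Running {
		return 0, false
	}
	return ctr.State.Pid, true
}
```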
[State.IsRunning]: c4afa77157/daemon/container/state.go (L205-L209)
[State.GetPID]: c4afa77157/daemon/container/state.go (L211-L216)
[State.ExitCodeValue]: c4afa77157/daemon/container/state.go (L218-L228)
[State.StateString]: c4afa77157/daemon/container/state.go (L102-L131)
[container.State]: c4afa77157/daemon/container/state.go (L15-L23)
[container.Container]: c4afa77157/daemon/container/container.go (L67-L75)
[container.SetRunning]: c4afa77157/daemon/container/state.go (L230-L277)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
package daemon

import (
	"context"
	"strconv"
	"time"

	"github.com/containerd/containerd/v2/pkg/tracing"
	"github.com/containerd/log"
	containertypes "github.com/moby/moby/api/types/container"
	"github.com/moby/moby/v2/daemon/config"
	"github.com/moby/moby/v2/daemon/container"
	"github.com/moby/moby/v2/daemon/internal/timestamp"
	"github.com/moby/moby/v2/daemon/logger"
	logcache "github.com/moby/moby/v2/daemon/logger/loggerutils/cache"
	"github.com/moby/moby/v2/daemon/server/backend"
	"github.com/moby/moby/v2/errdefs"
	"github.com/pkg/errors"
)

// ContainerLogs copies the container's log channel to the channel provided in
// the config. If ContainerLogs returns an error, no messages have been copied.
// and the channel will be closed without data.
//
// if it returns nil, the config channel will be active and return log
// messages until it runs out or the context is canceled.
func (daemon *Daemon) ContainerLogs(ctx context.Context, containerName string, config *backend.ContainerLogsOptions) (messages <-chan *backend.LogMessage, isTTY bool, retErr error) {
	ctx, span := tracing.StartSpan(ctx, "daemon.ContainerLogs")
	defer func() {
		span.SetStatus(retErr)
		span.End()
	}()

	lg := log.G(ctx).WithFields(log.Fields{
		"module":    "daemon",
		"method":    "(*Daemon).ContainerLogs",
		"container": containerName,
	})

	if !config.ShowStdout && !config.ShowStderr {
		return nil, false, errdefs.InvalidParameter(errors.New("You must choose at least one stream"))
	}
	ctr, err := daemon.GetContainer(containerName)
	if err != nil {
		return nil, false, err
	}

	if ctr.State.RemovalInProgress || ctr.State.Dead {
		return nil, false, errdefs.Conflict(errors.New("can not get logs from container which is dead or marked for removal"))
	}

	if ctr.HostConfig.LogConfig.Type == "none" {
		return nil, false, logger.ErrReadLogsNotSupported{}
	}

	cLog, cLogCreated, err := daemon.getLogger(ctr)
	if err != nil {
		return nil, false, err
	}
	if cLogCreated {
		defer func() {
			if retErr != nil {
				if err = cLog.Close(); err != nil {
					log.G(ctx).Errorf("Error closing logger: %v", err)
				}
			}
		}()
	}

	logReader, ok := cLog.(logger.LogReader)
	if !ok {
		return nil, false, logger.ErrReadLogsNotSupported{}
	}

	tailLines, err := strconv.Atoi(config.Tail)
	if err != nil {
		tailLines = -1
	}

	var since time.Time
	if config.Since != "" {
		s, n, err := timestamp.ParseTimestamps(config.Since, 0)
		if err != nil {
			return nil, false, err
		}
		since = time.Unix(s, n)
	}

	var until time.Time
	if config.Until != "" && config.Until != "0" {
		s, n, err := timestamp.ParseTimestamps(config.Until, 0)
		if err != nil {
			return nil, false, err
		}
		until = time.Unix(s, n)
	}

	follow := config.Follow && !cLogCreated
	logs := logReader.ReadLogs(ctx, logger.ReadConfig{
		Since:  since,
		Until:  until,
		Tail:   tailLines,
		Follow: follow,
	})

	// past this point, we can't possibly return any errors, so we can just
	// start a goroutine and return to tell the caller not to expect errors
	// (if the caller wants to give up on logs, they have to cancel the context)
	// this goroutine functions as a shim between the logger and the caller.
	messageChan := make(chan *backend.LogMessage, 1)
	go func() {
		if cLogCreated {
			defer func() {
				if err = cLog.Close(); err != nil {
					log.G(ctx).Errorf("Error closing logger: %v", err)
				}
			}()
		}
		// signal that the log reader is gone
		defer logs.ConsumerGone()

		// close the messages channel. closing is the only way to signal above
		// that we're doing with logs (other than context cancel i guess).
		defer close(messageChan)

		lg.Debug("begin logs")
		defer lg.Debugf("end logs (%v)", ctx.Err())

		for {
			select {
			// i do not believe as the system is currently designed any error
			// is possible, but we should be prepared to handle it anyway. if
			// we do get an error, copy only the error field to a new object so
			// we don't end up with partial data in the other fields
			case err := <-logs.Err:
				lg.Errorf("Error streaming logs: %v", err)
				select {
				case <-ctx.Done():
				case messageChan <- &backend.LogMessage{Err: err}:
				}
				return
			case <-ctx.Done():
				return
			case msg, ok := <-logs.Msg:
				// there is some kind of pool or ring buffer in the logger that
				// produces these messages, and a possible future optimization
				// might be to use that pool and reuse message objects
				if !ok {
					return
				}
				m := msg.AsLogMessage() // just a pointer conversion, does not copy data

				// there could be a case where the reader stops accepting
				// messages and the context is canceled. we need to check that
				// here, or otherwise we risk blocking forever on the message
				// send.
				select {
				case <-ctx.Done():
					return
				case messageChan <- m:
				}
			}
		}
	}()
	return messageChan, ctr.Config.Tty, nil
}

func (daemon *Daemon) getLogger(container *container.Container) (_ logger.Logger, created bool, _ error) {
	var logDriver logger.Logger
	container.Lock()
	if container.State.Running {
		logDriver = container.LogDriver
	}
	container.Unlock()
	if logDriver != nil {
		return logDriver, false, nil
	}
	logDriver, err := container.StartLogger()
	if err != nil {
		// Let's assume a driver was created, but failed to start;
		// see https://github.com/moby/moby/pull/49493#discussion_r1979120968
		//
		// TODO(thaJeztah): check if we're not leaking resources if a logger was created, but failed to start.
		return nil, true, err
	}
	return logDriver, true, nil
}

// mergeAndVerifyLogConfig merges the daemon log config to the container's log config if the container's log driver is not specified.
func (daemon *Daemon) mergeAndVerifyLogConfig(cfg *containertypes.LogConfig) error {
	if cfg.Type == "" {
		cfg.Type = daemon.defaultLogConfig.Type
	}

	if cfg.Config == nil {
		cfg.Config = make(map[string]string)
	}

	if cfg.Type == daemon.defaultLogConfig.Type {
		for k, v := range daemon.defaultLogConfig.Config {
			if _, ok := cfg.Config[k]; !ok {
				cfg.Config[k] = v
			}
		}
	}

	logcache.MergeDefaultLogConfig(cfg.Config, daemon.defaultLogConfig.Config)

	return logger.ValidateLogOpts(cfg.Type, cfg.Config)
}

func defaultLogConfig(cfg *config.Config) (containertypes.LogConfig, error) {
	if len(cfg.LogConfig.Config) > 0 {
		if err := logger.ValidateLogOpts(cfg.LogConfig.Type, cfg.LogConfig.Config); err != nil {
			return containertypes.LogConfig{}, errors.Wrap(err, "failed to set log opts")
		}
	}
	return containertypes.LogConfig{
		Type:   cfg.LogConfig.Type,
		Config: cfg.LogConfig.Config,
	}, nil
}
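As a usage note (not part of the file above): a minimal, hypothetical
consumer of the channel returned by ContainerLogs could look like the sketch
below. The printLogs helper and its parameters are made up for illustration,
and it assumes the LogMessage's Line field alongside the Err field used
above; it relies only on the documented contract that the channel is closed
once the logs run out or the context is canceled.

```go
// printLogs is a hypothetical helper (not part of the daemon package's API).
func printLogs(ctx context.Context, d *Daemon, name string, opts *backend.ContainerLogsOptions) error {
	msgs, tty, err := d.ContainerLogs(ctx, name, opts)
	if err != nil {
		// Per the contract above, no messages were copied and the channel
		// is closed without data.
		return err
	}
	_ = tty // whether the container uses a TTY (streams are not multiplexed)

	// Ranging is enough: the channel is closed when the logs run out or
	// when ctx is canceled.
	for m := range msgs {
		if m.Err != nil {
			// Error messages carry only the Err field (see ContainerLogs).
			return m.Err
		}
		fmt.Printf("%s\n", m.Line)
	}
	return nil
}
```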