explicitly access Container.State instead of through embedded struct

The Container.State struct holds the container's state, and most of
its fields are expected to change dynamically. Some of these state changes
are explicit, for example, setting the container to be "stopped". Other
state changes are implicit, for example due to the container's process
exiting or being "OOM-killed" by the kernel.

The distinction between explicit ("desired") state changes and "actual"
state is sometimes vague; for some properties, we clearly separated
them. For example, if a user requested the container to be stopped or
restarted, we store that on the Container object itself;

    HasBeenManuallyStopped   bool // used for unless-stopped restart policy
    HasBeenManuallyRestarted bool `json:"-"` // used to distinguish restart caused by restart policy from the manual one

Other properties are more ambiguous, such as "HasBeenStartedBefore" and
"RestartCount": these are stored on the Container (and persisted to
disk), but are more related to "actual" state, and likely should
not be persisted;

    RestartCount             int
    HasBeenStartedBefore     bool

Given that (per the above) concurrency must be taken into account, most
changes to the `container.State` struct should be protected; here's where
things get blurry. While the `State` type provides various accessor methods,
only some of them take concurrency into account; for example, [State.IsRunning]
and [State.GetPID] acquire a lock, whereas [State.ExitCodeValue] does not.
Even the (commonly used) [State.StateString] has no locking at all.
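
To make the hazard concrete, here is a minimal, runnable sketch of the
pattern (simplified stand-ins, not the real types from
daemon/container/state.go):

    package main

    import (
        "fmt"
        "sync"
    )

    // State embeds its mutex, mirroring the real State in shape only.
    type State struct {
        sync.Mutex
        Running  bool
        exitCode int
    }

    // IsRunning locks internally, like the real [State.IsRunning].
    func (s *State) IsRunning() bool {
        s.Lock()
        defer s.Unlock()
        return s.Running
    }

    // ExitCodeValue does not lock, like the real [State.ExitCodeValue];
    // the caller is responsible for holding the lock.
    func (s *State) ExitCodeValue() int {
        return s.exitCode
    }

    // Container embeds State, so Container.Lock() is State.Lock().
    type Container struct {
        State
    }

    func main() {
        ctr := &Container{}

        ctr.Lock()
        running := ctr.State.Running // safe: we hold the lock
        // ctr.State.IsRunning() here would deadlock: sync.Mutex is not reentrant.
        ctr.Unlock()

        // Racy if another goroutine mutates state concurrently; the caller
        // must know that this accessor does not lock by itself.
        fmt.Println(running, ctr.State.ExitCodeValue())
    }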

The way this is handled is error-prone: [container.State] contains a
mutex, and that mutex is exported. Given that it's embedded in the
[container.Container] struct, it's also exposed as an exported mutex for
the container. The assumption here is that by "merging" the two, the
caller acquires a single lock when either the container _or_ its state
must be mutated. However, because some methods on `container.State`
handle their own locking, consumers must be deeply familiar with the
internals, especially when changes to both the `Container` AND
`Container.State` must be made. This is amplified further by some
(exported!) methods, such as [container.SetRunning], mutating multiple
fields without acquiring a lock (thus expecting the caller to hold one),
whereas their (also exported) counterparts (e.g. [State.IsRunning]) do.
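
As an illustration of the resulting calling convention (mirroring the
ConnectToNetwork hunk in the diff below): a single `Lock()` guards both
the Container and its embedded State, but while it is held only raw
fields and non-locking accessors may be used:

    ctr.Lock()
    defer ctr.Unlock()

    if !ctr.State.Running { // raw field read: fine, the lock is held
        if ctr.State.RemovalInProgress || ctr.State.Dead {
            return errRemovalContainer(ctr.ID)
        }
    }
    // Calling ctr.State.IsRunning() here instead would deadlock, because
    // it acquires the same (non-reentrant) mutex again.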

It should be clear from the above that this needs some architectural
changes: a clearer separation between "desired" and "actual" state (opening
the potential to update the container's config without manually touching
its `State`), possibly a method to obtain a read-only copy of the current
state (for those querying state), and a review of which fields belong where
(and which should be persisted to disk, or only remain in memory).
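
For illustration, the read-only copy could look something like the
following; `StateSnapshot` and `Snapshot()` are hypothetical and do not
exist in the codebase today:

    // Snapshot returns a read-only copy of the mutable state, taken under
    // the lock, so callers can inspect it without further synchronization.
    func (s *State) Snapshot() StateSnapshot {
        s.Lock()
        defer s.Unlock()
        return StateSnapshot{
            Running:    s.Running,
            Paused:     s.Paused,
            Restarting: s.Restarting,
            Pid:        s.Pid,
        }
    }

    // StateSnapshot is a plain value; copies are safe to read concurrently.
    type StateSnapshot struct {
        Running    bool
        Paused     bool
        Restarting bool
        Pid        int
    }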

This PR preserves the status quo; it makes no structural changes, other
than making explicit where we access the container's state. Where
previously the State fields and methods were referred to as "part of the
container" (e.g. `ctr.IsRunning()` or `ctr.Running`), we now explicitly
reference the embedded `State` (`ctr.State.IsRunning()`, `ctr.State.Running`).

The exception (for now) is the mutex, which is still referenced through
the embedded struct (`ctr.Lock()` instead of `ctr.State.Lock()`), as this
is (mostly) by design: the lock protects the container and what's in it
(including its `State`).

[State.IsRunning]: c4afa77157/daemon/container/state.go (L205-L209)
[State.GetPID]: c4afa77157/daemon/container/state.go (L211-L216)
[State.ExitCodeValue]: c4afa77157/daemon/container/state.go (L218-L228)
[State.StateString]: c4afa77157/daemon/container/state.go (L102-L131)
[container.State]: c4afa77157/daemon/container/state.go (L15-L23)
[container.Container]: c4afa77157/daemon/container/container.go (L67-L75)
[container.SetRunning]: c4afa77157/daemon/container/state.go (L230-L277)

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
Sebastiaan van Stijn
2025-07-27 18:07:25 +02:00
parent 0967d6ea6b
commit 0df791cb72
38 changed files with 158 additions and 156 deletions

View File

@@ -302,7 +302,7 @@ func (daemon *Daemon) containerCopy(container *container.Container, resource str
 // loaded inside the utility VM, not the host.
 // IMPORTANT: The container lock MUST be held when calling this function.
 func (daemon *Daemon) isOnlineFSOperationPermitted(ctr *container.Container) error {
-	if !ctr.Running {
+	if !ctr.State.Running {
 		return nil
 	}

View File

@@ -33,10 +33,10 @@ func (daemon *Daemon) ContainerAttach(prefixOrName string, req *backend.Containe
 	if err != nil {
 		return err
 	}
-	if ctr.IsPaused() {
+	if ctr.State.IsPaused() {
 		return errdefs.Conflict(fmt.Errorf("container %s is paused, unpause the container before attach", prefixOrName))
 	}
-	if ctr.IsRestarting() {
+	if ctr.State.IsRestarting() {
 		return errdefs.Conflict(fmt.Errorf("container %s is restarting, wait until the container is running", prefixOrName))
 	}
@@ -192,7 +192,7 @@ func (daemon *Daemon) containerAttach(ctr *container.Container, cfg *stream.Atta
 	if ctr.Config.StdinOnce && !ctr.Config.Tty {
 		// Wait for the container to stop before returning.
-		waitChan := ctr.Wait(context.Background(), containertypes.WaitConditionNotRunning)
+		waitChan := ctr.State.Wait(context.Background(), containertypes.WaitConditionNotRunning)
 		defer func() {
 			<-waitChan // Ignore returned exit code.
 		}()

View File

@@ -18,7 +18,7 @@ func (daemon *Daemon) ContainerChanges(ctx context.Context, name string) ([]arch
 		return nil, err
 	}
-	if isWindows && container.IsRunning() {
+	if isWindows && container.State.IsRunning() {
 		return nil, errors.New("Windows does not support diff of a running container")
 	}

View File

@@ -132,19 +132,19 @@ func (daemon *Daemon) CreateImageFromContainer(ctx context.Context, name string,
 	}
 	// It is not possible to commit a running container on Windows
-	if isWindows && container.IsRunning() {
+	if isWindows && container.State.IsRunning() {
 		return "", errors.Errorf("%+v does not support commit of a running container", runtime.GOOS)
 	}
-	if container.IsDead() {
+	if container.State.IsDead() {
 		return "", errdefs.Conflict(fmt.Errorf("You cannot commit container %s which is Dead", container.ID))
 	}
-	if container.IsRemovalInProgress() {
+	if container.State.IsRemovalInProgress() {
 		return "", errdefs.Conflict(fmt.Errorf("You cannot commit container %s which is being removed", container.ID))
 	}
-	if c.Pause && !container.IsPaused() {
+	if c.Pause && !container.State.IsPaused() {
 		daemon.containerPause(container)
 		defer daemon.containerUnpause(container)
 	}

View File

@@ -535,7 +535,11 @@ func (container *Container) GetExecIDs() []string {
 // ShouldRestart decides whether the daemon should restart the container or not.
 // This is based on the container's restart policy.
 func (container *Container) ShouldRestart() bool {
-	shouldRestart, _, _ := container.RestartManager().ShouldRestart(uint32(container.ExitCode()), container.HasBeenManuallyStopped, container.FinishedAt.Sub(container.StartedAt))
+	shouldRestart, _, _ := container.RestartManager().ShouldRestart(
+		uint32(container.State.ExitCode()),
+		container.HasBeenManuallyStopped,
+		container.State.FinishedAt.Sub(container.State.StartedAt),
+	)
 	return shouldRestart
 }
@@ -837,11 +841,11 @@ func (container *Container) RestoreTask(ctx context.Context, client libcontainer
 	container.Lock()
 	defer container.Unlock()
 	var err error
-	container.ctr, err = client.LoadContainer(ctx, container.ID)
+	container.State.ctr, err = client.LoadContainer(ctx, container.ID)
 	if err != nil {
 		return err
 	}
-	container.task, err = container.ctr.AttachTask(ctx, container.InitializeStdio)
+	container.State.task, err = container.State.ctr.AttachTask(ctx, container.InitializeStdio)
 	if err != nil && !cerrdefs.IsNotFound(err) {
 		return err
 	}
@@ -857,10 +861,10 @@ func (container *Container) RestoreTask(ctx context.Context, client libcontainer
 //
 // The container lock must be held when calling this method.
 func (container *Container) GetRunningTask() (libcontainerdtypes.Task, error) {
-	if !container.Running {
+	if !container.State.Running {
 		return nil, errdefs.Conflict(fmt.Errorf("container %s is not running", container.ID))
 	}
-	tsk, ok := container.Task()
+	tsk, ok := container.State.Task()
 	if !ok {
 		return nil, errdefs.System(errors.WithStack(fmt.Errorf("container %s is in Running state but has no containerd Task set", container.ID)))
 	}

View File

@@ -294,14 +294,9 @@ func (v *View) GetAllNames() map[string][]string {
 func (v *View) transform(ctr *Container) *Snapshot {
 	health := container.NoHealthcheck
 	failingStreak := 0
-	if ctr.Health != nil {
-		health = ctr.Health.Status()
-		failingStreak = ctr.Health.FailingStreak
-	}
-	healthSummary := &container.HealthSummary{
-		Status:        health,
-		FailingStreak: failingStreak,
+	if ctr.State.Health != nil {
+		health = ctr.State.Health.Status()
+		failingStreak = ctr.State.Health.FailingStreak
 	}
 	snapshot := &Snapshot{
@@ -313,20 +308,23 @@ func (v *View) transform(ctr *Container) *Snapshot {
 			Mounts: ctr.GetMountPoints(),
 			State:  ctr.State.StateString(),
 			Status: ctr.State.String(),
-			Health: healthSummary,
+			Health: &container.HealthSummary{
+				Status:        health,
+				FailingStreak: failingStreak,
+			},
 			Created: ctr.Created.Unix(),
 		},
 		CreatedAt: ctr.Created,
-		StartedAt: ctr.StartedAt,
+		StartedAt: ctr.State.StartedAt,
 		Name:      ctr.Name,
-		Pid:       ctr.Pid,
+		Pid:       ctr.State.Pid,
 		Managed:      ctr.Managed,
 		ExposedPorts: make(container.PortSet),
 		PortBindings: make(container.PortSet),
 		Health:       health,
-		Running:      ctr.Running,
-		Paused:       ctr.Paused,
-		ExitCode:     ctr.ExitCode(),
+		Running:      ctr.State.Running,
+		Paused:       ctr.State.Paused,
+		ExitCode:     ctr.State.ExitCode(),
 	}
 	if snapshot.Names == nil {

View File

@@ -41,10 +41,10 @@ func TestViewAll(t *testing.T) {
 	one := newContainer(t, tmpDir)
 	two := newContainer(t, tmpDir)
-	one.Pid = 10
+	one.State.Pid = 10
 	assert.NilError(t, one.CheckpointTo(context.Background(), db))
-	two.Pid = 20
+	two.State.Pid = 20
 	assert.NilError(t, two.CheckpointTo(context.Background(), db))
 	all, err := db.Snapshot().All()
@@ -56,8 +56,8 @@ func TestViewAll(t *testing.T) {
 		byID[c.ID] = c.Pid
 	}
 	expected := map[string]int{
-		one.ID: one.Pid,
-		two.ID: two.Pid,
+		one.ID: one.State.Pid,
+		two.ID: two.State.Pid,
 	}
 	assert.DeepEqual(t, expected, byID)
 }
@@ -146,7 +146,7 @@ func TestViewWithHealthCheck(t *testing.T) {
 	tmpDir := t.TempDir()
 	one := newContainer(t, tmpDir)
-	one.Health = &Health{
+	one.State.Health = &Health{
 		Health: container.Health{
 			Status: container.Starting,
 		},

View File

@@ -939,10 +939,10 @@ func (daemon *Daemon) getNetworkedContainer(containerID, connectedContainerPrefi
 		// FIXME (thaJeztah): turns out we don't validate "--network container:<self>" during container create!
 		return nil, errdefs.System(errdefs.InvalidParameter(errors.New("cannot join own network namespace")))
 	}
-	if !nc.IsRunning() {
-		return nil, errdefs.Conflict(fmt.Errorf("cannot join network namespace of a non running container: container %s is %s", strings.TrimPrefix(nc.Name, "/"), nc.StateString()))
+	if !nc.State.IsRunning() {
+		return nil, errdefs.Conflict(fmt.Errorf("cannot join network namespace of a non running container: container %s is %s", strings.TrimPrefix(nc.Name, "/"), nc.State.StateString()))
 	}
-	if nc.IsRestarting() {
+	if nc.State.IsRestarting() {
 		return nil, fmt.Errorf("cannot join network namespace of container: %w", errContainerIsRestarting(connectedContainerPrefixOrName))
 	}
 	return nc, nil
@@ -1015,8 +1015,8 @@ func (daemon *Daemon) ConnectToNetwork(ctx context.Context, ctr *container.Conta
 	ctr.Lock()
 	defer ctr.Unlock()
-	if !ctr.Running {
-		if ctr.RemovalInProgress || ctr.Dead {
+	if !ctr.State.Running {
+		if ctr.State.RemovalInProgress || ctr.State.Dead {
 			return errRemovalContainer(ctr.ID)
 		}
@@ -1048,8 +1048,8 @@ func (daemon *Daemon) DisconnectFromNetwork(ctx context.Context, ctr *container.
 	ctr.Lock()
 	defer ctr.Unlock()
-	if !ctr.Running || (err != nil && force) {
-		if ctr.RemovalInProgress || ctr.Dead {
+	if !ctr.State.Running || (err != nil && force) {
+		if ctr.State.RemovalInProgress || ctr.State.Dead {
 			return errRemovalContainer(ctr.ID)
 		}
 		// In case networkName is resolved we will use n.Name()

View File

@@ -37,7 +37,7 @@ func (daemon *Daemon) setupLinkedContainers(ctr *container.Container) ([]string,
 	var env []string
 	for linkAlias, child := range daemon.linkIndex.children(ctr) {
-		if !child.IsRunning() {
+		if !child.State.IsRunning() {
 			return nil, fmt.Errorf("Cannot link to a non running container: %s AS %s", child.Name, linkAlias)
 		}
@@ -153,10 +153,10 @@ func (daemon *Daemon) getIPCContainer(id string) (*container.Container, error) {
 	if err != nil {
 		return nil, errdefs.InvalidParameter(err)
 	}
-	if !ctr.IsRunning() {
+	if !ctr.State.IsRunning() {
 		return nil, errNotRunning(id)
 	}
-	if ctr.IsRestarting() {
+	if ctr.State.IsRestarting() {
 		return nil, errContainerIsRestarting(id)
 	}
@@ -177,10 +177,10 @@ func (daemon *Daemon) getPIDContainer(id string) (*container.Container, error) {
 	if err != nil {
 		return nil, errdefs.InvalidParameter(err)
 	}
-	if !ctr.IsRunning() {
+	if !ctr.State.IsRunning() {
 		return nil, errNotRunning(id)
 	}
-	if ctr.IsRestarting() {
+	if ctr.State.IsRestarting() {
 		return nil, errContainerIsRestarting(id)
 	}
@@ -458,7 +458,7 @@ func (daemon *Daemon) cleanupSecretDir(ctr *container.Container) {
 }
 func killProcessDirectly(ctr *container.Container) error {
-	pid := ctr.GetPID()
+	pid := ctr.State.GetPID()
 	if pid == 0 {
 		// Ensure that we don't kill ourselves
 		return nil

View File

@@ -494,7 +494,7 @@ func (i *ImageService) untagReferences(ctx context.Context, refs []c8dimages.Ima
 func (i *ImageService) checkImageDeleteConflict(ctx context.Context, imgID image.ID, all []c8dimages.Image, mask conflictType) error {
 	if mask&conflictRunningContainer != 0 {
 		running := func(c *container.Container) bool {
-			return c.ImageID == imgID && c.IsRunning()
+			return c.ImageID == imgID && c.State.IsRunning()
 		}
 		if ctr := i.containers.First(running); ctr != nil {
 			return &imageDeleteConflict{
@@ -508,7 +508,7 @@ func (i *ImageService) checkImageDeleteConflict(ctx context.Context, imgID image
 	if mask&conflictStoppedContainer != 0 {
 		stopped := func(c *container.Container) bool {
-			return !c.IsRunning() && c.ImageID == imgID
+			return !c.State.IsRunning() && c.ImageID == imgID
 		}
 		if ctr := i.containers.First(stopped); ctr != nil {
 			return &imageDeleteConflict{

View File

@@ -296,8 +296,8 @@ func (daemon *Daemon) restore(ctx context.Context, cfg *configStore, containers
 		}
 		c.RWLayer = rwlayer
 		logger.WithFields(log.Fields{
-			"running": c.IsRunning(),
-			"paused":  c.IsPaused(),
+			"running": c.State.IsRunning(),
+			"paused":  c.State.IsPaused(),
 		}).Debug("loaded container")
 		if err := daemon.registerName(c); err != nil {
@@ -376,9 +376,9 @@ func (daemon *Daemon) restore(ctx context.Context, cfg *configStore, containers
 	logger := func(c *container.Container) *log.Entry {
 		return baseLogger.WithFields(log.Fields{
-			"running":    c.IsRunning(),
-			"paused":     c.IsPaused(),
-			"restarting": c.IsRestarting(),
+			"running":    c.State.IsRunning(),
+			"paused":     c.State.IsPaused(),
+			"restarting": c.State.IsRestarting(),
 		})
 	}
@@ -393,7 +393,7 @@ func (daemon *Daemon) restore(ctx context.Context, cfg *configStore, containers
 		alive := false
 		status := containerd.Unknown
-		if tsk, ok := c.Task(); ok {
+		if tsk, ok := c.State.Task(); ok {
 			s, err := tsk.Status(context.Background())
 			if err != nil {
 				logger(c).WithError(err).Error("failed to get task status")
@@ -422,13 +422,13 @@ func (daemon *Daemon) restore(ctx context.Context, cfg *configStore, containers
 		// If the containerd task for the container was not found, docker's view of the
 		// container state will be updated accordingly via SetStopped further down.
-		if c.IsRunning() || c.IsPaused() {
+		if c.State.IsRunning() || c.State.IsPaused() {
 			logger(c).Debug("syncing container on disk state with real state")
 			c.RestartManager().Cancel() // manually start containers because some need to wait for swarm networking
 			switch {
-			case c.IsPaused() && alive:
+			case c.State.IsPaused() && alive:
 				logger(c).WithField("state", status).Info("restored container paused")
 				switch status {
 				case containerd.Paused, containerd.Pausing:
@@ -438,7 +438,7 @@ func (daemon *Daemon) restore(ctx context.Context, cfg *configStore, containers
 				default:
 					// running
 					c.Lock()
-					c.Paused = false
+					c.State.Paused = false
 					daemon.setStateCounter(c)
 					daemon.initHealthMonitor(c)
 					if err := c.CheckpointTo(context.TODO(), daemon.containersReplica); err != nil {
@@ -446,7 +446,7 @@ func (daemon *Daemon) restore(ctx context.Context, cfg *configStore, containers
 					}
 					c.Unlock()
 				}
-			case !c.IsPaused() && alive:
+			case !c.State.IsPaused() && alive:
 				logger(c).Debug("restoring healthcheck")
 				c.Lock()
 				daemon.initHealthMonitor(c)
@@ -463,7 +463,7 @@ func (daemon *Daemon) restore(ctx context.Context, cfg *configStore, containers
 				} else {
 					ces.ExitCode = 255
 				}
-				c.SetStopped(&ces)
+				c.State.SetStopped(&ces)
 				daemon.Cleanup(context.TODO(), c)
 				if err := c.CheckpointTo(context.TODO(), daemon.containersReplica); err != nil {
 					baseLogger.WithError(err).Error("failed to update stopped container state")
@@ -488,7 +488,7 @@ func (daemon *Daemon) restore(ctx context.Context, cfg *configStore, containers
 		}
 		c.ResetRestartManager(false)
-		if !c.HostConfig.NetworkMode.IsContainer() && c.IsRunning() {
+		if !c.HostConfig.NetworkMode.IsContainer() && c.State.IsRunning() {
 			options, err := buildSandboxOptions(&cfg.Config, c)
 			if err != nil {
 				logger(c).WithError(err).Warn("failed to build sandbox option to restore container")
@@ -522,7 +522,7 @@ func (daemon *Daemon) restore(ctx context.Context, cfg *configStore, containers
 		c.Lock()
 		// TODO(thaJeztah): we no longer persist RemovalInProgress on disk, so this code is likely redundant; see https://github.com/moby/moby/pull/49968
-		if c.RemovalInProgress {
+		if c.State.RemovalInProgress {
 			// We probably crashed in the middle of a removal, reset
 			// the flag.
 			//
@@ -531,8 +531,8 @@ func (daemon *Daemon) restore(ctx context.Context, cfg *configStore, containers
 			// associated volumes, network links or both to also
 			// be removed. So we put the container in the "dead"
 			// state and leave further processing up to them.
-			c.RemovalInProgress = false
-			c.Dead = true
+			c.State.RemovalInProgress = false
+			c.State.Dead = true
 			if err := c.CheckpointTo(context.TODO(), daemon.containersReplica); err != nil {
 				baseLogger.WithError(err).Error("failed to update RemovalInProgress container state")
 			} else {
@@ -695,7 +695,7 @@ func (daemon *Daemon) restartSwarmContainers(ctx context.Context, cfg *configSto
 	sem := semaphore.NewWeighted(int64(parallelLimit))
 	for _, c := range daemon.List() {
-		if !c.IsRunning() && !c.IsPaused() {
+		if !c.State.IsRunning() && !c.State.IsPaused() {
 			// Autostart all the containers which has a
 			// swarm endpoint now that the cluster is
 			// initialized.
@@ -1422,7 +1422,7 @@ func (daemon *Daemon) shutdownContainer(c *container.Container) error {
 	// Wait without timeout for the container to exit.
 	// Ignore the result.
-	<-c.Wait(ctx, containertypes.WaitConditionNotRunning)
+	<-c.State.Wait(ctx, containertypes.WaitConditionNotRunning)
 	return nil
 }
@@ -1479,7 +1479,7 @@ func (daemon *Daemon) Shutdown(ctx context.Context) error {
 	log.G(ctx).Debugf("daemon configured with a %d seconds minimum shutdown timeout", cfg.ShutdownTimeout)
 	log.G(ctx).Debugf("start clean shutdown of all containers with a %d seconds timeout...", daemon.shutdownTimeout(cfg))
 	daemon.containers.ApplyAll(func(c *container.Container) {
-		if !c.IsRunning() {
+		if !c.State.IsRunning() {
 			return
 		}
 		logger := log.G(ctx).WithField("container", c.ID)

View File

@@ -36,11 +36,11 @@ func (daemon *Daemon) containerRm(cfg *config.Config, name string, opts *backend
 	}
 	// Container state RemovalInProgress should be used to avoid races.
-	if inProgress := ctr.SetRemovalInProgress(); inProgress {
+	if inProgress := ctr.State.SetRemovalInProgress(); inProgress {
 		err := fmt.Errorf("removal of container %s is already in progress", name)
 		return errdefs.Conflict(err)
 	}
-	defer ctr.ResetRemovalInProgress()
+	defer ctr.State.ResetRemovalInProgress()
 	// check if container wasn't deregistered by previous rm since Get
 	if c := daemon.containers.Get(ctr.ID); c == nil {
@@ -87,12 +87,12 @@ func (daemon *Daemon) rmLink(cfg *config.Config, ctr *container.Container, name
 // cleanupContainer unregisters a container from the daemon, stops stats
 // collection and cleanly removes contents and metadata from the filesystem.
 func (daemon *Daemon) cleanupContainer(ctr *container.Container, config backend.ContainerRmConfig) error {
-	if ctr.IsRunning() {
+	if ctr.State.IsRunning() {
 		if !config.ForceRemove {
-			if ctr.Paused {
+			if ctr.State.Paused {
 				return errdefs.Conflict(errors.New("container is paused and must be unpaused first"))
 			} else {
-				return errdefs.Conflict(fmt.Errorf("container is %s: stop the container before removing or force remove", ctr.StateString()))
+				return errdefs.Conflict(fmt.Errorf("container is %s: stop the container before removing or force remove", ctr.State.StateString()))
 			}
 		}
 		if err := daemon.Kill(ctr); err != nil && !isNotRunning(err) {
@@ -122,7 +122,7 @@ func (daemon *Daemon) cleanupContainer(ctr *container.Container, config backend.
 	// Mark container dead. We don't want anybody to be restarting it.
 	ctr.Lock()
-	ctr.Dead = true
+	ctr.State.Dead = true
 	// Copy RWLayer for releasing and clear the reference while holding the container lock.
 	rwLayer := ctr.RWLayer
@@ -144,7 +144,7 @@ func (daemon *Daemon) cleanupContainer(ctr *container.Container, config backend.
 			ctr.Lock()
 			ctr.RWLayer = rwLayer
 			ctr.Unlock()
-			ctr.SetRemovalError(err)
+			ctr.State.SetRemovalError(err)
 			return err
 		}
 	}
@@ -160,7 +160,7 @@ func (daemon *Daemon) cleanupContainer(ctr *container.Container, config backend.
 	ctr.Unlock()
 	if err != nil {
 		err = errors.Wrap(err, "unable to remove filesystem")
-		ctr.SetRemovalError(err)
+		ctr.State.SetRemovalError(err)
 		return err
 	}
@@ -174,7 +174,7 @@ func (daemon *Daemon) cleanupContainer(ctr *container.Container, config backend.
 	for _, name := range linkNames {
 		daemon.releaseName(name)
 	}
-	ctr.SetRemoved()
+	ctr.State.SetRemoved()
 	metrics.StateCtr.Delete(ctr.ID)
 	daemon.LogContainerEvent(ctr, events.ActionDestroy)

View File

@@ -53,8 +53,8 @@ func TestContainerDelete(t *testing.T) {
 			errMsg: "container is restarting: stop the container before removing or force remove",
 			initContainer: func() *container.Container {
 				c := newContainerWithState(container.NewState())
-				c.SetRunning(nil, nil, time.Now())
-				c.SetRestarting(&container.ExitStatus{})
+				c.State.SetRunning(nil, nil, time.Now())
+				c.State.SetRestarting(&container.ExitStatus{})
 				return c
 			},
 		},
@@ -85,7 +85,7 @@ func TestContainerDoubleDelete(t *testing.T) {
 	c := newContainerWithState(container.NewState())
 	// Mark the container as having a delete in progress
-	c.SetRemovalInProgress()
+	c.State.SetRemovalInProgress()
 	d, cleanup := newDaemonWithTmpRoot(t)
 	defer cleanup()

View File

@@ -57,13 +57,13 @@ func (daemon *Daemon) getExecConfig(name string) (*container.ExecConfig, error)
 	if ctr == nil {
 		return nil, containerNotFound(name)
 	}
-	if !ctr.IsRunning() {
+	if !ctr.State.IsRunning() {
 		return nil, errNotRunning(ctr.ID)
 	}
-	if ctr.IsPaused() {
+	if ctr.State.IsPaused() {
 		return nil, errExecPaused(ctr.ID)
 	}
-	if ctr.IsRestarting() {
+	if ctr.State.IsRestarting() {
 		return nil, errContainerIsRestarting(ctr.ID)
 	}
 	return ec, nil
@@ -80,13 +80,13 @@ func (daemon *Daemon) getActiveContainer(name string) (*container.Container, err
 		return nil, err
 	}
-	if !ctr.IsRunning() {
+	if !ctr.State.IsRunning() {
 		return nil, errNotRunning(ctr.ID)
 	}
-	if ctr.IsPaused() {
+	if ctr.State.IsPaused() {
 		return nil, errExecPaused(name)
 	}
-	if ctr.IsRestarting() {
+	if ctr.State.IsRestarting() {
 		return nil, errContainerIsRestarting(ctr.ID)
 	}
 	return ctr, nil

View File

@@ -26,12 +26,12 @@ func (daemon *Daemon) ContainerExport(ctx context.Context, name string, out io.W
 		return errors.New("the daemon on this operating system does not support exporting Windows containers")
 	}
-	if ctr.IsDead() {
+	if ctr.State.IsDead() {
 		err := fmt.Errorf("You cannot export container %s which is Dead", ctr.ID)
 		return errdefs.Conflict(err)
 	}
-	if ctr.IsRemovalInProgress() {
+	if ctr.State.IsRemovalInProgress() {
 		err := fmt.Errorf("You cannot export container %s which is being removed", ctr.ID)
 		return errdefs.Conflict(err)
 	}

View File

@@ -264,7 +264,7 @@ func monitor(d *Daemon, c *container.Container, stop chan struct{}, probe probe)
 			return probeInterval
 		}
 		c.Lock()
-		status := c.Health.Health.Status
+		status := c.State.Health.Health.Status
 		c.Unlock()
 		if status == containertypes.Starting {
@@ -351,11 +351,11 @@ func (daemon *Daemon) updateHealthMonitor(c *container.Container) {
 		return // No healthcheck configured
 	}
-	probe := getProbe(c)
-	wantRunning := c.Running && !c.Paused && probe != nil
+	healthProbe := getProbe(c)
+	wantRunning := c.State.Running && !c.State.Paused && healthProbe != nil
 	if wantRunning {
 		if stop := h.OpenMonitorChannel(); stop != nil {
-			go monitor(daemon, c, stop, probe)
+			go monitor(daemon, c, stop, healthProbe)
 		}
 	} else {
 		h.CloseMonitorChannel()

View File

@@ -384,7 +384,7 @@ func (i *ImageService) checkImageDeleteConflict(imgID image.ID, mask conflictTyp
 	if mask&conflictRunningContainer != 0 {
 		// Check if any running container is using the image.
 		running := func(c *container.Container) bool {
-			return c.ImageID == imgID && c.IsRunning()
+			return c.ImageID == imgID && c.State.IsRunning()
 		}
 		if ctr := i.containers.First(running); ctr != nil {
 			return &imageDeleteConflict{
@@ -407,7 +407,7 @@ func (i *ImageService) checkImageDeleteConflict(imgID image.ID, mask conflictTyp
 	if mask&conflictStoppedContainer != 0 {
 		// Check if any stopped containers reference this image.
 		stopped := func(c *container.Container) bool {
-			return !c.IsRunning() && c.ImageID == imgID
+			return !c.State.IsRunning() && c.ImageID == imgID
 		}
 		if ctr := i.containers.First(stopped); ctr != nil {
 			return &imageDeleteConflict{

View File

@@ -162,7 +162,7 @@ func (daemon *Daemon) getInspectData(daemonCfg *config.Config, ctr *container.Co
 	}
 	if ctr.RWLayer == nil {
-		if ctr.Dead {
+		if ctr.State.Dead {
 			return inspectResponse, nil
 		}
 		return nil, errdefs.System(errors.New("RWLayer of container " + ctr.ID + " is unexpectedly nil"))
@@ -170,7 +170,7 @@ func (daemon *Daemon) getInspectData(daemonCfg *config.Config, ctr *container.Co
 	graphDriverData, err := ctr.RWLayer.Metadata()
 	if err != nil {
-		if ctr.Dead {
+		if ctr.State.Dead {
 			// container is marked as Dead, and its graphDriver metadata may
 			// have been removed; we can ignore errors.
 			return inspectResponse, nil

View File

@@ -29,7 +29,7 @@ func TestGetInspectData(t *testing.T) {
 	_, err := d.getInspectData(&cfg.Config, c)
 	assert.Check(t, is.ErrorContains(err, "RWLayer of container inspect-me is unexpectedly nil"))
-	c.Dead = true
+	c.State.Dead = true
 	_, err = d.getInspectData(&cfg.Config, c)
 	assert.Check(t, err)
 }

View File

@@ -84,11 +84,11 @@ func (daemon *Daemon) killWithSignal(container *containerpkg.Container, stopSign
 		}
 		if containerStopSignal == stopSignal {
 			container.ExitOnNext()
-			unpause = container.Paused
+			unpause = container.State.Paused
 		}
 	} else {
 		container.ExitOnNext()
-		unpause = container.Paused
+		unpause = container.State.Paused
 	}
 	if !daemon.IsShuttingDown() {
@@ -104,7 +104,7 @@ func (daemon *Daemon) killWithSignal(container *containerpkg.Container, stopSign
 	// if the container is currently restarting we do not need to send the signal
 	// to the process. Telling the monitor that it should exit on its next event
 	// loop is enough
-	if container.Restarting {
+	if container.State.Restarting {
 		return nil
 	}
@@ -124,7 +124,7 @@ func (daemon *Daemon) killWithSignal(container *containerpkg.Container, stopSign
 		// But this prevents race conditions in processing the container.
 		ctx, cancel := context.WithTimeout(context.TODO(), time.Duration(container.StopTimeout())*time.Second)
 		defer cancel()
-		s := <-container.Wait(ctx, containertypes.WaitConditionNotRunning)
+		s := <-container.State.Wait(ctx, containertypes.WaitConditionNotRunning)
 		if s.Err() != nil {
 			if err := daemon.handleContainerExit(container, nil); err != nil {
 				log.G(context.TODO()).WithFields(log.Fields{
@@ -159,7 +159,7 @@ func (daemon *Daemon) killWithSignal(container *containerpkg.Container, stopSign
 // Kill forcefully terminates a container.
 func (daemon *Daemon) Kill(container *containerpkg.Container) error {
-	if !container.IsRunning() {
+	if !container.State.IsRunning() {
 		return errNotRunning(container.ID)
 	}
@@ -179,7 +179,7 @@ func (daemon *Daemon) Kill(container *containerpkg.Container) error {
 		ctx, cancel := context.WithTimeout(context.Background(), waitTimeout)
 		defer cancel()
-		status := <-container.Wait(ctx, containertypes.WaitConditionNotRunning)
+		status := <-container.State.Wait(ctx, containertypes.WaitConditionNotRunning)
 		if status.Err() == nil {
 			return nil
 		}
@@ -197,7 +197,7 @@ func (daemon *Daemon) Kill(container *containerpkg.Container) error {
 	ctx2, cancel2 := context.WithTimeout(context.Background(), 2*time.Second)
 	defer cancel2()
-	if status := <-container.Wait(ctx2, containertypes.WaitConditionNotRunning); status.Err() != nil {
+	if status := <-container.State.Wait(ctx2, containertypes.WaitConditionNotRunning); status.Err() != nil {
 		return errors.New("tried to kill container, but did not receive an exit event")
 	}
 	return nil
@@ -207,7 +207,7 @@ func (daemon *Daemon) Kill(container *containerpkg.Container) error {
 func (daemon *Daemon) killPossiblyDeadProcess(container *containerpkg.Container, sig syscall.Signal) error {
 	err := daemon.killWithSignal(container, sig)
 	if cerrdefs.IsNotFound(err) {
-		err = errNoSuchProcess{container.GetPID(), sig}
+		err = errNoSuchProcess{container.State.GetPID(), sig}
 		log.G(context.TODO()).Debug(err)
 		return err
 	}

View File

@@ -51,7 +51,7 @@ func setupContainerWithName(t *testing.T, name string, daemon *Daemon) *containe
 		name = "/" + name
 	}
 	c.Name = name
-	c.Running = true
+	c.State.Running = true
 	c.HostConfig = &containertypes.HostConfig{}
 	c.Created = time.Now()

View File

@@ -45,7 +45,7 @@ func (daemon *Daemon) ContainerLogs(ctx context.Context, containerName string, c
 		return nil, false, err
 	}
-	if ctr.RemovalInProgress || ctr.Dead {
+	if ctr.State.RemovalInProgress || ctr.State.Dead {
 		return nil, false, errdefs.Conflict(errors.New("can not get logs from container which is dead or marked for removal"))
 	}

View File

@@ -19,7 +19,7 @@ import (
 )
 func (daemon *Daemon) setStateCounter(c *container.Container) {
-	switch c.StateString() {
+	switch c.State.StateString() {
 	case "paused":
 		metrics.StateCtr.Set(c.ID, "paused")
 	case "running":
@@ -42,7 +42,7 @@ func (daemon *Daemon) handleContainerExit(c *container.Container, e *libcontaine
 		// c.ErrorMsg is set by [daemon.containerStart], and doesn't preserve the
 		// error type (because this field is persisted on disk). So, use string
 		// matching instead of usual error comparison methods.
-		if strings.Contains(c.ErrorMsg, errSetupNetworking) {
+		if strings.Contains(c.State.ErrorMsg, errSetupNetworking) {
 			c.Unlock()
 			return nil
 		}
@@ -53,7 +53,7 @@ func (daemon *Daemon) handleContainerExit(c *container.Container, e *libcontaine
 	// container is started again.
 	daemon.stopHealthchecks(c)
-	tsk, ok := c.Task()
+	tsk, ok := c.State.Task()
 	if ok {
 		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
 		es, err := tsk.Delete(ctx)
@@ -81,12 +81,12 @@ func (daemon *Daemon) handleContainerExit(c *container.Container, e *libcontaine
 		ctrExitStatus.ExitCode = int(e.ExitCode)
 		ctrExitStatus.ExitedAt = e.ExitedAt
 		if e.Error != nil {
-			c.SetError(e.Error)
+			c.State.SetError(e.Error)
 		}
 	}
 	daemonShutdown := daemon.IsShuttingDown()
-	execDuration := time.Since(c.StartedAt)
+	execDuration := time.Since(c.State.StartedAt)
 	restart, wait, err := c.RestartManager().ShouldRestart(uint32(ctrExitStatus.ExitCode), daemonShutdown || c.HasBeenManuallyStopped, execDuration)
 	if err != nil {
 		log.G(ctx).WithFields(log.Fields{
@@ -115,9 +115,9 @@ func (daemon *Daemon) handleContainerExit(c *container.Container, e *libcontaine
 			"exitStatus":    ctrExitStatus,
 			"manualRestart": c.HasBeenManuallyRestarted,
 		}).Debug("Restarting container")
-		c.SetRestarting(&ctrExitStatus)
+		c.State.SetRestarting(&ctrExitStatus)
 	} else {
-		c.SetStopped(&ctrExitStatus)
+		c.State.SetStopped(&ctrExitStatus)
 		if !c.HasBeenManuallyRestarted {
 			defer daemon.autoRemove(&cfg.Config, c)
 		}
@@ -148,7 +148,7 @@ func (daemon *Daemon) handleContainerExit(c *container.Container, e *libcontaine
 			}
 			if waitErr != nil {
 				c.Lock()
-				c.SetStopped(&ctrExitStatus)
+				c.State.SetStopped(&ctrExitStatus)
 				daemon.setStateCounter(c)
 				c.CheckpointTo(context.TODO(), daemon.containersReplica)
 				c.Unlock()
@@ -179,7 +179,7 @@ func (daemon *Daemon) ProcessEvent(id string, e libcontainerdtypes.EventType, ei
 		c.Lock()
 		defer c.Unlock()
-		c.OOMKilled = true
+		c.State.OOMKilled = true
 		daemon.updateHealthMonitor(c)
 		if err := c.CheckpointTo(context.TODO(), daemon.containersReplica); err != nil {
 			return err
@@ -247,7 +247,7 @@ func (daemon *Daemon) ProcessEvent(id string, e libcontainerdtypes.EventType, ei
 		defer c.Unlock()
 		// This is here to handle start not generated by docker
-		if !c.Running {
+		if !c.State.Running {
 			ctr, err := daemon.containerd.LoadContainer(context.Background(), c.ID)
 			if err != nil {
 				if cerrdefs.IsNotFound(err) {
@@ -272,7 +272,7 @@ func (daemon *Daemon) ProcessEvent(id string, e libcontainerdtypes.EventType, ei
 				}
 				return err
 			}
-			c.SetRunningExternal(ctr, tsk)
+			c.State.SetRunningExternal(ctr, tsk)
 			c.HasBeenManuallyStopped = false
 			c.HasBeenStartedBefore = true
 			daemon.setStateCounter(c)
@@ -290,8 +290,8 @@ func (daemon *Daemon) ProcessEvent(id string, e libcontainerdtypes.EventType, ei
 		c.Lock()
 		defer c.Unlock()
-		if !c.Paused {
-			c.Paused = true
+		if !c.State.Paused {
+			c.State.Paused = true
 			daemon.setStateCounter(c)
 			daemon.updateHealthMonitor(c)
 			if err := c.CheckpointTo(context.TODO(), daemon.containersReplica); err != nil {
@@ -304,8 +304,8 @@ func (daemon *Daemon) ProcessEvent(id string, e libcontainerdtypes.EventType, ei
 		c.Lock()
 		defer c.Unlock()
-		if c.Paused {
-			c.Paused = false
+		if c.State.Paused {
+			c.State.Paused = false
 			daemon.setStateCounter(c)
 			daemon.updateHealthMonitor(c)

View File

@@ -12,7 +12,7 @@ import (
 )
 func (daemon *Daemon) prepareMountPoints(container *container.Container) error {
-	alive := container.IsRunning()
+	alive := container.State.IsRunning()
 	for _, config := range container.MountPoints {
 		if err := daemon.lazyInitializeVolume(container.ID, config); err != nil {
 			return err

View File

@@ -31,12 +31,12 @@ func (daemon *Daemon) containerPause(container *container.Container) error {
 	}
 	// We cannot Pause the container which is already paused
-	if container.Paused {
+	if container.State.Paused {
 		return errNotPaused(container.ID)
 	}
 	// We cannot Pause the container which is restarting
-	if container.Restarting {
+	if container.State.Restarting {
 		return errContainerIsRestarting(container.ID)
 	}
@@ -44,7 +44,7 @@ func (daemon *Daemon) containerPause(container *container.Container) error {
 		return fmt.Errorf("cannot pause container %s: %s", container.ID, err)
 	}
-	container.Paused = true
+	container.State.Paused = true
 	daemon.setStateCounter(container)
 	daemon.updateHealthMonitor(container)
 	daemon.LogContainerEvent(container, events.ActionPause)

View File

@@ -61,7 +61,7 @@ func (daemon *Daemon) ContainersPrune(ctx context.Context, pruneFilters filters.
 		default:
 		}
-		if !c.IsRunning() {
+		if !c.State.IsRunning() {
 			if !until.IsZero() && c.Created.After(until) {
 				continue
 			}

View File

@@ -82,7 +82,7 @@ func (daemon *Daemon) ContainerRename(oldName, newName string) (retErr error) {
 		return err
 	}
-	if !ctr.Running {
+	if !ctr.State.Running {
 		daemon.LogContainerEventWithAttributes(ctr, events.ActionRename, map[string]string{
 			"oldName": oldName,
 		})

View File

@@ -55,7 +55,7 @@ func (daemon *Daemon) containerRestart(ctx context.Context, daemonCfg *configSto
 		}
 	}
-	if container.IsRunning() {
+	if container.State.IsRunning() {
 		container.Lock()
 		container.HasBeenManuallyRestarted = true
 		container.Unlock()

View File

@@ -29,9 +29,9 @@ func validateState(ctr *container.Container) error {
 	// Intentionally checking paused first, because a container can be
 	// BOTH running AND paused. To start a paused (but running) container,
 	// it must be thawed ("un-paused").
-	if ctr.Paused {
+	if ctr.State.Paused {
 		return errdefs.Conflict(errors.New("cannot start a paused container, try unpause instead"))
-	} else if ctr.Running {
+	} else if ctr.State.Running {
 		// This is not an actual error, but produces a 304 "not modified"
 		// when returned through the API to indicates the container is
 		// already in the desired state. It's implemented as an error
@@ -39,7 +39,7 @@ func validateState(ctr *container.Container) error {
 		// no further processing is needed).
 		return errdefs.NotModified(errors.New("container is already running"))
 	}
-	if ctr.RemovalInProgress || ctr.Dead {
+	if ctr.State.RemovalInProgress || ctr.State.Dead {
 		return errdefs.Conflict(errors.New("container is marked for removal and cannot be started"))
 	}
 	return nil
@@ -88,11 +88,11 @@ func (daemon *Daemon) containerStart(ctx context.Context, daemonCfg *configStore
 	container.Lock()
 	defer container.Unlock()
-	if resetRestartManager && container.Running { // skip this check if already in restarting step and resetRestartManager==false
+	if resetRestartManager && container.State.Running { // skip this check if already in restarting step and resetRestartManager==false
 		return nil
 	}
-	if container.RemovalInProgress || container.Dead {
+	if container.State.RemovalInProgress || container.State.Dead {
 		return errdefs.Conflict(errors.New("container is marked for removal and cannot be started"))
 	}
@@ -105,10 +105,10 @@ func (daemon *Daemon) containerStart(ctx context.Context, daemonCfg *configStore
 	// setup has been cleaned up properly
 	defer func() {
 		if retErr != nil {
-			container.SetError(retErr)
+			container.State.SetError(retErr)
 			// if no one else has set it, make sure we don't leave it at zero
-			if container.ExitCode() == 0 {
-				container.SetExitCode(exitUnknown)
+			if container.State.ExitCode() == 0 {
+				container.State.SetExitCode(exitUnknown)
 			}
 			if err := container.CheckpointTo(context.WithoutCancel(ctx), daemon.containersReplica); err != nil {
 				log.G(ctx).Errorf("%s: failed saving state on start failure: %v", container.ID, err)
@@ -211,7 +211,7 @@ func (daemon *Daemon) containerStart(ctx context.Context, daemonCfg *configStore
 		return nil
 	})
 	if err != nil {
-		return setExitCodeFromError(container.SetExitCode, err)
+		return setExitCodeFromError(container.State.SetExitCode, err)
 	}
 	defer func() {
 		if retErr != nil {
@@ -228,7 +228,7 @@ func (daemon *Daemon) containerStart(ctx context.Context, daemonCfg *configStore
 		checkpointDir, container.StreamConfig.Stdin() != nil || container.Config.Tty,
 		container.InitializeStdio)
 	if err != nil {
-		return setExitCodeFromError(container.SetExitCode, err)
+		return setExitCodeFromError(container.State.SetExitCode, err)
 	}
 	defer func() {
 		if retErr != nil {
@@ -244,11 +244,11 @@ func (daemon *Daemon) containerStart(ctx context.Context, daemonCfg *configStore
 	}
 	if err := tsk.Start(context.WithoutCancel(ctx)); err != nil { // passing a cancelable ctx caused integration tests to be stuck in the cleanup phase
-		return setExitCodeFromError(container.SetExitCode, err)
+		return setExitCodeFromError(container.State.SetExitCode, err)
 	}
 	container.HasBeenManuallyRestarted = false
-	container.SetRunning(ctr, tsk, startupTime)
+	container.State.SetRunning(ctr, tsk, startupTime)
 	container.HasBeenStartedBefore = true
 	daemon.setStateCounter(container)
@@ -270,7 +270,7 @@ func (daemon *Daemon) containerStart(ctx context.Context, daemonCfg *configStore
 func (daemon *Daemon) Cleanup(ctx context.Context, container *container.Container) {
 	// Microsoft HCS containers get in a bad state if host resources are
 	// released while the container still exists.
-	if ctr, ok := container.C8dContainer(); ok {
+	if ctr, ok := container.State.C8dContainer(); ok {
 		if err := ctr.Delete(context.Background()); err != nil {
 			log.G(ctx).Errorf("%s cleanup: failed to delete container from containerd: %v", container.ID, err)
 		}

View File

@@ -18,7 +18,7 @@ func (daemon *Daemon) getLibcontainerdCreateOptions(daemonCfg *configStore, cont
 	shim, opts, err := daemonCfg.Runtimes.Get(container.HostConfig.Runtime)
 	if err != nil {
-		return "", nil, setExitCodeFromError(container.SetExitCode, err)
+		return "", nil, setExitCodeFromError(container.State.SetExitCode, err)
 	}
 	return shim, opts, nil

@@ -28,7 +28,7 @@ func (daemon *Daemon) ContainerStats(ctx context.Context, prefixOrName string, c
     }
     // If the container is either not running or restarting and requires no stream, return an empty stats.
-    if !config.Stream && (!ctr.IsRunning() || ctr.IsRestarting()) {
+    if !config.Stream && (!ctr.State.IsRunning() || ctr.State.IsRestarting()) {
         return json.NewEncoder(config.OutStream()).Encode(&containertypes.StatsResponse{
             Name: ctr.Name,
             ID:   ctr.ID,

@@ -27,7 +27,7 @@ func (daemon *Daemon) ContainerStop(ctx context.Context, name string, options ba
     if err != nil {
         return err
     }
-    if !ctr.IsRunning() {
+    if !ctr.State.IsRunning() {
         // This is not an actual error, but produces a 304 "not modified"
         // when returned through the API to indicates the container is
         // already in the desired state. It's implemented as an error
@@ -49,7 +49,7 @@ func (daemon *Daemon) containerStop(ctx context.Context, ctr *container.Containe
     // Cancelling the request should not cancel the stop.
     ctx = context.WithoutCancel(ctx)
-    if !ctr.IsRunning() {
+    if !ctr.State.IsRunning() {
         return nil
     }
@@ -96,7 +96,7 @@ func (daemon *Daemon) containerStop(ctx context.Context, ctr *container.Containe
     }
     defer cancel()
-    if status := <-ctr.Wait(subCtx, containertypes.WaitConditionNotRunning); status.Err() == nil {
+    if status := <-ctr.State.Wait(subCtx, containertypes.WaitConditionNotRunning); status.Err() == nil {
         // container did exit, so ignore any previous errors and return
         return nil
     }
@@ -118,7 +118,7 @@ func (daemon *Daemon) containerStop(ctx context.Context, ctr *container.Containe
     // got a kill error, but give container 2 more seconds to exit just in case
     subCtx, cancel := context.WithTimeout(ctx, 2*time.Second)
     defer cancel()
-    status := <-ctr.Wait(subCtx, containertypes.WaitConditionNotRunning)
+    status := <-ctr.State.Wait(subCtx, containertypes.WaitConditionNotRunning)
     if status.Err() != nil {
         log.G(ctx).WithError(err).WithField("container", ctr.ID).Errorf("error killing container: %v", status.Err())
         return err

@@ -159,7 +159,7 @@ func (daemon *Daemon) ContainerTop(name string, psArgs string) (*container.TopRe
     if err != nil {
         return nil, err
     }
-    if ctr.Restarting {
+    if ctr.State.Restarting {
         return nil, errContainerIsRestarting(ctr.ID)
     }
     return tsk, nil

@@ -45,7 +45,7 @@ func (daemon *Daemon) ContainerTop(name string, psArgs string) (*container.TopRe
     if err != nil {
         return nil, err
     }
-    if ctr.Restarting {
+    if ctr.State.Restarting {
         return nil, errContainerIsRestarting(ctr.ID)
     }
     return task, nil

@@ -24,7 +24,7 @@ func (daemon *Daemon) containerUnpause(ctr *container.Container) error {
     defer ctr.Unlock()
     // We cannot unpause the container which is not paused
-    if !ctr.Paused {
+    if !ctr.State.Paused {
         return fmt.Errorf("Container %s is not paused", ctr.ID)
     }
     tsk, err := ctr.GetRunningTask()
@@ -36,7 +36,7 @@ func (daemon *Daemon) containerUnpause(ctr *container.Container) error {
return fmt.Errorf("Cannot unpause container %s: %s", ctr.ID, err) return fmt.Errorf("Cannot unpause container %s: %s", ctr.ID, err)
} }
ctr.Paused = false ctr.State.Paused = false
daemon.setStateCounter(ctr) daemon.setStateCounter(ctr)
daemon.updateHealthMonitor(ctr) daemon.updateHealthMonitor(ctr)
daemon.LogContainerEvent(ctr, events.ActionUnPause) daemon.LogContainerEvent(ctr, events.ActionUnPause)

@@ -43,7 +43,7 @@ func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) erro
     defer func() {
         if restoreConfig {
             ctr.Lock()
-            if !ctr.RemovalInProgress && !ctr.Dead {
+            if !ctr.State.RemovalInProgress && !ctr.State.Dead {
                 ctr.HostConfig = &backupHostConfig
                 ctr.CheckpointTo(context.WithoutCancel(context.TODO()), daemon.containersReplica)
             }
@@ -53,7 +53,7 @@ func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) erro
     ctr.Lock()
-    if ctr.RemovalInProgress || ctr.Dead {
+    if ctr.State.RemovalInProgress || ctr.State.Dead {
         ctr.Unlock()
         return errCannotUpdate(ctr.ID, errors.New(`container is marked for removal and cannot be "update"`))
     }
@@ -83,7 +83,7 @@ func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) erro
     // If container is running (including paused), we need to update configs
     // to the real world.
     ctr.Lock()
-    isRestarting := ctr.Restarting
+    isRestarting := ctr.State.Restarting
     tsk, err := ctr.GetRunningTask()
     ctr.Unlock()
     if cerrdefs.IsConflict(err) || isRestarting {

@@ -20,5 +20,5 @@ func (daemon *Daemon) ContainerWait(ctx context.Context, name string, condition
         return nil, err
     }
-    return cntr.Wait(ctx, condition), nil
+    return cntr.State.Wait(ctx, condition), nil
 }

@@ -208,7 +208,7 @@ func TestRestartDaemonWithRestartingContainer(t *testing.T) {
     d.Stop(t)
     d.TamperWithContainerConfig(t, id, func(c *realcontainer.Container) {
-        c.SetRestarting(&realcontainer.ExitStatus{ExitCode: 1})
+        c.State.SetRestarting(&realcontainer.ExitStatus{ExitCode: 1})
         c.HasBeenStartedBefore = true
     })
@@ -256,7 +256,7 @@ func TestHardRestartWhenContainerIsRunning(t *testing.T) {
     for _, id := range []string{noPolicy, onFailure} {
         d.TamperWithContainerConfig(t, id, func(c *realcontainer.Container) {
-            c.SetRunning(nil, nil, time.Now())
+            c.State.SetRunning(nil, nil, time.Now())
             c.HasBeenStartedBefore = true
         })
     }