Mirror of https://github.com/moby/moby.git
Merge pull request #51526 from robmry/refactor-create-mounts
Refactor Daemon.create - prep for call to NRI plugin
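The refactor threads the container's own HostConfig through helpers that previously took a separate *containertypes.HostConfig argument, so everything a later NRI create hook would need is attached to the container before the remaining setup steps run. As a rough, self-contained analogue of that pattern (hypothetical stub types, not moby's; only the shape of the change is taken from the diff below):

package main

import "fmt"

// Hypothetical stand-ins for the real moby types; only the shape of the
// refactor is modelled here.
type HostConfig struct {
	NetworkMode string
}

type Container struct {
	Name       string
	HostConfig *HostConfig
}

type Daemon struct{}

// Before the refactor a helper like this took hostConfig as an extra
// argument; after it, the value is read from the container itself.
func (d *Daemon) setSecurityOptions(c *Container) error {
	fmt.Printf("security options for %s (network mode %q)\n", c.Name, c.HostConfig.NetworkMode)
	return nil
}

func (d *Daemon) registerLinks(c *Container) error {
	if c.HostConfig == nil || c.HostConfig.NetworkMode != "default" {
		return nil // nothing to do in this toy example
	}
	fmt.Printf("registering links for %s\n", c.Name)
	return nil
}

func main() {
	d := &Daemon{}
	// HostConfig is attached once, when the container object is built
	// (newContainer in the real code); later helpers no longer take it.
	c := &Container{Name: "demo", HostConfig: &HostConfig{NetworkMode: "default"}}
	if err := d.setSecurityOptions(c); err != nil {
		fmt.Println("error:", err)
	}
	if err := d.registerLinks(c); err != nil {
		fmt.Println("error:", err)
	}
}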
@@ -143,7 +143,7 @@ func (daemon *Daemon) newContainer(name string, platform ocispec.Platform, confi
 	base.Path = entrypoint
 	base.Args = args // FIXME: de-duplicate from config
 	base.Config = config
-	base.HostConfig = &containertypes.HostConfig{}
+	base.HostConfig = hostConfig
 	base.ImageID = imgID
 	base.NetworkSettings = &network.Settings{}
 	base.Name = name
@@ -205,32 +205,10 @@ func (daemon *Daemon) GetDependentContainers(c *container.Container) []*containe
 	return append(dependentContainers, slices.Collect(maps.Values(daemon.linkIndex.children(c)))...)
 }
 
-func (daemon *Daemon) setSecurityOptions(cfg *config.Config, container *container.Container, hostConfig *containertypes.HostConfig) error {
+func (daemon *Daemon) setSecurityOptions(cfg *config.Config, container *container.Container) error {
 	container.Lock()
 	defer container.Unlock()
-	return daemon.parseSecurityOpt(cfg, &container.SecurityOptions, hostConfig)
-}
-
-func (daemon *Daemon) setHostConfig(container *container.Container, hostConfig *containertypes.HostConfig, defaultReadOnlyNonRecursive bool) error {
-	// Do not lock while creating volumes since this could be calling out to external plugins
-	// Don't want to block other actions, like `docker ps` because we're waiting on an external plugin
-	if err := daemon.registerMountPoints(container, hostConfig, defaultReadOnlyNonRecursive); err != nil {
-		return err
-	}
-
-	container.Lock()
-	defer container.Unlock()
-
-	// Register any links from the host config before starting the container
-	if err := daemon.registerLinks(container, hostConfig); err != nil {
-		return err
-	}
-
-	if hostConfig != nil && hostConfig.NetworkMode == "" {
-		hostConfig.NetworkMode = networktypes.NetworkDefault
-	}
-	container.HostConfig = hostConfig
-	return nil
+	return daemon.parseSecurityOpt(cfg, &container.SecurityOptions, container.HostConfig)
 }
 
 // verifyContainerSettings performs validation of the hostconfig and config
@@ -221,11 +221,10 @@ func (daemon *Daemon) create(ctx context.Context, daemonCfg *config.Config, opts
 		}
 	}()
 
-	if err := daemon.setSecurityOptions(daemonCfg, ctr, opts.params.HostConfig); err != nil {
+	if err := daemon.setSecurityOptions(daemonCfg, ctr); err != nil {
 		return nil, err
 	}
 
-	ctr.HostConfig.StorageOpt = opts.params.HostConfig.StorageOpt
 	ctr.ImageManifest = imgManifest
 
 	// Set RWLayer for container after mount labels have been set
@@ -244,11 +243,10 @@ func (daemon *Daemon) create(ctx context.Context, daemonCfg *config.Config, opts
 		return nil, err
 	}
 
-	if err := daemon.setHostConfig(ctr, opts.params.HostConfig, opts.params.DefaultReadOnlyNonRecursive); err != nil {
+	if err := daemon.registerLinks(ctr); err != nil {
 		return nil, err
 	}
 
-	if err := daemon.createContainerOSSpecificSettings(ctx, ctr, opts.params.Config, opts.params.HostConfig); err != nil {
+	if err := daemon.createContainerOSSpecificSettings(ctx, ctr); err != nil {
 		return nil, err
 	}
 
@@ -261,8 +259,15 @@ func (daemon *Daemon) create(ctx context.Context, daemonCfg *config.Config, opts
+	if ctr.HostConfig != nil && ctr.HostConfig.NetworkMode == "" {
+		ctr.HostConfig.NetworkMode = networktypes.NetworkDefault
+	}
 
 	daemon.updateContainerNetworkSettings(ctr, endpointsConfigs)
 
+	if err := daemon.registerMountPoints(ctr, opts.params.DefaultReadOnlyNonRecursive); err != nil {
+		return nil, err
+	}
+	if err := daemon.createContainerVolumesOS(ctx, ctr, opts.params.Config); err != nil {
+		return nil, err
+	}
 
 	if err := daemon.register(ctx, ctr); err != nil {
 		return nil, err
 	}
 
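Taken together, the create() hunks above fix the new ordering of the per-container setup calls. The following is a compact, runnable sketch of that ordering only; the types and receiver are stand-ins rather than moby's real ones, and each step name corresponds to a call visible in the hunks:

package main

import (
	"context"
	"fmt"
)

// ctr is a stand-in for *container.Container; daemon for *Daemon.
type ctr struct{ id string }

type daemon struct{}

// step just records that a setup stage ran; in the real code each name is a
// daemon method invoked from create() as shown in the hunks above.
func (d *daemon) step(name string, c *ctr) error {
	fmt.Printf("%s(%s)\n", name, c.id)
	return nil
}

// createOrder mirrors only the sequence of the refactored Daemon.create.
func (d *daemon) createOrder(ctx context.Context, c *ctr) error {
	for _, name := range []string{
		"setSecurityOptions",                // daemonCfg, ctr
		"registerLinks",                     // ctr
		"createContainerOSSpecificSettings", // ctx, ctr
		"updateContainerNetworkSettings",    // ctr, endpointsConfigs
		"registerMountPoints",               // ctr, defaultReadOnlyNonRecursive
		"createContainerVolumesOS",          // ctx, ctr, config
		"register",                          // ctx, ctr
	} {
		if err := d.step(name, c); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	if err := (&daemon{}).createOrder(context.Background(), &ctr{id: "demo"}); err != nil {
		fmt.Println("error:", err)
	}
}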
@@ -22,24 +22,26 @@ import (
 )
 
 // createContainerOSSpecificSettings performs host-OS specific container create functionality
-func (daemon *Daemon) createContainerOSSpecificSettings(ctx context.Context, container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error {
-	if err := daemon.Mount(container); err != nil {
-		return err
-	}
-	defer daemon.Unmount(container)
-
-	if err := container.SetupWorkingDirectory(daemon.idMapping.RootPair()); err != nil {
-		return err
-	}
-
+func (daemon *Daemon) createContainerOSSpecificSettings(ctx context.Context, ctr *container.Container) error {
 	// Set the default masked and readonly paths with regard to the host config options if they are not set.
-	if hostConfig.MaskedPaths == nil && !hostConfig.Privileged {
-		hostConfig.MaskedPaths = oci.DefaultSpec().Linux.MaskedPaths // Set it to the default if nil
-		container.HostConfig.MaskedPaths = hostConfig.MaskedPaths
+	if ctr.HostConfig.MaskedPaths == nil && !ctr.HostConfig.Privileged {
+		ctr.HostConfig.MaskedPaths = oci.DefaultSpec().Linux.MaskedPaths // Set it to the default if nil
 	}
-	if hostConfig.ReadonlyPaths == nil && !hostConfig.Privileged {
-		hostConfig.ReadonlyPaths = oci.DefaultSpec().Linux.ReadonlyPaths // Set it to the default if nil
-		container.HostConfig.ReadonlyPaths = hostConfig.ReadonlyPaths
+	if ctr.HostConfig.ReadonlyPaths == nil && !ctr.HostConfig.Privileged {
+		ctr.HostConfig.ReadonlyPaths = oci.DefaultSpec().Linux.ReadonlyPaths // Set it to the default if nil
 	}
+	return nil
+}
+
+// createContainerVolumesOS performs host-OS specific volume creation
+func (daemon *Daemon) createContainerVolumesOS(ctx context.Context, ctr *container.Container, config *containertypes.Config) error {
+	if err := daemon.Mount(ctr); err != nil {
+		return err
+	}
+	defer daemon.Unmount(ctr)
+
+	if err := ctr.SetupWorkingDirectory(daemon.idMapping.RootPair()); err != nil {
+		return err
+	}
 
 	for spec := range config.Volumes {
@@ -47,12 +49,12 @@ func (daemon *Daemon) createContainerOSSpecificSettings(ctx context.Context, con
 
 		// Skip volumes for which we already have something mounted on that
 		// destination because of a --volume-from.
-		if container.HasMountFor(destination) {
-			log.G(ctx).WithField("container", container.ID).WithField("destination", spec).Debug("mountpoint already exists, skipping anonymous volume")
+		if ctr.HasMountFor(destination) {
+			log.G(ctx).WithField("container", ctr.ID).WithField("destination", spec).Debug("mountpoint already exists, skipping anonymous volume")
 			// Not an error, this could easily have come from the image config.
 			continue
 		}
-		path, err := container.GetResourcePath(destination)
+		path, err := ctr.GetResourcePath(destination)
 		if err != nil {
 			return err
 		}
@@ -62,18 +64,18 @@ func (daemon *Daemon) createContainerOSSpecificSettings(ctx context.Context, con
 			return fmt.Errorf("cannot mount volume over existing file, file exists %s", path)
 		}
 
-		v, err := daemon.volumes.Create(context.TODO(), "", hostConfig.VolumeDriver, volumeopts.WithCreateReference(container.ID))
+		v, err := daemon.volumes.Create(context.TODO(), "", ctr.HostConfig.VolumeDriver, volumeopts.WithCreateReference(ctr.ID))
 		if err != nil {
 			return err
 		}
 
-		if err := label.Relabel(v.Mountpoint, container.MountLabel, true); err != nil {
+		if err := label.Relabel(v.Mountpoint, ctr.MountLabel, true); err != nil {
 			return err
 		}
 
-		container.AddMountPointWithVolume(destination, &volumeWrapper{v: v, s: daemon.volumes}, true)
+		ctr.AddMountPointWithVolume(destination, &volumeWrapper{v: v, s: daemon.volumes}, true)
 	}
-	return daemon.populateVolumes(ctx, container)
+	return daemon.populateVolumes(ctx, ctr)
 }
 
 // populateVolumes copies data from the container's rootfs into the volume for non-binds.
@@ -11,30 +11,33 @@ import (
 )
 
 // createContainerOSSpecificSettings performs host-OS specific container create functionality
-func (daemon *Daemon) createContainerOSSpecificSettings(ctx context.Context, container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error {
-	if containertypes.Isolation.IsDefault(hostConfig.Isolation) {
+func (daemon *Daemon) createContainerOSSpecificSettings(ctx context.Context, ctr *container.Container) error {
+	if containertypes.Isolation.IsDefault(ctr.HostConfig.Isolation) {
 		// Make sure the host config has the default daemon isolation if not specified by caller.
-		hostConfig.Isolation = daemon.defaultIsolation
+		ctr.HostConfig.Isolation = daemon.defaultIsolation
 	}
+	return nil
+}
+
+// createContainerVolumesOS performs host-OS specific volume creation
+func (daemon *Daemon) createContainerVolumesOS(ctx context.Context, ctr *container.Container, config *containertypes.Config) error {
 	parser := volumemounts.NewParser()
 	for spec := range config.Volumes {
 
-		mp, err := parser.ParseMountRaw(spec, hostConfig.VolumeDriver)
+		mp, err := parser.ParseMountRaw(spec, ctr.HostConfig.VolumeDriver)
 		if err != nil {
 			return fmt.Errorf("Unrecognised volume spec: %v", err)
 		}
 
 		// Skip volumes for which we already have something mounted on that
 		// destination because of a --volume-from.
-		if container.IsDestinationMounted(mp.Destination) {
+		if ctr.IsDestinationMounted(mp.Destination) {
 			continue
 		}
 
-		volumeDriver := hostConfig.VolumeDriver
-
 		// Create the volume in the volume driver. If it doesn't exist,
 		// a new one will be created.
-		v, err := daemon.volumes.Create(ctx, "", volumeDriver, volumeopts.WithCreateReference(container.ID))
+		v, err := daemon.volumes.Create(ctx, "", ctr.HostConfig.VolumeDriver, volumeopts.WithCreateReference(ctr.ID))
 		if err != nil {
 			return err
 		}
@@ -70,7 +73,7 @@ func (daemon *Daemon) createContainerOSSpecificSettings(ctx context.Context, con
 		// }
 
 		// Add it to container.MountPoints
-		container.AddMountPointWithVolume(mp.Destination, &volumeWrapper{v: v, s: daemon.volumes}, mp.RW)
+		ctr.AddMountPointWithVolume(mp.Destination, &volumeWrapper{v: v, s: daemon.volumes}, mp.RW)
 	}
 	return nil
 }
@@ -592,7 +592,7 @@ func (daemon *Daemon) restore(ctx context.Context, cfg *configStore, containers
 		go func(c *container.Container) {
 			_ = sem.Acquire(context.Background(), 1)
 
-			if err := daemon.registerLinks(c, c.HostConfig); err != nil {
+			if err := daemon.registerLinks(c); err != nil {
 				log.G(ctx).WithField("container", c.ID).WithError(err).Error("failed to register link for container")
 			}
 
@@ -1504,12 +1504,12 @@ func getUnmountOnShutdownPath(config *config.Config) string {
 
 // registerLinks registers network links between container and other containers
 // with the daemon using the specification in hostConfig.
-func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error {
-	if hostConfig == nil || hostConfig.NetworkMode.IsUserDefined() {
+func (daemon *Daemon) registerLinks(ctr *container.Container) error {
+	if ctr.HostConfig == nil || ctr.HostConfig.NetworkMode.IsUserDefined() {
 		return nil
 	}
 
-	for _, l := range hostConfig.Links {
+	for _, l := range ctr.HostConfig.Links {
 		name, alias, err := opts.ParseLink(l)
 		if err != nil {
 			return err
@@ -1542,7 +1542,7 @@ func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *
 		if child.HostConfig.NetworkMode.IsHost() {
 			return cerrdefs.ErrInvalidArgument.WithMessage("conflicting options: host type networking can't be used with links. This would result in undefined behavior")
 		}
-		if err := daemon.registerLink(container, child, alias); err != nil {
+		if err := daemon.registerLink(ctr, child, alias); err != nil {
 			return err
 		}
 	}
@@ -464,7 +464,7 @@ func initBridgeDriver(controller *libnetwork.Controller, config config.BridgeCon
 
 // registerLinks sets up links between containers and writes the
 // configuration out for persistence. As of Windows TP4, links are not supported.
-func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error {
+func (daemon *Daemon) registerLinks(container *container.Container) error {
 	return nil
 }
 
@@ -10,7 +10,6 @@ import (
 	"time"
 
 	"github.com/containerd/log"
-	containertypes "github.com/moby/moby/api/types/container"
 	mounttypes "github.com/moby/moby/api/types/mount"
 	volumetypes "github.com/moby/moby/api/types/volume"
 	"github.com/moby/moby/v2/daemon/container"
@@ -67,7 +66,10 @@ func sortMounts(m []container.Mount) []container.Mount {
 // 2. Select the volumes mounted from another containers. Overrides previously configured mount point destination.
 // 3. Select the bind mounts set by the client. Overrides previously configured mount point destinations.
 // 4. Cleanup old volumes that are about to be reassigned.
-func (daemon *Daemon) registerMountPoints(container *container.Container, hostConfig *containertypes.HostConfig, defaultReadOnlyNonRecursive bool) (retErr error) {
+//
+// Do not lock while creating volumes since this could be calling out to external plugins
+// Don't want to block other actions, like `docker ps` because we're waiting on an external plugin
+func (daemon *Daemon) registerMountPoints(ctr *container.Container, defaultReadOnlyNonRecursive bool) (retErr error) {
 	binds := map[string]bool{}
 	mountPoints := map[string]*volumemounts.MountPoint{}
 	parser := volumemounts.NewParser()
@@ -80,7 +82,7 @@ func (daemon *Daemon) registerMountPoints(container *container.Container, hostCo
 			if m.Volume == nil {
 				continue
 			}
-			daemon.volumes.Release(ctx, m.Volume.Name(), container.ID)
+			daemon.volumes.Release(ctx, m.Volume.Name(), ctr.ID)
 		}
 	}
 	}()
@@ -89,18 +91,18 @@ func (daemon *Daemon) registerMountPoints(container *container.Container, hostCo
 		if v, ok := mountPoints[destination]; ok {
 			log.G(ctx).Debugf("Duplicate mount point '%s'", destination)
 			if v.Volume != nil {
-				daemon.volumes.Release(ctx, v.Volume.Name(), container.ID)
+				daemon.volumes.Release(ctx, v.Volume.Name(), ctr.ID)
 			}
 		}
 	}
 
 	// 1. Read already configured mount points.
-	for destination, point := range container.MountPoints {
+	for destination, point := range ctr.MountPoints {
 		mountPoints[destination] = point
 	}
 
 	// 2. Read volumes from other containers.
-	for _, v := range hostConfig.VolumesFrom {
+	for _, v := range ctr.HostConfig.VolumesFrom {
 		containerID, mode, err := parser.ParseVolumesFrom(v)
 		if err != nil {
 			return errdefs.InvalidParameter(err)
@@ -125,7 +127,7 @@ func (daemon *Daemon) registerMountPoints(container *container.Container, hostCo
 		}
 
 		if cp.Source == "" {
-			v, err := daemon.volumes.Get(ctx, cp.Name, volumeopts.WithGetDriver(cp.Driver), volumeopts.WithGetReference(container.ID))
+			v, err := daemon.volumes.Get(ctx, cp.Name, volumeopts.WithGetDriver(cp.Driver), volumeopts.WithGetReference(ctr.ID))
 			if err != nil {
 				return err
 			}
@@ -137,8 +139,8 @@ func (daemon *Daemon) registerMountPoints(container *container.Container, hostCo
 	}
 
 	// 3. Read bind mounts
-	for _, b := range hostConfig.Binds {
-		bind, err := parser.ParseMountRaw(b, hostConfig.VolumeDriver)
+	for _, b := range ctr.HostConfig.Binds {
+		bind, err := parser.ParseMountRaw(b, ctr.HostConfig.VolumeDriver)
 		if err != nil {
 			return err
 		}
@@ -151,14 +153,14 @@ func (daemon *Daemon) registerMountPoints(container *container.Container, hostCo
 		}
 
 		// #10618
-		_, tmpfsExists := hostConfig.Tmpfs[bind.Destination]
+		_, tmpfsExists := ctr.HostConfig.Tmpfs[bind.Destination]
 		if binds[bind.Destination] || tmpfsExists {
 			return duplicateMountPointError(bind.Destination)
 		}
 
 		if bind.Type == mounttypes.TypeVolume {
 			// create the volume
-			v, err := daemon.volumes.Create(ctx, bind.Name, bind.Driver, volumeopts.WithCreateReference(container.ID))
+			v, err := daemon.volumes.Create(ctx, bind.Name, bind.Driver, volumeopts.WithCreateReference(ctr.ID))
 			if err != nil {
 				return err
 			}
@@ -185,7 +187,7 @@ func (daemon *Daemon) registerMountPoints(container *container.Container, hostCo
 		mountPoints[bind.Destination] = bind
 	}
 
-	for _, cfg := range hostConfig.Mounts {
+	for _, cfg := range ctr.HostConfig.Mounts {
 		mp, err := parser.ParseMountSpec(cfg)
 		if err != nil {
 			return errdefs.InvalidParameter(err)
@@ -212,12 +214,12 @@ func (daemon *Daemon) registerMountPoints(container *container.Container, hostCo
 			v, err = daemon.volumes.Create(ctx,
 				mp.Name,
 				mp.Driver,
-				volumeopts.WithCreateReference(container.ID),
+				volumeopts.WithCreateReference(ctr.ID),
 				volumeopts.WithCreateOptions(driverOpts),
 				volumeopts.WithCreateLabels(cfg.VolumeOptions.Labels),
 			)
 		} else {
-			v, err = daemon.volumes.Create(ctx, mp.Name, mp.Driver, volumeopts.WithCreateReference(container.ID))
+			v, err = daemon.volumes.Create(ctx, mp.Name, mp.Driver, volumeopts.WithCreateReference(ctr.ID))
 		}
 		if err != nil {
 			return err
@@ -254,13 +256,13 @@ func (daemon *Daemon) registerMountPoints(container *container.Container, hostCo
 		}
 
 		rwLayerOpts := &layer.CreateRWLayerOpts{
-			StorageOpt: container.HostConfig.StorageOpt,
+			StorageOpt: ctr.HostConfig.StorageOpt,
 		}
 
 		// Include the destination in the layer name to make it unique for each mount point and container.
 		// This makes sure that the same image can be mounted multiple times with different destinations.
 		// Hex encode the destination to create a safe, unique identifier
-		layerName := hex.EncodeToString([]byte(container.ID + ",src=" + mp.Source + ",dst=" + mp.Destination))
+		layerName := hex.EncodeToString([]byte(ctr.ID + ",src=" + mp.Source + ",dst=" + mp.Destination))
 		layer, err := daemon.imageService.CreateLayerFromImage(img, layerName, rwLayerOpts)
 		if err != nil {
 			return err
@@ -291,19 +293,19 @@ func (daemon *Daemon) registerMountPoints(container *container.Container, hostCo
 		mountPoints[mp.Destination] = mp
 	}
 
-	container.Lock()
+	ctr.Lock()
 
 	// 4. Cleanup old volumes that are about to be reassigned.
 	for _, m := range mountPoints {
 		if parser.IsBackwardCompatible(m) {
-			if mp, exists := container.MountPoints[m.Destination]; exists && mp.Volume != nil {
-				daemon.volumes.Release(ctx, mp.Volume.Name(), container.ID)
+			if mp, exists := ctr.MountPoints[m.Destination]; exists && mp.Volume != nil {
+				daemon.volumes.Release(ctx, mp.Volume.Name(), ctr.ID)
 			}
 		}
 	}
-	container.MountPoints = mountPoints
+	ctr.MountPoints = mountPoints
 
-	container.Unlock()
+	ctr.Unlock()
 
 	return nil
 }