diff --git a/daemon/containerd/migration/migration.go b/daemon/containerd/migration/migration.go
new file mode 100644
index 0000000000..fc380a2a7e
--- /dev/null
+++ b/daemon/containerd/migration/migration.go
@@ -0,0 +1,316 @@
+package migration
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/containerd/containerd/v2/core/content"
+	"github.com/containerd/containerd/v2/core/images"
+	"github.com/containerd/containerd/v2/core/leases"
+	"github.com/containerd/containerd/v2/core/mount"
+	"github.com/containerd/containerd/v2/core/snapshots"
+	"github.com/containerd/containerd/v2/pkg/archive/compression"
+	"github.com/containerd/continuity/fs"
+	cerrdefs "github.com/containerd/errdefs"
+	"github.com/containerd/log"
+	"github.com/moby/moby/v2/daemon/internal/image"
+	"github.com/moby/moby/v2/daemon/internal/layer"
+	refstore "github.com/moby/moby/v2/daemon/internal/refstore"
+	"github.com/opencontainers/go-digest"
+	"github.com/opencontainers/image-spec/identity"
+	"github.com/opencontainers/image-spec/specs-go"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"golang.org/x/sync/errgroup"
+)
+
+type LayerMigrator struct {
+	layers  layer.Store
+	refs    refstore.Store
+	dis     image.Store
+	leases  leases.Manager
+	content content.Store
+	cis     images.Store
+}
+
+type Config struct {
+	LayerStore       layer.Store
+	ReferenceStore   refstore.Store
+	DockerImageStore image.Store
+	Leases           leases.Manager
+	Content          content.Store
+	ImageStore       images.Store
+}
+
+func NewLayerMigrator(config Config) *LayerMigrator {
+	return &LayerMigrator{
+		layers:  config.LayerStore,
+		refs:    config.ReferenceStore,
+		dis:     config.DockerImageStore,
+		leases:  config.Leases,
+		content: config.Content,
+		cis:     config.ImageStore,
+	}
+}
+
+// MigrateToContainerd migrates images from the graphdriver-backed image store
+// to the containerd image store (overlay2 to overlayfs, vfs to native).
+func (lm *LayerMigrator) MigrateToContainerd(ctx context.Context, snKey string, sn snapshots.Snapshotter) error {
+	if sn == nil {
+		return fmt.Errorf("no snapshotter to migrate to: %w", cerrdefs.ErrNotImplemented)
+	}
+
+	switch driver := lm.layers.DriverName(); driver {
+	case "overlay2", "vfs":
+	default:
+		return fmt.Errorf("%q not supported for migration: %w", driver, cerrdefs.ErrNotImplemented)
+	}
+
+	var (
+		// Zstd compresses much faster than gzip, making migration roughly 10x faster
+		// TODO: make configurable
+		layerMediaType   = ocispec.MediaTypeImageLayerZstd
+		layerCompression = compression.Zstd
+	)
+
+	lease, err := lm.leases.Create(ctx, leases.WithRandomID(), leases.WithExpiration(24*time.Hour))
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if err := lm.leases.Delete(ctx, lease); err != nil {
+			log.G(ctx).WithError(err).Warn("Failed to delete migration lease")
+		}
+	}()
+	ctx = leases.WithLease(ctx, lease.ID)
+
+	for imgID, img := range lm.dis.Heads() {
+		diffids := img.RootFS.DiffIDs
+		if len(diffids) == 0 {
+			continue
+		}
+		var (
+			parent   string
+			manifest = ocispec.Manifest{
+				MediaType: ocispec.MediaTypeImageManifest,
+				Versioned: specs.Versioned{
+					SchemaVersion: 2,
+				},
+				Layers: make([]ocispec.Descriptor, len(diffids)),
+			}
+			ml        sync.Mutex
+			eg, egctx = errgroup.WithContext(ctx)
+		)
+		for i := range diffids {
+			chainID := identity.ChainID(diffids[:i+1])
+			l, err := lm.layers.Get(chainID)
+			if err != nil {
+				return fmt.Errorf("failed to get layer [%d] %q: %w", i, chainID, err)
+			}
+			layerIndex := i
+			eg.Go(func() error {
+				// Use the group's context so a failing layer cancels the rest.
+				ctx := egctx
+				t1 := time.Now()
+				ts, err := l.TarStream()
+				if err != nil {
+					return err
+				}
+				defer ts.Close()
+
+				desc := ocispec.Descriptor{
+					MediaType: layerMediaType,
+				}
+
+				cw, err := lm.content.Writer(ctx,
+					content.WithRef(fmt.Sprintf("ingest-%s", chainID)),
+					content.WithDescriptor(desc))
+				if err != nil {
+					return fmt.Errorf("failed to get content writer: %w", err)
+				}
+				defer cw.Close()
+
+				dgstr := digest.Canonical.Digester()
+				cs, err := compression.CompressStream(io.MultiWriter(cw, dgstr.Hash()), layerCompression)
+				if err != nil {
+					return fmt.Errorf("failed to get compressed stream: %w", err)
+				}
+				_, err = io.Copy(cs, ts)
+				if err != nil {
+					return fmt.Errorf("failed to copy to compressed stream: %w", err)
+				}
+				if err := cs.Close(); err != nil {
+					return fmt.Errorf("failed to flush compressed stream: %w", err)
+				}
+
+				status, err := cw.Status()
+				if err != nil {
+					return err
+				}
+
+				desc.Size = status.Offset
+				desc.Digest = dgstr.Digest()
+
+				if err := cw.Commit(ctx, desc.Size, desc.Digest); err != nil && !cerrdefs.IsAlreadyExists(err) {
+					return err
+				}
+
+				log.G(ctx).WithFields(log.Fields{
+					"t":      time.Since(t1),
+					"size":   desc.Size,
+					"digest": desc.Digest,
+				}).Debug("Converted layer to content tar")
+
+				ml.Lock()
+				manifest.Layers[layerIndex] = desc
+				ml.Unlock()
+				return nil
+			})
+
+			metadata, err := l.Metadata()
+			if err != nil {
+				return err
+			}
+			src, ok := metadata["UpperDir"]
+			if !ok {
+				src, ok = metadata["SourceDir"]
+				if !ok {
+					log.G(ctx).WithField("metadata", metadata).WithField("driver", lm.layers.DriverName()).Debug("no source directory metadata")
+					return fmt.Errorf("graphdriver not supported: %w", cerrdefs.ErrNotImplemented)
+				}
+			}
+			log.G(ctx).WithField("metadata", metadata).Debugf("migrating %s from %s", chainID, src)
+
+			active := fmt.Sprintf("migration-%s", chainID)
+
+			key := chainID.String()
+
+			snapshotLabels := map[string]string{
+				"containerd.io/snapshot.ref": key,
+			}
+			mounts, err := sn.Prepare(ctx, active, parent, snapshots.WithLabels(snapshotLabels))
+			parent = key
+			if err != nil {
+				if cerrdefs.IsAlreadyExists(err) {
+					continue
+				}
+				return err
+			}
+
+			dst, err := extractSource(mounts)
+			if err != nil {
+				return err
+			}
+
+			t1 := time.Now()
+			if err := fs.CopyDir(dst, src); err != nil {
+				return fmt.Errorf("failed to copy %s to snapshot: %w", src, err)
+			}
+			log.G(ctx).WithFields(log.Fields{
+				"t":   time.Since(t1),
+				"key": key,
+			}).Debug("Copied layer to snapshot")
+
+			if err := sn.Commit(ctx, key, active); err != nil && !cerrdefs.IsAlreadyExists(err) {
+				return err
+			}
+		}
+
+		configBytes := img.RawJSON()
+		manifest.Config = ocispec.Descriptor{
+			MediaType: ocispec.MediaTypeImageConfig,
+			Digest:    digest.FromBytes(configBytes),
+			Size:      int64(len(configBytes)),
+		}
+
+		configLabels := map[string]string{
+			fmt.Sprintf("containerd.io/gc.ref.snapshot.%s", snKey): parent,
+		}
+		if err = content.WriteBlob(ctx, lm.content, "config"+manifest.Config.Digest.String(), bytes.NewReader(configBytes), manifest.Config, content.WithLabels(configLabels)); err != nil && !cerrdefs.IsAlreadyExists(err) {
+			return err
+		}
+
+		if err := eg.Wait(); err != nil {
+			return err
+		}
+
+		manifestBytes, err := json.MarshalIndent(manifest, "", " ")
+		if err != nil {
+			return err
+		}
+
+		manifestDesc := ocispec.Descriptor{
+			MediaType: manifest.MediaType,
+			Digest:    digest.FromBytes(manifestBytes),
+			Size:      int64(len(manifestBytes)),
+		}
+
+		manifestLabels := map[string]string{
+			"containerd.io/gc.ref.content.config": manifest.Config.Digest.String(),
+		}
+		for i := range manifest.Layers {
+			manifestLabels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i)] = manifest.Layers[i].Digest.String()
+		}
+
+		if err = content.WriteBlob(ctx, lm.content, "manifest"+manifestDesc.Digest.String(), bytes.NewReader(manifestBytes), manifestDesc, content.WithLabels(manifestLabels)); err != nil && !cerrdefs.IsAlreadyExists(err) {
+			return err
+		}
+
+		childrenHandler := images.ChildrenHandler(lm.content)
+		childrenHandler = images.SetChildrenMappedLabels(lm.content, childrenHandler, nil)
+		if err = images.Walk(ctx, childrenHandler, manifestDesc); err != nil {
+			return err
+		}
+
+		var added bool
+		for _, named := range lm.refs.References(digest.Digest(imgID)) {
+			img := images.Image{
+				Name:   named.String(),
+				Target: manifestDesc,
+				// TODO: Any labels?
+			}
+			img, err = lm.cis.Create(ctx, img)
+			if err != nil && !cerrdefs.IsAlreadyExists(err) {
+				return err
+			} else if err != nil {
+				log.G(ctx).Infof("Tag already exists: %s", named)
+				continue
+			}
+
+			log.G(ctx).Infof("Migrated image %s to %s", img.Name, img.Target.Digest)
+			added = true
+		}
+
+		if !added {
+			img := images.Image{
+				Name:   "moby-dangling@" + manifestDesc.Digest.String(),
+				Target: manifestDesc,
+				// TODO: Any labels?
+			}
+			img, err = lm.cis.Create(ctx, img)
+			if err != nil && !cerrdefs.IsAlreadyExists(err) {
+				return err
+			} else if err == nil {
+				log.G(ctx).Infof("Migrated image %s to %s", img.Name, img.Target.Digest)
+			}
+		}
+	}
+
+	return nil
+}
+
+func extractSource(mounts []mount.Mount) (string, error) {
+	if len(mounts) != 1 {
+		return "", fmt.Errorf("snapshotters with multiple mounts are not supported: %w", cerrdefs.ErrNotImplemented)
+	}
+	switch mounts[0].Type {
+	case "bind":
+		return mounts[0].Source, nil
+	case "overlay":
+		for _, option := range mounts[0].Options {
+			if strings.HasPrefix(option, "upperdir=") {
+				return strings.TrimPrefix(option, "upperdir="), nil
+			}
+		}
+	default:
+		return "", fmt.Errorf("mount type %q not supported: %w", mounts[0].Type, cerrdefs.ErrNotImplemented)
+	}
+
+	return "", fmt.Errorf("mount is missing upperdir option: %w", cerrdefs.ErrNotImplemented)
+}
diff --git a/daemon/daemon.go b/daemon/daemon.go
index 4f7ad50aa6..ee332935d9 100644
--- a/daemon/daemon.go
+++ b/daemon/daemon.go
@@ -10,6 +10,7 @@ import (
 	"crypto/sha256"
 	"encoding/binary"
 	"fmt"
+	"math"
 	"net"
 	"net/netip"
 	"os"
@@ -42,6 +43,7 @@ import (
 	"github.com/moby/moby/v2/daemon/config"
 	"github.com/moby/moby/v2/daemon/container"
 	ctrd "github.com/moby/moby/v2/daemon/containerd"
+	"github.com/moby/moby/v2/daemon/containerd/migration"
 	"github.com/moby/moby/v2/daemon/events"
 	_ "github.com/moby/moby/v2/daemon/graphdriver/register" // register graph drivers
 	"github.com/moby/moby/v2/daemon/images"
@@ -202,15 +204,15 @@ func (daemon *Daemon) UsesSnapshotter() bool {
 	return daemon.usesSnapshotter
 }
 
-func (daemon *Daemon) restore(cfg *configStore) error {
+func (daemon *Daemon) loadContainers() (map[string]map[string]*container.Container, error) {
 	var mapLock sync.Mutex
-	containers := make(map[string]*container.Container)
+	driverContainers := make(map[string]map[string]*container.Container)
 
 	log.G(context.TODO()).Info("Loading containers: start.")
 
 	dir, err := os.ReadDir(daemon.repository)
 	if err != nil {
-		return err
+		return nil, err
 	}
 
 	// parallelLimit is the maximum number of parallel startup jobs that we
@@ -238,29 +240,39 @@
 				logger.WithError(err).Error("failed to load container")
 				return
 			}
-			if c.Driver != daemon.imageService.StorageDriver() {
-				// Ignore the container if it wasn't created with the current storage-driver
-				logger.Debugf("not restoring container because it was created with another storage driver (%s)", c.Driver)
-				return
-			}
-			rwlayer, err := daemon.imageService.GetLayerByID(c.ID)
-			if err != nil {
-				logger.WithError(err).Error("failed to load container mount")
-				return
-			}
-			c.RWLayer = rwlayer
-			logger.WithFields(log.Fields{
-				"running": c.IsRunning(),
-				"paused":  c.IsPaused(),
-			}).Debug("loaded container")
 			mapLock.Lock()
-			containers[c.ID] = c
+			if driverContainers[c.Driver] == nil {
+				driverContainers[c.Driver] = make(map[string]*container.Container)
+			}
+			driverContainers[c.Driver][c.ID] = c
 			mapLock.Unlock()
 		}(v.Name())
 	}
 	group.Wait()
 
+	return driverContainers, nil
+}
+
+func (daemon *Daemon) restore(cfg *configStore, containers map[string]*container.Container) error {
+	var mapLock sync.Mutex
+
+	log.G(context.TODO()).Info("Restoring containers: start.")
+
+	// parallelLimit is the maximum number of parallel startup jobs that we
+	// allow (this is the limit used for all startup semaphores). The multiplier
+	// (128) was chosen after some fairly significant benchmarking -- don't change
+	// it unless you've tested it significantly (this value is adjusted if
+	// RLIMIT_NOFILE is small to avoid EMFILE).
+	parallelLimit := adjustParallelLimit(len(containers), 128*runtime.NumCPU())
+
+	// Re-used for all parallel startup jobs.
+	var group sync.WaitGroup
+	sem := semaphore.NewWeighted(int64(parallelLimit))
+
 	removeContainers := make(map[string]*container.Container)
 	restartContainers := make(map[*container.Container]chan struct{})
 	activeSandboxes := make(map[string]any)
@@ -274,6 +286,17 @@
 
 		logger := log.G(context.TODO()).WithField("container", c.ID)
 
+		rwlayer, err := daemon.imageService.GetLayerByID(c.ID)
+		if err != nil {
+			logger.WithError(err).Error("failed to load container mount")
+			return
+		}
+		c.RWLayer = rwlayer
+		logger.WithFields(log.Fields{
+			"running": c.IsRunning(),
+			"paused":  c.IsPaused(),
+		}).Debug("loaded container")
+
 		if err := daemon.registerName(c); err != nil {
 			logger.WithError(err).Errorf("failed to register container name: %s", c.Name)
 			mapLock.Lock()
@@ -523,7 +546,7 @@
 	//
 	// Note that we cannot initialize the network controller earlier, as it
 	// needs to know if there's active sandboxes (running containers).
-	if err = daemon.initNetworkController(&cfg.Config, activeSandboxes); err != nil {
+	if err := daemon.initNetworkController(&cfg.Config, activeSandboxes); err != nil {
 		return fmt.Errorf("Error initializing network controller: %v", err)
 	}
 
@@ -833,10 +856,17 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 	d.configStore.Store(cfgStore)
 
 	// TEST_INTEGRATION_USE_SNAPSHOTTER is used for integration tests only.
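+	// migrationThreshold caps the total size of images that will be migrated
+	// from the graphdriver to a containerd snapshotter; a negative value
+	// disables migration (see the size check after image-service setup below).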
+	migrationThreshold := int64(-1)
 	if os.Getenv("TEST_INTEGRATION_USE_SNAPSHOTTER") != "" {
 		d.usesSnapshotter = true
 	} else {
-		d.usesSnapshotter = config.Features["containerd-snapshotter"]
+		log.G(ctx).WithField("features", config.Features).Debug("Checking features for migration")
+		if config.Features["containerd-migration"] {
+			// TODO: Allow setting the threshold
+			migrationThreshold = math.MaxInt64
+		} else {
+			d.usesSnapshotter = config.Features["containerd-snapshotter"]
+		}
 	}
 
 	// Ensure the daemon is properly shutdown if there is a failure during
@@ -1033,12 +1063,17 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 
 	d.linkIndex = newLinkIndex()
 
+	containers, err := d.loadContainers()
+	if err != nil {
+		return nil, err
+	}
+
 	// On Windows we don't support the environment variable, or a user supplied graphdriver
 	// Unix platforms however run a single graphdriver for all containers, and it can
 	// be set through an environment variable, a daemon start parameter, or chosen through
 	// initialization of the layerstore through driver priority order for example.
 	driverName := os.Getenv("DOCKER_DRIVER")
-	if isWindows && d.UsesSnapshotter() {
+	if isWindows && d.usesSnapshotter {
 		// Containerd WCOW snapshotter
 		driverName = "windows"
 	} else if isWindows {
@@ -1050,33 +1085,8 @@
 		driverName = cfgStore.GraphDriver
 	}
 
-	if d.UsesSnapshotter() {
-		if os.Getenv("TEST_INTEGRATION_USE_SNAPSHOTTER") != "" {
-			log.G(ctx).Warn("Enabling containerd snapshotter through the $TEST_INTEGRATION_USE_SNAPSHOTTER environment variable. This should only be used for testing.")
-		}
-		log.G(ctx).Info("Starting daemon with containerd snapshotter integration enabled")
-
-		// FIXME(thaJeztah): implement automatic snapshotter-selection similar to graph-driver selection; see https://github.com/moby/moby/issues/44076
-		if driverName == "" {
-			driverName = defaults.DefaultSnapshotter
-		}
-
-		// Configure and validate the kernels security support. Note this is a Linux/FreeBSD
-		// operation only, so it is safe to pass *just* the runtime OS graphdriver.
-		if err := configureKernelSecuritySupport(&cfgStore.Config, driverName); err != nil {
-			return nil, err
-		}
-		d.imageService = ctrd.NewService(ctrd.ImageServiceConfig{
-			Client:          d.containerdClient,
-			Containers:      d.containers,
-			Snapshotter:     driverName,
-			RegistryHosts:   d.RegistryHosts,
-			Registry:        d.registryService,
-			EventsService:   d.EventsService,
-			IDMapping:       idMapping,
-			RefCountMounter: snapshotter.NewMounter(config.Root, driverName, idMapping),
-		})
-	} else {
+	var migrationConfig migration.Config
+	if !d.usesSnapshotter {
 		layerStore, err := layer.NewStoreFromOptions(layer.StoreOptions{
 			Root:        cfgStore.Root,
 			GraphDriver: driverName,
@@ -1161,6 +1171,93 @@
 		log.G(ctx).Debugf("Max Concurrent Downloads: %d", imgSvcConfig.MaxConcurrentDownloads)
 		log.G(ctx).Debugf("Max Concurrent Uploads: %d", imgSvcConfig.MaxConcurrentUploads)
 		log.G(ctx).Debugf("Max Download Attempts: %d", imgSvcConfig.MaxDownloadAttempts)
+
+		// If no containers use the current graph driver, check whether the
+		// image service can be migrated to a containerd snapshotter.
+		if drv := d.imageService.StorageDriver(); len(containers[drv]) == 0 && migrationThreshold >= 0 {
+			switch drv {
+			case "overlay2":
+				driverName = "overlayfs"
+			case "windowsfilter":
+				driverName = "windows"
+				migrationThreshold = 0
+			case "vfs":
+				driverName = "native"
+			default:
+				migrationThreshold = -1
+				log.G(ctx).Infof("Not migrating to containerd snapshotter, no migration defined for graph driver %q", drv)
+			}
+
+			var totalSize int64
+			ic := d.imageService.CountImages(ctx)
+			if migrationThreshold >= 0 && ic > 0 {
+				sum, err := d.imageService.Images(ctx, imagetypes.ListOptions{All: true})
+				if err != nil {
+					return nil, err
+				}
+				for _, s := range sum {
+					// Add each image's full size rather than accounting for
+					// shared layers, so the sum is an upper bound.
+					totalSize += s.Size
+				}
+			}
+
+			if totalSize <= migrationThreshold {
+				log.G(ctx).WithField("total", totalSize).Infof("Enabling containerd snapshotter: migration is enabled, no containers use the graph driver, and it holds %d images", ic)
+				d.usesSnapshotter = true
+				migrationConfig.LayerStore = imgSvcConfig.LayerStore
+				migrationConfig.DockerImageStore = imgSvcConfig.ImageStore
+				migrationConfig.ReferenceStore = imgSvcConfig.ReferenceStore
+			} else if migrationThreshold >= 0 {
+				log.G(ctx).Warnf("Not migrating to containerd snapshotter because %d images in the graph driver exceed the migration threshold", ic)
+			}
+		} else {
+			log.G(ctx).Debugf("Not attempting migration: %d containers use the graph driver and the migration threshold is %d", len(containers[d.imageService.StorageDriver()]), migrationThreshold)
+		}
+	}
+
+	if d.usesSnapshotter {
+		if os.Getenv("TEST_INTEGRATION_USE_SNAPSHOTTER") != "" {
+			log.G(ctx).Warn("Enabling containerd snapshotter through the $TEST_INTEGRATION_USE_SNAPSHOTTER environment variable. This should only be used for testing.")
+		}
+		log.G(ctx).Info("Starting daemon with containerd snapshotter integration enabled")
+
+		// FIXME(thaJeztah): implement automatic snapshotter-selection similar to graph-driver selection; see https://github.com/moby/moby/issues/44076
+		if driverName == "" {
+			driverName = defaults.DefaultSnapshotter
+		}
+
+		// Configure and validate the kernel's security support. Note this is a Linux/FreeBSD
+		// operation only, so it is safe to pass *just* the runtime OS graphdriver.
+		if err := configureKernelSecuritySupport(&cfgStore.Config, driverName); err != nil {
+			return nil, err
+		}
+		oldImageService := d.imageService
+		d.imageService = ctrd.NewService(ctrd.ImageServiceConfig{
+			Client:          d.containerdClient,
+			Containers:      d.containers,
+			Snapshotter:     driverName,
+			RegistryHosts:   d.RegistryHosts,
+			Registry:        d.registryService,
+			EventsService:   d.EventsService,
+			IDMapping:       idMapping,
+			RefCountMounter: snapshotter.NewMounter(config.Root, driverName, idMapping),
+		})
+
+		if oldImageService != nil {
+			if count := oldImageService.CountImages(ctx); count > 0 {
+				migrationConfig.Leases = d.containerdClient.LeasesService()
+				migrationConfig.Content = d.containerdClient.ContentStore()
+				migrationConfig.ImageStore = d.containerdClient.ImageService()
+				m := migration.NewLayerMigrator(migrationConfig)
+				err := m.MigrateToContainerd(ctx, driverName, d.containerdClient.SnapshotService(driverName))
+				if err != nil {
+					log.G(ctx).WithError(err).Errorf("Failed to migrate images to containerd; images in graph driver %q will not be visible", oldImageService.StorageDriver())
+				} else {
+					log.G(ctx).WithField("image_count", count).Infof("Successfully migrated images from %q to containerd", oldImageService.StorageDriver())
+				}
+			}
+		}
 	}
 
 	go d.execCommandGC()
@@ -1169,9 +1266,22 @@
 		return nil, err
 	}
 
-	if err := d.restore(cfgStore); err != nil {
+	driverContainers, ok := containers[driverName]
+	// Log containers that were created with a different storage driver; they
+	// will not be restored.
+	if (!ok && len(containers) > 0) || len(containers) > 1 {
+		for driver, all := range containers {
+			if driver == driverName {
+				continue
+			}
+			for id := range all {
+				log.G(ctx).WithField("container", id).Debugf("not restoring container because it was created with another storage driver (%s)", driver)
+			}
+		}
+	}
+	if err := d.restore(cfgStore, driverContainers); err != nil {
 		return nil, err
 	}
 	close(d.startupDone)
 
 	info, err := d.SystemInfo(ctx)
diff --git a/daemon/graphdriver/vfs/driver.go b/daemon/graphdriver/vfs/driver.go
index 48940a5ef3..f416620e3a 100644
--- a/daemon/graphdriver/vfs/driver.go
+++ b/daemon/graphdriver/vfs/driver.go
@@ -86,7 +86,16 @@ func (d *Driver) Status() [][2]string {
 
-// GetMetadata is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any meta data.
+// GetMetadata is used for implementing the graphdriver.ProtoDriver interface.
+// It returns the directory backing the layer as "SourceDir".
 func (d *Driver) GetMetadata(id string) (map[string]string, error) {
-	return nil, nil
+	dir := d.dir(id)
+	if _, err := os.Stat(dir); err != nil {
+		return nil, err
+	}
+
+	metadata := map[string]string{
+		"SourceDir": dir,
+	}
+
+	return metadata, nil
 }
 
 // Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver.
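A note on the snapshot keys (not part of the patch): the migrator keys each committed snapshot by the OCI chain ID of the layer chain up to and including that layer, which is the same identifier the graphdriver layer store uses, so parent/child relationships carry over unchanged. A minimal, self-contained sketch with hypothetical diff IDs:

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
	"github.com/opencontainers/image-spec/identity"
)

func main() {
	// Hypothetical DiffIDs, as found in an image config's rootfs.diff_ids.
	diffIDs := []digest.Digest{
		digest.FromString("layer-0"),
		digest.FromString("layer-1"),
	}
	for i := range diffIDs {
		// identity.ChainID hashes the chain pairwise, so each key encodes
		// the full ancestry of the layer, not just its own content.
		fmt.Printf("layer %d snapshot key: %s\n", i, identity.ChainID(diffIDs[:i+1]))
	}
}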
diff --git a/integration/daemon/migration_test.go b/integration/daemon/migration_test.go
new file mode 100644
index 0000000000..5949c7855c
--- /dev/null
+++ b/integration/daemon/migration_test.go
@@ -0,0 +1,159 @@
+package daemon
+
+import (
+	"bytes"
+	"io"
+	"os"
+	"runtime"
+	"testing"
+
+	containertypes "github.com/moby/moby/api/types/container"
+	"github.com/moby/moby/api/types/image"
+	"github.com/moby/moby/v2/integration/internal/container"
+	"github.com/moby/moby/v2/testutil"
+	"github.com/moby/moby/v2/testutil/daemon"
+	"github.com/moby/moby/v2/testutil/fixtures/load"
+	"gotest.tools/v3/assert"
+	"gotest.tools/v3/skip"
+)
+
+func TestMigrateOverlaySnapshotter(t *testing.T) {
+	testMigrateSnapshotter(t, "overlay2", "overlayfs")
+}
+
+func TestMigrateNativeSnapshotter(t *testing.T) {
+	testMigrateSnapshotter(t, "vfs", "native")
+}
+
+func testMigrateSnapshotter(t *testing.T, graphdriver, snapshotter string) {
+	skip.If(t, runtime.GOOS != "linux")
+	skip.If(t, os.Getenv("TEST_INTEGRATION_USE_SNAPSHOTTER") != "")
+
+	ctx := testutil.StartSpan(baseContext, t)
+
+	d := daemon.New(t)
+	defer d.Stop(t)
+
+	d.Start(t, "--iptables=false", "--ip6tables=false", "-s", graphdriver)
+	info := d.Info(t)
+	id := info.ID
+	assert.Check(t, id != "")
+	assert.Equal(t, info.Containers, 0)
+	assert.Equal(t, info.Images, 0)
+	assert.Equal(t, info.Driver, graphdriver)
+
+	load.FrozenImagesLinux(ctx, d.NewClientT(t), "busybox:latest")
+
+	info = d.Info(t)
+	allImages := info.Images
+	assert.Check(t, allImages > 0)
+
+	apiClient := d.NewClientT(t)
+
+	containerID := container.Run(ctx, t, apiClient, func(c *container.TestContainerConfig) {
+		c.Name = "Migration-1-" + snapshotter
+		c.Config.Image = "busybox:latest"
+		c.Config.Cmd = []string{"top"}
+	})
+
+	d.Stop(t)
+
+	// Restart with the migration feature enabled; the existing container
+	// must prevent migration from happening.
+	d.Start(t, "--iptables=false", "--ip6tables=false", "-s", graphdriver, "--feature", "containerd-migration")
+	info = d.Info(t)
+	assert.Equal(t, info.ID, id)
+	assert.Equal(t, info.Driver, graphdriver)
+	assert.Equal(t, info.Containers, 1)
+	assert.Equal(t, info.Images, allImages)
+	container.Remove(ctx, t, apiClient, containerID, containertypes.RemoveOptions{
+		Force: true,
+	})
+
+	d.Stop(t)
+
+	// With the container gone, restarting with the feature enabled should migrate.
+	d.Start(t, "--iptables=false", "--ip6tables=false", "-s", graphdriver, "--feature", "containerd-migration")
+	info = d.Info(t)
+	assert.Equal(t, info.ID, id)
+	assert.Equal(t, info.Containers, 0)
+	assert.Equal(t, info.Driver, snapshotter, "expected migrate to switch from %s to %s", graphdriver, snapshotter)
+	assert.Equal(t, info.Images, allImages)
+
+	result := container.RunAttach(ctx, t, apiClient, func(c *container.TestContainerConfig) {
+		c.Name = "Migration-2-" + snapshotter
+		c.Config.Image = "busybox:latest"
+		c.Config.Cmd = []string{"echo", "hello"}
+	})
+	assert.Equal(t, result.ExitCode, 0)
+	container.Remove(ctx, t, apiClient, result.ContainerID, containertypes.RemoveOptions{})
+}
+
+func TestMigrateSaveLoad(t *testing.T) {
+	skip.If(t, runtime.GOOS != "linux")
+	skip.If(t, os.Getenv("TEST_INTEGRATION_USE_SNAPSHOTTER") != "")
+
+	var (
+		ctx         = testutil.StartSpan(baseContext, t)
+		d           = daemon.New(t)
+		graphdriver = "overlay2"
+		snapshotter = "overlayfs"
+	)
+	defer d.Stop(t)
+
+	d.Start(t, "--iptables=false", "--ip6tables=false", "-s", graphdriver)
+	info := d.Info(t)
+	id := info.ID
+	assert.Check(t, id != "")
+	assert.Equal(t, info.Containers, 0)
+	assert.Equal(t, info.Images, 0)
+	assert.Equal(t, info.Driver, graphdriver)
+
+	load.FrozenImagesLinux(ctx, d.NewClientT(t), "busybox:latest")
+
+	info = d.Info(t)
+	allImages := info.Images
+	assert.Check(t, allImages > 0)
+
+	d.Stop(t)
+
+	d.Start(t, "--iptables=false", "--ip6tables=false", "-s", graphdriver, "--feature", "containerd-migration")
+	info = d.Info(t)
+	assert.Equal(t, info.ID, id)
+	assert.Equal(t, info.Containers, 0)
+	assert.Equal(t, info.Driver, snapshotter, "expected migrate to switch from %s to %s", graphdriver, snapshotter)
+	assert.Equal(t, info.Images, allImages)
+
+	apiClient := d.NewClientT(t)
+
+	// Save the image to a buffer
+	rdr, err := apiClient.ImageSave(ctx, []string{"busybox:latest"}, image.SaveOptions{})
+	assert.NilError(t, err)
+	buf := bytes.NewBuffer(nil)
+	_, err = io.Copy(buf, rdr)
+	assert.NilError(t, err)
+	rdr.Close()
+
+	// Delete all images
+	list, err := apiClient.ImageList(ctx, image.ListOptions{})
+	assert.NilError(t, err)
+	for _, i := range list {
+		_, err = apiClient.ImageRemove(ctx, i.ID, image.RemoveOptions{})
+		assert.NilError(t, err)
+	}
+
+	// Check zero images
+	info = d.Info(t)
+	assert.Equal(t, info.Images, 0)
+
+	// Load the saved image back
+	lr, err := apiClient.ImageLoad(ctx, bytes.NewReader(buf.Bytes()), image.LoadOptions{Quiet: true})
+	assert.NilError(t, err)
+	_, err = io.Copy(io.Discard, lr.Body)
+	assert.NilError(t, err)
+	lr.Body.Close()
+
+	result := container.RunAttach(ctx, t, apiClient, func(c *container.TestContainerConfig) {
+		c.Name = "Migration-save-load-" + snapshotter
+		c.Config.Image = "busybox:latest"
+		c.Config.Cmd = []string{"echo", "hello"}
+	})
+	assert.Equal(t, result.ExitCode, 0)
+	container.Remove(ctx, t, apiClient, result.ContainerID, containertypes.RemoveOptions{})
+}
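For manual verification (not part of the patch), migrated images should be listable straight from containerd. A hedged sketch using the containerd v2 Go client; the "moby" namespace is the daemon's default, and the socket path shown is a standard containerd install rather than dockerd's embedded containerd, so adjust both for your setup:

package main

import (
	"context"
	"fmt"

	"github.com/containerd/containerd/v2/client"
)

func main() {
	// Assumed defaults: adjust the address if dockerd manages its own containerd.
	c, err := client.New("/run/containerd/containerd.sock", client.WithDefaultNamespace("moby"))
	if err != nil {
		panic(err)
	}
	defer c.Close()

	// Migrated tags keep their names; untagged images appear as
	// "moby-dangling@<digest>" (see migration.go above).
	imgs, err := c.ImageService().List(context.Background())
	if err != nil {
		panic(err)
	}
	for _, img := range imgs {
		fmt.Println(img.Name, img.Target.Digest)
	}
}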