diff --git a/daemon/daemon.go b/daemon/daemon.go
index a8dae6d64a..17e2224dad 100644
--- a/daemon/daemon.go
+++ b/daemon/daemon.go
@@ -860,6 +860,11 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 
 	migrationThreshold := int64(-1)
 	isGraphDriver := func(driver string) (bool, error) {
+		if driver == "" {
+			if graphdriver.HasPriorDriver(config.Root) {
+				return true, nil
+			}
+		}
 		return graphdriver.IsRegistered(driver), nil
 	}
 	if enabled, ok := config.Features["containerd-snapshotter"]; (ok && !enabled) || os.Getenv("TEST_INTEGRATION_USE_GRAPHDRIVER") != "" {
@@ -1140,6 +1145,10 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 		return nil, err
 	}
 
+	// NewStoreFromOptions will determine the driver if driverName is empty,
+	// so we need to update driverName to match the driver that was used.
+	driverName = layerStore.DriverName()
+
 	// Configure and validate the kernels security support. Note this is a Linux/FreeBSD
 	// operation only, so it is safe to pass *just* the runtime OS graphdriver.
 	if err := configureKernelSecuritySupport(&cfgStore.Config, layerStore.DriverName()); err != nil {
@@ -1338,6 +1347,10 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 		return nil, err
 	}
 
+	if driverName == "" {
+		return nil, errors.New("driverName is empty. Please report it as a bug! As a workaround, please set the storage driver explicitly")
+	}
+
 	driverContainers, ok := containers[driverName]
 	// Log containers which are not loaded with current driver
 	if (!ok && len(containers) > 0) || len(containers) > 1 {
@@ -1346,7 +1359,10 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 				continue
 			}
 			for id := range all {
-				log.G(ctx).WithField("container", id).Debugf("not restoring container because it was created with another storage driver (%s)", driver)
+				log.G(ctx).WithField("container", id).
+					WithField("driver", driver).
+					WithField("current_driver", driverName).
+					Debugf("not restoring container because it was created with another storage driver (%s)", driver)
 			}
 		}
 	}
diff --git a/daemon/graphdriver/driver.go b/daemon/graphdriver/driver.go
index 039777547e..bc1471b805 100644
--- a/daemon/graphdriver/driver.go
+++ b/daemon/graphdriver/driver.go
@@ -232,6 +232,12 @@ func New(driverName string, config Options) (Driver, error) {
 	return nil, errors.Errorf("no supported storage driver found")
 }
 
+// HasPriorDriver returns true if any prior driver is found
+func HasPriorDriver(root string) bool {
+	driversMap := scanPriorDrivers(root)
+	return len(driversMap) > 0
+}
+
 // scanPriorDrivers returns an un-ordered scan of directories of prior storage
 // drivers. The 'vfs' storage driver is not taken into account, and ignored.
 func scanPriorDrivers(root string) map[string]bool {
diff --git a/integration/daemon/default_storage_test.go b/integration/daemon/default_storage_test.go
new file mode 100644
index 0000000000..0a63a4671e
--- /dev/null
+++ b/integration/daemon/default_storage_test.go
@@ -0,0 +1,82 @@
+package daemon
+
+import (
+	"testing"
+
+	containertypes "github.com/moby/moby/api/types/container"
+	"github.com/moby/moby/v2/testutil"
+	"github.com/moby/moby/v2/testutil/daemon"
+	"gotest.tools/v3/assert"
+	is "gotest.tools/v3/assert/cmp"
+	"gotest.tools/v3/skip"
+)
+
+func TestDefaultStorageDriver(t *testing.T) {
+	skip.If(t, testEnv.DaemonInfo.OSType == "windows", "Windows does not support running sub-daemons")
+	t.Setenv("DOCKER_DRIVER", "")
+	t.Setenv("DOCKER_GRAPHDRIVER", "")
+	t.Setenv("TEST_INTEGRATION_USE_GRAPHDRIVER", "")
+	_ = testutil.StartSpan(baseContext, t)
+
+	d := daemon.New(t)
+	defer d.Stop(t)
+
+	d.Start(t, "--iptables=false", "--ip6tables=false")
+
+	info := d.Info(t)
+	assert.Check(t, is.Equal(info.DriverStatus[0][1], "io.containerd.snapshotter.v1"))
+}
+
+// TestGraphDriverPersistence verifies that a daemon which starts with a
+// graphdriver, pulls images, and creates containers keeps using that
+// graphdriver after a restart without explicit graphdriver configuration,
+// instead of automatically migrating to the containerd snapshotters.
+func TestGraphDriverPersistence(t *testing.T) {
+	skip.If(t, testEnv.DaemonInfo.OSType == "windows", "Windows does not support running sub-daemons")
+	t.Setenv("DOCKER_DRIVER", "")
+	t.Setenv("DOCKER_GRAPHDRIVER", "")
+	t.Setenv("TEST_INTEGRATION_USE_GRAPHDRIVER", "")
+	ctx := testutil.StartSpan(baseContext, t)
+
+	// Phase 1: Start the daemon with an explicit graphdriver (overlay2)
+	d := daemon.New(t)
+	t.Cleanup(func() {
+		d.Stop(t)
+	})
+
+	const testImage = "busybox:latest"
+	d.StartWithBusybox(ctx, t, "--iptables=false", "--ip6tables=false", "--storage-driver=overlay2")
+	c := d.NewClientT(t)
+
+	// Verify we're using a graphdriver
+	info := d.Info(t)
+	assert.Check(t, info.DriverStatus[0][1] != "io.containerd.snapshotter.v1")
+	prevDriver := info.Driver
+
+	containerResp, err := c.ContainerCreate(ctx, &containertypes.Config{
+		Image: testImage,
+		Cmd:   []string{"echo", "test"},
+	}, nil, nil, nil, "test-container")
+	assert.NilError(t, err, "Failed to create container")
+
+	containerID := containerResp.ID
+
+	d.Stop(t)
+
+	// Phase 2: Start the daemon again WITHOUT explicit graphdriver configuration
+	d.Start(t, "--iptables=false", "--ip6tables=false")
+
+	// Verify the daemon still uses a graphdriver (not the containerd snapshotter)
+	// and that it picked up the same driver as before.
+	info = d.Info(t)
+	assert.Check(t, info.DriverStatus[0][1] != "io.containerd.snapshotter.v1")
+	assert.Check(t, is.Equal(info.Driver, prevDriver))
+
+	// Verify our image is still there
+	_, err = c.ImageInspect(ctx, testImage)
+	assert.NilError(t, err, "Test image should still be available after daemon restart")
+
+	// Verify our container is still there
+	_, err = c.ContainerInspect(ctx, containerID)
+	assert.NilError(t, err, "Test container should still exist after daemon restart")
+}