mirror of
https://github.com/moby/moby.git
synced 2026-01-11 02:31:44 +00:00
Merge pull request #51575 from smerkviladze/25.0-add-windows-integration-tests
[25.0 backport] integration: add Windows network driver and isolation tests
This commit is contained in:
412
integration/container/isolation_windows_test.go
Normal file
412
integration/container/isolation_windows_test.go
Normal file
@@ -0,0 +1,412 @@
|
||||
package container // import "github.com/docker/docker/integration/container"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
containertypes "github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/mount"
|
||||
"github.com/docker/docker/api/types/volume"
|
||||
"github.com/docker/docker/integration/internal/container"
|
||||
"github.com/docker/docker/testutil"
|
||||
"gotest.tools/v3/assert"
|
||||
is "gotest.tools/v3/assert/cmp"
|
||||
)
|
||||
|
||||
// TestWindowsProcessIsolation validates process isolation on Windows.
//
// Each case starts a long-lived container (a never-ending ping keeps it
// running) with process isolation and then runs a case-specific validation
// (exec, filesystem, network, environment, CPU visibility) against it.
func TestWindowsProcessIsolation(t *testing.T) {
	ctx := setupTest(t)
	apiClient := testEnv.APIClient()

	testcases := []struct {
		name        string
		description string
		// validate runs the case-specific checks against the started container.
		validate func(t *testing.T, ctx context.Context, id string)
	}{
		{
			name:        "Process isolation basic container lifecycle",
			description: "Validate container can start, run, and stop with process isolation",
			validate: func(t *testing.T, ctx context.Context, id string) {
				// Verify container is running
				ctrInfo := container.Inspect(ctx, t, apiClient, id)
				assert.Check(t, is.Equal(ctrInfo.State.Running, true))
				assert.Check(t, is.Equal(ctrInfo.HostConfig.Isolation, containertypes.IsolationProcess))

				// Exec a trivial command to prove the container is usable.
				execCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
				defer cancel()
				res := container.ExecT(execCtx, t, apiClient, id, []string{"cmd", "/c", "echo", "test"})
				assert.Check(t, is.Equal(res.ExitCode, 0))
				assert.Check(t, strings.Contains(res.Stdout(), "test"))
			},
		},
		{
			name:        "Process isolation filesystem access",
			description: "Validate filesystem operations work correctly with process isolation",
			validate: func(t *testing.T, ctx context.Context, id string) {
				execCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
				defer cancel()

				// Create a test file
				res := container.ExecT(execCtx, t, apiClient, id,
					[]string{"cmd", "/c", "echo test123 > C:\\testfile.txt"})
				assert.Check(t, is.Equal(res.ExitCode, 0))

				// Read the test file
				execCtx2, cancel2 := context.WithTimeout(ctx, 10*time.Second)
				defer cancel2()
				res2 := container.ExecT(execCtx2, t, apiClient, id,
					[]string{"cmd", "/c", "type", "C:\\testfile.txt"})
				assert.Check(t, is.Equal(res2.ExitCode, 0))
				assert.Check(t, strings.Contains(res2.Stdout(), "test123"))
			},
		},
		{
			name:        "Process isolation network connectivity",
			description: "Validate network connectivity works with process isolation",
			validate: func(t *testing.T, ctx context.Context, id string) {
				execCtx, cancel := context.WithTimeout(ctx, 15*time.Second)
				defer cancel()

				// Test localhost connectivity
				res := container.ExecT(execCtx, t, apiClient, id,
					[]string{"ping", "-n", "1", "-w", "3000", "localhost"})
				assert.Check(t, is.Equal(res.ExitCode, 0))
				// Accept either ping output form ("Reply from" or summary line).
				assert.Check(t, strings.Contains(res.Stdout(), "Reply from") ||
					strings.Contains(res.Stdout(), "Received = 1"))
			},
		},
		{
			name:        "Process isolation environment variables",
			description: "Validate environment variables are properly isolated",
			validate: func(t *testing.T, ctx context.Context, id string) {
				execCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
				defer cancel()

				// Check that container has expected environment variables
				res := container.ExecT(execCtx, t, apiClient, id,
					[]string{"cmd", "/c", "set"})
				assert.Check(t, is.Equal(res.ExitCode, 0))

				// Should have Windows-specific environment variables
				stdout := res.Stdout()
				assert.Check(t, strings.Contains(stdout, "COMPUTERNAME") ||
					strings.Contains(stdout, "OS=Windows"))
			},
		},
		{
			name:        "Process isolation CPU access",
			description: "Validate container can access CPU information",
			validate: func(t *testing.T, ctx context.Context, id string) {
				execCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
				defer cancel()

				// Check NUMBER_OF_PROCESSORS environment variable
				res := container.ExecT(execCtx, t, apiClient, id,
					[]string{"cmd", "/c", "echo", "%NUMBER_OF_PROCESSORS%"})
				assert.Check(t, is.Equal(res.ExitCode, 0))

				// Should return a number; an unexpanded "%NUMBER_OF_PROCESSORS%"
				// means the variable was not set in the container.
				output := strings.TrimSpace(res.Stdout())
				assert.Check(t, output != "" && output != "%NUMBER_OF_PROCESSORS%",
					"NUMBER_OF_PROCESSORS not set")
			},
		},
	}

	for _, tc := range testcases {
		t.Run(tc.name, func(t *testing.T) {
			ctx := testutil.StartSpan(ctx, t)

			// Create and start container with process isolation
			id := container.Run(ctx, t, apiClient,
				container.WithIsolation(containertypes.IsolationProcess),
				container.WithCmd("ping", "-t", "localhost"),
			)
			defer apiClient.ContainerRemove(ctx, id, containertypes.RemoveOptions{Force: true})

			tc.validate(t, ctx, id)
		})
	}
}
|
||||
|
||||
// TestWindowsHyperVIsolation validates Hyper-V isolation on Windows.
//
// Each case starts a long-lived container (a never-ending ping keeps it
// running) with Hyper-V isolation and runs a case-specific validation
// (exec, filesystem, network) against it. Timeouts are longer than in the
// process-isolation tests to allow for the extra Hyper-V overhead.
func TestWindowsHyperVIsolation(t *testing.T) {
	ctx := setupTest(t)
	apiClient := testEnv.APIClient()

	testcases := []struct {
		name        string
		description string
		// validate runs the case-specific checks against the started container.
		validate func(t *testing.T, ctx context.Context, id string)
	}{
		{
			name:        "Hyper-V isolation basic container lifecycle",
			description: "Validate container can start, run, and stop with Hyper-V isolation",
			validate: func(t *testing.T, ctx context.Context, id string) {
				// Verify container is running
				ctrInfo := container.Inspect(ctx, t, apiClient, id)
				assert.Check(t, is.Equal(ctrInfo.State.Running, true))
				assert.Check(t, is.Equal(ctrInfo.HostConfig.Isolation, containertypes.IsolationHyperV))

				// Execute a simple command
				execCtx, cancel := context.WithTimeout(ctx, 15*time.Second)
				defer cancel()
				res := container.ExecT(execCtx, t, apiClient, id, []string{"cmd", "/c", "echo", "hyperv-test"})
				assert.Check(t, is.Equal(res.ExitCode, 0))
				assert.Check(t, strings.Contains(res.Stdout(), "hyperv-test"))
			},
		},
		{
			name:        "Hyper-V isolation filesystem operations",
			description: "Validate filesystem isolation with Hyper-V",
			validate: func(t *testing.T, ctx context.Context, id string) {
				execCtx, cancel := context.WithTimeout(ctx, 15*time.Second)
				defer cancel()

				// Test file creation
				res := container.ExecT(execCtx, t, apiClient, id,
					[]string{"cmd", "/c", "echo hyperv-file > C:\\hvtest.txt"})
				assert.Check(t, is.Equal(res.ExitCode, 0))

				// Test file read
				execCtx2, cancel2 := context.WithTimeout(ctx, 15*time.Second)
				defer cancel2()
				res2 := container.ExecT(execCtx2, t, apiClient, id,
					[]string{"cmd", "/c", "type", "C:\\hvtest.txt"})
				assert.Check(t, is.Equal(res2.ExitCode, 0))
				assert.Check(t, strings.Contains(res2.Stdout(), "hyperv-file"))
			},
		},
		{
			name:        "Hyper-V isolation network connectivity",
			description: "Validate network works with Hyper-V isolation",
			validate: func(t *testing.T, ctx context.Context, id string) {
				execCtx, cancel := context.WithTimeout(ctx, 15*time.Second)
				defer cancel()

				// Test localhost connectivity
				res := container.ExecT(execCtx, t, apiClient, id,
					[]string{"ping", "-n", "1", "-w", "5000", "localhost"})
				assert.Check(t, is.Equal(res.ExitCode, 0))
			},
		},
	}

	for _, tc := range testcases {
		t.Run(tc.name, func(t *testing.T) {
			ctx := testutil.StartSpan(ctx, t)

			// Create and start container with Hyper-V isolation
			id := container.Run(ctx, t, apiClient,
				container.WithIsolation(containertypes.IsolationHyperV),
				container.WithCmd("ping", "-t", "localhost"),
			)
			defer apiClient.ContainerRemove(ctx, id, containertypes.RemoveOptions{Force: true})

			tc.validate(t, ctx, id)
		})
	}
}
|
||||
|
||||
// TestWindowsIsolationComparison validates that both isolation modes can coexist
|
||||
// and that containers can be created with different isolation modes on Windows.
|
||||
func TestWindowsIsolationComparison(t *testing.T) {
|
||||
ctx := setupTest(t)
|
||||
apiClient := testEnv.APIClient()
|
||||
|
||||
// Create container with process isolation
|
||||
processID := container.Run(ctx, t, apiClient,
|
||||
container.WithIsolation(containertypes.IsolationProcess),
|
||||
container.WithCmd("ping", "-t", "localhost"),
|
||||
)
|
||||
defer apiClient.ContainerRemove(ctx, processID, containertypes.RemoveOptions{Force: true})
|
||||
|
||||
processInfo := container.Inspect(ctx, t, apiClient, processID)
|
||||
assert.Check(t, is.Equal(processInfo.HostConfig.Isolation, containertypes.IsolationProcess))
|
||||
assert.Check(t, is.Equal(processInfo.State.Running, true))
|
||||
|
||||
// Create container with Hyper-V isolation
|
||||
hypervID := container.Run(ctx, t, apiClient,
|
||||
container.WithIsolation(containertypes.IsolationHyperV),
|
||||
container.WithCmd("ping", "-t", "localhost"),
|
||||
)
|
||||
defer apiClient.ContainerRemove(ctx, hypervID, containertypes.RemoveOptions{Force: true})
|
||||
|
||||
hypervInfo := container.Inspect(ctx, t, apiClient, hypervID)
|
||||
assert.Check(t, is.Equal(hypervInfo.HostConfig.Isolation, containertypes.IsolationHyperV))
|
||||
assert.Check(t, is.Equal(hypervInfo.State.Running, true))
|
||||
|
||||
// Verify both containers can run simultaneously
|
||||
processInfo2 := container.Inspect(ctx, t, apiClient, processID)
|
||||
hypervInfo2 := container.Inspect(ctx, t, apiClient, hypervID)
|
||||
assert.Check(t, is.Equal(processInfo2.State.Running, true))
|
||||
assert.Check(t, is.Equal(hypervInfo2.State.Running, true))
|
||||
}
|
||||
|
||||
// TestWindowsProcessIsolationResourceConstraints validates resource constraints
// work correctly with process isolation on Windows.
//
// Each case applies one constraint (CPU shares, NanoCPUs, memory, CPU count)
// to a long-running container and verifies the value round-trips through
// container inspect. Only the stored configuration is checked, not runtime
// enforcement.
func TestWindowsProcessIsolationResourceConstraints(t *testing.T) {
	ctx := setupTest(t)
	apiClient := testEnv.APIClient()

	testcases := []struct {
		name        string
		cpuShares   int64
		nanoCPUs    int64
		memoryLimit int64
		cpuCount    int64
		// validateConfig asserts the constraint is reflected in the
		// inspected HostConfig.
		validateConfig func(t *testing.T, ctrInfo types.ContainerJSON)
	}{
		{
			name:      "CPU shares constraint - config only",
			cpuShares: 512,
			// Note: CPU shares are accepted by the API but NOT enforced on Windows.
			// This test only verifies the configuration is stored correctly.
			// Actual enforcement does not work - containers get equal CPU regardless of shares.
			// Use NanoCPUs (--cpus flag) for actual CPU limiting on Windows.
			validateConfig: func(t *testing.T, ctrInfo types.ContainerJSON) {
				assert.Check(t, is.Equal(ctrInfo.HostConfig.CPUShares, int64(512)))
			},
		},
		{
			name:     "CPU limit (NanoCPUs) constraint",
			nanoCPUs: 2000000000, // 2.0 CPUs
			// NanoCPUs enforce hard CPU limits on Windows (unlike CPUShares which don't work)
			validateConfig: func(t *testing.T, ctrInfo types.ContainerJSON) {
				assert.Check(t, is.Equal(ctrInfo.HostConfig.NanoCPUs, int64(2000000000)))
			},
		},
		{
			name:        "Memory limit constraint",
			memoryLimit: 512 * 1024 * 1024, // 512MB
			// Memory limits enforce hard limits on container memory usage
			validateConfig: func(t *testing.T, ctrInfo types.ContainerJSON) {
				assert.Check(t, is.Equal(ctrInfo.HostConfig.Memory, int64(512*1024*1024)))
			},
		},
		{
			name:     "CPU count constraint",
			cpuCount: 2,
			// CPU count limits the number of CPUs available to the container
			validateConfig: func(t *testing.T, ctrInfo types.ContainerJSON) {
				assert.Check(t, is.Equal(ctrInfo.HostConfig.CPUCount, int64(2)))
			},
		},
	}

	for _, tc := range testcases {
		t.Run(tc.name, func(t *testing.T) {
			ctx := testutil.StartSpan(ctx, t)

			// Base options; the per-case constraint is appended below so
			// only the fields set in the test case are applied.
			opts := []func(*container.TestContainerConfig){
				container.WithIsolation(containertypes.IsolationProcess),
				container.WithCmd("ping", "-t", "localhost"),
			}

			if tc.cpuShares > 0 {
				opts = append(opts, func(config *container.TestContainerConfig) {
					config.HostConfig.CPUShares = tc.cpuShares
				})
			}

			if tc.nanoCPUs > 0 {
				opts = append(opts, func(config *container.TestContainerConfig) {
					config.HostConfig.NanoCPUs = tc.nanoCPUs
				})
			}

			if tc.memoryLimit > 0 {
				opts = append(opts, func(config *container.TestContainerConfig) {
					config.HostConfig.Memory = tc.memoryLimit
				})
			}

			if tc.cpuCount > 0 {
				opts = append(opts, func(config *container.TestContainerConfig) {
					config.HostConfig.CPUCount = tc.cpuCount
				})
			}

			id := container.Run(ctx, t, apiClient, opts...)
			defer apiClient.ContainerRemove(ctx, id, containertypes.RemoveOptions{Force: true})

			ctrInfo := container.Inspect(ctx, t, apiClient, id)
			tc.validateConfig(t, ctrInfo)
		})
	}
}
|
||||
|
||||
// TestWindowsProcessIsolationVolumeMount validates volume mounting with process isolation on Windows.
|
||||
func TestWindowsProcessIsolationVolumeMount(t *testing.T) {
|
||||
ctx := setupTest(t)
|
||||
apiClient := testEnv.APIClient()
|
||||
|
||||
volumeName := "process-iso-test-volume"
|
||||
volRes, err := apiClient.VolumeCreate(ctx, volume.CreateOptions{
|
||||
Name: volumeName,
|
||||
})
|
||||
assert.NilError(t, err)
|
||||
defer func() {
|
||||
// Force volume removal in case container cleanup fails
|
||||
apiClient.VolumeRemove(ctx, volRes.Name, true)
|
||||
}()
|
||||
|
||||
// Create container with volume mount
|
||||
id := container.Run(ctx, t, apiClient,
|
||||
container.WithIsolation(containertypes.IsolationProcess),
|
||||
container.WithCmd("ping", "-t", "localhost"),
|
||||
container.WithMount(mount.Mount{
|
||||
Type: mount.TypeVolume,
|
||||
Source: volumeName,
|
||||
Target: "C:\\data",
|
||||
}),
|
||||
)
|
||||
defer apiClient.ContainerRemove(ctx, id, containertypes.RemoveOptions{Force: true})
|
||||
|
||||
// Write data to mounted volume
|
||||
execCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
|
||||
defer cancel()
|
||||
res := container.ExecT(execCtx, t, apiClient, id,
|
||||
[]string{"cmd", "/c", "echo volume-test > C:\\data\\test.txt"})
|
||||
assert.Check(t, is.Equal(res.ExitCode, 0))
|
||||
|
||||
// Read data from mounted volume
|
||||
execCtx2, cancel2 := context.WithTimeout(ctx, 10*time.Second)
|
||||
defer cancel2()
|
||||
res2 := container.ExecT(execCtx2, t, apiClient, id,
|
||||
[]string{"cmd", "/c", "type", "C:\\data\\test.txt"})
|
||||
assert.Check(t, is.Equal(res2.ExitCode, 0))
|
||||
assert.Check(t, strings.Contains(res2.Stdout(), "volume-test"))
|
||||
|
||||
// Verify container has volume mount
|
||||
ctrInfo := container.Inspect(ctx, t, apiClient, id)
|
||||
assert.Check(t, len(ctrInfo.Mounts) == 1)
|
||||
assert.Check(t, is.Equal(ctrInfo.Mounts[0].Type, mount.TypeVolume))
|
||||
assert.Check(t, is.Equal(ctrInfo.Mounts[0].Name, volumeName))
|
||||
}
|
||||
|
||||
// TestWindowsHyperVIsolationResourceLimits validates resource limits work with Hyper-V isolation.
|
||||
// This ensures Windows can properly enforce resource constraints on Hyper-V containers.
|
||||
func TestWindowsHyperVIsolationResourceLimits(t *testing.T) {
|
||||
ctx := setupTest(t)
|
||||
apiClient := testEnv.APIClient()
|
||||
|
||||
// Create container with memory limit
|
||||
memoryLimit := int64(512 * 1024 * 1024) // 512MB
|
||||
id := container.Run(ctx, t, apiClient,
|
||||
container.WithIsolation(containertypes.IsolationHyperV),
|
||||
container.WithCmd("ping", "-t", "localhost"),
|
||||
func(config *container.TestContainerConfig) {
|
||||
config.HostConfig.Memory = memoryLimit
|
||||
},
|
||||
)
|
||||
defer apiClient.ContainerRemove(ctx, id, containertypes.RemoveOptions{Force: true})
|
||||
|
||||
// Verify resource limit is set
|
||||
ctrInfo := container.Inspect(ctx, t, apiClient, id)
|
||||
assert.Check(t, is.Equal(ctrInfo.HostConfig.Memory, memoryLimit))
|
||||
assert.Check(t, is.Equal(ctrInfo.HostConfig.Isolation, containertypes.IsolationHyperV))
|
||||
}
|
||||
378
integration/networking/drivers_windows_test.go
Normal file
378
integration/networking/drivers_windows_test.go
Normal file
@@ -0,0 +1,378 @@
|
||||
package networking
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
containertypes "github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/integration/internal/container"
|
||||
"github.com/docker/docker/integration/internal/network"
|
||||
"github.com/docker/docker/testutil"
|
||||
"github.com/docker/go-connections/nat"
|
||||
"gotest.tools/v3/assert"
|
||||
is "gotest.tools/v3/assert/cmp"
|
||||
"gotest.tools/v3/poll"
|
||||
"gotest.tools/v3/skip"
|
||||
)
|
||||
|
||||
// TestWindowsNetworkDrivers validates Windows-specific network drivers for Windows.
// Tests: NAT, Transparent, and L2Bridge network drivers.
//
// Each case only exercises network creation and inspection; connectivity is
// covered elsewhere or depends on host infrastructure (see per-case notes).
func TestWindowsNetworkDrivers(t *testing.T) {
	ctx := setupTest(t)
	c := testEnv.APIClient()

	testcases := []struct {
		name   string
		driver string
	}{
		{
			// NAT connectivity is already tested in TestBridgeICCWindows (bridge_test.go),
			// so we only validate network creation here.
			name:   "NAT driver network creation",
			driver: "nat",
		},
		{
			// Only test creation of a Transparent driver network, connectivity depends on external
			// network infrastructure.
			name:   "Transparent driver network creation",
			driver: "transparent",
		},
		{
			// L2Bridge driver requires specific host network adapter configuration, test will skip
			// if host configuration is missing.
			name:   "L2Bridge driver network creation",
			driver: "l2bridge",
		},
	}

	for tcID, tc := range testcases {
		t.Run(tc.name, func(t *testing.T) {
			ctx := testutil.StartSpan(ctx, t)

			// tcID keeps network names unique across cases.
			netName := fmt.Sprintf("test-%s-%d", tc.driver, tcID)

			// Create network with specified driver
			netResp, err := c.NetworkCreate(ctx, netName, types.NetworkCreate{
				Driver: tc.driver,
			})
			if err != nil {
				// L2Bridge may fail if host network configuration is not available
				if tc.driver == "l2bridge" {
					errStr := strings.ToLower(err.Error())
					if strings.Contains(errStr, "the network does not have a subnet for this endpoint") {
						t.Skipf("Driver %s requires host network configuration: %v", tc.driver, err)
					}
				}
				t.Fatalf("Failed to create network with %s driver: %v", tc.driver, err)
			}
			defer network.RemoveNoError(ctx, t, c, netName)

			// Inspect network to validate driver is correctly set
			netInfo, err := c.NetworkInspect(ctx, netResp.ID, types.NetworkInspectOptions{})
			assert.NilError(t, err)
			assert.Check(t, is.Equal(netInfo.Driver, tc.driver), "Network driver mismatch")
			assert.Check(t, is.Equal(netInfo.Name, netName), "Network name mismatch")
		})
	}
}
|
||||
|
||||
// TestWindowsNATDriverPortMapping validates NAT port mapping by testing host connectivity.
|
||||
func TestWindowsNATDriverPortMapping(t *testing.T) {
|
||||
ctx := setupTest(t)
|
||||
c := testEnv.APIClient()
|
||||
|
||||
// Use default NAT network which supports port mapping
|
||||
netName := "nat"
|
||||
|
||||
// PowerShell HTTP listener on port 80
|
||||
psScript := `
|
||||
$listener = New-Object System.Net.HttpListener
|
||||
$listener.Prefixes.Add('http://+:80/')
|
||||
$listener.Start()
|
||||
while ($listener.IsListening) {
|
||||
$context = $listener.GetContext()
|
||||
$response = $context.Response
|
||||
$content = [System.Text.Encoding]::UTF8.GetBytes('OK')
|
||||
$response.ContentLength64 = $content.Length
|
||||
$response.OutputStream.Write($content, 0, $content.Length)
|
||||
$response.OutputStream.Close()
|
||||
}
|
||||
`
|
||||
|
||||
// Create container with port mapping 80->8080
|
||||
ctrName := "port-mapping-test"
|
||||
id := container.Run(ctx, t, c,
|
||||
container.WithName(ctrName),
|
||||
container.WithCmd("powershell", "-Command", psScript),
|
||||
container.WithNetworkMode(netName),
|
||||
container.WithExposedPorts("80/tcp"),
|
||||
container.WithPortMap(nat.PortMap{
|
||||
"80/tcp": []nat.PortBinding{{HostPort: "8080"}},
|
||||
}),
|
||||
)
|
||||
defer c.ContainerRemove(ctx, id, containertypes.RemoveOptions{Force: true})
|
||||
|
||||
// Verify port mapping metadata
|
||||
ctrInfo := container.Inspect(ctx, t, c, id)
|
||||
portKey := nat.Port("80/tcp")
|
||||
assert.Check(t, ctrInfo.NetworkSettings.Ports[portKey] != nil, "Port mapping not found")
|
||||
assert.Check(t, len(ctrInfo.NetworkSettings.Ports[portKey]) > 0, "No host port binding")
|
||||
assert.Check(t, is.Equal(ctrInfo.NetworkSettings.Ports[portKey][0].HostPort, "8080"))
|
||||
|
||||
// Test actual connectivity from host to container via mapped port
|
||||
httpClient := &http.Client{Timeout: 2 * time.Second}
|
||||
checkHTTP := func(t poll.LogT) poll.Result {
|
||||
resp, err := httpClient.Get("http://localhost:8080")
|
||||
if err != nil {
|
||||
return poll.Continue("connection failed: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return poll.Continue("failed to read body: %v", err)
|
||||
}
|
||||
|
||||
if !strings.Contains(string(body), "OK") {
|
||||
return poll.Continue("unexpected response body: %s", string(body))
|
||||
}
|
||||
return poll.Success()
|
||||
}
|
||||
|
||||
poll.WaitOn(t, checkHTTP, poll.WithTimeout(10*time.Second))
|
||||
}
|
||||
|
||||
// TestWindowsNetworkDNSResolution validates DNS resolution on Windows networks.
//
// Each case creates a network (optionally with custom DNS servers via the
// windowsshim network option), starts a named container on it, and verifies
// that a second container on the same network can resolve and ping the first
// by name.
func TestWindowsNetworkDNSResolution(t *testing.T) {
	ctx := setupTest(t)
	c := testEnv.APIClient()

	testcases := []struct {
		name       string
		driver     string
		customDNS  bool
		dnsServers []string
	}{
		{
			name:   "Default NAT network DNS resolution",
			driver: "nat",
		},
		{
			name:       "Custom DNS servers on NAT network",
			driver:     "nat",
			customDNS:  true,
			dnsServers: []string{"8.8.8.8", "8.8.4.4"},
		},
	}

	for tcID, tc := range testcases {
		t.Run(tc.name, func(t *testing.T) {
			ctx := testutil.StartSpan(ctx, t)

			// tcID keeps network and container names unique across cases.
			netName := fmt.Sprintf("test-dns-%s-%d", tc.driver, tcID)

			// Create network with optional custom DNS
			netOpts := []func(*types.NetworkCreate){
				network.WithDriver(tc.driver),
			}
			if tc.customDNS {
				// Note: DNS options may need to be set via network options on Windows
				for _, dns := range tc.dnsServers {
					netOpts = append(netOpts, network.WithOption("com.docker.network.windowsshim.dnsservers", dns))
				}
			}

			network.CreateNoError(ctx, t, c, netName, netOpts...)
			defer network.RemoveNoError(ctx, t, c, netName)

			// Create container and verify DNS resolution
			ctrName := fmt.Sprintf("dns-test-%d", tcID)
			id := container.Run(ctx, t, c,
				container.WithName(ctrName),
				container.WithNetworkMode(netName),
			)
			defer c.ContainerRemove(ctx, id, containertypes.RemoveOptions{Force: true})

			// Test DNS resolution by pinging container by name from another container
			pingCmd := []string{"ping", "-n", "1", "-w", "3000", ctrName}

			attachCtx, cancel := context.WithTimeout(ctx, 15*time.Second)
			defer cancel()
			res := container.RunAttach(attachCtx, t, c,
				container.WithCmd(pingCmd...),
				container.WithNetworkMode(netName),
			)
			defer c.ContainerRemove(ctx, res.ContainerID, containertypes.RemoveOptions{Force: true})

			// Exit code 0 plus the Windows ping summary line proves the name
			// resolved and the target answered.
			assert.Check(t, is.Equal(res.ExitCode, 0), "DNS resolution failed")
			assert.Check(t, is.Contains(res.Stdout.String(), "Sent = 1, Received = 1, Lost = 0"))
		})
	}
}
|
||||
|
||||
// TestWindowsNetworkLifecycle validates network lifecycle operations on Windows.
|
||||
// Tests network creation, container attachment, detachment, and deletion.
|
||||
func TestWindowsNetworkLifecycle(t *testing.T) {
|
||||
// Skip this test on Windows Containerd because NetworkConnect operations fail with an
|
||||
// unsupported platform request error:
|
||||
// https://github.com/moby/moby/issues/51589
|
||||
skip.If(t, testEnv.RuntimeIsWindowsContainerd(),
|
||||
"Skipping test: fails on Containerd due to unsupported platform request error during NetworkConnect operations")
|
||||
|
||||
ctx := setupTest(t)
|
||||
c := testEnv.APIClient()
|
||||
|
||||
netName := "lifecycle-test-nat"
|
||||
|
||||
netID := network.CreateNoError(ctx, t, c, netName,
|
||||
network.WithDriver("nat"),
|
||||
)
|
||||
|
||||
netInfo, err := c.NetworkInspect(ctx, netID, types.NetworkInspectOptions{})
|
||||
assert.NilError(t, err)
|
||||
assert.Check(t, is.Equal(netInfo.Name, netName))
|
||||
|
||||
// Create container on network
|
||||
ctrName := "lifecycle-ctr"
|
||||
id := container.Run(ctx, t, c,
|
||||
container.WithName(ctrName),
|
||||
container.WithNetworkMode(netName),
|
||||
)
|
||||
|
||||
ctrInfo := container.Inspect(ctx, t, c, id)
|
||||
assert.Check(t, ctrInfo.NetworkSettings.Networks[netName] != nil)
|
||||
|
||||
// Disconnect container from network
|
||||
err = c.NetworkDisconnect(ctx, netID, id, false)
|
||||
assert.NilError(t, err)
|
||||
|
||||
ctrInfo = container.Inspect(ctx, t, c, id)
|
||||
assert.Check(t, ctrInfo.NetworkSettings.Networks[netName] == nil, "Container still connected after disconnect")
|
||||
|
||||
// Reconnect container to network
|
||||
err = c.NetworkConnect(ctx, netID, id, nil)
|
||||
assert.NilError(t, err)
|
||||
|
||||
ctrInfo = container.Inspect(ctx, t, c, id)
|
||||
assert.Check(t, ctrInfo.NetworkSettings.Networks[netName] != nil, "Container not reconnected")
|
||||
|
||||
c.ContainerRemove(ctx, id, containertypes.RemoveOptions{Force: true})
|
||||
|
||||
network.RemoveNoError(ctx, t, c, netName)
|
||||
|
||||
_, err = c.NetworkInspect(ctx, netID, types.NetworkInspectOptions{})
|
||||
assert.Check(t, err != nil, "Network still exists after deletion")
|
||||
}
|
||||
|
||||
// TestWindowsNetworkIsolation validates network isolation between containers on different networks.
|
||||
// Ensures containers on different networks cannot communicate, validating Windows network driver isolation.
|
||||
func TestWindowsNetworkIsolation(t *testing.T) {
|
||||
ctx := setupTest(t)
|
||||
c := testEnv.APIClient()
|
||||
|
||||
// Create two separate NAT networks
|
||||
net1Name := "isolation-net1"
|
||||
net2Name := "isolation-net2"
|
||||
|
||||
network.CreateNoError(ctx, t, c, net1Name, network.WithDriver("nat"))
|
||||
defer network.RemoveNoError(ctx, t, c, net1Name)
|
||||
|
||||
network.CreateNoError(ctx, t, c, net2Name, network.WithDriver("nat"))
|
||||
defer network.RemoveNoError(ctx, t, c, net2Name)
|
||||
|
||||
// Create container on first network
|
||||
ctr1Name := "isolated-ctr1"
|
||||
id1 := container.Run(ctx, t, c,
|
||||
container.WithName(ctr1Name),
|
||||
container.WithNetworkMode(net1Name),
|
||||
)
|
||||
defer c.ContainerRemove(ctx, id1, containertypes.RemoveOptions{Force: true})
|
||||
|
||||
ctr1Info := container.Inspect(ctx, t, c, id1)
|
||||
ctr1IP := ctr1Info.NetworkSettings.Networks[net1Name].IPAddress
|
||||
assert.Check(t, ctr1IP != "", "Container IP not assigned")
|
||||
|
||||
// Create container on second network and try to ping first container
|
||||
pingCmd := []string{"ping", "-n", "1", "-w", "2000", ctr1IP}
|
||||
|
||||
attachCtx, cancel := context.WithTimeout(ctx, 15*time.Second)
|
||||
defer cancel()
|
||||
res := container.RunAttach(attachCtx, t, c,
|
||||
container.WithCmd(pingCmd...),
|
||||
container.WithNetworkMode(net2Name),
|
||||
)
|
||||
defer c.ContainerRemove(ctx, res.ContainerID, containertypes.RemoveOptions{Force: true})
|
||||
|
||||
// Ping should fail, demonstrating network isolation
|
||||
assert.Check(t, res.ExitCode != 0, "Ping succeeded unexpectedly - networks are not isolated")
|
||||
// Windows ping failure can have various error messages, but we should see some indication of failure
|
||||
stdout := res.Stdout.String()
|
||||
stderr := res.Stderr.String()
|
||||
|
||||
// Check for common Windows ping failure indicators
|
||||
hasFailureIndicator := strings.Contains(stdout, "Destination host unreachable") ||
|
||||
strings.Contains(stdout, "Request timed out") ||
|
||||
strings.Contains(stdout, "100% loss") ||
|
||||
strings.Contains(stdout, "Lost = 1") ||
|
||||
strings.Contains(stderr, "unreachable") ||
|
||||
strings.Contains(stderr, "timeout")
|
||||
|
||||
assert.Check(t, hasFailureIndicator,
|
||||
"Expected ping failure indicators not found. Exit code: %d, stdout: %q, stderr: %q",
|
||||
res.ExitCode, stdout, stderr)
|
||||
}
|
||||
|
||||
// TestWindowsNetworkEndpointManagement validates endpoint creation and management on Windows networks.
|
||||
// Tests that multiple containers can be created and managed on the same network.
|
||||
func TestWindowsNetworkEndpointManagement(t *testing.T) {
|
||||
ctx := setupTest(t)
|
||||
c := testEnv.APIClient()
|
||||
|
||||
netName := "endpoint-test-nat"
|
||||
network.CreateNoError(ctx, t, c, netName, network.WithDriver("nat"))
|
||||
defer network.RemoveNoError(ctx, t, c, netName)
|
||||
|
||||
// Create multiple containers on the same network
|
||||
const numContainers = 3
|
||||
containerIDs := make([]string, numContainers)
|
||||
|
||||
for i := 0; i < numContainers; i++ {
|
||||
ctrName := fmt.Sprintf("endpoint-ctr-%d", i)
|
||||
id := container.Run(ctx, t, c,
|
||||
container.WithName(ctrName),
|
||||
container.WithNetworkMode(netName),
|
||||
)
|
||||
containerIDs[i] = id
|
||||
defer c.ContainerRemove(ctx, id, containertypes.RemoveOptions{Force: true})
|
||||
}
|
||||
|
||||
netInfo, err := c.NetworkInspect(ctx, netName, types.NetworkInspectOptions{})
|
||||
assert.NilError(t, err)
|
||||
assert.Check(t, is.Equal(len(netInfo.Containers), numContainers),
|
||||
"Expected %d containers, got %d", numContainers, len(netInfo.Containers))
|
||||
|
||||
// Verify each container has network connectivity to others
|
||||
for i := 0; i < numContainers-1; i++ {
|
||||
targetName := fmt.Sprintf("endpoint-ctr-%d", i)
|
||||
pingCmd := []string{"ping", "-n", "1", "-w", "3000", targetName}
|
||||
|
||||
sourceName := fmt.Sprintf("endpoint-ctr-%d", i+1)
|
||||
attachCtx, cancel := context.WithTimeout(ctx, 15*time.Second)
|
||||
defer cancel()
|
||||
res := container.RunAttach(attachCtx, t, c,
|
||||
container.WithName(fmt.Sprintf("%s-pinger", sourceName)),
|
||||
container.WithCmd(pingCmd...),
|
||||
container.WithNetworkMode(netName),
|
||||
)
|
||||
defer c.ContainerRemove(ctx, res.ContainerID, containertypes.RemoveOptions{Force: true})
|
||||
|
||||
assert.Check(t, is.Equal(res.ExitCode, 0),
|
||||
"Container %s failed to ping %s", sourceName, targetName)
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user