Merge pull request #51290 from thaJeztah/container_noraw

client: merge ContainerInspectWithRaw with ContainerInspect
This commit is contained in:
Austin Vazquez
2025-10-24 19:25:10 -05:00
committed by GitHub
47 changed files with 370 additions and 396 deletions

View File

@@ -63,8 +63,7 @@ type ContainerAPIClient interface {
ContainerDiff(ctx context.Context, container string, options ContainerDiffOptions) (ContainerDiffResult, error)
ExecAPIClient
ContainerExport(ctx context.Context, container string) (io.ReadCloser, error)
ContainerInspect(ctx context.Context, container string) (container.InspectResponse, error)
ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (container.InspectResponse, []byte, error)
ContainerInspect(ctx context.Context, container string, options ContainerInspectOptions) (ContainerInspectResult, error)
ContainerKill(ctx context.Context, container, signal string) error
ContainerList(ctx context.Context, options ContainerListOptions) ([]container.Summary, error)
ContainerLogs(ctx context.Context, container string, options ContainerLogsOptions) (io.ReadCloser, error)

View File

@@ -1,57 +1,47 @@
package client
import (
"bytes"
"context"
"encoding/json"
"io"
"net/url"
"github.com/moby/moby/api/types/container"
)
// ContainerInspect returns the container information.
func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (container.InspectResponse, error) {
containerID, err := trimID("container", containerID)
if err != nil {
return container.InspectResponse{}, err
}
resp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil)
defer ensureReaderClosed(resp)
if err != nil {
return container.InspectResponse{}, err
}
var response container.InspectResponse
err = json.NewDecoder(resp.Body).Decode(&response)
return response, err
// ContainerInspectOptions holds options for inspecting a container using
// the [Client.ContainerInspect] method.
type ContainerInspectOptions struct {
// Size controls whether the container's filesystem size should be calculated.
// When set, the [container.InspectResponse.SizeRw] and [container.InspectResponse.SizeRootFs]
// fields in [ContainerInspectResult.Container] are populated with the result.
//
// Calculating the size can be a costly operation, and should not be used
// unless needed.
Size bool
}
// ContainerInspectWithRaw returns the container information and its raw representation.
func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (container.InspectResponse, []byte, error) {
// ContainerInspectResult holds the result from the [Client.ContainerInspect] method.
type ContainerInspectResult struct {
Container container.InspectResponse
Raw json.RawMessage
}
// ContainerInspect returns the container information.
func (cli *Client) ContainerInspect(ctx context.Context, containerID string, options ContainerInspectOptions) (ContainerInspectResult, error) {
containerID, err := trimID("container", containerID)
if err != nil {
return container.InspectResponse{}, nil, err
return ContainerInspectResult{}, err
}
query := url.Values{}
if getSize {
if options.Size {
query.Set("size", "1")
}
resp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil)
defer ensureReaderClosed(resp)
if err != nil {
return container.InspectResponse{}, nil, err
return ContainerInspectResult{}, err
}
body, err := io.ReadAll(resp.Body)
if err != nil {
return container.InspectResponse{}, nil, err
}
var response container.InspectResponse
rdr := bytes.NewReader(body)
err = json.NewDecoder(rdr).Decode(&response)
return response, body, err
var out ContainerInspectResult
out.Raw, err = decodeWithRaw(resp, &out.Container)
return out, err
}

View File

@@ -1,7 +1,6 @@
package client
import (
"context"
"errors"
"net/http"
"testing"
@@ -18,14 +17,14 @@ func TestContainerInspectError(t *testing.T) {
)
assert.NilError(t, err)
_, err = client.ContainerInspect(context.Background(), "nothing")
_, err = client.ContainerInspect(t.Context(), "nothing", ContainerInspectOptions{})
assert.Check(t, is.ErrorType(err, cerrdefs.IsInternal))
_, err = client.ContainerInspect(context.Background(), "")
_, err = client.ContainerInspect(t.Context(), "", ContainerInspectOptions{})
assert.Check(t, is.ErrorType(err, cerrdefs.IsInvalidArgument))
assert.Check(t, is.ErrorContains(err, "value is empty"))
_, err = client.ContainerInspect(context.Background(), " ")
_, err = client.ContainerInspect(t.Context(), " ", ContainerInspectOptions{})
assert.Check(t, is.ErrorType(err, cerrdefs.IsInvalidArgument))
assert.Check(t, is.ErrorContains(err, "value is empty"))
}
@@ -36,7 +35,7 @@ func TestContainerInspectContainerNotFound(t *testing.T) {
)
assert.NilError(t, err)
_, err = client.ContainerInspect(context.Background(), "unknown")
_, err = client.ContainerInspect(t.Context(), "unknown", ContainerInspectOptions{})
assert.Check(t, is.ErrorType(err, cerrdefs.IsNotFound))
}
@@ -48,19 +47,11 @@ func TestContainerInspectWithEmptyID(t *testing.T) {
)
assert.NilError(t, err)
_, err = client.ContainerInspect(context.Background(), "")
_, err = client.ContainerInspect(t.Context(), "", ContainerInspectOptions{})
assert.Check(t, is.ErrorType(err, cerrdefs.IsInvalidArgument))
assert.Check(t, is.ErrorContains(err, "value is empty"))
_, err = client.ContainerInspect(context.Background(), " ")
assert.Check(t, is.ErrorType(err, cerrdefs.IsInvalidArgument))
assert.Check(t, is.ErrorContains(err, "value is empty"))
_, _, err = client.ContainerInspectWithRaw(context.Background(), "", false)
assert.Check(t, is.ErrorType(err, cerrdefs.IsInvalidArgument))
assert.Check(t, is.ErrorContains(err, "value is empty"))
_, _, err = client.ContainerInspectWithRaw(context.Background(), " ", false)
_, err = client.ContainerInspect(t.Context(), " ", ContainerInspectOptions{})
assert.Check(t, is.ErrorType(err, cerrdefs.IsInvalidArgument))
assert.Check(t, is.ErrorContains(err, "value is empty"))
}
@@ -81,9 +72,9 @@ func TestContainerInspect(t *testing.T) {
)
assert.NilError(t, err)
r, err := client.ContainerInspect(context.Background(), "container_id")
res, err := client.ContainerInspect(t.Context(), "container_id", ContainerInspectOptions{})
assert.NilError(t, err)
assert.Check(t, is.Equal(r.ID, "container_id"))
assert.Check(t, is.Equal(r.Image, "image"))
assert.Check(t, is.Equal(r.Name, "name"))
assert.Check(t, is.Equal(res.Container.ID, "container_id"))
assert.Check(t, is.Equal(res.Container.Image, "image"))
assert.Check(t, is.Equal(res.Container.Name, "name"))
}

View File

@@ -593,10 +593,10 @@ func UtilCreateNetworkMode(t *testing.T, networkMode container.NetworkMode) {
})
assert.NilError(t, err)
containerJSON, err := apiClient.ContainerInspect(testutil.GetContext(t), ctr.ID)
res, err := apiClient.ContainerInspect(testutil.GetContext(t), ctr.ID, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Equal(t, containerJSON.HostConfig.NetworkMode, networkMode, "Mismatched NetworkMode")
assert.Equal(t, res.Container.HostConfig.NetworkMode, networkMode, "Mismatched NetworkMode")
}
func (s *DockerAPISuite) TestContainerAPICreateWithCpuSharesCpuset(c *testing.T) {
@@ -624,13 +624,13 @@ func (s *DockerAPISuite) TestContainerAPICreateWithCpuSharesCpuset(c *testing.T)
})
assert.NilError(c, err)
containerJSON, err := apiClient.ContainerInspect(testutil.GetContext(c), ctr.ID)
res, err := apiClient.ContainerInspect(testutil.GetContext(c), ctr.ID, client.ContainerInspectOptions{})
assert.NilError(c, err)
out := inspectField(c, containerJSON.ID, "HostConfig.CpuShares")
out := inspectField(c, res.Container.ID, "HostConfig.CpuShares")
assert.Equal(c, out, "512")
outCpuset := inspectField(c, containerJSON.ID, "HostConfig.CpusetCpus")
outCpuset := inspectField(c, res.Container.ID, "HostConfig.CpusetCpus")
assert.Equal(c, outCpuset, "0")
}
@@ -913,10 +913,10 @@ func (s *DockerAPISuite) TestContainerAPIDeleteRemoveVolume(c *testing.T) {
assert.NilError(c, err)
defer apiClient.Close()
ctrInspect, err := apiClient.ContainerInspect(testutil.GetContext(c), id)
res, err := apiClient.ContainerInspect(testutil.GetContext(c), id, client.ContainerInspectOptions{})
assert.NilError(c, err)
assert.Assert(c, is.Len(ctrInspect.Mounts, 1), "expected to have 1 mount")
mnt := ctrInspect.Mounts[0]
assert.Assert(c, is.Len(res.Container.Mounts, 1), "expected to have 1 mount")
mnt := res.Container.Mounts[0]
assert.Equal(c, mnt.Destination, testVol)
_, err = os.Stat(mnt.Source)
@@ -949,7 +949,7 @@ func (s *DockerAPISuite) TestContainerAPIChunkedEncoding(c *testing.T) {
req.ContentLength = -1
return nil
}))
assert.Assert(c, err == nil, "error creating container with chunked encoding")
assert.NilError(c, err, "error creating container with chunked encoding")
defer resp.Body.Close()
assert.Equal(c, resp.StatusCode, http.StatusCreated)
}
@@ -1057,10 +1057,10 @@ func (s *DockerAPISuite) TestPostContainersCreateMemorySwappinessHostConfigOmitt
})
assert.NilError(c, err)
containerJSON, err := apiClient.ContainerInspect(testutil.GetContext(c), ctr.ID)
res, err := apiClient.ContainerInspect(testutil.GetContext(c), ctr.ID, client.ContainerInspectOptions{})
assert.NilError(c, err)
assert.Assert(c, is.Nil(containerJSON.HostConfig.MemorySwappiness))
assert.Assert(c, is.Nil(res.Container.HostConfig.MemorySwappiness))
}
// check validation is done daemon side and not only in cli
@@ -1660,9 +1660,9 @@ func (s *DockerAPISuite) TestContainersAPICreateMountsCreate(c *testing.T) {
})
assert.NilError(c, err)
containerInspect, err := apiclient.ContainerInspect(ctx, ctr.ID)
res, err := apiclient.ContainerInspect(ctx, ctr.ID, client.ContainerInspectOptions{})
assert.NilError(c, err)
mps := containerInspect.Mounts
mps := res.Container.Mounts
assert.Assert(c, is.Len(mps, 1))
mountPoint := mps[0]
@@ -1713,13 +1713,13 @@ func (s *DockerAPISuite) TestContainersAPICreateMountsCreate(c *testing.T) {
func containerExit(ctx context.Context, apiclient client.APIClient, name string) func(poll.LogT) poll.Result {
return func(logT poll.LogT) poll.Result {
ctr, err := apiclient.ContainerInspect(ctx, name)
res, err := apiclient.ContainerInspect(ctx, name, client.ContainerInspectOptions{})
if err != nil {
return poll.Error(err)
}
switch ctr.State.Status {
switch s := res.Container.State.Status; s {
case container.StateCreated, container.StateRunning:
return poll.Continue("container %s is %s, waiting for exit", name, ctr.State.Status)
return poll.Continue("container %s is %s, waiting for exit", name, s)
case container.StatePaused, container.StateRestarting, container.StateRemoving, container.StateExited, container.StateDead:
// done
}

View File

@@ -10,6 +10,7 @@ import (
"strings"
"testing"
"github.com/moby/moby/client"
"github.com/moby/moby/v2/integration-cli/cli"
"github.com/moby/moby/v2/internal/testutil"
"gotest.tools/v3/assert"
@@ -190,14 +191,14 @@ func assertPortList(t *testing.T, out string, expected []string) {
}
func assertPortRange(ctx context.Context, id string, expectedTCP, expectedUDP []int) error {
client := testEnv.APIClient()
inspect, err := client.ContainerInspect(ctx, id)
apiClient := testEnv.APIClient()
res, err := apiClient.ContainerInspect(ctx, id, client.ContainerInspectOptions{})
if err != nil {
return err
}
var validTCP, validUDP bool
for port, binding := range inspect.NetworkSettings.Ports {
for port, binding := range res.Container.NetworkSettings.Ports {
if port.Proto() == "tcp" && len(expectedTCP) == 0 {
continue
}

View File

@@ -3689,10 +3689,10 @@ func (s *DockerCLIRunSuite) TestRunNamedVolumesFromNotRemoved(c *testing.T) {
assert.NilError(c, err)
defer apiClient.Close()
container, err := apiClient.ContainerInspect(testutil.GetContext(c), strings.TrimSpace(cid))
inspect, err := apiClient.ContainerInspect(testutil.GetContext(c), strings.TrimSpace(cid), client.ContainerInspectOptions{})
assert.NilError(c, err)
var vname string
for _, v := range container.Mounts {
for _, v := range inspect.Container.Mounts {
if v.Name != "test" {
vname = v.Name
}

View File

@@ -1560,9 +1560,9 @@ func (s *DockerCLIRunSuite) TestRunWithNanoCPUs(c *testing.T) {
clt, err := client.NewClientWithOpts(client.FromEnv)
assert.NilError(c, err)
inspect, err := clt.ContainerInspect(testutil.GetContext(c), "test")
res, err := clt.ContainerInspect(testutil.GetContext(c), "test", client.ContainerInspectOptions{})
assert.NilError(c, err)
assert.Equal(c, inspect.HostConfig.NanoCPUs, int64(500000000))
assert.Equal(c, res.Container.HostConfig.NanoCPUs, int64(500000000))
out = inspectField(c, "test", "HostConfig.CpuQuota")
assert.Equal(c, out, "0", "CPU CFS quota should be 0")

View File

@@ -266,9 +266,9 @@ func (s *DockerCLIUpdateSuite) TestUpdateWithNanoCPUs(c *testing.T) {
clt, err := client.NewClientWithOpts(client.FromEnv)
assert.NilError(c, err)
inspect, err := clt.ContainerInspect(testutil.GetContext(c), "top")
res, err := clt.ContainerInspect(testutil.GetContext(c), "top", client.ContainerInspectOptions{})
assert.NilError(c, err)
assert.Equal(c, inspect.HostConfig.NanoCPUs, int64(500000000))
assert.Equal(c, res.Container.HostConfig.NanoCPUs, int64(500000000))
out = inspectField(c, "top", "HostConfig.CpuQuota")
assert.Equal(c, out, "0", "CPU CFS quota should be 0")
@@ -280,9 +280,9 @@ func (s *DockerCLIUpdateSuite) TestUpdateWithNanoCPUs(c *testing.T) {
assert.Assert(c, is.Contains(out, "Conflicting options: CPU Quota cannot be updated as NanoCPUs has already been set"))
cli.DockerCmd(c, "update", "--cpus", "0.8", "top")
inspect, err = clt.ContainerInspect(testutil.GetContext(c), "top")
res, err = clt.ContainerInspect(testutil.GetContext(c), "top", client.ContainerInspectOptions{})
assert.NilError(c, err)
assert.Equal(c, inspect.HostConfig.NanoCPUs, int64(800000000))
assert.Equal(c, res.Container.HostConfig.NanoCPUs, int64(800000000))
out = inspectField(c, "top", "HostConfig.CpuQuota")
assert.Equal(c, out, "0", "CPU CFS quota should be 0")

View File

@@ -251,14 +251,14 @@ func waitInspect(name, expr, expected string, timeout time.Duration) error {
return daemon.WaitInspectWithArgs(dockerBinary, name, expr, expected, timeout)
}
func getInspectBody(t *testing.T, version, id string) []byte {
func getInspectBody(t *testing.T, version, id string) json.RawMessage {
t.Helper()
apiClient, err := client.NewClientWithOpts(client.FromEnv, client.WithVersion(version))
assert.NilError(t, err)
defer apiClient.Close()
_, body, err := apiClient.ContainerInspectWithRaw(testutil.GetContext(t), id, false)
inspect, err := apiClient.ContainerInspect(testutil.GetContext(t), id, client.ContainerInspectOptions{})
assert.NilError(t, err)
return body
return inspect.Raw
}
// Run a long running idle task in a background container using the

View File

@@ -42,7 +42,7 @@ func TestCreateWithCDIDevices(t *testing.T) {
)
defer apiClient.ContainerRemove(ctx, id, client.ContainerRemoveOptions{Force: true})
inspect, err := apiClient.ContainerInspect(ctx, id)
res, err := apiClient.ContainerInspect(ctx, id, client.ContainerInspectOptions{})
assert.NilError(t, err)
expectedRequests := []containertypes.DeviceRequest{
@@ -51,7 +51,7 @@ func TestCreateWithCDIDevices(t *testing.T) {
DeviceIDs: []string{"vendor1.com/device=foo"},
},
}
assert.Check(t, is.DeepEqual(inspect.HostConfig.DeviceRequests, expectedRequests))
assert.Check(t, is.DeepEqual(res.Container.HostConfig.DeviceRequests, expectedRequests))
poll.WaitOn(t, container.IsStopped(ctx, apiClient, id))
reader, err := apiClient.ContainerLogs(ctx, id, client.ContainerLogsOptions{

View File

@@ -73,9 +73,9 @@ func TestCheckpoint(t *testing.T) {
}
assert.NilError(t, err)
inspect, err := apiClient.ContainerInspect(ctx, cID)
inspect, err := apiClient.ContainerInspect(ctx, cID, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Check(t, is.Equal(true, inspect.State.Running))
assert.Check(t, is.Equal(true, inspect.Container.State.Running))
res, err := apiClient.CheckpointList(ctx, cID, client.CheckpointListOptions{})
assert.NilError(t, err)
@@ -98,9 +98,9 @@ func TestCheckpoint(t *testing.T) {
poll.WaitOn(t, container.IsInState(ctx, apiClient, cID, containertypes.StateExited))
inspect, err = apiClient.ContainerInspect(ctx, cID)
inspect, err = apiClient.ContainerInspect(ctx, cID, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Check(t, is.Equal(false, inspect.State.Running))
assert.Check(t, is.Equal(false, inspect.Container.State.Running))
// Check that both checkpoints are listed.
res, err = apiClient.CheckpointList(ctx, cID, client.CheckpointListOptions{})
@@ -121,9 +121,9 @@ func TestCheckpoint(t *testing.T) {
})
assert.NilError(t, err)
inspect, err = apiClient.ContainerInspect(ctx, cID)
inspect, err = apiClient.ContainerInspect(ctx, cID, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Check(t, is.Equal(true, inspect.State.Running))
assert.Check(t, is.Equal(true, inspect.Container.State.Running))
// Check that the test file has been restored.
cmd = []string{"test", "-f", "/tmp/test-file"}

View File

@@ -295,9 +295,9 @@ func TestCreateWithCustomMaskedPaths(t *testing.T) {
})
assert.NilError(t, err)
ctrInspect, err := apiClient.ContainerInspect(ctx, ctr.ID)
inspect, err := apiClient.ContainerInspect(ctx, ctr.ID, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.DeepEqual(t, ctrInspect.HostConfig.MaskedPaths, tc.expected)
assert.DeepEqual(t, inspect.Container.HostConfig.MaskedPaths, tc.expected)
// Start the container.
err = apiClient.ContainerStart(ctx, ctr.ID, client.ContainerStartOptions{})
@@ -307,9 +307,9 @@ func TestCreateWithCustomMaskedPaths(t *testing.T) {
err = apiClient.ContainerStop(ctx, ctr.ID, client.ContainerStopOptions{})
assert.NilError(t, err)
ctrInspect, err = apiClient.ContainerInspect(ctx, ctr.ID)
inspect, err = apiClient.ContainerInspect(ctx, ctr.ID, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.DeepEqual(t, ctrInspect.HostConfig.MaskedPaths, tc.expected)
assert.DeepEqual(t, inspect.Container.HostConfig.MaskedPaths, tc.expected)
})
}
}
@@ -366,9 +366,9 @@ func TestCreateWithCustomReadonlyPaths(t *testing.T) {
})
assert.NilError(t, err)
ctrInspect, err := apiClient.ContainerInspect(ctx, ctr.ID)
ctrInspect, err := apiClient.ContainerInspect(ctx, ctr.ID, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.DeepEqual(t, ctrInspect.HostConfig.ReadonlyPaths, tc.expected)
assert.DeepEqual(t, ctrInspect.Container.HostConfig.ReadonlyPaths, tc.expected)
// Start the container.
err = apiClient.ContainerStart(ctx, ctr.ID, client.ContainerStartOptions{})
@@ -378,9 +378,9 @@ func TestCreateWithCustomReadonlyPaths(t *testing.T) {
err = apiClient.ContainerStop(ctx, ctr.ID, client.ContainerStopOptions{})
assert.NilError(t, err)
ctrInspect, err = apiClient.ContainerInspect(ctx, ctr.ID)
ctrInspect, err = apiClient.ContainerInspect(ctx, ctr.ID, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.DeepEqual(t, ctrInspect.HostConfig.ReadonlyPaths, tc.expected)
assert.DeepEqual(t, ctrInspect.Container.HostConfig.ReadonlyPaths, tc.expected)
})
}
}
@@ -492,11 +492,11 @@ func TestCreateTmpfsOverrideAnonymousVolume(t *testing.T) {
assert.NilError(t, err)
}()
inspect, err := apiClient.ContainerInspect(ctx, id)
inspect, err := apiClient.ContainerInspect(ctx, id, client.ContainerInspectOptions{})
assert.NilError(t, err)
// tmpfs do not currently get added to inspect.Mounts
// Normally an anonymous volume would, except now tmpfs should prevent that.
assert.Assert(t, is.Len(inspect.Mounts, 0))
assert.Assert(t, is.Len(inspect.Container.Mounts, 0))
chWait, chErr := apiClient.ContainerWait(ctx, id, container.WaitConditionNextExit)
assert.NilError(t, apiClient.ContainerStart(ctx, id, client.ContainerStartOptions{}))

View File

@@ -52,15 +52,15 @@ func TestContainerStartOnDaemonRestart(t *testing.T) {
err := c.ContainerStart(ctx, cID, client.ContainerStartOptions{})
assert.Check(t, err, "error starting test container")
inspect, err := c.ContainerInspect(ctx, cID)
inspect, err := c.ContainerInspect(ctx, cID, client.ContainerInspectOptions{})
assert.Check(t, err, "error getting inspect data")
ppid := getContainerdShimPid(t, inspect)
ppid := getContainerdShimPid(t, inspect.Container)
err = d.Kill()
assert.Check(t, err, "failed to kill test daemon")
err = unix.Kill(inspect.State.Pid, unix.SIGKILL)
err = unix.Kill(inspect.Container.State.Pid, unix.SIGKILL)
assert.Check(t, err, "failed to kill container process")
err = unix.Kill(ppid, unix.SIGKILL)
@@ -107,25 +107,25 @@ func TestDaemonRestartIpcMode(t *testing.T) {
)
defer c.ContainerRemove(ctx, cID, client.ContainerRemoveOptions{Force: true})
inspect, err := c.ContainerInspect(ctx, cID)
inspect, err := c.ContainerInspect(ctx, cID, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Check(t, is.Equal(string(inspect.HostConfig.IpcMode), "private"))
assert.Check(t, is.Equal(string(inspect.Container.HostConfig.IpcMode), "private"))
// restart the daemon with shareable default ipc mode
d.Restart(t, "--iptables=false", "--ip6tables=false", "--default-ipc-mode=shareable")
// check the container is still having private ipc mode
inspect, err = c.ContainerInspect(ctx, cID)
inspect, err = c.ContainerInspect(ctx, cID, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Check(t, is.Equal(string(inspect.HostConfig.IpcMode), "private"))
assert.Check(t, is.Equal(string(inspect.Container.HostConfig.IpcMode), "private"))
// check a new container is created with shareable ipc mode as per new daemon default
cID = container.Run(ctx, t, c)
defer c.ContainerRemove(ctx, cID, client.ContainerRemoveOptions{Force: true})
inspect, err = c.ContainerInspect(ctx, cID)
inspect, err = c.ContainerInspect(ctx, cID, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Check(t, is.Equal(string(inspect.HostConfig.IpcMode), "shareable"))
assert.Check(t, is.Equal(string(inspect.Container.HostConfig.IpcMode), "shareable"))
}
// TestDaemonHostGatewayIP verifies that when a magic string "host-gateway" is passed
@@ -267,11 +267,11 @@ func TestHardRestartWhenContainerIsRunning(t *testing.T) {
ctx := testutil.StartSpan(ctx, t)
ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
inspect, err := apiClient.ContainerInspect(ctx, noPolicy)
inspect, err := apiClient.ContainerInspect(ctx, noPolicy, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Check(t, is.Equal(inspect.State.Status, containertypes.StateExited))
assert.Check(t, is.Equal(inspect.State.ExitCode, 255))
finishedAt, err := time.Parse(time.RFC3339Nano, inspect.State.FinishedAt)
assert.Check(t, is.Equal(inspect.Container.State.Status, containertypes.StateExited))
assert.Check(t, is.Equal(inspect.Container.State.ExitCode, 255))
finishedAt, err := time.Parse(time.RFC3339Nano, inspect.Container.State.FinishedAt)
if assert.Check(t, err) {
assert.Check(t, is.DeepEqual(finishedAt, time.Now(), opt.TimeWithThreshold(time.Minute)))
}
@@ -281,11 +281,11 @@ func TestHardRestartWhenContainerIsRunning(t *testing.T) {
ctx := testutil.StartSpan(ctx, t)
ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
inspect, err := apiClient.ContainerInspect(ctx, onFailure)
inspect, err := apiClient.ContainerInspect(ctx, onFailure, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Check(t, is.Equal(inspect.State.Status, containertypes.StateRunning))
assert.Check(t, is.Equal(inspect.State.ExitCode, 0))
finishedAt, err := time.Parse(time.RFC3339Nano, inspect.State.FinishedAt)
assert.Check(t, is.Equal(inspect.Container.State.Status, containertypes.StateRunning))
assert.Check(t, is.Equal(inspect.Container.State.ExitCode, 0))
finishedAt, err := time.Parse(time.RFC3339Nano, inspect.Container.State.FinishedAt)
if assert.Check(t, err) {
assert.Check(t, is.DeepEqual(finishedAt, time.Now(), opt.TimeWithThreshold(time.Minute)))
}

View File

@@ -40,16 +40,16 @@ func TestContainerKillOnDaemonStart(t *testing.T) {
assert.NilError(t, err)
}()
inspect, err := apiClient.ContainerInspect(ctx, id)
inspect, err := apiClient.ContainerInspect(ctx, id, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Assert(t, inspect.State.Running)
assert.Assert(t, inspect.Container.State.Running)
assert.NilError(t, d.Kill())
d.Start(t, "--iptables=false", "--ip6tables=false")
inspect, err = apiClient.ContainerInspect(ctx, id)
inspect, err = apiClient.ContainerInspect(ctx, id, client.ContainerInspectOptions{})
assert.Check(t, is.Nil(err))
assert.Assert(t, !inspect.State.Running)
assert.Assert(t, !inspect.Container.State.Running)
}
// When the daemon doesn't stop in a clean way (eg. it crashes, the host has a power failure, etc..), or if it's started
@@ -84,18 +84,18 @@ func TestNetworkStateCleanupOnDaemonStart(t *testing.T) {
assert.NilError(t, err)
}()
inspect, err := apiClient.ContainerInspect(ctx, cid)
inspect, err := apiClient.ContainerInspect(ctx, cid, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Assert(t, inspect.NetworkSettings.SandboxID != "")
assert.Assert(t, inspect.NetworkSettings.SandboxKey != "")
assert.Assert(t, inspect.NetworkSettings.Ports[mappedPort] != nil)
assert.Assert(t, inspect.Container.NetworkSettings.SandboxID != "")
assert.Assert(t, inspect.Container.NetworkSettings.SandboxKey != "")
assert.Assert(t, inspect.Container.NetworkSettings.Ports[mappedPort] != nil)
assert.NilError(t, d.Kill())
d.Start(t)
inspect, err = apiClient.ContainerInspect(ctx, cid)
inspect, err = apiClient.ContainerInspect(ctx, cid, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Assert(t, inspect.NetworkSettings.SandboxID == "")
assert.Assert(t, inspect.NetworkSettings.SandboxKey == "")
assert.Assert(t, is.Nil(inspect.NetworkSettings.Ports[mappedPort]))
assert.Assert(t, inspect.Container.NetworkSettings.SandboxID == "")
assert.Assert(t, inspect.Container.NetworkSettings.SandboxKey == "")
assert.Assert(t, is.Nil(inspect.Container.NetworkSettings.Ports[mappedPort]))
}

View File

@@ -131,13 +131,13 @@ func TestHealthStartInterval(t *testing.T) {
if ctxPoll.Err() != nil {
return poll.Error(ctxPoll.Err())
}
inspect, err := apiClient.ContainerInspect(ctxPoll, id)
inspect, err := apiClient.ContainerInspect(ctxPoll, id, client.ContainerInspectOptions{})
if err != nil {
return poll.Error(err)
}
if inspect.State.Health.Status != containertypes.Healthy {
if len(inspect.State.Health.Log) > 0 {
t.Log(inspect.State.Health.Log[len(inspect.State.Health.Log)-1])
if inspect.Container.State.Health.Status != containertypes.Healthy {
if len(inspect.Container.State.Health.Log) > 0 {
t.Log(inspect.Container.State.Health.Log[len(inspect.Container.State.Health.Log)-1])
}
return poll.Continue("waiting on container to be ready")
}
@@ -150,19 +150,19 @@ func TestHealthStartInterval(t *testing.T) {
dl, _ = ctxPoll.Deadline()
poll.WaitOn(t, func(log poll.LogT) poll.Result {
inspect, err := apiClient.ContainerInspect(ctxPoll, id)
inspect, err := apiClient.ContainerInspect(ctxPoll, id, client.ContainerInspectOptions{})
if err != nil {
return poll.Error(err)
}
hLen := len(inspect.State.Health.Log)
hLen := len(inspect.Container.State.Health.Log)
if hLen < 2 {
return poll.Continue("waiting for more healthcheck results")
}
h1 := inspect.State.Health.Log[hLen-1]
h2 := inspect.State.Health.Log[hLen-2]
if h1.Start.Sub(h2.Start) >= inspect.Config.Healthcheck.Interval {
h1 := inspect.Container.State.Health.Log[hLen-1]
h2 := inspect.Container.State.Health.Log[hLen-2]
if h1.Start.Sub(h2.Start) >= inspect.Container.Config.Healthcheck.Interval {
return poll.Success()
}
t.Log(h1.Start.Sub(h2.Start))
@@ -170,15 +170,15 @@ func TestHealthStartInterval(t *testing.T) {
}, poll.WithDelay(time.Second), poll.WithTimeout(time.Until(dl)))
}
func pollForHealthCheckLog(ctx context.Context, client client.APIClient, containerID string, expected string) func(log poll.LogT) poll.Result {
func pollForHealthCheckLog(ctx context.Context, apiClient client.APIClient, containerID string, expected string) func(log poll.LogT) poll.Result {
return func(log poll.LogT) poll.Result {
inspect, err := client.ContainerInspect(ctx, containerID)
inspect, err := apiClient.ContainerInspect(ctx, containerID, client.ContainerInspectOptions{})
if err != nil {
return poll.Error(err)
}
healthChecksTotal := len(inspect.State.Health.Log)
healthChecksTotal := len(inspect.Container.State.Health.Log)
if healthChecksTotal > 0 {
output := inspect.State.Health.Log[healthChecksTotal-1].Output
output := inspect.Container.State.Health.Log[healthChecksTotal-1].Output
if output == expected {
return poll.Success()
}
@@ -188,14 +188,14 @@ func pollForHealthCheckLog(ctx context.Context, client client.APIClient, contain
}
}
func pollForHealthStatus(ctx context.Context, client client.APIClient, containerID string, healthStatus containertypes.HealthStatus) func(log poll.LogT) poll.Result {
func pollForHealthStatus(ctx context.Context, apiClient client.APIClient, containerID string, healthStatus containertypes.HealthStatus) func(log poll.LogT) poll.Result {
return func(log poll.LogT) poll.Result {
inspect, err := client.ContainerInspect(ctx, containerID)
inspect, err := apiClient.ContainerInspect(ctx, containerID, client.ContainerInspectOptions{})
switch {
case err != nil:
return poll.Error(err)
case inspect.State.Health.Status == healthStatus:
case inspect.Container.State.Health.Status == healthStatus:
return poll.Success()
default:
return poll.Continue("waiting for container to become %s", healthStatus)

View File

@@ -33,9 +33,9 @@ func TestInspectAnnotations(t *testing.T) {
},
)
inspect, err := apiClient.ContainerInspect(ctx, id)
inspect, err := apiClient.ContainerInspect(ctx, id, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Check(t, is.DeepEqual(inspect.HostConfig.Annotations, annotations))
assert.Check(t, is.DeepEqual(inspect.Container.HostConfig.Annotations, annotations))
}
// TestNetworkAliasesAreEmpty verifies that network-scoped aliases are not set
@@ -165,12 +165,14 @@ func TestContainerInspectWithRaw(t *testing.T) {
}
for _, tc := range tests {
t.Run(tc.doc, func(t *testing.T) {
ctrInspect, raw, err := apiClient.ContainerInspectWithRaw(ctx, ctrID, tc.withSize)
inspect, err := apiClient.ContainerInspect(ctx, ctrID, client.ContainerInspectOptions{
Size: tc.withSize,
})
assert.NilError(t, err)
assert.Check(t, is.Equal(ctrInspect.ID, ctrID))
assert.Check(t, is.Equal(inspect.Container.ID, ctrID))
var rawInspect map[string]any
err = json.Unmarshal(raw, &rawInspect)
err = json.Unmarshal(inspect.Raw, &rawInspect)
assert.NilError(t, err, "Should produce valid JSON")
if tc.withSize {
@@ -179,12 +181,12 @@ func TestContainerInspectWithRaw(t *testing.T) {
// See https://github.com/moby/moby/blob/2837112c8ead55cdad36eaac61bafc713b4f669a/daemon/images/image_windows.go#L12-L16
t.Log("skip checking SizeRw, SizeRootFs on windows as it's not yet implemented")
} else {
if assert.Check(t, ctrInspect.SizeRw != nil) {
if assert.Check(t, inspect.Container.SizeRw != nil) {
// RW-layer size can be zero.
assert.Check(t, *ctrInspect.SizeRw >= 0, "Should have a size: %d", *ctrInspect.SizeRw)
assert.Check(t, *inspect.Container.SizeRw >= 0, "Should have a size: %d", *inspect.Container.SizeRw)
}
if assert.Check(t, ctrInspect.SizeRootFs != nil) {
assert.Check(t, *ctrInspect.SizeRootFs > 0, "Should have a size: %d", *ctrInspect.SizeRootFs)
if assert.Check(t, inspect.Container.SizeRootFs != nil) {
assert.Check(t, *inspect.Container.SizeRootFs > 0, "Should have a size: %d", *inspect.Container.SizeRootFs)
}
}
@@ -193,12 +195,12 @@ func TestContainerInspectWithRaw(t *testing.T) {
_, ok = rawInspect["SizeRootFs"]
assert.Check(t, ok)
} else {
assert.Check(t, is.Nil(ctrInspect.SizeRw))
assert.Check(t, is.Nil(ctrInspect.SizeRootFs))
assert.Check(t, is.Nil(inspect.Container.SizeRw))
assert.Check(t, is.Nil(inspect.Container.SizeRootFs))
_, ok := rawInspect["SizeRw"]
assert.Check(t, !ok, "Should not contain SizeRw:\n%s", string(raw))
assert.Check(t, !ok, "Should not contain SizeRw:\n%s", string(inspect.Raw))
_, ok = rawInspect["SizeRootFs"]
assert.Check(t, !ok, "Should not contain SizeRootFs:\n%s", string(raw))
assert.Check(t, !ok, "Should not contain SizeRootFs:\n%s", string(inspect.Raw))
}
})
}

View File

@@ -323,15 +323,15 @@ func TestIpcModeOlderClient(t *testing.T) {
// pre-check: default ipc mode in daemon is private
cID := container.Create(ctx, t, apiClient, container.WithAutoRemove)
inspect, err := apiClient.ContainerInspect(ctx, cID)
inspect, err := apiClient.ContainerInspect(ctx, cID, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Check(t, is.Equal(string(inspect.HostConfig.IpcMode), "private"))
assert.Check(t, is.Equal(string(inspect.Container.HostConfig.IpcMode), "private"))
// main check: using older client creates "shareable" container
apiClient = request.NewAPIClient(t, client.WithVersion("1.39"))
cID = container.Create(ctx, t, apiClient, container.WithAutoRemove)
inspect, err = apiClient.ContainerInspect(ctx, cID)
inspect, err = apiClient.ContainerInspect(ctx, cID, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Check(t, is.Equal(string(inspect.HostConfig.IpcMode), "shareable"))
assert.Check(t, is.Equal(string(inspect.Container.HostConfig.IpcMode), "shareable"))
}

View File

@@ -5,6 +5,7 @@ import (
"testing"
containertypes "github.com/moby/moby/api/types/container"
"github.com/moby/moby/client"
"github.com/moby/moby/v2/integration/internal/container"
"github.com/moby/moby/v2/internal/testutil"
"github.com/moby/moby/v2/internal/testutil/request"
@@ -160,9 +161,9 @@ func TestInspectOomKilledTrue(t *testing.T) {
poll.WaitOn(t, container.IsInState(ctx, apiClient, cID, containertypes.StateExited))
inspect, err := apiClient.ContainerInspect(ctx, cID)
inspect, err := apiClient.ContainerInspect(ctx, cID, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Check(t, is.Equal(true, inspect.State.OOMKilled))
assert.Check(t, is.Equal(true, inspect.Container.State.OOMKilled))
}
func TestInspectOomKilledFalse(t *testing.T) {
@@ -175,7 +176,7 @@ func TestInspectOomKilledFalse(t *testing.T) {
poll.WaitOn(t, container.IsInState(ctx, apiClient, cID, containertypes.StateExited))
inspect, err := apiClient.ContainerInspect(ctx, cID)
inspect, err := apiClient.ContainerInspect(ctx, cID, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Check(t, is.Equal(false, inspect.State.OOMKilled))
assert.Check(t, is.Equal(false, inspect.Container.State.OOMKilled))
}

View File

@@ -207,15 +207,15 @@ func TestMountDaemonRoot(t *testing.T) {
}
}()
inspect, err := apiClient.ContainerInspect(ctx, c.ID)
inspect, err := apiClient.ContainerInspect(ctx, c.ID, client.ContainerInspectOptions{})
if err != nil {
t.Fatal(err)
}
if len(inspect.Mounts) != 1 {
t.Fatalf("unexpected number of mounts: %+v", inspect.Mounts)
if len(inspect.Container.Mounts) != 1 {
t.Fatalf("unexpected number of mounts: %+v", inspect.Container.Mounts)
}
m := inspect.Mounts[0]
m := inspect.Container.Mounts[0]
if m.Propagation != test.expected {
t.Fatalf("got unexpected propagation mode, expected %q, got: %v", test.expected, m.Propagation)
}

View File

@@ -30,9 +30,9 @@ func TestPause(t *testing.T) {
err := apiClient.ContainerPause(ctx, cID)
assert.NilError(t, err)
inspect, err := apiClient.ContainerInspect(ctx, cID)
inspect, err := apiClient.ContainerInspect(ctx, cID, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Check(t, is.Equal(true, inspect.State.Paused))
assert.Check(t, is.Equal(true, inspect.Container.State.Paused))
err = apiClient.ContainerUnpause(ctx, cID)
assert.NilError(t, err)

View File

@@ -44,7 +44,7 @@ func TestRemoveContainerWithRemovedVolume(t *testing.T) {
})
assert.NilError(t, err)
_, err = apiClient.ContainerInspect(ctx, cID)
_, err = apiClient.ContainerInspect(ctx, cID, client.ContainerInspectOptions{})
assert.Check(t, is.ErrorType(err, cerrdefs.IsNotFound))
assert.Check(t, is.ErrorContains(err, "No such container"))
}
@@ -58,10 +58,10 @@ func TestRemoveContainerWithVolume(t *testing.T) {
cID := container.Run(ctx, t, apiClient, container.WithVolume(prefix+slash+"srv"))
ctrInspect, err := apiClient.ContainerInspect(ctx, cID)
inspect, err := apiClient.ContainerInspect(ctx, cID, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Check(t, is.Equal(1, len(ctrInspect.Mounts)))
volName := ctrInspect.Mounts[0].Name
assert.Check(t, is.Equal(1, len(inspect.Container.Mounts)))
volName := inspect.Container.Mounts[0].Name
_, err = apiClient.VolumeInspect(ctx, volName, client.VolumeInspectOptions{})
assert.NilError(t, err)

View File

@@ -37,9 +37,9 @@ func TestRenameLinkedContainer(t *testing.T) {
bID = container.Run(ctx, t, apiClient, container.WithName(bName), container.WithLinks(aName))
inspect, err := apiClient.ContainerInspect(ctx, bID)
inspect, err := apiClient.ContainerInspect(ctx, bID, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Check(t, is.DeepEqual([]string{"/" + aName + ":/" + bName + "/" + aName}, inspect.HostConfig.Links))
assert.Check(t, is.DeepEqual([]string{"/" + aName + ":/" + bName + "/" + aName}, inspect.Container.HostConfig.Links))
}
func TestRenameStoppedContainer(t *testing.T) {
@@ -49,17 +49,17 @@ func TestRenameStoppedContainer(t *testing.T) {
oldName := "first_name" + t.Name()
cID := container.Run(ctx, t, apiClient, container.WithName(oldName), container.WithCmd("sh"))
inspect, err := apiClient.ContainerInspect(ctx, cID)
inspect, err := apiClient.ContainerInspect(ctx, cID, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Check(t, is.Equal("/"+oldName, inspect.Name))
assert.Check(t, is.Equal("/"+oldName, inspect.Container.Name))
newName := "new_name" + cID // using cID as random suffix
err = apiClient.ContainerRename(ctx, oldName, newName)
assert.NilError(t, err)
inspect, err = apiClient.ContainerInspect(ctx, cID)
inspect, err = apiClient.ContainerInspect(ctx, cID, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Check(t, is.Equal("/"+newName, inspect.Name))
assert.Check(t, is.Equal("/"+newName, inspect.Container.Name))
}
func TestRenameRunningContainerAndReuse(t *testing.T) {
@@ -73,18 +73,18 @@ func TestRenameRunningContainerAndReuse(t *testing.T) {
err := apiClient.ContainerRename(ctx, oldName, newName)
assert.NilError(t, err)
inspect, err := apiClient.ContainerInspect(ctx, cID)
inspect, err := apiClient.ContainerInspect(ctx, cID, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Check(t, is.Equal("/"+newName, inspect.Name))
assert.Check(t, is.Equal("/"+newName, inspect.Container.Name))
_, err = apiClient.ContainerInspect(ctx, oldName)
_, err = apiClient.ContainerInspect(ctx, oldName, client.ContainerInspectOptions{})
assert.Check(t, is.ErrorContains(err, "No such container: "+oldName))
cID = container.Run(ctx, t, apiClient, container.WithName(oldName))
inspect, err = apiClient.ContainerInspect(ctx, cID)
inspect, err = apiClient.ContainerInspect(ctx, cID, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Check(t, is.Equal("/"+oldName, inspect.Name))
assert.Check(t, is.Equal("/"+oldName, inspect.Container.Name))
}
func TestRenameInvalidName(t *testing.T) {
@@ -97,9 +97,9 @@ func TestRenameInvalidName(t *testing.T) {
err := apiClient.ContainerRename(ctx, oldName, "new:invalid")
assert.Check(t, is.ErrorContains(err, "Invalid container name"))
inspect, err := apiClient.ContainerInspect(ctx, oldName)
inspect, err := apiClient.ContainerInspect(ctx, oldName, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Check(t, is.Equal(cID, inspect.ID))
assert.Check(t, is.Equal(cID, inspect.Container.ID))
}
// Test case for GitHub issue 22466
@@ -146,9 +146,9 @@ func TestRenameAnonymousContainer(t *testing.T) {
}, container.WithCmd("ping", count, "1", container1Name))
poll.WaitOn(t, container.IsInState(ctx, apiClient, cID, containertypes.StateExited))
inspect, err := apiClient.ContainerInspect(ctx, cID)
inspect, err := apiClient.ContainerInspect(ctx, cID, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Check(t, is.Equal(0, inspect.State.ExitCode), "container %s exited with the wrong exitcode: %s", cID, inspect.State.Error)
assert.Check(t, is.Equal(0, inspect.Container.State.ExitCode), "container %s exited with the wrong exitcode: %s", cID, inspect.Container.State.Error)
}
// TODO: should be a unit test
@@ -186,9 +186,9 @@ func TestRenameContainerWithLinkedContainer(t *testing.T) {
err := apiClient.ContainerRename(ctx, app1Name, app2Name)
assert.NilError(t, err)
inspect, err := apiClient.ContainerInspect(ctx, app2Name+"/mysql")
inspect, err := apiClient.ContainerInspect(ctx, app2Name+"/mysql", client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Check(t, is.Equal(db1ID, inspect.ID))
assert.Check(t, is.Equal(db1ID, inspect.Container.ID))
}
// Regression test for https://github.com/moby/moby/issues/47186

View File

@@ -145,15 +145,15 @@ func TestDaemonRestartKillContainers(t *testing.T) {
}
}
func pollForNewHealthCheck(ctx context.Context, client *client.Client, startTime time.Time, containerID string) func(log poll.LogT) poll.Result {
func pollForNewHealthCheck(ctx context.Context, apiClient *client.Client, startTime time.Time, containerID string) func(log poll.LogT) poll.Result {
return func(log poll.LogT) poll.Result {
inspect, err := client.ContainerInspect(ctx, containerID)
inspect, err := apiClient.ContainerInspect(ctx, containerID, client.ContainerInspectOptions{})
if err != nil {
return poll.Error(err)
}
healthChecksTotal := len(inspect.State.Health.Log)
healthChecksTotal := len(inspect.Container.State.Health.Log)
if healthChecksTotal > 0 {
if inspect.State.Health.Log[healthChecksTotal-1].Start.After(startTime) {
if inspect.Container.State.Health.Log[healthChecksTotal-1].Start.After(startTime) {
return poll.Success()
}
}
@@ -202,9 +202,9 @@ func TestContainerWithAutoRemoveCanBeRestarted(t *testing.T) {
err := apiClient.ContainerRestart(ctx, cID, client.ContainerStopOptions{Timeout: &noWaitTimeout})
assert.NilError(t, err)
inspect, err := apiClient.ContainerInspect(ctx, cID)
inspect, err := apiClient.ContainerInspect(ctx, cID, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Assert(t, inspect.State.Status != container.StateRemoving, "Container should not be removing yet")
assert.Assert(t, inspect.Container.State.Status != container.StateRemoving, "Container should not be removing yet")
poll.WaitOn(t, testContainer.IsInState(ctx, apiClient, cID, container.StateRunning))
@@ -278,7 +278,7 @@ func TestContainerRestartWithCancelledRequest(t *testing.T) {
}
// Container should be restarted (running).
inspect, err := apiClient.ContainerInspect(ctx, cID)
inspect, err := apiClient.ContainerInspect(ctx, cID, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Check(t, is.Equal(inspect.State.Status, container.StateRunning))
assert.Check(t, is.Equal(inspect.Container.State.Status, container.StateRunning))
}

View File

@@ -49,10 +49,10 @@ func TestNISDomainname(t *testing.T) {
c.Config.Hostname = hostname
c.Config.Domainname = domainname
})
inspect, err := apiClient.ContainerInspect(ctx, cID)
inspect, err := apiClient.ContainerInspect(ctx, cID, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Check(t, is.Equal(hostname, inspect.Config.Hostname))
assert.Check(t, is.Equal(domainname, inspect.Config.Domainname))
assert.Check(t, is.Equal(hostname, inspect.Container.Config.Hostname))
assert.Check(t, is.Equal(domainname, inspect.Container.Config.Domainname))
// Check hostname.
res, err := container.Exec(ctx, apiClient, cID,
@@ -89,9 +89,9 @@ func TestHostnameDnsResolution(t *testing.T) {
c.Config.Hostname = hostname
c.HostConfig.NetworkMode = containertypes.NetworkMode(netName)
})
inspect, err := apiClient.ContainerInspect(ctx, cID)
inspect, err := apiClient.ContainerInspect(ctx, cID, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Check(t, is.Equal(hostname, inspect.Config.Hostname))
assert.Check(t, is.Equal(hostname, inspect.Container.Config.Hostname))
// Clear hosts file so ping will use DNS for hostname resolution
res, err := container.Exec(ctx, apiClient, cID,

View File

@@ -53,9 +53,9 @@ func TestStopContainerWithTimeoutCancel(t *testing.T) {
case <-time.After(5 * time.Second):
t.Fatal("timeout waiting for stop request to be cancelled")
}
inspect, err := apiClient.ContainerInspect(ctx, id)
inspect, err := apiClient.ContainerInspect(ctx, id, client.ContainerInspectOptions{})
assert.Check(t, err)
assert.Check(t, inspect.State.Running)
assert.Check(t, inspect.Container.State.Running)
// container should be stopped after stopTimeout is reached. The daemon.containerStop
// code is rather convoluted, and waits another 2 seconds for the container to

View File

@@ -105,9 +105,9 @@ func TestStopContainerWithTimeout(t *testing.T) {
poll.WaitOn(t, container.IsStopped(ctx, apiClient, id), pollOpts...)
inspect, err := apiClient.ContainerInspect(ctx, id)
inspect, err := apiClient.ContainerInspect(ctx, id, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Check(t, is.Equal(inspect.State.ExitCode, tc.expectedExitCode))
assert.Check(t, is.Equal(inspect.Container.State.ExitCode, tc.expectedExitCode))
})
}
}

View File

@@ -45,10 +45,10 @@ func TestUpdateMemory(t *testing.T) {
})
assert.NilError(t, err)
inspect, err := apiClient.ContainerInspect(ctx, cID)
inspect, err := apiClient.ContainerInspect(ctx, cID, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Check(t, is.Equal(setMemory, inspect.HostConfig.Memory))
assert.Check(t, is.Equal(setMemorySwap, inspect.HostConfig.MemorySwap))
assert.Check(t, is.Equal(setMemory, inspect.Container.HostConfig.Memory))
assert.Check(t, is.Equal(setMemorySwap, inspect.Container.HostConfig.MemorySwap))
memoryFile := "/sys/fs/cgroup/memory/memory.limit_in_bytes"
if testEnv.DaemonInfo.CgroupVersion == "2" {
@@ -116,9 +116,9 @@ func TestUpdateCPUQuota(t *testing.T) {
assert.NilError(t, err)
}
inspect, err := apiClient.ContainerInspect(ctx, cID)
inspect, err := apiClient.ContainerInspect(ctx, cID, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Check(t, is.Equal(test.update, inspect.HostConfig.CPUQuota))
assert.Check(t, is.Equal(test.update, inspect.Container.HostConfig.CPUQuota))
if testEnv.DaemonInfo.CgroupVersion == "2" {
res, err := container.Exec(ctx, apiClient, cID,
@@ -192,10 +192,10 @@ func TestUpdatePidsLimit(t *testing.T) {
})
assert.NilError(t, err)
inspect, err := c.ContainerInspect(ctx, cID)
inspect, err := c.ContainerInspect(ctx, cID, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Assert(t, inspect.HostConfig.Resources.PidsLimit != nil)
assert.Equal(t, *inspect.HostConfig.Resources.PidsLimit, test.expect)
assert.Assert(t, inspect.Container.HostConfig.Resources.PidsLimit != nil)
assert.Equal(t, *inspect.Container.HostConfig.Resources.PidsLimit, test.expect)
ctx, cancel := context.WithTimeout(ctx, 60*time.Second)
defer cancel()

View File

@@ -6,6 +6,7 @@ import (
cerrdefs "github.com/containerd/errdefs"
containertypes "github.com/moby/moby/api/types/container"
"github.com/moby/moby/client"
"github.com/moby/moby/v2/integration/internal/container"
"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"
@@ -38,10 +39,10 @@ func TestUpdateRestartPolicy(t *testing.T) {
poll.WaitOn(t, container.IsInState(ctx, apiClient, cID, containertypes.StateExited), poll.WithTimeout(timeout))
inspect, err := apiClient.ContainerInspect(ctx, cID)
inspect, err := apiClient.ContainerInspect(ctx, cID, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Check(t, is.Equal(inspect.RestartCount, 5))
assert.Check(t, is.Equal(inspect.HostConfig.RestartPolicy.MaximumRetryCount, 5))
assert.Check(t, is.Equal(inspect.Container.RestartCount, 5))
assert.Check(t, is.Equal(inspect.Container.HostConfig.RestartPolicy.MaximumRetryCount, 5))
}
func TestUpdateRestartWithAutoRemove(t *testing.T) {

View File

@@ -158,8 +158,8 @@ func TestWaitConditions(t *testing.T) {
default:
}
info, _ := cli.ContainerInspect(ctx, containerID)
assert.Equal(t, info.State.Status, containertypes.StateRunning)
inspect, _ := cli.ContainerInspect(ctx, containerID, client.ContainerInspectOptions{})
assert.Equal(t, inspect.Container.State.Status, containertypes.StateRunning)
_, err = streams.Conn.Write([]byte("\n"))
assert.NilError(t, err)
@@ -170,8 +170,8 @@ func TestWaitConditions(t *testing.T) {
case waitRes := <-waitResC:
assert.Check(t, is.Equal(int64(99), waitRes.StatusCode))
case <-time.After(StopContainerWindowsPollTimeout):
ctr, _ := cli.ContainerInspect(ctx, containerID)
t.Fatalf("Timed out waiting for container exit code (status = %q)", ctr.State.Status)
ctr, _ := cli.ContainerInspect(ctx, containerID, client.ContainerInspectOptions{})
t.Fatalf("Timed out waiting for container exit code (status = %q)", ctr.Container.State.Status)
}
})
}

View File

@@ -505,7 +505,7 @@ func testLiveRestoreAutoRemove(t *testing.T) {
d.Restart(t, "--live-restore", "--iptables=false", "--ip6tables=false")
apiClient := d.NewClientT(t)
_, err := apiClient.ContainerInspect(ctx, cID)
_, err := apiClient.ContainerInspect(ctx, cID, client.ContainerInspectOptions{})
assert.NilError(t, err, "Container shouldn't be removed after engine restart")
finishContainer()
@@ -518,9 +518,9 @@ func testLiveRestoreAutoRemove(t *testing.T) {
apiClient := d.NewClientT(t)
// Get PID of the container process.
inspect, err := apiClient.ContainerInspect(ctx, cID)
inspect, err := apiClient.ContainerInspect(ctx, cID, client.ContainerInspectOptions{})
assert.NilError(t, err)
pid := inspect.State.Pid
pid := inspect.Container.State.Pid
d.Stop(t)
@@ -639,9 +639,9 @@ func testLiveRestoreVolumeReferences(t *testing.T) {
poll.WaitOn(t, container.IsStopped(ctx, c, cID2))
inspect, err := c.ContainerInspect(ctx, cID2)
inspect, err := c.ContainerInspect(ctx, cID2, client.ContainerInspectOptions{})
if assert.Check(t, err) {
assert.Check(t, is.Equal(inspect.State.ExitCode, 0), "volume doesn't have the same file")
assert.Check(t, is.Equal(inspect.Container.State.ExitCode, 0), "volume doesn't have the same file")
}
logs, err := c.ContainerLogs(ctx, cID2, client.ContainerLogsOptions{ShowStdout: true})

View File

@@ -84,10 +84,10 @@ func TestGraphDriverPersistence(t *testing.T) {
assert.Check(t, is.Equal(imageInspect.GraphDriver.Name, prevDriver), "Image graphdriver data should match")
// Verify our container is still there
containerInspect, err := c.ContainerInspect(ctx, containerID)
inspect, err := c.ContainerInspect(ctx, containerID, client.ContainerInspectOptions{})
assert.NilError(t, err, "Test container should still exist after daemon restart")
assert.Check(t, containerInspect.GraphDriver != nil, "GraphDriver should be set for graphdriver backend")
assert.Check(t, is.Equal(containerInspect.GraphDriver.Name, prevDriver), "Container graphdriver data should match")
assert.Check(t, inspect.Container.GraphDriver != nil, "GraphDriver should be set for graphdriver backend")
assert.Check(t, is.Equal(inspect.Container.GraphDriver.Name, prevDriver), "Container graphdriver data should match")
}
// TestInspectGraphDriverAPIBC checks API backward compatibility of the GraphDriver field in image/container inspect.
@@ -158,20 +158,20 @@ func TestInspectGraphDriverAPIBC(t *testing.T) {
}
}
if containerInspect, err := c.ContainerInspect(ctx, ctr.ID); assert.Check(t, err) {
if inspect, err := c.ContainerInspect(ctx, ctr.ID, client.ContainerInspectOptions{}); assert.Check(t, err) {
if tc.expGraphDriver != "" {
if assert.Check(t, containerInspect.GraphDriver != nil) {
assert.Check(t, is.Equal(containerInspect.GraphDriver.Name, tc.expGraphDriver))
if assert.Check(t, inspect.Container.GraphDriver != nil) {
assert.Check(t, is.Equal(inspect.Container.GraphDriver.Name, tc.expGraphDriver))
}
} else {
assert.Check(t, is.Nil(containerInspect.GraphDriver))
assert.Check(t, is.Nil(inspect.Container.GraphDriver))
}
if tc.expRootFSStorage {
assert.DeepEqual(t, containerInspect.Storage, &storage.Storage{
assert.DeepEqual(t, inspect.Container.Storage, &storage.Storage{
RootFS: &storage.RootFSStorage{Snapshot: &storage.RootFSStorageSnapshot{Name: "overlayfs"}},
})
} else {
assert.Check(t, is.Nil(containerInspect.Storage))
assert.Check(t, is.Nil(inspect.Container.Storage))
}
}
})

View File

@@ -127,10 +127,10 @@ func RunAttach(ctx context.Context, t *testing.T, apiClient client.APIClient, op
// Inspect to get the exit code. A new context is used here to make sure that if the context passed as argument as
// reached timeout during the demultiplexStream call, we still return a RunResult.
resp, err := apiClient.ContainerInspect(context.Background(), id)
inspect, err := apiClient.ContainerInspect(context.Background(), id, client.ContainerInspectOptions{})
assert.NilError(t, err)
return RunResult{ContainerID: id, ExitCode: resp.State.ExitCode, Stdout: &s.stdout, Stderr: &s.stderr}
return RunResult{ContainerID: id, ExitCode: inspect.Container.State.ExitCode, Stdout: &s.stdout, Stderr: &s.stderr}
}
type streams struct {
@@ -187,10 +187,10 @@ func RemoveAll(ctx context.Context, t *testing.T, apiClient client.APIClient) {
func Inspect(ctx context.Context, t *testing.T, apiClient client.APIClient, containerRef string) container.InspectResponse {
t.Helper()
c, err := apiClient.ContainerInspect(ctx, containerRef)
inspect, err := apiClient.ContainerInspect(ctx, containerRef, client.ContainerInspectOptions{})
assert.NilError(t, err)
return c
return inspect.Container
}
type ContainerOutput struct {

View File

@@ -14,12 +14,12 @@ import (
// RunningStateFlagIs polls for the container's Running state flag to be equal to running.
func RunningStateFlagIs(ctx context.Context, apiClient client.APIClient, containerID string, running bool) func(log poll.LogT) poll.Result {
return func(log poll.LogT) poll.Result {
inspect, err := apiClient.ContainerInspect(ctx, containerID)
inspect, err := apiClient.ContainerInspect(ctx, containerID, client.ContainerInspectOptions{})
switch {
case err != nil:
return poll.Error(err)
case inspect.State.Running == running:
case inspect.Container.State.Running == running:
return poll.Success()
default:
return poll.Continue("waiting for container to be %s", map[bool]string{true: "running", false: "stopped"}[running])
@@ -35,19 +35,19 @@ func IsStopped(ctx context.Context, apiClient client.APIClient, containerID stri
// IsInState verifies the container is in one of the specified state, e.g., "running", "exited", etc.
func IsInState(ctx context.Context, apiClient client.APIClient, containerID string, state ...container.ContainerState) func(log poll.LogT) poll.Result {
return func(log poll.LogT) poll.Result {
inspect, err := apiClient.ContainerInspect(ctx, containerID)
inspect, err := apiClient.ContainerInspect(ctx, containerID, client.ContainerInspectOptions{})
if err != nil {
return poll.Error(err)
}
for _, v := range state {
if inspect.State.Status == v {
if inspect.Container.State.Status == v {
return poll.Success()
}
}
if len(state) == 1 {
return poll.Continue("waiting for container State.Status to be '%s', currently '%s'", state[0], inspect.State.Status)
return poll.Continue("waiting for container State.Status to be '%s', currently '%s'", state[0], inspect.Container.State.Status)
} else {
return poll.Continue("waiting for container State.Status to be one of (%s), currently '%s'", strings.Join(state, ", "), inspect.State.Status)
return poll.Continue("waiting for container State.Status to be one of (%s), currently '%s'", strings.Join(state, ", "), inspect.Container.State.Status)
}
}
}
@@ -55,30 +55,30 @@ func IsInState(ctx context.Context, apiClient client.APIClient, containerID stri
// IsSuccessful verifies state.Status == "exited" && state.ExitCode == 0
func IsSuccessful(ctx context.Context, apiClient client.APIClient, containerID string) func(log poll.LogT) poll.Result {
return func(log poll.LogT) poll.Result {
inspect, err := apiClient.ContainerInspect(ctx, containerID)
inspect, err := apiClient.ContainerInspect(ctx, containerID, client.ContainerInspectOptions{})
if err != nil {
return poll.Error(err)
}
if inspect.State.Status == container.StateExited {
if inspect.State.ExitCode == 0 {
if inspect.Container.State.Status == container.StateExited {
if inspect.Container.State.ExitCode == 0 {
return poll.Success()
}
return poll.Error(errors.Errorf("expected exit code 0, got %d", inspect.State.ExitCode))
return poll.Error(errors.Errorf("expected exit code 0, got %d", inspect.Container.State.ExitCode))
}
return poll.Continue("waiting for container to be %q, currently %s", container.StateExited, inspect.State.Status)
return poll.Continue("waiting for container to be %q, currently %s", container.StateExited, inspect.Container.State.Status)
}
}
// IsRemoved verifies the container has been removed
func IsRemoved(ctx context.Context, apiClient client.APIClient, containerID string) func(log poll.LogT) poll.Result {
return func(log poll.LogT) poll.Result {
inspect, err := apiClient.ContainerInspect(ctx, containerID)
inspect, err := apiClient.ContainerInspect(ctx, containerID, client.ContainerInspectOptions{})
if err != nil {
if cerrdefs.IsNotFound(err) {
return poll.Success()
}
return poll.Error(err)
}
return poll.Continue("waiting for container to be removed, currently %s", inspect.State.Status)
return poll.Continue("waiting for container to be removed, currently %s", inspect.Container.State.Status)
}
}

View File

@@ -531,9 +531,9 @@ func TestPublishedPortAlreadyInUse(t *testing.T) {
err := apiClient.ContainerStart(ctx, ctr2, client.ContainerStartOptions{})
assert.Assert(t, is.ErrorContains(err, "failed to set up container networking"))
inspect, err := apiClient.ContainerInspect(ctx, ctr2)
inspect, err := apiClient.ContainerInspect(ctx, ctr2, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Check(t, is.Equal(inspect.State.Status, containertypes.StateCreated))
assert.Check(t, is.Equal(inspect.Container.State.Status, containertypes.StateCreated))
}
// TestAllPortMappingsAreReturned check that dual-stack ports mapped through
@@ -975,9 +975,9 @@ func TestEmptyPortBindingsBC(t *testing.T) {
// Inspect the container and return its port bindings, along with
// warnings returns on container create.
inspect, err := apiClient.ContainerInspect(ctx, c.ID)
inspect, err := apiClient.ContainerInspect(ctx, c.ID, client.ContainerInspectOptions{})
assert.NilError(t, err)
return inspect.HostConfig.PortBindings, c.Warnings
return inspect.Container.HostConfig.PortBindings, c.Warnings
}
t.Run("backfilling on old client version", func(t *testing.T) {

View File

@@ -245,9 +245,9 @@ func testIpvlanL2MultiSubnetNoParent(t *testing.T, ctx context.Context, client d
testIpvlanL2MultiSubnet(t, ctx, client, "")
}
func testIpvlanL2MultiSubnet(t *testing.T, ctx context.Context, client dclient.APIClient, parent string) {
func testIpvlanL2MultiSubnet(t *testing.T, ctx context.Context, apiClient dclient.APIClient, parent string) {
netName := "dualstackl2"
net.CreateNoError(ctx, t, client, netName,
net.CreateNoError(ctx, t, apiClient, netName,
net.WithIPvlan(parent, ""),
net.WithIPv6(),
net.WithIPAM("172.28.200.0/24", ""),
@@ -255,63 +255,63 @@ func testIpvlanL2MultiSubnet(t *testing.T, ctx context.Context, client dclient.A
net.WithIPAM("2001:db8:abc8::/64", ""),
net.WithIPAM("2001:db8:abc6::/64", "2001:db8:abc6::254"),
)
assert.Check(t, n.IsNetworkAvailable(ctx, client, netName))
assert.Check(t, n.IsNetworkAvailable(ctx, apiClient, netName))
// start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.100.0/24 and 2001:db8:abc2::/64
id1 := container.Run(ctx, t, client,
id1 := container.Run(ctx, t, apiClient,
container.WithNetworkMode(netName),
container.WithIPv4(netName, "172.28.200.20"),
container.WithIPv6(netName, "2001:db8:abc8::20"),
)
id2 := container.Run(ctx, t, client,
id2 := container.Run(ctx, t, apiClient,
container.WithNetworkMode(netName),
container.WithIPv4(netName, "172.28.200.21"),
container.WithIPv6(netName, "2001:db8:abc8::21"),
)
c1, err := client.ContainerInspect(ctx, id1)
c1, err := apiClient.ContainerInspect(ctx, id1, dclient.ContainerInspectOptions{})
assert.NilError(t, err)
// Inspect the v4 gateway to ensure no default GW was assigned
assert.Check(t, !c1.NetworkSettings.Networks[netName].Gateway.IsValid())
assert.Check(t, !c1.Container.NetworkSettings.Networks[netName].Gateway.IsValid())
// Inspect the v6 gateway to ensure no default GW was assigned
assert.Check(t, !c1.NetworkSettings.Networks[netName].IPv6Gateway.IsValid())
assert.Check(t, !c1.Container.NetworkSettings.Networks[netName].IPv6Gateway.IsValid())
// verify ipv4 connectivity to the explicit --ip address second to first
_, err = container.Exec(ctx, client, id2, []string{"ping", "-c", "1", c1.NetworkSettings.Networks[netName].IPAddress.String()})
_, err = container.Exec(ctx, apiClient, id2, []string{"ping", "-c", "1", c1.Container.NetworkSettings.Networks[netName].IPAddress.String()})
assert.NilError(t, err)
// verify ipv6 connectivity to the explicit --ip6 address second to first
_, err = container.Exec(ctx, client, id2, []string{"ping6", "-c", "1", c1.NetworkSettings.Networks[netName].GlobalIPv6Address.String()})
_, err = container.Exec(ctx, apiClient, id2, []string{"ping6", "-c", "1", c1.Container.NetworkSettings.Networks[netName].GlobalIPv6Address.String()})
assert.NilError(t, err)
// start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.102.0/24 and 2001:db8:abc4::/64
id3 := container.Run(ctx, t, client,
id3 := container.Run(ctx, t, apiClient,
container.WithNetworkMode(netName),
container.WithIPv4(netName, "172.28.202.20"),
container.WithIPv6(netName, "2001:db8:abc6::20"),
)
id4 := container.Run(ctx, t, client,
id4 := container.Run(ctx, t, apiClient,
container.WithNetworkMode(netName),
container.WithIPv4(netName, "172.28.202.21"),
container.WithIPv6(netName, "2001:db8:abc6::21"),
)
c3, err := client.ContainerInspect(ctx, id3)
c3, err := apiClient.ContainerInspect(ctx, id3, dclient.ContainerInspectOptions{})
assert.NilError(t, err)
if parent == "" {
// Inspect the v4 gateway to ensure no default GW was assigned
assert.Check(t, !c3.NetworkSettings.Networks[netName].Gateway.IsValid())
assert.Check(t, !c3.Container.NetworkSettings.Networks[netName].Gateway.IsValid())
// Inspect the v6 gateway to ensure no default GW was assigned
assert.Check(t, !c3.NetworkSettings.Networks[netName].IPv6Gateway.IsValid())
assert.Check(t, !c3.Container.NetworkSettings.Networks[netName].IPv6Gateway.IsValid())
} else {
// Inspect the v4 gateway to ensure the proper explicitly assigned default GW was assigned
assert.Check(t, is.Equal(c3.NetworkSettings.Networks[netName].Gateway, netip.MustParseAddr("172.28.202.254")))
assert.Check(t, is.Equal(c3.Container.NetworkSettings.Networks[netName].Gateway, netip.MustParseAddr("172.28.202.254")))
// Inspect the v6 gateway to ensure the proper explicitly assigned default GW was assigned
assert.Check(t, is.Equal(c3.NetworkSettings.Networks[netName].IPv6Gateway, netip.MustParseAddr("2001:db8:abc6::254")))
assert.Check(t, is.Equal(c3.Container.NetworkSettings.Networks[netName].IPv6Gateway, netip.MustParseAddr("2001:db8:abc6::254")))
}
// verify ipv4 connectivity to the explicit --ip address from third to fourth
_, err = container.Exec(ctx, client, id4, []string{"ping", "-c", "1", c3.NetworkSettings.Networks[netName].IPAddress.String()})
_, err = container.Exec(ctx, apiClient, id4, []string{"ping", "-c", "1", c3.Container.NetworkSettings.Networks[netName].IPAddress.String()})
assert.NilError(t, err)
// verify ipv6 connectivity to the explicit --ip6 address from third to fourth
_, err = container.Exec(ctx, client, id4, []string{"ping6", "-c", "1", c3.NetworkSettings.Networks[netName].GlobalIPv6Address.String()})
_, err = container.Exec(ctx, apiClient, id4, []string{"ping6", "-c", "1", c3.Container.NetworkSettings.Networks[netName].GlobalIPv6Address.String()})
assert.NilError(t, err)
}
@@ -338,14 +338,14 @@ func testIpvlanL3MultiSubnet(t *testing.T, ctx context.Context, client dclient.A
container.WithIPv4(netName, "172.28.10.21"),
container.WithIPv6(netName, "2001:db8:abc9::21"),
)
c1, err := client.ContainerInspect(ctx, id1)
c1, err := client.ContainerInspect(ctx, id1, dclient.ContainerInspectOptions{})
assert.NilError(t, err)
// verify ipv4 connectivity to the explicit --ipv address second to first
_, err = container.Exec(ctx, client, id2, []string{"ping", "-c", "1", c1.NetworkSettings.Networks[netName].IPAddress.String()})
_, err = container.Exec(ctx, client, id2, []string{"ping", "-c", "1", c1.Container.NetworkSettings.Networks[netName].IPAddress.String()})
assert.NilError(t, err)
// verify ipv6 connectivity to the explicit --ipv6 address second to first
_, err = container.Exec(ctx, client, id2, []string{"ping6", "-c", "1", c1.NetworkSettings.Networks[netName].GlobalIPv6Address.String()})
_, err = container.Exec(ctx, client, id2, []string{"ping6", "-c", "1", c1.Container.NetworkSettings.Networks[netName].GlobalIPv6Address.String()})
assert.NilError(t, err)
// start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.102.0/24 and 2001:db8:abc4::/64
@@ -359,24 +359,24 @@ func testIpvlanL3MultiSubnet(t *testing.T, ctx context.Context, client dclient.A
container.WithIPv4(netName, "172.28.12.21"),
container.WithIPv6(netName, "2001:db8:abc7::21"),
)
c3, err := client.ContainerInspect(ctx, id3)
c3, err := client.ContainerInspect(ctx, id3, dclient.ContainerInspectOptions{})
assert.NilError(t, err)
// verify ipv4 connectivity to the explicit --ipv address from third to fourth
_, err = container.Exec(ctx, client, id4, []string{"ping", "-c", "1", c3.NetworkSettings.Networks[netName].IPAddress.String()})
_, err = container.Exec(ctx, client, id4, []string{"ping", "-c", "1", c3.Container.NetworkSettings.Networks[netName].IPAddress.String()})
assert.NilError(t, err)
// verify ipv6 connectivity to the explicit --ipv6 address from third to fourth
_, err = container.Exec(ctx, client, id4, []string{"ping6", "-c", "1", c3.NetworkSettings.Networks[netName].GlobalIPv6Address.String()})
_, err = container.Exec(ctx, client, id4, []string{"ping6", "-c", "1", c3.Container.NetworkSettings.Networks[netName].GlobalIPv6Address.String()})
assert.NilError(t, err)
// Inspect the v4 gateway to ensure no next hop is assigned in L3 mode
assert.Check(t, !c1.NetworkSettings.Networks[netName].Gateway.IsValid())
assert.Check(t, !c1.Container.NetworkSettings.Networks[netName].Gateway.IsValid())
// Inspect the v6 gateway to ensure the explicitly specified default GW is ignored per L3 mode enabled
assert.Check(t, !c1.NetworkSettings.Networks[netName].IPv6Gateway.IsValid())
assert.Check(t, !c1.Container.NetworkSettings.Networks[netName].IPv6Gateway.IsValid())
// Inspect the v4 gateway to ensure no next hop is assigned in L3 mode
assert.Check(t, !c3.NetworkSettings.Networks[netName].Gateway.IsValid())
assert.Check(t, !c3.Container.NetworkSettings.Networks[netName].Gateway.IsValid())
// Inspect the v6 gateway to ensure the explicitly specified default GW is ignored per L3 mode enabled
assert.Check(t, !c3.NetworkSettings.Networks[netName].IPv6Gateway.IsValid())
assert.Check(t, !c3.Container.NetworkSettings.Networks[netName].IPv6Gateway.IsValid())
}
// Verify ipvlan l2 mode sets the proper default gateway routes via netlink

View File

@@ -332,9 +332,9 @@ func testMacvlanMultiSubnetNoParent(t *testing.T, ctx context.Context, client cl
testMacvlanMultiSubnet(t, ctx, client, "")
}
func testMacvlanMultiSubnet(t *testing.T, ctx context.Context, client client.APIClient, parent string) {
func testMacvlanMultiSubnet(t *testing.T, ctx context.Context, apiClient client.APIClient, parent string) {
netName := "dualstackbridge"
net.CreateNoError(ctx, t, client, netName,
net.CreateNoError(ctx, t, apiClient, netName,
net.WithMacvlan(parent),
net.WithIPv6(),
net.WithIPAM("172.28.100.0/24", ""),
@@ -343,63 +343,63 @@ func testMacvlanMultiSubnet(t *testing.T, ctx context.Context, client client.API
net.WithIPAM("2001:db8:abc4::/64", "2001:db8:abc4::254"),
)
assert.Check(t, n.IsNetworkAvailable(ctx, client, netName))
assert.Check(t, n.IsNetworkAvailable(ctx, apiClient, netName))
// start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.100.0/24 and 2001:db8:abc2::/64
id1 := container.Run(ctx, t, client,
id1 := container.Run(ctx, t, apiClient,
container.WithNetworkMode("dualstackbridge"),
container.WithIPv4("dualstackbridge", "172.28.100.20"),
container.WithIPv6("dualstackbridge", "2001:db8:abc2::20"),
)
id2 := container.Run(ctx, t, client,
id2 := container.Run(ctx, t, apiClient,
container.WithNetworkMode("dualstackbridge"),
container.WithIPv4("dualstackbridge", "172.28.100.21"),
container.WithIPv6("dualstackbridge", "2001:db8:abc2::21"),
)
c1, err := client.ContainerInspect(ctx, id1)
c1, err := apiClient.ContainerInspect(ctx, id1, client.ContainerInspectOptions{})
assert.NilError(t, err)
// Inspect the v4 gateway to ensure no default GW was assigned
assert.Check(t, !c1.NetworkSettings.Networks["dualstackbridge"].Gateway.IsValid())
assert.Check(t, !c1.Container.NetworkSettings.Networks["dualstackbridge"].Gateway.IsValid())
// Inspect the v6 gateway to ensure no default GW was assigned
assert.Check(t, !c1.NetworkSettings.Networks["dualstackbridge"].IPv6Gateway.IsValid())
assert.Check(t, !c1.Container.NetworkSettings.Networks["dualstackbridge"].IPv6Gateway.IsValid())
// verify ipv4 connectivity to the explicit --ip address second to first
_, err = container.Exec(ctx, client, id2, []string{"ping", "-c", "1", c1.NetworkSettings.Networks["dualstackbridge"].IPAddress.String()})
_, err = container.Exec(ctx, apiClient, id2, []string{"ping", "-c", "1", c1.Container.NetworkSettings.Networks["dualstackbridge"].IPAddress.String()})
assert.NilError(t, err)
// verify ipv6 connectivity to the explicit --ip6 address second to first
_, err = container.Exec(ctx, client, id2, []string{"ping6", "-c", "1", c1.NetworkSettings.Networks["dualstackbridge"].GlobalIPv6Address.String()})
_, err = container.Exec(ctx, apiClient, id2, []string{"ping6", "-c", "1", c1.Container.NetworkSettings.Networks["dualstackbridge"].GlobalIPv6Address.String()})
assert.NilError(t, err)
// start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.102.0/24 and 2001:db8:abc4::/64
id3 := container.Run(ctx, t, client,
id3 := container.Run(ctx, t, apiClient,
container.WithNetworkMode("dualstackbridge"),
container.WithIPv4("dualstackbridge", "172.28.102.20"),
container.WithIPv6("dualstackbridge", "2001:db8:abc4::20"),
)
id4 := container.Run(ctx, t, client,
id4 := container.Run(ctx, t, apiClient,
container.WithNetworkMode("dualstackbridge"),
container.WithIPv4("dualstackbridge", "172.28.102.21"),
container.WithIPv6("dualstackbridge", "2001:db8:abc4::21"),
)
c3, err := client.ContainerInspect(ctx, id3)
c3, err := apiClient.ContainerInspect(ctx, id3, client.ContainerInspectOptions{})
assert.NilError(t, err)
if parent == "" {
// Inspect the v4 gateway to ensure no default GW was assigned
assert.Check(t, !c3.NetworkSettings.Networks["dualstackbridge"].Gateway.IsValid())
assert.Check(t, !c3.Container.NetworkSettings.Networks["dualstackbridge"].Gateway.IsValid())
// Inspect the v6 gateway to ensure no default GW was assigned
assert.Check(t, !c3.NetworkSettings.Networks["dualstackbridge"].IPv6Gateway.IsValid())
assert.Check(t, !c3.Container.NetworkSettings.Networks["dualstackbridge"].IPv6Gateway.IsValid())
} else {
// Inspect the v4 gateway to ensure the proper explicitly assigned default GW was assigned
assert.Check(t, is.Equal(c3.NetworkSettings.Networks["dualstackbridge"].Gateway, netip.MustParseAddr("172.28.102.254")))
assert.Check(t, is.Equal(c3.Container.NetworkSettings.Networks["dualstackbridge"].Gateway, netip.MustParseAddr("172.28.102.254")))
// Inspect the v6 gateway to ensure the proper explicitly assigned default GW was assigned
assert.Check(t, is.Equal(c3.NetworkSettings.Networks["dualstackbridge"].IPv6Gateway, netip.MustParseAddr("2001:db8:abc4::254")))
assert.Check(t, is.Equal(c3.Container.NetworkSettings.Networks["dualstackbridge"].IPv6Gateway, netip.MustParseAddr("2001:db8:abc4::254")))
}
// verify ipv4 connectivity to the explicit --ip address from third to fourth
_, err = container.Exec(ctx, client, id4, []string{"ping", "-c", "1", c3.NetworkSettings.Networks["dualstackbridge"].IPAddress.String()})
_, err = container.Exec(ctx, apiClient, id4, []string{"ping", "-c", "1", c3.Container.NetworkSettings.Networks["dualstackbridge"].IPAddress.String()})
assert.NilError(t, err)
// verify ipv6 connectivity to the explicit --ip6 address from third to fourth
_, err = container.Exec(ctx, client, id4, []string{"ping6", "-c", "1", c3.NetworkSettings.Networks["dualstackbridge"].GlobalIPv6Address.String()})
_, err = container.Exec(ctx, apiClient, id4, []string{"ping6", "-c", "1", c3.Container.NetworkSettings.Networks["dualstackbridge"].GlobalIPv6Address.String()})
assert.NilError(t, err)
}

View File

@@ -235,9 +235,9 @@ func TestInspectCfgdMAC(t *testing.T) {
Force: true,
})
_, raw, err := c.ContainerInspectWithRaw(ctx, id, false)
inspect, err := c.ContainerInspect(ctx, id, client.ContainerInspectOptions{})
assert.NilError(t, err)
var inspect struct {
var resp struct {
Config struct {
// Mac Address of the container.
//
@@ -245,10 +245,10 @@ func TestInspectCfgdMAC(t *testing.T) {
MacAddress string `json:",omitempty"`
}
}
err = json.Unmarshal(raw, &inspect)
assert.NilError(t, err, string(raw))
configMAC := inspect.Config.MacAddress
assert.Check(t, is.DeepEqual(configMAC, tc.desiredMAC), string(raw))
err = json.Unmarshal(inspect.Raw, &resp)
assert.NilError(t, err, string(inspect.Raw))
configMAC := resp.Config.MacAddress
assert.Check(t, is.DeepEqual(configMAC, tc.desiredMAC), string(inspect.Raw))
})
}
}

View File

@@ -49,7 +49,7 @@ func TestAuthZPluginV2AllowNonVolumeRequest(t *testing.T) {
// Ensure docker run command and accompanying docker ps are successful
cID := container.Run(ctx, t, c)
_, err = c.ContainerInspect(ctx, cID)
_, err = c.ContainerInspect(ctx, cID, client.ContainerInspectOptions{})
assert.NilError(t, err)
}

View File

@@ -70,9 +70,9 @@ func inspectServiceContainer(ctx context.Context, t *testing.T, apiClient client
assert.NilError(t, err)
assert.Check(t, is.Len(containers, 1))
i, err := apiClient.ContainerInspect(ctx, containers[0].ID)
inspect, err := apiClient.ContainerInspect(ctx, containers[0].ID, client.ContainerInspectOptions{})
assert.NilError(t, err)
return i
return inspect.Container
}
func TestCreateServiceMultipleTimes(t *testing.T) {
@@ -380,9 +380,9 @@ func TestCreateServiceSysctls(t *testing.T) {
assert.Check(t, is.Equal(len(taskList.Items), 1))
// verify that the container has the sysctl option set
ctnr, err := apiClient.ContainerInspect(ctx, taskList.Items[0].Status.ContainerStatus.ContainerID)
inspect, err := apiClient.ContainerInspect(ctx, taskList.Items[0].Status.ContainerStatus.ContainerID, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.DeepEqual(t, ctnr.HostConfig.Sysctls, expectedSysctls)
assert.DeepEqual(t, inspect.Container.HostConfig.Sysctls, expectedSysctls)
// verify that the task has the sysctl option set in the task object
assert.DeepEqual(t, taskList.Items[0].Spec.ContainerSpec.Sysctls, expectedSysctls)
@@ -450,10 +450,10 @@ func TestCreateServiceCapabilities(t *testing.T) {
assert.Check(t, is.Equal(len(taskList.Items), 1))
// verify that the container has the capabilities option set
ctnr, err := apiClient.ContainerInspect(ctx, taskList.Items[0].Status.ContainerStatus.ContainerID)
inspect, err := apiClient.ContainerInspect(ctx, taskList.Items[0].Status.ContainerStatus.ContainerID, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.DeepEqual(t, ctnr.HostConfig.CapAdd, capAdd)
assert.DeepEqual(t, ctnr.HostConfig.CapDrop, capDrop)
assert.DeepEqual(t, inspect.Container.HostConfig.CapAdd, capAdd)
assert.DeepEqual(t, inspect.Container.HostConfig.CapDrop, capDrop)
// verify that the task has the capabilities option set in the task object
assert.DeepEqual(t, taskList.Items[0].Spec.ContainerSpec.CapabilityAdd, capAdd)
@@ -541,9 +541,9 @@ func TestCreateServiceMemorySwap(t *testing.T) {
// if the host supports it (see https://github.com/moby/moby/blob/v17.03.2-ce/daemon/daemon_unix.go#L290-L294)
// then check that the swap option is set on the container, and properly reported by the group FS as well
if testEnv.DaemonInfo.SwapLimit {
ctnr, err := apiClient.ContainerInspect(ctx, task.Status.ContainerStatus.ContainerID)
ctr, err := apiClient.ContainerInspect(ctx, task.Status.ContainerStatus.ContainerID, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Equal(t, testCase.expectedDockerSwap, ctnr.HostConfig.Resources.MemorySwap)
assert.Equal(t, testCase.expectedDockerSwap, ctr.Container.HostConfig.Resources.MemorySwap)
}
})
}

View File

@@ -57,10 +57,10 @@ func TestDockerNetworkConnectAliasPreV144(t *testing.T) {
err = apiClient.ContainerStart(ctx, cID1, client.ContainerStartOptions{})
assert.NilError(t, err)
ng1, err := apiClient.ContainerInspect(ctx, cID1)
ng1, err := apiClient.ContainerInspect(ctx, cID1, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Check(t, is.Equal(len(ng1.NetworkSettings.Networks[name].Aliases), 2))
assert.Check(t, is.Equal(ng1.NetworkSettings.Networks[name].Aliases[0], "aaa"))
assert.Check(t, is.Equal(len(ng1.Container.NetworkSettings.Networks[name].Aliases), 2))
assert.Check(t, is.Equal(ng1.Container.NetworkSettings.Networks[name].Aliases[0], "aaa"))
cID2 := container.Create(ctx, t, apiClient, func(c *container.TestContainerConfig) {
c.NetworkingConfig = &network.NetworkingConfig{
@@ -80,10 +80,10 @@ func TestDockerNetworkConnectAliasPreV144(t *testing.T) {
err = apiClient.ContainerStart(ctx, cID2, client.ContainerStartOptions{})
assert.NilError(t, err)
ng2, err := apiClient.ContainerInspect(ctx, cID2)
ng2, err := apiClient.ContainerInspect(ctx, cID2, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Check(t, is.Equal(len(ng2.NetworkSettings.Networks[name].Aliases), 2))
assert.Check(t, is.Equal(ng2.NetworkSettings.Networks[name].Aliases[0], "bbb"))
assert.Check(t, is.Equal(len(ng2.Container.NetworkSettings.Networks[name].Aliases), 2))
assert.Check(t, is.Equal(ng2.Container.NetworkSettings.Networks[name].Aliases[0], "bbb"))
}
func TestDockerNetworkReConnect(t *testing.T) {
@@ -114,15 +114,15 @@ func TestDockerNetworkReConnect(t *testing.T) {
err = apiClient.ContainerStart(ctx, c1, client.ContainerStartOptions{})
assert.NilError(t, err)
n1, err := apiClient.ContainerInspect(ctx, c1)
n1, err := apiClient.ContainerInspect(ctx, c1, client.ContainerInspectOptions{})
assert.NilError(t, err)
err = apiClient.NetworkConnect(ctx, name, c1, &network.EndpointSettings{})
assert.ErrorContains(t, err, "is already attached to network")
n2, err := apiClient.ContainerInspect(ctx, c1)
n2, err := apiClient.ContainerInspect(ctx, c1, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Check(t, is.DeepEqual(n1, n2, cmpopts.EquateComparable(netip.Addr{}, netip.Prefix{})))
assert.Check(t, is.DeepEqual(n1.Container, n2.Container, cmpopts.EquateComparable(netip.Addr{}, netip.Prefix{})))
}
// Check that a swarm-scoped network can't have EnableIPv4=false.

View File

@@ -366,10 +366,10 @@ func getServiceTaskContainer(ctx context.Context, t *testing.T, cli client.APICl
assert.NilError(t, err)
assert.Assert(t, len(taskList.Items) > 0)
ctr, err := cli.ContainerInspect(ctx, taskList.Items[0].Status.ContainerStatus.ContainerID)
inspect, err := cli.ContainerInspect(ctx, taskList.Items[0].Status.ContainerStatus.ContainerID, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Equal(t, ctr.State.Running, true)
return ctr
assert.Equal(t, inspect.Container.State.Running, true)
return inspect.Container
}
func getService(ctx context.Context, t *testing.T, apiClient client.ServiceAPIClient, serviceID string) swarmtypes.Service {

View File

@@ -49,7 +49,7 @@ func TestCgroupDriverSystemdMemoryLimit(t *testing.T) {
err := c.ContainerStart(ctx, ctrID, client.ContainerStartOptions{})
assert.NilError(t, err)
s, err := c.ContainerInspect(ctx, ctrID)
s, err := c.ContainerInspect(ctx, ctrID, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Equal(t, s.HostConfig.Memory, mem)
assert.Equal(t, s.Container.HostConfig.Memory, mem)
}

View File

@@ -107,9 +107,9 @@ func TestRunMountVolumeSubdir(t *testing.T) {
output, err := container.Output(ctx, apiClient, id)
assert.Check(t, err)
inspect, err := apiClient.ContainerInspect(ctx, id)
inspect, err := apiClient.ContainerInspect(ctx, id, client.ContainerInspectOptions{})
if assert.Check(t, err) {
assert.Check(t, is.Equal(inspect.State.ExitCode, 0))
assert.Check(t, is.Equal(inspect.Container.State.ExitCode, 0))
}
assert.Check(t, is.Equal(strings.TrimSpace(output.Stderr), ""))
@@ -222,9 +222,9 @@ func TestRunMountImage(t *testing.T) {
output, err := container.Output(ctx, apiClient, id)
assert.Check(t, err)
inspect, err := apiClient.ContainerInspect(ctx, id)
inspect, err := apiClient.ContainerInspect(ctx, id, client.ContainerInspectOptions{})
if tc.startErr == "" && assert.Check(t, err) {
assert.Check(t, is.Equal(inspect.State.ExitCode, 0))
assert.Check(t, is.Equal(inspect.Container.State.ExitCode, 0))
}
assert.Check(t, is.Equal(strings.TrimSpace(output.Stderr), ""))
@@ -294,9 +294,9 @@ func setupTestVolume(t *testing.T, apiClient client.APIClient) string {
assert.NilError(t, err)
assert.Assert(t, is.Equal(output.Stderr, ""))
inspect, err := apiClient.ContainerInspect(ctx, cid)
inspect, err := apiClient.ContainerInspect(ctx, cid, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Assert(t, is.Equal(inspect.State.ExitCode, 0))
assert.Assert(t, is.Equal(inspect.Container.State.ExitCode, 0))
return volumeName
}
@@ -384,12 +384,12 @@ func TestRunMountImageMultipleTimes(t *testing.T) {
defer apiClient.ContainerRemove(ctx, id, client.ContainerRemoveOptions{Force: true})
inspect, err := apiClient.ContainerInspect(ctx, id)
inspect, err := apiClient.ContainerInspect(ctx, id, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Equal(t, len(inspect.Mounts), 2)
assert.Equal(t, len(inspect.Container.Mounts), 2)
var hasFoo, hasBar bool
for _, mnt := range inspect.Mounts {
for _, mnt := range inspect.Container.Mounts {
if mnt.Destination == "/etc/foo" {
hasFoo = true
}

View File

@@ -71,9 +71,9 @@ func TestVolumesRemove(t *testing.T) {
id := container.Create(ctx, t, apiClient, container.WithVolume(prefix+slash+"foo"))
c, err := apiClient.ContainerInspect(ctx, id)
inspect, err := apiClient.ContainerInspect(ctx, id, client.ContainerInspectOptions{})
assert.NilError(t, err)
vname := c.Mounts[0].Name
vname := inspect.Container.Mounts[0].Name
t.Run("volume in use", func(t *testing.T) {
err = apiClient.VolumeRemove(ctx, vname, client.VolumeRemoveOptions{})
@@ -123,9 +123,9 @@ func TestVolumesRemoveSwarmEnabled(t *testing.T) {
prefix, slash := getPrefixAndSlashFromDaemonPlatform()
id := container.Create(ctx, t, apiClient, container.WithVolume(prefix+slash+"foo"))
c, err := apiClient.ContainerInspect(ctx, id)
inspect, err := apiClient.ContainerInspect(ctx, id, client.ContainerInspectOptions{})
assert.NilError(t, err)
vname := c.Mounts[0].Name
vname := inspect.Container.Mounts[0].Name
t.Run("volume in use", func(t *testing.T) {
err = apiClient.VolumeRemove(ctx, vname, client.VolumeRemoveOptions{})
@@ -343,12 +343,12 @@ VOLUME ` + volDest
id := container.Create(ctx, t, apiClient, container.WithImage(img))
defer apiClient.ContainerRemove(ctx, id, client.ContainerRemoveOptions{})
inspect, err := apiClient.ContainerInspect(ctx, id)
inspect, err := apiClient.ContainerInspect(ctx, id, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Assert(t, is.Len(inspect.Mounts, 1))
assert.Assert(t, is.Len(inspect.Container.Mounts, 1))
volumeName := inspect.Mounts[0].Name
volumeName := inspect.Container.Mounts[0].Name
assert.Assert(t, volumeName != "")
err = apiClient.ContainerRemove(ctx, id, client.ContainerRemoveOptions{})

View File

@@ -164,12 +164,12 @@ COPY . /static`); err != nil {
assert.NilError(t, err)
// Find out the system assigned port
i, err := c.ContainerInspect(context.Background(), b.ID)
inspect, err := c.ContainerInspect(context.Background(), b.ID, client.ContainerInspectOptions{})
assert.NilError(t, err)
ports, exists := i.NetworkSettings.Ports[network.MustParsePort("80/tcp")]
ports, exists := inspect.Container.NetworkSettings.Ports[network.MustParsePort("80/tcp")]
assert.Assert(t, exists, "unable to find port 80/tcp for %s", ctrName)
if len(ports) == 0 {
t.Fatalf("no ports mapped for 80/tcp for %s: %#v", ctrName, i.NetworkSettings.Ports)
t.Fatalf("no ports mapped for 80/tcp for %s: %#v", ctrName, inspect.Container.NetworkSettings.Ports)
}
// TODO(thaJeztah): this will be "0.0.0.0" or "::", is that expected, should this use the IP of the testEnv.Server?
host := ports[0].HostIP

View File

@@ -63,8 +63,7 @@ type ContainerAPIClient interface {
ContainerDiff(ctx context.Context, container string, options ContainerDiffOptions) (ContainerDiffResult, error)
ExecAPIClient
ContainerExport(ctx context.Context, container string) (io.ReadCloser, error)
ContainerInspect(ctx context.Context, container string) (container.InspectResponse, error)
ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (container.InspectResponse, []byte, error)
ContainerInspect(ctx context.Context, container string, options ContainerInspectOptions) (ContainerInspectResult, error)
ContainerKill(ctx context.Context, container, signal string) error
ContainerList(ctx context.Context, options ContainerListOptions) ([]container.Summary, error)
ContainerLogs(ctx context.Context, container string, options ContainerLogsOptions) (io.ReadCloser, error)

View File

@@ -1,57 +1,47 @@
package client
import (
"bytes"
"context"
"encoding/json"
"io"
"net/url"
"github.com/moby/moby/api/types/container"
)
// ContainerInspect returns the container information.
func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (container.InspectResponse, error) {
containerID, err := trimID("container", containerID)
if err != nil {
return container.InspectResponse{}, err
}
resp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil)
defer ensureReaderClosed(resp)
if err != nil {
return container.InspectResponse{}, err
}
var response container.InspectResponse
err = json.NewDecoder(resp.Body).Decode(&response)
return response, err
// ContainerInspectOptions holds options for inspecting a container using
// the [Client.ContainerInspect] method.
type ContainerInspectOptions struct {
// Size controls whether the container's filesystem size should be calculated.
// When set, the [container.InspectResponse.SizeRw] and [container.InspectResponse.SizeRootFs]
// fields in [ContainerInspectResult.Container] are populated with the result.
//
// Calculating the size can be a costly operation, and should not be used
// unless needed.
Size bool
}
// ContainerInspectWithRaw returns the container information and its raw representation.
func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (container.InspectResponse, []byte, error) {
// ContainerInspectResult holds the result from the [Client.ContainerInspect] method.
type ContainerInspectResult struct {
Container container.InspectResponse
Raw json.RawMessage
}
// ContainerInspect returns the container information.
func (cli *Client) ContainerInspect(ctx context.Context, containerID string, options ContainerInspectOptions) (ContainerInspectResult, error) {
containerID, err := trimID("container", containerID)
if err != nil {
return container.InspectResponse{}, nil, err
return ContainerInspectResult{}, err
}
query := url.Values{}
if getSize {
if options.Size {
query.Set("size", "1")
}
resp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil)
defer ensureReaderClosed(resp)
if err != nil {
return container.InspectResponse{}, nil, err
return ContainerInspectResult{}, err
}
body, err := io.ReadAll(resp.Body)
if err != nil {
return container.InspectResponse{}, nil, err
}
var response container.InspectResponse
rdr := bytes.NewReader(body)
err = json.NewDecoder(rdr).Decode(&response)
return response, body, err
var out ContainerInspectResult
out.Raw, err = decodeWithRaw(resp, &out.Container)
return out, err
}