Merge pull request #51308 from thaJeztah/client_container_types

client: add option and output structs for various container methods
This commit is contained in:
Sebastiaan van Stijn
2025-10-28 15:42:25 +01:00
committed by GitHub
65 changed files with 506 additions and 262 deletions

View File

@@ -64,20 +64,20 @@ type ContainerAPIClient interface {
ExecAPIClient
ContainerExport(ctx context.Context, container string) (io.ReadCloser, error)
ContainerInspect(ctx context.Context, container string, options ContainerInspectOptions) (ContainerInspectResult, error)
ContainerKill(ctx context.Context, container, signal string) error
ContainerKill(ctx context.Context, container string, options ContainerKillOptions) (ContainerKillResult, error)
ContainerList(ctx context.Context, options ContainerListOptions) ([]container.Summary, error)
ContainerLogs(ctx context.Context, container string, options ContainerLogsOptions) (io.ReadCloser, error)
ContainerPause(ctx context.Context, container string) error
ContainerRemove(ctx context.Context, container string, options ContainerRemoveOptions) error
ContainerPause(ctx context.Context, container string, options ContainerPauseOptions) (ContainerPauseResult, error)
ContainerRemove(ctx context.Context, container string, options ContainerRemoveOptions) (ContainerRemoveResult, error)
ContainerRename(ctx context.Context, container, newContainerName string) error
ContainerResize(ctx context.Context, container string, options ContainerResizeOptions) error
ContainerRestart(ctx context.Context, container string, options ContainerStopOptions) error
ContainerResize(ctx context.Context, container string, options ContainerResizeOptions) (ContainerResizeResult, error)
ContainerRestart(ctx context.Context, container string, options ContainerRestartOptions) (ContainerRestartResult, error)
ContainerStatPath(ctx context.Context, container, path string) (container.PathStat, error)
ContainerStats(ctx context.Context, container string, options ContainerStatsOptions) (ContainerStatsResult, error)
ContainerStart(ctx context.Context, container string, options ContainerStartOptions) error
ContainerStop(ctx context.Context, container string, options ContainerStopOptions) error
ContainerStart(ctx context.Context, container string, options ContainerStartOptions) (ContainerStartResult, error)
ContainerStop(ctx context.Context, container string, options ContainerStopOptions) (ContainerStopResult, error)
ContainerTop(ctx context.Context, container string, arguments []string) (container.TopResponse, error)
ContainerUnpause(ctx context.Context, container string) error
ContainerUnpause(ctx context.Context, container string, options ContainerUnPauseOptions) (ContainerUnPauseResult, error)
ContainerUpdate(ctx context.Context, container string, updateConfig container.UpdateConfig) (container.UpdateResponse, error)
ContainerWait(ctx context.Context, container string, condition container.WaitCondition) (<-chan container.WaitResponse, <-chan error)
CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, container.PathStat, error)

View File

@@ -5,19 +5,35 @@ import (
"net/url"
)
// ContainerKillOptions holds options for [Client.ContainerKill].
type ContainerKillOptions struct {
// Signal (optional) is the signal to send to the container. If no value
// is set, the default (SIGKILL) is used.
Signal string `json:",omitempty"`
}
// ContainerKillResult holds the result of [Client.ContainerKill].
type ContainerKillResult struct {
// Add future fields here.
}
// ContainerKill terminates the container process but does not remove the container from the docker host.
func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error {
func (cli *Client) ContainerKill(ctx context.Context, containerID string, options ContainerKillOptions) (ContainerKillResult, error) {
containerID, err := trimID("container", containerID)
if err != nil {
return err
return ContainerKillResult{}, err
}
query := url.Values{}
if signal != "" {
query.Set("signal", signal)
if options.Signal != "" {
query.Set("signal", options.Signal)
}
resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil)
defer ensureReaderClosed(resp)
return err
if err != nil {
return ContainerKillResult{}, err
}
return ContainerKillResult{}, nil
}

View File

@@ -1,7 +1,6 @@
package client
import (
"context"
"fmt"
"net/http"
"testing"
@@ -17,34 +16,39 @@ func TestContainerKillError(t *testing.T) {
)
assert.NilError(t, err)
err = client.ContainerKill(context.Background(), "nothing", "SIGKILL")
_, err = client.ContainerKill(t.Context(), "nothing", ContainerKillOptions{
Signal: "SIGKILL",
})
assert.Check(t, is.ErrorType(err, cerrdefs.IsInternal))
err = client.ContainerKill(context.Background(), "", "")
_, err = client.ContainerKill(t.Context(), "", ContainerKillOptions{})
assert.Check(t, is.ErrorType(err, cerrdefs.IsInvalidArgument))
assert.Check(t, is.ErrorContains(err, "value is empty"))
err = client.ContainerKill(context.Background(), " ", "")
_, err = client.ContainerKill(t.Context(), " ", ContainerKillOptions{})
assert.Check(t, is.ErrorType(err, cerrdefs.IsInvalidArgument))
assert.Check(t, is.ErrorContains(err, "value is empty"))
}
func TestContainerKill(t *testing.T) {
const expectedURL = "/containers/container_id/kill"
const expectedSignal = "SIG_SOMETHING"
client, err := NewClientWithOpts(
WithMockClient(func(req *http.Request) (*http.Response, error) {
if err := assertRequest(req, http.MethodPost, expectedURL); err != nil {
return nil, err
}
signal := req.URL.Query().Get("signal")
if signal != "SIGKILL" {
return nil, fmt.Errorf("signal not set in URL query properly. Expected 'SIGKILL', got %s", signal)
if signal != expectedSignal {
return nil, fmt.Errorf("signal not set in URL query properly. Expected '%s', got %s", expectedSignal, signal)
}
return mockResponse(http.StatusOK, nil, "")(req)
}),
)
assert.NilError(t, err)
err = client.ContainerKill(context.Background(), "container_id", "SIGKILL")
_, err = client.ContainerKill(t.Context(), "container_id", ContainerKillOptions{
Signal: expectedSignal,
})
assert.NilError(t, err)
}

View File

@@ -2,14 +2,27 @@ package client
import "context"
// ContainerPauseOptions holds options for [Client.ContainerPause].
type ContainerPauseOptions struct {
// Add future optional parameters here.
}
// ContainerPauseResult holds the result of [Client.ContainerPause].
type ContainerPauseResult struct {
// Add future fields here.
}
// ContainerPause pauses the main process of a given container without terminating it.
func (cli *Client) ContainerPause(ctx context.Context, containerID string) error {
func (cli *Client) ContainerPause(ctx context.Context, containerID string, options ContainerPauseOptions) (ContainerPauseResult, error) {
containerID, err := trimID("container", containerID)
if err != nil {
return err
return ContainerPauseResult{}, err
}
resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil)
defer ensureReaderClosed(resp)
return err
if err != nil {
return ContainerPauseResult{}, err
}
return ContainerPauseResult{}, nil
}

View File

@@ -1,7 +1,6 @@
package client
import (
"context"
"net/http"
"testing"
@@ -16,7 +15,7 @@ func TestContainerPauseError(t *testing.T) {
)
assert.NilError(t, err)
err = client.ContainerPause(context.Background(), "nothing")
_, err = client.ContainerPause(t.Context(), "nothing", ContainerPauseOptions{})
assert.Check(t, is.ErrorType(err, cerrdefs.IsInternal))
}
@@ -32,6 +31,6 @@ func TestContainerPause(t *testing.T) {
)
assert.NilError(t, err)
err = client.ContainerPause(context.Background(), "container_id")
_, err = client.ContainerPause(t.Context(), "container_id", ContainerPauseOptions{})
assert.NilError(t, err)
}

View File

@@ -12,11 +12,16 @@ type ContainerRemoveOptions struct {
Force bool
}
// ContainerRemoveResult holds the result of [Client.ContainerRemove].
type ContainerRemoveResult struct {
// Add future fields here.
}
// ContainerRemove kills and removes a container from the docker host.
func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options ContainerRemoveOptions) error {
func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options ContainerRemoveOptions) (ContainerRemoveResult, error) {
containerID, err := trimID("container", containerID)
if err != nil {
return err
return ContainerRemoveResult{}, err
}
query := url.Values{}
@@ -33,5 +38,8 @@ func (cli *Client) ContainerRemove(ctx context.Context, containerID string, opti
resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil)
defer ensureReaderClosed(resp)
return err
if err != nil {
return ContainerRemoveResult{}, err
}
return ContainerRemoveResult{}, nil
}

View File

@@ -1,7 +1,6 @@
package client
import (
"context"
"fmt"
"net/http"
"testing"
@@ -14,14 +13,14 @@ import (
func TestContainerRemoveError(t *testing.T) {
client, err := NewClientWithOpts(WithMockClient(errorMock(http.StatusInternalServerError, "Server error")))
assert.NilError(t, err)
err = client.ContainerRemove(context.Background(), "container_id", ContainerRemoveOptions{})
_, err = client.ContainerRemove(t.Context(), "container_id", ContainerRemoveOptions{})
assert.Check(t, is.ErrorType(err, cerrdefs.IsInternal))
err = client.ContainerRemove(context.Background(), "", ContainerRemoveOptions{})
_, err = client.ContainerRemove(t.Context(), "", ContainerRemoveOptions{})
assert.Check(t, is.ErrorType(err, cerrdefs.IsInvalidArgument))
assert.Check(t, is.ErrorContains(err, "value is empty"))
err = client.ContainerRemove(context.Background(), " ", ContainerRemoveOptions{})
_, err = client.ContainerRemove(t.Context(), " ", ContainerRemoveOptions{})
assert.Check(t, is.ErrorType(err, cerrdefs.IsInvalidArgument))
assert.Check(t, is.ErrorContains(err, "value is empty"))
}
@@ -29,7 +28,7 @@ func TestContainerRemoveError(t *testing.T) {
func TestContainerRemoveNotFoundError(t *testing.T) {
client, err := NewClientWithOpts(WithMockClient(errorMock(http.StatusNotFound, "no such container: container_id")))
assert.NilError(t, err)
err = client.ContainerRemove(context.Background(), "container_id", ContainerRemoveOptions{})
_, err = client.ContainerRemove(t.Context(), "container_id", ContainerRemoveOptions{})
assert.Check(t, is.ErrorContains(err, "no such container: container_id"))
assert.Check(t, is.ErrorType(err, cerrdefs.IsNotFound))
}
@@ -57,7 +56,7 @@ func TestContainerRemove(t *testing.T) {
}))
assert.NilError(t, err)
err = client.ContainerRemove(context.Background(), "container_id", ContainerRemoveOptions{
_, err = client.ContainerRemove(t.Context(), "container_id", ContainerRemoveOptions{
RemoveVolumes: true,
Force: true,
})

View File

@@ -14,13 +14,28 @@ type ContainerResizeOptions struct {
Width uint
}
// ContainerResizeResult holds the result of [Client.ContainerResize].
type ContainerResizeResult struct {
// Add future fields here.
}
// ContainerResize changes the size of the pseudo-TTY for a container.
func (cli *Client) ContainerResize(ctx context.Context, containerID string, options ContainerResizeOptions) error {
func (cli *Client) ContainerResize(ctx context.Context, containerID string, options ContainerResizeOptions) (ContainerResizeResult, error) {
containerID, err := trimID("container", containerID)
if err != nil {
return err
return ContainerResizeResult{}, err
}
return cli.resize(ctx, "/containers/"+containerID, options.Height, options.Width)
// FIXME(thaJeztah): the API / backend accepts uint32, but container.ResizeOptions uses uint.
query := url.Values{}
query.Set("h", strconv.FormatUint(uint64(options.Height), 10))
query.Set("w", strconv.FormatUint(uint64(options.Width), 10))
resp, err := cli.post(ctx, "/containers/"+containerID+"/resize", query, nil, nil)
defer ensureReaderClosed(resp)
if err != nil {
return ContainerResizeResult{}, err
}
return ContainerResizeResult{}, nil
}
// ExecResizeOptions holds options for resizing a container exec TTY.
@@ -36,17 +51,16 @@ func (cli *Client) ExecResize(ctx context.Context, execID string, options ExecRe
if err != nil {
return ExecResizeResult{}, err
}
err = cli.resize(ctx, "/exec/"+execID, options.Height, options.Width)
return ExecResizeResult{}, err
}
func (cli *Client) resize(ctx context.Context, basePath string, height, width uint) error {
// FIXME(thaJeztah): the API / backend accepts uint32, but container.ResizeOptions uses uint.
query := url.Values{}
query.Set("h", strconv.FormatUint(uint64(height), 10))
query.Set("w", strconv.FormatUint(uint64(width), 10))
query.Set("h", strconv.FormatUint(uint64(options.Height), 10))
query.Set("w", strconv.FormatUint(uint64(options.Width), 10))
resp, err := cli.post(ctx, basePath+"/resize", query, nil, nil)
resp, err := cli.post(ctx, "/exec/"+execID+"/resize", query, nil, nil)
defer ensureReaderClosed(resp)
return err
if err != nil {
return ExecResizeResult{}, err
}
return ExecResizeResult{}, nil
}

View File

@@ -1,7 +1,6 @@
package client
import (
"context"
"math"
"net/http"
"testing"
@@ -14,14 +13,14 @@ import (
func TestContainerResizeError(t *testing.T) {
client, err := NewClientWithOpts(WithMockClient(errorMock(http.StatusInternalServerError, "Server error")))
assert.NilError(t, err)
err = client.ContainerResize(context.Background(), "container_id", ContainerResizeOptions{})
_, err = client.ContainerResize(t.Context(), "container_id", ContainerResizeOptions{})
assert.Check(t, is.ErrorType(err, cerrdefs.IsInternal))
err = client.ContainerResize(context.Background(), "", ContainerResizeOptions{})
_, err = client.ContainerResize(t.Context(), "", ContainerResizeOptions{})
assert.Check(t, is.ErrorType(err, cerrdefs.IsInvalidArgument))
assert.Check(t, is.ErrorContains(err, "value is empty"))
err = client.ContainerResize(context.Background(), " ", ContainerResizeOptions{})
_, err = client.ContainerResize(t.Context(), " ", ContainerResizeOptions{})
assert.Check(t, is.ErrorType(err, cerrdefs.IsInvalidArgument))
assert.Check(t, is.ErrorContains(err, "value is empty"))
}
@@ -29,7 +28,7 @@ func TestContainerResizeError(t *testing.T) {
func TestExecResizeError(t *testing.T) {
client, err := NewClientWithOpts(WithMockClient(errorMock(http.StatusInternalServerError, "Server error")))
assert.NilError(t, err)
_, err = client.ExecResize(context.Background(), "exec_id", ExecResizeOptions{})
_, err = client.ExecResize(t.Context(), "exec_id", ExecResizeOptions{})
assert.Check(t, is.ErrorType(err, cerrdefs.IsInternal))
}
@@ -70,7 +69,7 @@ func TestContainerResize(t *testing.T) {
t.Run(tc.doc, func(t *testing.T) {
client, err := NewClientWithOpts(WithMockClient(resizeTransport(t, expectedURL, tc.expectedHeight, tc.expectedWidth)))
assert.NilError(t, err)
err = client.ContainerResize(context.Background(), "container_id", tc.opts)
_, err = client.ContainerResize(t.Context(), "container_id", tc.opts)
assert.NilError(t, err)
})
}
@@ -112,7 +111,7 @@ func TestExecResize(t *testing.T) {
t.Run(tc.doc, func(t *testing.T) {
client, err := NewClientWithOpts(WithMockClient(resizeTransport(t, expectedURL, tc.expectedHeight, tc.expectedWidth)))
assert.NilError(t, err)
_, err = client.ExecResize(context.Background(), "exec_id", tc.opts)
_, err = client.ExecResize(t.Context(), "exec_id", tc.opts)
assert.NilError(t, err)
})
}

View File

@@ -6,13 +6,36 @@ import (
"strconv"
)
// ContainerRestartOptions holds options for [Client.ContainerRestart].
type ContainerRestartOptions struct {
// Signal (optional) is the signal to send to the container to (gracefully)
// stop it before forcibly terminating the container with SIGKILL after the
// timeout expires. If no value is set, the default (SIGTERM) is used.
Signal string `json:",omitempty"`
// Timeout (optional) is the timeout (in seconds) to wait for the container
// to stop gracefully before forcibly terminating it with SIGKILL.
//
// - Use nil to use the default timeout (10 seconds).
// - Use '-1' to wait indefinitely.
// - Use '0' to not wait for the container to exit gracefully, and
// immediately proceed to forcibly terminating the container.
// - Other positive values are used as timeout (in seconds).
Timeout *int `json:",omitempty"`
}
// ContainerRestartResult holds the result of [Client.ContainerRestart].
type ContainerRestartResult struct {
// Add future fields here.
}
// ContainerRestart stops, and starts a container again.
// It makes the daemon wait for the container to be up again for
// a specific amount of time, given the timeout.
func (cli *Client) ContainerRestart(ctx context.Context, containerID string, options ContainerStopOptions) error {
func (cli *Client) ContainerRestart(ctx context.Context, containerID string, options ContainerRestartOptions) (ContainerRestartResult, error) {
containerID, err := trimID("container", containerID)
if err != nil {
return err
return ContainerRestartResult{}, err
}
query := url.Values{}
@@ -24,5 +47,8 @@ func (cli *Client) ContainerRestart(ctx context.Context, containerID string, opt
}
resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil)
defer ensureReaderClosed(resp)
return err
if err != nil {
return ContainerRestartResult{}, err
}
return ContainerRestartResult{}, nil
}

View File

@@ -1,7 +1,6 @@
package client
import (
"context"
"fmt"
"net/http"
"testing"
@@ -14,14 +13,14 @@ import (
func TestContainerRestartError(t *testing.T) {
client, err := NewClientWithOpts(WithMockClient(errorMock(http.StatusInternalServerError, "Server error")))
assert.NilError(t, err)
err = client.ContainerRestart(context.Background(), "nothing", ContainerStopOptions{})
_, err = client.ContainerRestart(t.Context(), "nothing", ContainerRestartOptions{})
assert.Check(t, is.ErrorType(err, cerrdefs.IsInternal))
err = client.ContainerRestart(context.Background(), "", ContainerStopOptions{})
_, err = client.ContainerRestart(t.Context(), "", ContainerRestartOptions{})
assert.Check(t, is.ErrorType(err, cerrdefs.IsInvalidArgument))
assert.Check(t, is.ErrorContains(err, "value is empty"))
err = client.ContainerRestart(context.Background(), " ", ContainerStopOptions{})
_, err = client.ContainerRestart(t.Context(), " ", ContainerRestartOptions{})
assert.Check(t, is.ErrorType(err, cerrdefs.IsInvalidArgument))
assert.Check(t, is.ErrorContains(err, "value is empty"))
}
@@ -34,7 +33,7 @@ func TestContainerRestartConnectionError(t *testing.T) {
client, err := NewClientWithOpts(WithAPIVersionNegotiation(), WithHost("tcp://no-such-host.invalid"))
assert.NilError(t, err)
err = client.ContainerRestart(context.Background(), "nothing", ContainerStopOptions{})
_, err = client.ContainerRestart(t.Context(), "nothing", ContainerRestartOptions{})
assert.Check(t, is.ErrorType(err, IsErrConnectionFailed))
}
@@ -56,7 +55,7 @@ func TestContainerRestart(t *testing.T) {
}))
assert.NilError(t, err)
timeout := 100
err = client.ContainerRestart(context.Background(), "container_id", ContainerStopOptions{
_, err = client.ContainerRestart(t.Context(), "container_id", ContainerRestartOptions{
Signal: "SIGKILL",
Timeout: &timeout,
})

View File

@@ -5,17 +5,22 @@ import (
"net/url"
)
// ContainerStartOptions holds parameters to start containers.
// ContainerStartOptions holds options for [Client.ContainerStart].
type ContainerStartOptions struct {
CheckpointID string
CheckpointDir string
}
// ContainerStartResult holds the result of [Client.ContainerStart].
type ContainerStartResult struct {
// Add future fields here.
}
// ContainerStart sends a request to the docker daemon to start a container.
func (cli *Client) ContainerStart(ctx context.Context, containerID string, options ContainerStartOptions) error {
func (cli *Client) ContainerStart(ctx context.Context, containerID string, options ContainerStartOptions) (ContainerStartResult, error) {
containerID, err := trimID("container", containerID)
if err != nil {
return err
return ContainerStartResult{}, err
}
query := url.Values{}
@@ -28,5 +33,8 @@ func (cli *Client) ContainerStart(ctx context.Context, containerID string, optio
resp, err := cli.post(ctx, "/containers/"+containerID+"/start", query, nil, nil)
defer ensureReaderClosed(resp)
return err
if err != nil {
return ContainerStartResult{}, err
}
return ContainerStartResult{}, nil
}

View File

@@ -1,7 +1,6 @@
package client
import (
"context"
"encoding/json"
"fmt"
"net/http"
@@ -15,14 +14,14 @@ import (
func TestContainerStartError(t *testing.T) {
client, err := NewClientWithOpts(WithMockClient(errorMock(http.StatusInternalServerError, "Server error")))
assert.NilError(t, err)
err = client.ContainerStart(context.Background(), "nothing", ContainerStartOptions{})
_, err = client.ContainerStart(t.Context(), "nothing", ContainerStartOptions{})
assert.Check(t, is.ErrorType(err, cerrdefs.IsInternal))
err = client.ContainerStart(context.Background(), "", ContainerStartOptions{})
_, err = client.ContainerStart(t.Context(), "", ContainerStartOptions{})
assert.Check(t, is.ErrorType(err, cerrdefs.IsInvalidArgument))
assert.Check(t, is.ErrorContains(err, "value is empty"))
err = client.ContainerStart(context.Background(), " ", ContainerStartOptions{})
_, err = client.ContainerStart(t.Context(), " ", ContainerStartOptions{})
assert.Check(t, is.ErrorType(err, cerrdefs.IsInvalidArgument))
assert.Check(t, is.ErrorContains(err, "value is empty"))
}
@@ -49,6 +48,6 @@ func TestContainerStart(t *testing.T) {
}))
assert.NilError(t, err)
err = client.ContainerStart(context.Background(), "container_id", ContainerStartOptions{CheckpointID: "checkpoint_id"})
_, err = client.ContainerStart(t.Context(), "container_id", ContainerStartOptions{CheckpointID: "checkpoint_id"})
assert.NilError(t, err)
}

View File

@@ -6,11 +6,11 @@ import (
"strconv"
)
// ContainerStopOptions holds the options to stop or restart a container.
// ContainerStopOptions holds the options for [Client.ContainerStop].
type ContainerStopOptions struct {
// Signal (optional) is the signal to send to the container to (gracefully)
// stop it before forcibly terminating the container with SIGKILL after the
// timeout expires. If not value is set, the default (SIGTERM) is used.
// timeout expires. If no value is set, the default (SIGTERM) is used.
Signal string `json:",omitempty"`
// Timeout (optional) is the timeout (in seconds) to wait for the container
@@ -24,6 +24,11 @@ type ContainerStopOptions struct {
Timeout *int `json:",omitempty"`
}
// ContainerStopResult holds the result of [Client.ContainerStop].
type ContainerStopResult struct {
// Add future fields here.
}
// ContainerStop stops a container. In case the container fails to stop
// gracefully within a time frame specified by the timeout argument,
// it is forcefully terminated (killed).
@@ -31,10 +36,10 @@ type ContainerStopOptions struct {
// If the timeout is nil, the container's StopTimeout value is used, if set,
// otherwise the engine default. A negative timeout value can be specified,
// meaning no timeout, i.e. no forceful termination is performed.
func (cli *Client) ContainerStop(ctx context.Context, containerID string, options ContainerStopOptions) error {
func (cli *Client) ContainerStop(ctx context.Context, containerID string, options ContainerStopOptions) (ContainerStopResult, error) {
containerID, err := trimID("container", containerID)
if err != nil {
return err
return ContainerStopResult{}, err
}
query := url.Values{}
@@ -46,5 +51,8 @@ func (cli *Client) ContainerStop(ctx context.Context, containerID string, option
}
resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil)
defer ensureReaderClosed(resp)
return err
if err != nil {
return ContainerStopResult{}, err
}
return ContainerStopResult{}, nil
}

View File

@@ -1,7 +1,6 @@
package client
import (
"context"
"fmt"
"net/http"
"testing"
@@ -14,14 +13,14 @@ import (
func TestContainerStopError(t *testing.T) {
client, err := NewClientWithOpts(WithMockClient(errorMock(http.StatusInternalServerError, "Server error")))
assert.NilError(t, err)
err = client.ContainerStop(context.Background(), "container_id", ContainerStopOptions{})
_, err = client.ContainerStop(t.Context(), "container_id", ContainerStopOptions{})
assert.Check(t, is.ErrorType(err, cerrdefs.IsInternal))
err = client.ContainerStop(context.Background(), "", ContainerStopOptions{})
_, err = client.ContainerStop(t.Context(), "", ContainerStopOptions{})
assert.Check(t, is.ErrorType(err, cerrdefs.IsInvalidArgument))
assert.Check(t, is.ErrorContains(err, "value is empty"))
err = client.ContainerStop(context.Background(), " ", ContainerStopOptions{})
_, err = client.ContainerStop(t.Context(), " ", ContainerStopOptions{})
assert.Check(t, is.ErrorType(err, cerrdefs.IsInvalidArgument))
assert.Check(t, is.ErrorContains(err, "value is empty"))
}
@@ -34,7 +33,7 @@ func TestContainerStopConnectionError(t *testing.T) {
client, err := NewClientWithOpts(WithAPIVersionNegotiation(), WithHost("tcp://no-such-host.invalid"))
assert.NilError(t, err)
err = client.ContainerStop(context.Background(), "container_id", ContainerStopOptions{})
_, err = client.ContainerStop(t.Context(), "container_id", ContainerStopOptions{})
assert.Check(t, is.ErrorType(err, IsErrConnectionFailed))
}
@@ -56,7 +55,7 @@ func TestContainerStop(t *testing.T) {
}))
assert.NilError(t, err)
timeout := 100
err = client.ContainerStop(context.Background(), "container_id", ContainerStopOptions{
_, err = client.ContainerStop(t.Context(), "container_id", ContainerStopOptions{
Signal: "SIGKILL",
Timeout: &timeout,
})

View File

@@ -2,14 +2,27 @@ package client
import "context"
// ContainerUnPauseOptions holds options for [Client.ContainerUnpause].
type ContainerUnPauseOptions struct {
// Add future optional parameters here.
}
// ContainerUnPauseResult holds the result of [Client.ContainerUnpause].
type ContainerUnPauseResult struct {
// Add future fields here.
}
// ContainerUnpause resumes the process execution within a container.
func (cli *Client) ContainerUnpause(ctx context.Context, containerID string) error {
func (cli *Client) ContainerUnpause(ctx context.Context, containerID string, options ContainerUnPauseOptions) (ContainerUnPauseResult, error) {
containerID, err := trimID("container", containerID)
if err != nil {
return err
return ContainerUnPauseResult{}, err
}
resp, err := cli.post(ctx, "/containers/"+containerID+"/unpause", nil, nil, nil)
defer ensureReaderClosed(resp)
return err
if err != nil {
return ContainerUnPauseResult{}, err
}
return ContainerUnPauseResult{}, nil
}

View File

@@ -1,7 +1,6 @@
package client
import (
"context"
"net/http"
"testing"
@@ -13,14 +12,14 @@ import (
func TestContainerUnpauseError(t *testing.T) {
client, err := NewClientWithOpts(WithMockClient(errorMock(http.StatusInternalServerError, "Server error")))
assert.NilError(t, err)
err = client.ContainerUnpause(context.Background(), "nothing")
_, err = client.ContainerUnpause(t.Context(), "nothing", ContainerUnPauseOptions{})
assert.Check(t, is.ErrorType(err, cerrdefs.IsInternal))
err = client.ContainerUnpause(context.Background(), "")
_, err = client.ContainerUnpause(t.Context(), "", ContainerUnPauseOptions{})
assert.Check(t, is.ErrorType(err, cerrdefs.IsInvalidArgument))
assert.Check(t, is.ErrorContains(err, "value is empty"))
err = client.ContainerUnpause(context.Background(), " ")
_, err = client.ContainerUnpause(t.Context(), " ", ContainerUnPauseOptions{})
assert.Check(t, is.ErrorType(err, cerrdefs.IsInvalidArgument))
assert.Check(t, is.ErrorContains(err, "value is empty"))
}
@@ -34,6 +33,6 @@ func TestContainerUnpause(t *testing.T) {
return mockResponse(http.StatusOK, nil, "")(req)
}))
assert.NilError(t, err)
err = client.ContainerUnpause(context.Background(), "container_id")
_, err = client.ContainerUnpause(t.Context(), "container_id", ContainerUnPauseOptions{})
assert.NilError(t, err)
}

View File

@@ -145,8 +145,10 @@ func (daemon *Daemon) CreateImageFromContainer(ctx context.Context, name string,
}
if !c.NoPause && !container.State.IsPaused() {
daemon.containerPause(container)
defer daemon.containerUnpause(container)
_ = daemon.containerPause(container)
defer func() {
_ = daemon.containerUnpause(container)
}()
}
if c.Config == nil {

View File

@@ -366,7 +366,7 @@ func (s *DockerAPISuite) TestContainerAPIPause(c *testing.T) {
assert.NilError(c, err)
defer apiClient.Close()
err = apiClient.ContainerPause(testutil.GetContext(c), ContainerID)
_, err = apiClient.ContainerPause(testutil.GetContext(c), ContainerID, client.ContainerPauseOptions{})
assert.NilError(c, err)
pausedContainers := getPaused(c)
@@ -375,7 +375,7 @@ func (s *DockerAPISuite) TestContainerAPIPause(c *testing.T) {
c.Fatalf("there should be one paused container and not %d", len(pausedContainers))
}
err = apiClient.ContainerUnpause(testutil.GetContext(c), ContainerID)
_, err = apiClient.ContainerUnpause(testutil.GetContext(c), ContainerID, client.ContainerUnPauseOptions{})
assert.NilError(c, err)
pausedContainers = getPaused(c)
@@ -713,7 +713,9 @@ func (s *DockerAPISuite) TestContainerAPIKill(c *testing.T) {
assert.NilError(c, err)
defer apiClient.Close()
err = apiClient.ContainerKill(testutil.GetContext(c), name, "SIGKILL")
_, err = apiClient.ContainerKill(testutil.GetContext(c), name, client.ContainerKillOptions{
Signal: "SIGKILL",
})
assert.NilError(c, err)
state := inspectField(c, name, "State.Running")
@@ -728,7 +730,9 @@ func (s *DockerAPISuite) TestContainerAPIRestart(c *testing.T) {
defer apiClient.Close()
timeout := 1
err = apiClient.ContainerRestart(testutil.GetContext(c), name, client.ContainerStopOptions{Timeout: &timeout})
_, err = apiClient.ContainerRestart(testutil.GetContext(c), name, client.ContainerRestartOptions{
Timeout: &timeout,
})
assert.NilError(c, err)
assert.NilError(c, waitInspect(name, "{{ .State.Restarting }} {{ .State.Running }}", "false true", 15*time.Second))
@@ -743,7 +747,7 @@ func (s *DockerAPISuite) TestContainerAPIRestartNotimeoutParam(c *testing.T) {
assert.NilError(c, err)
defer apiClient.Close()
err = apiClient.ContainerRestart(testutil.GetContext(c), name, client.ContainerStopOptions{})
_, err = apiClient.ContainerRestart(testutil.GetContext(c), name, client.ContainerRestartOptions{})
assert.NilError(c, err)
assert.NilError(c, waitInspect(name, "{{ .State.Restarting }} {{ .State.Running }}", "false true", 15*time.Second))
@@ -769,12 +773,12 @@ func (s *DockerAPISuite) TestContainerAPIStart(c *testing.T) {
})
assert.NilError(c, err)
err = apiClient.ContainerStart(testutil.GetContext(c), name, client.ContainerStartOptions{})
_, err = apiClient.ContainerStart(testutil.GetContext(c), name, client.ContainerStartOptions{})
assert.NilError(c, err)
// second call to start should give 304
// maybe add ContainerStartWithRaw to test it
err = apiClient.ContainerStart(testutil.GetContext(c), name, client.ContainerStartOptions{})
_, err = apiClient.ContainerStart(testutil.GetContext(c), name, client.ContainerStartOptions{})
assert.NilError(c, err)
// TODO(tibor): figure out why this doesn't work on windows
@@ -789,7 +793,7 @@ func (s *DockerAPISuite) TestContainerAPIStop(c *testing.T) {
assert.NilError(c, err)
defer apiClient.Close()
err = apiClient.ContainerStop(testutil.GetContext(c), name, client.ContainerStopOptions{
_, err = apiClient.ContainerStop(testutil.GetContext(c), name, client.ContainerStopOptions{
Timeout: &timeout,
})
assert.NilError(c, err)
@@ -797,7 +801,7 @@ func (s *DockerAPISuite) TestContainerAPIStop(c *testing.T) {
// second call to start should give 304
// maybe add ContainerStartWithRaw to test it
err = apiClient.ContainerStop(testutil.GetContext(c), name, client.ContainerStopOptions{
_, err = apiClient.ContainerStop(testutil.GetContext(c), name, client.ContainerStopOptions{
Timeout: &timeout,
})
assert.NilError(c, err)
@@ -835,7 +839,7 @@ func (s *DockerAPISuite) TestContainerAPIDelete(c *testing.T) {
assert.NilError(c, err)
defer apiClient.Close()
err = apiClient.ContainerRemove(testutil.GetContext(c), id, client.ContainerRemoveOptions{})
_, err = apiClient.ContainerRemove(testutil.GetContext(c), id, client.ContainerRemoveOptions{})
assert.NilError(c, err)
}
@@ -844,7 +848,7 @@ func (s *DockerAPISuite) TestContainerAPIDeleteNotExist(c *testing.T) {
assert.NilError(c, err)
defer apiClient.Close()
err = apiClient.ContainerRemove(testutil.GetContext(c), "doesnotexist", client.ContainerRemoveOptions{})
_, err = apiClient.ContainerRemove(testutil.GetContext(c), "doesnotexist", client.ContainerRemoveOptions{})
assert.ErrorContains(c, err, "No such container: doesnotexist")
}
@@ -860,7 +864,7 @@ func (s *DockerAPISuite) TestContainerAPIDeleteForce(c *testing.T) {
assert.NilError(c, err)
defer apiClient.Close()
err = apiClient.ContainerRemove(testutil.GetContext(c), id, removeOptions)
_, err = apiClient.ContainerRemove(testutil.GetContext(c), id, removeOptions)
assert.NilError(c, err)
}
@@ -886,7 +890,7 @@ func (s *DockerAPISuite) TestContainerAPIDeleteRemoveLinks(c *testing.T) {
assert.NilError(c, err)
defer apiClient.Close()
err = apiClient.ContainerRemove(testutil.GetContext(c), "tlink2/tlink1", removeOptions)
_, err = apiClient.ContainerRemove(testutil.GetContext(c), "tlink2/tlink1", removeOptions)
assert.NilError(c, err)
linksPostRm := inspectFieldJSON(c, id2, "HostConfig.Links")
@@ -922,7 +926,7 @@ func (s *DockerAPISuite) TestContainerAPIDeleteRemoveVolume(c *testing.T) {
RemoveVolumes: true,
}
err = apiClient.ContainerRemove(testutil.GetContext(c), id, removeOptions)
_, err = apiClient.ContainerRemove(testutil.GetContext(c), id, removeOptions)
assert.NilError(c, err)
_, err = os.Stat(mnt.Source)
@@ -957,7 +961,7 @@ func (s *DockerAPISuite) TestContainerAPIPostContainerStop(c *testing.T) {
assert.NilError(c, err)
defer apiClient.Close()
err = apiClient.ContainerStop(testutil.GetContext(c), containerID, client.ContainerStopOptions{})
_, err = apiClient.ContainerStop(testutil.GetContext(c), containerID, client.ContainerStopOptions{})
assert.NilError(c, err)
assert.NilError(c, waitInspect(containerID, "{{ .State.Running }}", "false", 60*time.Second))
}
@@ -1108,7 +1112,7 @@ func (s *DockerAPISuite) TestContainerAPIDeleteWithEmptyName(c *testing.T) {
assert.NilError(c, err)
defer apiClient.Close()
err = apiClient.ContainerRemove(testutil.GetContext(c), "", client.ContainerRemoveOptions{})
_, err = apiClient.ContainerRemove(testutil.GetContext(c), "", client.ContainerRemoveOptions{})
assert.Check(c, is.ErrorType(err, cerrdefs.IsInvalidArgument))
assert.Check(c, is.ErrorContains(err, "value is empty"))
}
@@ -1138,7 +1142,7 @@ func (s *DockerAPISuite) TestContainerAPIStatsWithNetworkDisabled(c *testing.T)
assert.NilError(c, err)
defer cli.DockerCmd(c, "rm", "-f", name)
err = apiClient.ContainerStart(testutil.GetContext(c), ctr.ID, client.ContainerStartOptions{})
_, err = apiClient.ContainerStart(testutil.GetContext(c), ctr.ID, client.ContainerStartOptions{})
assert.NilError(c, err)
cli.WaitRun(c, ctr.ID)
@@ -1672,11 +1676,11 @@ func (s *DockerAPISuite) TestContainersAPICreateMountsCreate(c *testing.T) {
assert.Check(c, is.Equal(tc.expected.Mode, mountPoint.Mode))
assert.Check(c, is.Equal(tc.expected.Destination, mountPoint.Destination))
err = apiclient.ContainerStart(ctx, ctr.ID, client.ContainerStartOptions{})
_, err = apiclient.ContainerStart(ctx, ctr.ID, client.ContainerStartOptions{})
assert.NilError(c, err)
poll.WaitOn(c, containerExit(ctx, apiclient, ctr.ID), poll.WithDelay(time.Second))
err = apiclient.ContainerRemove(ctx, ctr.ID, client.ContainerRemoveOptions{
_, err = apiclient.ContainerRemove(ctx, ctr.ID, client.ContainerRemoveOptions{
RemoveVolumes: true,
Force: true,
})

View File

@@ -65,13 +65,14 @@ func (s *DockerAPISuite) TestContainersAPICreateMountsBindNamedPipe(c *testing.T
Target: containerPipeName,
},
},
},
NetworkingConfig: &network.NetworkingConfig{},
Name: name,
},
NetworkingConfig: &network.NetworkingConfig{},
Name: name,
})
)
assert.NilError(c, err)
err = apiClient.ContainerStart(ctx, name, client.ContainerStartOptions{})
_, err = apiClient.ContainerStart(ctx, name, client.ContainerStartOptions{})
assert.NilError(c, err)
err = <-ch

View File

@@ -457,7 +457,7 @@ func (s *DockerCLIEventSuite) TestEventsResize(c *testing.T) {
Height: 80,
Width: 24,
}
err = apiClient.ContainerResize(testutil.GetContext(c), cID, options)
_, err = apiClient.ContainerResize(testutil.GetContext(c), cID, options)
assert.NilError(c, err)
cli.DockerCmd(c, "stop", cID)

View File

@@ -116,7 +116,7 @@ func TestCheckpoint(t *testing.T) {
// Restore the container from a second checkpoint.
t.Log("Restore the container")
err = apiClient.ContainerStart(ctx, cID, client.ContainerStartOptions{
_, err = apiClient.ContainerStart(ctx, cID, client.ContainerStartOptions{
CheckpointID: "test2",
})
assert.NilError(t, err)

View File

@@ -135,7 +135,7 @@ func TestCreateByImageID(t *testing.T) {
assert.Check(t, resp.ID != "")
}
// cleanup the container if one was created.
_ = apiClient.ContainerRemove(ctx, resp.ID, client.ContainerRemoveOptions{Force: true})
_, _ = apiClient.ContainerRemove(ctx, resp.ID, client.ContainerRemoveOptions{Force: true})
})
}
}
@@ -300,11 +300,11 @@ func TestCreateWithCustomMaskedPaths(t *testing.T) {
assert.DeepEqual(t, inspect.Container.HostConfig.MaskedPaths, tc.expected)
// Start the container.
err = apiClient.ContainerStart(ctx, ctr.ID, client.ContainerStartOptions{})
_, err = apiClient.ContainerStart(ctx, ctr.ID, client.ContainerStartOptions{})
assert.NilError(t, err)
// It should die down by itself, but stop it to be sure.
err = apiClient.ContainerStop(ctx, ctr.ID, client.ContainerStopOptions{})
_, err = apiClient.ContainerStop(ctx, ctr.ID, client.ContainerStopOptions{})
assert.NilError(t, err)
inspect, err = apiClient.ContainerInspect(ctx, ctr.ID, client.ContainerInspectOptions{})
@@ -371,11 +371,11 @@ func TestCreateWithCustomReadonlyPaths(t *testing.T) {
assert.DeepEqual(t, ctrInspect.Container.HostConfig.ReadonlyPaths, tc.expected)
// Start the container.
err = apiClient.ContainerStart(ctx, ctr.ID, client.ContainerStartOptions{})
_, err = apiClient.ContainerStart(ctx, ctr.ID, client.ContainerStartOptions{})
assert.NilError(t, err)
// It should die down by itself, but stop it to be sure.
err = apiClient.ContainerStop(ctx, ctr.ID, client.ContainerStopOptions{})
_, err = apiClient.ContainerStop(ctx, ctr.ID, client.ContainerStopOptions{})
assert.NilError(t, err)
ctrInspect, err = apiClient.ContainerInspect(ctx, ctr.ID, client.ContainerInspectOptions{})
@@ -488,7 +488,7 @@ func TestCreateTmpfsOverrideAnonymousVolume(t *testing.T) {
)
defer func() {
err := apiClient.ContainerRemove(ctx, id, client.ContainerRemoveOptions{Force: true})
_, err := apiClient.ContainerRemove(ctx, id, client.ContainerRemoveOptions{Force: true})
assert.NilError(t, err)
}()
@@ -499,7 +499,8 @@ func TestCreateTmpfsOverrideAnonymousVolume(t *testing.T) {
assert.Assert(t, is.Len(inspect.Container.Mounts, 0))
chWait, chErr := apiClient.ContainerWait(ctx, id, container.WaitConditionNextExit)
assert.NilError(t, apiClient.ContainerStart(ctx, id, client.ContainerStartOptions{}))
_, err = apiClient.ContainerStart(ctx, id, client.ContainerStartOptions{})
assert.NilError(t, err)
timeout := time.NewTimer(30 * time.Second)
defer timeout.Stop()

View File

@@ -49,7 +49,7 @@ func TestContainerStartOnDaemonRestart(t *testing.T) {
cID := container.Create(ctx, t, c)
defer c.ContainerRemove(ctx, cID, client.ContainerRemoveOptions{Force: true})
err := c.ContainerStart(ctx, cID, client.ContainerStartOptions{})
_, err := c.ContainerStart(ctx, cID, client.ContainerStartOptions{})
assert.Check(t, err, "error starting test container")
inspect, err := c.ContainerInspect(ctx, cID, client.ContainerInspectOptions{})
@@ -68,7 +68,7 @@ func TestContainerStartOnDaemonRestart(t *testing.T) {
d.Start(t, "--iptables=false", "--ip6tables=false")
err = c.ContainerStart(ctx, cID, client.ContainerStartOptions{})
_, err = c.ContainerStart(ctx, cID, client.ContainerStartOptions{})
assert.Check(t, err, "failed to start test container")
}
@@ -291,6 +291,9 @@ func TestHardRestartWhenContainerIsRunning(t *testing.T) {
}
stopTimeout := 0
assert.Assert(t, apiClient.ContainerStop(ctx, onFailure, client.ContainerStopOptions{Timeout: &stopTimeout}))
_, err = apiClient.ContainerStop(ctx, onFailure, client.ContainerStopOptions{
Timeout: &stopTimeout,
})
assert.NilError(t, err)
})
}

View File

@@ -36,7 +36,7 @@ func TestContainerKillOnDaemonStart(t *testing.T) {
// Sadly this means the test will take longer, but at least this test can be parallelized.
id := container.Run(ctx, t, apiClient, container.WithCmd("/bin/sh", "-c", "while true; do echo hello; sleep 1; done"))
defer func() {
err := apiClient.ContainerRemove(ctx, id, client.ContainerRemoveOptions{Force: true})
_, err := apiClient.ContainerRemove(ctx, id, client.ContainerRemoveOptions{Force: true})
assert.NilError(t, err)
}()
@@ -80,7 +80,7 @@ func TestNetworkStateCleanupOnDaemonStart(t *testing.T) {
container.WithPortMap(network.PortMap{mappedPort: {{}}}),
container.WithCmd("/bin/sh", "-c", "while true; do echo hello; sleep 1; done"))
defer func() {
err := apiClient.ContainerRemove(ctx, cid, client.ContainerRemoveOptions{Force: true})
_, err := apiClient.ContainerRemove(ctx, cid, client.ContainerRemoveOptions{Force: true})
assert.NilError(t, err)
}()

View File

@@ -100,7 +100,7 @@ func TestWindowsDevices(t *testing.T) {
// remove this skip.If and validate the expected behaviour under Hyper-V.
skip.If(t, d.isolation == containertypes.IsolationHyperV && !d.expectedStartFailure, "FIXME. HyperV isolation setup is probably incorrect in the test")
err := apiClient.ContainerStart(ctx, id, client.ContainerStartOptions{})
_, err := apiClient.ContainerStart(ctx, id, client.ContainerStartOptions{})
if d.expectedStartFailure {
assert.ErrorContains(t, err, d.expectedStartFailureMessage)
return

View File

@@ -273,7 +273,7 @@ func TestExecResize(t *testing.T) {
// Error response from daemon: No such exec instance: cc728a332d3f594249fb7ee9adb3bb12a59a5d1776f8f6dedc56355364361711
skip.If(t, testEnv.DaemonInfo.OSType == "windows" && !testEnv.RuntimeIsWindowsContainerd(), "FIXME. Windows + builtin returns a NotFound instead of a Conflict error")
err := apiClient.ContainerKill(ctx, cID, "SIGKILL")
_, err := apiClient.ContainerKill(ctx, cID, client.ContainerKillOptions{})
assert.NilError(t, err)
_, err = apiClient.ExecResize(ctx, execID, client.ExecResizeOptions{

View File

@@ -76,14 +76,18 @@ while true; do sleep 1; done
defer cancel()
poll.WaitOn(t, pollForHealthStatus(ctxPoll, apiClient, id, "healthy"))
err := apiClient.ContainerKill(ctx, id, "SIGUSR1")
_, err := apiClient.ContainerKill(ctx, id, client.ContainerKillOptions{
Signal: "SIGUSR1",
})
assert.NilError(t, err)
ctxPoll, cancel = context.WithTimeout(ctx, 30*time.Second)
defer cancel()
poll.WaitOn(t, pollForHealthStatus(ctxPoll, apiClient, id, "unhealthy"))
err = apiClient.ContainerKill(ctx, id, "SIGUSR1")
_, err = apiClient.ContainerKill(ctx, id, client.ContainerKillOptions{
Signal: "SIGUSR1",
})
assert.NilError(t, err)
ctxPoll, cancel = context.WithTimeout(ctx, 30*time.Second)

View File

@@ -71,7 +71,7 @@ func testIpcNonePrivateShareable(t *testing.T, mode string, mustBeMounted bool,
assert.NilError(t, err)
assert.Check(t, is.Equal(len(resp.Warnings), 0))
err = apiClient.ContainerStart(ctx, resp.ID, client.ContainerStartOptions{})
_, err = apiClient.ContainerStart(ctx, resp.ID, client.ContainerStartOptions{})
assert.NilError(t, err)
// get major:minor pair for /dev/shm from container's /proc/self/mountinfo
@@ -146,7 +146,7 @@ func testIpcContainer(t *testing.T, donorMode string, mustWork bool) {
assert.Check(t, is.Equal(len(resp.Warnings), 0))
name1 := resp.ID
err = apiClient.ContainerStart(ctx, name1, client.ContainerStartOptions{})
_, err = apiClient.ContainerStart(ctx, name1, client.ContainerStartOptions{})
assert.NilError(t, err)
// create and start the second container
@@ -159,7 +159,7 @@ func testIpcContainer(t *testing.T, donorMode string, mustWork bool) {
assert.Check(t, is.Equal(len(resp.Warnings), 0))
name2 := resp.ID
err = apiClient.ContainerStart(ctx, name2, client.ContainerStartOptions{})
_, err = apiClient.ContainerStart(ctx, name2, client.ContainerStartOptions{})
if !mustWork {
// start should fail with a specific error
assert.Check(t, is.ErrorContains(err, "non-shareable IPC"))
@@ -218,7 +218,7 @@ func TestAPIIpcModeHost(t *testing.T) {
assert.Check(t, is.Equal(len(resp.Warnings), 0))
name := resp.ID
err = apiClient.ContainerStart(ctx, name, client.ContainerStartOptions{})
_, err = apiClient.ContainerStart(ctx, name, client.ContainerStartOptions{})
assert.NilError(t, err)
// check that IPC is shared
@@ -256,7 +256,7 @@ func testDaemonIpcPrivateShareable(t *testing.T, mustBeShared bool, arg ...strin
assert.NilError(t, err)
assert.Check(t, is.Equal(len(resp.Warnings), 0))
err = c.ContainerStart(ctx, resp.ID, client.ContainerStartOptions{})
_, err = c.ContainerStart(ctx, resp.ID, client.ContainerStartOptions{})
assert.NilError(t, err)
// get major:minor pair for /dev/shm from container's /proc/self/mountinfo

View File

@@ -20,12 +20,16 @@ func TestKillContainerInvalidSignal(t *testing.T) {
apiClient := testEnv.APIClient()
id := container.Run(ctx, t, apiClient)
err := apiClient.ContainerKill(ctx, id, "0")
_, err := apiClient.ContainerKill(ctx, id, client.ContainerKillOptions{
Signal: "0",
})
assert.ErrorContains(t, err, "Error response from daemon:")
assert.ErrorContains(t, err, "nvalid signal: 0") // match "(I|i)nvalid" case-insensitive to allow testing against older daemons.
poll.WaitOn(t, container.IsInState(ctx, apiClient, id, containertypes.StateRunning))
err = apiClient.ContainerKill(ctx, id, "SIG42")
_, err = apiClient.ContainerKill(ctx, id, client.ContainerKillOptions{
Signal: "SIG42",
})
assert.ErrorContains(t, err, "Error response from daemon:")
assert.ErrorContains(t, err, "nvalid signal: SIG42") // match "(I|i)nvalid" case-insensitive to allow testing against older daemons.
poll.WaitOn(t, container.IsInState(ctx, apiClient, id, containertypes.StateRunning))
@@ -71,7 +75,9 @@ func TestKillContainer(t *testing.T) {
skip.If(t, testEnv.DaemonInfo.OSType == tc.skipOs, "Windows does not support SIGWINCH")
ctx := testutil.StartSpan(ctx, t)
id := container.Run(ctx, t, apiClient)
err := apiClient.ContainerKill(ctx, id, tc.signal)
_, err := apiClient.ContainerKill(ctx, id, client.ContainerKillOptions{
Signal: tc.signal,
})
assert.NilError(t, err)
poll.WaitOn(t, container.IsInState(ctx, apiClient, id, tc.status), pollOpts...)
@@ -112,8 +118,11 @@ func TestKillWithStopSignalAndRestartPolicies(t *testing.T) {
container.WithRestartPolicy(containertypes.RestartPolicyAlways),
func(c *container.TestContainerConfig) {
c.Config.StopSignal = tc.stopsignal
})
err := apiClient.ContainerKill(ctx, id, "TERM")
},
)
_, err := apiClient.ContainerKill(ctx, id, client.ContainerKillOptions{
Signal: "TERM",
})
assert.NilError(t, err)
poll.WaitOn(t, container.IsInState(ctx, apiClient, id, tc.status), pollOpts...)
@@ -125,7 +134,7 @@ func TestKillStoppedContainer(t *testing.T) {
ctx := setupTest(t)
apiClient := testEnv.APIClient()
id := container.Create(ctx, t, apiClient)
err := apiClient.ContainerKill(ctx, id, "SIGKILL")
_, err := apiClient.ContainerKill(ctx, id, client.ContainerKillOptions{})
assert.ErrorContains(t, err, "")
assert.ErrorContains(t, err, "is not running")
}
@@ -141,7 +150,7 @@ func TestKillDifferentUserContainer(t *testing.T) {
c.Config.User = "daemon"
})
err := apiClient.ContainerKill(ctx, id, "SIGKILL")
_, err := apiClient.ContainerKill(ctx, id, client.ContainerKillOptions{})
assert.NilError(t, err)
poll.WaitOn(t, container.IsInState(ctx, apiClient, id, containertypes.StateExited))
}

View File

@@ -74,7 +74,7 @@ func TestContainerNetworkMountsNoChown(t *testing.T) {
})
assert.NilError(t, err)
// container will exit immediately because of no tty, but we only need the start sequence to test the condition
err = cli.ContainerStart(ctx, ctrCreate.ID, client.ContainerStartOptions{})
_, err = cli.ContainerStart(ctx, ctrCreate.ID, client.ContainerStartOptions{})
assert.NilError(t, err)
// Check that host-located bind mount network file did not change ownership when the container was started
@@ -202,7 +202,7 @@ func TestMountDaemonRoot(t *testing.T) {
}
defer func() {
if err := apiClient.ContainerRemove(ctx, c.ID, client.ContainerRemoveOptions{Force: true}); err != nil {
if _, err := apiClient.ContainerRemove(ctx, c.ID, client.ContainerRemoveOptions{Force: true}); err != nil {
panic(err)
}
}()

View File

@@ -27,14 +27,14 @@ func TestPause(t *testing.T) {
since := request.DaemonUnixTime(ctx, t, apiClient, testEnv)
err := apiClient.ContainerPause(ctx, cID)
_, err := apiClient.ContainerPause(ctx, cID, client.ContainerPauseOptions{})
assert.NilError(t, err)
inspect, err := apiClient.ContainerInspect(ctx, cID, client.ContainerInspectOptions{})
assert.NilError(t, err)
assert.Check(t, is.Equal(true, inspect.Container.State.Paused))
err = apiClient.ContainerUnpause(ctx, cID)
_, err = apiClient.ContainerUnpause(ctx, cID, client.ContainerUnPauseOptions{})
assert.NilError(t, err)
until := request.DaemonUnixTime(ctx, t, apiClient, testEnv)
@@ -54,7 +54,7 @@ func TestPauseFailsOnWindowsServerContainers(t *testing.T) {
apiClient := testEnv.APIClient()
cID := container.Run(ctx, t, apiClient)
err := apiClient.ContainerPause(ctx, cID)
_, err := apiClient.ContainerPause(ctx, cID, client.ContainerPauseOptions{})
assert.Check(t, is.ErrorContains(err, cerrdefs.ErrNotImplemented.Error()))
}
@@ -65,10 +65,10 @@ func TestPauseStopPausedContainer(t *testing.T) {
apiClient := testEnv.APIClient()
cID := container.Run(ctx, t, apiClient)
err := apiClient.ContainerPause(ctx, cID)
_, err := apiClient.ContainerPause(ctx, cID, client.ContainerPauseOptions{})
assert.NilError(t, err)
err = apiClient.ContainerStop(ctx, cID, client.ContainerStopOptions{})
_, err = apiClient.ContainerStop(ctx, cID, client.ContainerStopOptions{})
assert.NilError(t, err)
poll.WaitOn(t, container.IsStopped(ctx, apiClient, cID))

View File

@@ -50,7 +50,7 @@ func TestPIDModeContainer(t *testing.T) {
ctr, err := container.CreateFromConfig(ctx, apiClient, container.NewTestConfig(container.WithPIDMode("container:"+pidCtrName)))
assert.NilError(t, err, "should not produce an error when creating, only when starting")
err = apiClient.ContainerStart(ctx, ctr.ID, client.ContainerStartOptions{})
_, err = apiClient.ContainerStart(ctx, ctr.ID, client.ContainerStartOptions{})
assert.Check(t, is.ErrorType(err, cerrdefs.IsInternal), "should produce a System error when starting an existing container from an invalid state")
assert.Check(t, is.ErrorContains(err, "failed to join PID namespace"))
assert.Check(t, is.ErrorContains(err, cPIDContainerID+" is not running"))
@@ -63,7 +63,7 @@ func TestPIDModeContainer(t *testing.T) {
ctr, err := container.CreateFromConfig(ctx, apiClient, container.NewTestConfig(container.WithPIDMode("container:"+pidCtrName)))
assert.NilError(t, err)
err = apiClient.ContainerStart(ctx, ctr.ID, client.ContainerStartOptions{})
_, err = apiClient.ContainerStart(ctx, ctr.ID, client.ContainerStartOptions{})
assert.Check(t, err)
})
}

View File

@@ -39,7 +39,7 @@ func TestRemoveContainerWithRemovedVolume(t *testing.T) {
err := os.RemoveAll(tempDir.Path())
assert.NilError(t, err)
err = apiClient.ContainerRemove(ctx, cID, client.ContainerRemoveOptions{
_, err = apiClient.ContainerRemove(ctx, cID, client.ContainerRemoveOptions{
RemoveVolumes: true,
})
assert.NilError(t, err)
@@ -66,7 +66,7 @@ func TestRemoveContainerWithVolume(t *testing.T) {
_, err = apiClient.VolumeInspect(ctx, volName, client.VolumeInspectOptions{})
assert.NilError(t, err)
err = apiClient.ContainerRemove(ctx, cID, client.ContainerRemoveOptions{
_, err = apiClient.ContainerRemove(ctx, cID, client.ContainerRemoveOptions{
Force: true,
RemoveVolumes: true,
})
@@ -82,7 +82,7 @@ func TestRemoveContainerRunning(t *testing.T) {
cID := container.Run(ctx, t, apiClient)
err := apiClient.ContainerRemove(ctx, cID, client.ContainerRemoveOptions{})
_, err := apiClient.ContainerRemove(ctx, cID, client.ContainerRemoveOptions{})
assert.Check(t, is.ErrorType(err, cerrdefs.IsConflict))
assert.Check(t, is.ErrorContains(err, "container is running"))
}
@@ -93,7 +93,7 @@ func TestRemoveContainerForceRemoveRunning(t *testing.T) {
cID := container.Run(ctx, t, apiClient)
err := apiClient.ContainerRemove(ctx, cID, client.ContainerRemoveOptions{
_, err := apiClient.ContainerRemove(ctx, cID, client.ContainerRemoveOptions{
Force: true,
})
assert.NilError(t, err)
@@ -103,7 +103,7 @@ func TestRemoveInvalidContainer(t *testing.T) {
ctx := setupTest(t)
apiClient := testEnv.APIClient()
err := apiClient.ContainerRemove(ctx, "unknown", client.ContainerRemoveOptions{})
_, err := apiClient.ContainerRemove(ctx, "unknown", client.ContainerRemoveOptions{})
assert.Check(t, is.ErrorType(err, cerrdefs.IsNotFound))
assert.Check(t, is.ErrorContains(err, "No such container"))
}

View File

@@ -32,7 +32,7 @@ func TestRenameLinkedContainer(t *testing.T) {
container.Run(ctx, t, apiClient, container.WithName(aName))
err = apiClient.ContainerRemove(ctx, bID, client.ContainerRemoveOptions{Force: true})
_, err = apiClient.ContainerRemove(ctx, bID, client.ContainerRemoveOptions{Force: true})
assert.NilError(t, err)
bID = container.Run(ctx, t, apiClient, container.WithName(bName), container.WithLinks(aName))
@@ -129,9 +129,9 @@ func TestRenameAnonymousContainer(t *testing.T) {
assert.NilError(t, err)
// Stop/Start the container to get registered
// FIXME(vdemeester) this is a really weird behavior as it fails otherwise
err = apiClient.ContainerStop(ctx, container1Name, client.ContainerStopOptions{})
_, err = apiClient.ContainerStop(ctx, container1Name, client.ContainerStopOptions{})
assert.NilError(t, err)
err = apiClient.ContainerStart(ctx, container1Name, client.ContainerStartOptions{})
_, err = apiClient.ContainerStart(ctx, container1Name, client.ContainerStartOptions{})
assert.NilError(t, err)
count := "-c"

View File

@@ -22,7 +22,7 @@ func TestResize(t *testing.T) {
t.Run("success", func(t *testing.T) {
cID := container.Run(ctx, t, apiClient, container.WithTty(true))
defer container.Remove(ctx, t, apiClient, cID, client.ContainerRemoveOptions{Force: true})
err := apiClient.ContainerResize(ctx, cID, client.ContainerResizeOptions{
_, err := apiClient.ContainerResize(ctx, cID, client.ContainerResizeOptions{
Height: 40,
Width: 40,
})
@@ -129,7 +129,7 @@ func TestResize(t *testing.T) {
t.Run("invalid state", func(t *testing.T) {
cID := container.Create(ctx, t, apiClient, container.WithCmd("echo"))
defer container.Remove(ctx, t, apiClient, cID, client.ContainerRemoveOptions{Force: true})
err := apiClient.ContainerResize(ctx, cID, client.ContainerResizeOptions{
_, err := apiClient.ContainerResize(ctx, cID, client.ContainerResizeOptions{
Height: 40,
Width: 40,
})

View File

@@ -108,7 +108,7 @@ func TestDaemonRestartKillContainers(t *testing.T) {
defer apiClient.ContainerRemove(ctx, resp.ID, client.ContainerRemoveOptions{Force: true})
if tc.xStart {
err = apiClient.ContainerStart(ctx, resp.ID, client.ContainerStartOptions{})
_, err = apiClient.ContainerStart(ctx, resp.ID, client.ContainerStartOptions{})
assert.NilError(t, err)
if tc.xHealthCheck {
poll.WaitOn(t, pollForHealthStatus(ctx, apiClient, resp.ID, container.Healthy), poll.WithTimeout(30*time.Second))
@@ -176,13 +176,15 @@ func TestContainerWithAutoRemoveCanBeRestarted(t *testing.T) {
{
desc: "kill",
doSth: func(ctx context.Context, containerID string) error {
return apiClient.ContainerKill(ctx, containerID, "SIGKILL")
_, err := apiClient.ContainerKill(ctx, containerID, client.ContainerKillOptions{})
return err
},
},
{
desc: "stop",
doSth: func(ctx context.Context, containerID string) error {
return apiClient.ContainerStop(ctx, containerID, client.ContainerStopOptions{Timeout: &noWaitTimeout})
_, err := apiClient.ContainerStop(ctx, containerID, client.ContainerStopOptions{Timeout: &noWaitTimeout})
return err
},
},
} {
@@ -193,13 +195,15 @@ func TestContainerWithAutoRemoveCanBeRestarted(t *testing.T) {
testContainer.WithAutoRemove,
)
defer func() {
err := apiClient.ContainerRemove(ctx, cID, client.ContainerRemoveOptions{Force: true})
_, err := apiClient.ContainerRemove(ctx, cID, client.ContainerRemoveOptions{Force: true})
if t.Failed() && err != nil {
t.Logf("Cleaning up test container failed with error: %v", err)
}
}()
err := apiClient.ContainerRestart(ctx, cID, client.ContainerStopOptions{Timeout: &noWaitTimeout})
_, err := apiClient.ContainerRestart(ctx, cID, client.ContainerRestartOptions{
Timeout: &noWaitTimeout,
})
assert.NilError(t, err)
inspect, err := apiClient.ContainerInspect(ctx, cID, client.ContainerInspectOptions{})
@@ -236,7 +240,7 @@ func TestContainerRestartWithCancelledRequest(t *testing.T) {
// taking place.
cID := testContainer.Run(ctx, t, apiClient, testContainer.WithCmd("sh", "-c", "trap 'echo received TERM' TERM; while true; do usleep 10; done"))
defer func() {
err := apiClient.ContainerRemove(ctx, cID, client.ContainerRemoveOptions{Force: true})
_, err := apiClient.ContainerRemove(ctx, cID, client.ContainerRemoveOptions{Force: true})
if t.Failed() && err != nil {
t.Logf("Cleaning up test container failed with error: %v", err)
}
@@ -251,7 +255,7 @@ func TestContainerRestartWithCancelledRequest(t *testing.T) {
// is (forcibly) killed.
ctx2, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
stopTimeout := 1
err := apiClient.ContainerRestart(ctx2, cID, client.ContainerStopOptions{
_, err := apiClient.ContainerRestart(ctx2, cID, client.ContainerRestartOptions{
Timeout: &stopTimeout,
})
assert.Check(t, is.ErrorIs(err, context.DeadlineExceeded))

View File

@@ -288,7 +288,7 @@ func TestMacAddressIsAppliedToMainNetworkWithShortID(t *testing.T) {
}
cid := createLegacyContainer(ctx, t, apiClient, "02:42:08:26:a9:55", opts...)
err := apiClient.ContainerStart(ctx, cid, client.ContainerStartOptions{})
_, err := apiClient.ContainerStart(ctx, cid, client.ContainerStartOptions{})
assert.NilError(t, err)
defer container.Remove(ctx, t, apiClient, cid, client.ContainerRemoveOptions{Force: true})
@@ -473,7 +473,7 @@ func TestCgroupRW(t *testing.T) {
return
}
// TODO check if ro or not
err = apiClient.ContainerStart(ctx, resp.ID, client.ContainerStartOptions{})
_, err = apiClient.ContainerStart(ctx, resp.ID, client.ContainerStartOptions{})
assert.NilError(t, err)
res, err := container.Exec(ctx, apiClient, resp.ID, []string{"sh", "-ec", `

View File

@@ -38,7 +38,8 @@ func TestStopContainerWithTimeoutCancel(t *testing.T) {
stoppedCh := make(chan error)
go func() {
sto := stopTimeout
stoppedCh <- apiClient.ContainerStop(ctxCancel, id, client.ContainerStopOptions{Timeout: &sto})
_, err := apiClient.ContainerStop(ctxCancel, id, client.ContainerStopOptions{Timeout: &sto})
stoppedCh <- err
}()
poll.WaitOn(t, logsContains(ctx, apiClient, id, "received TERM"))

View File

@@ -34,7 +34,7 @@ func TestStopContainerWithRestartPolicyAlways(t *testing.T) {
}
for _, name := range names {
err := apiClient.ContainerStop(ctx, name, client.ContainerStopOptions{})
_, err := apiClient.ContainerStop(ctx, name, client.ContainerStopOptions{})
assert.NilError(t, err)
}
@@ -100,7 +100,7 @@ func TestStopContainerWithTimeout(t *testing.T) {
// t.Parallel()
id := container.Run(ctx, t, apiClient, testCmd)
err := apiClient.ContainerStop(ctx, id, client.ContainerStopOptions{Timeout: &tc.timeout})
_, err := apiClient.ContainerStop(ctx, id, client.ContainerStopOptions{Timeout: &tc.timeout})
assert.NilError(t, err)
poll.WaitOn(t, container.IsStopped(ctx, apiClient, id), pollOpts...)

View File

@@ -87,7 +87,7 @@ func TestWaitBlocked(t *testing.T) {
containerID := container.Run(ctx, t, cli, container.WithCmd("sh", "-c", tc.cmd))
waitResC, errC := cli.ContainerWait(ctx, containerID, "")
err := cli.ContainerStop(ctx, containerID, client.ContainerStopOptions{})
_, err := cli.ContainerStop(ctx, containerID, client.ContainerStopOptions{})
assert.NilError(t, err)
select {
@@ -148,7 +148,8 @@ func TestWaitConditions(t *testing.T) {
assert.NilError(t, err)
defer streams.Close()
assert.NilError(t, cli.ContainerStart(ctx, containerID, client.ContainerStartOptions{}))
_, err = cli.ContainerStart(ctx, containerID, client.ContainerStartOptions{})
assert.NilError(t, err)
waitResC, errC := cli.ContainerWait(ctx, containerID, tc.waitCond)
select {
case err := <-errC:
@@ -221,7 +222,10 @@ func TestWaitRestartedContainer(t *testing.T) {
timeout = 0
}
err := cli.ContainerRestart(ctx, containerID, client.ContainerStopOptions{Timeout: &timeout, Signal: "SIGTERM"})
_, err := cli.ContainerRestart(ctx, containerID, client.ContainerRestartOptions{
Timeout: &timeout,
Signal: "SIGTERM",
})
assert.NilError(t, err)
select {

View File

@@ -656,7 +656,7 @@ func testLiveRestoreVolumeReferences(t *testing.T) {
})
// Remove that container which should free the references in the volume
err = c.ContainerRemove(ctx, cID, client.ContainerRemoveOptions{Force: true})
_, err = c.ContainerRemove(ctx, cID, client.ContainerRemoveOptions{Force: true})
assert.NilError(t, err)
// Now we should be able to remove the volume
@@ -704,7 +704,7 @@ func testLiveRestoreVolumeReferences(t *testing.T) {
assert.ErrorContains(t, err, fmt.Sprintf("container %s is using its referenced image", cID[:12]))
// Remove that container which should free the references in the volume
err = c.ContainerRemove(ctx, cID, client.ContainerRemoveOptions{Force: true})
_, err = c.ContainerRemove(ctx, cID, client.ContainerRemoveOptions{Force: true})
assert.NilError(t, err)
// Now we should be able to remove the volume
@@ -727,7 +727,7 @@ func testLiveRestoreVolumeReferences(t *testing.T) {
d.Restart(t, "--live-restore", "--iptables=false", "--ip6tables=false")
err := c.ContainerRemove(ctx, cID, client.ContainerRemoveOptions{Force: true})
_, err := c.ContainerRemove(ctx, cID, client.ContainerRemoveOptions{Force: true})
assert.NilError(t, err)
})
}

View File

@@ -146,7 +146,7 @@ func TestInspectGraphDriverAPIBC(t *testing.T) {
const testImage = "busybox:latest"
ctr, err := c.ContainerCreate(ctx, client.ContainerCreateOptions{Image: testImage, Name: "test-container"})
assert.NilError(t, err)
defer func() { _ = c.ContainerRemove(ctx, ctr.ID, client.ContainerRemoveOptions{Force: true}) }()
defer func() { _, _ = c.ContainerRemove(ctx, ctr.ID, client.ContainerRemoveOptions{Force: true}) }()
if imageInspect, err := c.ImageInspect(ctx, testImage); assert.Check(t, err) {
if tc.expGraphDriver != "" {

View File

@@ -396,7 +396,7 @@ func TestSaveRepoWithMultipleImages(t *testing.T) {
res, err := apiClient.ContainerCommit(ctx, id, client.ContainerCommitOptions{Reference: tag})
assert.NilError(t, err)
err = apiClient.ContainerRemove(ctx, id, client.ContainerRemoveOptions{Force: true})
_, err = apiClient.ContainerRemove(ctx, id, client.ContainerRemoveOptions{Force: true})
assert.NilError(t, err)
return res.ID

View File

@@ -88,7 +88,7 @@ func Run(ctx context.Context, t *testing.T, apiClient client.APIClient, ops ...f
t.Helper()
id := Create(ctx, t, apiClient, ops...)
err := apiClient.ContainerStart(ctx, id, client.ContainerStartOptions{})
_, err := apiClient.ContainerStart(ctx, id, client.ContainerStartOptions{})
assert.NilError(t, err)
return id
@@ -117,7 +117,7 @@ func RunAttach(ctx context.Context, t *testing.T, apiClient client.APIClient, op
})
assert.NilError(t, err)
err = apiClient.ContainerStart(ctx, id, client.ContainerStartOptions{})
_, err = apiClient.ContainerStart(ctx, id, client.ContainerStartOptions{})
assert.NilError(t, err)
s, err := demultiplexStreams(ctx, aresp.HijackedResponse)
@@ -169,7 +169,7 @@ func demultiplexStreams(ctx context.Context, resp client.HijackedResponse) (stre
func Remove(ctx context.Context, t *testing.T, apiClient client.APIClient, container string, options client.ContainerRemoveOptions) {
t.Helper()
err := apiClient.ContainerRemove(ctx, container, options)
_, err := apiClient.ContainerRemove(ctx, container, options)
assert.NilError(t, err)
}

View File

@@ -233,7 +233,7 @@ func TestIPRangeAt64BitLimit(t *testing.T) {
id := ctr.Create(ctx, t, c, ctr.WithNetworkMode(netName))
defer c.ContainerRemove(ctx, id, client.ContainerRemoveOptions{Force: true})
err := c.ContainerStart(ctx, id, client.ContainerStartOptions{})
_, err := c.ContainerStart(ctx, id, client.ContainerStartOptions{})
assert.NilError(t, err)
})
}
@@ -528,7 +528,7 @@ func TestPublishedPortAlreadyInUse(t *testing.T) {
ctr.WithPortMap(networktypes.PortMap{mappedPort: {{HostPort: "8000"}}}))
defer ctr.Remove(ctx, t, apiClient, ctr2, client.ContainerRemoveOptions{Force: true})
err := apiClient.ContainerStart(ctx, ctr2, client.ContainerStartOptions{})
_, err := apiClient.ContainerStart(ctx, ctr2, client.ContainerStartOptions{})
assert.Assert(t, is.ErrorContains(err, "failed to set up container networking"))
inspect, err := apiClient.ContainerInspect(ctx, ctr2, client.ContainerInspectOptions{})
@@ -754,7 +754,7 @@ func TestRemoveLegacyLink(t *testing.T) {
assert.Check(t, is.Contains(res.Stderr(), "404 Not Found"))
// Remove the link ("docker rm --link client/thealias").
err := c.ContainerRemove(ctx, clientName+"/"+svrAlias, client.ContainerRemoveOptions{RemoveLinks: true})
_, err := c.ContainerRemove(ctx, clientName+"/"+svrAlias, client.ContainerRemoveOptions{RemoveLinks: true})
assert.Check(t, err)
// Check both containers are still running.

View File

@@ -1336,7 +1336,7 @@ func TestReadOnlySlashProc(t *testing.T) {
container.WithCmd("ls"),
)
defer c.ContainerRemove(ctx, id4, client.ContainerRemoveOptions{Force: true})
err := c.ContainerStart(ctx, id4, client.ContainerStartOptions{})
_, err := c.ContainerStart(ctx, id4, client.ContainerStartOptions{})
if tc.expErr == "" {
assert.Check(t, err)
} else {

View File

@@ -55,7 +55,7 @@ func TestMACAddrOnRestart(t *testing.T) {
defer c.ContainerRemove(ctx, id1, client.ContainerRemoveOptions{
Force: true,
})
err := c.ContainerStop(ctx, ctr1Name, client.ContainerStopOptions{})
_, err := c.ContainerStop(ctx, ctr1Name, client.ContainerStopOptions{})
assert.Assert(t, is.Nil(err))
// Start a second container, giving the daemon a chance to recycle the first container's
@@ -71,7 +71,7 @@ func TestMACAddrOnRestart(t *testing.T) {
})
// Restart the first container.
err = c.ContainerStart(ctx, ctr1Name, client.ContainerStartOptions{})
_, err = c.ContainerStart(ctx, ctr1Name, client.ContainerStartOptions{})
assert.Assert(t, is.Nil(err))
// Check that the containers ended up with different MAC addresses.
@@ -124,7 +124,7 @@ func TestCfgdMACAddrOnRestart(t *testing.T) {
startAndCheck := func() {
t.Helper()
err := c.ContainerStart(ctx, ctr1Name, client.ContainerStartOptions{})
_, err := c.ContainerStart(ctx, ctr1Name, client.ContainerStartOptions{})
assert.Assert(t, is.Nil(err))
inspect = container.Inspect(ctx, t, c, ctr1Name)
gotMAC = inspect.NetworkSettings.Networks[netName].MacAddress
@@ -132,12 +132,12 @@ func TestCfgdMACAddrOnRestart(t *testing.T) {
}
// Restart the container, check that the MAC address is restored.
err := c.ContainerStop(ctx, ctr1Name, client.ContainerStopOptions{})
_, err := c.ContainerStop(ctx, ctr1Name, client.ContainerStopOptions{})
assert.Assert(t, is.Nil(err))
startAndCheck()
// Restart the daemon, check that the MAC address is restored.
err = c.ContainerStop(ctx, ctr1Name, client.ContainerStopOptions{})
_, err = c.ContainerStop(ctx, ctr1Name, client.ContainerStopOptions{})
assert.Assert(t, is.Nil(err))
d.Restart(t)
startAndCheck()
@@ -294,7 +294,7 @@ func TestWatchtowerCreate(t *testing.T) {
}
id := createLegacyContainer(ctx, t, c, ctrMAC, opts...)
defer c.ContainerRemove(ctx, id, client.ContainerRemoveOptions{Force: true})
err := c.ContainerStart(ctx, id, client.ContainerStartOptions{})
_, err := c.ContainerStart(ctx, id, client.ContainerStartOptions{})
assert.NilError(t, err)
// Check that the container got the expected addresses.

View File

@@ -625,7 +625,8 @@ func TestRestartUserlandProxyUnder2MSL(t *testing.T) {
// Removing the container will kill the userland proxy, and the connection
// opened by the previous HTTP request will be properly closed (ie. on both
// sides). Thus, that connection will transition to the TIME_WAIT state.
assert.NilError(t, c.ContainerRemove(ctx, ctrName, client.ContainerRemoveOptions{Force: true}))
_, err = c.ContainerRemove(ctx, ctrName, client.ContainerRemoveOptions{Force: true})
assert.NilError(t, err)
// Make sure the container can be restarted. [container.Run] checks that
// the ContainerStart API call doesn't return an error. We don't need to

View File

@@ -63,7 +63,7 @@ func TestReadPluginNoRead(t *testing.T) {
assert.Assert(t, err)
defer apiclient.ContainerRemove(ctx, c.ID, client.ContainerRemoveOptions{Force: true})
err = apiclient.ContainerStart(ctx, c.ID, client.ContainerStartOptions{})
_, err = apiclient.ContainerStart(ctx, c.ID, client.ContainerStartOptions{})
assert.Assert(t, err)
poll.WaitOn(t, testContainer.IsStopped(ctx, apiclient, c.ID))

View File

@@ -54,7 +54,7 @@ func TestDockerNetworkConnectAliasPreV144(t *testing.T) {
})
assert.NilError(t, err)
err = apiClient.ContainerStart(ctx, cID1, client.ContainerStartOptions{})
_, err = apiClient.ContainerStart(ctx, cID1, client.ContainerStartOptions{})
assert.NilError(t, err)
ng1, err := apiClient.ContainerInspect(ctx, cID1, client.ContainerInspectOptions{})
@@ -77,7 +77,7 @@ func TestDockerNetworkConnectAliasPreV144(t *testing.T) {
})
assert.NilError(t, err)
err = apiClient.ContainerStart(ctx, cID2, client.ContainerStartOptions{})
_, err = apiClient.ContainerStart(ctx, cID2, client.ContainerStartOptions{})
assert.NilError(t, err)
ng2, err := apiClient.ContainerInspect(ctx, cID2, client.ContainerInspectOptions{})
@@ -111,7 +111,7 @@ func TestDockerNetworkReConnect(t *testing.T) {
err := apiClient.NetworkConnect(ctx, name, c1, &network.EndpointSettings{})
assert.NilError(t, err)
err = apiClient.ContainerStart(ctx, c1, client.ContainerStartOptions{})
_, err = apiClient.ContainerStart(ctx, c1, client.ContainerStartOptions{})
assert.NilError(t, err)
n1, err := apiClient.ContainerInspect(ctx, c1, client.ContainerInspectOptions{})

View File

@@ -46,7 +46,7 @@ func TestCgroupDriverSystemdMemoryLimit(t *testing.T) {
})
defer c.ContainerRemove(ctx, ctrID, client.ContainerRemoveOptions{Force: true})
err := c.ContainerStart(ctx, ctrID, client.ContainerStartOptions{})
_, err := c.ContainerStart(ctx, ctrID, client.ContainerStartOptions{})
assert.NilError(t, err)
s, err := c.ContainerInspect(ctx, ctrID, client.ContainerInspectOptions{})

View File

@@ -97,7 +97,7 @@ func TestRunMountVolumeSubdir(t *testing.T) {
}
assert.NilError(t, creatErr, "container creation failed")
startErr := apiClient.ContainerStart(ctx, id, client.ContainerStartOptions{})
_, startErr := apiClient.ContainerStart(ctx, id, client.ContainerStartOptions{})
if tc.startErr != "" {
assert.ErrorContains(t, startErr, tc.startErr)
return
@@ -173,7 +173,7 @@ func TestRunMountImage(t *testing.T) {
}
startContainer := func(id string) {
startErr := apiClient.ContainerStart(ctx, id, client.ContainerStartOptions{})
_, startErr := apiClient.ContainerStart(ctx, id, client.ContainerStartOptions{})
if tc.startErr != "" {
assert.ErrorContains(t, startErr, tc.startErr)
return
@@ -210,7 +210,7 @@ func TestRunMountImage(t *testing.T) {
// Test that the container survives a restart when the mounted image is removed
if tc.name == "image_remove_force" {
stopErr := apiClient.ContainerStop(ctx, id, client.ContainerStopOptions{})
_, stopErr := apiClient.ContainerStop(ctx, id, client.ContainerStopOptions{})
assert.NilError(t, stopErr)
_, removeErr := apiClient.ImageRemove(ctx, testImage, client.ImageRemoveOptions{Force: true})

View File

@@ -82,7 +82,7 @@ func TestVolumesRemove(t *testing.T) {
})
t.Run("volume not in use", func(t *testing.T) {
err = apiClient.ContainerRemove(ctx, id, client.ContainerRemoveOptions{
_, err = apiClient.ContainerRemove(ctx, id, client.ContainerRemoveOptions{
Force: true,
})
assert.NilError(t, err)
@@ -134,7 +134,7 @@ func TestVolumesRemoveSwarmEnabled(t *testing.T) {
})
t.Run("volume not in use", func(t *testing.T) {
err = apiClient.ContainerRemove(ctx, id, client.ContainerRemoveOptions{
_, err = apiClient.ContainerRemove(ctx, id, client.ContainerRemoveOptions{
Force: true,
})
assert.NilError(t, err)
@@ -351,7 +351,7 @@ VOLUME ` + volDest
volumeName := inspect.Container.Mounts[0].Name
assert.Assert(t, volumeName != "")
err = apiClient.ContainerRemove(ctx, id, client.ContainerRemoveOptions{})
_, err = apiClient.ContainerRemove(ctx, id, client.ContainerRemoveOptions{})
assert.NilError(t, err)
res, err := apiClient.VolumesPrune(ctx, client.VolumePruneOptions{})

View File

@@ -38,12 +38,12 @@ func (e *Execution) Clean(ctx context.Context, t testing.TB) {
}
}
func unpauseAllContainers(ctx context.Context, t testing.TB, client client.ContainerAPIClient) {
func unpauseAllContainers(ctx context.Context, t testing.TB, apiClient client.ContainerAPIClient) {
t.Helper()
containers := getPausedContainers(ctx, t, client)
containers := getPausedContainers(ctx, t, apiClient)
if len(containers) > 0 {
for _, ctr := range containers {
err := client.ContainerUnpause(ctx, ctr.ID)
_, err := apiClient.ContainerUnpause(ctx, ctr.ID, client.ContainerUnPauseOptions{})
assert.Check(t, err, "failed to unpause container %s", ctr.ID)
}
}
@@ -70,7 +70,7 @@ func deleteAllContainers(ctx context.Context, t testing.TB, apiclient client.Con
if _, ok := protectedContainers[ctr.ID]; ok {
continue
}
err := apiclient.ContainerRemove(ctx, ctr.ID, client.ContainerRemoveOptions{
_, err := apiclient.ContainerRemove(ctx, ctr.ID, client.ContainerRemoveOptions{
Force: true,
RemoveVolumes: true,
})

View File

@@ -124,10 +124,11 @@ func (f *remoteFileServer) Close() error {
if f.container == "" {
return nil
}
return f.client.ContainerRemove(context.Background(), f.container, client.ContainerRemoveOptions{
_, err := f.client.ContainerRemove(context.Background(), f.container, client.ContainerRemoveOptions{
Force: true,
RemoveVolumes: true,
})
return err
}
func newRemoteFileServer(t testing.TB, ctx *fakecontext.Fake, c client.APIClient) *remoteFileServer {
@@ -160,7 +161,7 @@ COPY . /static`); err != nil {
Name: ctrName,
})
assert.NilError(t, err)
err = c.ContainerStart(context.Background(), b.ID, client.ContainerStartOptions{})
_, err = c.ContainerStart(context.Background(), b.ID, client.ContainerStartOptions{})
assert.NilError(t, err)
// Find out the system assigned port

View File

@@ -64,20 +64,20 @@ type ContainerAPIClient interface {
ExecAPIClient
ContainerExport(ctx context.Context, container string) (io.ReadCloser, error)
ContainerInspect(ctx context.Context, container string, options ContainerInspectOptions) (ContainerInspectResult, error)
ContainerKill(ctx context.Context, container, signal string) error
ContainerKill(ctx context.Context, container string, options ContainerKillOptions) (ContainerKillResult, error)
ContainerList(ctx context.Context, options ContainerListOptions) ([]container.Summary, error)
ContainerLogs(ctx context.Context, container string, options ContainerLogsOptions) (io.ReadCloser, error)
ContainerPause(ctx context.Context, container string) error
ContainerRemove(ctx context.Context, container string, options ContainerRemoveOptions) error
ContainerPause(ctx context.Context, container string, options ContainerPauseOptions) (ContainerPauseResult, error)
ContainerRemove(ctx context.Context, container string, options ContainerRemoveOptions) (ContainerRemoveResult, error)
ContainerRename(ctx context.Context, container, newContainerName string) error
ContainerResize(ctx context.Context, container string, options ContainerResizeOptions) error
ContainerRestart(ctx context.Context, container string, options ContainerStopOptions) error
ContainerResize(ctx context.Context, container string, options ContainerResizeOptions) (ContainerResizeResult, error)
ContainerRestart(ctx context.Context, container string, options ContainerRestartOptions) (ContainerRestartResult, error)
ContainerStatPath(ctx context.Context, container, path string) (container.PathStat, error)
ContainerStats(ctx context.Context, container string, options ContainerStatsOptions) (ContainerStatsResult, error)
ContainerStart(ctx context.Context, container string, options ContainerStartOptions) error
ContainerStop(ctx context.Context, container string, options ContainerStopOptions) error
ContainerStart(ctx context.Context, container string, options ContainerStartOptions) (ContainerStartResult, error)
ContainerStop(ctx context.Context, container string, options ContainerStopOptions) (ContainerStopResult, error)
ContainerTop(ctx context.Context, container string, arguments []string) (container.TopResponse, error)
ContainerUnpause(ctx context.Context, container string) error
ContainerUnpause(ctx context.Context, container string, options ContainerUnPauseOptions) (ContainerUnPauseResult, error)
ContainerUpdate(ctx context.Context, container string, updateConfig container.UpdateConfig) (container.UpdateResponse, error)
ContainerWait(ctx context.Context, container string, condition container.WaitCondition) (<-chan container.WaitResponse, <-chan error)
CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, container.PathStat, error)

View File

@@ -5,19 +5,35 @@ import (
"net/url"
)
// ContainerKillOptions holds options for [Client.ContainerKill].
type ContainerKillOptions struct {
// Signal (optional) is the signal to send to the container. Unlike
// stop or restart, kill sends the signal immediately and does not
// wait for a graceful-stop timeout. If no value is set, the default
// (SIGKILL) is used.
Signal string `json:",omitempty"`
}
// ContainerKillResult holds the result of [Client.ContainerKill].
type ContainerKillResult struct {
// Add future fields here.
}
// ContainerKill terminates the container process but does not remove the container from the docker host.
func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error {
func (cli *Client) ContainerKill(ctx context.Context, containerID string, options ContainerKillOptions) (ContainerKillResult, error) {
containerID, err := trimID("container", containerID)
if err != nil {
return err
return ContainerKillResult{}, err
}
query := url.Values{}
if signal != "" {
query.Set("signal", signal)
if options.Signal != "" {
query.Set("signal", options.Signal)
}
resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil)
defer ensureReaderClosed(resp)
return err
if err != nil {
return ContainerKillResult{}, err
}
return ContainerKillResult{}, nil
}

View File

@@ -2,14 +2,27 @@ package client
import "context"
// ContainerPauseOptions holds options for [Client.ContainerPause].
type ContainerPauseOptions struct {
// Add future optional parameters here.
}
// ContainerPauseResult holds the result of [Client.ContainerPause].
type ContainerPauseResult struct {
// Add future fields here.
}
// ContainerPause pauses the main process of a given container without terminating it.
func (cli *Client) ContainerPause(ctx context.Context, containerID string) error {
func (cli *Client) ContainerPause(ctx context.Context, containerID string, options ContainerPauseOptions) (ContainerPauseResult, error) {
containerID, err := trimID("container", containerID)
if err != nil {
return err
return ContainerPauseResult{}, err
}
resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil)
defer ensureReaderClosed(resp)
return err
if err != nil {
return ContainerPauseResult{}, err
}
return ContainerPauseResult{}, nil
}

View File

@@ -12,11 +12,16 @@ type ContainerRemoveOptions struct {
Force bool
}
// ContainerRemoveResult holds the result of [Client.ContainerRemove].
type ContainerRemoveResult struct {
// Add future fields here.
}
// ContainerRemove kills and removes a container from the docker host.
func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options ContainerRemoveOptions) error {
func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options ContainerRemoveOptions) (ContainerRemoveResult, error) {
containerID, err := trimID("container", containerID)
if err != nil {
return err
return ContainerRemoveResult{}, err
}
query := url.Values{}
@@ -33,5 +38,8 @@ func (cli *Client) ContainerRemove(ctx context.Context, containerID string, opti
resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil)
defer ensureReaderClosed(resp)
return err
if err != nil {
return ContainerRemoveResult{}, err
}
return ContainerRemoveResult{}, nil
}

View File

@@ -14,13 +14,28 @@ type ContainerResizeOptions struct {
Width uint
}
// ContainerResizeResult holds the result of [Client.ContainerResize].
type ContainerResizeResult struct {
// Add future fields here.
}
// ContainerResize changes the size of the pseudo-TTY for a container.
func (cli *Client) ContainerResize(ctx context.Context, containerID string, options ContainerResizeOptions) error {
func (cli *Client) ContainerResize(ctx context.Context, containerID string, options ContainerResizeOptions) (ContainerResizeResult, error) {
containerID, err := trimID("container", containerID)
if err != nil {
return err
return ContainerResizeResult{}, err
}
return cli.resize(ctx, "/containers/"+containerID, options.Height, options.Width)
// FIXME(thaJeztah): the API / backend accepts uint32, but container.ResizeOptions uses uint.
query := url.Values{}
query.Set("h", strconv.FormatUint(uint64(options.Height), 10))
query.Set("w", strconv.FormatUint(uint64(options.Width), 10))
resp, err := cli.post(ctx, "/containers/"+containerID+"/resize", query, nil, nil)
defer ensureReaderClosed(resp)
if err != nil {
return ContainerResizeResult{}, err
}
return ContainerResizeResult{}, nil
}
// ExecResizeOptions holds options for resizing a container exec TTY.
@@ -36,17 +51,16 @@ func (cli *Client) ExecResize(ctx context.Context, execID string, options ExecRe
if err != nil {
return ExecResizeResult{}, err
}
err = cli.resize(ctx, "/exec/"+execID, options.Height, options.Width)
return ExecResizeResult{}, err
}
func (cli *Client) resize(ctx context.Context, basePath string, height, width uint) error {
// FIXME(thaJeztah): the API / backend accepts uint32, but container.ResizeOptions uses uint.
query := url.Values{}
query.Set("h", strconv.FormatUint(uint64(height), 10))
query.Set("w", strconv.FormatUint(uint64(width), 10))
query.Set("h", strconv.FormatUint(uint64(options.Height), 10))
query.Set("w", strconv.FormatUint(uint64(options.Width), 10))
resp, err := cli.post(ctx, basePath+"/resize", query, nil, nil)
resp, err := cli.post(ctx, "/exec/"+execID+"/resize", query, nil, nil)
defer ensureReaderClosed(resp)
return err
if err != nil {
return ExecResizeResult{}, err
}
return ExecResizeResult{}, nil
}

View File

@@ -6,13 +6,36 @@ import (
"strconv"
)
// ContainerRestartOptions holds options for [Client.ContainerRestart].
type ContainerRestartOptions struct {
// Signal (optional) is the signal to send to the container to (gracefully)
// stop it before forcibly terminating the container with SIGKILL after the
// timeout expires. If no value is set, the default (SIGTERM) is used.
Signal string `json:",omitempty"`
// Timeout (optional) is the timeout (in seconds) to wait for the container
// to stop gracefully before forcibly terminating it with SIGKILL.
//
// - Use nil to use the default timeout (10 seconds).
// - Use '-1' to wait indefinitely.
// - Use '0' to not wait for the container to exit gracefully, and
// immediately proceeds to forcibly terminating the container.
// - Other positive values are used as timeout (in seconds).
Timeout *int `json:",omitempty"`
}
// ContainerRestartResult holds the result of [Client.ContainerRestart].
type ContainerRestartResult struct {
// Add future fields here.
}
// ContainerRestart stops, and starts a container again.
// It makes the daemon wait for the container to be up again for
// a specific amount of time, given the timeout.
func (cli *Client) ContainerRestart(ctx context.Context, containerID string, options ContainerStopOptions) error {
func (cli *Client) ContainerRestart(ctx context.Context, containerID string, options ContainerRestartOptions) (ContainerRestartResult, error) {
containerID, err := trimID("container", containerID)
if err != nil {
return err
return ContainerRestartResult{}, err
}
query := url.Values{}
@@ -24,5 +47,8 @@ func (cli *Client) ContainerRestart(ctx context.Context, containerID string, opt
}
resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil)
defer ensureReaderClosed(resp)
return err
if err != nil {
return ContainerRestartResult{}, err
}
return ContainerRestartResult{}, nil
}

View File

@@ -5,17 +5,22 @@ import (
"net/url"
)
// ContainerStartOptions holds parameters to start containers.
// ContainerStartOptions holds options for [Client.ContainerStart].
type ContainerStartOptions struct {
CheckpointID string
CheckpointDir string
}
// ContainerStartResult holds the result of [Client.ContainerStart].
type ContainerStartResult struct {
// Add future fields here.
}
// ContainerStart sends a request to the docker daemon to start a container.
func (cli *Client) ContainerStart(ctx context.Context, containerID string, options ContainerStartOptions) error {
func (cli *Client) ContainerStart(ctx context.Context, containerID string, options ContainerStartOptions) (ContainerStartResult, error) {
containerID, err := trimID("container", containerID)
if err != nil {
return err
return ContainerStartResult{}, err
}
query := url.Values{}
@@ -28,5 +33,8 @@ func (cli *Client) ContainerStart(ctx context.Context, containerID string, optio
resp, err := cli.post(ctx, "/containers/"+containerID+"/start", query, nil, nil)
defer ensureReaderClosed(resp)
return err
if err != nil {
return ContainerStartResult{}, err
}
return ContainerStartResult{}, nil
}

View File

@@ -6,11 +6,11 @@ import (
"strconv"
)
// ContainerStopOptions holds the options to stop or restart a container.
// ContainerStopOptions holds the options for [Client.ContainerStop].
type ContainerStopOptions struct {
// Signal (optional) is the signal to send to the container to (gracefully)
// stop it before forcibly terminating the container with SIGKILL after the
// timeout expires. If not value is set, the default (SIGTERM) is used.
// timeout expires. If no value is set, the default (SIGTERM) is used.
Signal string `json:",omitempty"`
// Timeout (optional) is the timeout (in seconds) to wait for the container
@@ -24,6 +24,11 @@ type ContainerStopOptions struct {
Timeout *int `json:",omitempty"`
}
// ContainerStopResult holds the result of [Client.ContainerStop].
type ContainerStopResult struct {
// Add future fields here.
}
// ContainerStop stops a container. In case the container fails to stop
// gracefully within a time frame specified by the timeout argument,
// it is forcefully terminated (killed).
@@ -31,10 +36,10 @@ type ContainerStopOptions struct {
// If the timeout is nil, the container's StopTimeout value is used, if set,
// otherwise the engine default. A negative timeout value can be specified,
// meaning no timeout, i.e. no forceful termination is performed.
func (cli *Client) ContainerStop(ctx context.Context, containerID string, options ContainerStopOptions) error {
func (cli *Client) ContainerStop(ctx context.Context, containerID string, options ContainerStopOptions) (ContainerStopResult, error) {
containerID, err := trimID("container", containerID)
if err != nil {
return err
return ContainerStopResult{}, err
}
query := url.Values{}
@@ -46,5 +51,8 @@ func (cli *Client) ContainerStop(ctx context.Context, containerID string, option
}
resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil)
defer ensureReaderClosed(resp)
return err
if err != nil {
return ContainerStopResult{}, err
}
return ContainerStopResult{}, nil
}

View File

@@ -2,14 +2,27 @@ package client
import "context"
// ContainerUnPauseOptions holds options for [Client.ContainerUnpause].
type ContainerUnPauseOptions struct {
// Add future optional parameters here.
}
// ContainerUnPauseResult holds the result of [Client.ContainerUnpause].
type ContainerUnPauseResult struct {
// Add future fields here.
}
// ContainerUnpause resumes the process execution within a container.
func (cli *Client) ContainerUnpause(ctx context.Context, containerID string) error {
func (cli *Client) ContainerUnpause(ctx context.Context, containerID string, options ContainerUnPauseOptions) (ContainerUnPauseResult, error) {
containerID, err := trimID("container", containerID)
if err != nil {
return err
return ContainerUnPauseResult{}, err
}
resp, err := cli.post(ctx, "/containers/"+containerID+"/unpause", nil, nil, nil)
defer ensureReaderClosed(resp)
return err
if err != nil {
return ContainerUnPauseResult{}, err
}
return ContainerUnPauseResult{}, nil
}