Mirror of https://github.com/moby/moby.git (synced 2026-01-12 11:11:44 +00:00)
Compare commits
378 Commits
Commit list: 5f6d6f3f22 through 7639c4bdeb (378 commits; per-commit author, date, and message details are not included).
59
Dockerfile
@@ -25,7 +25,8 @@
|
||||
#
|
||||
|
||||
ARG CROSS="false"
|
||||
ARG GO_VERSION=1.12.8
|
||||
ARG GO_VERSION=1.12.16
|
||||
ARG DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
FROM golang:${GO_VERSION}-stretch AS base
|
||||
ARG APT_MIRROR
|
||||
@@ -33,19 +34,21 @@ RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/
|
||||
&& sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list
|
||||
|
||||
FROM base AS criu
|
||||
ARG DEBIAN_FRONTEND
|
||||
# Install CRIU for checkpoint/restore support
|
||||
ENV CRIU_VERSION 3.11
|
||||
ENV CRIU_VERSION 3.12
|
||||
# Install dependency packages specific to criu
|
||||
RUN apt-get update && apt-get install -y \
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
libnet-dev \
|
||||
libprotobuf-c0-dev \
|
||||
libprotobuf-c-dev \
|
||||
libprotobuf-dev \
|
||||
libnl-3-dev \
|
||||
libcap-dev \
|
||||
protobuf-compiler \
|
||||
protobuf-c-compiler \
|
||||
python-protobuf \
|
||||
&& mkdir -p /usr/src/criu \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
RUN mkdir -p /usr/src/criu \
|
||||
&& curl -sSL https://github.com/checkpoint-restore/criu/archive/v${CRIU_VERSION}.tar.gz | tar -C /usr/src/criu/ -xz --strip-components=1 \
|
||||
&& cd /usr/src/criu \
|
||||
&& make \
|
||||
@@ -84,7 +87,11 @@ RUN set -x \
|
||||
&& rm -rf "$GOPATH"
|
||||
|
||||
FROM base AS frozen-images
|
||||
RUN apt-get update && apt-get install -y jq ca-certificates --no-install-recommends
|
||||
ARG DEBIAN_FRONTEND
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
ca-certificates \
|
||||
jq \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
# Get useful and necessary Hub images so we can "docker load" locally instead of pulling
|
||||
COPY contrib/download-frozen-image-v2.sh /
|
||||
RUN /download-frozen-image-v2.sh /build \
|
||||
@@ -98,32 +105,34 @@ RUN /download-frozen-image-v2.sh /build \
|
||||
FROM base AS cross-false
|
||||
|
||||
FROM base AS cross-true
|
||||
ARG DEBIAN_FRONTEND
|
||||
RUN dpkg --add-architecture armhf
|
||||
RUN dpkg --add-architecture arm64
|
||||
RUN dpkg --add-architecture armel
|
||||
RUN if [ "$(go env GOHOSTARCH)" = "amd64" ]; then \
|
||||
apt-get update \
|
||||
&& apt-get install -y --no-install-recommends \
|
||||
apt-get update && apt-get install -y --no-install-recommends \
|
||||
crossbuild-essential-armhf \
|
||||
crossbuild-essential-arm64 \
|
||||
crossbuild-essential-armel; \
|
||||
crossbuild-essential-armel \
|
||||
&& rm -rf /var/lib/apt/lists/*; \
|
||||
fi
|
||||
|
||||
FROM cross-${CROSS} as dev-base
|
||||
|
||||
FROM dev-base AS runtime-dev-cross-false
|
||||
RUN apt-get update && apt-get install -y \
|
||||
ARG DEBIAN_FRONTEND
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
libapparmor-dev \
|
||||
libseccomp-dev
|
||||
|
||||
libseccomp-dev \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
FROM cross-true AS runtime-dev-cross-true
|
||||
ARG DEBIAN_FRONTEND
|
||||
# These crossbuild packages rely on gcc-<arch>, but this doesn't want to install
|
||||
# on non-amd64 systems.
|
||||
# Additionally, the crossbuild-amd64 is currently only on debian:buster, so
|
||||
# other architectures cannnot crossbuild amd64.
|
||||
RUN if [ "$(go env GOHOSTARCH)" = "amd64" ]; then \
|
||||
apt-get update \
|
||||
&& apt-get install -y \
|
||||
apt-get update && apt-get install -y --no-install-recommends \
|
||||
libseccomp-dev:armhf \
|
||||
libseccomp-dev:arm64 \
|
||||
libseccomp-dev:armel \
|
||||
@@ -133,7 +142,8 @@ RUN if [ "$(go env GOHOSTARCH)" = "amd64" ]; then \
|
||||
# install this arches seccomp here due to compat issues with the v0 builder
|
||||
# This is as opposed to inheriting from runtime-dev-cross-false
|
||||
libapparmor-dev \
|
||||
libseccomp-dev; \
|
||||
libseccomp-dev \
|
||||
&& rm -rf /var/lib/apt/lists/*; \
|
||||
fi
|
||||
|
||||
FROM runtime-dev-cross-${CROSS} AS runtime-dev
|
||||
@@ -151,7 +161,10 @@ COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
|
||||
RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME
|
||||
|
||||
FROM dev-base AS containerd
|
||||
RUN apt-get update && apt-get install -y btrfs-tools
|
||||
ARG DEBIAN_FRONTEND
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
btrfs-tools \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
ENV INSTALL_BINARY_NAME=containerd
|
||||
COPY hack/dockerfile/install/install.sh ./install.sh
|
||||
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
|
||||
@@ -188,7 +201,11 @@ COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
|
||||
RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME
|
||||
|
||||
FROM dev-base AS tini
|
||||
RUN apt-get update && apt-get install -y cmake vim-common
|
||||
ARG DEBIAN_FRONTEND
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
cmake \
|
||||
vim-common \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
COPY hack/dockerfile/install/install.sh ./install.sh
|
||||
ENV INSTALL_BINARY_NAME=tini
|
||||
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
|
||||
@@ -203,6 +220,7 @@ COPY ./contrib/dockerd-rootless.sh /build
|
||||
|
||||
# TODO: Some of this is only really needed for testing, it would be nice to split this up
|
||||
FROM runtime-dev AS dev
|
||||
ARG DEBIAN_FRONTEND
|
||||
RUN groupadd -r docker
|
||||
RUN useradd --create-home --gid docker unprivilegeduser
|
||||
# Let us use a .bashrc file
|
||||
@@ -213,7 +231,7 @@ RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker
|
||||
RUN ldconfig
|
||||
# This should only install packages that are specifically needed for the dev environment and nothing else
|
||||
# Do you really need to add another package here? Can it be done in a different build stage?
|
||||
RUN apt-get update && apt-get install -y \
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
apparmor \
|
||||
aufs-tools \
|
||||
bash-completion \
|
||||
@@ -230,6 +248,7 @@ RUN apt-get update && apt-get install -y \
|
||||
pigz \
|
||||
python3-pip \
|
||||
python3-setuptools \
|
||||
python3-wheel \
|
||||
thin-provisioning-tools \
|
||||
vim \
|
||||
vim-common \
|
||||
@@ -240,7 +259,7 @@ RUN apt-get update && apt-get install -y \
|
||||
libprotobuf-c1 \
|
||||
libnet1 \
|
||||
libnl-3-200 \
|
||||
--no-install-recommends
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
RUN pip3 install yamllint==1.16.0
|
||||
|
||||
@@ -262,8 +281,6 @@ COPY --from=djs55/vpnkit@sha256:e508a17cfacc8fd39261d5b4e397df2b953690da577e2c98
|
||||
|
||||
ENV PATH=/usr/local/cli:$PATH
|
||||
ENV DOCKER_BUILDTAGS apparmor seccomp selinux
|
||||
# Options for hack/validate/gometalinter
|
||||
ENV GOMETALINTER_OPTS="--deadline=2m"
|
||||
WORKDIR /go/src/github.com/docker/docker
|
||||
VOLUME /var/lib/docker
|
||||
# Wrap all commands in the "docker-in-docker" script to allow nested containers
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
ARG GO_VERSION=1.12.8
|
||||
ARG GO_VERSION=1.12.16
|
||||
|
||||
FROM golang:${GO_VERSION}-alpine AS base
|
||||
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
|
||||
# This represents the bare minimum required to build and test Docker.
|
||||
|
||||
ARG GO_VERSION=1.12.8
|
||||
ARG GO_VERSION=1.12.16
|
||||
|
||||
FROM golang:${GO_VERSION}-stretch
|
||||
|
||||
|
||||
@@ -45,8 +45,8 @@
|
||||
#
|
||||
# 1. Clone the sources from github.com:
|
||||
#
|
||||
# >> git clone https://github.com/docker/docker.git C:\go\src\github.com\docker\docker
|
||||
# >> Cloning into 'C:\go\src\github.com\docker\docker'...
|
||||
# >> git clone https://github.com/docker/docker.git C:\gopath\src\github.com\docker\docker
|
||||
# >> Cloning into 'C:\gopath\src\github.com\docker\docker'...
|
||||
# >> remote: Counting objects: 186216, done.
|
||||
# >> remote: Compressing objects: 100% (21/21), done.
|
||||
# >> remote: Total 186216 (delta 5), reused 0 (delta 0), pack-reused 186195
|
||||
@@ -59,7 +59,7 @@
|
||||
#
|
||||
# 2. Change directory to the cloned docker sources:
|
||||
#
|
||||
# >> cd C:\go\src\github.com\docker\docker
|
||||
# >> cd C:\gopath\src\github.com\docker\docker
|
||||
#
|
||||
#
|
||||
# 3. Build a docker image with the components required to build the docker binaries from source
|
||||
@@ -79,8 +79,8 @@
|
||||
# 5. Copy the binaries out of the container, replacing HostPath with an appropriate destination
|
||||
# folder on the host system where you want the binaries to be located.
|
||||
#
|
||||
# >> docker cp binaries:C:\go\src\github.com\docker\docker\bundles\docker.exe C:\HostPath\docker.exe
|
||||
# >> docker cp binaries:C:\go\src\github.com\docker\docker\bundles\dockerd.exe C:\HostPath\dockerd.exe
|
||||
# >> docker cp binaries:C:\gopath\src\github.com\docker\docker\bundles\docker.exe C:\HostPath\docker.exe
|
||||
# >> docker cp binaries:C:\gopath\src\github.com\docker\docker\bundles\dockerd.exe C:\HostPath\dockerd.exe
|
||||
#
|
||||
#
|
||||
# 6. (Optional) Remove the interim container holding the built executable binaries:
|
||||
@@ -147,7 +147,7 @@
|
||||
# The docker integration tests do not currently run in a container on Windows, predominantly
|
||||
# due to Windows not supporting privileged mode, so anything using a volume would fail.
|
||||
# They (along with the rest of the docker CI suite) can be run using
|
||||
# https://github.com/jhowardmsft/docker-w2wCIScripts/blob/master/runCI/Invoke-DockerCI.ps1.
|
||||
# https://github.com/kevpar/docker-w2wCIScripts/blob/master/runCI/Invoke-DockerCI.ps1.
|
||||
#
|
||||
# -----------------------------------------------------------------------------------------
|
||||
|
||||
@@ -165,14 +165,14 @@ FROM microsoft/windowsservercore
|
||||
# Use PowerShell as the default shell
|
||||
SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop'; $ProgressPreference = 'SilentlyContinue';"]
|
||||
|
||||
ARG GO_VERSION=1.12.8
|
||||
ARG GO_VERSION=1.12.16
|
||||
|
||||
# Environment variable notes:
|
||||
# - GO_VERSION must be consistent with 'Dockerfile' used by Linux.
|
||||
# - FROM_DOCKERFILE is used for detection of building within a container.
|
||||
ENV GO_VERSION=${GO_VERSION} `
|
||||
GIT_VERSION=2.11.1 `
|
||||
GOPATH=C:\go `
|
||||
GOPATH=C:\gopath `
|
||||
FROM_DOCKERFILE=1
|
||||
|
||||
RUN `
|
||||
@@ -214,16 +214,17 @@ RUN `
|
||||
Download-File $location C:\gitsetup.zip; `
|
||||
`
|
||||
Write-Host INFO: Downloading go...; `
|
||||
Download-File $('https://golang.org/dl/go'+$Env:GO_VERSION+'.windows-amd64.zip') C:\go.zip; `
|
||||
$dlGoVersion=$Env:GO_VERSION -replace '\.0$',''; `
|
||||
Download-File "https://golang.org/dl/go${dlGoVersion}.windows-amd64.zip" C:\go.zip; `
|
||||
`
|
||||
Write-Host INFO: Downloading compiler 1 of 3...; `
|
||||
Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/gcc.zip C:\gcc.zip; `
|
||||
Download-File https://raw.githubusercontent.com/moby/docker-tdmgcc/master/gcc.zip C:\gcc.zip; `
|
||||
`
|
||||
Write-Host INFO: Downloading compiler 2 of 3...; `
|
||||
Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/runtime.zip C:\runtime.zip; `
|
||||
Download-File https://raw.githubusercontent.com/moby/docker-tdmgcc/master/runtime.zip C:\runtime.zip; `
|
||||
`
|
||||
Write-Host INFO: Downloading compiler 3 of 3...; `
|
||||
Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/binutils.zip C:\binutils.zip; `
|
||||
Download-File https://raw.githubusercontent.com/moby/docker-tdmgcc/master/binutils.zip C:\binutils.zip; `
|
||||
`
|
||||
Write-Host INFO: Extracting git...; `
|
||||
Expand-Archive C:\gitsetup.zip C:\git-tmp; `
|
||||
@@ -248,7 +249,7 @@ RUN `
|
||||
Remove-Item C:\gitsetup.zip; `
|
||||
`
|
||||
Write-Host INFO: Creating source directory...; `
|
||||
New-Item -ItemType Directory -Path C:\go\src\github.com\docker\docker | Out-Null; `
|
||||
New-Item -ItemType Directory -Path ${GOPATH}\src\github.com\docker\docker | Out-Null; `
|
||||
`
|
||||
Write-Host INFO: Configuring git core.autocrlf...; `
|
||||
C:\git\cmd\git config --global core.autocrlf true; `
|
||||
@@ -259,7 +260,7 @@ RUN `
|
||||
ENTRYPOINT ["powershell.exe"]
|
||||
|
||||
# Set the working directory to the location of the sources
|
||||
WORKDIR C:\go\src\github.com\docker\docker
|
||||
WORKDIR ${GOPATH}\src\github.com\docker\docker
|
||||
|
||||
# Copy the sources into the container
|
||||
COPY . .
|
||||
|
||||
319
Jenkinsfile
@@ -8,12 +8,13 @@ pipeline {
|
||||
timestamps()
|
||||
}
|
||||
parameters {
|
||||
booleanParam(name: 'unit_validate', defaultValue: true, description: 'x86 unit tests and vendor check')
|
||||
booleanParam(name: 'janky', defaultValue: true, description: 'x86 Build/Test')
|
||||
booleanParam(name: 'z', defaultValue: true, description: 'IBM Z (s390x) Build/Test')
|
||||
booleanParam(name: 'powerpc', defaultValue: true, description: 'PowerPC (ppc64le) Build/Test')
|
||||
booleanParam(name: 'unit_validate', defaultValue: true, description: 'amd64 (x86_64) unit tests and vendor check')
|
||||
booleanParam(name: 'amd64', defaultValue: true, description: 'amd64 (x86_64) Build/Test')
|
||||
booleanParam(name: 's390x', defaultValue: true, description: 'IBM Z (s390x) Build/Test')
|
||||
booleanParam(name: 'ppc64le', defaultValue: true, description: 'PowerPC (ppc64le) Build/Test')
|
||||
booleanParam(name: 'windowsRS1', defaultValue: false, description: 'Windows 2016 (RS1) Build/Test')
|
||||
booleanParam(name: 'windowsRS5', defaultValue: false, description: 'Windows 2019 (RS5) Build/Test')
|
||||
booleanParam(name: 'windowsRS5', defaultValue: true, description: 'Windows 2019 (RS5) Build/Test')
|
||||
booleanParam(name: 'skip_dco', defaultValue: false, description: 'Skip the DCO check')
|
||||
}
|
||||
environment {
|
||||
DOCKER_BUILDKIT = '1'
|
||||
@@ -21,9 +22,37 @@ pipeline {
|
||||
DOCKER_GRAPHDRIVER = 'overlay2'
|
||||
APT_MIRROR = 'cdn-fastly.deb.debian.org'
|
||||
CHECK_CONFIG_COMMIT = '78405559cfe5987174aa2cb6463b9b2c1b917255'
|
||||
TESTDEBUG = '0'
|
||||
TIMEOUT = '120m'
|
||||
}
|
||||
stages {
|
||||
stage('pr-hack') {
|
||||
when { changeRequest() }
|
||||
steps {
|
||||
script {
|
||||
echo "Workaround for PR auto-cancel feature. Borrowed from https://issues.jenkins-ci.org/browse/JENKINS-43353"
|
||||
def buildNumber = env.BUILD_NUMBER as int
|
||||
if (buildNumber > 1) milestone(buildNumber - 1)
|
||||
milestone(buildNumber)
|
||||
}
|
||||
}
|
||||
}
|
||||
stage('DCO-check') {
|
||||
when {
|
||||
beforeAgent true
|
||||
expression { !params.skip_dco }
|
||||
}
|
||||
agent { label 'amd64 && ubuntu-1804 && overlay2' }
|
||||
steps {
|
||||
sh '''
|
||||
docker run --rm \
|
||||
-v "$WORKSPACE:/workspace" \
|
||||
-e VALIDATE_REPO=${GIT_URL} \
|
||||
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
|
||||
alpine sh -c 'apk add --no-cache -q bash git openssh-client && cd /workspace && hack/validate/dco'
|
||||
'''
|
||||
}
|
||||
}
|
||||
stage('Build') {
|
||||
parallel {
|
||||
stage('unit-validate') {
|
||||
@@ -60,6 +89,8 @@ pipeline {
|
||||
-e DOCKER_EXPERIMENTAL \
|
||||
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
|
||||
-e DOCKER_GRAPHDRIVER \
|
||||
-e VALIDATE_REPO=${GIT_URL} \
|
||||
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
|
||||
docker:${GIT_COMMIT} \
|
||||
hack/validate/default
|
||||
'''
|
||||
@@ -74,6 +105,8 @@ pipeline {
|
||||
-e DOCKER_EXPERIMENTAL \
|
||||
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
|
||||
-e DOCKER_GRAPHDRIVER \
|
||||
-e VALIDATE_REPO=${GIT_URL} \
|
||||
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
|
||||
docker:${GIT_COMMIT} \
|
||||
hack/make.sh \
|
||||
dynbinary-daemon \
|
||||
@@ -94,12 +127,15 @@ pipeline {
|
||||
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
|
||||
'''
|
||||
|
||||
sh '''
|
||||
echo 'Creating docker-py-bundles.tar.gz'
|
||||
tar -czf docker-py-bundles.tar.gz bundles/test-docker-py/*.xml bundles/test-docker-py/*.log
|
||||
'''
|
||||
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
|
||||
sh '''
|
||||
bundleName=docker-py
|
||||
echo "Creating ${bundleName}-bundles.tar.gz"
|
||||
tar -czf ${bundleName}-bundles.tar.gz bundles/test-docker-py/*.xml bundles/test-docker-py/*.log
|
||||
'''
|
||||
|
||||
archiveArtifacts artifacts: 'docker-py-bundles.tar.gz'
|
||||
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -139,6 +175,8 @@ pipeline {
|
||||
-e DOCKER_EXPERIMENTAL \
|
||||
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
|
||||
-e DOCKER_GRAPHDRIVER \
|
||||
-e VALIDATE_REPO=${GIT_URL} \
|
||||
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
|
||||
docker:${GIT_COMMIT} \
|
||||
hack/test/unit
|
||||
'''
|
||||
@@ -158,6 +196,8 @@ pipeline {
|
||||
-e DOCKER_EXPERIMENTAL \
|
||||
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
|
||||
-e DOCKER_GRAPHDRIVER \
|
||||
-e VALIDATE_REPO=${GIT_URL} \
|
||||
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
|
||||
docker:${GIT_COMMIT} \
|
||||
hack/validate/vendor
|
||||
'''
|
||||
@@ -185,12 +225,15 @@ pipeline {
|
||||
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
|
||||
'''
|
||||
|
||||
sh '''
|
||||
echo 'Creating unit-bundles.tar.gz'
|
||||
tar -czvf unit-bundles.tar.gz bundles/junit-report.xml bundles/go-test-report.json bundles/profile.out
|
||||
'''
|
||||
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
|
||||
sh '''
|
||||
bundleName=unit
|
||||
echo "Creating ${bundleName}-bundles.tar.gz"
|
||||
tar -czvf ${bundleName}-bundles.tar.gz bundles/junit-report.xml bundles/go-test-report.json bundles/profile.out
|
||||
'''
|
||||
|
||||
archiveArtifacts artifacts: 'unit-bundles.tar.gz'
|
||||
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
|
||||
}
|
||||
}
|
||||
cleanup {
|
||||
sh 'make clean'
|
||||
@@ -198,10 +241,10 @@ pipeline {
|
||||
}
|
||||
}
|
||||
}
|
||||
stage('janky') {
|
||||
stage('amd64') {
|
||||
when {
|
||||
beforeAgent true
|
||||
expression { params.janky }
|
||||
expression { params.amd64 }
|
||||
}
|
||||
agent { label 'amd64 && ubuntu-1804 && overlay2' }
|
||||
|
||||
@@ -236,18 +279,20 @@ pipeline {
|
||||
run_tests() {
|
||||
[ -n "$TESTDEBUG" ] && rm= || rm=--rm;
|
||||
docker run $rm -t --privileged \
|
||||
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
|
||||
-v "$WORKSPACE/bundles/${TEST_INTEGRATION_DEST}:/go/src/github.com/docker/docker/bundles" \
|
||||
-v "$WORKSPACE/bundles/dynbinary-daemon:/go/src/github.com/docker/docker/bundles/dynbinary-daemon" \
|
||||
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
|
||||
--name "$CONTAINER_NAME" \
|
||||
-e KEEPBUNDLE=1 \
|
||||
-e TESTDEBUG \
|
||||
-e TESTFLAGS \
|
||||
-e TEST_INTEGRATION_DEST \
|
||||
-e TEST_SKIP_INTEGRATION \
|
||||
-e TEST_SKIP_INTEGRATION_CLI \
|
||||
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
|
||||
-e DOCKER_GRAPHDRIVER \
|
||||
-e TIMEOUT \
|
||||
-e VALIDATE_REPO=${GIT_URL} \
|
||||
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
|
||||
docker:${GIT_COMMIT} \
|
||||
hack/make.sh \
|
||||
"$1" \
|
||||
@@ -274,10 +319,10 @@ pipeline {
|
||||
TEST_INTEGRATION_DEST=1 CONTAINER_NAME=${CONTAINER_NAME}-1 TEST_SKIP_INTEGRATION_CLI=1 run_tests test-integration-flaky &
|
||||
|
||||
# integration-cli first set
|
||||
TEST_INTEGRATION_DEST=2 CONTAINER_NAME=${CONTAINER_NAME}-2 TEST_SKIP_INTEGRATION=1 TESTFLAGS="-check.f ^(DockerSuite|DockerNetworkSuite|DockerHubPullSuite|DockerRegistrySuite|DockerSchema1RegistrySuite|DockerRegistryAuthTokenSuite|DockerRegistryAuthHtpasswdSuite)" run_tests &
|
||||
TEST_INTEGRATION_DEST=2 CONTAINER_NAME=${CONTAINER_NAME}-2 TEST_SKIP_INTEGRATION=1 TESTFLAGS="-test.run Test(DockerSuite|DockerNetworkSuite|DockerHubPullSuite|DockerRegistrySuite|DockerSchema1RegistrySuite|DockerRegistryAuthTokenSuite|DockerRegistryAuthHtpasswdSuite)/" run_tests &
|
||||
|
||||
# integration-cli second set
|
||||
TEST_INTEGRATION_DEST=3 CONTAINER_NAME=${CONTAINER_NAME}-3 TEST_SKIP_INTEGRATION=1 TESTFLAGS="-check.f ^(DockerSwarmSuite|DockerDaemonSuite|DockerExternalVolumeSuite)" run_tests &
|
||||
TEST_INTEGRATION_DEST=3 CONTAINER_NAME=${CONTAINER_NAME}-3 TEST_SKIP_INTEGRATION=1 TESTFLAGS="-test.run Test(DockerSwarmSuite|DockerDaemonSuite|DockerExternalVolumeSuite)/" run_tests &
|
||||
|
||||
set +x
|
||||
c=0
|
||||
@@ -287,6 +332,11 @@ pipeline {
|
||||
exit $c
|
||||
'''
|
||||
}
|
||||
post {
|
||||
always {
|
||||
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -294,7 +344,8 @@ pipeline {
|
||||
always {
|
||||
sh '''
|
||||
echo "Ensuring container killed."
|
||||
docker rm -vf docker-pr$BUILD_NUMBER || true
|
||||
cids=$(docker ps -aq -f name=docker-pr${BUILD_NUMBER}-*)
|
||||
[ -n "$cids" ] && docker rm -vf $cids || true
|
||||
'''
|
||||
|
||||
sh '''
|
||||
@@ -302,13 +353,16 @@ pipeline {
|
||||
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
|
||||
'''
|
||||
|
||||
sh '''
|
||||
echo "Creating janky-bundles.tar.gz"
|
||||
# exclude overlay2 directories
|
||||
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf janky-bundles.tar.gz
|
||||
'''
|
||||
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
|
||||
sh '''
|
||||
bundleName=amd64
|
||||
echo "Creating ${bundleName}-bundles.tar.gz"
|
||||
# exclude overlay2 directories
|
||||
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
|
||||
'''
|
||||
|
||||
archiveArtifacts artifacts: 'janky-bundles.tar.gz'
|
||||
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
|
||||
}
|
||||
}
|
||||
cleanup {
|
||||
sh 'make clean'
|
||||
@@ -316,10 +370,10 @@ pipeline {
|
||||
}
|
||||
}
|
||||
}
|
||||
stage('z') {
|
||||
stage('s390x') {
|
||||
when {
|
||||
beforeAgent true
|
||||
expression { params.z }
|
||||
expression { params.s390x }
|
||||
}
|
||||
agent { label 's390x-ubuntu-1604' }
|
||||
// s390x machines run on Docker 18.06, and buildkit has some bugs on that version
|
||||
@@ -340,7 +394,7 @@ pipeline {
|
||||
stage("Build dev image") {
|
||||
steps {
|
||||
sh '''
|
||||
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} -f Dockerfile .
|
||||
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
|
||||
'''
|
||||
}
|
||||
}
|
||||
@@ -353,6 +407,8 @@ pipeline {
|
||||
-e DOCKER_EXPERIMENTAL \
|
||||
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
|
||||
-e DOCKER_GRAPHDRIVER \
|
||||
-e VALIDATE_REPO=${GIT_URL} \
|
||||
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
|
||||
docker:${GIT_COMMIT} \
|
||||
hack/test/unit
|
||||
'''
|
||||
@@ -373,14 +429,22 @@ pipeline {
|
||||
-e DOCKER_EXPERIMENTAL \
|
||||
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
|
||||
-e DOCKER_GRAPHDRIVER \
|
||||
-e TESTDEBUG \
|
||||
-e TEST_SKIP_INTEGRATION_CLI \
|
||||
-e TIMEOUT \
|
||||
-e VALIDATE_REPO=${GIT_URL} \
|
||||
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
|
||||
docker:${GIT_COMMIT} \
|
||||
hack/make.sh \
|
||||
dynbinary \
|
||||
test-integration
|
||||
'''
|
||||
}
|
||||
post {
|
||||
always {
|
||||
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -396,13 +460,16 @@ pipeline {
|
||||
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
|
||||
'''
|
||||
|
||||
sh '''
|
||||
echo "Creating s390x-integration-bundles.tar.gz"
|
||||
# exclude overlay2 directories
|
||||
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf s390x-integration-bundles.tar.gz
|
||||
'''
|
||||
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
|
||||
sh '''
|
||||
bundleName=s390x-integration
|
||||
echo "Creating ${bundleName}-bundles.tar.gz"
|
||||
# exclude overlay2 directories
|
||||
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
|
||||
'''
|
||||
|
||||
archiveArtifacts artifacts: 's390x-integration-bundles.tar.gz'
|
||||
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
|
||||
}
|
||||
}
|
||||
cleanup {
|
||||
sh 'make clean'
|
||||
@@ -410,11 +477,11 @@ pipeline {
|
||||
}
|
||||
}
|
||||
}
|
||||
stage('z-master') {
|
||||
stage('s390x integration-cli') {
|
||||
when {
|
||||
beforeAgent true
|
||||
branch 'master'
|
||||
expression { params.z }
|
||||
not { changeRequest() }
|
||||
expression { params.s390x }
|
||||
}
|
||||
agent { label 's390x-ubuntu-1604' }
|
||||
// s390x machines run on Docker 18.06, and buildkit has some bugs on that version
|
||||
@@ -435,7 +502,7 @@ pipeline {
|
||||
stage("Build dev image") {
|
||||
steps {
|
||||
sh '''
|
||||
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} -f Dockerfile .
|
||||
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
|
||||
'''
|
||||
}
|
||||
}
|
||||
@@ -450,12 +517,19 @@ pipeline {
|
||||
-e DOCKER_GRAPHDRIVER \
|
||||
-e TEST_SKIP_INTEGRATION \
|
||||
-e TIMEOUT \
|
||||
-e VALIDATE_REPO=${GIT_URL} \
|
||||
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
|
||||
docker:${GIT_COMMIT} \
|
||||
hack/make.sh \
|
||||
dynbinary \
|
||||
test-integration
|
||||
'''
|
||||
}
|
||||
post {
|
||||
always {
|
||||
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -471,12 +545,16 @@ pipeline {
|
||||
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
|
||||
'''
|
||||
|
||||
sh '''
|
||||
echo "Creating s390x-integration-cli-bundles.tar.gz"
|
||||
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf s390x-integration-cli-bundles.tar.gz
|
||||
'''
|
||||
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
|
||||
sh '''
|
||||
bundleName=s390x-integration-cli
|
||||
echo "Creating ${bundleName}-bundles.tar.gz"
|
||||
# exclude overlay2 directories
|
||||
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
|
||||
'''
|
||||
|
||||
archiveArtifacts artifacts: 's390x-integration-cli-bundles.tar.gz'
|
||||
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
|
||||
}
|
||||
}
|
||||
cleanup {
|
||||
sh 'make clean'
|
||||
@@ -484,13 +562,13 @@ pipeline {
|
||||
}
|
||||
}
|
||||
}
|
||||
stage('powerpc') {
|
||||
stage('ppc64le') {
|
||||
when {
|
||||
beforeAgent true
|
||||
expression { params.powerpc }
|
||||
expression { params.ppc64le }
|
||||
}
|
||||
agent { label 'ppc64le-ubuntu-1604' }
|
||||
// power machines run on Docker 18.06, and buildkit has some bugs on that version
|
||||
// ppc64le machines run on Docker 18.06, and buildkit has some bugs on that version
|
||||
environment { DOCKER_BUILDKIT = '0' }
|
||||
|
||||
stages {
|
||||
@@ -507,7 +585,7 @@ pipeline {
|
||||
}
|
||||
stage("Build dev image") {
|
||||
steps {
|
||||
sh 'docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} -f Dockerfile .'
|
||||
sh 'docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .'
|
||||
}
|
||||
}
|
||||
stage("Unit tests") {
|
||||
@@ -519,6 +597,8 @@ pipeline {
|
||||
-e DOCKER_EXPERIMENTAL \
|
||||
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
|
||||
-e DOCKER_GRAPHDRIVER \
|
||||
-e VALIDATE_REPO=${GIT_URL} \
|
||||
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
|
||||
docker:${GIT_COMMIT} \
|
||||
hack/test/unit
|
||||
'''
|
||||
@@ -539,14 +619,22 @@ pipeline {
|
||||
-e DOCKER_EXPERIMENTAL \
|
||||
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
|
||||
-e DOCKER_GRAPHDRIVER \
|
||||
-e TESTDEBUG \
|
||||
-e TEST_SKIP_INTEGRATION_CLI \
|
||||
-e TIMEOUT \
|
||||
-e VALIDATE_REPO=${GIT_URL} \
|
||||
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
|
||||
docker:${GIT_COMMIT} \
|
||||
hack/make.sh \
|
||||
dynbinary \
|
||||
test-integration
|
||||
'''
|
||||
}
|
||||
post {
|
||||
always {
|
||||
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -562,13 +650,16 @@ pipeline {
|
||||
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
|
||||
'''
|
||||
|
||||
sh '''
|
||||
echo "Creating powerpc-integration-bundles.tar.gz"
|
||||
# exclude overlay2 directories
|
||||
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf powerpc-integration-bundles.tar.gz
|
||||
'''
|
||||
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
|
||||
sh '''
|
||||
bundleName=ppc64le-integration
|
||||
echo "Creating ${bundleName}-bundles.tar.gz"
|
||||
# exclude overlay2 directories
|
||||
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
|
||||
'''
|
||||
|
||||
archiveArtifacts artifacts: 'powerpc-integration-bundles.tar.gz'
|
||||
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
|
||||
}
|
||||
}
|
||||
cleanup {
|
||||
sh 'make clean'
|
||||
@@ -576,14 +667,14 @@ pipeline {
|
||||
}
|
||||
}
|
||||
}
|
||||
stage('powerpc-master') {
|
||||
stage('ppc64le integration-cli') {
|
||||
when {
|
||||
beforeAgent true
|
||||
branch 'master'
|
||||
expression { params.powerpc }
|
||||
not { changeRequest() }
|
||||
expression { params.ppc64le }
|
||||
}
|
||||
agent { label 'ppc64le-ubuntu-1604' }
|
||||
// power machines run on Docker 18.06, and buildkit has some bugs on that version
|
||||
// ppc64le machines run on Docker 18.06, and buildkit has some bugs on that version
|
||||
environment { DOCKER_BUILDKIT = '0' }
|
||||
|
||||
stages {
|
||||
@@ -600,7 +691,7 @@ pipeline {
|
||||
}
|
||||
stage("Build dev image") {
|
||||
steps {
|
||||
sh 'docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} -f Dockerfile .'
|
||||
sh 'docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .'
|
||||
}
|
||||
}
|
||||
stage("Integration-cli tests") {
|
||||
@@ -614,12 +705,19 @@ pipeline {
|
||||
-e DOCKER_GRAPHDRIVER \
|
||||
-e TEST_SKIP_INTEGRATION \
|
||||
-e TIMEOUT \
|
||||
-e VALIDATE_REPO=${GIT_URL} \
|
||||
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
|
||||
docker:${GIT_COMMIT} \
|
||||
hack/make.sh \
|
||||
dynbinary \
|
||||
test-integration
|
||||
'''
|
||||
}
|
||||
post {
|
||||
always {
|
||||
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -635,12 +733,16 @@ pipeline {
|
||||
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
|
||||
'''
|
||||
|
||||
sh '''
|
||||
echo "Creating powerpc-integration-cli-bundles.tar.gz"
|
||||
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf powerpc-integration-cli-bundles.tar.gz
|
||||
'''
|
||||
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
|
||||
sh '''
|
||||
bundleName=ppc64le-integration-cli
|
||||
echo "Creating ${bundleName}-bundles.tar.gz"
|
||||
# exclude overlay2 directories
|
||||
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
|
||||
'''
|
||||
|
||||
archiveArtifacts artifacts: 'powerpc-integration-cli-bundles.tar.gz'
|
||||
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
|
||||
}
|
||||
}
|
||||
cleanup {
|
||||
sh 'make clean'
|
||||
@@ -648,15 +750,30 @@ pipeline {
|
||||
}
|
||||
}
|
||||
}
|
||||
stage('windowsRS1') {
|
||||
stage('win-RS1') {
|
||||
when {
|
||||
beforeAgent true
|
||||
expression { params.windowsRS1 }
|
||||
// Skip this stage on PRs unless the windowsRS1 checkbox is selected
|
||||
anyOf {
|
||||
not { changeRequest() }
|
||||
expression { params.windowsRS1 }
|
||||
}
|
||||
}
|
||||
environment {
|
||||
DOCKER_BUILDKIT = '0'
|
||||
DOCKER_DUT_DEBUG = '1'
|
||||
SKIP_VALIDATION_TESTS = '1'
|
||||
SOURCES_DRIVE = 'd'
|
||||
SOURCES_SUBDIR = 'gopath'
|
||||
TESTRUN_DRIVE = 'd'
|
||||
TESTRUN_SUBDIR = "CI"
|
||||
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore'
|
||||
WINDOWS_BASE_IMAGE_TAG = 'ltsc2016'
|
||||
}
|
||||
agent {
|
||||
node {
|
||||
label 'windows-rs1'
|
||||
customWorkspace 'c:\\gopath\\src\\github.com\\docker\\docker'
|
||||
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
|
||||
label 'windows-2016'
|
||||
}
|
||||
}
|
||||
stages {
|
||||
@@ -670,22 +787,54 @@ pipeline {
|
||||
steps {
|
||||
powershell '''
|
||||
$ErrorActionPreference = 'Stop'
|
||||
.\\hack\\ci\\windows.ps1
|
||||
[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12
|
||||
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
|
||||
./hack/ci/windows.ps1
|
||||
exit $LastExitCode
|
||||
'''
|
||||
}
|
||||
}
|
||||
}
|
||||
post {
|
||||
always {
|
||||
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
|
||||
powershell '''
|
||||
$bundleName="windowsRS1-integration"
|
||||
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"
|
||||
|
||||
# archiveArtifacts does not support env-vars to , so save the artifacts in a fixed location
|
||||
Compress-Archive -Path "${env:TEMP}/CIDUT.out", "${env:TEMP}/CIDUT.err" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
|
||||
'''
|
||||
|
||||
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
|
||||
}
|
||||
}
|
||||
cleanup {
|
||||
sh 'make clean'
|
||||
deleteDir()
|
||||
}
|
||||
}
|
||||
}
|
||||
stage('windowsRS5-process') {
|
||||
stage('win-RS5') {
|
||||
when {
|
||||
beforeAgent true
|
||||
expression { params.windowsRS5 }
|
||||
}
|
||||
environment {
|
||||
DOCKER_BUILDKIT = '0'
|
||||
DOCKER_DUT_DEBUG = '1'
|
||||
SKIP_VALIDATION_TESTS = '1'
|
||||
SOURCES_DRIVE = 'd'
|
||||
SOURCES_SUBDIR = 'gopath'
|
||||
TESTRUN_DRIVE = 'd'
|
||||
TESTRUN_SUBDIR = "CI"
|
||||
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore'
|
||||
WINDOWS_BASE_IMAGE_TAG = 'ltsc2019'
|
||||
}
|
||||
agent {
|
||||
node {
|
||||
label 'windows-rs5'
|
||||
customWorkspace 'c:\\gopath\\src\\github.com\\docker\\docker'
|
||||
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
|
||||
label 'windows-2019'
|
||||
}
|
||||
}
|
||||
stages {
|
||||
@@ -699,12 +848,32 @@ pipeline {
|
||||
steps {
|
||||
powershell '''
|
||||
$ErrorActionPreference = 'Stop'
|
||||
.\\hack\\ci\\windows.ps1
|
||||
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
|
||||
./hack/ci/windows.ps1
|
||||
exit $LastExitCode
|
||||
'''
|
||||
}
|
||||
}
|
||||
}
|
||||
post {
|
||||
always {
|
||||
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
|
||||
powershell '''
|
||||
$bundleName="windowsRS5-integration"
|
||||
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"
|
||||
|
||||
# archiveArtifacts does not support env-vars to , so save the artifacts in a fixed location
|
||||
Compress-Archive -Path "${env:TEMP}/CIDUT.out", "${env:TEMP}/CIDUT.err" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
|
||||
'''
|
||||
|
||||
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
|
||||
}
|
||||
}
|
||||
cleanup {
|
||||
sh 'make clean'
|
||||
deleteDir()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
MAINTAINERS
@@ -111,7 +111,7 @@
# still stumble into him in our issue tracker, or on IRC.
"erikh",

# Evan Hazlett is the creator of of the Shipyard and Interlock open source projects,
# Evan Hazlett is the creator of the Shipyard and Interlock open source projects,
# and the author of "Orca", which became the foundation of Docker Universal Control
# Plane (UCP). As a maintainer, Evan helped integrating SwarmKit (secrets, tasks)
# into the Docker engine.
3
Makefile
@@ -53,7 +53,6 @@ DOCKER_ENVS := \
-e DOCKER_TEST_HOST \
-e DOCKER_USERLANDPROXY \
-e DOCKERD_ARGS \
-e TEST_INTEGRATION_DEST \
-e TEST_INTEGRATION_DIR \
-e TEST_SKIP_INTEGRATION \
-e TEST_SKIP_INTEGRATION_CLI \
@@ -62,6 +61,7 @@ DOCKER_ENVS := \
-e TESTFLAGS \
-e TESTFLAGS_INTEGRATION \
-e TESTFLAGS_INTEGRATION_CLI \
-e TEST_FILTER \
-e TIMEOUT \
-e VALIDATE_REPO \
-e VALIDATE_BRANCH \
@@ -86,6 +86,7 @@ BIND_DIR := $(if $(BINDDIR),$(BINDDIR),$(if $(DOCKER_HOST),,bundles))
# DOCKER_MOUNT can be overriden, but use at your own risk!
ifndef DOCKER_MOUNT
DOCKER_MOUNT := $(if $(BIND_DIR),-v "$(CURDIR)/$(BIND_DIR):/go/src/github.com/docker/docker/$(BIND_DIR)")
DOCKER_MOUNT := $(if $(DOCKER_BINDDIR_MOUNT_OPTS),$(DOCKER_MOUNT):$(DOCKER_BINDDIR_MOUNT_OPTS),$(DOCKER_MOUNT))

# This allows the test suite to be able to run without worrying about the underlying fs used by the container running the daemon (e.g. aufs-on-aufs), so long as the host running the container is running a supported fs.
# The volume will be cleaned up when the container is removed due to `--rm`.
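A sketch of how the new `DOCKER_BINDDIR_MOUNT_OPTS` hook might be used; the `make shell` target and the `ro` value here are illustrative assumptions, not part of this change:

```
# Append ":ro" to the bundles bind mount before entering the dev container,
# so DOCKER_MOUNT expands to -v "$(CURDIR)/bundles:/go/src/github.com/docker/docker/bundles:ro"
# (assuming the default BIND_DIR of "bundles").
DOCKER_BINDDIR_MOUNT_OPTS=ro make shell
```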
2
NOTICE
@@ -3,7 +3,7 @@ Copyright 2012-2017 Docker, Inc.

This product includes software developed at Docker, Inc. (https://www.docker.com).

This product contains software (https://github.com/kr/pty) developed
This product contains software (https://github.com/creack/pty) developed
by Keith Rarick, licensed under the MIT License.

The following is courtesy of our legal counsel:
23 TESTING.md
@@ -67,6 +67,8 @@ If a remote daemon is detected, the test will be skipped.

## Running tests

### Unit Tests

To run the unit test suite:

```
@@ -82,12 +84,33 @@ The following environment variables may be used to run a subset of tests:
* `TESTFLAGS` - flags passed to `go test`, to run tests which match a pattern
use `TESTFLAGS="-test.run TestNameOrPrefix"`

### Integration Tests

To run the integration test suite:

```
make test-integration
```

This make target runs both the "integration" suite and the "integration-cli"
suite.

You can specify which integration test dirs to build and run by specifying
the list of dirs in the TEST_INTEGRATION_DIR environment variable.

You can also explicitly skip either suite by setting (any value) in
TEST_SKIP_INTEGRATION and/or TEST_SKIP_INTEGRATION_CLI environment variables.

Flags specific to each suite can be set in the TESTFLAGS_INTEGRATION and
TESTFLAGS_INTEGRATION_CLI environment variables.

If all you want is to specity a test filter to run, you can set the
`TEST_FILTER` environment variable. This ends up getting passed directly to `go
test -run` (or `go test -check-f`, dpenending on the test suite). It will also
automatically set the other above mentioned environment variables accordingly.
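As a rough illustration of how these variables can be combined (the test name and directory below are placeholders, not values taken from this page):

```
# Run only the "integration" suite, skip "integration-cli", and filter to one test name.
TEST_SKIP_INTEGRATION_CLI=1 TEST_FILTER=TestHealthCheck make test-integration

# Restrict the run to a single integration test directory.
TEST_INTEGRATION_DIR=./integration/container make test-integration
```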

### Go Version

You can change a version of golang used for building stuff that is being tested
by setting `GO_VERSION` variable, for example:
317
api/swagger.yaml
@@ -618,6 +618,71 @@ definitions:
|
||||
description: "Start period for the container to initialize before starting health-retries countdown in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit."
|
||||
type: "integer"
|
||||
|
||||
Health:
|
||||
description: |
|
||||
Health stores information about the container's healthcheck results.
|
||||
type: "object"
|
||||
properties:
|
||||
Status:
|
||||
description: |
|
||||
Status is one of `none`, `starting`, `healthy` or `unhealthy`
|
||||
|
||||
- "none" Indicates there is no healthcheck
|
||||
- "starting" Starting indicates that the container is not yet ready
|
||||
- "healthy" Healthy indicates that the container is running correctly
|
||||
- "unhealthy" Unhealthy indicates that the container has a problem
|
||||
type: "string"
|
||||
enum:
|
||||
- "none"
|
||||
- "starting"
|
||||
- "healthy"
|
||||
- "unhealthy"
|
||||
example: "healthy"
|
||||
FailingStreak:
|
||||
description: "FailingStreak is the number of consecutive failures"
|
||||
type: "integer"
|
||||
example: 0
|
||||
Log:
|
||||
type: "array"
|
||||
description: |
|
||||
Log contains the last few results (oldest first)
|
||||
items:
|
||||
x-nullable: true
|
||||
$ref: "#/definitions/HealthcheckResult"
|
||||
|
||||
HealthcheckResult:
|
||||
description: |
|
||||
HealthcheckResult stores information about a single run of a healthcheck probe
|
||||
type: "object"
|
||||
properties:
|
||||
Start:
|
||||
description: |
|
||||
Date and time at which this check started in
|
||||
[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
|
||||
type: "string"
|
||||
format: "date-time"
|
||||
example: "2020-01-04T10:44:24.496525531Z"
|
||||
End:
|
||||
description: |
|
||||
Date and time at which this check ended in
|
||||
[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
|
||||
type: "string"
|
||||
format: "dateTime"
|
||||
example: "2020-01-04T10:45:21.364524523Z"
|
||||
ExitCode:
|
||||
description: |
|
||||
ExitCode meanings:
|
||||
|
||||
- `0` healthy
|
||||
- `1` unhealthy
|
||||
- `2` reserved (considered unhealthy)
|
||||
- other values: error running probe
|
||||
type: "integer"
|
||||
example: 0
|
||||
Output:
|
||||
description: "Output from last check"
|
||||
type: "string"
|
||||
|
||||
HostConfig:
|
||||
description: "Container configuration that depends on the host we are running on"
|
||||
allOf:
|
||||
@@ -628,12 +693,44 @@ definitions:
|
||||
Binds:
|
||||
type: "array"
|
||||
description: |
|
||||
A list of volume bindings for this container. Each volume binding is a string in one of these forms:
|
||||
A list of volume bindings for this container. Each volume binding
|
||||
is a string in one of these forms:
|
||||
|
||||
- `host-src:container-dest` to bind-mount a host path into the container. Both `host-src`, and `container-dest` must be an _absolute_ path.
|
||||
- `host-src:container-dest:ro` to make the bind mount read-only inside the container. Both `host-src`, and `container-dest` must be an _absolute_ path.
|
||||
- `volume-name:container-dest` to bind-mount a volume managed by a volume driver into the container. `container-dest` must be an _absolute_ path.
|
||||
- `volume-name:container-dest:ro` to mount the volume read-only inside the container. `container-dest` must be an _absolute_ path.
|
||||
- `host-src:container-dest[:options]` to bind-mount a host path
|
||||
into the container. Both `host-src`, and `container-dest` must
|
||||
be an _absolute_ path.
|
||||
- `volume-name:container-dest[:options]` to bind-mount a volume
|
||||
managed by a volume driver into the container. `container-dest`
|
||||
must be an _absolute_ path.
|
||||
|
||||
`options` is an optional, comma-delimited list of:
|
||||
|
||||
- `nocopy` disables automatic copying of data from the container
|
||||
path to the volume. The `nocopy` flag only applies to named volumes.
|
||||
- `[ro|rw]` mounts a volume read-only or read-write, respectively.
|
||||
If omitted or set to `rw`, volumes are mounted read-write.
|
||||
- `[z|Z]` applies SELinux labels to allow or deny multiple containers
|
||||
to read and write to the same volume.
|
||||
- `z`: a _shared_ content label is applied to the content. This
|
||||
label indicates that multiple containers can share the volume
|
||||
content, for both reading and writing.
|
||||
- `Z`: a _private unshared_ label is applied to the content.
|
||||
This label indicates that only the current container can use
|
||||
a private volume. Labeling systems such as SELinux require
|
||||
proper labels to be placed on volume content that is mounted
|
||||
into a container. Without a label, the security system can
|
||||
prevent a container's processes from using the content. By
|
||||
default, the labels set by the host operating system are not
|
||||
modified.
|
||||
- `[[r]shared|[r]slave|[r]private]` specifies mount
|
||||
[propagation behavior](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt).
|
||||
This only applies to bind-mounted volumes, not internal volumes
|
||||
or named volumes. Mount propagation requires the source mount
|
||||
point (the location where the source directory is mounted in the
|
||||
host operating system) to have the correct propagation properties.
|
||||
For shared volumes, the source mount point must be set to `shared`.
|
||||
For slave volumes, the mount must be set to either `shared` or
|
||||
`slave`.
|
||||
items:
|
||||
type: "string"
|
||||
ContainerIDFile:
|
||||
@@ -2970,16 +3067,10 @@ definitions:
|
||||
description: "Runtime is the type of runtime specified for the task executor."
|
||||
type: "string"
|
||||
Networks:
|
||||
description: "Specifies which networks the service should attach to."
|
||||
type: "array"
|
||||
items:
|
||||
type: "object"
|
||||
properties:
|
||||
Target:
|
||||
type: "string"
|
||||
Aliases:
|
||||
type: "array"
|
||||
items:
|
||||
type: "string"
|
||||
$ref: "#/definitions/NetworkAttachmentConfig"
|
||||
LogDriver:
|
||||
description: "Specifies the log driver to use for tasks created from this spec. If not present, the default one for the swarm will be used, finally falling back to the engine default if not specified."
|
||||
type: "object"
|
||||
@@ -3225,17 +3316,11 @@ definitions:
|
||||
- "stop-first"
|
||||
- "start-first"
|
||||
Networks:
|
||||
description: "Array of network names or IDs to attach the service to."
|
||||
description: "Specifies which networks the service should attach to."
|
||||
type: "array"
|
||||
items:
|
||||
type: "object"
|
||||
properties:
|
||||
Target:
|
||||
type: "string"
|
||||
Aliases:
|
||||
type: "array"
|
||||
items:
|
||||
type: "string"
|
||||
$ref: "#/definitions/NetworkAttachmentConfig"
|
||||
|
||||
EndpointSpec:
|
||||
$ref: "#/definitions/EndpointSpec"
|
||||
|
||||
@@ -3262,7 +3347,7 @@ definitions:
|
||||
|
||||
<p><br /></p>
|
||||
|
||||
- "ingress" makes the target port accessible on on every node,
|
||||
- "ingress" makes the target port accessible on every node,
|
||||
regardless of whether there is a task for the service running on
|
||||
that node or not.
|
||||
- "host" bypasses the routing mesh and publish the port directly on
|
||||
@@ -3280,8 +3365,8 @@ definitions:
|
||||
type: "object"
|
||||
properties:
|
||||
Mode:
|
||||
description: "The mode of resolution to use for internal load balancing
|
||||
between tasks."
|
||||
description: |
|
||||
The mode of resolution to use for internal load balancing between tasks.
|
||||
type: "string"
|
||||
enum:
|
||||
- "vip"
|
||||
@@ -3609,6 +3694,70 @@ definitions:
|
||||
Spec:
|
||||
$ref: "#/definitions/ConfigSpec"
|
||||
|
||||
ContainerState:
|
||||
description: |
|
||||
ContainerState stores container's running state. It's part of ContainerJSONBase
|
||||
and will be returned by the "inspect" command.
|
||||
type: "object"
|
||||
properties:
|
||||
Status:
|
||||
description: |
|
||||
String representation of the container state. Can be one of "created",
|
||||
"running", "paused", "restarting", "removing", "exited", or "dead".
|
||||
type: "string"
|
||||
enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"]
|
||||
example: "running"
|
||||
Running:
|
||||
description: |
|
||||
Whether this container is running.
|
||||
|
||||
Note that a running container can be _paused_. The `Running` and `Paused`
|
||||
booleans are not mutually exclusive:
|
||||
|
||||
When pausing a container (on Linux), the freezer cgroup is used to suspend
|
||||
all processes in the container. Freezing the process requires the process to
|
||||
be running. As a result, paused containers are both `Running` _and_ `Paused`.
|
||||
|
||||
Use the `Status` field instead to determine if a container's state is "running".
|
||||
type: "boolean"
|
||||
example: true
|
||||
Paused:
|
||||
description: "Whether this container is paused."
|
||||
type: "boolean"
|
||||
example: false
|
||||
Restarting:
|
||||
description: "Whether this container is restarting."
|
||||
type: "boolean"
|
||||
example: false
|
||||
OOMKilled:
|
||||
description: "Whether this container has been killed because it ran out of memory."
|
||||
type: "boolean"
|
||||
example: false
|
||||
Dead:
|
||||
type: "boolean"
|
||||
example: false
|
||||
Pid:
|
||||
description: "The process ID of this container"
|
||||
type: "integer"
|
||||
example: 1234
|
||||
ExitCode:
|
||||
description: "The last exit code of this container"
|
||||
type: "integer"
|
||||
example: 0
|
||||
Error:
|
||||
type: "string"
|
||||
StartedAt:
|
||||
description: "The time when this container was last started."
|
||||
type: "string"
|
||||
example: "2020-01-06T09:06:59.461876391Z"
|
||||
FinishedAt:
|
||||
description: "The time when this container last exited."
|
||||
type: "string"
|
||||
example: "2020-01-06T09:07:59.461876391Z"
|
||||
Health:
|
||||
x-nullable: true
|
||||
$ref: "#/definitions/Health"
|
||||
|
||||
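A hedged usage sketch of the ContainerState fields defined above, using the Go SDK: read `Status` rather than the `Running`/`Paused` booleans, since a paused container reports both as true.

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

// printState inspects a container and reports its state. Status is the
// authoritative single value; Running and Paused are not mutually exclusive.
func printState(ctx context.Context, cli *client.Client, containerID string) error {
	inspect, err := cli.ContainerInspect(ctx, containerID)
	if err != nil {
		return err
	}
	fmt.Printf("status=%s running=%v paused=%v exit=%d\n",
		inspect.State.Status, inspect.State.Running, inspect.State.Paused, inspect.State.ExitCode)
	return nil
}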
SystemInfo:
|
||||
type: "object"
|
||||
properties:
|
||||
@@ -4407,6 +4556,24 @@ definitions:
|
||||
IP address and ports at which this node can be reached.
|
||||
type: "string"
|
||||
|
||||
NetworkAttachmentConfig:
|
||||
description: "Specifies how a service should be attached to a particular network."
|
||||
type: "object"
|
||||
properties:
|
||||
Target:
|
||||
description: "The target network for attachment. Must be a network name or ID."
|
||||
type: "string"
|
||||
Aliases:
|
||||
description: "Discoverable alternate names for the service on this network."
|
||||
type: "array"
|
||||
items:
|
||||
type: "string"
|
||||
DriverOpts:
|
||||
description: "Driver attachment options for the network target"
|
||||
type: "object"
|
||||
additionalProperties:
|
||||
type: "string"
|
||||
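As a sketch (network name, alias, and driver option are invented values), the same attachment expressed with the Go swarm types:

package main

import "github.com/docker/docker/api/types/swarm"

// attachment attaches a service's tasks to the "app-overlay" network and
// makes them additionally resolvable as "db" on that network.
var attachment = swarm.NetworkAttachmentConfig{
	Target:     "app-overlay",
	Aliases:    []string{"db"},
	DriverOpts: map[string]string{"com.example.option": "value"},
}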
|
||||
paths:
|
||||
/containers/json:
|
||||
get:
|
||||
@@ -4822,52 +4989,8 @@ paths:
|
||||
items:
|
||||
type: "string"
|
||||
State:
|
||||
description: "The state of the container."
|
||||
type: "object"
|
||||
properties:
|
||||
Status:
|
||||
description: |
|
||||
The status of the container. For example, `"running"` or `"exited"`.
|
||||
type: "string"
|
||||
enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"]
|
||||
Running:
|
||||
description: |
|
||||
Whether this container is running.
|
||||
|
||||
Note that a running container can be _paused_. The `Running` and `Paused`
|
||||
booleans are not mutually exclusive:
|
||||
|
||||
When pausing a container (on Linux), the cgroups freezer is used to suspend
|
||||
all processes in the container. Freezing the process requires the process to
|
||||
be running. As a result, paused containers are both `Running` _and_ `Paused`.
|
||||
|
||||
Use the `Status` field instead to determine if a container's state is "running".
|
||||
type: "boolean"
|
||||
Paused:
|
||||
description: "Whether this container is paused."
|
||||
type: "boolean"
|
||||
Restarting:
|
||||
description: "Whether this container is restarting."
|
||||
type: "boolean"
|
||||
OOMKilled:
|
||||
description: "Whether this container has been killed because it ran out of memory."
|
||||
type: "boolean"
|
||||
Dead:
|
||||
type: "boolean"
|
||||
Pid:
|
||||
description: "The process ID of this container"
|
||||
type: "integer"
|
||||
ExitCode:
|
||||
description: "The last exit code of this container"
|
||||
type: "integer"
|
||||
Error:
|
||||
type: "string"
|
||||
StartedAt:
|
||||
description: "The time when this container was last started."
|
||||
type: "string"
|
||||
FinishedAt:
|
||||
description: "The time when this container last exited."
|
||||
type: "string"
|
||||
x-nullable: true
|
||||
$ref: "#/definitions/ContainerState"
|
||||
Image:
|
||||
description: "The container's image"
|
||||
type: "string"
|
||||
@@ -4888,6 +5011,8 @@ paths:
|
||||
type: "integer"
|
||||
Driver:
|
||||
type: "string"
|
||||
Platform:
|
||||
type: "string"
|
||||
MountLabel:
|
||||
type: "string"
|
||||
ProcessLabel:
|
||||
@@ -4937,6 +5062,8 @@ paths:
|
||||
Domainname: ""
|
||||
Env:
|
||||
- "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
|
||||
Healthcheck:
|
||||
Test: ["CMD-SHELL", "exit 0"]
|
||||
Hostname: "ba033ac44011"
|
||||
Image: "ubuntu"
|
||||
Labels:
|
||||
@@ -5048,6 +5175,14 @@ paths:
|
||||
Error: ""
|
||||
ExitCode: 9
|
||||
FinishedAt: "2015-01-06T15:47:32.080254511Z"
|
||||
Health:
|
||||
Status: "healthy"
|
||||
FailingStreak: 0
|
||||
Log:
|
||||
- Start: "2019-12-22T10:59:05.6385933Z"
|
||||
End: "2019-12-22T10:59:05.8078452Z"
|
||||
ExitCode: 0
|
||||
Output: ""
|
||||
OOMKilled: false
|
||||
Dead: false
|
||||
Paused: false
|
||||
@@ -5507,8 +5642,6 @@ paths:
|
||||
description: "no error"
|
||||
304:
|
||||
description: "container already started"
|
||||
schema:
|
||||
$ref: "#/definitions/ErrorResponse"
|
||||
404:
|
||||
description: "no such container"
|
||||
schema:
|
||||
@@ -5540,8 +5673,6 @@ paths:
|
||||
description: "no error"
|
||||
304:
|
||||
description: "container already stopped"
|
||||
schema:
|
||||
$ref: "#/definitions/ErrorResponse"
|
||||
404:
|
||||
description: "no such container"
|
||||
schema:
|
||||
@@ -5732,9 +5863,9 @@ paths:
|
||||
post:
|
||||
summary: "Pause a container"
|
||||
description: |
|
||||
Use the cgroups freezer to suspend all processes in a container.
|
||||
Use the freezer cgroup to suspend all processes in a container.
|
||||
|
||||
Traditionally, when suspending a process the `SIGSTOP` signal is used, which is observable by the process being suspended. With the cgroups freezer the process is unaware, and unable to capture, that it is being suspended, and subsequently resumed.
|
||||
Traditionally, when suspending a process the `SIGSTOP` signal is used, which is observable by the process being suspended. With the freezer cgroup the process is unaware, and unable to capture, that it is being suspended, and subsequently resumed.
|
||||
operationId: "ContainerPause"
|
||||
responses:
|
||||
204:
|
||||
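A brief usage sketch with the Go SDK (the container ID is assumed to exist); because the freezer cgroup is used, the paused processes observe neither SIGSTOP nor SIGCONT:

package main

import (
	"context"

	"github.com/docker/docker/client"
)

// pauseBriefly suspends all processes in the container and then resumes them.
func pauseBriefly(ctx context.Context, cli *client.Client, containerID string) error {
	if err := cli.ContainerPause(ctx, containerID); err != nil {
		return err
	}
	return cli.ContainerUnpause(ctx, containerID)
}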
@@ -6457,10 +6588,11 @@ paths:
|
||||
type: "string"
|
||||
- name: "networkmode"
|
||||
in: "query"
|
||||
description: "Sets the networking mode for the run commands during
|
||||
build. Supported standard values are: `bridge`, `host`, `none`, and
|
||||
`container:<name|id>`. Any other value is taken as a custom network's
|
||||
name to which this container should connect to."
|
||||
description: |
|
||||
Sets the networking mode for the run commands during build. Supported
|
||||
standard values are: `bridge`, `host`, `none`, and `container:<name|id>`.
|
||||
Any other value is taken as a custom network's name or ID to which this
|
||||
container should connect to.
|
||||
type: "string"
|
||||
- name: "Content-type"
|
||||
in: "header"
|
||||
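A sketch of passing `networkmode` from the Go SDK ("build-net" is a made-up user-defined network name):

package main

import (
	"context"
	"io"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

// buildOnNetwork runs the build's RUN steps attached to the "build-net" network.
// The caller must read and close resp.Body to complete the build.
func buildOnNetwork(ctx context.Context, cli *client.Client, buildCtx io.Reader) (types.ImageBuildResponse, error) {
	return cli.ImageBuild(ctx, buildCtx, types.ImageBuildOptions{
		Tags:        []string{"example/app:latest"},
		NetworkMode: "build-net",
	})
}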
@@ -6605,6 +6737,10 @@ paths:
|
||||
in: "query"
|
||||
description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled."
|
||||
type: "string"
|
||||
- name: "message"
|
||||
in: "query"
|
||||
description: "Set commit message for imported image."
|
||||
type: "string"
|
||||
- name: "inputImage"
|
||||
in: "body"
|
||||
description: "Image content if the value `-` has been specified in fromSrc query parameter"
|
||||
@@ -9549,17 +9685,19 @@ paths:
|
||||
type: "integer"
|
||||
- name: "registryAuthFrom"
|
||||
in: "query"
|
||||
description: |
|
||||
If the `X-Registry-Auth` header is not specified, this parameter
|
||||
indicates where to find registry authorization credentials.
|
||||
type: "string"
|
||||
description: "If the X-Registry-Auth header is not specified, this
|
||||
parameter indicates where to find registry authorization credentials. The
|
||||
valid values are `spec` and `previous-spec`."
|
||||
enum: ["spec", "previous-spec"]
|
||||
default: "spec"
|
||||
- name: "rollback"
|
||||
in: "query"
|
||||
description: |
|
||||
Set to this parameter to `previous` to cause a server-side rollback
|
||||
to the previous service spec. The supplied spec will be ignored in
|
||||
this case.
|
||||
type: "string"
|
||||
description: "Set to this parameter to `previous` to cause a
|
||||
server-side rollback to the previous service spec. The supplied spec will be
|
||||
ignored in this case."
|
||||
- name: "X-Registry-Auth"
|
||||
in: "header"
|
||||
description: "A base64-encoded auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)"
|
||||
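A sketch of how these parameters surface in the Go SDK; the service ID, version, and spec are assumed to come from a prior ServiceInspectWithRaw call:

package main

import (
	"context"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
)

// rollbackService asks the daemon for a server-side rollback, reusing the
// registry credentials stored with the previous service spec.
func rollbackService(ctx context.Context, cli *client.Client, id string, version swarm.Version, spec swarm.ServiceSpec) error {
	_, err := cli.ServiceUpdate(ctx, id, version, spec, types.ServiceUpdateOptions{
		RegistryAuthFrom: types.RegistryAuthFromPreviousSpec,
		Rollback:         "previous",
	})
	return err
}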
@@ -10379,9 +10517,6 @@ paths:
|
||||
description: |
|
||||
Start a new interactive session with a server. Session allows server to call back to the client for advanced capabilities.
|
||||
|
||||
> **Note**: This endpoint is *experimental* and only available if the daemon is started with experimental
|
||||
> features enabled. The specifications for this endpoint may still change in a future version of the API.
|
||||
|
||||
### Hijacking
|
||||
|
||||
This endpoint hijacks the HTTP connection to HTTP2 transport that allows the client to expose gRPC services on that connection.
|
||||
@@ -10415,4 +10550,4 @@ paths:
|
||||
description: "server error"
|
||||
schema:
|
||||
$ref: "#/definitions/ErrorResponse"
|
||||
tags: ["Session (experimental)"]
|
||||
tags: ["Session"]
|
||||
|
||||
@@ -36,6 +36,15 @@ func NewArgs(initialArgs ...KeyValuePair) Args {
|
||||
return args
|
||||
}
|
||||
|
||||
// Keys returns all the keys in list of Args
|
||||
func (args Args) Keys() []string {
|
||||
keys := make([]string, 0, len(args.fields))
|
||||
for k := range args.fields {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
return keys
|
||||
}
|
||||
|
||||
// MarshalJSON returns a JSON byte representation of the Args
|
||||
func (args Args) MarshalJSON() ([]byte, error) {
|
||||
if len(args.fields) == 0 {
|
||||
|
||||
@@ -828,9 +828,9 @@ type resolverCache struct {
|
||||
}
|
||||
|
||||
type cachedResolver struct {
|
||||
counter int64 // needs to be 64bit aligned for 32bit systems
|
||||
timeout time.Time
|
||||
remotes.Resolver
|
||||
counter int64
|
||||
}
|
||||
|
||||
func (cr *cachedResolver) Resolve(ctx context.Context, ref string) (name string, desc ocispec.Descriptor, err error) {
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"github.com/containerd/containerd/content/local"
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/builder/builder-next/adapters/containerimage"
|
||||
"github.com/docker/docker/builder/builder-next/adapters/localinlinecache"
|
||||
"github.com/docker/docker/builder/builder-next/adapters/snapshot"
|
||||
@@ -195,10 +196,7 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
|
||||
ResolveCacheExporterFuncs: map[string]remotecache.ResolveCacheExporterFunc{
|
||||
"inline": inlineremotecache.ResolveCacheExporterFunc(),
|
||||
},
|
||||
Entitlements: []string{
|
||||
string(entitlements.EntitlementNetworkHost),
|
||||
// string(entitlements.EntitlementSecurityInsecure),
|
||||
},
|
||||
Entitlements: getEntitlements(opt.BuilderConfig),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -232,7 +230,7 @@ func getGCPolicy(conf config.BuilderConfig, root string) ([]client.PruneInfo, er
|
||||
gcPolicy[i], err = toBuildkitPruneInfo(types.BuildCachePruneOptions{
|
||||
All: p.All,
|
||||
KeepStorage: b,
|
||||
Filters: p.Filter,
|
||||
Filters: filters.Args(p.Filter),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -254,3 +252,15 @@ func parsePlatforms(platformsStr []string) ([]specs.Platform, error) {
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func getEntitlements(conf config.BuilderConfig) []string {
	var ents []string
	// In case of no config settings, NetworkHost should be enabled & SecurityInsecure must be disabled.
	if conf.Entitlements.NetworkHost == nil || *conf.Entitlements.NetworkHost {
		ents = append(ents, string(entitlements.EntitlementNetworkHost))
	}
	if conf.Entitlements.SecurityInsecure != nil && *conf.Entitlements.SecurityInsecure {
		ents = append(ents, string(entitlements.EntitlementSecurityInsecure))
	}
	return ents
}
|
||||
|
||||
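A hedged daemon.json fragment matching the struct tags referenced above (the outer key corresponds to the untagged `Entitlements` field, which Go's JSON decoder matches case-insensitively), enabling the security-insecure entitlement while leaving network-host at its default:

{
  "builder": {
    "entitlements": {
      "security-insecure": true
    }
  }
}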
@@ -10,6 +10,7 @@ import (
|
||||
|
||||
"github.com/docker/docker/daemon/config"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
"github.com/docker/libnetwork"
|
||||
"github.com/moby/buildkit/executor"
|
||||
"github.com/moby/buildkit/executor/oci"
|
||||
@@ -100,11 +101,12 @@ func (iface *lnInterface) Set(s *specs.Spec) {
|
||||
logrus.WithError(iface.err).Error("failed to set networking spec")
|
||||
return
|
||||
}
|
||||
shortNetCtlrID := stringid.TruncateID(iface.provider.NetworkController.ID())
|
||||
// attach netns to bridge within the container namespace, using reexec in a prestart hook
|
||||
s.Hooks = &specs.Hooks{
|
||||
Prestart: []specs.Hook{{
|
||||
Path: filepath.Join("/proc", strconv.Itoa(os.Getpid()), "exe"),
|
||||
Args: []string{"libnetwork-setkey", "-exec-root=" + iface.provider.Config().Daemon.ExecRoot, iface.sbx.ContainerID(), iface.provider.NetworkController.ID()},
|
||||
Args: []string{"libnetwork-setkey", "-exec-root=" + iface.provider.Config().Daemon.ExecRoot, iface.sbx.ContainerID(), shortNetCtlrID},
|
||||
}},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -35,6 +35,7 @@ func (h *reqBodyHandler) newRequest(rc io.ReadCloser) (string, func()) {
|
||||
h.mu.Lock()
|
||||
delete(h.requests, id)
|
||||
h.mu.Unlock()
|
||||
rc.Close()
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -24,8 +24,11 @@ func init() {
|
||||
reexec.Init()
|
||||
}
|
||||
|
||||
func initDispatchTestCases() []dispatchTestCase {
|
||||
dispatchTestCases := []dispatchTestCase{
|
||||
func TestDispatch(t *testing.T) {
|
||||
if runtime.GOOS != "windows" {
|
||||
skip.If(t, os.Getuid() != 0, "skipping test that requires root")
|
||||
}
|
||||
testCases := []dispatchTestCase{
|
||||
{
|
||||
name: "ADD multiple files to file",
|
||||
cmd: &instructions.AddCommand{SourcesAndDest: instructions.SourcesAndDest{
|
||||
@@ -92,56 +95,46 @@ func initDispatchTestCases() []dispatchTestCase {
|
||||
}},
|
||||
expectedError: "source can't be a URL for COPY",
|
||||
files: nil,
|
||||
}}
|
||||
|
||||
return dispatchTestCases
|
||||
}
|
||||
|
||||
func TestDispatch(t *testing.T) {
|
||||
if runtime.GOOS != "windows" {
|
||||
skip.If(t, os.Getuid() != 0, "skipping test that requires root")
|
||||
},
|
||||
}
|
||||
testCases := initDispatchTestCases()
|
||||
|
||||
for _, testCase := range testCases {
|
||||
executeTestCase(t, testCase)
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test")
|
||||
defer cleanup()
|
||||
|
||||
for filename, content := range tc.files {
|
||||
createTestTempFile(t, contextDir, filename, content, 0777)
|
||||
}
|
||||
|
||||
tarStream, err := archive.Tar(contextDir, archive.Uncompressed)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Error when creating tar stream: %s", err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err = tarStream.Close(); err != nil {
|
||||
t.Fatalf("Error when closing tar stream: %s", err)
|
||||
}
|
||||
}()
|
||||
|
||||
context, err := remotecontext.FromArchive(tarStream)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Error when creating tar context: %s", err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err = context.Close(); err != nil {
|
||||
t.Fatalf("Error when closing tar context: %s", err)
|
||||
}
|
||||
}()
|
||||
|
||||
b := newBuilderWithMockBackend()
|
||||
sb := newDispatchRequest(b, '`', context, NewBuildArgs(make(map[string]*string)), newStagesBuildResults())
|
||||
err = dispatch(sb, tc.cmd)
|
||||
assert.Check(t, is.ErrorContains(err, tc.expectedError))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func executeTestCase(t *testing.T, testCase dispatchTestCase) {
|
||||
contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test")
|
||||
defer cleanup()
|
||||
|
||||
for filename, content := range testCase.files {
|
||||
createTestTempFile(t, contextDir, filename, content, 0777)
|
||||
}
|
||||
|
||||
tarStream, err := archive.Tar(contextDir, archive.Uncompressed)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Error when creating tar stream: %s", err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err = tarStream.Close(); err != nil {
|
||||
t.Fatalf("Error when closing tar stream: %s", err)
|
||||
}
|
||||
}()
|
||||
|
||||
context, err := remotecontext.FromArchive(tarStream)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Error when creating tar context: %s", err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err = context.Close(); err != nil {
|
||||
t.Fatalf("Error when closing tar context: %s", err)
|
||||
}
|
||||
}()
|
||||
|
||||
b := newBuilderWithMockBackend()
|
||||
sb := newDispatchRequest(b, '`', context, NewBuildArgs(make(map[string]*string)), newStagesBuildResults())
|
||||
err = dispatch(sb, testCase.cmd)
|
||||
assert.Check(t, is.ErrorContains(err, testCase.expectedError))
|
||||
}
|
||||
|
||||
@@ -178,7 +178,13 @@ func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResp
|
||||
// this is localised - for example in French the error would be
|
||||
// `open //./pipe/docker_engine: Le fichier spécifié est introuvable.`
|
||||
if strings.Contains(err.Error(), `open //./pipe/docker_engine`) {
	err = errors.New(err.Error() + " In the default daemon configuration on Windows, the docker client must be run elevated to connect. This error may also indicate that the docker daemon is not running.")
	// Check whether the client is running with elevated privileges:
	// opening \\.\PHYSICALDRIVE0 only succeeds in an elevated session.
	if f, elevatedErr := os.Open("\\\\.\\PHYSICALDRIVE0"); elevatedErr == nil {
		f.Close()
		err = errors.Wrap(err, "This error may indicate that the docker daemon is not running.")
	} else {
		err = errors.Wrap(err, "In the default daemon configuration on Windows, the docker client must be run with elevated privileges to connect.")
	}
}
|
||||
|
||||
return serverResp, errors.Wrap(err, "error during connect")
|
||||
|
||||
@@ -3,8 +3,10 @@ package main
|
||||
import (
|
||||
"runtime"
|
||||
|
||||
"github.com/docker/docker/daemon"
|
||||
"github.com/docker/docker/daemon/config"
|
||||
"github.com/docker/docker/opts"
|
||||
"github.com/docker/docker/plugin/executor/containerd"
|
||||
"github.com/docker/docker/registry"
|
||||
"github.com/spf13/pflag"
|
||||
)
|
||||
@@ -85,7 +87,13 @@ func installCommonConfigFlags(conf *config.Config, flags *pflag.FlagSet) error {
|
||||
|
||||
conf.MaxConcurrentDownloads = &maxConcurrentDownloads
|
||||
conf.MaxConcurrentUploads = &maxConcurrentUploads
|
||||
return nil
|
||||
|
||||
flags.StringVar(&conf.ContainerdNamespace, "containerd-namespace", daemon.ContainersNamespace, "Containerd namespace to use")
|
||||
if err := flags.MarkHidden("containerd-namespace"); err != nil {
|
||||
return err
|
||||
}
|
||||
flags.StringVar(&conf.ContainerdPluginNamespace, "containerd-plugins-namespace", containerd.PluginNamespace, "Containerd namespace to use for plugins")
|
||||
return flags.MarkHidden("containerd-plugins-namespace")
|
||||
}
|
||||
|
||||
func installRegistryServiceFlags(options *registry.ServiceOptions, flags *pflag.FlagSet) {
|
||||
|
||||
@@ -608,11 +608,17 @@ func newAPIServerConfig(cli *DaemonCli) (*apiserver.Config, error) {
|
||||
|
||||
func loadListeners(cli *DaemonCli, serverConfig *apiserver.Config) ([]string, error) {
|
||||
var hosts []string
|
||||
seen := make(map[string]struct{}, len(cli.Config.Hosts))
|
||||
|
||||
for i := 0; i < len(cli.Config.Hosts); i++ {
|
||||
var err error
|
||||
if cli.Config.Hosts[i], err = dopts.ParseHost(cli.Config.TLS, honorXDG, cli.Config.Hosts[i]); err != nil {
|
||||
return nil, errors.Wrapf(err, "error parsing -H %s", cli.Config.Hosts[i])
|
||||
}
|
||||
if _, ok := seen[cli.Config.Hosts[i]]; ok {
|
||||
continue
|
||||
}
|
||||
seen[cli.Config.Hosts[i]] = struct{}{}
|
||||
|
||||
protoAddr := cli.Config.Hosts[i]
|
||||
protoAddrParts := strings.SplitN(protoAddr, "://", 2)
|
||||
|
||||
@@ -730,7 +730,7 @@ func (i *rio) Close() error {
|
||||
}
|
||||
|
||||
func (i *rio) Wait() {
|
||||
i.sc.Wait()
|
||||
i.sc.Wait(context.Background())
|
||||
|
||||
i.IO.Wait()
|
||||
}
|
||||
|
||||
@@ -190,7 +190,7 @@ func (container *Container) UnmountIpcMount() error {
|
||||
if shmPath == "" {
|
||||
return nil
|
||||
}
|
||||
if err = mount.Unmount(shmPath); err != nil && !os.IsNotExist(err) {
|
||||
if err = mount.Unmount(shmPath); err != nil && !os.IsNotExist(errors.Cause(err)) {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
||||
@@ -22,7 +22,7 @@ func (s *Health) String() string {
|
||||
case types.Starting:
|
||||
return "health: starting"
|
||||
default: // Healthy and Unhealthy are clear on their own
|
||||
return s.Health.Status
|
||||
return status
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -17,7 +17,7 @@ import (
|
||||
type State struct {
|
||||
sync.Mutex
|
||||
// Note that `Running` and `Paused` are not mutually exclusive:
|
||||
// When pausing a container (on Linux), the cgroups freezer is used to suspend
|
||||
// When pausing a container (on Linux), the freezer cgroup is used to suspend
|
||||
// all processes in the container. Freezing the process requires the process to
|
||||
// be running. As a result, paused containers are both `Running` _and_ `Paused`.
|
||||
Running bool
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package stream // import "github.com/docker/docker/container/stream"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
@@ -24,11 +25,12 @@ import (
|
||||
// copied and delivered to all StdoutPipe and StderrPipe consumers, using
|
||||
// a kind of "broadcaster".
|
||||
type Config struct {
|
||||
sync.WaitGroup
|
||||
wg sync.WaitGroup
|
||||
stdout *broadcaster.Unbuffered
|
||||
stderr *broadcaster.Unbuffered
|
||||
stdin io.ReadCloser
|
||||
stdinPipe io.WriteCloser
|
||||
dio *cio.DirectIO
|
||||
}
|
||||
|
||||
// NewConfig creates a stream config and initializes
|
||||
@@ -115,14 +117,15 @@ func (c *Config) CloseStreams() error {
|
||||
|
||||
// CopyToPipe connects streamconfig with a libcontainerd.IOPipe
|
||||
func (c *Config) CopyToPipe(iop *cio.DirectIO) {
|
||||
c.dio = iop
|
||||
copyFunc := func(w io.Writer, r io.ReadCloser) {
|
||||
c.Add(1)
|
||||
c.wg.Add(1)
|
||||
go func() {
|
||||
if _, err := pools.Copy(w, r); err != nil {
|
||||
logrus.Errorf("stream copy error: %v", err)
|
||||
}
|
||||
r.Close()
|
||||
c.Done()
|
||||
c.wg.Done()
|
||||
}()
|
||||
}
|
||||
|
||||
@@ -144,3 +147,23 @@ func (c *Config) CopyToPipe(iop *cio.DirectIO) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Wait for the stream to close
|
||||
// Wait supports timeouts via the context to unblock and forcefully
|
||||
// close the io streams
|
||||
func (c *Config) Wait(ctx context.Context) {
|
||||
done := make(chan struct{}, 1)
|
||||
go func() {
|
||||
c.wg.Wait()
|
||||
close(done)
|
||||
}()
|
||||
select {
|
||||
case <-done:
|
||||
case <-ctx.Done():
|
||||
if c.dio != nil {
|
||||
c.dio.Cancel()
|
||||
c.dio.Wait()
|
||||
c.dio.Close()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
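A usage sketch of the new context-aware Wait, mirroring the two-second timeout the daemon applies in ProcessEvent later in this changeset:

package main

import (
	"context"
	"time"

	"github.com/docker/docker/container/stream"
)

// waitWithTimeout bounds Config.Wait so a stuck stream copy cannot block
// container shutdown indefinitely; on timeout, Wait cancels and closes the
// attached DirectIO streams.
func waitWithTimeout(c *stream.Config, d time.Duration) {
	ctx, cancel := context.WithTimeout(context.Background(), d)
	defer cancel()
	c.Wait(ctx)
}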
@@ -39,6 +39,9 @@ fi
|
||||
|
||||
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_NET:=}"
|
||||
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_MTU:=}"
|
||||
# if slirp4netns v0.4.0+ is installed, slirp4netns is hardened using sandbox (mount namespace) and seccomp
|
||||
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX:=auto}"
|
||||
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP:=auto}"
|
||||
net=$DOCKERD_ROOTLESS_ROOTLESSKIT_NET
|
||||
mtu=$DOCKERD_ROOTLESS_ROOTLESSKIT_MTU
|
||||
if [ -z $net ]; then
|
||||
@@ -77,6 +80,8 @@ if [ -z $_DOCKERD_ROOTLESS_CHILD ]; then
|
||||
# * /run: copy-up is required so that we can create /run/docker (hardcoded for plugins) in our namespace
|
||||
exec $rootlesskit \
|
||||
--net=$net --mtu=$mtu \
|
||||
--slirp4netns-sandbox=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX \
|
||||
--slirp4netns-seccomp=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP \
|
||||
--disable-host-loopback --port-driver=builtin \
|
||||
--copy-up=/etc --copy-up=/run \
|
||||
$DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS \
|
||||
|
||||
@@ -1,5 +0,0 @@
|
||||
// +build linux freebsd
|
||||
|
||||
package daemon // import "github.com/docker/docker/daemon"
|
||||
|
||||
const bindMountType = "bind"
|
||||
@@ -1,12 +1,57 @@
|
||||
package config
|
||||
|
||||
import "github.com/docker/docker/api/types/filters"
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
)
|
||||
|
||||
// BuilderGCRule represents a GC rule for buildkit cache
|
||||
type BuilderGCRule struct {
|
||||
All bool `json:",omitempty"`
|
||||
Filter filters.Args `json:",omitempty"`
|
||||
KeepStorage string `json:",omitempty"`
|
||||
All bool `json:",omitempty"`
|
||||
Filter BuilderGCFilter `json:",omitempty"`
|
||||
KeepStorage string `json:",omitempty"`
|
||||
}
|
||||
|
||||
// BuilderGCFilter contains garbage-collection filter rules for a BuildKit builder
|
||||
type BuilderGCFilter filters.Args
|
||||
|
||||
// MarshalJSON returns a JSON byte representation of the BuilderGCFilter
|
||||
func (x *BuilderGCFilter) MarshalJSON() ([]byte, error) {
|
||||
f := filters.Args(*x)
|
||||
keys := f.Keys()
|
||||
sort.Strings(keys)
|
||||
arr := make([]string, 0, len(keys))
|
||||
for _, k := range keys {
|
||||
values := f.Get(k)
|
||||
for _, v := range values {
|
||||
arr = append(arr, fmt.Sprintf("%s=%s", k, v))
|
||||
}
|
||||
}
|
||||
return json.Marshal(arr)
|
||||
}
|
||||
|
||||
// UnmarshalJSON fills the BuilderGCFilter values structure from JSON input
|
||||
func (x *BuilderGCFilter) UnmarshalJSON(data []byte) error {
|
||||
var arr []string
|
||||
f := filters.NewArgs()
|
||||
if err := json.Unmarshal(data, &arr); err != nil {
|
||||
// backwards compat for deprecated buggy form
|
||||
err := json.Unmarshal(data, &f)
|
||||
*x = BuilderGCFilter(f)
|
||||
return err
|
||||
}
|
||||
for _, s := range arr {
|
||||
fields := strings.SplitN(s, "=", 2)
|
||||
name := strings.ToLower(strings.TrimSpace(fields[0]))
|
||||
value := strings.TrimSpace(fields[1])
|
||||
f.Add(name, value)
|
||||
}
|
||||
*x = BuilderGCFilter(f)
|
||||
return nil
|
||||
}
|
||||
|
||||
// BuilderGCConfig contains GC config for a buildkit builder
|
||||
@@ -16,7 +61,14 @@ type BuilderGCConfig struct {
|
||||
DefaultKeepStorage string `json:",omitempty"`
|
||||
}
|
||||
|
||||
// BuilderEntitlements contains settings to enable/disable entitlements
|
||||
type BuilderEntitlements struct {
|
||||
NetworkHost *bool `json:"network-host,omitempty"`
|
||||
SecurityInsecure *bool `json:"security-insecure,omitempty"`
|
||||
}
|
||||
|
||||
// BuilderConfig contains config for the builder
|
||||
type BuilderConfig struct {
|
||||
GC BuilderGCConfig `json:",omitempty"`
|
||||
GC BuilderGCConfig `json:",omitempty"`
|
||||
Entitlements BuilderEntitlements `json:",omitempty"`
|
||||
}
|
||||
|
||||
44
daemon/config/builder_test.go
Normal file
@@ -0,0 +1,44 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"gotest.tools/assert"
|
||||
"gotest.tools/fs"
|
||||
)
|
||||
|
||||
func TestBuilderGC(t *testing.T) {
|
||||
tempFile := fs.NewFile(t, "config", fs.WithContent(`{
|
||||
"builder": {
|
||||
"gc": {
|
||||
"enabled": true,
|
||||
"policy": [
|
||||
{"keepStorage": "10GB", "filter": ["unused-for=2200h"]},
|
||||
{"keepStorage": "50GB", "filter": {"unused-for": {"3300h": true}}},
|
||||
{"keepStorage": "100GB", "all": true}
|
||||
]
|
||||
}
|
||||
}
|
||||
}`))
|
||||
defer tempFile.Remove()
|
||||
configFile := tempFile.Path()
|
||||
|
||||
cfg, err := MergeDaemonConfigurations(&Config{}, nil, configFile)
|
||||
assert.NilError(t, err)
|
||||
assert.Assert(t, cfg.Builder.GC.Enabled)
|
||||
f1 := filters.NewArgs()
|
||||
f1.Add("unused-for", "2200h")
|
||||
f2 := filters.NewArgs()
|
||||
f2.Add("unused-for", "3300h")
|
||||
expectedPolicy := []BuilderGCRule{
|
||||
{KeepStorage: "10GB", Filter: BuilderGCFilter(f1)},
|
||||
{KeepStorage: "50GB", Filter: BuilderGCFilter(f2)}, /* parsed from deprecated form */
|
||||
{KeepStorage: "100GB", All: true},
|
||||
}
|
||||
assert.DeepEqual(t, cfg.Builder.GC.Policy, expectedPolicy, cmp.AllowUnexported(BuilderGCFilter{}))
|
||||
// double check to please the skeptics
|
||||
assert.Assert(t, filters.Args(cfg.Builder.GC.Policy[0].Filter).UniqueExactMatch("unused-for", "2200h"))
|
||||
assert.Assert(t, filters.Args(cfg.Builder.GC.Policy[1].Filter).UniqueExactMatch("unused-for", "3300h"))
|
||||
}
|
||||
@@ -235,6 +235,9 @@ type CommonConfig struct {
|
||||
Features map[string]bool `json:"features,omitempty"`
|
||||
|
||||
Builder BuilderConfig `json:"builder,omitempty"`
|
||||
|
||||
ContainerdNamespace string `json:"containerd-namespace,omitempty"`
|
||||
ContainerdPluginNamespace string `json:"containerd-plugin-namespace,omitempty"`
|
||||
}
|
||||
|
||||
// IsValueSet returns true if a configuration value
|
||||
|
||||
@@ -875,7 +875,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
|
||||
grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(defaults.DefaultMaxSendMsgSize)),
|
||||
}
|
||||
if config.ContainerdAddr != "" {
|
||||
d.containerdCli, err = containerd.New(config.ContainerdAddr, containerd.WithDefaultNamespace(ContainersNamespace), containerd.WithDialOpts(gopts), containerd.WithTimeout(60*time.Second))
|
||||
d.containerdCli, err = containerd.New(config.ContainerdAddr, containerd.WithDefaultNamespace(config.ContainerdNamespace), containerd.WithDialOpts(gopts), containerd.WithTimeout(60*time.Second))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to dial %q", config.ContainerdAddr)
|
||||
}
|
||||
@@ -887,13 +887,13 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
|
||||
// Windows is not currently using containerd, keep the
|
||||
// client as nil
|
||||
if config.ContainerdAddr != "" {
|
||||
pluginCli, err = containerd.New(config.ContainerdAddr, containerd.WithDefaultNamespace(pluginexec.PluginNamespace), containerd.WithDialOpts(gopts), containerd.WithTimeout(60*time.Second))
|
||||
pluginCli, err = containerd.New(config.ContainerdAddr, containerd.WithDefaultNamespace(config.ContainerdPluginNamespace), containerd.WithDialOpts(gopts), containerd.WithTimeout(60*time.Second))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to dial %q", config.ContainerdAddr)
|
||||
}
|
||||
}
|
||||
|
||||
return pluginexec.New(ctx, getPluginExecRoot(config.Root), pluginCli, m)
|
||||
return pluginexec.New(ctx, getPluginExecRoot(config.Root), pluginCli, config.ContainerdPluginNamespace, m)
|
||||
}
|
||||
|
||||
// Plugin system initialization should happen before restore. Do not change order.
|
||||
@@ -1041,7 +1041,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
|
||||
|
||||
go d.execCommandGC()
|
||||
|
||||
d.containerd, err = libcontainerd.NewClient(ctx, d.containerdCli, filepath.Join(config.ExecRoot, "containerd"), ContainersNamespace, d)
|
||||
d.containerd, err = libcontainerd.NewClient(ctx, d.containerdCli, filepath.Join(config.ExecRoot, "containerd"), config.ContainerdNamespace, d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -16,7 +16,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
containerd_cgroups "github.com/containerd/cgroups"
|
||||
statsV1 "github.com/containerd/cgroups/stats/v1"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/blkiodev"
|
||||
pblkiodev "github.com/docker/docker/api/types/blkiodev"
|
||||
@@ -24,6 +24,7 @@ import (
|
||||
"github.com/docker/docker/container"
|
||||
"github.com/docker/docker/daemon/config"
|
||||
"github.com/docker/docker/daemon/initlayer"
|
||||
"github.com/docker/docker/errdefs"
|
||||
"github.com/docker/docker/opts"
|
||||
"github.com/docker/docker/pkg/containerfs"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
@@ -192,8 +193,9 @@ func getBlkioWeightDevices(config containertypes.Resources) ([]specs.LinuxWeight
|
||||
}
|
||||
weight := weightDevice.Weight
|
||||
d := specs.LinuxWeightDevice{Weight: &weight}
|
||||
d.Major = int64(unix.Major(stat.Rdev))
|
||||
d.Minor = int64(unix.Minor(stat.Rdev))
|
||||
// The type is 32bit on mips.
|
||||
d.Major = int64(unix.Major(uint64(stat.Rdev))) // nolint: unconvert
|
||||
d.Minor = int64(unix.Minor(uint64(stat.Rdev))) // nolint: unconvert
|
||||
blkioWeightDevices = append(blkioWeightDevices, d)
|
||||
}
|
||||
|
||||
@@ -263,8 +265,9 @@ func getBlkioThrottleDevices(devs []*blkiodev.ThrottleDevice) ([]specs.LinuxThro
|
||||
return nil, err
|
||||
}
|
||||
d := specs.LinuxThrottleDevice{Rate: d.Rate}
|
||||
d.Major = int64(unix.Major(stat.Rdev))
|
||||
d.Minor = int64(unix.Minor(stat.Rdev))
|
||||
// the type is 32bit on mips
|
||||
d.Major = int64(unix.Major(uint64(stat.Rdev))) // nolint: unconvert
|
||||
d.Minor = int64(unix.Minor(uint64(stat.Rdev))) // nolint: unconvert
|
||||
throttleDevices = append(throttleDevices, d)
|
||||
}
|
||||
|
||||
@@ -735,6 +738,9 @@ func (daemon *Daemon) initRuntimes(runtimes map[string]types.Runtime) (err error
|
||||
|
||||
// verifyDaemonSettings performs validation of daemon config struct
|
||||
func verifyDaemonSettings(conf *config.Config) error {
|
||||
if conf.ContainerdNamespace == conf.ContainerdPluginNamespace {
|
||||
return errors.New("containers namespace and plugins namespace cannot be the same")
|
||||
}
|
||||
// Check for mutually incompatible config options
|
||||
if conf.BridgeConfig.Iface != "" && conf.BridgeConfig.IP != "" {
|
||||
return fmt.Errorf("You specified -b & --bip, mutually exclusive options. Please specify only one")
|
||||
@@ -1265,6 +1271,10 @@ func setupDaemonRootPropagation(cfg *config.Config) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := os.MkdirAll(filepath.Dir(cleanupFile), 0700); err != nil {
|
||||
return errors.Wrap(err, "error creating dir to store mount cleanup file")
|
||||
}
|
||||
|
||||
if err := ioutil.WriteFile(cleanupFile, nil, 0600); err != nil {
|
||||
return errors.Wrap(err, "error writing file to signal mount cleanup on shutdown")
|
||||
}
|
||||
@@ -1290,12 +1300,26 @@ func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *
|
||||
}
|
||||
child, err := daemon.GetContainer(name)
|
||||
if err != nil {
|
||||
if errdefs.IsNotFound(err) {
|
||||
// Trying to link to a non-existing container is not valid, and
|
||||
// should return an "invalid parameter" error. Returning a "not
|
||||
// found" error here would make the client report the container's
|
||||
// image could not be found (see moby/moby#39823)
|
||||
err = errdefs.InvalidParameter(err)
|
||||
}
|
||||
return errors.Wrapf(err, "could not get container for %s", name)
|
||||
}
|
||||
for child.HostConfig.NetworkMode.IsContainer() {
|
||||
parts := strings.SplitN(string(child.HostConfig.NetworkMode), ":", 2)
|
||||
child, err = daemon.GetContainer(parts[1])
|
||||
if err != nil {
|
||||
if errdefs.IsNotFound(err) {
|
||||
// Trying to link to a non-existing container is not valid, and
|
||||
// should return an "invalid parameter" error. Returning a "not
|
||||
// found" error here would make the client report the container's
|
||||
// image could not be found (see moby/moby#39823)
|
||||
err = errdefs.InvalidParameter(err)
|
||||
}
|
||||
return errors.Wrapf(err, "Could not get container for %s", parts[1])
|
||||
}
|
||||
}
|
||||
@@ -1325,7 +1349,7 @@ func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container
|
||||
return daemon.Unmount(container)
|
||||
}
|
||||
|
||||
func copyBlkioEntry(entries []*containerd_cgroups.BlkIOEntry) []types.BlkioStatEntry {
|
||||
func copyBlkioEntry(entries []*statsV1.BlkIOEntry) []types.BlkioStatEntry {
|
||||
out := make([]types.BlkioStatEntry, len(entries))
|
||||
for i, re := range entries {
|
||||
out[i] = types.BlkioStatEntry{
|
||||
|
||||
@@ -3,7 +3,9 @@ package daemon // import "github.com/docker/docker/daemon"
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/Microsoft/hcsshim"
|
||||
@@ -40,9 +42,10 @@ const (
|
||||
windowsMaxCPUPercent = 100
|
||||
)
|
||||
|
||||
// Windows doesn't really have rlimits.
// Windows containers are much larger than Linux containers and each of them
// has > 20 system processes, which is why we use a much smaller parallelism value.
func adjustParallelLimit(n int, limit int) int {
	return limit
	return int(math.Max(1, math.Floor(float64(runtime.NumCPU())*.8)))
}
|
||||
|
||||
// Windows has no concept of an execution state directory. So use config.Root here.
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package exec // import "github.com/docker/docker/daemon/exec"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"runtime"
|
||||
"sync"
|
||||
|
||||
@@ -58,7 +59,7 @@ func (i *rio) Close() error {
|
||||
}
|
||||
|
||||
func (i *rio) Wait() {
|
||||
i.sc.Wait()
|
||||
i.sc.Wait(context.Background())
|
||||
|
||||
i.IO.Wait()
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
// +build linux,!btrfs_noversion
|
||||
// +build linux
|
||||
|
||||
package btrfs // import "github.com/docker/docker/daemon/graphdriver/btrfs"
|
||||
|
||||
|
||||
@@ -1,14 +0,0 @@
|
||||
// +build linux,btrfs_noversion
|
||||
|
||||
package btrfs // import "github.com/docker/docker/daemon/graphdriver/btrfs"
|
||||
|
||||
// TODO(vbatts) remove this work-around once supported linux distros are on
|
||||
// btrfs utilities of >= 3.16.1
|
||||
|
||||
func btrfsBuildVersion() string {
|
||||
return "-"
|
||||
}
|
||||
|
||||
func btrfsLibVersion() int {
|
||||
return -1
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
// +build linux,!btrfs_noversion
|
||||
// +build linux
|
||||
|
||||
package btrfs // import "github.com/docker/docker/daemon/graphdriver/btrfs"
|
||||
|
||||
|
||||
@@ -146,7 +146,8 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
|
||||
|
||||
switch mode := f.Mode(); {
|
||||
case mode.IsRegular():
|
||||
id := fileID{dev: stat.Dev, ino: stat.Ino}
|
||||
//the type is 32bit on mips
|
||||
id := fileID{dev: uint64(stat.Dev), ino: stat.Ino} // nolint: unconvert
|
||||
if copyMode == Hardlink {
|
||||
isHardlink = true
|
||||
if err2 := os.Link(srcPath, dstPath); err2 != nil {
|
||||
|
||||
@@ -1527,7 +1527,8 @@ func getDeviceMajorMinor(file *os.File) (uint64, uint64, error) {
|
||||
return 0, 0, err
|
||||
}
|
||||
|
||||
dev := stat.Rdev
|
||||
// the type is 32bit on mips
|
||||
dev := uint64(stat.Rdev) // nolint: unconvert
|
||||
majorNum := major(dev)
|
||||
minorNum := minor(dev)
|
||||
|
||||
@@ -1738,7 +1739,8 @@ func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) {
|
||||
// - Managed by docker
|
||||
// - The target of this device is at major <maj> and minor <min>
|
||||
// - If <inode> is defined, use that file inside the device as a loopback image. Otherwise use the device itself.
|
||||
devices.devicePrefix = fmt.Sprintf("docker-%d:%d-%d", major(st.Dev), minor(st.Dev), st.Ino)
|
||||
// The type Dev in Stat_t is 32bit on mips.
|
||||
devices.devicePrefix = fmt.Sprintf("docker-%d:%d-%d", major(uint64(st.Dev)), minor(uint64(st.Dev)), st.Ino) // nolint: unconvert
|
||||
logger.Debugf("Generated prefix: %s", devices.devicePrefix)
|
||||
|
||||
// Check for the existence of the thin-pool device
|
||||
|
||||
@@ -446,6 +446,10 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr
|
||||
return err
|
||||
}
|
||||
|
||||
if err := ioutil.WriteFile(path.Join(d.dir(parent), "committed"), []byte{}, 0600); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
lower, err := d.getLower(parent)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -592,7 +596,20 @@ func (d *Driver) Get(id, mountLabel string) (_ containerfs.ContainerFS, retErr e
|
||||
for i, s := range splitLowers {
|
||||
absLowers[i] = path.Join(d.home, s)
|
||||
}
|
||||
opts := indexOff + "lowerdir=" + strings.Join(absLowers, ":") + ",upperdir=" + diffDir + ",workdir=" + workDir
|
||||
var readonly bool
|
||||
if _, err := os.Stat(path.Join(dir, "committed")); err == nil {
|
||||
readonly = true
|
||||
} else if !os.IsNotExist(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var opts string
|
||||
if readonly {
|
||||
opts = indexOff + "lowerdir=" + diffDir + ":" + strings.Join(absLowers, ":")
|
||||
} else {
|
||||
opts = indexOff + "lowerdir=" + strings.Join(absLowers, ":") + ",upperdir=" + diffDir + ",workdir=" + workDir
|
||||
}
|
||||
|
||||
mountData := label.FormatMountLabel(opts, mountLabel)
|
||||
mount := unix.Mount
|
||||
mountTarget := mergedDir
|
||||
@@ -612,7 +629,11 @@ func (d *Driver) Get(id, mountLabel string) (_ containerfs.ContainerFS, retErr e
|
||||
// fit within a page and relative links make the mount data much
|
||||
// smaller at the expense of requiring a fork exec to chroot.
|
||||
if len(mountData) > pageSize {
|
||||
opts = indexOff + "lowerdir=" + string(lowers) + ",upperdir=" + path.Join(id, diffDirName) + ",workdir=" + path.Join(id, workDirName)
|
||||
if readonly {
|
||||
opts = indexOff + "lowerdir=" + path.Join(id, diffDirName) + ":" + string(lowers)
|
||||
} else {
|
||||
opts = indexOff + "lowerdir=" + string(lowers) + ",upperdir=" + path.Join(id, diffDirName) + ",workdir=" + path.Join(id, workDirName)
|
||||
}
|
||||
mountData = label.FormatMountLabel(opts, mountLabel)
|
||||
if len(mountData) > pageSize {
|
||||
return nil, fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData))
|
||||
@@ -628,10 +649,12 @@ func (d *Driver) Get(id, mountLabel string) (_ containerfs.ContainerFS, retErr e
|
||||
return nil, fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err)
|
||||
}
|
||||
|
||||
// chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a
|
||||
// user namespace requires this to move a directory from lower to upper.
|
||||
if err := os.Chown(path.Join(workDir, workDirName), rootUID, rootGID); err != nil {
|
||||
return nil, err
|
||||
if !readonly {
|
||||
// chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a
|
||||
// user namespace requires this to move a directory from lower to upper.
|
||||
if err := os.Chown(path.Join(workDir, workDirName), rootUID, rootGID); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return containerfs.NewLocalContainerFS(mergedDir), nil
|
||||
|
||||
@@ -170,6 +170,9 @@ func (i *ImageService) pullForBuilder(ctx context.Context, name string, authConf
|
||||
func (i *ImageService) GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (builder.Image, builder.ROLayer, error) {
|
||||
if refOrID == "" { // ie FROM scratch
|
||||
os := runtime.GOOS
|
||||
if runtime.GOOS == "windows" {
|
||||
os = "linux"
|
||||
}
|
||||
if opts.Platform != nil {
|
||||
os = opts.Platform.OS
|
||||
}
|
||||
|
||||
@@ -369,7 +369,7 @@ func (i *ImageService) checkImageDeleteConflict(imgID image.ID, mask conflictTyp
|
||||
if mask&conflictRunningContainer != 0 {
|
||||
// Check if any running container is using the image.
|
||||
running := func(c *container.Container) bool {
|
||||
return c.IsRunning() && c.ImageID == imgID
|
||||
return c.ImageID == imgID && c.IsRunning()
|
||||
}
|
||||
if container := i.containers.First(running); container != nil {
|
||||
return &imageDeleteConflict{
|
||||
|
||||
@@ -285,6 +285,9 @@ func TestLogClosed(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// TestLogBlocking tests that the Log method blocks appropriately when
|
||||
// non-blocking behavior is not enabled. Blocking is achieved through an
|
||||
// internal channel that must be drained for Log to return.
|
||||
func TestLogBlocking(t *testing.T) {
|
||||
mockClient := newMockClient()
|
||||
stream := &logStream{
|
||||
@@ -299,18 +302,20 @@ func TestLogBlocking(t *testing.T) {
|
||||
err := stream.Log(&logger.Message{})
|
||||
errorCh <- err
|
||||
}()
|
||||
// block until the goroutine above has started
|
||||
<-started
|
||||
select {
|
||||
case err := <-errorCh:
|
||||
t.Fatal("Expected stream.Log to block: ", err)
|
||||
default:
|
||||
break
|
||||
}
|
||||
// assuming it is blocked, we can now try to drain the internal channel and
|
||||
// unblock it
|
||||
select {
|
||||
case <-stream.messages:
|
||||
break
|
||||
default:
|
||||
case <-time.After(10 * time.Millisecond):
|
||||
// if we're unable to drain the channel within 10ms, something seems broken
|
||||
t.Fatal("Expected to be able to read from stream.messages but was unable to")
|
||||
case <-stream.messages:
|
||||
}
|
||||
select {
|
||||
case err := <-errorCh:
|
||||
|
||||
@@ -166,6 +166,10 @@ func newGELFUDPWriter(address string, info logger.Info) (gelf.Writer, error) {
|
||||
}
|
||||
|
||||
func (s *gelfLogger) Log(msg *logger.Message) error {
|
||||
if len(msg.Line) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
level := gelf.LOG_INFO
|
||||
if msg.Source == "stderr" {
|
||||
level = gelf.LOG_ERR
|
||||
|
||||
@@ -265,7 +265,7 @@ func compressFile(fileName string, lastTimestamp time.Time) {
|
||||
compressWriter := gzip.NewWriter(outFile)
|
||||
defer compressWriter.Close()
|
||||
|
||||
// Add the last log entry timestramp to the gzip header
|
||||
// Add the last log entry timestamp to the gzip header
|
||||
extra := rotateFileMetadata{}
|
||||
extra.LastTime = lastTimestamp
|
||||
compressWriter.Header.Extra, err = json.Marshal(&extra)
|
||||
@@ -614,11 +614,25 @@ func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan int
|
||||
}
|
||||
}
|
||||
|
||||
oldSize := int64(-1)
|
||||
handleDecodeErr := func(err error) error {
|
||||
if errors.Cause(err) != io.EOF {
|
||||
return err
|
||||
}
|
||||
|
||||
// Handle special case (#39235): max-file=1 and file was truncated
|
||||
st, stErr := f.Stat()
|
||||
if stErr == nil {
|
||||
size := st.Size()
|
||||
defer func() { oldSize = size }()
|
||||
if size < oldSize { // truncated
|
||||
f.Seek(0, 0)
|
||||
return nil
|
||||
}
|
||||
} else {
|
||||
logrus.WithError(stErr).Warn("logger: stat error")
|
||||
}
|
||||
|
||||
for {
|
||||
err := waitRead()
|
||||
if err == nil {
|
||||
|
||||
@@ -925,7 +925,12 @@ func TestFrequency(t *testing.T) {
|
||||
|
||||
// 1 to verify connection and 10 to verify that we have sent messages with required frequency,
|
||||
// but because frequency is too small (to keep test quick), instead of 11, use 9 if context switches will be slow
|
||||
if hec.numOfRequests < 9 {
|
||||
expectedRequests := 9
|
||||
if runtime.GOOS == "windows" {
|
||||
// sometimes in Windows, this test fails with number of requests showing 8. So be more conservative.
|
||||
expectedRequests = 7
|
||||
}
|
||||
if hec.numOfRequests < expectedRequests {
|
||||
t.Fatalf("Unexpected number of requests %d", hec.numOfRequests)
|
||||
}
|
||||
|
||||
|
||||
@@ -2,8 +2,6 @@ package daemon // import "github.com/docker/docker/daemon"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"time"
|
||||
@@ -12,6 +10,7 @@ import (
|
||||
"github.com/docker/docker/container"
|
||||
libcontainerdtypes "github.com/docker/docker/libcontainerd/types"
|
||||
"github.com/docker/docker/restartmanager"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
@@ -29,8 +28,8 @@ func (daemon *Daemon) setStateCounter(c *container.Container) {
|
||||
// ProcessEvent is called by libcontainerd whenever an event occurs
|
||||
func (daemon *Daemon) ProcessEvent(id string, e libcontainerdtypes.EventType, ei libcontainerdtypes.EventInfo) error {
|
||||
c, err := daemon.GetContainer(id)
|
||||
if c == nil || err != nil {
|
||||
return fmt.Errorf("no such container: %s", id)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not find container %s", id)
|
||||
}
|
||||
|
||||
switch e {
|
||||
@@ -55,8 +54,9 @@ func (daemon *Daemon) ProcessEvent(id string, e libcontainerdtypes.EventType, ei
|
||||
if err != nil {
|
||||
logrus.WithError(err).Warnf("failed to delete container %s from containerd", c.ID)
|
||||
}
|
||||
|
||||
c.StreamConfig.Wait()
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
|
||||
c.StreamConfig.Wait(ctx)
|
||||
cancel()
|
||||
c.Reset(false)
|
||||
|
||||
exitStatus := container.ExitStatus{
|
||||
@@ -117,13 +117,18 @@ func (daemon *Daemon) ProcessEvent(id string, e libcontainerdtypes.EventType, ei
|
||||
return cpErr
|
||||
}
|
||||
|
||||
exitCode := 127
|
||||
if execConfig := c.ExecCommands.Get(ei.ProcessID); execConfig != nil {
|
||||
ec := int(ei.ExitCode)
|
||||
execConfig.Lock()
|
||||
defer execConfig.Unlock()
|
||||
execConfig.ExitCode = &ec
|
||||
execConfig.Running = false
|
||||
execConfig.StreamConfig.Wait()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
|
||||
execConfig.StreamConfig.Wait(ctx)
|
||||
cancel()
|
||||
|
||||
if err := execConfig.CloseStreams(); err != nil {
|
||||
logrus.Errorf("failed to cleanup exec %s streams: %s", c.ID, err)
|
||||
}
|
||||
@@ -131,18 +136,14 @@ func (daemon *Daemon) ProcessEvent(id string, e libcontainerdtypes.EventType, ei
|
||||
// remove the exec command from the container's store only and not the
|
||||
// daemon's store so that the exec command can be inspected.
|
||||
c.ExecCommands.Delete(execConfig.ID, execConfig.Pid)
|
||||
attributes := map[string]string{
|
||||
"execID": execConfig.ID,
|
||||
"exitCode": strconv.Itoa(ec),
|
||||
}
|
||||
daemon.LogContainerEventWithAttributes(c, "exec_die", attributes)
|
||||
} else {
|
||||
logrus.WithFields(logrus.Fields{
|
||||
"container": c.ID,
|
||||
"exec-id": ei.ProcessID,
|
||||
"exec-pid": ei.Pid,
|
||||
}).Warn("Ignoring Exit Event, no such exec command found")
|
||||
|
||||
exitCode = ec
|
||||
}
|
||||
attributes := map[string]string{
|
||||
"execID": ei.ProcessID,
|
||||
"exitCode": strconv.Itoa(exitCode),
|
||||
}
|
||||
daemon.LogContainerEventWithAttributes(c, "exec_die", attributes)
|
||||
case libcontainerdtypes.EventStart:
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
|
||||
@@ -20,6 +20,7 @@ import (
|
||||
"github.com/docker/docker/oci/caps"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/pkg/mount"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
"github.com/docker/docker/rootless/specconv"
|
||||
volumemounts "github.com/docker/docker/volume/mounts"
|
||||
"github.com/opencontainers/runc/libcontainer/apparmor"
|
||||
@@ -66,13 +67,14 @@ func WithLibnetwork(daemon *Daemon, c *container.Container) coci.SpecOpts {
|
||||
for _, ns := range s.Linux.Namespaces {
|
||||
if ns.Type == "network" && ns.Path == "" && !c.Config.NetworkDisabled {
|
||||
target := filepath.Join("/proc", strconv.Itoa(os.Getpid()), "exe")
|
||||
shortNetCtlrID := stringid.TruncateID(daemon.netController.ID())
|
||||
s.Hooks.Prestart = append(s.Hooks.Prestart, specs.Hook{
|
||||
Path: target,
|
||||
Args: []string{
|
||||
"libnetwork-setkey",
|
||||
"-exec-root=" + daemon.configStore.GetExecRoot(),
|
||||
c.ID,
|
||||
daemon.netController.ID(),
|
||||
shortNetCtlrID,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
@@ -161,7 +161,7 @@ func (daemon *Daemon) containerStart(container *container.Container, checkpoint
|
||||
container.HasBeenManuallyStopped = false
|
||||
}
|
||||
|
||||
if daemon.saveApparmorConfig(container); err != nil {
|
||||
if err := daemon.saveApparmorConfig(container); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
||||
@@ -141,10 +141,6 @@ func (daemon *Daemon) mountVolumes(container *container.Container) error {
|
||||
if m.Writable {
|
||||
writeMode = "rw"
|
||||
}
|
||||
opts := strings.Join([]string{bindMode, writeMode}, ",")
|
||||
if err := mount.Mount(m.Source, dest, bindMountType, opts); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// mountVolumes() seems to be called for temporary mounts
|
||||
// outside the container. Soon these will be unmounted with
|
||||
@@ -154,8 +150,9 @@ func (daemon *Daemon) mountVolumes(container *container.Container) error {
|
||||
// then these unmounts will propagate and unmount original
|
||||
// mount as well. So make all these mounts rprivate.
|
||||
// Do not use propagation property of volume as that should
|
||||
// apply only when mounting happen inside the container.
|
||||
if err := mount.MakeRPrivate(dest); err != nil {
|
||||
// apply only when mounting happens inside the container.
|
||||
opts := strings.Join([]string{bindMode, writeMode, "rprivate"}, ",")
|
||||
if err := mount.Mount(m.Source, dest, "", opts); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,29 +0,0 @@
|
||||
package distribution
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/manifest/schema2"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
func init() {
|
||||
// TODO: Remove this registration if distribution is included with OCI support
|
||||
|
||||
ocischemaFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
|
||||
m := new(schema2.DeserializedManifest)
|
||||
err := m.UnmarshalJSON(b)
|
||||
if err != nil {
|
||||
return nil, distribution.Descriptor{}, err
|
||||
}
|
||||
|
||||
dgst := digest.FromBytes(b)
|
||||
return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: ocispec.MediaTypeImageManifest}, err
|
||||
}
|
||||
err := distribution.RegisterManifestSchema(ocispec.MediaTypeImageManifest, ocischemaFunc)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Unable to register manifest: %s", err))
|
||||
}
|
||||
}
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/manifest/manifestlist"
|
||||
"github.com/docker/distribution/manifest/ocischema"
|
||||
"github.com/docker/distribution/manifest/schema1"
|
||||
"github.com/docker/distribution/manifest/schema2"
|
||||
"github.com/docker/distribution/reference"
|
||||
@@ -392,9 +393,14 @@ func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named, platform
|
||||
if p.config.RequireSchema2 {
|
||||
return false, fmt.Errorf("invalid manifest: not schema2")
|
||||
}
|
||||
msg := schema1DeprecationMessage(ref)
|
||||
logrus.Warn(msg)
|
||||
progress.Message(p.config.ProgressOutput, "", msg)
|
||||
|
||||
// give registries time to upgrade to schema2 and only warn if we know a registry has been upgraded long time ago
|
||||
// TODO: condition to be removed
|
||||
if reference.Domain(ref) == "docker.io" {
|
||||
msg := fmt.Sprintf("Image %s uses outdated schema1 manifest format. Please upgrade to a schema2 image for better future compatibility. More information at https://docs.docker.com/registry/spec/deprecated-schema-v1/", ref)
|
||||
logrus.Warn(msg)
|
||||
progress.Message(p.config.ProgressOutput, "", msg)
|
||||
}
|
||||
|
||||
id, manifestDigest, err = p.pullSchema1(ctx, ref, v, platform)
|
||||
if err != nil {
|
||||
@@ -405,6 +411,11 @@ func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named, platform
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
case *ocischema.DeserializedManifest:
|
||||
id, manifestDigest, err = p.pullOCI(ctx, ref, v, platform)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
case *manifestlist.DeserializedManifestList:
|
||||
id, manifestDigest, err = p.pullManifestList(ctx, ref, v, platform)
|
||||
if err != nil {
|
||||
@@ -552,24 +563,18 @@ func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Reference, unv
|
||||
return imageID, manifestDigest, nil
|
||||
}
|
||||
|
||||
func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest, platform *specs.Platform) (id digest.Digest, manifestDigest digest.Digest, err error) {
|
||||
manifestDigest, err = schema2ManifestDigest(ref, mfst)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
target := mfst.Target()
|
||||
func (p *v2Puller) pullSchema2Layers(ctx context.Context, target distribution.Descriptor, layers []distribution.Descriptor, platform *specs.Platform) (id digest.Digest, err error) {
|
||||
if _, err := p.config.ImageStore.Get(target.Digest); err == nil {
|
||||
// If the image already exists locally, no need to pull
|
||||
// anything.
|
||||
return target.Digest, manifestDigest, nil
|
||||
return target.Digest, nil
|
||||
}
|
||||
|
||||
var descriptors []xfer.DownloadDescriptor
|
||||
|
||||
// Note that the order of this loop is in the direction of bottom-most
|
||||
// to top-most, so that the downloads slice gets ordered correctly.
|
||||
for _, d := range mfst.Layers {
|
||||
for _, d := range layers {
|
||||
layerDescriptor := &v2LayerDescriptor{
|
||||
digest: d.Digest,
|
||||
repo: p.repo,
|
||||
@@ -624,23 +629,23 @@ func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *s
|
||||
if runtime.GOOS == "windows" {
|
||||
configJSON, configRootFS, configPlatform, err = receiveConfig(p.config.ImageStore, configChan, configErrChan)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
return "", err
|
||||
}
|
||||
if configRootFS == nil {
|
||||
return "", "", errRootFSInvalid
|
||||
return "", errRootFSInvalid
|
||||
}
|
||||
if err := checkImageCompatibility(configPlatform.OS, configPlatform.OSVersion); err != nil {
|
||||
return "", "", err
|
||||
return "", err
|
||||
}
|
||||
|
||||
if len(descriptors) != len(configRootFS.DiffIDs) {
|
||||
return "", "", errRootFSMismatch
|
||||
return "", errRootFSMismatch
|
||||
}
|
||||
if platform == nil {
|
||||
// Early bath if the requested OS doesn't match that of the configuration.
|
||||
// This avoids doing the download, only to potentially fail later.
|
||||
if !system.IsOSSupported(configPlatform.OS) {
|
||||
return "", "", fmt.Errorf("cannot download image with operating system %q when requesting %q", configPlatform.OS, layerStoreOS)
|
||||
return "", fmt.Errorf("cannot download image with operating system %q when requesting %q", configPlatform.OS, layerStoreOS)
|
||||
}
|
||||
layerStoreOS = configPlatform.OS
|
||||
}
|
||||
@@ -687,14 +692,14 @@ func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *s
|
||||
case <-downloadsDone:
|
||||
case <-layerErrChan:
|
||||
}
|
||||
return "", "", err
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
|
||||
select {
|
||||
case <-downloadsDone:
|
||||
case err = <-layerErrChan:
|
||||
return "", "", err
|
||||
return "", err
|
||||
}
|
||||
|
||||
if release != nil {
|
||||
@@ -706,22 +711,40 @@ func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *s
|
||||
// Otherwise the image config could be referencing layers that aren't
|
||||
// included in the manifest.
|
||||
if len(downloadedRootFS.DiffIDs) != len(configRootFS.DiffIDs) {
|
||||
return "", "", errRootFSMismatch
|
||||
return "", errRootFSMismatch
|
||||
}
|
||||
|
||||
for i := range downloadedRootFS.DiffIDs {
|
||||
if downloadedRootFS.DiffIDs[i] != configRootFS.DiffIDs[i] {
|
||||
return "", "", errRootFSMismatch
|
||||
return "", errRootFSMismatch
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
imageID, err := p.config.ImageStore.Put(configJSON)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
return "", err
|
||||
}
|
||||
|
||||
return imageID, manifestDigest, nil
|
||||
return imageID, nil
|
||||
}
|
||||
|
||||
func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest, platform *specs.Platform) (id digest.Digest, manifestDigest digest.Digest, err error) {
|
||||
manifestDigest, err = schema2ManifestDigest(ref, mfst)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
id, err = p.pullSchema2Layers(ctx, mfst.Target(), mfst.Layers, platform)
|
||||
return id, manifestDigest, err
|
||||
}
|
||||
|
||||
func (p *v2Puller) pullOCI(ctx context.Context, ref reference.Named, mfst *ocischema.DeserializedManifest, platform *specs.Platform) (id digest.Digest, manifestDigest digest.Digest, err error) {
|
||||
manifestDigest, err = schema2ManifestDigest(ref, mfst)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
id, err = p.pullSchema2Layers(ctx, mfst.Target(), mfst.Layers, platform)
|
||||
return id, manifestDigest, err
|
||||
}
|
||||
|
||||
func receiveConfig(s ImageConfigStore, configChan <-chan []byte, errChan <-chan error) ([]byte, *image.RootFS, *specs.Platform, error) {
|
||||
@@ -791,7 +814,7 @@ func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mf
|
||||
|
||||
switch v := manifest.(type) {
|
||||
case *schema1.SignedManifest:
|
||||
msg := schema1DeprecationMessage(ref)
|
||||
msg := fmt.Sprintf("[DEPRECATION NOTICE] v2 schema1 manifests in manifest lists are not supported and will break in a future release. Suggest author of %s to upgrade to v2 schema2. More information at https://docs.docker.com/registry/spec/deprecated-schema-v1/", ref)
|
||||
logrus.Warn(msg)
|
||||
progress.Message(p.config.ProgressOutput, "", msg)
|
||||
|
||||
@@ -806,6 +829,12 @@ func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mf
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
case *ocischema.DeserializedManifest:
|
||||
platform := toOCIPlatform(manifestMatches[0].Platform)
|
||||
id, _, err = p.pullOCI(ctx, manifestRef, v, &platform)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
default:
|
||||
return "", "", errors.New("unsupported manifest format")
|
||||
}
|
||||
|
||||
@@ -188,7 +188,7 @@ func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, id

	logrus.Warnf("failed to upload schema2 manifest: %v - falling back to schema1", err)

	msg := schema1DeprecationMessage(ref)
	msg := fmt.Sprintf("[DEPRECATION NOTICE] registry v2 schema1 support will be removed in an upcoming release. Please contact admins of the %s registry NOW to avoid future disruption. More information at https://docs.docker.com/registry/spec/deprecated-schema-v1/", reference.Domain(ref))
	logrus.Warn(msg)
	progress.Message(p.config.ProgressOutput, "", msg)


@@ -156,7 +156,3 @@ func (th *existingTokenHandler) AuthorizeRequest(req *http.Request, params map[s
	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", th.token))
	return nil
}

func schema1DeprecationMessage(ref reference.Named) string {
	return fmt.Sprintf("[DEPRECATION NOTICE] registry v2 schema1 support will be removed in an upcoming release. Please contact admins of the %s registry NOW to avoid future disruption.", reference.Domain(ref))
}
@@ -87,6 +87,9 @@ keywords: "API, Docker, rcli, REST, documentation"
* `POST /swarm/init` now accepts a `DefaultAddrPool` property to set global scope default address pool (see the sketch after this list)
* `POST /swarm/init` now accepts a `SubnetSize` property to set global scope networks by giving the
  length of the subnet masks for every such network
* `POST /session` (added in [V1.31](#v131-api-changes)) is no longer experimental.
  This endpoint can be used to run interactive long-running protocols between the
  client and the daemon.
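To make the two new `POST /swarm/init` properties above concrete, here is a hedged sketch using the Go API client; the field names come from `api/types/swarm.InitRequest`, and the pool/size values are purely illustrative:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	// DefaultAddrPool and SubnetSize correspond to the properties added to
	// POST /swarm/init in this API version; values below are examples only.
	nodeID, err := cli.SwarmInit(context.Background(), swarm.InitRequest{
		ListenAddr:      "0.0.0.0:2377",
		DefaultAddrPool: []string{"10.20.0.0/16"},
		SubnetSize:      24,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("initialized swarm, node ID:", nodeID)
}
```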
## V1.38 API changes

@@ -244,7 +247,7 @@ keywords: "API, Docker, rcli, REST, documentation"
* `GET /containers/create` now takes a `DeviceCgroupRules` field in `HostConfig` allowing to set custom device cgroup rules for the created container.
* Optional query parameter `verbose` for `GET /networks/(id or name)` will now list all services with all the tasks, including the non-local tasks on the given network.
* `GET /containers/(id or name)/attach/ws` now returns WebSocket in binary frame format for API version >= v1.28, and returns WebSocket in text frame format for API version < v1.28, for the purpose of backward-compatibility.
* `GET /networks` is optimised only to return list of all networks and network specific information. List of all containers attached to a specific network is removed from this API and is only available using the network specific `GET /networks/{network-id}.
* `GET /networks` is optimised only to return list of all networks and network specific information. List of all containers attached to a specific network is removed from this API and is only available using the network specific `GET /networks/{network-id}`.
* `GET /containers/json` now supports `publish` and `expose` filters to filter containers that expose or publish certain ports.
* `POST /services/create` and `POST /services/(id or name)/update` now accept the `ReadOnly` parameter, which mounts the container's root filesystem as read only.
* `POST /build` now accepts `extrahosts` parameter to specify a host to ip mapping to use during the build.
@@ -100,8 +100,8 @@ To build Moby, run:
Copy out the resulting Windows Moby Engine binary to `dockerd.exe` in the
current directory:

    docker cp binaries:C:\go\src\github.com\docker\docker\bundles\docker.exe docker.exe
    docker cp binaries:C:\go\src\github.com\docker\docker\bundles\dockerd.exe dockerd.exe
    docker cp binaries:C:\gopath\src\github.com\docker\docker\bundles\docker.exe docker.exe
    docker cp binaries:C:\gopath\src\github.com\docker\docker\bundles\dockerd.exe dockerd.exe

To test it, stop the system Docker daemon and start the one you just built:

@@ -109,7 +109,7 @@ To test it, stop the system Docker daemon and start the one you just built:
    .\dockerd.exe -D

The other make targets work too; to run unit tests, try:
`docker run --rm docker-builder sh -c 'cd /c/go/src/github.com/docker/docker; hack/make.sh test-unit'`.
`docker run --rm docker-builder sh -c 'cd /c/gopath/src/github.com/docker/docker; hack/make.sh test-unit'`.

### 6. Remove the interim binaries container

@@ -169,7 +169,7 @@ quit due to the use of console hooks which are not available.
The docker integration tests do not currently run in a container on Windows,
predominantly due to Windows not supporting privileged mode, so anything using a volume would fail.
They (along with the rest of the docker CI suite) can be run using
https://github.com/jhowardmsft/docker-w2wCIScripts/blob/master/runCI/Invoke-DockerCI.ps1.
https://github.com/kevpar/docker-w2wCIScripts/blob/master/runCI/Invoke-DockerCI.ps1.

## Where to go next

@@ -174,13 +174,13 @@ flag's value is passed as arguments to the `go test` command. For example, from
your local host you can run the `TestBuild` test with this command:

```bash
$ TESTFLAGS='-check.f DockerSuite.TestBuild*' make test-integration
$ TESTFLAGS='-test.run TestDockerSuite/TestBuild*' make test-integration
```

To run the same test inside your Docker development container, you do this:

```bash
# TESTFLAGS='-check.f TestBuild*' hack/make.sh binary test-integration
# TESTFLAGS='-test.run TestDockerSuite/TestBuild*' hack/make.sh binary test-integration
```

## Test the Windows binary against a Linux daemon

@@ -228,11 +228,11 @@ run a Bash terminal on Windows.
```

Should you wish to run a single test such as one with the name
'TestExample', you can pass in `TESTFLAGS='-check.f TestExample'`. For
'TestExample', you can pass in `TESTFLAGS='-test.run /TestExample'`. For
example

```bash
$ TESTFLAGS='-check.f TestExample' hack/make.sh binary test-integration
$ TESTFLAGS='-test.run /TestExample' hack/make.sh binary test-integration
```

You can now choose to make changes to the Moby source or the tests. If you
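The `-check.f` to `-test.run` change above moves the filter from gocheck to the standard Go test runner, where a slash-separated pattern is matched segment by segment against test and subtest names. A minimal illustration of that matching behaviour; the suite and test names below are placeholders, not the real integration-cli wiring:

```go
package integrationcli_test

import "testing"

// With `go test -run 'TestDockerSuite/TestBuild*'`, the first subtest below
// is selected and the second is filtered out.
func TestDockerSuite(t *testing.T) {
	t.Run("TestBuildLabels", func(t *testing.T) {
		// matched by TestDockerSuite/TestBuild*
	})
	t.Run("TestRunEcho", func(t *testing.T) {
		// not matched; skipped by the filter
	})
}
```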
121  docs/rootless.md
@@ -20,43 +20,107 @@ $ grep ^$(whoami): /etc/subgid
penguin:231072:65536
```

### Distribution-specific hint

#### Debian (excluding Ubuntu)
* `sudo sh -c "echo 1 > /proc/sys/kernel/unprivileged_userns_clone"` is required
Using the Ubuntu kernel is recommended.

#### Ubuntu
* No preparation is needed.
* `overlay2` is enabled by default ([Ubuntu-specific kernel patch](https://kernel.ubuntu.com/git/ubuntu/ubuntu-bionic.git/commit/fs/overlayfs?id=3b7da90f28fe1ed4b79ef2d994c81efbc58f1144)).
* Known to work on Ubuntu 16.04 and 18.04.

#### Debian GNU/Linux
* Add `kernel.unprivileged_userns_clone=1` to `/etc/sysctl.conf` (or `/etc/sysctl.d`) and run `sudo sysctl -p`
* To use the `overlay2` storage driver (recommended), run `sudo modprobe overlay permit_mounts_in_userns=1` ([Debian-specific kernel patch, introduced in Debian 10](https://salsa.debian.org/kernel-team/linux/blob/283390e7feb21b47779b48e0c8eb0cc409d2c815/debian/patches/debian/overlayfs-permit-mounts-in-userns.patch)). Put the configuration in `/etc/modprobe.d` for persistence.
* Known to work on Debian 9 and 10. `overlay2` is only supported since Debian 10 and needs the `modprobe` configuration described above.

#### Arch Linux
* `sudo sh -c "echo 1 > /proc/sys/kernel/unprivileged_userns_clone"` is required
* Add `kernel.unprivileged_userns_clone=1` to `/etc/sysctl.conf` (or `/etc/sysctl.d`) and run `sudo sysctl -p`

#### openSUSE
* `sudo modprobe ip_tables iptable_mangle iptable_nat iptable_filter` is required. (This is likely to be required on other distros as well)
* Known to work on openSUSE 15.

#### Fedora 31 and later
* Run `sudo grubby --update-kernel=ALL --args="systemd.unified_cgroup_hierarchy=0"` and reboot.

#### Fedora 30
* No preparation is needed

#### RHEL/CentOS 8
* No preparation is needed

#### RHEL/CentOS 7
* `sudo sh -c "echo 28633 > /proc/sys/user/max_user_namespaces"` is required
* [COPR package `vbatts/shadow-utils-newxidmap`](https://copr.fedorainfracloud.org/coprs/vbatts/shadow-utils-newxidmap/) needs to be installed
* Add `user.max_user_namespaces=28633` to `/etc/sysctl.conf` (or `/etc/sysctl.d`) and run `sudo sysctl -p`
* `systemctl --user` does not work by default. Run the daemon directly without systemd: `dockerd-rootless.sh --experimental --storage-driver vfs`
* Known to work on RHEL/CentOS 7.7. Older releases require extra configuration steps.
* RHEL/CentOS 7.6 and older releases require [COPR package `vbatts/shadow-utils-newxidmap`](https://copr.fedorainfracloud.org/coprs/vbatts/shadow-utils-newxidmap/) to be installed.
* RHEL/CentOS 7.5 and older releases require running `sudo grubby --update-kernel=ALL --args="user_namespace.enable=1"` and a reboot.

## Restrictions
## Known limitations

* Only the `vfs` graphdriver is supported. However, on [Ubuntu](http://kernel.ubuntu.com/git/ubuntu/ubuntu-artful.git/commit/fs/overlayfs?h=Ubuntu-4.13.0-25.29&id=0a414bdc3d01f3b61ed86cfe3ce8b63a9240eba7) and a few distros, `overlay2` and `overlay` are also supported.
* Only the `vfs` graphdriver is supported. However, on Ubuntu and Debian 10, `overlay2` and `overlay` are also supported.
* The following features are not supported:
  * Cgroups (including `docker top`, which depends on the cgroups device controller)
  * AppArmor
  * Checkpoint
  * Overlay network
  * Exposing SCTP ports
* To expose a TCP/UDP port, the host port number needs to be set to >= 1024.
* To use the `ping` command, see [Routing ping packets](#routing-ping-packets)
* To expose privileged TCP/UDP ports (< 1024), see [Exposing privileged ports](#exposing-privileged-ports)

## Install

The installation script is available at https://get.docker.com/rootless .

```console
$ curl -fsSL https://get.docker.com/rootless | sh
```

Make sure to run the script as a non-root user.

The script will show the environment variables that need to be set:

```console
$ curl -fsSL https://get.docker.com/rootless | sh
...
# Docker binaries are installed in /home/penguin/bin
# WARN: dockerd is not in your current PATH or pointing to /home/penguin/bin/dockerd
# Make sure the following environment variables are set (or add them to ~/.bashrc):

export PATH=/home/penguin/bin:$PATH
export PATH=$PATH:/sbin
export DOCKER_HOST=unix:///run/user/1001/docker.sock

#
# To control docker service run:
# systemctl --user (start|stop|restart) docker
#
```

To install the binaries manually without using the installer, extract `docker-rootless-extras-<version>.tar.gz` along with `docker-<version>.tar.gz`: https://download.docker.com/linux/static/stable/x86_64/

## Usage

### Daemon

You need to run `dockerd-rootless.sh` instead of `dockerd`.
Use `systemctl --user` to manage the lifecycle of the daemon:

```console
$ dockerd-rootless.sh --experimental
$ systemctl --user start docker
```
As Rootless mode is experimental per se, currently you always need to run `dockerd-rootless.sh` with `--experimental`.

To launch the daemon on system startup, enable systemd lingering:

```console
$ sudo loginctl enable-linger $(whoami)
```

To run the daemon directly without systemd, you need to run `dockerd-rootless.sh` instead of `dockerd`:

```console
$ dockerd-rootless.sh --experimental --storage-driver vfs
```

As Rootless mode is experimental, currently you always need to run `dockerd-rootless.sh` with `--experimental`.
You also need `--storage-driver vfs` unless using an Ubuntu or Debian 10 kernel.

Remarks:
* The socket path is set to `$XDG_RUNTIME_DIR/docker.sock` by default. `$XDG_RUNTIME_DIR` is typically set to `/run/user/$UID`.

@@ -69,12 +133,24 @@ Remarks:

### Client

You can just use the upstream Docker client but you need to set the socket path explicitly.
You need to set the socket path explicitly.

```console
$ docker -H unix://$XDG_RUNTIME_DIR/docker.sock run -d nginx
$ export DOCKER_HOST=unix://$XDG_RUNTIME_DIR/docker.sock
$ docker run -d nginx
```
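The same applies to API clients: only `DOCKER_HOST` needs to point at the rootless socket. A minimal sketch with the Go SDK, which simply honours the environment variable exported above (illustrative, not part of the change):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/client"
)

func main() {
	// FromEnv picks up DOCKER_HOST=unix://$XDG_RUNTIME_DIR/docker.sock,
	// so no rootless-specific code is needed on the client side.
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	info, err := cli.Info(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("connected to daemon:", info.Name)
}
```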

### Rootless Docker in Docker

To run Rootless Docker inside "rootful" Docker, use the `docker:<version>-dind-rootless` image instead of the `docker:<version>-dind` image.

```console
$ docker run -d --name dind-rootless --privileged docker:19.03-dind-rootless --experimental
```

The `docker:<version>-dind-rootless` image runs as a non-root user (UID 1000).
However, `--privileged` is required for disabling seccomp, AppArmor, and mount masks.

### Expose Docker API socket via TCP

To expose the Docker API socket via TCP, you need to launch `dockerd-rootless.sh` with `DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS="-p 0.0.0.0:2376:2376/tcp"`.

@@ -88,12 +164,23 @@ $ DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS="-p 0.0.0.0:2376:2376/tcp" \

### Routing ping packets

To route ping packets, you need to set up `net.ipv4.ping_group_range` properly as root.
Add `net.ipv4.ping_group_range = 0 2147483647` to `/etc/sysctl.conf` (or `/etc/sysctl.d`) and run `sudo sysctl -p`.

### Exposing privileged ports

To expose privileged ports (< 1024), set `CAP_NET_BIND_SERVICE` on the `rootlesskit` binary.

```console
$ sudo sh -c "echo 0 2147483647 > /proc/sys/net/ipv4/ping_group_range"
$ sudo setcap cap_net_bind_service=ep $HOME/bin/rootlesskit
```

Or add `net.ipv4.ip_unprivileged_port_start=0` to `/etc/sysctl.conf` (or `/etc/sysctl.d`) and run `sudo sysctl -p`.

### Limiting resources

Currently rootless mode ignores cgroup-related `docker run` flags such as `--cpus` and `--memory`.
However, traditional `ulimit` and [`cpulimit`](https://github.com/opsengine/cpulimit) can still be used, though they work at process granularity rather than container granularity.

### Changing network stack

`dockerd-rootless.sh` uses [slirp4netns](https://github.com/rootless-containers/slirp4netns) (if installed) or [VPNKit](https://github.com/moby/vpnkit) as the network stack by default.

@@ -4,6 +4,7 @@ import (
	"fmt"
	"net/http"

	containerderrors "github.com/containerd/containerd/errdefs"
	"github.com/docker/distribution/registry/api/errcode"
	"github.com/sirupsen/logrus"
	"google.golang.org/grpc/codes"
@@ -47,6 +48,10 @@ func GetHTTPErrorStatusCode(err error) int {
		if statusCode != http.StatusInternalServerError {
			return statusCode
		}
		statusCode = statusCodeFromContainerdError(err)
		if statusCode != http.StatusInternalServerError {
			return statusCode
		}
		statusCode = statusCodeFromDistributionError(err)
		if statusCode != http.StatusInternalServerError {
			return statusCode
@@ -136,9 +141,6 @@ func statusCodeFromGRPCError(err error) int {
	case codes.Unavailable: // code 14
		return http.StatusServiceUnavailable
	default:
		if e, ok := err.(causer); ok {
			return statusCodeFromGRPCError(e.Cause())
		}
		// codes.Canceled(1)
		// codes.Unknown(2)
		// codes.DeadlineExceeded(4)
@@ -163,10 +165,27 @@ func statusCodeFromDistributionError(err error) int {
		}
	case errcode.ErrorCoder:
		return errs.ErrorCode().Descriptor().HTTPStatusCode
	default:
		if e, ok := err.(causer); ok {
			return statusCodeFromDistributionError(e.Cause())
		}
	}
	return http.StatusInternalServerError
}

// statusCodeFromContainerdError returns status code for containerd errors when
// consumed directly (not through gRPC)
func statusCodeFromContainerdError(err error) int {
	switch {
	case containerderrors.IsInvalidArgument(err):
		return http.StatusBadRequest
	case containerderrors.IsNotFound(err):
		return http.StatusNotFound
	case containerderrors.IsAlreadyExists(err):
		return http.StatusConflict
	case containerderrors.IsFailedPrecondition(err):
		return http.StatusPreconditionFailed
	case containerderrors.IsUnavailable(err):
		return http.StatusServiceUnavailable
	case containerderrors.IsNotImplemented(err):
		return http.StatusNotImplemented
	default:
		return http.StatusInternalServerError
	}
}
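A hedged usage sketch of the mapping added above. It assumes the exported helper lives in `api/server/httputils` as in moby (`GetHTTPErrorStatusCode` is named in the hunk header); everything else is illustrative:

```go
package main

import (
	"fmt"

	cerrdefs "github.com/containerd/containerd/errdefs"
	"github.com/docker/docker/api/server/httputils"
	"github.com/pkg/errors"
)

func main() {
	// A wrapped containerd "not found" error should now map to 404
	// via statusCodeFromContainerdError instead of falling through to 500.
	err := errors.Wrap(cerrdefs.ErrNotFound, "container abc123")
	fmt.Println(httputils.GetHTTPErrorStatusCode(err)) // expected: 404 (http.StatusNotFound)
}
```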

13  hack/ci/master (Executable file)
@@ -0,0 +1,13 @@
#!/usr/bin/env bash
# Entrypoint for jenkins master CI build
set -eu -o pipefail

hack/validate/default
hack/test/unit

hack/make.sh \
	binary-daemon \
	dynbinary \
	test-docker-py \
	test-integration \
	cross
@@ -1,7 +1,7 @@
|
||||
# WARNING WARNING WARNING - DO NOT EDIT THIS FILE IN JENKINS DIRECTLY.
|
||||
# SUBMIT A PR TO https://github.com/jhowardmsft/docker-w2wCIScripts/blob/master/runCI/executeCI.ps1,
|
||||
# AND MAKE SURE https://github.com/jhowardmsft/docker-w2wCIScripts/blob/master/runCI/Invoke-DockerCI.ps1
|
||||
# ISN'T BROKEN!!!!!!! VALIDATE USING A TEST CONTEXT IN JENKINS. THEN COPY/PASTE INTO JENKINS PRODUCTION.
|
||||
# WARNING: When editing this file, consider submitting a PR to
|
||||
# https://github.com/kevpar/docker-w2wCIScripts/blob/master/runCI/executeCI.ps1, and make sure that
|
||||
# https://github.com/kevpar/docker-w2wCIScripts/blob/master/runCI/Invoke-DockerCI.ps1 isn't broken.
|
||||
# Validate using a test context in Jenkins, then copy/paste into Jenkins production.
|
||||
#
|
||||
# Jenkins CI scripts for Windows to Windows CI (Powershell Version)
|
||||
# By John Howard (@jhowardmsft) January 2016 - bash version; July 2016 Ported to PowerShell
|
||||
@@ -15,6 +15,11 @@ if ($env:BUILD_TAG -match "-LoW") { $env:LCOW_MODE=1 }
|
||||
if ($env:BUILD_TAG -match "-WoW") { $env:LCOW_MODE="" }
|
||||
|
||||
|
||||
Write-Host -ForegroundColor Red "DEBUG: print all environment variables to check how Jenkins runs this script"
|
||||
$allArgs = [Environment]::GetCommandLineArgs()
|
||||
Write-Host -ForegroundColor Red $allArgs
|
||||
Write-Host -ForegroundColor Red "----------------------------------------------------------------------------"
|
||||
|
||||
# -------------------------------------------------------------------------------------------
|
||||
# When executed, we rely on four variables being set in the environment:
|
||||
#
|
||||
@@ -46,10 +51,24 @@ if ($env:BUILD_TAG -match "-WoW") { $env:LCOW_MODE="" }
|
||||
# TESTRUN_DRIVE\TESTRUN_SUBDIR\CI-<CommitID> or
|
||||
# d:\CI\CI-<CommitID>
|
||||
#
|
||||
# Optional environment variables help in CI:
|
||||
#
|
||||
# BUILD_NUMBER + BRANCH_NAME are optional variables to be added to the directory below TESTRUN_SUBDIR
#                          so that each CI build gets an individual folder. This is useful if some files could not be
#                          cleaned up and we want to re-run the build in CI.
|
||||
# Hence, the daemon under test is run under
|
||||
# TESTRUN_DRIVE\TESTRUN_SUBDIR\PR-<PR-Number>\<BuildNumber> or
|
||||
# d:\CI\PR-<PR-Number>\<BuildNumber>
|
||||
#
|
||||
# In addition, the following variables can control the run configuration:
|
||||
#
|
||||
# DOCKER_DUT_DEBUG if defined starts the daemon under test in debug mode.
|
||||
#
|
||||
# DOCKER_STORAGE_OPTS comma-separated list of optional storage driver options for the daemon under test
|
||||
# examples:
|
||||
# DOCKER_STORAGE_OPTS="size=40G"
|
||||
# DOCKER_STORAGE_OPTS="lcow.globalmode=false,lcow.kernel=kernel.efi"
|
||||
#
|
||||
# SKIP_VALIDATION_TESTS if defined skips the validation tests
|
||||
#
|
||||
# SKIP_UNIT_TESTS if defined skips the unit tests
|
||||
@@ -78,6 +97,9 @@ if ($env:BUILD_TAG -match "-WoW") { $env:LCOW_MODE="" }
|
||||
# docker integration tests are also coded to use the same
|
||||
# environment variable, and if not set, defaults to microsoft/windowsservercore
|
||||
#
|
||||
# WINDOWS_BASE_IMAGE_TAG if defined, uses that as the tag name for the base image.
|
||||
# if not set, defaults to latest
|
||||
#
|
||||
# LCOW_BASIC_MODE if defined, does very basic LCOW verification. Ultimately we
|
||||
# want to run the entire CI suite from docker, but that's a way off.
|
||||
#
|
||||
@@ -88,7 +110,7 @@ if ($env:BUILD_TAG -match "-WoW") { $env:LCOW_MODE="" }
|
||||
# Jenkins Integration. Add a Windows Powershell build step as follows:
|
||||
#
|
||||
# Write-Host -ForegroundColor green "INFO: Jenkins build step starting"
|
||||
# $CISCRIPT_DEFAULT_LOCATION = "https://raw.githubusercontent.com/jhowardmsft/docker-w2wCIScripts/master/runCI/executeCI.ps1"
|
||||
# $CISCRIPT_DEFAULT_LOCATION = "https://raw.githubusercontent.com/moby/moby/master/hack/ci/windows.ps1"
|
||||
# $CISCRIPT_LOCAL_LOCATION = "$env:TEMP\executeCI.ps1"
|
||||
# Write-Host -ForegroundColor green "INFO: Removing cached execution script"
|
||||
# Remove-Item $CISCRIPT_LOCAL_LOCATION -Force -ErrorAction SilentlyContinue 2>&1 | Out-Null
|
||||
@@ -139,7 +161,7 @@ Function Nuke-Everything {
|
||||
}
|
||||
|
||||
$allImages = $(docker images --format "{{.Repository}}#{{.ID}}")
|
||||
$toRemove = ($allImages | Select-String -NotMatch "windowsservercore","nanoserver","docker")
|
||||
$toRemove = ($allImages | Select-String -NotMatch "servercore","nanoserver","docker")
|
||||
$imageCount = ($toRemove | Measure-Object -line).Lines
|
||||
|
||||
if ($imageCount -gt 0) {
|
||||
@@ -200,12 +222,8 @@ Function Nuke-Everything {
|
||||
$count=(Get-ChildItem $reg | Measure-Object).Count
|
||||
if ($count -gt 0) {
|
||||
Write-Warning "There are $count NdisAdapters leaked under Psched\Parameters"
|
||||
if ($env:COMPUTERNAME -match "jenkins-rs1-") {
|
||||
Write-Warning "Cleaning Psched..."
|
||||
Get-ChildItem $reg | Remove-Item -Recurse -Force -ErrorAction SilentlyContinue | Out-Null
|
||||
} else {
|
||||
Write-Warning "Not cleaning as not a production RS1 server"
|
||||
}
|
||||
Write-Warning "Cleaning Psched..."
|
||||
Get-ChildItem $reg | Remove-Item -Recurse -Force -ErrorAction SilentlyContinue | Out-Null
|
||||
}
|
||||
|
||||
# TODO: This should be able to be removed in August 2017 update. Only needed for RS1
|
||||
@@ -213,12 +231,8 @@ Function Nuke-Everything {
|
||||
$count=(Get-ChildItem $reg | Measure-Object).Count
|
||||
if ($count -gt 0) {
|
||||
Write-Warning "There are $count NdisAdapters leaked under WFPLWFS\Parameters"
|
||||
if ($env:COMPUTERNAME -match "jenkins-rs1-") {
|
||||
Write-Warning "Cleaning WFPLWFS..."
|
||||
Get-ChildItem $reg | Remove-Item -Recurse -Force -ErrorAction SilentlyContinue | Out-Null
|
||||
} else {
|
||||
Write-Warning "Not cleaning as not a production RS1 server"
|
||||
}
|
||||
Write-Warning "Cleaning WFPLWFS..."
|
||||
Get-ChildItem $reg | Remove-Item -Recurse -Force -ErrorAction SilentlyContinue | Out-Null
|
||||
}
|
||||
} catch {
|
||||
# Don't throw any errors onwards; Throw $_
|
||||
@@ -261,6 +275,18 @@ Try {
|
||||
# Make sure docker-ci-zap is installed
|
||||
if ($null -eq (Get-Command "docker-ci-zap" -ErrorAction SilentlyContinue)) { Throw "ERROR: docker-ci-zap is not installed or not found on path" }
|
||||
|
||||
# Make sure Windows Defender is disabled
|
||||
$defender = $false
|
||||
Try {
|
||||
$status = Get-MpComputerStatus
|
||||
if ($status) {
|
||||
if ($status.RealTimeProtectionEnabled) {
|
||||
$defender = $true
|
||||
}
|
||||
}
|
||||
} Catch {}
|
||||
if ($defender) { Write-Host -ForegroundColor Magenta "WARN: Windows Defender real time protection is enabled, which may cause some integration tests to fail" }
|
||||
|
||||
# Make sure SOURCES_DRIVE is set
|
||||
if ($null -eq $env:SOURCES_DRIVE) { Throw "ERROR: Environment variable SOURCES_DRIVE is not set" }
|
||||
|
||||
@@ -345,14 +371,16 @@ Try {
|
||||
Write-Host -ForegroundColor Green "INFO: docker load of"$ControlDaemonBaseImage" completed successfully"
|
||||
} else {
|
||||
# We need to docker pull it instead. It will come in directly as microsoft/imagename:latest
|
||||
Write-Host -ForegroundColor Green $("INFO: Pulling microsoft/"+$ControlDaemonBaseImage+":latest from docker hub. This may take some time...")
|
||||
Write-Host -ForegroundColor Green $("INFO: Pulling $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG from docker hub. This may take some time...")
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
docker pull $("microsoft/"+$ControlDaemonBaseImage)
|
||||
docker pull "$($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG"
|
||||
$ErrorActionPreference = "Stop"
|
||||
if (-not $LastExitCode -eq 0) {
|
||||
Throw $("ERROR: Failed to docker pull microsoft/"+$ControlDaemonBaseImage+":latest.")
|
||||
Throw $("ERROR: Failed to docker pull $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG.")
|
||||
}
|
||||
Write-Host -ForegroundColor Green $("INFO: docker pull of microsoft/"+$ControlDaemonBaseImage+":latest completed successfully")
|
||||
Write-Host -ForegroundColor Green $("INFO: docker pull of $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG completed successfully")
|
||||
Write-Host -ForegroundColor Green $("INFO: Tagging $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG as microsoft/$ControlDaemonBaseImage")
|
||||
docker tag "$($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG" microsoft/$ControlDaemonBaseImage
|
||||
}
|
||||
} else {
|
||||
Write-Host -ForegroundColor Green "INFO: Image"$("microsoft/"+$ControlDaemonBaseImage+":latest")"is already loaded in the control daemon"
|
||||
@@ -409,7 +437,12 @@ Try {
|
||||
|
||||
# Redirect to a temporary location.
|
||||
$TEMPORIG=$env:TEMP
|
||||
$env:TEMP="$env:TESTRUN_DRIVE`:\$env:TESTRUN_SUBDIR\CI-$COMMITHASH"
|
||||
if ($null -eq $env:BUILD_NUMBER) {
|
||||
$env:TEMP="$env:TESTRUN_DRIVE`:\$env:TESTRUN_SUBDIR\CI-$COMMITHASH"
|
||||
} else {
|
||||
# individual temporary location per CI build that better matches the BUILD_URL
|
||||
$env:TEMP="$env:TESTRUN_DRIVE`:\$env:TESTRUN_SUBDIR\$env:BRANCH_NAME\$env:BUILD_NUMBER"
|
||||
}
|
||||
$env:LOCALAPPDATA="$env:TEMP\localappdata"
|
||||
$errorActionPreference='Stop'
|
||||
New-Item -ItemType Directory "$env:TEMP" -ErrorAction SilentlyContinue | Out-Null
|
||||
@@ -453,7 +486,7 @@ Try {
|
||||
}
|
||||
|
||||
# Following at the moment must be docker\docker as it's dictated by dockerfile.Windows
|
||||
$contPath="$COMMITHASH`:c`:\go\src\github.com\docker\docker\bundles"
|
||||
$contPath="$COMMITHASH`:c`:\gopath\src\github.com\docker\docker\bundles"
|
||||
|
||||
# After https://github.com/docker/docker/pull/30290, .git was added to .dockerignore. Therefore
|
||||
# we have to calculate unsupported outside of the container, and pass the commit ID in through
|
||||
@@ -571,6 +604,15 @@ Try {
|
||||
$dutArgs += "--exec-opt isolation=hyperv"
|
||||
}
|
||||
|
||||
# Arguments: Allow setting optional storage-driver options
|
||||
# example usage: DOCKER_STORAGE_OPTS="lcow.globalmode=false,lcow.kernel=kernel.efi"
|
||||
if (-not ("$env:DOCKER_STORAGE_OPTS" -eq "")) {
|
||||
Write-Host -ForegroundColor Green "INFO: Running the daemon under test with storage-driver options ${env:DOCKER_STORAGE_OPTS}"
|
||||
$env:DOCKER_STORAGE_OPTS.Split(",") | ForEach {
|
||||
$dutArgs += "--storage-opt $_"
|
||||
}
|
||||
}
|
||||
|
||||
# Start the daemon under test, ensuring everything is redirected to folders under $TEMP.
|
||||
# Important - we launch the -$COMMITHASH version so that we can kill it without
|
||||
# killing the control daemon.
|
||||
@@ -599,7 +641,8 @@ Try {
|
||||
|
||||
# Start tailing the daemon under test if the command is installed
|
||||
if ($null -ne (Get-Command "tail" -ErrorAction SilentlyContinue)) {
|
||||
$tail = start-process "tail" -ArgumentList "-f $env:TEMP\dut.out" -ErrorAction SilentlyContinue
|
||||
Write-Host -ForegroundColor green "INFO: Start tailing logs of the daemon under tests"
|
||||
$tail = Start-Process "tail" -ArgumentList "-f $env:TEMP\dut.out" -PassThru -ErrorAction SilentlyContinue
|
||||
}
|
||||
|
||||
# Verify we can get the daemon under test to respond
|
||||
@@ -663,17 +706,20 @@ Try {
|
||||
if ($null -eq $env:WINDOWS_BASE_IMAGE) {
|
||||
$env:WINDOWS_BASE_IMAGE="microsoft/windowsservercore"
|
||||
}
|
||||
if ($null -eq $env:WINDOWS_BASE_IMAGE_TAG) {
|
||||
$env:WINDOWS_BASE_IMAGE_TAG="latest"
|
||||
}
|
||||
|
||||
# Lowercase and make sure it has a microsoft/ prefix
|
||||
$env:WINDOWS_BASE_IMAGE = $env:WINDOWS_BASE_IMAGE.ToLower()
|
||||
if ($($env:WINDOWS_BASE_IMAGE -Split "/")[0] -ne "microsoft") {
|
||||
Throw "ERROR: WINDOWS_BASE_IMAGE should start microsoft/"
|
||||
if (! $($env:WINDOWS_BASE_IMAGE -Split "/")[0] -match "microsoft") {
|
||||
Throw "ERROR: WINDOWS_BASE_IMAGE should start microsoft/ or mcr.microsoft.com/"
|
||||
}
|
||||
|
||||
Write-Host -ForegroundColor Green "INFO: Base image for tests is $env:WINDOWS_BASE_IMAGE"
|
||||
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
if ($((& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" images --format "{{.Repository}}:{{.Tag}}" | Select-String $($env:WINDOWS_BASE_IMAGE+":latest") | Measure-Object -Line).Lines) -eq 0) {
|
||||
if ($((& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" images --format "{{.Repository}}:{{.Tag}}" | Select-String "$($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG" | Measure-Object -Line).Lines) -eq 0) {
|
||||
# Try the internal azure CI image version or Microsoft internal corpnet where the base image is already pre-prepared on the disk,
|
||||
# either through Invoke-DockerCI or, in the case of Azure CI servers, baked into the VHD at the same location.
|
||||
if (Test-Path $("c:\baseimages\"+$($env:WINDOWS_BASE_IMAGE -Split "/")[1]+".tar")) {
|
||||
@@ -686,26 +732,28 @@ Try {
|
||||
}
|
||||
Write-Host -ForegroundColor Green "INFO: docker load of"$($env:WINDOWS_BASE_IMAGE -Split "/")[1]" into daemon under test completed successfully"
|
||||
} else {
|
||||
# We need to docker pull it instead. It will come in directly as microsoft/imagename:latest
|
||||
Write-Host -ForegroundColor Green $("INFO: Pulling "+$env:WINDOWS_BASE_IMAGE+":latest from docker hub into daemon under test. This may take some time...")
|
||||
# We need to docker pull it instead. It will come in directly as microsoft/imagename:tagname
|
||||
Write-Host -ForegroundColor Green $("INFO: Pulling "+$env:WINDOWS_BASE_IMAGE+":"+$env:WINDOWS_BASE_IMAGE_TAG+" from docker hub into daemon under test. This may take some time...")
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" pull $($env:WINDOWS_BASE_IMAGE)
|
||||
& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" pull "$($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG"
|
||||
$ErrorActionPreference = "Stop"
|
||||
if (-not $LastExitCode -eq 0) {
|
||||
Throw $("ERROR: Failed to docker pull "+$env:WINDOWS_BASE_IMAGE+":latest into daemon under test.")
|
||||
Throw $("ERROR: Failed to docker pull $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG into daemon under test.")
|
||||
}
|
||||
Write-Host -ForegroundColor Green $("INFO: docker pull of "+$env:WINDOWS_BASE_IMAGE+":latest into daemon under test completed successfully")
|
||||
Write-Host -ForegroundColor Green $("INFO: docker pull of $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG into daemon under test completed successfully")
|
||||
Write-Host -ForegroundColor Green $("INFO: Tagging $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG as microsoft/$ControlDaemonBaseImage in daemon under test")
|
||||
& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" tag "$($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG" microsoft/$ControlDaemonBaseImage
|
||||
}
|
||||
} else {
|
||||
Write-Host -ForegroundColor Green "INFO: Image"$($env:WINDOWS_BASE_IMAGE+":latest")"is already loaded in the daemon under test"
|
||||
Write-Host -ForegroundColor Green "INFO: Image $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG is already loaded in the daemon under test"
|
||||
}
|
||||
|
||||
|
||||
# Inspect the pulled or loaded image to get the version directly
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
$dutimgVersion = $(&"$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" inspect $($env:WINDOWS_BASE_IMAGE) --format "{{.OsVersion}}")
|
||||
$dutimgVersion = $(&"$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" inspect "$($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG" --format "{{.OsVersion}}")
|
||||
$ErrorActionPreference = "Stop"
|
||||
Write-Host -ForegroundColor Green $("INFO: Version of "+$env:WINDOWS_BASE_IMAGE+":latest is '"+$dutimgVersion+"'")
|
||||
Write-Host -ForegroundColor Green $("INFO: Version of $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG is '"+$dutimgVersion+"'")
|
||||
}
|
||||
|
||||
# Run the validation tests unless SKIP_VALIDATION_TESTS is defined.
|
||||
@@ -752,14 +800,7 @@ Try {
|
||||
#if ($bbCount -eq 0) {
|
||||
Write-Host -ForegroundColor Green "INFO: Building busybox"
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
|
||||
# This is a temporary hack for nanoserver
|
||||
if ($env:WINDOWS_BASE_IMAGE -ne "microsoft/windowsservercore") {
|
||||
Write-Host -ForegroundColor Red "HACK HACK HACK - Building 64-bit nanoserver busybox image"
|
||||
$(& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" build -t busybox https://raw.githubusercontent.com/jhowardmsft/busybox64/v1.1/Dockerfile | Out-Host)
|
||||
} else {
|
||||
$(& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" build -t busybox https://raw.githubusercontent.com/jhowardmsft/busybox/v1.1/Dockerfile | Out-Host)
|
||||
}
|
||||
$(& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" build -t busybox https://raw.githubusercontent.com/moby/busybox/v1.1/Dockerfile | Out-Host)
|
||||
$ErrorActionPreference = "Stop"
|
||||
if (-not($LastExitCode -eq 0)) {
|
||||
Throw "ERROR: Failed to build busybox image"
|
||||
@@ -790,14 +831,13 @@ Try {
|
||||
|
||||
#https://blogs.technet.microsoft.com/heyscriptingguy/2011/09/20/solve-problems-with-external-command-lines-in-powershell/ is useful to see tokenising
|
||||
$c = "go test "
|
||||
$c += "`"-check.v`" "
|
||||
$c += "`"-test.v`" "
|
||||
if ($null -ne $env:INTEGRATION_TEST_NAME) { # Makes it quicker for debugging to be able to run only a subset of the integration tests
|
||||
$c += "`"-check.f`" "
|
||||
$c += "`"-test.run`" "
|
||||
$c += "`"$env:INTEGRATION_TEST_NAME`" "
|
||||
Write-Host -ForegroundColor Magenta "WARN: Only running integration tests matching $env:INTEGRATION_TEST_NAME"
|
||||
}
|
||||
$c += "`"-tags`" " + "`"autogen`" "
|
||||
$c += "`"-check.timeout`" " + "`"10m`" "
|
||||
$c += "`"-test.timeout`" " + "`"200m`" "
|
||||
|
||||
if ($null -ne $env:INTEGRATION_IN_CONTAINER) {
|
||||
@@ -810,7 +850,7 @@ Try {
|
||||
$Duration= $(Measure-Command { & docker run `
|
||||
--rm `
|
||||
-e c=$c `
|
||||
--workdir "c`:\go\src\github.com\docker\docker\integration-cli" `
|
||||
--workdir "c`:\gopath\src\github.com\docker\docker\integration-cli" `
|
||||
-v "$env:TEMP\binary`:c:\target" `
|
||||
docker `
|
||||
"`$env`:PATH`='c`:\target;'+`$env:PATH`; `$env:DOCKER_HOST`='tcp`://'+(ipconfig | select -last 1).Substring(39)+'`:2357'; c:\target\runIntegrationCLI.ps1" | Out-Host } )
|
||||
@@ -867,7 +907,7 @@ Try {
|
||||
$wc = New-Object net.webclient
|
||||
try {
|
||||
Write-Host -ForegroundColor green "INFO: Downloading latest execution script..."
|
||||
$wc.Downloadfile("https://raw.githubusercontent.com/jhowardmsft/docker-w2wCIScripts/master/runCI/lcowbasicvalidation.ps1", "$env:TEMP\binary\lcowbasicvalidation.ps1")
|
||||
$wc.Downloadfile("https://raw.githubusercontent.com/kevpar/docker-w2wCIScripts/master/runCI/lcowbasicvalidation.ps1", "$env:TEMP\binary\lcowbasicvalidation.ps1")
|
||||
}
|
||||
catch [System.Net.WebException]
|
||||
{
|
||||
@@ -885,14 +925,13 @@ Try {
|
||||
} else {
|
||||
#https://blogs.technet.microsoft.com/heyscriptingguy/2011/09/20/solve-problems-with-external-command-lines-in-powershell/ is useful to see tokenising
|
||||
$c = "go test "
|
||||
$c += "`"-check.v`" "
|
||||
$c += "`"-test.v`" "
|
||||
if ($null -ne $env:INTEGRATION_TEST_NAME) { # Makes it quicker for debugging to be able to run only a subset of the integration tests
|
||||
$c += "`"-check.f`" "
|
||||
$c += "`"-test.run`" "
|
||||
$c += "`"$env:INTEGRATION_TEST_NAME`" "
|
||||
Write-Host -ForegroundColor Magenta "WARN: Only running LCOW integration tests matching $env:INTEGRATION_TEST_NAME"
|
||||
}
|
||||
$c += "`"-tags`" " + "`"autogen`" "
|
||||
$c += "`"-check.timeout`" " + "`"10m`" "
|
||||
$c += "`"-test.timeout`" " + "`"200m`" "
|
||||
|
||||
Write-Host -ForegroundColor Green "INFO: LCOW Integration tests being run from the host:"
|
||||
@@ -937,6 +976,12 @@ Try {
|
||||
Remove-Item "$env:TEMP\docker.pid" -force -ErrorAction SilentlyContinue
|
||||
}
|
||||
|
||||
# Stop the tail process (if started)
|
||||
if ($null -ne $tail) {
|
||||
Write-Host -ForegroundColor green "INFO: Stop tailing logs of the daemon under tests"
|
||||
Stop-Process -InputObject $tail -Force
|
||||
}
|
||||
|
||||
Write-Host -ForegroundColor Green "INFO: executeCI.ps1 Completed successfully at $(Get-Date)."
|
||||
}
|
||||
Catch [Exception] {
|
||||
@@ -953,6 +998,9 @@ Catch [Exception] {
|
||||
Throw $_
|
||||
}
|
||||
Finally {
|
||||
# Preserve the LastExitCode of the tests
|
||||
$tmpLastExitCode = $LastExitCode
|
||||
|
||||
$ErrorActionPreference="SilentlyContinue"
|
||||
$global:ProgressPreference=$origProgressPreference
|
||||
Write-Host -ForegroundColor Green "INFO: Tidying up at end of run"
|
||||
@@ -984,6 +1032,12 @@ Finally {
|
||||
|
||||
Set-Location "$env:SOURCES_DRIVE\$env:SOURCES_SUBDIR" -ErrorAction SilentlyContinue
|
||||
Nuke-Everything
|
||||
|
||||
# Restore the TEMP path
|
||||
if ($null -ne $TEMPORIG) { $env:TEMP="$TEMPORIG" }
|
||||
|
||||
$Dur=New-TimeSpan -Start $StartTime -End $(Get-Date)
|
||||
Write-Host -ForegroundColor $FinallyColour "`nINFO: executeCI.ps1 exiting at $(date). Duration $dur`n"
|
||||
|
||||
exit $tmpLastExitCode
|
||||
}
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
# containerd is also pinned in vendor.conf. When updating the binary
|
||||
# version you may also need to update the vendor version to pick up bug
|
||||
# fixes or new APIs.
|
||||
CONTAINERD_COMMIT=894b81a4b802e4eb2a91d1ce216b8817763c29fb # v1.2.6
|
||||
CONTAINERD_COMMIT=35bd7a5f69c13e1563af8a93431411cd9ecf5021 # v1.2.12
|
||||
|
||||
install_containerd() {
|
||||
echo "Install containerd version $CONTAINERD_COMMIT"
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
# LIBNETWORK_COMMIT is used to build the docker-userland-proxy binary. When
|
||||
# updating the binary version, consider updating github.com/docker/libnetwork
|
||||
# in vendor.conf accordingly
|
||||
LIBNETWORK_COMMIT=fc5a7d91d54cc98f64fc28f9e288b46a0bee756c
|
||||
LIBNETWORK_COMMIT=9fd385be8302dbe1071a3ce124891893ff27f90f # bump_19.03 branch
|
||||
|
||||
install_proxy() {
|
||||
case "$1" in
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
#!/bin/sh
|
||||
|
||||
# v0.6.0
|
||||
ROOTLESSKIT_COMMIT=2fcff6ceae968a1d895e6205e5154b107247356f
|
||||
# v0.7.1
|
||||
: ${ROOTLESSKIT_COMMIT:=76c4e26750da3986fa0e741464fbf0fcd55bea71}
|
||||
|
||||
install_rootlesskit() {
|
||||
case "$1" in
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
# The version of runc should match the version that is used by the containerd
|
||||
# version that is used. If you need to update runc, open a pull request in
|
||||
# the containerd project first, and update both after that is merged.
|
||||
RUNC_COMMIT=425e105d5a03fabd737a126ad93d62a9eeede87f # v1.0.0-rc8
|
||||
RUNC_COMMIT=dc9208a3303feef5b3839f4323d9beb36df0a9dd # v1.0.0-rc10
|
||||
|
||||
install_runc() {
|
||||
# If using RHEL7 kernels (3.10.0 el7), disable kmem accounting/limiting
|
||||
|
||||
@@ -134,7 +134,7 @@ Function Check-InContainer() {
|
||||
# outside of a container where it may be out of date with master.
|
||||
Function Verify-GoVersion() {
|
||||
Try {
|
||||
$goVersionDockerfile=(Select-String -Path ".\Dockerfile" -Pattern "^ARG[\s]+GO_VERSION=(.*)$").Matches.groups[1].Value.TrimEnd(".0")
|
||||
$goVersionDockerfile=(Select-String -Path ".\Dockerfile" -Pattern "^ARG[\s]+GO_VERSION=(.*)$").Matches.groups[1].Value -replace '\.0$',''
|
||||
$goVersionInstalled=(go version).ToString().Split(" ")[2].SubString(2)
|
||||
}
|
||||
Catch [Exception] {
|
||||
|
||||
41  hack/make.sh
@@ -28,30 +28,6 @@ export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
|
||||
export MAKEDIR="$SCRIPTDIR/make"
|
||||
export PKG_CONFIG=${PKG_CONFIG:-pkg-config}
|
||||
|
||||
# We're a nice, sexy, little shell script, and people might try to run us;
|
||||
# but really, they shouldn't. We want to be in a container!
|
||||
inContainer="AssumeSoInitially"
|
||||
if [ "$(go env GOHOSTOS)" = 'windows' ]; then
|
||||
if [ -z "$FROM_DOCKERFILE" ]; then
|
||||
unset inContainer
|
||||
fi
|
||||
else
|
||||
if [ "$PWD" != "/go/src/$DOCKER_PKG" ]; then
|
||||
unset inContainer
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -z "$inContainer" ]; then
|
||||
{
|
||||
echo "# WARNING! I don't seem to be running in a Docker container."
|
||||
echo "# The result of this command might be an incorrect build, and will not be"
|
||||
echo "# officially supported."
|
||||
echo "#"
|
||||
echo "# Try this instead: make all"
|
||||
echo "#"
|
||||
} >&2
|
||||
fi
|
||||
|
||||
echo
|
||||
|
||||
# List of bundles to create when no argument is passed
|
||||
@@ -115,14 +91,6 @@ elif ${PKG_CONFIG} 'libsystemd-journal' 2> /dev/null ; then
|
||||
DOCKER_BUILDTAGS+=" journald journald_compat"
|
||||
fi
|
||||
|
||||
# test whether "btrfs/version.h" exists and apply btrfs_noversion appropriately
|
||||
if \
|
||||
command -v gcc &> /dev/null \
|
||||
&& ! gcc -E - -o /dev/null &> /dev/null <<<'#include <btrfs/version.h>' \
|
||||
; then
|
||||
DOCKER_BUILDTAGS+=' btrfs_noversion'
|
||||
fi
|
||||
|
||||
# test whether "libdevmapper.h" is new enough to support deferred remove
|
||||
# functionality. We favour libdm_dlsym_deferred_remove over
|
||||
# libdm_no_deferred_remove in dynamic cases because the binary could be shipped
|
||||
@@ -174,18 +142,11 @@ bundle() {
|
||||
main() {
|
||||
if [ -z "${KEEPBUNDLE-}" ]; then
|
||||
echo "Removing bundles/"
|
||||
rm -rf "bundles/*"
|
||||
rm -rf bundles/*
|
||||
echo
|
||||
fi
|
||||
mkdir -p bundles
|
||||
|
||||
# Windows and symlinks don't get along well
|
||||
if [ "$(go env GOHOSTOS)" != 'windows' ]; then
|
||||
rm -f bundles/latest
|
||||
# preserve latest symlink for backward compatibility
|
||||
ln -sf . bundles/latest
|
||||
fi
|
||||
|
||||
if [ $# -lt 1 ]; then
|
||||
bundles=(${DEFAULT_BUNDLES[@]})
|
||||
else
|
||||
|
||||
@@ -58,7 +58,9 @@ if [ "$(go env GOOS)" = "windows" ]; then
|
||||
fi
|
||||
|
||||
# Generate a Windows file version of the form major,minor,patch,build (with any part optional)
|
||||
VERSION_QUAD=$(echo -n $VERSION | sed -re 's/^([0-9.]*).*$/\1/' | tr . ,)
|
||||
if [ ! -v VERSION_QUAD ]; then
|
||||
VERSION_QUAD=$(echo -n $VERSION | sed -re 's/^([0-9.]*).*$/\1/' | tr . ,)
|
||||
fi
|
||||
|
||||
# Pass version and commit information into the resource compiler
|
||||
defs=
|
||||
|
||||
@@ -3,64 +3,90 @@
|
||||
# For integration-cli test, we use [gocheck](https://labix.org/gocheck), if you want
|
||||
# to run certain tests on your local host, you should run with command:
|
||||
#
|
||||
# TESTFLAGS='-check.f DockerSuite.TestBuild*' ./hack/make.sh binary test-integration
|
||||
# TESTFLAGS='-test.run TestDockerSuite/TestBuild*' ./hack/make.sh binary test-integration
|
||||
#
|
||||
|
||||
if [[ "${TESTFLAGS}" = *-check.f* ]]; then
|
||||
echo Skipping integration tests since TESTFLAGS includes integration-cli only flags
|
||||
TEST_SKIP_INTEGRATION=1
|
||||
if [ -z "${MAKEDIR}" ]; then
|
||||
MAKEDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
|
||||
export MAKEDIR
|
||||
fi
|
||||
|
||||
if [[ "${TESTFLAGS}" = *-test.run* ]]; then
|
||||
echo Skipping integration-cli tests since TESTFLAGS includes integration only flags
|
||||
TEST_SKIP_INTEGRATION_CLI=1
|
||||
fi
|
||||
|
||||
|
||||
if [ -z ${MAKEDIR} ]; then
|
||||
export MAKEDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
|
||||
fi
|
||||
source "$MAKEDIR/.go-autogen"
|
||||
source "${MAKEDIR}/.go-autogen"
|
||||
|
||||
# Set defaults
|
||||
: ${TEST_REPEAT:=1}
|
||||
: ${TESTFLAGS:=}
|
||||
: ${TESTDEBUG:=}
|
||||
: "${TEST_REPEAT:=1}"
|
||||
: "${TESTFLAGS:=}"
|
||||
: "${TESTDEBUG:=}"
|
||||
|
||||
integration_api_dirs=${TEST_INTEGRATION_DIR:-"$(go list -test -f '{{- if ne .ForTest "" -}}{{- .Dir -}}{{- end -}}' ./integration/...)"}
|
||||
setup_integration_test_filter() {
|
||||
if [ -z "${TEST_FILTER}" ]; then
|
||||
return
|
||||
fi
|
||||
TESTFLAGS+="-test.run ${TEST_FILTER}"
|
||||
|
||||
local dirs
|
||||
dirs=$(grep -rIlE --include '*_test.go' "func .*${TEST_FILTER}.*\(. \*testing\.T\)" ./integration*/ | xargs -I file dirname file | uniq)
|
||||
if [ -z "${TEST_SKIP_INTEGRATION}" ]; then
|
||||
: "${TEST_INTEGRATION_DIR:=$(echo "$dirs" | grep -v '^\./integration-cli$')}"
|
||||
if [ -z "${TEST_INTEGRATION_DIR}" ]; then
|
||||
echo "Skipping integration tests since the supplied filter \"${TEST_FILTER}\" omits all integration tests"
|
||||
TEST_SKIP_INTEGRATION=1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -z "${TEST_SKIP_INTEGRATION_CLI}" ]; then
|
||||
if echo "$dirs" | grep -vq '^./integration-cli$'; then
|
||||
TEST_SKIP_INTEGRATION_CLI=1
|
||||
echo "Skipping integration-cli tests since the supplied filter \"${TEST_FILTER}\" omits all integration-cli tests"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
setup_integration_test_filter
|
||||
integration_api_dirs="${TEST_INTEGRATION_DIR:-$(go list -test -f '{{- if ne .ForTest "" -}}{{- .Dir -}}{{- end -}}' ./integration/...)}"
|
||||
|
||||
run_test_integration() {
|
||||
set_platform_timeout
|
||||
if [ -z "${TEST_SKIP_INTEGRATION}" ]; then
|
||||
run_test_integration_suites
|
||||
run_test_integration_suites "${integration_api_dirs}"
|
||||
fi
|
||||
if [ -z "${TEST_SKIP_INTEGRATION_CLI}" ]; then
|
||||
run_test_integration_legacy_suites
|
||||
TIMEOUT=360m run_test_integration_suites integration-cli
|
||||
fi
|
||||
}
|
||||
|
||||
run_test_integration_suites() {
|
||||
local flags="-test.v -test.timeout=${TIMEOUT} $TESTFLAGS ${TESTFLAGS_INTEGRATION}"
|
||||
for dir in ${integration_api_dirs}; do
|
||||
local flags="-test.v -test.timeout=${TIMEOUT} $TESTFLAGS"
|
||||
local dirs="$1"
|
||||
for dir in ${dirs}; do
|
||||
if ! (
|
||||
cd "$dir"
|
||||
echo "Running $PWD flags=${flags}"
|
||||
test_env ./test.main ${flags}
|
||||
# Create a useful package name based on the tests's $dir. We need to take
|
||||
# into account that "$dir" can be either an absolute (/go/src/github.com/docker/docker/integration/foo)
|
||||
# or relative (./integration/foo) path. To account for both, first we strip
|
||||
# the absolute path, then remove any leading periods and slashes.
|
||||
pkgname="${dir}"
|
||||
pkgname="${pkgname#*${GOPATH}/src/${DOCKER_PKG}}"
|
||||
pkgname="${pkgname#*.}"
|
||||
pkgname="${pkgname#*\/}"
|
||||
|
||||
# Finally, we use periods as separator (instead of slashes) to be more
|
||||
# in line with Java package names (which is what junit.xml was designed for)
|
||||
pkgname="$(go env GOARCH).${pkgname//\//.}"
|
||||
echo "Running $PWD (${pkgname}) flags=${flags}"
|
||||
[ -n "$TESTDEBUG" ] && set -x
|
||||
# shellcheck disable=SC2086
|
||||
test_env gotestsum \
|
||||
--format=standard-verbose \
|
||||
--jsonfile="${ABS_DEST}/${pkgname//./-}-go-test-report.json" \
|
||||
--junitfile="${ABS_DEST}/${pkgname//./-}-junit-report.xml" \
|
||||
--raw-command \
|
||||
-- go tool test2json -p "${pkgname}" -t ./test.main ${flags}
|
||||
); then exit 1; fi
|
||||
done
|
||||
}
|
||||
|
||||
run_test_integration_legacy_suites() {
|
||||
(
|
||||
flags="-check.v -check.timeout=${TIMEOUT} -test.timeout=360m $TESTFLAGS ${TESTFLAGS_INTEGRATION_CLI}"
|
||||
cd integration-cli
|
||||
echo "Running $PWD flags=${flags}"
|
||||
test_env ./test.main $flags
|
||||
)
|
||||
}
|
||||
|
||||
build_test_suite_binaries() {
|
||||
if [ ${DOCKER_INTEGRATION_TESTS_VERIFIED-} ]; then
|
||||
if [ -n "${DOCKER_INTEGRATION_TESTS_VERIFIED}" ]; then
|
||||
echo "Skipping building test binaries; as DOCKER_INTEGRATION_TESTS_VERIFIED is set"
|
||||
return
|
||||
fi
|
||||
@@ -85,6 +111,7 @@ build_test_suite_binary() {
|
||||
cleanup_test_suite_binaries() {
|
||||
[ -n "$TESTDEBUG" ] && return
|
||||
echo "Removing test suite binaries"
|
||||
# shellcheck disable=SC2038
|
||||
find integration* -name test.main | xargs -r rm
|
||||
}
|
||||
|
||||
@@ -133,6 +160,7 @@ error_on_leaked_containerd_shims() {
|
||||
awk '$2 == "containerd-shim" && $4 ~ /.*\/bundles\/.*\/test-integration/ { print $1 }')
|
||||
if [ -n "$leftovers" ]; then
|
||||
ps aux
|
||||
# shellcheck disable=SC2086
|
||||
kill -9 ${leftovers} 2> /dev/null
|
||||
echo "!!!! WARNING you have left over shim(s), Cleanup your test !!!!"
|
||||
exit 1
|
||||
@@ -142,11 +170,11 @@ error_on_leaked_containerd_shims() {
|
||||
set_platform_timeout() {
|
||||
# Test timeout.
|
||||
if [ "${DOCKER_ENGINE_GOARCH}" = "arm64" ] || [ "${DOCKER_ENGINE_GOARCH}" = "arm" ]; then
|
||||
: ${TIMEOUT:=10m}
|
||||
: "${TIMEOUT:=10m}"
|
||||
elif [ "${DOCKER_ENGINE_GOARCH}" = "windows" ]; then
|
||||
: ${TIMEOUT:=8m}
|
||||
: "${TIMEOUT:=8m}"
|
||||
else
|
||||
: ${TIMEOUT:=5m}
|
||||
: "${TIMEOUT:=5m}"
|
||||
fi
|
||||
|
||||
if [ "${TEST_REPEAT}" -gt 1 ]; then
|
||||
|
||||
@@ -15,16 +15,16 @@ copy_binaries() {
fi
echo "Copying nested executables into $dir"
for file in containerd containerd-shim ctr runc docker-init docker-proxy rootlesskit rootlesskit-docker-proxy dockerd-rootless.sh; do
cp -f `which "$file"` "$dir/"
if [ "$hash" == "hash" ]; then
cp -f "$(command -v "$file")" "$dir/"
if [ "$hash" = "hash" ]; then
hash_files "$dir/$file"
fi
done

# vpnkit is amd64 only
if which "vpnkit.$(uname -m)" 2>&1 >/dev/null; then
cp -f `which "vpnkit.$(uname -m)"` "$dir/vpnkit"
if [ "$hash" == "hash" ]; then
if command -v "vpnkit.$(uname -m)" 2>&1 >/dev/null; then
cp -f "$(command -v "vpnkit.$(uname -m)")" "$dir/vpnkit"
if [ "$hash" = "hash" ]; then
hash_files "$dir/vpnkit"
fi
fi

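A short aside on the two substitutions in copy_binaries above, sketched with invented paths:

# Rough sketch, not from the repo: "command -v" is a POSIX shell builtin, so it
# works even in images that ship no external "which", and quoting the $(...)
# substitution keeps paths containing spaces intact.
dir="/tmp/bundle dir"
mkdir -p "$dir"
cp -f "$(command -v runc)" "$dir/"

# Inside [ ], "=" is the portable string comparison; "==" is a bash/ksh extension.
hash="hash"
[ "$hash" = "hash" ] && echo "hashing enabled"
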
@@ -7,16 +7,14 @@ source hack/make/.integration-test-helpers
# TODO docker 17.06 cli client used in CI fails to build using a sha;
# unable to prepare context: unable to 'git clone' to temporary context directory: error fetching: error: no such remote ref ead0bb9e08c13dd3d1712759491eee06bf5a5602
#: exit status 128
: "${DOCKER_PY_COMMIT:=4.0.2}"
: "${DOCKER_PY_COMMIT:=4.1.0}"

# custom options to pass py.test
# TODO remove these skips once we update to a docker-py version that has https://github.com/docker/docker-py/pull/2369, https://github.com/docker/docker-py/pull/2380, https://github.com/docker/docker-py/pull/2382
# TODO remove these skips once we update to a docker-py version that has https://github.com/docker/docker-py/pull/2485
: "${PY_TEST_OPTIONS:=\
--deselect=tests/integration/api_swarm_test.py::SwarmTest::test_init_swarm_data_path_addr \
--deselect=tests/integration/api_exec_test.py::ExecTest::test_detach_with_arg \
--deselect=tests/integration/api_container_test.py::AttachContainerTest::test_attach_no_stream \
--deselect=tests/integration/api_exec_test.py::ExecDemuxTest::test_exec_command_tty_stream_no_demux \
--deselect=tests/integration/api_build_test.py::BuildTest::test_build_invalid_platform \
--deselect=tests/integration/api_image_test.py::PullImageTest::test_pull_invalid_platform \
--deselect=tests/integration/models_images_test.py::ImageCollectionTest::test_pull_multiple \
--junitxml=${DEST}/junit-report.xml \
}"
(

@@ -1,12 +1,6 @@
#!/usr/bin/env bash
set -e -o pipefail

if [ -n "$TEST_INTEGRATION_DEST" ]; then
export DEST="$ABS_DEST/$TEST_INTEGRATION_DEST"
export DOCKER_INTEGRATION_DAEMON_DEST="$DEST"
mkdir -p "$DEST"
fi

source hack/make/.integration-test-helpers

if [ ! -z "${TEST_SKIP_INTEGRATION}" ] && [ ! -z "${TEST_SKIP_INTEGRATION_CLI}" ]; then

@@ -2,28 +2,34 @@
set -e -o pipefail

source hack/validate/.validate
new_tests=$(
validate_diff --diff-filter=ACMR --unified=0 -- 'integration/*_test.go' |
grep -E '^(\+func )(.*)(\*testing)' || true
)

if [ -z "$new_tests" ]; then
echo 'No new tests added to integration.'
return
fi

echo
echo "Found new integration tests:"
echo "$new_tests"
echo "Running stress test for them."
run_integration_flaky() {
new_tests=$(
validate_diff --diff-filter=ACMR --unified=0 -- 'integration/*_test.go' |
grep -E '^(\+func Test)(.*)(\*testing\.T\))' || true
)

(
TESTARRAY=$(echo "$new_tests" | sed 's/+func //' | awk -F'\\(' '{print $1}' | tr '\n' '|')
# Note: TEST_REPEAT will make the test suite run 5 times, restarting the daemon
# and each test will run 5 times in a row under the same daemon.
# This will make a total of 25 runs for each test in TESTARRAY.
export TEST_REPEAT=5
export TESTFLAGS="-test.count ${TEST_REPEAT} -test.run ${TESTARRAY%?}"
echo "Using test flags: $TESTFLAGS"
source hack/make/test-integration
)
if [ -z "$new_tests" ]; then
echo 'No new tests added to integration.'
return
fi

echo
echo "Found new integration tests:"
echo "$new_tests"
echo "Running stress test for them."

(
TESTARRAY=$(echo "$new_tests" | sed 's/+func //' | awk -F'\\(' '{print $1}' | tr '\n' '|')
# Note: TEST_REPEAT will make the test suite run 5 times, restarting the daemon
# and each test will run 5 times in a row under the same daemon.
# This will make a total of 25 runs for each test in TESTARRAY.
export TEST_REPEAT=5
export TESTFLAGS="-test.count ${TEST_REPEAT} -test.run ${TESTARRAY%?}"
echo "Using test flags: $TESTFLAGS"
source hack/make/test-integration
)
}

run_integration_flaky

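For illustration, this is roughly what the pipeline above yields when a change adds two new tests (test names invented for the example):

# Hypothetical input: two added test functions picked up by validate_diff/grep.
new_tests='+func TestFooCreate(t *testing.T) {
+func TestFooRemove(t *testing.T) {'

TESTARRAY=$(echo "$new_tests" | sed 's/+func //' | awk -F'\\(' '{print $1}' | tr '\n' '|')
echo "$TESTARRAY"                              # TestFooCreate|TestFooRemove|
echo "-test.count 5 -test.run ${TESTARRAY%?}"  # ${TESTARRAY%?} drops the trailing "|"
# -test.count 5 -test.run TestFooCreate|TestFooRemove
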
@@ -18,12 +18,8 @@ integration_api_dirs=${TEST_INTEGRATION_DIR:-"$(

run_test_integration() {
set_platform_timeout
if [[ "$TESTFLAGS" != *-check.f* ]]; then
run_test_integration_suites
fi
if [[ "$TESTFLAGS" != *-test.run* ]]; then
run_test_integration_legacy_suites
fi
run_test_integration_suites
run_test_integration_legacy_suites
}

run_test_integration_suites() {
@@ -39,7 +35,7 @@ run_test_integration_suites() {

run_test_integration_legacy_suites() {
(
flags="-check.v -check.timeout=${TIMEOUT:-200m} -test.timeout=360m $TESTFLAGS"
flags="-test.v -test.timeout=360m $TESTFLAGS"
cd /tests/integration-cli
echo "Running $PWD"
test_env ./test.main $flags

@@ -13,7 +13,7 @@
set -eu -o pipefail

BUILDFLAGS=( -tags 'netgo seccomp libdm_no_deferred_remove' )
TESTFLAGS+="-test.timeout=${TIMEOUT:-5m}"
TESTFLAGS+=" -test.timeout=${TIMEOUT:-5m}"
TESTDIRS="${TESTDIRS:-./...}"
exclude_paths='/vendor/|/integration'
pkg_list=$(go list $TESTDIRS | grep -vE "($exclude_paths)")

@@ -10,7 +10,6 @@ export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
. ${SCRIPTDIR}/pkg-imports
. ${SCRIPTDIR}/swagger
. ${SCRIPTDIR}/swagger-gen
. ${SCRIPTDIR}/test-imports
. ${SCRIPTDIR}/toml
. ${SCRIPTDIR}/changelog-well-formed
. ${SCRIPTDIR}/changelog-date-descending

@@ -4,10 +4,11 @@ set -e -o pipefail
SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

# CI platforms differ, so per-platform GOMETALINTER_OPTS can be set
# from a platform-specific Dockerfile, otherwise let's just set
# in the Jenkinsfile, otherwise let's just set a
# (somewhat pessimistic) default of 10 minutes.
: ${GOMETALINTER_OPTS=--deadline=10m}
: "${GOMETALINTER_OPTS=--deadline=10m}"

# shellcheck disable=SC2086
gometalinter \
${GOMETALINTER_OPTS} \
--config ${SCRIPTDIR}/gometalinter.json ./...
--config "${SCRIPTDIR}/gometalinter.json" ./...

@@ -1,38 +0,0 @@
#!/usr/bin/env bash
# Make sure we're not using Go's testing package any more in integration-cli

export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source "${SCRIPTDIR}/.validate"

IFS=$'\n'
files=( $(validate_diff --diff-filter=ACMR --name-only -- 'integration-cli/*.go' || true) )
unset IFS

badFiles=()
for f in "${files[@]}"; do
# skip check_test.go since it *does* use the testing package
if [ "$f" = "integration-cli/check_test.go" ]; then
continue
fi

# we use "git show" here to validate that what's committed doesn't contain golang built-in testing
if git show "$VALIDATE_HEAD:$f" | grep -q testing.T; then
if [ "$(echo $f | grep '_test')" ]; then
# allow testing.T for non- _test files
badFiles+=( "$f" )
fi
fi
done

if [ ${#badFiles[@]} -eq 0 ]; then
echo 'Congratulations! No testing.T found.'
else
{
echo "These files use the wrong testing infrastructure:"
for f in "${badFiles[@]}"; do
echo " - $f"
done
echo
} >&2
false
fi
@@ -7,12 +7,12 @@ import (
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/go-check/check"
|
||||
"gotest.tools/assert"
|
||||
)
|
||||
|
||||
func (s *DockerSuite) BenchmarkConcurrentContainerActions(c *check.C) {
|
||||
func (s *DockerSuite) BenchmarkConcurrentContainerActions(c *testing.B) {
|
||||
maxConcurrency := runtime.GOMAXPROCS(0)
|
||||
numIterations := c.N
|
||||
outerGroup := &sync.WaitGroup{}
|
||||
@@ -28,7 +28,7 @@ func (s *DockerSuite) BenchmarkConcurrentContainerActions(c *check.C) {
|
||||
go func() {
|
||||
defer innerGroup.Done()
|
||||
for i := 0; i < numIterations; i++ {
|
||||
args := []string{"run", "-d", defaultSleepImage}
|
||||
args := []string{"run", "-d", "busybox"}
|
||||
args = append(args, sleepCommandForDaemonPlatform()...)
|
||||
out, _, err := dockerCmdWithError(args...)
|
||||
if err != nil {
|
||||
|
||||
@@ -2,6 +2,7 @@ package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http/httptest"
|
||||
@@ -22,8 +23,8 @@ import (
|
||||
"github.com/docker/docker/internal/test/fakestorage"
|
||||
"github.com/docker/docker/internal/test/fixtures/plugin"
|
||||
"github.com/docker/docker/internal/test/registry"
|
||||
"github.com/docker/docker/internal/test/suite"
|
||||
"github.com/docker/docker/pkg/reexec"
|
||||
"github.com/go-check/check"
|
||||
"gotest.tools/assert"
|
||||
)
|
||||
|
||||
@@ -43,6 +44,8 @@ var (
|
||||
|
||||
// the docker client binary to use
|
||||
dockerBinary = ""
|
||||
|
||||
testEnvOnce sync.Once
|
||||
)
|
||||
|
||||
func init() {
|
||||
@@ -58,6 +61,9 @@ func init() {
|
||||
}
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
flag.Parse()
|
||||
|
||||
// Global set up
|
||||
dockerBinary = testEnv.DockerBinary()
|
||||
err := ienv.EnsureFrozenImagesLinux(&testEnv.Execution)
|
||||
if err != nil {
|
||||
@@ -69,21 +75,78 @@ func TestMain(m *testing.M) {
|
||||
os.Exit(m.Run())
|
||||
}
|
||||
|
||||
func Test(t *testing.T) {
|
||||
cli.SetTestEnvironment(testEnv)
|
||||
fakestorage.SetTestEnvironment(&testEnv.Execution)
|
||||
ienv.ProtectAll(t, &testEnv.Execution)
|
||||
check.TestingT(t)
|
||||
func ensureTestEnvSetup(t *testing.T) {
|
||||
testEnvOnce.Do(func() {
|
||||
cli.SetTestEnvironment(testEnv)
|
||||
fakestorage.SetTestEnvironment(&testEnv.Execution)
|
||||
ienv.ProtectAll(t, &testEnv.Execution)
|
||||
})
|
||||
}
|
||||
|
||||
func init() {
|
||||
check.Suite(&DockerSuite{})
|
||||
func TestDockerSuite(t *testing.T) {
|
||||
ensureTestEnvSetup(t)
|
||||
suite.Run(t, &DockerSuite{})
|
||||
}
|
||||
|
||||
func TestDockerRegistrySuite(t *testing.T) {
|
||||
ensureTestEnvSetup(t)
|
||||
suite.Run(t, &DockerRegistrySuite{ds: &DockerSuite{}})
|
||||
}
|
||||
|
||||
func TestDockerSchema1RegistrySuite(t *testing.T) {
|
||||
ensureTestEnvSetup(t)
|
||||
suite.Run(t, &DockerSchema1RegistrySuite{ds: &DockerSuite{}})
|
||||
}
|
||||
|
||||
func TestDockerRegistryAuthHtpasswdSuite(t *testing.T) {
|
||||
ensureTestEnvSetup(t)
|
||||
suite.Run(t, &DockerRegistryAuthHtpasswdSuite{ds: &DockerSuite{}})
|
||||
}
|
||||
|
||||
func TestDockerRegistryAuthTokenSuite(t *testing.T) {
|
||||
ensureTestEnvSetup(t)
|
||||
suite.Run(t, &DockerRegistryAuthTokenSuite{ds: &DockerSuite{}})
|
||||
}
|
||||
|
||||
func TestDockerDaemonSuite(t *testing.T) {
|
||||
ensureTestEnvSetup(t)
|
||||
suite.Run(t, &DockerDaemonSuite{ds: &DockerSuite{}})
|
||||
}
|
||||
|
||||
func TestDockerSwarmSuite(t *testing.T) {
|
||||
ensureTestEnvSetup(t)
|
||||
suite.Run(t, &DockerSwarmSuite{ds: &DockerSuite{}})
|
||||
}
|
||||
|
||||
func TestDockerPluginSuite(t *testing.T) {
|
||||
ensureTestEnvSetup(t)
|
||||
suite.Run(t, &DockerPluginSuite{ds: &DockerSuite{}})
|
||||
}
|
||||
|
||||
func TestDockerExternalVolumeSuite(t *testing.T) {
|
||||
ensureTestEnvSetup(t)
|
||||
testRequires(t, DaemonIsLinux)
|
||||
suite.Run(t, &DockerExternalVolumeSuite{ds: &DockerSuite{}})
|
||||
}
|
||||
|
||||
func TestDockerNetworkSuite(t *testing.T) {
|
||||
ensureTestEnvSetup(t)
|
||||
testRequires(t, DaemonIsLinux)
|
||||
suite.Run(t, &DockerNetworkSuite{ds: &DockerSuite{}})
|
||||
}
|
||||
|
||||
func TestDockerHubPullSuite(t *testing.T) {
|
||||
ensureTestEnvSetup(t)
|
||||
// FIXME. Temporarily turning this off for Windows as GH16039 was breaking
|
||||
// Windows to Linux CI @icecrime
|
||||
testRequires(t, DaemonIsLinux)
|
||||
suite.Run(t, newDockerHubPullSuite())
|
||||
}
|
||||
|
||||
type DockerSuite struct {
|
||||
}
|
||||
|
||||
func (s *DockerSuite) OnTimeout(c *check.C) {
|
||||
func (s *DockerSuite) OnTimeout(c *testing.T) {
|
||||
if testEnv.IsRemoteDaemon() {
|
||||
return
|
||||
}
|
||||
@@ -104,34 +167,28 @@ func (s *DockerSuite) OnTimeout(c *check.C) {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TearDownTest(c *check.C) {
|
||||
func (s *DockerSuite) TearDownTest(c *testing.T) {
|
||||
testEnv.Clean(c)
|
||||
}
|
||||
|
||||
func init() {
|
||||
check.Suite(&DockerRegistrySuite{
|
||||
ds: &DockerSuite{},
|
||||
})
|
||||
}
|
||||
|
||||
type DockerRegistrySuite struct {
|
||||
ds *DockerSuite
|
||||
reg *registry.V2
|
||||
d *daemon.Daemon
|
||||
}
|
||||
|
||||
func (s *DockerRegistrySuite) OnTimeout(c *check.C) {
|
||||
func (s *DockerRegistrySuite) OnTimeout(c *testing.T) {
|
||||
s.d.DumpStackAndQuit()
|
||||
}
|
||||
|
||||
func (s *DockerRegistrySuite) SetUpTest(c *check.C) {
|
||||
func (s *DockerRegistrySuite) SetUpTest(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux, RegistryHosting, testEnv.IsLocalDaemon)
|
||||
s.reg = registry.NewV2(c)
|
||||
s.reg.WaitReady(c)
|
||||
s.d = daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution))
|
||||
}
|
||||
|
||||
func (s *DockerRegistrySuite) TearDownTest(c *check.C) {
|
||||
func (s *DockerRegistrySuite) TearDownTest(c *testing.T) {
|
||||
if s.reg != nil {
|
||||
s.reg.Close()
|
||||
}
|
||||
@@ -141,30 +198,24 @@ func (s *DockerRegistrySuite) TearDownTest(c *check.C) {
|
||||
s.ds.TearDownTest(c)
|
||||
}
|
||||
|
||||
func init() {
|
||||
check.Suite(&DockerSchema1RegistrySuite{
|
||||
ds: &DockerSuite{},
|
||||
})
|
||||
}
|
||||
|
||||
type DockerSchema1RegistrySuite struct {
|
||||
ds *DockerSuite
|
||||
reg *registry.V2
|
||||
d *daemon.Daemon
|
||||
}
|
||||
|
||||
func (s *DockerSchema1RegistrySuite) OnTimeout(c *check.C) {
|
||||
func (s *DockerSchema1RegistrySuite) OnTimeout(c *testing.T) {
|
||||
s.d.DumpStackAndQuit()
|
||||
}
|
||||
|
||||
func (s *DockerSchema1RegistrySuite) SetUpTest(c *check.C) {
|
||||
func (s *DockerSchema1RegistrySuite) SetUpTest(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux, RegistryHosting, NotArm64, testEnv.IsLocalDaemon)
|
||||
s.reg = registry.NewV2(c, registry.Schema1)
|
||||
s.reg.WaitReady(c)
|
||||
s.d = daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution))
|
||||
}
|
||||
|
||||
func (s *DockerSchema1RegistrySuite) TearDownTest(c *check.C) {
|
||||
func (s *DockerSchema1RegistrySuite) TearDownTest(c *testing.T) {
|
||||
if s.reg != nil {
|
||||
s.reg.Close()
|
||||
}
|
||||
@@ -174,30 +225,24 @@ func (s *DockerSchema1RegistrySuite) TearDownTest(c *check.C) {
|
||||
s.ds.TearDownTest(c)
|
||||
}
|
||||
|
||||
func init() {
|
||||
check.Suite(&DockerRegistryAuthHtpasswdSuite{
|
||||
ds: &DockerSuite{},
|
||||
})
|
||||
}
|
||||
|
||||
type DockerRegistryAuthHtpasswdSuite struct {
|
||||
ds *DockerSuite
|
||||
reg *registry.V2
|
||||
d *daemon.Daemon
|
||||
}
|
||||
|
||||
func (s *DockerRegistryAuthHtpasswdSuite) OnTimeout(c *check.C) {
|
||||
func (s *DockerRegistryAuthHtpasswdSuite) OnTimeout(c *testing.T) {
|
||||
s.d.DumpStackAndQuit()
|
||||
}
|
||||
|
||||
func (s *DockerRegistryAuthHtpasswdSuite) SetUpTest(c *check.C) {
|
||||
func (s *DockerRegistryAuthHtpasswdSuite) SetUpTest(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux, RegistryHosting, testEnv.IsLocalDaemon)
|
||||
s.reg = registry.NewV2(c, registry.Htpasswd)
|
||||
s.reg.WaitReady(c)
|
||||
s.d = daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution))
|
||||
}
|
||||
|
||||
func (s *DockerRegistryAuthHtpasswdSuite) TearDownTest(c *check.C) {
|
||||
func (s *DockerRegistryAuthHtpasswdSuite) TearDownTest(c *testing.T) {
|
||||
if s.reg != nil {
|
||||
out, err := s.d.Cmd("logout", privateRegistryURL)
|
||||
assert.NilError(c, err, out)
|
||||
@@ -209,28 +254,22 @@ func (s *DockerRegistryAuthHtpasswdSuite) TearDownTest(c *check.C) {
|
||||
s.ds.TearDownTest(c)
|
||||
}
|
||||
|
||||
func init() {
|
||||
check.Suite(&DockerRegistryAuthTokenSuite{
|
||||
ds: &DockerSuite{},
|
||||
})
|
||||
}
|
||||
|
||||
type DockerRegistryAuthTokenSuite struct {
|
||||
ds *DockerSuite
|
||||
reg *registry.V2
|
||||
d *daemon.Daemon
|
||||
}
|
||||
|
||||
func (s *DockerRegistryAuthTokenSuite) OnTimeout(c *check.C) {
|
||||
func (s *DockerRegistryAuthTokenSuite) OnTimeout(c *testing.T) {
|
||||
s.d.DumpStackAndQuit()
|
||||
}
|
||||
|
||||
func (s *DockerRegistryAuthTokenSuite) SetUpTest(c *check.C) {
|
||||
func (s *DockerRegistryAuthTokenSuite) SetUpTest(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux, RegistryHosting, testEnv.IsLocalDaemon)
|
||||
s.d = daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution))
|
||||
}
|
||||
|
||||
func (s *DockerRegistryAuthTokenSuite) TearDownTest(c *check.C) {
|
||||
func (s *DockerRegistryAuthTokenSuite) TearDownTest(c *testing.T) {
|
||||
if s.reg != nil {
|
||||
out, err := s.d.Cmd("logout", privateRegistryURL)
|
||||
assert.NilError(c, err, out)
|
||||
@@ -242,7 +281,7 @@ func (s *DockerRegistryAuthTokenSuite) TearDownTest(c *check.C) {
|
||||
s.ds.TearDownTest(c)
|
||||
}
|
||||
|
||||
func (s *DockerRegistryAuthTokenSuite) setupRegistryWithTokenService(c *check.C, tokenURL string) {
|
||||
func (s *DockerRegistryAuthTokenSuite) setupRegistryWithTokenService(c *testing.T, tokenURL string) {
|
||||
if s == nil {
|
||||
c.Fatal("registry suite isn't initialized")
|
||||
}
|
||||
@@ -250,27 +289,21 @@ func (s *DockerRegistryAuthTokenSuite) setupRegistryWithTokenService(c *check.C,
|
||||
s.reg.WaitReady(c)
|
||||
}
|
||||
|
||||
func init() {
|
||||
check.Suite(&DockerDaemonSuite{
|
||||
ds: &DockerSuite{},
|
||||
})
|
||||
}
|
||||
|
||||
type DockerDaemonSuite struct {
|
||||
ds *DockerSuite
|
||||
d *daemon.Daemon
|
||||
}
|
||||
|
||||
func (s *DockerDaemonSuite) OnTimeout(c *check.C) {
|
||||
func (s *DockerDaemonSuite) OnTimeout(c *testing.T) {
|
||||
s.d.DumpStackAndQuit()
|
||||
}
|
||||
|
||||
func (s *DockerDaemonSuite) SetUpTest(c *check.C) {
|
||||
func (s *DockerDaemonSuite) SetUpTest(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon)
|
||||
s.d = daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution))
|
||||
}
|
||||
|
||||
func (s *DockerDaemonSuite) TearDownTest(c *check.C) {
|
||||
func (s *DockerDaemonSuite) TearDownTest(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon)
|
||||
if s.d != nil {
|
||||
s.d.Stop(c)
|
||||
@@ -278,7 +311,7 @@ func (s *DockerDaemonSuite) TearDownTest(c *check.C) {
|
||||
s.ds.TearDownTest(c)
|
||||
}
|
||||
|
||||
func (s *DockerDaemonSuite) TearDownSuite(c *check.C) {
|
||||
func (s *DockerDaemonSuite) TearDownSuite(c *testing.T) {
|
||||
filepath.Walk(testdaemon.SockRoot, func(path string, fi os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
// ignore errors here
|
||||
@@ -295,21 +328,15 @@ func (s *DockerDaemonSuite) TearDownSuite(c *check.C) {
|
||||
|
||||
const defaultSwarmPort = 2477
|
||||
|
||||
func init() {
|
||||
check.Suite(&DockerSwarmSuite{
|
||||
ds: &DockerSuite{},
|
||||
})
|
||||
}
|
||||
|
||||
type DockerSwarmSuite struct {
|
||||
server *httptest.Server
|
||||
ds *DockerSuite
|
||||
daemonsLock sync.Mutex // protect access to daemons and portIndex
|
||||
daemons []*daemon.Daemon
|
||||
daemonsLock sync.Mutex // protect access to daemons
|
||||
portIndex int
|
||||
}
|
||||
|
||||
func (s *DockerSwarmSuite) OnTimeout(c *check.C) {
|
||||
func (s *DockerSwarmSuite) OnTimeout(c *testing.T) {
|
||||
s.daemonsLock.Lock()
|
||||
defer s.daemonsLock.Unlock()
|
||||
for _, d := range s.daemons {
|
||||
@@ -317,11 +344,11 @@ func (s *DockerSwarmSuite) OnTimeout(c *check.C) {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *DockerSwarmSuite) SetUpTest(c *check.C) {
|
||||
func (s *DockerSwarmSuite) SetUpTest(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon)
|
||||
}
|
||||
|
||||
func (s *DockerSwarmSuite) AddDaemon(c *check.C, joinSwarm, manager bool) *daemon.Daemon {
|
||||
func (s *DockerSwarmSuite) AddDaemon(c *testing.T, joinSwarm, manager bool) *daemon.Daemon {
|
||||
d := daemon.New(c, dockerBinary, dockerdBinary,
|
||||
testdaemon.WithEnvironment(testEnv.Execution),
|
||||
testdaemon.WithSwarmPort(defaultSwarmPort+s.portIndex),
|
||||
@@ -333,18 +360,18 @@ func (s *DockerSwarmSuite) AddDaemon(c *check.C, joinSwarm, manager bool) *daemo
|
||||
d.StartAndSwarmInit(c)
|
||||
}
|
||||
} else {
|
||||
d.StartNode(c)
|
||||
d.StartNodeWithBusybox(c)
|
||||
}
|
||||
|
||||
s.portIndex++
|
||||
s.daemonsLock.Lock()
|
||||
s.portIndex++
|
||||
s.daemons = append(s.daemons, d)
|
||||
s.daemonsLock.Unlock()
|
||||
|
||||
return d
|
||||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TearDownTest(c *check.C) {
|
||||
func (s *DockerSwarmSuite) TearDownTest(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux)
|
||||
s.daemonsLock.Lock()
|
||||
for _, d := range s.daemons {
|
||||
@@ -354,18 +381,11 @@ func (s *DockerSwarmSuite) TearDownTest(c *check.C) {
|
||||
}
|
||||
}
|
||||
s.daemons = nil
|
||||
s.daemonsLock.Unlock()
|
||||
|
||||
s.portIndex = 0
|
||||
s.daemonsLock.Unlock()
|
||||
s.ds.TearDownTest(c)
|
||||
}
|
||||
|
||||
func init() {
|
||||
check.Suite(&DockerPluginSuite{
|
||||
ds: &DockerSuite{},
|
||||
})
|
||||
}
|
||||
|
||||
type DockerPluginSuite struct {
|
||||
ds *DockerSuite
|
||||
registry *registry.V2
|
||||
@@ -382,7 +402,7 @@ func (ps *DockerPluginSuite) getPluginRepoWithTag() string {
|
||||
return ps.getPluginRepo() + ":" + "latest"
|
||||
}
|
||||
|
||||
func (ps *DockerPluginSuite) SetUpSuite(c *check.C) {
|
||||
func (ps *DockerPluginSuite) SetUpSuite(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux, RegistryHosting)
|
||||
ps.registry = registry.NewV2(c)
|
||||
ps.registry.WaitReady(c)
|
||||
@@ -394,16 +414,16 @@ func (ps *DockerPluginSuite) SetUpSuite(c *check.C) {
|
||||
assert.NilError(c, err, "failed to create plugin")
|
||||
}
|
||||
|
||||
func (ps *DockerPluginSuite) TearDownSuite(c *check.C) {
|
||||
func (ps *DockerPluginSuite) TearDownSuite(c *testing.T) {
|
||||
if ps.registry != nil {
|
||||
ps.registry.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func (ps *DockerPluginSuite) TearDownTest(c *check.C) {
|
||||
func (ps *DockerPluginSuite) TearDownTest(c *testing.T) {
|
||||
ps.ds.TearDownTest(c)
|
||||
}
|
||||
|
||||
func (ps *DockerPluginSuite) OnTimeout(c *check.C) {
|
||||
func (ps *DockerPluginSuite) OnTimeout(c *testing.T) {
|
||||
ps.ds.OnTimeout(c)
|
||||
}
|
||||
|
||||
@@ -1,24 +1,84 @@
|
||||
// Package checker provides Docker specific implementations of the go-check.Checker interface.
|
||||
// Package checker provides helpers for gotest.tools/assert.
|
||||
// Please remove this package whenever possible.
|
||||
package checker // import "github.com/docker/docker/integration-cli/checker"
|
||||
|
||||
import (
|
||||
"github.com/go-check/check"
|
||||
"github.com/vdemeester/shakers"
|
||||
"fmt"
|
||||
|
||||
"gotest.tools/assert"
|
||||
"gotest.tools/assert/cmp"
|
||||
)
|
||||
|
||||
// As a commodity, we bring all check.Checker variables into the current namespace to avoid having
|
||||
// to think about check.X versus checker.X.
|
||||
var (
|
||||
DeepEquals = check.DeepEquals
|
||||
HasLen = check.HasLen
|
||||
IsNil = check.IsNil
|
||||
Matches = check.Matches
|
||||
Not = check.Not
|
||||
NotNil = check.NotNil
|
||||
// Compare defines the interface to compare values
|
||||
type Compare func(x interface{}) assert.BoolOrComparison
|
||||
|
||||
Contains = shakers.Contains
|
||||
Equals = shakers.Equals
|
||||
False = shakers.False
|
||||
GreaterThan = shakers.GreaterThan
|
||||
True = shakers.True
|
||||
)
|
||||
// False checks if the value is false
|
||||
func False() Compare {
|
||||
return func(x interface{}) assert.BoolOrComparison {
|
||||
return !x.(bool)
|
||||
}
|
||||
}
|
||||
|
||||
// True checks if the value is true
|
||||
func True() Compare {
|
||||
return func(x interface{}) assert.BoolOrComparison {
|
||||
return x
|
||||
}
|
||||
}
|
||||
|
||||
// Equals checks if the value is equal to the given value
|
||||
func Equals(y interface{}) Compare {
|
||||
return func(x interface{}) assert.BoolOrComparison {
|
||||
return cmp.Equal(x, y)
|
||||
}
|
||||
}
|
||||
|
||||
// Contains checks if the value contains the given value
|
||||
func Contains(y interface{}) Compare {
|
||||
return func(x interface{}) assert.BoolOrComparison {
|
||||
return cmp.Contains(x, y)
|
||||
}
|
||||
}
|
||||
|
||||
// Not negates the result of the given Compare
|
||||
func Not(c Compare) Compare {
|
||||
return func(x interface{}) assert.BoolOrComparison {
|
||||
r := c(x)
|
||||
switch r := r.(type) {
|
||||
case bool:
|
||||
return !r
|
||||
case cmp.Comparison:
|
||||
return !r().Success()
|
||||
default:
|
||||
panic(fmt.Sprintf("unexpected type %T", r))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepEquals checks if two values are deeply equal
|
||||
func DeepEquals(y interface{}) Compare {
|
||||
return func(x interface{}) assert.BoolOrComparison {
|
||||
return cmp.DeepEqual(x, y)
|
||||
}
|
||||
}
|
||||
|
||||
// HasLen checks if the value has the given length
|
||||
func HasLen(y int) Compare {
|
||||
return func(x interface{}) assert.BoolOrComparison {
|
||||
return cmp.Len(x, y)
|
||||
}
|
||||
}
|
||||
|
||||
// IsNil checks if the given value is nil
|
||||
func IsNil() Compare {
|
||||
return func(x interface{}) assert.BoolOrComparison {
|
||||
return cmp.Nil(x)
|
||||
}
|
||||
}
|
||||
|
||||
// GreaterThan checks if the value is greater than the given value
|
||||
func GreaterThan(y int) Compare {
|
||||
return func(x interface{}) assert.BoolOrComparison {
|
||||
return x.(int) > y
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,10 +3,10 @@ package daemon // import "github.com/docker/docker/integration-cli/daemon"
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/internal/test/daemon"
|
||||
"github.com/go-check/check"
|
||||
"github.com/pkg/errors"
|
||||
"gotest.tools/assert"
|
||||
"gotest.tools/icmd"
|
||||
@@ -88,13 +88,13 @@ func (d *Daemon) inspectFieldWithError(name, field string) (string, error) {
|
||||
|
||||
// CheckActiveContainerCount returns the number of active containers
|
||||
// FIXME(vdemeester) should re-use ActivateContainers in some way
|
||||
func (d *Daemon) CheckActiveContainerCount(c *check.C) (interface{}, check.CommentInterface) {
|
||||
func (d *Daemon) CheckActiveContainerCount(c *testing.T) (interface{}, string) {
|
||||
out, err := d.Cmd("ps", "-q")
|
||||
assert.NilError(c, err)
|
||||
if len(strings.TrimSpace(out)) == 0 {
|
||||
return 0, nil
|
||||
return 0, ""
|
||||
}
|
||||
return len(strings.Split(strings.TrimSpace(out), "\n")), check.Commentf("output: %q", string(out))
|
||||
return len(strings.Split(strings.TrimSpace(out), "\n")), fmt.Sprintf("output: %q", out)
|
||||
}
|
||||
|
||||
// WaitRun waits for a container to be running for 10s
|
||||
|
||||
@@ -4,19 +4,19 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/go-check/check"
|
||||
"gotest.tools/assert"
|
||||
)
|
||||
|
||||
// CheckServiceTasksInState returns the number of tasks with a matching state,
|
||||
// and optional message substring.
|
||||
func (d *Daemon) CheckServiceTasksInState(service string, state swarm.TaskState, message string) func(*check.C) (interface{}, check.CommentInterface) {
|
||||
return func(c *check.C) (interface{}, check.CommentInterface) {
|
||||
func (d *Daemon) CheckServiceTasksInState(service string, state swarm.TaskState, message string) func(*testing.T) (interface{}, string) {
|
||||
return func(c *testing.T) (interface{}, string) {
|
||||
tasks := d.GetServiceTasks(c, service)
|
||||
var count int
|
||||
for _, task := range tasks {
|
||||
@@ -26,14 +26,14 @@ func (d *Daemon) CheckServiceTasksInState(service string, state swarm.TaskState,
|
||||
}
|
||||
}
|
||||
}
|
||||
return count, nil
|
||||
return count, ""
|
||||
}
|
||||
}
|
||||
|
||||
// CheckServiceTasksInStateWithError returns the number of tasks with a matching state,
|
||||
// and optional message substring.
|
||||
func (d *Daemon) CheckServiceTasksInStateWithError(service string, state swarm.TaskState, errorMessage string) func(*check.C) (interface{}, check.CommentInterface) {
|
||||
return func(c *check.C) (interface{}, check.CommentInterface) {
|
||||
func (d *Daemon) CheckServiceTasksInStateWithError(service string, state swarm.TaskState, errorMessage string) func(*testing.T) (interface{}, string) {
|
||||
return func(c *testing.T) (interface{}, string) {
|
||||
tasks := d.GetServiceTasks(c, service)
|
||||
var count int
|
||||
for _, task := range tasks {
|
||||
@@ -43,62 +43,62 @@ func (d *Daemon) CheckServiceTasksInStateWithError(service string, state swarm.T
|
||||
}
|
||||
}
|
||||
}
|
||||
return count, nil
|
||||
return count, ""
|
||||
}
|
||||
}
|
||||
|
||||
// CheckServiceRunningTasks returns the number of running tasks for the specified service
|
||||
func (d *Daemon) CheckServiceRunningTasks(service string) func(*check.C) (interface{}, check.CommentInterface) {
|
||||
func (d *Daemon) CheckServiceRunningTasks(service string) func(*testing.T) (interface{}, string) {
|
||||
return d.CheckServiceTasksInState(service, swarm.TaskStateRunning, "")
|
||||
}
|
||||
|
||||
// CheckServiceUpdateState returns the current update state for the specified service
|
||||
func (d *Daemon) CheckServiceUpdateState(service string) func(*check.C) (interface{}, check.CommentInterface) {
|
||||
return func(c *check.C) (interface{}, check.CommentInterface) {
|
||||
func (d *Daemon) CheckServiceUpdateState(service string) func(*testing.T) (interface{}, string) {
|
||||
return func(c *testing.T) (interface{}, string) {
|
||||
service := d.GetService(c, service)
|
||||
if service.UpdateStatus == nil {
|
||||
return "", nil
|
||||
return "", ""
|
||||
}
|
||||
return service.UpdateStatus.State, nil
|
||||
return service.UpdateStatus.State, ""
|
||||
}
|
||||
}
|
||||
|
||||
// CheckPluginRunning returns the runtime state of the plugin
|
||||
func (d *Daemon) CheckPluginRunning(plugin string) func(c *check.C) (interface{}, check.CommentInterface) {
|
||||
return func(c *check.C) (interface{}, check.CommentInterface) {
|
||||
func (d *Daemon) CheckPluginRunning(plugin string) func(c *testing.T) (interface{}, string) {
|
||||
return func(c *testing.T) (interface{}, string) {
|
||||
apiclient := d.NewClientT(c)
|
||||
resp, _, err := apiclient.PluginInspectWithRaw(context.Background(), plugin)
|
||||
if client.IsErrNotFound(err) {
|
||||
return false, check.Commentf("%v", err)
|
||||
return false, fmt.Sprintf("%v", err)
|
||||
}
|
||||
assert.NilError(c, err)
|
||||
return resp.Enabled, check.Commentf("%+v", resp)
|
||||
return resp.Enabled, fmt.Sprintf("%+v", resp)
|
||||
}
|
||||
}
|
||||
|
||||
// CheckPluginImage returns the runtime state of the plugin
|
||||
func (d *Daemon) CheckPluginImage(plugin string) func(c *check.C) (interface{}, check.CommentInterface) {
|
||||
return func(c *check.C) (interface{}, check.CommentInterface) {
|
||||
func (d *Daemon) CheckPluginImage(plugin string) func(c *testing.T) (interface{}, string) {
|
||||
return func(c *testing.T) (interface{}, string) {
|
||||
apiclient := d.NewClientT(c)
|
||||
resp, _, err := apiclient.PluginInspectWithRaw(context.Background(), plugin)
|
||||
if client.IsErrNotFound(err) {
|
||||
return false, check.Commentf("%v", err)
|
||||
return false, fmt.Sprintf("%v", err)
|
||||
}
|
||||
assert.NilError(c, err)
|
||||
return resp.PluginReference, check.Commentf("%+v", resp)
|
||||
return resp.PluginReference, fmt.Sprintf("%+v", resp)
|
||||
}
|
||||
}
|
||||
|
||||
// CheckServiceTasks returns the number of tasks for the specified service
|
||||
func (d *Daemon) CheckServiceTasks(service string) func(*check.C) (interface{}, check.CommentInterface) {
|
||||
return func(c *check.C) (interface{}, check.CommentInterface) {
|
||||
func (d *Daemon) CheckServiceTasks(service string) func(*testing.T) (interface{}, string) {
|
||||
return func(c *testing.T) (interface{}, string) {
|
||||
tasks := d.GetServiceTasks(c, service)
|
||||
return len(tasks), nil
|
||||
return len(tasks), ""
|
||||
}
|
||||
}
|
||||
|
||||
// CheckRunningTaskNetworks returns the number of times each network is referenced from a task.
|
||||
func (d *Daemon) CheckRunningTaskNetworks(c *check.C) (interface{}, check.CommentInterface) {
|
||||
func (d *Daemon) CheckRunningTaskNetworks(c *testing.T) (interface{}, string) {
|
||||
cli := d.NewClientT(c)
|
||||
defer cli.Close()
|
||||
|
||||
@@ -118,11 +118,11 @@ func (d *Daemon) CheckRunningTaskNetworks(c *check.C) (interface{}, check.Commen
|
||||
result[network.Target]++
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
return result, ""
|
||||
}
|
||||
|
||||
// CheckRunningTaskImages returns the times each image is running as a task.
|
||||
func (d *Daemon) CheckRunningTaskImages(c *check.C) (interface{}, check.CommentInterface) {
|
||||
func (d *Daemon) CheckRunningTaskImages(c *testing.T) (interface{}, string) {
|
||||
cli := d.NewClientT(c)
|
||||
defer cli.Close()
|
||||
|
||||
@@ -142,11 +142,11 @@ func (d *Daemon) CheckRunningTaskImages(c *check.C) (interface{}, check.CommentI
|
||||
result[task.Spec.ContainerSpec.Image]++
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
return result, ""
|
||||
}
|
||||
|
||||
// CheckNodeReadyCount returns the number of ready node on the swarm
|
||||
func (d *Daemon) CheckNodeReadyCount(c *check.C) (interface{}, check.CommentInterface) {
|
||||
func (d *Daemon) CheckNodeReadyCount(c *testing.T) (interface{}, string) {
|
||||
nodes := d.ListNodes(c)
|
||||
var readyCount int
|
||||
for _, node := range nodes {
|
||||
@@ -154,28 +154,28 @@ func (d *Daemon) CheckNodeReadyCount(c *check.C) (interface{}, check.CommentInte
|
||||
readyCount++
|
||||
}
|
||||
}
|
||||
return readyCount, nil
|
||||
return readyCount, ""
|
||||
}
|
||||
|
||||
// CheckLocalNodeState returns the current swarm node state
|
||||
func (d *Daemon) CheckLocalNodeState(c *check.C) (interface{}, check.CommentInterface) {
|
||||
func (d *Daemon) CheckLocalNodeState(c *testing.T) (interface{}, string) {
|
||||
info := d.SwarmInfo(c)
|
||||
return info.LocalNodeState, nil
|
||||
return info.LocalNodeState, ""
|
||||
}
|
||||
|
||||
// CheckControlAvailable returns the current swarm control available
|
||||
func (d *Daemon) CheckControlAvailable(c *check.C) (interface{}, check.CommentInterface) {
|
||||
func (d *Daemon) CheckControlAvailable(c *testing.T) (interface{}, string) {
|
||||
info := d.SwarmInfo(c)
|
||||
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
|
||||
return info.ControlAvailable, nil
|
||||
return info.ControlAvailable, ""
|
||||
}
|
||||
|
||||
// CheckLeader returns whether there is a leader on the swarm or not
|
||||
func (d *Daemon) CheckLeader(c *check.C) (interface{}, check.CommentInterface) {
|
||||
func (d *Daemon) CheckLeader(c *testing.T) (interface{}, string) {
|
||||
cli := d.NewClientT(c)
|
||||
defer cli.Close()
|
||||
|
||||
errList := check.Commentf("could not get node list")
|
||||
errList := "could not get node list"
|
||||
|
||||
ls, err := cli.NodeList(context.Background(), types.NodeListOptions{})
|
||||
if err != nil {
|
||||
@@ -184,8 +184,30 @@ func (d *Daemon) CheckLeader(c *check.C) (interface{}, check.CommentInterface) {
|
||||
|
||||
for _, node := range ls {
|
||||
if node.ManagerStatus != nil && node.ManagerStatus.Leader {
|
||||
return nil, nil
|
||||
return nil, ""
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("no leader"), check.Commentf("could not find leader")
|
||||
return fmt.Errorf("no leader"), "could not find leader"
|
||||
}
|
||||
|
||||
// CmdRetryOutOfSequence tries the specified command against the current daemon
|
||||
// up to 10 times, retrying if it encounters an "update out of sequence" error.
|
||||
func (d *Daemon) CmdRetryOutOfSequence(args ...string) (string, error) {
|
||||
var (
|
||||
output string
|
||||
err error
|
||||
)
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
output, err = d.Cmd(args...)
|
||||
// error, no error, whatever. if we don't have "update out of
|
||||
// sequence", we don't retry, we just return.
|
||||
if !strings.Contains(output, "update out of sequence") {
|
||||
return output, err
|
||||
}
|
||||
}
|
||||
|
||||
// otherwise, once all of our attempts have been exhausted, just return
|
||||
// whatever the last values were.
|
||||
return output, err
|
||||
}
|
||||
|
||||
@@ -1,11 +1,12 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/integration-cli/daemon"
|
||||
"github.com/go-check/check"
|
||||
)
|
||||
|
||||
func (s *DockerSwarmSuite) getDaemon(c *check.C, nodeID string) *daemon.Daemon {
|
||||
func (s *DockerSwarmSuite) getDaemon(c *testing.T, nodeID string) *daemon.Daemon {
|
||||
s.daemonsLock.Lock()
|
||||
defer s.daemonsLock.Unlock()
|
||||
for _, d := range s.daemons {
|
||||
@@ -18,6 +19,6 @@ func (s *DockerSwarmSuite) getDaemon(c *check.C, nodeID string) *daemon.Daemon {
|
||||
}
|
||||
|
||||
// nodeCmd executes a command on a given node via the normal docker socket
|
||||
func (s *DockerSwarmSuite) nodeCmd(c *check.C, id string, args ...string) (string, error) {
|
||||
func (s *DockerSwarmSuite) nodeCmd(c *testing.T, id string, args ...string) (string, error) {
|
||||
return s.getDaemon(c, id).Cmd(args...)
|
||||
}
|
||||
|
||||
@@ -10,24 +10,24 @@ import (
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/docker/docker/internal/test/request"
|
||||
"github.com/docker/docker/pkg/stdcopy"
|
||||
"github.com/go-check/check"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/net/websocket"
|
||||
"gotest.tools/assert"
|
||||
is "gotest.tools/assert/cmp"
|
||||
)
|
||||
|
||||
func (s *DockerSuite) TestGetContainersAttachWebsocket(c *check.C) {
|
||||
func (s *DockerSuite) TestGetContainersAttachWebsocket(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux)
|
||||
out, _ := dockerCmd(c, "run", "-dit", "busybox", "cat")
|
||||
|
||||
rwc, err := request.SockConn(time.Duration(10*time.Second), request.DaemonHost())
|
||||
rwc, err := request.SockConn(10*time.Second, request.DaemonHost())
|
||||
assert.NilError(c, err)
|
||||
|
||||
cleanedContainerID := strings.TrimSpace(out)
|
||||
@@ -76,7 +76,7 @@ func (s *DockerSuite) TestGetContainersAttachWebsocket(c *check.C) {
|
||||
}
|
||||
|
||||
// regression gh14320
|
||||
func (s *DockerSuite) TestPostContainersAttachContainerNotFound(c *check.C) {
|
||||
func (s *DockerSuite) TestPostContainersAttachContainerNotFound(c *testing.T) {
|
||||
resp, _, err := request.Post("/containers/doesnotexist/attach")
|
||||
assert.NilError(c, err)
|
||||
// connection will shutdown, err should be "persistent connection closed"
|
||||
@@ -87,7 +87,7 @@ func (s *DockerSuite) TestPostContainersAttachContainerNotFound(c *check.C) {
|
||||
assert.Equal(c, string(content), expected)
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestGetContainersWsAttachContainerNotFound(c *check.C) {
|
||||
func (s *DockerSuite) TestGetContainersWsAttachContainerNotFound(c *testing.T) {
|
||||
res, body, err := request.Get("/containers/doesnotexist/attach/ws")
|
||||
assert.Equal(c, res.StatusCode, http.StatusNotFound)
|
||||
assert.NilError(c, err)
|
||||
@@ -97,7 +97,7 @@ func (s *DockerSuite) TestGetContainersWsAttachContainerNotFound(c *check.C) {
|
||||
assert.Assert(c, strings.Contains(getErrorMessage(c, b), expected))
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestPostContainersAttach(c *check.C) {
|
||||
func (s *DockerSuite) TestPostContainersAttach(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux)
|
||||
|
||||
expectSuccess := func(conn net.Conn, br *bufio.Reader, stream string, tty bool) {
|
||||
@@ -237,7 +237,7 @@ func sockRequestHijack(method, endpoint string, data io.Reader, ct string, daemo
|
||||
// Deprecated: Use New instead of NewRequestClient
|
||||
// Deprecated: use request.Do (or Get, Delete, Post) instead
|
||||
func newRequestClient(method, endpoint string, data io.Reader, ct, daemon string, modifiers ...func(*http.Request)) (*http.Request, *httputil.ClientConn, error) {
|
||||
c, err := request.SockConn(time.Duration(10*time.Second), daemon)
|
||||
c, err := request.SockConn(10*time.Second, daemon)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("could not dial docker daemon: %v", err)
|
||||
}
|
||||
|
||||
@@ -11,18 +11,18 @@ import (
|
||||
"net/http"
|
||||
"regexp"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/internal/test/fakecontext"
|
||||
"github.com/docker/docker/internal/test/fakegit"
|
||||
"github.com/docker/docker/internal/test/fakestorage"
|
||||
"github.com/docker/docker/internal/test/request"
|
||||
"github.com/go-check/check"
|
||||
"gotest.tools/assert"
|
||||
is "gotest.tools/assert/cmp"
|
||||
)
|
||||
|
||||
func (s *DockerSuite) TestBuildAPIDockerFileRemote(c *check.C) {
|
||||
func (s *DockerSuite) TestBuildAPIDockerFileRemote(c *testing.T) {
|
||||
testRequires(c, NotUserNamespace)
|
||||
|
||||
var testD string
|
||||
@@ -53,7 +53,7 @@ RUN find /tmp/`
|
||||
assert.Assert(c, !strings.Contains(out, "baz"))
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestBuildAPIRemoteTarballContext(c *check.C) {
|
||||
func (s *DockerSuite) TestBuildAPIRemoteTarballContext(c *testing.T) {
|
||||
buffer := new(bytes.Buffer)
|
||||
tw := tar.NewWriter(buffer)
|
||||
defer tw.Close()
|
||||
@@ -80,7 +80,7 @@ func (s *DockerSuite) TestBuildAPIRemoteTarballContext(c *check.C) {
|
||||
b.Close()
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestBuildAPIRemoteTarballContextWithCustomDockerfile(c *check.C) {
|
||||
func (s *DockerSuite) TestBuildAPIRemoteTarballContextWithCustomDockerfile(c *testing.T) {
|
||||
buffer := new(bytes.Buffer)
|
||||
tw := tar.NewWriter(buffer)
|
||||
defer tw.Close()
|
||||
@@ -134,7 +134,7 @@ RUN echo 'right'
|
||||
assert.Assert(c, !strings.Contains(string(content), "wrong"))
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestBuildAPILowerDockerfile(c *check.C) {
|
||||
func (s *DockerSuite) TestBuildAPILowerDockerfile(c *testing.T) {
|
||||
git := fakegit.New(c, "repo", map[string]string{
|
||||
"dockerfile": `FROM busybox
|
||||
RUN echo from dockerfile`,
|
||||
@@ -152,7 +152,7 @@ RUN echo from dockerfile`,
|
||||
assert.Assert(c, is.Contains(out, "from dockerfile"))
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestBuildAPIBuildGitWithF(c *check.C) {
|
||||
func (s *DockerSuite) TestBuildAPIBuildGitWithF(c *testing.T) {
|
||||
git := fakegit.New(c, "repo", map[string]string{
|
||||
"baz": `FROM busybox
|
||||
RUN echo from baz`,
|
||||
@@ -173,7 +173,7 @@ RUN echo from Dockerfile`,
|
||||
assert.Assert(c, is.Contains(out, "from baz"))
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestBuildAPIDoubleDockerfile(c *check.C) {
|
||||
func (s *DockerSuite) TestBuildAPIDoubleDockerfile(c *testing.T) {
|
||||
testRequires(c, UnixCli) // dockerfile overwrites Dockerfile on Windows
|
||||
git := fakegit.New(c, "repo", map[string]string{
|
||||
"Dockerfile": `FROM busybox
|
||||
@@ -195,7 +195,7 @@ RUN echo from dockerfile`,
|
||||
assert.Assert(c, is.Contains(out, "from Dockerfile"))
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestBuildAPIUnnormalizedTarPaths(c *check.C) {
|
||||
func (s *DockerSuite) TestBuildAPIUnnormalizedTarPaths(c *testing.T) {
|
||||
// Make sure that build context tars with entries of the form
|
||||
// x/./y don't cause caching false positives.
|
||||
|
||||
@@ -254,7 +254,7 @@ func (s *DockerSuite) TestBuildAPIUnnormalizedTarPaths(c *check.C) {
|
||||
assert.Assert(c, imageA != imageB)
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestBuildOnBuildWithCopy(c *check.C) {
|
||||
func (s *DockerSuite) TestBuildOnBuildWithCopy(c *testing.T) {
|
||||
dockerfile := `
|
||||
FROM ` + minimalBaseImage() + ` as onbuildbase
|
||||
ONBUILD COPY file /file
|
||||
@@ -279,7 +279,7 @@ func (s *DockerSuite) TestBuildOnBuildWithCopy(c *check.C) {
|
||||
assert.Assert(c, is.Contains(string(out), "Successfully built"))
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestBuildOnBuildCache(c *check.C) {
|
||||
func (s *DockerSuite) TestBuildOnBuildCache(c *testing.T) {
|
||||
build := func(dockerfile string) []byte {
|
||||
ctx := fakecontext.New(c, "",
|
||||
fakecontext.WithDockerfile(dockerfile),
|
||||
@@ -321,7 +321,7 @@ func (s *DockerSuite) TestBuildOnBuildCache(c *check.C) {
|
||||
assert.Check(c, is.Equal(parentID, image.Parent))
|
||||
}
|
||||
|
||||
func (s *DockerRegistrySuite) TestBuildCopyFromForcePull(c *check.C) {
|
||||
func (s *DockerRegistrySuite) TestBuildCopyFromForcePull(c *testing.T) {
|
||||
client := testEnv.APIClient()
|
||||
|
||||
repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
|
||||
@@ -358,7 +358,7 @@ func (s *DockerRegistrySuite) TestBuildCopyFromForcePull(c *check.C) {
|
||||
assert.Check(c, is.Contains(string(out), "Successfully built"))
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestBuildAddRemoteNoDecompress(c *check.C) {
|
||||
func (s *DockerSuite) TestBuildAddRemoteNoDecompress(c *testing.T) {
|
||||
buffer := new(bytes.Buffer)
|
||||
tw := tar.NewWriter(buffer)
|
||||
dt := []byte("contents")
|
||||
@@ -402,7 +402,7 @@ func (s *DockerSuite) TestBuildAddRemoteNoDecompress(c *check.C) {
|
||||
assert.Check(c, is.Contains(string(out), "Successfully built"))
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestBuildChownOnCopy(c *check.C) {
|
||||
func (s *DockerSuite) TestBuildChownOnCopy(c *testing.T) {
|
||||
// new feature added in 1.31 - https://github.com/moby/moby/pull/34263
|
||||
testRequires(c, DaemonIsLinux, MinimumAPIVersion("1.31"))
|
||||
dockerfile := `FROM busybox
|
||||
@@ -432,7 +432,7 @@ func (s *DockerSuite) TestBuildChownOnCopy(c *check.C) {
|
||||
assert.Check(c, is.Contains(string(out), "Successfully built"))
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestBuildCopyCacheOnFileChange(c *check.C) {
|
||||
func (s *DockerSuite) TestBuildCopyCacheOnFileChange(c *testing.T) {
|
||||
|
||||
dockerfile := `FROM busybox
|
||||
COPY file /file`
|
||||
@@ -473,7 +473,7 @@ COPY file /file`
|
||||
}
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestBuildAddCacheOnFileChange(c *check.C) {
|
||||
func (s *DockerSuite) TestBuildAddCacheOnFileChange(c *testing.T) {
|
||||
|
||||
dockerfile := `FROM busybox
|
||||
ADD file /file`
|
||||
@@ -514,7 +514,7 @@ ADD file /file`
|
||||
}
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestBuildScratchCopy(c *check.C) {
|
||||
func (s *DockerSuite) TestBuildScratchCopy(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux)
|
||||
dockerfile := `FROM scratch
|
||||
ADD Dockerfile /
|
||||
@@ -543,7 +543,7 @@ type buildLine struct {
|
||||
}
|
||||
}
|
||||
|
||||
func getImageIDsFromBuild(c *check.C, output []byte) []string {
|
||||
func getImageIDsFromBuild(c *testing.T, output []byte) []string {
|
||||
var ids []string
|
||||
for _, line := range bytes.Split(output, []byte("\n")) {
|
||||
if len(line) == 0 {
|
||||
|
||||
@@ -4,15 +4,15 @@ package main
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/internal/test/fakecontext"
|
||||
"github.com/docker/docker/internal/test/request"
|
||||
"github.com/go-check/check"
|
||||
"gotest.tools/assert"
|
||||
is "gotest.tools/assert/cmp"
|
||||
)
|
||||
|
||||
func (s *DockerSuite) TestBuildWithRecycleBin(c *check.C) {
|
||||
func (s *DockerSuite) TestBuildWithRecycleBin(c *testing.T) {
|
||||
testRequires(c, DaemonIsWindows)
|
||||
|
||||
dockerfile := "" +
|
||||
|
||||
@@ -13,8 +13,8 @@ import (
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
@@ -23,7 +23,6 @@ import (
|
||||
networktypes "github.com/docker/docker/api/types/network"
|
||||
"github.com/docker/docker/api/types/versions"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/docker/docker/integration-cli/checker"
|
||||
"github.com/docker/docker/integration-cli/cli"
|
||||
"github.com/docker/docker/integration-cli/cli/build"
|
||||
"github.com/docker/docker/internal/test/request"
|
||||
@@ -32,13 +31,12 @@ import (
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
"github.com/docker/docker/volume"
|
||||
"github.com/docker/go-connections/nat"
|
||||
"github.com/go-check/check"
|
||||
"gotest.tools/assert"
|
||||
is "gotest.tools/assert/cmp"
|
||||
"gotest.tools/poll"
|
||||
)
|
||||
|
||||
func (s *DockerSuite) TestContainerAPIGetAll(c *check.C) {
|
||||
func (s *DockerSuite) TestContainerAPIGetAll(c *testing.T) {
|
||||
startCount := getContainerCount(c)
|
||||
name := "getall"
|
||||
dockerCmd(c, "run", "--name", name, "busybox", "true")
|
||||
@@ -54,11 +52,11 @@ func (s *DockerSuite) TestContainerAPIGetAll(c *check.C) {
|
||||
assert.NilError(c, err)
|
||||
assert.Equal(c, len(containers), startCount+1)
|
||||
actual := containers[0].Names[0]
|
||||
c.Assert(actual, checker.Equals, "/"+name)
|
||||
assert.Equal(c, actual, "/"+name)
|
||||
}
|
||||
|
||||
// regression test for empty json field being omitted #13691
|
||||
func (s *DockerSuite) TestContainerAPIGetJSONNoFieldsOmitted(c *check.C) {
|
||||
func (s *DockerSuite) TestContainerAPIGetJSONNoFieldsOmitted(c *testing.T) {
|
||||
startCount := getContainerCount(c)
|
||||
dockerCmd(c, "run", "busybox", "true")
|
||||
|
||||
@@ -98,46 +96,7 @@ func (s *DockerSuite) TestContainerAPIGetJSONNoFieldsOmitted(c *check.C) {
|
||||
}
|
||||
}
|
||||
|
||||
type containerPs struct {
|
||||
Names []string
|
||||
Ports []types.Port
|
||||
}
|
||||
|
||||
// regression test for non-empty fields from #13901
|
||||
func (s *DockerSuite) TestContainerAPIPsOmitFields(c *check.C) {
|
||||
// Problematic for Windows porting due to networking not yet being passed back
|
||||
testRequires(c, DaemonIsLinux)
|
||||
name := "pstest"
|
||||
port := 80
|
||||
runSleepingContainer(c, "--name", name, "--expose", strconv.Itoa(port))
|
||||
|
||||
cli, err := client.NewClientWithOpts(client.FromEnv)
|
||||
assert.NilError(c, err)
|
||||
defer cli.Close()
|
||||
|
||||
options := types.ContainerListOptions{
|
||||
All: true,
|
||||
}
|
||||
containers, err := cli.ContainerList(context.Background(), options)
|
||||
assert.NilError(c, err)
|
||||
var foundContainer containerPs
|
||||
for _, c := range containers {
|
||||
for _, testName := range c.Names {
|
||||
if "/"+name == testName {
|
||||
foundContainer.Names = c.Names
|
||||
foundContainer.Ports = c.Ports
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
c.Assert(foundContainer.Ports, checker.HasLen, 1)
|
||||
c.Assert(foundContainer.Ports[0].PrivatePort, checker.Equals, uint16(port))
|
||||
c.Assert(foundContainer.Ports[0].PublicPort, checker.NotNil)
|
||||
c.Assert(foundContainer.Ports[0].IP, checker.NotNil)
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestContainerAPIGetExport(c *check.C) {
|
||||
func (s *DockerSuite) TestContainerAPIGetExport(c *testing.T) {
|
||||
// Not supported on Windows as Windows does not support docker export
|
||||
testRequires(c, DaemonIsLinux)
|
||||
name := "exportcontainer"
|
||||
@@ -161,10 +120,10 @@ func (s *DockerSuite) TestContainerAPIGetExport(c *check.C) {
|
||||
break
|
||||
}
|
||||
}
|
||||
c.Assert(found, checker.True, check.Commentf("The created test file has not been found in the exported image"))
|
||||
assert.Assert(c, found, "The created test file has not been found in the exported image")
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestContainerAPIGetChanges(c *check.C) {
|
||||
func (s *DockerSuite) TestContainerAPIGetChanges(c *testing.T) {
|
||||
// Not supported on Windows as Windows does not support docker diff (/containers/name/changes)
|
||||
testRequires(c, DaemonIsLinux)
|
||||
name := "changescontainer"
|
||||
@@ -184,10 +143,10 @@ func (s *DockerSuite) TestContainerAPIGetChanges(c *check.C) {
|
||||
success = true
|
||||
}
|
||||
}
|
||||
c.Assert(success, checker.True, check.Commentf("/etc/passwd has been removed but is not present in the diff"))
|
||||
assert.Assert(c, success, "/etc/passwd has been removed but is not present in the diff")
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestGetContainerStats(c *check.C) {
|
||||
func (s *DockerSuite) TestGetContainerStats(c *testing.T) {
|
||||
var (
|
||||
name = "statscontainer"
|
||||
)
|
||||
@@ -227,7 +186,7 @@ func (s *DockerSuite) TestGetContainerStats(c *check.C) {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestGetContainerStatsRmRunning(c *check.C) {
|
||||
func (s *DockerSuite) TestGetContainerStatsRmRunning(c *testing.T) {
|
||||
out := runSleepingContainer(c)
|
||||
id := strings.TrimSpace(out)
|
||||
|
||||
@@ -255,12 +214,12 @@ func (s *DockerSuite) TestGetContainerStatsRmRunning(c *check.C) {
|
||||
|
||||
// Now remove without `-f` and make sure we are still pulling stats
|
||||
_, _, err = dockerCmdWithError("rm", id)
|
||||
c.Assert(err, checker.Not(checker.IsNil), check.Commentf("rm should have failed but didn't"))
|
||||
assert.Assert(c, err != nil, "rm should have failed but didn't")
|
||||
_, err = buf.ReadTimeout(b, 2*time.Second)
|
||||
assert.NilError(c, err)
|
||||
|
||||
dockerCmd(c, "rm", "-f", id)
|
||||
c.Assert(<-chErr, checker.IsNil)
|
||||
assert.Assert(c, <-chErr == nil)
|
||||
}
|
||||
|
||||
// ChannelBuffer holds a chan of byte array that can be populate in a goroutine.
|
||||
@@ -294,7 +253,7 @@ func (c *ChannelBuffer) ReadTimeout(p []byte, n time.Duration) (int, error) {
|
||||
// regression test for gh13421
|
||||
// previous test was just checking one stat entry so it didn't fail (stats with
|
||||
// stream false always return one stat)
|
||||
func (s *DockerSuite) TestGetContainerStatsStream(c *check.C) {
|
||||
func (s *DockerSuite) TestGetContainerStatsStream(c *testing.T) {
|
||||
name := "statscontainer"
|
||||
runSleepingContainer(c, "--name", name)
|
||||
|
||||
@@ -335,7 +294,7 @@ func (s *DockerSuite) TestGetContainerStatsStream(c *check.C) {
}
}

func (s *DockerSuite) TestGetContainerStatsNoStream(c *check.C) {
func (s *DockerSuite) TestGetContainerStatsNoStream(c *testing.T) {
name := "statscontainer"
runSleepingContainer(c, "--name", name)

@@ -375,7 +334,7 @@ func (s *DockerSuite) TestGetContainerStatsNoStream(c *check.C) {
}
}

func (s *DockerSuite) TestGetStoppedContainerStats(c *check.C) {
func (s *DockerSuite) TestGetStoppedContainerStats(c *testing.T) {
name := "statscontainer"
dockerCmd(c, "create", "--name", name, "busybox", "ps")

@@ -401,11 +360,11 @@ func (s *DockerSuite) TestGetStoppedContainerStats(c *check.C) {
}
}

func (s *DockerSuite) TestContainerAPIPause(c *check.C) {
func (s *DockerSuite) TestContainerAPIPause(c *testing.T) {
// Problematic on Windows as Windows does not support pause
testRequires(c, DaemonIsLinux)

getPaused := func(c *check.C) []string {
getPaused := func(c *testing.T) []string {
return strings.Fields(cli.DockerCmd(c, "ps", "-f", "status=paused", "-q", "-a").Combined())
}

@@ -429,13 +388,13 @@ func (s *DockerSuite) TestContainerAPIPause(c *check.C) {
assert.NilError(c, err)

pausedContainers = getPaused(c)
c.Assert(pausedContainers, checker.HasLen, 0, check.Commentf("There should be no paused container."))
assert.Equal(c, len(pausedContainers), 0, "There should be no paused container.")
}

func (s *DockerSuite) TestContainerAPITop(c *check.C) {
func (s *DockerSuite) TestContainerAPITop(c *testing.T) {
testRequires(c, DaemonIsLinux)
out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "top")
id := strings.TrimSpace(string(out))
id := strings.TrimSpace(out)
assert.NilError(c, waitRun(id))

cli, err := client.NewClientWithOpts(client.FromEnv)
@@ -445,20 +404,20 @@ func (s *DockerSuite) TestContainerAPITop(c *check.C) {
// sort by comm[andline] to make sure order stays the same in case of PID rollover
top, err := cli.ContainerTop(context.Background(), id, []string{"aux", "--sort=comm"})
assert.NilError(c, err)
c.Assert(top.Titles, checker.HasLen, 11, check.Commentf("expected 11 titles, found %d: %v", len(top.Titles), top.Titles))
assert.Equal(c, len(top.Titles), 11, fmt.Sprintf("expected 11 titles, found %d: %v", len(top.Titles), top.Titles))

if top.Titles[0] != "USER" || top.Titles[10] != "COMMAND" {
c.Fatalf("expected `USER` at `Titles[0]` and `COMMAND` at Titles[10]: %v", top.Titles)
}
c.Assert(top.Processes, checker.HasLen, 2, check.Commentf("expected 2 processes, found %d: %v", len(top.Processes), top.Processes))
c.Assert(top.Processes[0][10], checker.Equals, "/bin/sh -c top")
c.Assert(top.Processes[1][10], checker.Equals, "top")
assert.Equal(c, len(top.Processes), 2, fmt.Sprintf("expected 2 processes, found %d: %v", len(top.Processes), top.Processes))
assert.Equal(c, top.Processes[0][10], "/bin/sh -c top")
assert.Equal(c, top.Processes[1][10], "top")
}

func (s *DockerSuite) TestContainerAPITopWindows(c *check.C) {
func (s *DockerSuite) TestContainerAPITopWindows(c *testing.T) {
testRequires(c, DaemonIsWindows)
out := runSleepingContainer(c, "-d")
id := strings.TrimSpace(string(out))
id := strings.TrimSpace(out)
assert.NilError(c, waitRun(id))

cli, err := client.NewClientWithOpts(client.FromEnv)
@@ -486,7 +445,7 @@ func (s *DockerSuite) TestContainerAPITopWindows(c *check.C) {
assert.Assert(c, foundProcess, "expected to find %s: %v", expectedProcess, top.Processes)
}

func (s *DockerSuite) TestContainerAPICommit(c *check.C) {
func (s *DockerSuite) TestContainerAPICommit(c *testing.T) {
cName := "testapicommit"
dockerCmd(c, "run", "--name="+cName, "busybox", "/bin/sh", "-c", "touch /test")

@@ -502,13 +461,13 @@ func (s *DockerSuite) TestContainerAPICommit(c *check.C) {
assert.NilError(c, err)

cmd := inspectField(c, img.ID, "Config.Cmd")
c.Assert(cmd, checker.Equals, "[/bin/sh -c touch /test]", check.Commentf("got wrong Cmd from commit: %q", cmd))
assert.Equal(c, cmd, "[/bin/sh -c touch /test]", fmt.Sprintf("got wrong Cmd from commit: %q", cmd))

// sanity check, make sure the image is what we think it is
dockerCmd(c, "run", img.ID, "ls", "/test")
}

func (s *DockerSuite) TestContainerAPICommitWithLabelInConfig(c *check.C) {
func (s *DockerSuite) TestContainerAPICommitWithLabelInConfig(c *testing.T) {
cName := "testapicommitwithconfig"
dockerCmd(c, "run", "--name="+cName, "busybox", "/bin/sh", "-c", "touch /test")

@@ -528,19 +487,19 @@ func (s *DockerSuite) TestContainerAPICommitWithLabelInConfig(c *check.C) {
assert.NilError(c, err)

label1 := inspectFieldMap(c, img.ID, "Config.Labels", "key1")
c.Assert(label1, checker.Equals, "value1")
assert.Equal(c, label1, "value1")

label2 := inspectFieldMap(c, img.ID, "Config.Labels", "key2")
c.Assert(label2, checker.Equals, "value2")
assert.Equal(c, label2, "value2")

cmd := inspectField(c, img.ID, "Config.Cmd")
c.Assert(cmd, checker.Equals, "[/bin/sh -c touch /test]", check.Commentf("got wrong Cmd from commit: %q", cmd))
assert.Equal(c, cmd, "[/bin/sh -c touch /test]", fmt.Sprintf("got wrong Cmd from commit: %q", cmd))

// sanity check, make sure the image is what we think it is
dockerCmd(c, "run", img.ID, "ls", "/test")
}

func (s *DockerSuite) TestContainerAPIBadPort(c *check.C) {
func (s *DockerSuite) TestContainerAPIBadPort(c *testing.T) {
// TODO Windows to Windows CI - Port this test
testRequires(c, DaemonIsLinux)

@@ -567,7 +526,7 @@ func (s *DockerSuite) TestContainerAPIBadPort(c *check.C) {
assert.ErrorContains(c, err, `invalid port specification: "aa80"`)
}

func (s *DockerSuite) TestContainerAPICreate(c *check.C) {
func (s *DockerSuite) TestContainerAPICreate(c *testing.T) {
config := containertypes.Config{
Image: "busybox",
Cmd: []string{"/bin/sh", "-c", "touch /test && ls /test"},
@@ -584,7 +543,7 @@ func (s *DockerSuite) TestContainerAPICreate(c *check.C) {
assert.Equal(c, strings.TrimSpace(out), "/test")
}

func (s *DockerSuite) TestContainerAPICreateEmptyConfig(c *check.C) {
func (s *DockerSuite) TestContainerAPICreateEmptyConfig(c *testing.T) {

cli, err := client.NewClientWithOpts(client.FromEnv)
assert.NilError(c, err)
@@ -596,7 +555,7 @@ func (s *DockerSuite) TestContainerAPICreateEmptyConfig(c *check.C) {
assert.ErrorContains(c, err, expected)
}

func (s *DockerSuite) TestContainerAPICreateMultipleNetworksConfig(c *check.C) {
func (s *DockerSuite) TestContainerAPICreateMultipleNetworksConfig(c *testing.T) {
// Container creation must fail if client specified configurations for more than one network
config := containertypes.Config{
Image: "busybox",
@@ -617,26 +576,26 @@ func (s *DockerSuite) TestContainerAPICreateMultipleNetworksConfig(c *check.C) {
_, err = cli.ContainerCreate(context.Background(), &config, &containertypes.HostConfig{}, &networkingConfig, "")
msg := err.Error()
// network name order in error message is not deterministic
c.Assert(msg, checker.Contains, "Container cannot be connected to network endpoints")
c.Assert(msg, checker.Contains, "net1")
c.Assert(msg, checker.Contains, "net2")
c.Assert(msg, checker.Contains, "net3")
assert.Assert(c, strings.Contains(msg, "Container cannot be connected to network endpoints"))
assert.Assert(c, strings.Contains(msg, "net1"))
assert.Assert(c, strings.Contains(msg, "net2"))
assert.Assert(c, strings.Contains(msg, "net3"))
}

func (s *DockerSuite) TestContainerAPICreateBridgeNetworkMode(c *check.C) {
func (s *DockerSuite) TestContainerAPICreateBridgeNetworkMode(c *testing.T) {
// Windows does not support bridge
testRequires(c, DaemonIsLinux)
UtilCreateNetworkMode(c, "bridge")
}

func (s *DockerSuite) TestContainerAPICreateOtherNetworkModes(c *check.C) {
func (s *DockerSuite) TestContainerAPICreateOtherNetworkModes(c *testing.T) {
// Windows does not support these network modes
testRequires(c, DaemonIsLinux, NotUserNamespace)
UtilCreateNetworkMode(c, "host")
UtilCreateNetworkMode(c, "container:web1")
}

func UtilCreateNetworkMode(c *check.C, networkMode containertypes.NetworkMode) {
func UtilCreateNetworkMode(c *testing.T, networkMode containertypes.NetworkMode) {
config := containertypes.Config{
Image: "busybox",
}
@@ -655,10 +614,10 @@ func UtilCreateNetworkMode(c *check.C, networkMode containertypes.NetworkMode) {
containerJSON, err := cli.ContainerInspect(context.Background(), container.ID)
assert.NilError(c, err)

c.Assert(containerJSON.HostConfig.NetworkMode, checker.Equals, containertypes.NetworkMode(networkMode), check.Commentf("Mismatched NetworkMode"))
assert.Equal(c, containerJSON.HostConfig.NetworkMode, networkMode, "Mismatched NetworkMode")
}

func (s *DockerSuite) TestContainerAPICreateWithCpuSharesCpuset(c *check.C) {
func (s *DockerSuite) TestContainerAPICreateWithCpuSharesCpuset(c *testing.T) {
// TODO Windows to Windows CI. The CpuShares part could be ported.
testRequires(c, DaemonIsLinux)
config := containertypes.Config{
@@ -686,17 +645,17 @@ func (s *DockerSuite) TestContainerAPICreateWithCpuSharesCpuset(c *check.C) {
assert.Equal(c, out, "512")

outCpuset := inspectField(c, containerJSON.ID, "HostConfig.CpusetCpus")
c.Assert(outCpuset, checker.Equals, "0")
assert.Equal(c, outCpuset, "0")
}

func (s *DockerSuite) TestContainerAPIVerifyHeader(c *check.C) {
func (s *DockerSuite) TestContainerAPIVerifyHeader(c *testing.T) {
config := map[string]interface{}{
"Image": "busybox",
}

create := func(ct string) (*http.Response, io.ReadCloser, error) {
jsonData := bytes.NewBuffer(nil)
c.Assert(json.NewEncoder(jsonData).Encode(config), checker.IsNil)
assert.Assert(c, json.NewEncoder(jsonData).Encode(config) == nil)
return request.Post("/containers/create", request.RawContent(ioutil.NopCloser(jsonData)), request.ContentType(ct))
}

@@ -708,7 +667,7 @@ func (s *DockerSuite) TestContainerAPIVerifyHeader(c *check.C) {
if versions.GreaterThanOrEqualTo(testEnv.DaemonAPIVersion(), "1.32") {
assert.Equal(c, res.StatusCode, http.StatusBadRequest)
} else {
c.Assert(res.StatusCode, checker.Not(checker.Equals), http.StatusOK)
assert.Assert(c, res.StatusCode != http.StatusOK)
}
body.Close()

@@ -718,7 +677,7 @@ func (s *DockerSuite) TestContainerAPIVerifyHeader(c *check.C) {
if versions.GreaterThanOrEqualTo(testEnv.DaemonAPIVersion(), "1.32") {
assert.Equal(c, res.StatusCode, http.StatusBadRequest)
} else {
c.Assert(res.StatusCode, checker.Not(checker.Equals), http.StatusOK)
assert.Assert(c, res.StatusCode != http.StatusOK)
}
body.Close()

@@ -730,7 +689,7 @@ func (s *DockerSuite) TestContainerAPIVerifyHeader(c *check.C) {
}

//Issue 14230. daemon should return 500 for invalid port syntax
func (s *DockerSuite) TestContainerAPIInvalidPortSyntax(c *check.C) {
func (s *DockerSuite) TestContainerAPIInvalidPortSyntax(c *testing.T) {
config := `{
"Image": "busybox",
"HostConfig": {
@@ -748,15 +707,15 @@ func (s *DockerSuite) TestContainerAPIInvalidPortSyntax(c *check.C) {
if versions.GreaterThanOrEqualTo(testEnv.DaemonAPIVersion(), "1.32") {
assert.Equal(c, res.StatusCode, http.StatusBadRequest)
} else {
c.Assert(res.StatusCode, checker.Not(checker.Equals), http.StatusOK)
assert.Assert(c, res.StatusCode != http.StatusOK)
}

b, err := request.ReadBody(body)
assert.NilError(c, err)
c.Assert(string(b[:]), checker.Contains, "invalid port")
assert.Assert(c, strings.Contains(string(b[:]), "invalid port"))
}

func (s *DockerSuite) TestContainerAPIRestartPolicyInvalidPolicyName(c *check.C) {
func (s *DockerSuite) TestContainerAPIRestartPolicyInvalidPolicyName(c *testing.T) {
config := `{
"Image": "busybox",
"HostConfig": {
@@ -772,15 +731,15 @@ func (s *DockerSuite) TestContainerAPIRestartPolicyInvalidPolicyName(c *check.C)
if versions.GreaterThanOrEqualTo(testEnv.DaemonAPIVersion(), "1.32") {
assert.Equal(c, res.StatusCode, http.StatusBadRequest)
} else {
c.Assert(res.StatusCode, checker.Not(checker.Equals), http.StatusOK)
assert.Assert(c, res.StatusCode != http.StatusOK)
}

b, err := request.ReadBody(body)
assert.NilError(c, err)
c.Assert(string(b[:]), checker.Contains, "invalid restart policy")
assert.Assert(c, strings.Contains(string(b[:]), "invalid restart policy"))
}

func (s *DockerSuite) TestContainerAPIRestartPolicyRetryMismatch(c *check.C) {
func (s *DockerSuite) TestContainerAPIRestartPolicyRetryMismatch(c *testing.T) {
config := `{
"Image": "busybox",
"HostConfig": {
@@ -796,15 +755,15 @@ func (s *DockerSuite) TestContainerAPIRestartPolicyRetryMismatch(c *check.C) {
if versions.GreaterThanOrEqualTo(testEnv.DaemonAPIVersion(), "1.32") {
assert.Equal(c, res.StatusCode, http.StatusBadRequest)
} else {
c.Assert(res.StatusCode, checker.Not(checker.Equals), http.StatusOK)
assert.Assert(c, res.StatusCode != http.StatusOK)
}

b, err := request.ReadBody(body)
assert.NilError(c, err)
c.Assert(string(b[:]), checker.Contains, "maximum retry count cannot be used with restart policy")
assert.Assert(c, strings.Contains(string(b[:]), "maximum retry count cannot be used with restart policy"))
}

func (s *DockerSuite) TestContainerAPIRestartPolicyNegativeRetryCount(c *check.C) {
func (s *DockerSuite) TestContainerAPIRestartPolicyNegativeRetryCount(c *testing.T) {
config := `{
"Image": "busybox",
"HostConfig": {
@@ -820,15 +779,15 @@ func (s *DockerSuite) TestContainerAPIRestartPolicyNegativeRetryCount(c *check.C
if versions.GreaterThanOrEqualTo(testEnv.DaemonAPIVersion(), "1.32") {
assert.Equal(c, res.StatusCode, http.StatusBadRequest)
} else {
c.Assert(res.StatusCode, checker.Not(checker.Equals), http.StatusOK)
assert.Assert(c, res.StatusCode != http.StatusOK)
}

b, err := request.ReadBody(body)
assert.NilError(c, err)
c.Assert(string(b[:]), checker.Contains, "maximum retry count cannot be negative")
assert.Assert(c, strings.Contains(string(b[:]), "maximum retry count cannot be negative"))
}

func (s *DockerSuite) TestContainerAPIRestartPolicyDefaultRetryCount(c *check.C) {
func (s *DockerSuite) TestContainerAPIRestartPolicyDefaultRetryCount(c *testing.T) {
config := `{
"Image": "busybox",
"HostConfig": {
@@ -846,7 +805,7 @@ func (s *DockerSuite) TestContainerAPIRestartPolicyDefaultRetryCount(c *check.C)

// Issue 7941 - test to make sure a "null" in JSON is just ignored.
// W/o this fix a null in JSON would be parsed into a string var as "null"
func (s *DockerSuite) TestContainerAPIPostCreateNull(c *check.C) {
func (s *DockerSuite) TestContainerAPIPostCreateNull(c *testing.T) {
config := `{
"Hostname":"",
"Domainname":"",
@@ -880,17 +839,17 @@ func (s *DockerSuite) TestContainerAPIPostCreateNull(c *check.C) {
ID string
}
var container createResp
c.Assert(json.Unmarshal(b, &container), checker.IsNil)
assert.Assert(c, json.Unmarshal(b, &container) == nil)
out := inspectField(c, container.ID, "HostConfig.CpusetCpus")
assert.Equal(c, out, "")

outMemory := inspectField(c, container.ID, "HostConfig.Memory")
c.Assert(outMemory, checker.Equals, "0")
assert.Equal(c, outMemory, "0")
outMemorySwap := inspectField(c, container.ID, "HostConfig.MemorySwap")
c.Assert(outMemorySwap, checker.Equals, "0")
assert.Equal(c, outMemorySwap, "0")
}

func (s *DockerSuite) TestCreateWithTooLowMemoryLimit(c *check.C) {
func (s *DockerSuite) TestCreateWithTooLowMemoryLimit(c *testing.T) {
// TODO Windows: Port once memory is supported
testRequires(c, DaemonIsLinux)
config := `{
@@ -904,17 +863,17 @@ func (s *DockerSuite) TestCreateWithTooLowMemoryLimit(c *check.C) {
res, body, err := request.Post("/containers/create", request.RawString(config), request.JSON)
assert.NilError(c, err)
b, err2 := request.ReadBody(body)
c.Assert(err2, checker.IsNil)
assert.Assert(c, err2 == nil)

if versions.GreaterThanOrEqualTo(testEnv.DaemonAPIVersion(), "1.32") {
assert.Equal(c, res.StatusCode, http.StatusBadRequest)
} else {
c.Assert(res.StatusCode, checker.Not(checker.Equals), http.StatusOK)
assert.Assert(c, res.StatusCode != http.StatusOK)
}
c.Assert(string(b), checker.Contains, "Minimum memory limit allowed is 4MB")
assert.Assert(c, strings.Contains(string(b), "Minimum memory limit allowed is 4MB"))
}

func (s *DockerSuite) TestContainerAPIRename(c *check.C) {
func (s *DockerSuite) TestContainerAPIRename(c *testing.T) {
out, _ := dockerCmd(c, "run", "--name", "TestContainerAPIRename", "-d", "busybox", "sh")

containerID := strings.TrimSpace(out)
@@ -928,10 +887,10 @@ func (s *DockerSuite) TestContainerAPIRename(c *check.C) {
assert.NilError(c, err)

name := inspectField(c, containerID, "Name")
c.Assert(name, checker.Equals, "/"+newName, check.Commentf("Failed to rename container"))
assert.Equal(c, name, "/"+newName, "Failed to rename container")
}

func (s *DockerSuite) TestContainerAPIKill(c *check.C) {
func (s *DockerSuite) TestContainerAPIKill(c *testing.T) {
name := "test-api-kill"
runSleepingContainer(c, "-i", "--name", name)

@@ -943,10 +902,10 @@ func (s *DockerSuite) TestContainerAPIKill(c *check.C) {
assert.NilError(c, err)

state := inspectField(c, name, "State.Running")
c.Assert(state, checker.Equals, "false", check.Commentf("got wrong State from container %s: %q", name, state))
assert.Equal(c, state, "false", fmt.Sprintf("got wrong State from container %s: %q", name, state))
}

func (s *DockerSuite) TestContainerAPIRestart(c *check.C) {
func (s *DockerSuite) TestContainerAPIRestart(c *testing.T) {
name := "test-api-restart"
runSleepingContainer(c, "-di", "--name", name)
cli, err := client.NewClientWithOpts(client.FromEnv)
@@ -957,10 +916,10 @@ func (s *DockerSuite) TestContainerAPIRestart(c *check.C) {
err = cli.ContainerRestart(context.Background(), name, &timeout)
assert.NilError(c, err)

c.Assert(waitInspect(name, "{{ .State.Restarting }} {{ .State.Running }}", "false true", 15*time.Second), checker.IsNil)
assert.Assert(c, waitInspect(name, "{{ .State.Restarting }} {{ .State.Running }}", "false true", 15*time.Second) == nil)
}

func (s *DockerSuite) TestContainerAPIRestartNotimeoutParam(c *check.C) {
func (s *DockerSuite) TestContainerAPIRestartNotimeoutParam(c *testing.T) {
name := "test-api-restart-no-timeout-param"
out := runSleepingContainer(c, "-di", "--name", name)
id := strings.TrimSpace(out)
@@ -973,10 +932,10 @@ func (s *DockerSuite) TestContainerAPIRestartNotimeoutParam(c *check.C) {
err = cli.ContainerRestart(context.Background(), name, nil)
assert.NilError(c, err)

c.Assert(waitInspect(name, "{{ .State.Restarting }} {{ .State.Running }}", "false true", 15*time.Second), checker.IsNil)
assert.Assert(c, waitInspect(name, "{{ .State.Restarting }} {{ .State.Running }}", "false true", 15*time.Second) == nil)
}

func (s *DockerSuite) TestContainerAPIStart(c *check.C) {
func (s *DockerSuite) TestContainerAPIStart(c *testing.T) {
name := "testing-start"
config := containertypes.Config{
Image: "busybox",
@@ -1002,7 +961,7 @@ func (s *DockerSuite) TestContainerAPIStart(c *check.C) {
// TODO(tibor): figure out why this doesn't work on windows
}

func (s *DockerSuite) TestContainerAPIStop(c *check.C) {
func (s *DockerSuite) TestContainerAPIStop(c *testing.T) {
name := "test-api-stop"
runSleepingContainer(c, "-i", "--name", name)
timeout := 30 * time.Second
@@ -1013,7 +972,7 @@ func (s *DockerSuite) TestContainerAPIStop(c *check.C) {

err = cli.ContainerStop(context.Background(), name, &timeout)
assert.NilError(c, err)
c.Assert(waitInspect(name, "{{ .State.Running }}", "false", 60*time.Second), checker.IsNil)
assert.Assert(c, waitInspect(name, "{{ .State.Running }}", "false", 60*time.Second) == nil)

// second call to start should give 304
// maybe add ContainerStartWithRaw to test it
@@ -1021,7 +980,7 @@ func (s *DockerSuite) TestContainerAPIStop(c *check.C) {
assert.NilError(c, err)
}

func (s *DockerSuite) TestContainerAPIWait(c *check.C) {
func (s *DockerSuite) TestContainerAPIWait(c *testing.T) {
name := "test-api-wait"

sleepCmd := "/bin/sleep"
@@ -1040,11 +999,11 @@ func (s *DockerSuite) TestContainerAPIWait(c *check.C) {
case err = <-errC:
assert.NilError(c, err)
case waitres := <-waitresC:
c.Assert(waitres.StatusCode, checker.Equals, int64(0))
assert.Equal(c, waitres.StatusCode, int64(0))
}
}

func (s *DockerSuite) TestContainerAPICopyNotExistsAnyMore(c *check.C) {
func (s *DockerSuite) TestContainerAPICopyNotExistsAnyMore(c *testing.T) {
name := "test-container-api-copy"
dockerCmd(c, "run", "--name", name, "busybox", "touch", "/test.txt")

@@ -1057,7 +1016,7 @@ func (s *DockerSuite) TestContainerAPICopyNotExistsAnyMore(c *check.C) {
assert.Equal(c, res.StatusCode, http.StatusNotFound)
}

func (s *DockerSuite) TestContainerAPICopyPre124(c *check.C) {
func (s *DockerSuite) TestContainerAPICopyPre124(c *testing.T) {
testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later
name := "test-container-api-copy"
dockerCmd(c, "run", "--name", name, "busybox", "touch", "/test.txt")
@@ -1084,10 +1043,10 @@ func (s *DockerSuite) TestContainerAPICopyPre124(c *check.C) {
break
}
}
c.Assert(found, checker.True)
assert.Assert(c, found)
}

func (s *DockerSuite) TestContainerAPICopyResourcePathEmptyPre124(c *check.C) {
func (s *DockerSuite) TestContainerAPICopyResourcePathEmptyPre124(c *testing.T) {
testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later
name := "test-container-api-copy-resource-empty"
dockerCmd(c, "run", "--name", name, "busybox", "touch", "/test.txt")
@@ -1101,14 +1060,15 @@ func (s *DockerSuite) TestContainerAPICopyResourcePathEmptyPre124(c *check.C) {
if versions.GreaterThanOrEqualTo(testEnv.DaemonAPIVersion(), "1.32") {
assert.Equal(c, res.StatusCode, http.StatusBadRequest)
} else {
c.Assert(res.StatusCode, checker.Not(checker.Equals), http.StatusOK)
assert.Assert(c, res.StatusCode != http.StatusOK)
}
b, err := request.ReadBody(body)
assert.NilError(c, err)
c.Assert(string(b), checker.Matches, "Path cannot be empty\n")
assert.Assert(c, is.Regexp("^Path cannot be empty\n$", string(b)))

}

func (s *DockerSuite) TestContainerAPICopyResourcePathNotFoundPre124(c *check.C) {
func (s *DockerSuite) TestContainerAPICopyResourcePathNotFoundPre124(c *testing.T) {
testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later
name := "test-container-api-copy-resource-not-found"
dockerCmd(c, "run", "--name", name, "busybox")
@@ -1126,10 +1086,11 @@ func (s *DockerSuite) TestContainerAPICopyResourcePathNotFoundPre124(c *check.C)
}
b, err := request.ReadBody(body)
assert.NilError(c, err)
c.Assert(string(b), checker.Matches, "Could not find the file /notexist in container "+name+"\n")
assert.Assert(c, is.Regexp("^Could not find the file /notexist in container "+name+"\n$", string(b)))

}

func (s *DockerSuite) TestContainerAPICopyContainerNotFoundPr124(c *check.C) {
func (s *DockerSuite) TestContainerAPICopyContainerNotFoundPr124(c *testing.T) {
testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later
postData := types.CopyConfig{
Resource: "/something",
@@ -1140,7 +1101,7 @@ func (s *DockerSuite) TestContainerAPICopyContainerNotFoundPr124(c *check.C) {
assert.Equal(c, res.StatusCode, http.StatusNotFound)
}

func (s *DockerSuite) TestContainerAPIDelete(c *check.C) {
func (s *DockerSuite) TestContainerAPIDelete(c *testing.T) {
out := runSleepingContainer(c)

id := strings.TrimSpace(out)
@@ -1156,7 +1117,7 @@ func (s *DockerSuite) TestContainerAPIDelete(c *check.C) {
assert.NilError(c, err)
}

func (s *DockerSuite) TestContainerAPIDeleteNotExist(c *check.C) {
func (s *DockerSuite) TestContainerAPIDeleteNotExist(c *testing.T) {
cli, err := client.NewClientWithOpts(client.FromEnv)
assert.NilError(c, err)
defer cli.Close()
@@ -1165,7 +1126,7 @@ func (s *DockerSuite) TestContainerAPIDeleteNotExist(c *check.C) {
assert.ErrorContains(c, err, "No such container: doesnotexist")
}

func (s *DockerSuite) TestContainerAPIDeleteForce(c *check.C) {
func (s *DockerSuite) TestContainerAPIDeleteForce(c *testing.T) {
out := runSleepingContainer(c)
id := strings.TrimSpace(out)
assert.NilError(c, waitRun(id))
@@ -1182,7 +1143,7 @@ func (s *DockerSuite) TestContainerAPIDeleteForce(c *check.C) {
assert.NilError(c, err)
}

func (s *DockerSuite) TestContainerAPIDeleteRemoveLinks(c *check.C) {
func (s *DockerSuite) TestContainerAPIDeleteRemoveLinks(c *testing.T) {
// Windows does not support links
testRequires(c, DaemonIsLinux)
out, _ := dockerCmd(c, "run", "-d", "--name", "tlink1", "busybox", "top")
@@ -1193,10 +1154,10 @@ func (s *DockerSuite) TestContainerAPIDeleteRemoveLinks(c *check.C) {
out, _ = dockerCmd(c, "run", "--link", "tlink1:tlink1", "--name", "tlink2", "-d", "busybox", "top")

id2 := strings.TrimSpace(out)
c.Assert(waitRun(id2), checker.IsNil)
assert.Assert(c, waitRun(id2) == nil)

links := inspectFieldJSON(c, id2, "HostConfig.Links")
c.Assert(links, checker.Equals, "[\"/tlink1:/tlink2/tlink1\"]", check.Commentf("expected to have links between containers"))
assert.Equal(c, links, "[\"/tlink1:/tlink2/tlink1\"]", "expected to have links between containers")

removeOptions := types.ContainerRemoveOptions{
RemoveLinks: true,
@@ -1210,10 +1171,10 @@ func (s *DockerSuite) TestContainerAPIDeleteRemoveLinks(c *check.C) {
assert.NilError(c, err)

linksPostRm := inspectFieldJSON(c, id2, "HostConfig.Links")
c.Assert(linksPostRm, checker.Equals, "null", check.Commentf("call to api deleteContainer links should have removed the specified links"))
assert.Equal(c, linksPostRm, "null", "call to api deleteContainer links should have removed the specified links")
}

func (s *DockerSuite) TestContainerAPIDeleteConflict(c *check.C) {
func (s *DockerSuite) TestContainerAPIDeleteConflict(c *testing.T) {
out := runSleepingContainer(c)

id := strings.TrimSpace(out)
@@ -1228,7 +1189,7 @@ func (s *DockerSuite) TestContainerAPIDeleteConflict(c *check.C) {
assert.ErrorContains(c, err, expected)
}

func (s *DockerSuite) TestContainerAPIDeleteRemoveVolume(c *check.C) {
func (s *DockerSuite) TestContainerAPIDeleteRemoveVolume(c *testing.T) {
testRequires(c, testEnv.IsLocalDaemon)

vol := "/testvolume"
@@ -1259,11 +1220,11 @@ func (s *DockerSuite) TestContainerAPIDeleteRemoveVolume(c *check.C) {
assert.NilError(c, err)

_, err = os.Stat(source)
c.Assert(os.IsNotExist(err), checker.True, check.Commentf("expected to get ErrNotExist error, got %v", err))
assert.Assert(c, os.IsNotExist(err), "expected to get ErrNotExist error, got %v", err)
}

// Regression test for https://github.com/docker/docker/issues/6231
func (s *DockerSuite) TestContainerAPIChunkedEncoding(c *check.C) {
func (s *DockerSuite) TestContainerAPIChunkedEncoding(c *testing.T) {

config := map[string]interface{}{
"Image": "busybox",
@@ -1278,16 +1239,16 @@ func (s *DockerSuite) TestContainerAPIChunkedEncoding(c *check.C) {
req.ContentLength = -1
return nil
}))
c.Assert(err, checker.IsNil, check.Commentf("error creating container with chunked encoding"))
assert.Assert(c, err == nil, "error creating container with chunked encoding")
defer resp.Body.Close()
assert.Equal(c, resp.StatusCode, http.StatusCreated)
}

func (s *DockerSuite) TestContainerAPIPostContainerStop(c *check.C) {
func (s *DockerSuite) TestContainerAPIPostContainerStop(c *testing.T) {
out := runSleepingContainer(c)

containerID := strings.TrimSpace(out)
c.Assert(waitRun(containerID), checker.IsNil)
assert.Assert(c, waitRun(containerID) == nil)

cli, err := client.NewClientWithOpts(client.FromEnv)
assert.NilError(c, err)
@@ -1295,11 +1256,11 @@ func (s *DockerSuite) TestContainerAPIPostContainerStop(c *check.C) {

err = cli.ContainerStop(context.Background(), containerID, nil)
assert.NilError(c, err)
c.Assert(waitInspect(containerID, "{{ .State.Running }}", "false", 60*time.Second), checker.IsNil)
assert.Assert(c, waitInspect(containerID, "{{ .State.Running }}", "false", 60*time.Second) == nil)
}

// #14170
func (s *DockerSuite) TestPostContainerAPICreateWithStringOrSliceEntrypoint(c *check.C) {
func (s *DockerSuite) TestPostContainerAPICreateWithStringOrSliceEntrypoint(c *testing.T) {
config := containertypes.Config{
Image: "busybox",
Entrypoint: []string{"echo"},
@@ -1327,7 +1288,7 @@ func (s *DockerSuite) TestPostContainerAPICreateWithStringOrSliceEntrypoint(c *c
}

// #14170
func (s *DockerSuite) TestPostContainersCreateWithStringOrSliceCmd(c *check.C) {
func (s *DockerSuite) TestPostContainersCreateWithStringOrSliceCmd(c *testing.T) {
config := containertypes.Config{
Image: "busybox",
Cmd: []string{"echo", "hello", "world"},
@@ -1356,7 +1317,7 @@ func (s *DockerSuite) TestPostContainersCreateWithStringOrSliceCmd(c *check.C) {
// regression #14318
// for backward compatibility testing with and without CAP_ prefix
// and with upper and lowercase
func (s *DockerSuite) TestPostContainersCreateWithStringOrSliceCapAddDrop(c *check.C) {
func (s *DockerSuite) TestPostContainersCreateWithStringOrSliceCapAddDrop(c *testing.T) {
// Windows doesn't support CapAdd/CapDrop
testRequires(c, DaemonIsLinux)
config := struct {
@@ -1385,7 +1346,7 @@ func (s *DockerSuite) TestPostContainersCreateWithStringOrSliceCapAddDrop(c *che
}

// #14915
func (s *DockerSuite) TestContainerAPICreateNoHostConfig118(c *check.C) {
func (s *DockerSuite) TestContainerAPICreateNoHostConfig118(c *testing.T) {
testRequires(c, DaemonIsLinux) // Windows only support 1.25 or later
config := containertypes.Config{
Image: "busybox",
@@ -1401,7 +1362,7 @@ func (s *DockerSuite) TestContainerAPICreateNoHostConfig118(c *check.C) {
// Ensure an error occurs when you have a container read-only rootfs but you
// extract an archive to a symlink in a writable volume which points to a
// directory outside of the volume.
func (s *DockerSuite) TestPutContainerArchiveErrSymlinkInVolumeToReadOnlyRootfs(c *check.C) {
func (s *DockerSuite) TestPutContainerArchiveErrSymlinkInVolumeToReadOnlyRootfs(c *testing.T) {
// Windows does not support read-only rootfs
// Requires local volume mount bind.
// --read-only + userns has remount issues
@@ -1427,7 +1388,7 @@ func (s *DockerSuite) TestPutContainerArchiveErrSymlinkInVolumeToReadOnlyRootfs(
assert.ErrorContains(c, err, "container rootfs is marked read-only")
}

func (s *DockerSuite) TestPostContainersCreateWithWrongCpusetValues(c *check.C) {
func (s *DockerSuite) TestPostContainersCreateWithWrongCpusetValues(c *testing.T) {
// Not supported on Windows
testRequires(c, DaemonIsLinux)

@@ -1460,7 +1421,7 @@ func (s *DockerSuite) TestPostContainersCreateWithWrongCpusetValues(c *check.C)
assert.ErrorContains(c, err, expected)
}

func (s *DockerSuite) TestPostContainersCreateShmSizeNegative(c *check.C) {
func (s *DockerSuite) TestPostContainersCreateShmSizeNegative(c *testing.T) {
// ShmSize is not supported on Windows
testRequires(c, DaemonIsLinux)
config := containertypes.Config{
@@ -1478,7 +1439,7 @@ func (s *DockerSuite) TestPostContainersCreateShmSizeNegative(c *check.C) {
assert.ErrorContains(c, err, "SHM size can not be less than 0")
}

func (s *DockerSuite) TestPostContainersCreateShmSizeHostConfigOmitted(c *check.C) {
func (s *DockerSuite) TestPostContainersCreateShmSizeHostConfigOmitted(c *testing.T) {
// ShmSize is not supported on Windows
testRequires(c, DaemonIsLinux)
var defaultSHMSize int64 = 67108864
@@ -1497,7 +1458,7 @@ func (s *DockerSuite) TestPostContainersCreateShmSizeHostConfigOmitted(c *check.
containerJSON, err := cli.ContainerInspect(context.Background(), container.ID)
assert.NilError(c, err)

c.Assert(containerJSON.HostConfig.ShmSize, check.Equals, defaultSHMSize)
assert.Equal(c, containerJSON.HostConfig.ShmSize, defaultSHMSize)

out, _ := dockerCmd(c, "start", "-i", containerJSON.ID)
shmRegexp := regexp.MustCompile(`shm on /dev/shm type tmpfs(.*)size=65536k`)
@@ -1506,7 +1467,7 @@ func (s *DockerSuite) TestPostContainersCreateShmSizeHostConfigOmitted(c *check.
}
}

func (s *DockerSuite) TestPostContainersCreateShmSizeOmitted(c *check.C) {
func (s *DockerSuite) TestPostContainersCreateShmSizeOmitted(c *testing.T) {
// ShmSize is not supported on Windows
testRequires(c, DaemonIsLinux)
config := containertypes.Config{
@@ -1524,7 +1485,7 @@ func (s *DockerSuite) TestPostContainersCreateShmSizeOmitted(c *check.C) {
containerJSON, err := cli.ContainerInspect(context.Background(), container.ID)
assert.NilError(c, err)

c.Assert(containerJSON.HostConfig.ShmSize, check.Equals, int64(67108864))
assert.Equal(c, containerJSON.HostConfig.ShmSize, int64(67108864))

out, _ := dockerCmd(c, "start", "-i", containerJSON.ID)
shmRegexp := regexp.MustCompile(`shm on /dev/shm type tmpfs(.*)size=65536k`)
@@ -1533,7 +1494,7 @@ func (s *DockerSuite) TestPostContainersCreateShmSizeOmitted(c *check.C) {
}
}

func (s *DockerSuite) TestPostContainersCreateWithShmSize(c *check.C) {
func (s *DockerSuite) TestPostContainersCreateWithShmSize(c *testing.T) {
// ShmSize is not supported on Windows
testRequires(c, DaemonIsLinux)
config := containertypes.Config{
@@ -1555,7 +1516,7 @@ func (s *DockerSuite) TestPostContainersCreateWithShmSize(c *check.C) {
containerJSON, err := cli.ContainerInspect(context.Background(), container.ID)
assert.NilError(c, err)

c.Assert(containerJSON.HostConfig.ShmSize, check.Equals, int64(1073741824))
assert.Equal(c, containerJSON.HostConfig.ShmSize, int64(1073741824))

out, _ := dockerCmd(c, "start", "-i", containerJSON.ID)
shmRegex := regexp.MustCompile(`shm on /dev/shm type tmpfs(.*)size=1048576k`)
@@ -1564,7 +1525,7 @@ func (s *DockerSuite) TestPostContainersCreateWithShmSize(c *check.C) {
}
}

func (s *DockerSuite) TestPostContainersCreateMemorySwappinessHostConfigOmitted(c *check.C) {
func (s *DockerSuite) TestPostContainersCreateMemorySwappinessHostConfigOmitted(c *testing.T) {
// Swappiness is not supported on Windows
testRequires(c, DaemonIsLinux)
config := containertypes.Config{
@@ -1582,14 +1543,14 @@ func (s *DockerSuite) TestPostContainersCreateMemorySwappinessHostConfigOmitted(
assert.NilError(c, err)

if versions.LessThan(testEnv.DaemonAPIVersion(), "1.31") {
c.Assert(*containerJSON.HostConfig.MemorySwappiness, check.Equals, int64(-1))
assert.Equal(c, *containerJSON.HostConfig.MemorySwappiness, int64(-1))
} else {
c.Assert(containerJSON.HostConfig.MemorySwappiness, check.IsNil)
assert.Assert(c, containerJSON.HostConfig.MemorySwappiness == nil)
}
}

// check validation is done daemon side and not only in cli
func (s *DockerSuite) TestPostContainersCreateWithOomScoreAdjInvalidRange(c *check.C) {
func (s *DockerSuite) TestPostContainersCreateWithOomScoreAdjInvalidRange(c *testing.T) {
// OomScoreAdj is not supported on Windows
testRequires(c, DaemonIsLinux)

@@ -1623,7 +1584,7 @@ func (s *DockerSuite) TestPostContainersCreateWithOomScoreAdjInvalidRange(c *che
}

// test case for #22210 where an empty container name caused panic.
func (s *DockerSuite) TestContainerAPIDeleteWithEmptyName(c *check.C) {
func (s *DockerSuite) TestContainerAPIDeleteWithEmptyName(c *testing.T) {
cli, err := client.NewClientWithOpts(client.FromEnv)
assert.NilError(c, err)
defer cli.Close()
@@ -1632,7 +1593,7 @@ func (s *DockerSuite) TestContainerAPIDeleteWithEmptyName(c *check.C) {
assert.ErrorContains(c, err, "No such container")
}

func (s *DockerSuite) TestContainerAPIStatsWithNetworkDisabled(c *check.C) {
func (s *DockerSuite) TestContainerAPIStatsWithNetworkDisabled(c *testing.T) {
// Problematic on Windows as Windows does not support stats
testRequires(c, DaemonIsLinux)

@@ -1654,7 +1615,7 @@ func (s *DockerSuite) TestContainerAPIStatsWithNetworkDisabled(c *check.C) {
err = cli.ContainerStart(context.Background(), name, types.ContainerStartOptions{})
assert.NilError(c, err)

c.Assert(waitRun(name), check.IsNil)
assert.Assert(c, waitRun(name) == nil)

type b struct {
stats types.ContainerStats
@@ -1676,12 +1637,12 @@ func (s *DockerSuite) TestContainerAPIStatsWithNetworkDisabled(c *check.C) {
case <-time.After(2 * time.Second):
c.Fatal("stream was not closed after container was removed")
case sr := <-bc:
c.Assert(sr.err, checker.IsNil)
assert.Assert(c, sr.err == nil)
sr.stats.Body.Close()
}
}

func (s *DockerSuite) TestContainersAPICreateMountsValidation(c *check.C) {
func (s *DockerSuite) TestContainersAPICreateMountsValidation(c *testing.T) {
type testCase struct {
config containertypes.Config
hostConfig containertypes.HostConfig
@@ -1972,7 +1933,7 @@ func (s *DockerSuite) TestContainersAPICreateMountsValidation(c *check.C) {
}
}

func (s *DockerSuite) TestContainerAPICreateMountsBindRead(c *check.C) {
func (s *DockerSuite) TestContainerAPICreateMountsBindRead(c *testing.T) {
testRequires(c, NotUserNamespace, testEnv.IsLocalDaemon)
// also with data in the host side
prefix, slash := getPrefixAndSlashFromDaemonPlatform()
@@ -2003,7 +1964,7 @@ func (s *DockerSuite) TestContainerAPICreateMountsBindRead(c *check.C) {
}

// Test Mounts comes out as expected for the MountPoint
func (s *DockerSuite) TestContainersAPICreateMountsCreate(c *check.C) {
func (s *DockerSuite) TestContainersAPICreateMountsCreate(c *testing.T) {
prefix, slash := getPrefixAndSlashFromDaemonPlatform()
destPath := prefix + slash + "foo"

@@ -2092,8 +2053,7 @@ func (s *DockerSuite) TestContainersAPICreateMountsCreate(c *check.C) {
assert.NilError(c, err)
defer os.RemoveAll(tmpDir3)

c.Assert(mount.Mount(tmpDir3, tmpDir3, "none", "bind,rw"), checker.IsNil)
c.Assert(mount.ForceMount("", tmpDir3, "none", "shared"), checker.IsNil)
assert.Assert(c, mount.Mount(tmpDir3, tmpDir3, "none", "bind,shared") == nil)

cases = append(cases, []testCase{
{
@@ -2218,7 +2178,7 @@ func containerExit(apiclient client.APIClient, name string) func(poll.LogT) poll
}
}

func (s *DockerSuite) TestContainersAPICreateMountsTmpfs(c *check.C) {
func (s *DockerSuite) TestContainersAPICreateMountsTmpfs(c *testing.T) {
testRequires(c, DaemonIsLinux)
type testCase struct {
cfg mounttypes.Mount
@@ -2260,7 +2220,7 @@ func (s *DockerSuite) TestContainersAPICreateMountsTmpfs(c *check.C) {
assert.NilError(c, err)
out, _ := dockerCmd(c, "start", "-a", cName)
for _, option := range x.expectedOptions {
c.Assert(out, checker.Contains, option)
assert.Assert(c, strings.Contains(out, option))
}
}
}
@@ -2268,7 +2228,7 @@ func (s *DockerSuite) TestContainersAPICreateMountsTmpfs(c *check.C) {
// Regression test for #33334
// Makes sure that when a container which has a custom stop signal + restart=always
// gets killed (with SIGKILL) by the kill API, that the restart policy is cancelled.
func (s *DockerSuite) TestContainerKillCustomStopSignal(c *check.C) {
func (s *DockerSuite) TestContainerKillCustomStopSignal(c *testing.T) {
id := strings.TrimSpace(runSleepingContainer(c, "--stop-signal=SIGTERM", "--restart=always"))
res, _, err := request.Post("/containers/" + id + "/kill")
assert.NilError(c, err)

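Every hunk in this comparison applies the same mechanical conversion: test receivers change from *check.C to *testing.T, and go-check/checker assertions become gotest.tools calls. A minimal sketch of the new style next to the old one, as it would appear in a _test.go file; the test name and values here are made up for illustration:

import (
	"strings"
	"testing"

	"gotest.tools/assert"
	is "gotest.tools/assert/cmp"
)

func TestAssertionStyleSketch(t *testing.T) {
	out := "value1"

	// was: c.Assert(out, checker.Equals, "value1", check.Commentf("unexpected value"))
	assert.Equal(t, out, "value1", "unexpected value")

	// was: c.Assert(out, checker.Contains, "value")
	assert.Assert(t, strings.Contains(out, "value"))

	// was: c.Assert(out, checker.Matches, "value.")
	assert.Assert(t, is.Regexp("^value.$", out))

	// was: c.Assert(err, checker.IsNil)
	var err error
	assert.NilError(t, err)
}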
@@ -8,17 +8,17 @@ import (
"io/ioutil"
"math/rand"
"strings"
"testing"

winio "github.com/Microsoft/go-winio"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/mount"
"github.com/go-check/check"
"gotest.tools/assert"
is "gotest.tools/assert/cmp"
)

func (s *DockerSuite) TestContainersAPICreateMountsBindNamedPipe(c *check.C) {
func (s *DockerSuite) TestContainersAPICreateMountsBindNamedPipe(c *testing.T) {
testRequires(c, testEnv.IsLocalDaemon, DaemonIsWindowsAtLeastBuild(16299)) // Named pipe support was added in RS3

// Create a host pipe to map into the container

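The Windows-only bind-mount test above needs a named pipe to exist on the host before a container can mount it. A hedged sketch of that host-side setup with go-winio; the pipe name below is invented for illustration and the test's real setup may differ:

package main

import (
	"log"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	// Hypothetical pipe name, for illustration only.
	pipeName := `\\.\pipe\docker-api-test`

	// ListenPipe creates the named pipe on the host; nil uses the default config.
	l, err := winio.ListenPipe(pipeName, nil)
	if err != nil {
		log.Fatalf("creating host pipe: %v", err)
	}
	defer l.Close()

	// The pipe can then be bind-mounted into a container, e.g. with
	// --mount type=npipe,source=\\.\pipe\docker-api-test,target=\\.\pipe\target
	log.Println("listening on", pipeName)
}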
@@ -8,14 +8,14 @@ import (
"net/http"
"strings"
"sync"
"testing"

"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/internal/test/request"
"github.com/go-check/check"
"gotest.tools/assert"
)

func (s *DockerSuite) TestExecResizeAPIHeightWidthNoInt(c *check.C) {
func (s *DockerSuite) TestExecResizeAPIHeightWidthNoInt(c *testing.T) {
testRequires(c, DaemonIsLinux)
out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
cleanedContainerID := strings.TrimSpace(out)
@@ -31,7 +31,7 @@ func (s *DockerSuite) TestExecResizeAPIHeightWidthNoInt(c *check.C) {
}

// Part of #14845
func (s *DockerSuite) TestExecResizeImmediatelyAfterExecStart(c *check.C) {
func (s *DockerSuite) TestExecResizeImmediatelyAfterExecStart(c *testing.T) {
name := "exec_resize_test"
dockerCmd(c, "run", "-d", "-i", "-t", "--name", name, "--restart", "always", "busybox", "/bin/sh")


@@ -1,5 +1,3 @@
// +build !test_no_exec

package main

import (
@@ -11,6 +9,7 @@ import (
"net/http"
"os"
"strings"
"testing"
"time"

"github.com/docker/docker/api/types"
@@ -18,12 +17,12 @@ import (
"github.com/docker/docker/client"
"github.com/docker/docker/integration-cli/checker"
"github.com/docker/docker/internal/test/request"
"github.com/go-check/check"
"gotest.tools/assert"
"gotest.tools/poll"
)

// Regression test for #9414
func (s *DockerSuite) TestExecAPICreateNoCmd(c *check.C) {
func (s *DockerSuite) TestExecAPICreateNoCmd(c *testing.T) {
name := "exec_test"
dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh")

@@ -39,7 +38,7 @@ func (s *DockerSuite) TestExecAPICreateNoCmd(c *check.C) {
assert.Assert(c, strings.Contains(getErrorMessage(c, b), "No exec command specified"), "Expected message when creating exec command with no Cmd specified")
}

func (s *DockerSuite) TestExecAPICreateNoValidContentType(c *check.C) {
func (s *DockerSuite) TestExecAPICreateNoValidContentType(c *testing.T) {
name := "exec_test"
dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh")

@@ -60,7 +59,7 @@ func (s *DockerSuite) TestExecAPICreateNoValidContentType(c *check.C) {
assert.Assert(c, strings.Contains(getErrorMessage(c, b), "Content-Type specified"), "Expected message when creating exec command with invalid Content-Type specified")
}

func (s *DockerSuite) TestExecAPICreateContainerPaused(c *check.C) {
func (s *DockerSuite) TestExecAPICreateContainerPaused(c *testing.T) {
// Not relevant on Windows as Windows containers cannot be paused
testRequires(c, DaemonIsLinux)
name := "exec_create_test"
@@ -79,7 +78,7 @@ func (s *DockerSuite) TestExecAPICreateContainerPaused(c *check.C) {
assert.ErrorContains(c, err, "Container "+name+" is paused, unpause the container before exec", "Expected message when creating exec command with Container %s is paused", name)
}

func (s *DockerSuite) TestExecAPIStart(c *check.C) {
func (s *DockerSuite) TestExecAPIStart(c *testing.T) {
testRequires(c, DaemonIsLinux) // Uses pause/unpause but bits may be salvageable to Windows to Windows CI
dockerCmd(c, "run", "-d", "--name", "test", "busybox", "top")

@@ -106,7 +105,7 @@ func (s *DockerSuite) TestExecAPIStart(c *check.C) {
startExec(c, id, http.StatusOK)
}

func (s *DockerSuite) TestExecAPIStartEnsureHeaders(c *check.C) {
func (s *DockerSuite) TestExecAPIStartEnsureHeaders(c *testing.T) {
testRequires(c, DaemonIsLinux)
dockerCmd(c, "run", "-d", "--name", "test", "busybox", "top")

@@ -116,7 +115,7 @@ func (s *DockerSuite) TestExecAPIStartEnsureHeaders(c *check.C) {
assert.Assert(c, resp.Header.Get("Server") != "")
}

func (s *DockerSuite) TestExecAPIStartBackwardsCompatible(c *check.C) {
func (s *DockerSuite) TestExecAPIStartBackwardsCompatible(c *testing.T) {
testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later
runSleepingContainer(c, "-d", "--name", "test")
id := createExec(c, "test")
@@ -125,13 +124,13 @@ func (s *DockerSuite) TestExecAPIStartBackwardsCompatible(c *check.C) {
assert.NilError(c, err)

b, err := request.ReadBody(body)
comment := check.Commentf("response body: %s", b)
comment := fmt.Sprintf("response body: %s", b)
assert.NilError(c, err, comment)
assert.Equal(c, resp.StatusCode, http.StatusOK, comment)
}

// #19362
func (s *DockerSuite) TestExecAPIStartMultipleTimesError(c *check.C) {
func (s *DockerSuite) TestExecAPIStartMultipleTimesError(c *testing.T) {
runSleepingContainer(c, "-d", "--name", "test")
execID := createExec(c, "test")
startExec(c, execID, http.StatusOK)
@@ -141,7 +140,7 @@ func (s *DockerSuite) TestExecAPIStartMultipleTimesError(c *check.C) {
}

// #20638
func (s *DockerSuite) TestExecAPIStartWithDetach(c *check.C) {
func (s *DockerSuite) TestExecAPIStartWithDetach(c *testing.T) {
name := "foo"
runSleepingContainer(c, "-d", "-t", "--name", name)

@@ -161,7 +160,7 @@ func (s *DockerSuite) TestExecAPIStartWithDetach(c *check.C) {
assert.NilError(c, err)

b, err := request.ReadBody(body)
comment := check.Commentf("response body: %s", b)
comment := fmt.Sprintf("response body: %s", b)
assert.NilError(c, err, comment)

resp, _, err := request.Get("/_ping")
@@ -172,7 +171,7 @@ func (s *DockerSuite) TestExecAPIStartWithDetach(c *check.C) {
}

// #30311
func (s *DockerSuite) TestExecAPIStartValidCommand(c *check.C) {
func (s *DockerSuite) TestExecAPIStartValidCommand(c *testing.T) {
name := "exec_test"
dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh")

@@ -188,7 +187,7 @@ func (s *DockerSuite) TestExecAPIStartValidCommand(c *check.C) {
}

// #30311
func (s *DockerSuite) TestExecAPIStartInvalidCommand(c *check.C) {
func (s *DockerSuite) TestExecAPIStartInvalidCommand(c *testing.T) {
name := "exec_test"
dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh")

@@ -206,7 +205,7 @@ func (s *DockerSuite) TestExecAPIStartInvalidCommand(c *check.C) {
assert.Assert(c, inspectJSON.ExecIDs == nil)
}

func (s *DockerSuite) TestExecStateCleanup(c *check.C) {
func (s *DockerSuite) TestExecStateCleanup(c *testing.T) {
testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon)

// This test checks accidental regressions. Not part of stable API.
@@ -217,10 +216,10 @@ func (s *DockerSuite) TestExecStateCleanup(c *check.C) {

stateDir := "/var/run/docker/containerd/" + cid

checkReadDir := func(c *check.C) (interface{}, check.CommentInterface) {
checkReadDir := func(c *testing.T) (interface{}, string) {
fi, err := ioutil.ReadDir(stateDir)
assert.NilError(c, err)
return len(fi), nil
return len(fi), ""
}

fi, err := ioutil.ReadDir(stateDir)
@@ -231,13 +230,13 @@ func (s *DockerSuite) TestExecStateCleanup(c *check.C) {
startExec(c, id, http.StatusOK)
waitForExec(c, id)

waitAndAssert(c, 5*time.Second, checkReadDir, checker.Equals, len(fi))
poll.WaitOn(c, pollCheck(c, checkReadDir, checker.Equals(len(fi))), poll.WithTimeout(5*time.Second))

id = createExecCmd(c, name, "invalid")
startExec(c, id, http.StatusBadRequest)
waitForExec(c, id)

waitAndAssert(c, 5*time.Second, checkReadDir, checker.Equals, len(fi))
poll.WaitOn(c, pollCheck(c, checkReadDir, checker.Equals(len(fi))), poll.WithTimeout(5*time.Second))

dockerCmd(c, "stop", name)
_, err = os.Stat(stateDir)
@@ -245,11 +244,11 @@ func (s *DockerSuite) TestExecStateCleanup(c *check.C) {
assert.Assert(c, os.IsNotExist(err))
}
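The waitAndAssert calls above are replaced by poll.WaitOn together with a repository-specific pollCheck adapter. For reference, a rough equivalent written as a _test.go helper directly against the gotest.tools/poll API; the helper name, directory, and count below are illustrative, not code from this change:

import (
	"io/ioutil"
	"testing"
	"time"

	"gotest.tools/poll"
)

// waitForDirEntries polls until dir holds exactly want entries, failing the
// test if that does not happen within five seconds.
func waitForDirEntries(t *testing.T, dir string, want int) {
	check := func(poll.LogT) poll.Result {
		fi, err := ioutil.ReadDir(dir)
		if err != nil {
			return poll.Error(err)
		}
		if len(fi) == want {
			return poll.Success()
		}
		return poll.Continue("found %d entries in %s, want %d", len(fi), dir, want)
	}
	poll.WaitOn(t, check, poll.WithTimeout(5*time.Second))
}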

func createExec(c *check.C, name string) string {
func createExec(c *testing.T, name string) string {
return createExecCmd(c, name, "true")
}

func createExecCmd(c *check.C, name string, cmd string) string {
func createExecCmd(c *testing.T, name string, cmd string) string {
_, reader, err := request.Post(fmt.Sprintf("/containers/%s/exec", name), request.JSONBody(map[string]interface{}{"Cmd": []string{cmd}}))
assert.NilError(c, err)
b, err := ioutil.ReadAll(reader)
@@ -262,7 +261,7 @@ func createExecCmd(c *check.C, name string, cmd string) string {
return createResp.ID
}

func startExec(c *check.C, id string, code int) {
func startExec(c *testing.T, id string, code int) {
resp, body, err := request.Post(fmt.Sprintf("/exec/%s/start", id), request.RawString(`{"Detach": true}`), request.JSON)
assert.NilError(c, err)

@@ -271,7 +270,7 @@ func startExec(c *check.C, id string, code int) {
assert.Equal(c, resp.StatusCode, code, "response body: %s", b)
}

func inspectExec(c *check.C, id string, out interface{}) {
func inspectExec(c *testing.T, id string, out interface{}) {
resp, body, err := request.Get(fmt.Sprintf("/exec/%s/json", id))
assert.NilError(c, err)
defer body.Close()
@@ -280,7 +279,7 @@ func inspectExec(c *check.C, id string, out interface{}) {
assert.NilError(c, err)
}

func waitForExec(c *check.C, id string) {
func waitForExec(c *testing.T, id string) {
timeout := time.After(60 * time.Second)
var execJSON struct{ Running bool }
for {
@@ -297,7 +296,7 @@ func waitForExec(c *check.C, id string) {
}
}

func inspectContainer(c *check.C, id string, out interface{}) {
func inspectContainer(c *testing.T, id string, out interface{}) {
resp, body, err := request.Get(fmt.Sprintf("/containers/%s/json", id))
assert.NilError(c, err)
defer body.Close()

@@ -7,6 +7,7 @@ import (
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
@@ -15,11 +16,10 @@ import (
|
||||
"github.com/docker/docker/integration-cli/cli/build"
|
||||
"github.com/docker/docker/internal/test/request"
|
||||
"github.com/docker/docker/pkg/parsers/kernel"
|
||||
"github.com/go-check/check"
|
||||
"gotest.tools/assert"
|
||||
)
|
||||
|
||||
func (s *DockerSuite) TestAPIImagesFilter(c *check.C) {
|
||||
func (s *DockerSuite) TestAPIImagesFilter(c *testing.T) {
|
||||
cli, err := client.NewClientWithOpts(client.FromEnv)
|
||||
assert.NilError(c, err)
|
||||
defer cli.Close()
|
||||
@@ -57,13 +57,13 @@ func (s *DockerSuite) TestAPIImagesFilter(c *check.C) {
|
||||
assert.Equal(c, len(images[0].RepoTags), 1)
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestAPIImagesSaveAndLoad(c *check.C) {
|
||||
func (s *DockerSuite) TestAPIImagesSaveAndLoad(c *testing.T) {
|
||||
if runtime.GOOS == "windows" {
|
||||
v, err := kernel.GetKernelVersion()
|
||||
assert.NilError(c, err)
|
||||
build, _ := strconv.Atoi(strings.Split(strings.SplitN(v.String(), " ", 3)[2][1:], ".")[0])
|
||||
if build == 16299 {
|
||||
c.Skip("Temporarily disabled on RS3 builds")
|
||||
if build <= 16299 {
|
||||
c.Skip("Temporarily disabled on RS3 and older because they are too slow. See #39909")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -84,10 +84,10 @@ func (s *DockerSuite) TestAPIImagesSaveAndLoad(c *check.C) {
|
||||
assert.Equal(c, res.StatusCode, http.StatusOK)
|
||||
|
||||
inspectOut := cli.InspectCmd(c, id, cli.Format(".Id")).Combined()
|
||||
assert.Equal(c, strings.TrimSpace(string(inspectOut)), id, "load did not work properly")
|
||||
assert.Equal(c, strings.TrimSpace(inspectOut), id, "load did not work properly")
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestAPIImagesDelete(c *check.C) {
|
||||
func (s *DockerSuite) TestAPIImagesDelete(c *testing.T) {
|
||||
cli, err := client.NewClientWithOpts(client.FromEnv)
|
||||
assert.NilError(c, err)
|
||||
defer cli.Close()
|
||||
@@ -111,7 +111,7 @@ func (s *DockerSuite) TestAPIImagesDelete(c *check.C) {
|
||||
assert.NilError(c, err)
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestAPIImagesHistory(c *check.C) {
|
||||
func (s *DockerSuite) TestAPIImagesHistory(c *testing.T) {
|
||||
cli, err := client.NewClientWithOpts(client.FromEnv)
|
||||
assert.NilError(c, err)
|
||||
defer cli.Close()
|
||||
@@ -137,7 +137,7 @@ func (s *DockerSuite) TestAPIImagesHistory(c *check.C) {
|
||||
assert.Assert(c, found)
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestAPIImagesImportBadSrc(c *check.C) {
|
||||
func (s *DockerSuite) TestAPIImagesImportBadSrc(c *testing.T) {
|
||||
if runtime.GOOS == "windows" {
|
||||
v, err := kernel.GetKernelVersion()
|
||||
assert.NilError(c, err)
|
||||
@@ -172,7 +172,7 @@ func (s *DockerSuite) TestAPIImagesImportBadSrc(c *check.C) {
|
||||
}
|
||||
|
||||
// #14846
|
||||
func (s *DockerSuite) TestAPIImagesSearchJSONContentType(c *check.C) {
|
||||
func (s *DockerSuite) TestAPIImagesSearchJSONContentType(c *testing.T) {
|
||||
testRequires(c, Network)
|
||||
|
||||
res, b, err := request.Get("/images/search?term=test", request.JSON)
|
||||
@@ -184,7 +184,7 @@ func (s *DockerSuite) TestAPIImagesSearchJSONContentType(c *check.C) {
|
||||
|
||||
// Test case for 30027: image size reported as -1 in v1.12 client against v1.13 daemon.
|
||||
// This test checks to make sure both v1.12 and v1.13 client against v1.13 daemon get correct `Size` after the fix.
|
||||
func (s *DockerSuite) TestAPIImagesSizeCompatibility(c *check.C) {
|
||||
func (s *DockerSuite) TestAPIImagesSizeCompatibility(c *testing.T) {
|
||||
apiclient := testEnv.APIClient()
|
||||
defer apiclient.Close()
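The hunks above also swap go-check assertions for `gotest.tools/assert`. A self-contained sketch of the three call shapes that recur in these files, with illustrative values only:

```go
package main

import (
	"strings"
	"testing"

	"gotest.tools/assert"
)

func TestAssertStyleSketch(t *testing.T) {
	out := "  abc123\n"
	id := "abc123"

	// c.Assert(x, checker.Equals, y, check.Commentf(...)) becomes
	// assert.Equal(t, x, y, "message")
	assert.Equal(t, strings.TrimSpace(out), id, "load did not work properly")

	// c.Assert(err, checker.IsNil) becomes assert.NilError(t, err)
	var err error
	assert.NilError(t, err)

	// plain boolean conditions use assert.Assert with an optional message
	assert.Assert(t, len(id) > 0, "expected a non-empty id")
}
```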
@@ -4,16 +4,16 @@ import (
"context"
"encoding/json"
"strings"
"testing"

"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/versions/v1p20"
"github.com/docker/docker/client"
"github.com/go-check/check"
"gotest.tools/assert"
is "gotest.tools/assert/cmp"
)

func (s *DockerSuite) TestInspectAPIContainerResponse(c *check.C) {
|
||||
func (s *DockerSuite) TestInspectAPIContainerResponse(c *testing.T) {
|
||||
out, _ := dockerCmd(c, "run", "-d", "busybox", "true")
|
||||
|
||||
cleanedContainerID := strings.TrimSpace(out)
|
||||
@@ -57,7 +57,7 @@ func (s *DockerSuite) TestInspectAPIContainerResponse(c *check.C) {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestInspectAPIContainerVolumeDriverLegacy(c *check.C) {
|
||||
func (s *DockerSuite) TestInspectAPIContainerVolumeDriverLegacy(c *testing.T) {
|
||||
// No legacy implications for Windows
|
||||
testRequires(c, DaemonIsLinux)
|
||||
out, _ := dockerCmd(c, "run", "-d", "busybox", "true")
|
||||
@@ -80,7 +80,7 @@ func (s *DockerSuite) TestInspectAPIContainerVolumeDriverLegacy(c *check.C) {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestInspectAPIContainerVolumeDriver(c *check.C) {
|
||||
func (s *DockerSuite) TestInspectAPIContainerVolumeDriver(c *testing.T) {
|
||||
out, _ := dockerCmd(c, "run", "-d", "--volume-driver", "local", "busybox", "true")
|
||||
|
||||
cleanedContainerID := strings.TrimSpace(out)
|
||||
@@ -104,7 +104,7 @@ func (s *DockerSuite) TestInspectAPIContainerVolumeDriver(c *check.C) {
|
||||
assert.Assert(c, ok, "API version 1.25 expected to include VolumeDriver in 'HostConfig'")
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestInspectAPIImageResponse(c *check.C) {
|
||||
func (s *DockerSuite) TestInspectAPIImageResponse(c *testing.T) {
|
||||
dockerCmd(c, "tag", "busybox:latest", "busybox:mytag")
|
||||
cli, err := client.NewClientWithOpts(client.FromEnv)
|
||||
assert.NilError(c, err)
|
||||
@@ -119,7 +119,7 @@ func (s *DockerSuite) TestInspectAPIImageResponse(c *check.C) {
|
||||
}
|
||||
|
||||
// #17131, #17139, #17173
|
||||
func (s *DockerSuite) TestInspectAPIEmptyFieldsInConfigPre121(c *check.C) {
|
||||
func (s *DockerSuite) TestInspectAPIEmptyFieldsInConfigPre121(c *testing.T) {
|
||||
// Not relevant on Windows
|
||||
testRequires(c, DaemonIsLinux)
|
||||
out, _ := dockerCmd(c, "run", "-d", "busybox", "true")
|
||||
@@ -143,7 +143,7 @@ func (s *DockerSuite) TestInspectAPIEmptyFieldsInConfigPre121(c *check.C) {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestInspectAPIBridgeNetworkSettings120(c *check.C) {
|
||||
func (s *DockerSuite) TestInspectAPIBridgeNetworkSettings120(c *testing.T) {
|
||||
// Not relevant on Windows, and besides it doesn't have any bridge network settings
|
||||
testRequires(c, DaemonIsLinux)
|
||||
out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
|
||||
@@ -160,7 +160,7 @@ func (s *DockerSuite) TestInspectAPIBridgeNetworkSettings120(c *check.C) {
|
||||
assert.Assert(c, len(settings.IPAddress) != 0)
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestInspectAPIBridgeNetworkSettings121(c *check.C) {
|
||||
func (s *DockerSuite) TestInspectAPIBridgeNetworkSettings121(c *testing.T) {
|
||||
// Windows doesn't have any bridge network settings
|
||||
testRequires(c, DaemonIsLinux)
|
||||
out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
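Several of the inspect tests above decode an older API version's response into a generic map and then assert that a key is present. A minimal, self-contained sketch of that ok-style lookup; the map literal stands in for a decoded response, and the assertion message is the one used in the hunk above:

```go
package main

import (
	"testing"

	"gotest.tools/assert"
)

func TestInspectMapLookupSketch(t *testing.T) {
	// stand-in for the HostConfig section of a decoded inspect response
	hostConfig := map[string]interface{}{"VolumeDriver": "local"}

	_, ok := hostConfig["VolumeDriver"]
	assert.Assert(t, ok, "API version 1.25 expected to include VolumeDriver in 'HostConfig'")
}
```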
@@ -10,17 +10,17 @@ import (
"net/http"
"strconv"
"strings"
"testing"
"time"

"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
"github.com/docker/docker/internal/test/request"
"github.com/docker/docker/pkg/stdcopy"
"github.com/go-check/check"
"gotest.tools/assert"
)

func (s *DockerSuite) TestLogsAPIWithStdout(c *check.C) {
|
||||
func (s *DockerSuite) TestLogsAPIWithStdout(c *testing.T) {
|
||||
out, _ := dockerCmd(c, "run", "-d", "-t", "busybox", "/bin/sh", "-c", "while true; do echo hello; sleep 1; done")
|
||||
id := strings.TrimSpace(out)
|
||||
assert.NilError(c, waitRun(id))
|
||||
@@ -56,7 +56,7 @@ func (s *DockerSuite) TestLogsAPIWithStdout(c *check.C) {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestLogsAPINoStdoutNorStderr(c *check.C) {
|
||||
func (s *DockerSuite) TestLogsAPINoStdoutNorStderr(c *testing.T) {
|
||||
name := "logs_test"
|
||||
dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh")
|
||||
cli, err := client.NewClientWithOpts(client.FromEnv)
|
||||
@@ -68,7 +68,7 @@ func (s *DockerSuite) TestLogsAPINoStdoutNorStderr(c *check.C) {
|
||||
}
|
||||
|
||||
// Regression test for #12704
|
||||
func (s *DockerSuite) TestLogsAPIFollowEmptyOutput(c *check.C) {
|
||||
func (s *DockerSuite) TestLogsAPIFollowEmptyOutput(c *testing.T) {
|
||||
name := "logs_test"
|
||||
t0 := time.Now()
|
||||
dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "sleep", "10")
|
||||
@@ -83,14 +83,14 @@ func (s *DockerSuite) TestLogsAPIFollowEmptyOutput(c *check.C) {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestLogsAPIContainerNotFound(c *check.C) {
|
||||
func (s *DockerSuite) TestLogsAPIContainerNotFound(c *testing.T) {
|
||||
name := "nonExistentContainer"
|
||||
resp, _, err := request.Get(fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1&stderr=1&tail=all", name))
|
||||
assert.NilError(c, err)
|
||||
assert.Equal(c, resp.StatusCode, http.StatusNotFound)
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestLogsAPIUntilFutureFollow(c *check.C) {
|
||||
func (s *DockerSuite) TestLogsAPIUntilFutureFollow(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux)
|
||||
name := "logsuntilfuturefollow"
|
||||
dockerCmd(c, "run", "-d", "--name", name, "busybox", "/bin/sh", "-c", "while true; do date +%s; sleep 1; done")
|
||||
@@ -147,7 +147,7 @@ func (s *DockerSuite) TestLogsAPIUntilFutureFollow(c *check.C) {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestLogsAPIUntil(c *check.C) {
|
||||
func (s *DockerSuite) TestLogsAPIUntil(c *testing.T) {
|
||||
testRequires(c, MinimumAPIVersion("1.34"))
|
||||
name := "logsuntil"
|
||||
dockerCmd(c, "run", "--name", name, "busybox", "/bin/sh", "-c", "for i in $(seq 1 3); do echo log$i; sleep 1; done")
|
||||
@@ -157,7 +157,7 @@ func (s *DockerSuite) TestLogsAPIUntil(c *check.C) {
|
||||
c.Fatal(err)
|
||||
}
|
||||
|
||||
extractBody := func(c *check.C, cfg types.ContainerLogsOptions) []string {
|
||||
extractBody := func(c *testing.T, cfg types.ContainerLogsOptions) []string {
|
||||
reader, err := client.ContainerLogs(context.Background(), name, cfg)
|
||||
assert.NilError(c, err)
|
||||
|
||||
@@ -185,7 +185,7 @@ func (s *DockerSuite) TestLogsAPIUntil(c *check.C) {
|
||||
assert.Assert(c, !strings.Contains(logsString, "log3"), "unexpected log message returned, until=%v", until)
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestLogsAPIUntilDefaultValue(c *check.C) {
|
||||
func (s *DockerSuite) TestLogsAPIUntilDefaultValue(c *testing.T) {
|
||||
name := "logsuntildefaultval"
|
||||
dockerCmd(c, "run", "--name", name, "busybox", "/bin/sh", "-c", "for i in $(seq 1 3); do echo log$i; done")
|
||||
|
||||
@@ -194,7 +194,7 @@ func (s *DockerSuite) TestLogsAPIUntilDefaultValue(c *check.C) {
|
||||
c.Fatal(err)
|
||||
}
|
||||
|
||||
extractBody := func(c *check.C, cfg types.ContainerLogsOptions) []string {
|
||||
extractBody := func(c *testing.T, cfg types.ContainerLogsOptions) []string {
|
||||
reader, err := client.ContainerLogs(context.Background(), name, cfg)
|
||||
assert.NilError(c, err)
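The logs tests above define an `extractBody` closure that now takes `*testing.T` directly. A sketch of that pattern, lifted into a named function and assuming a constructed API client plus the `stdcopy` demultiplexer from the imports shown:

```go
package main

import (
	"bytes"
	"context"
	"strings"
	"testing"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/stdcopy"
	"gotest.tools/assert"
)

// Stream the container logs and demultiplex stdout/stderr into lines.
func extractBodySketch(t *testing.T, apiClient client.APIClient, name string, cfg types.ContainerLogsOptions) []string {
	reader, err := apiClient.ContainerLogs(context.Background(), name, cfg)
	assert.NilError(t, err)
	defer reader.Close()

	var out bytes.Buffer
	_, err = stdcopy.StdCopy(&out, &out, reader)
	assert.NilError(t, err)

	return strings.Split(strings.TrimSpace(out.String()), "\n")
}
```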
@@ -7,17 +7,17 @@ import (
"net/http"
"net/url"
"strings"
"testing"

"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/internal/test/request"
"github.com/go-check/check"
"gotest.tools/assert"
)

func (s *DockerSuite) TestAPINetworkGetDefaults(c *check.C) {
|
||||
func (s *DockerSuite) TestAPINetworkGetDefaults(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux)
|
||||
// By default docker daemon creates 3 networks. check if they are present
|
||||
defaults := []string{"bridge", "host", "none"}
|
||||
@@ -26,7 +26,7 @@ func (s *DockerSuite) TestAPINetworkGetDefaults(c *check.C) {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestAPINetworkCreateCheckDuplicate(c *check.C) {
|
||||
func (s *DockerSuite) TestAPINetworkCreateCheckDuplicate(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux)
|
||||
name := "testcheckduplicate"
|
||||
configOnCheck := types.NetworkCreateRequest{
|
||||
@@ -64,13 +64,13 @@ func (s *DockerSuite) TestAPINetworkCreateCheckDuplicate(c *check.C) {
|
||||
createNetwork(c, configNotCheck, http.StatusCreated)
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestAPINetworkFilter(c *check.C) {
|
||||
func (s *DockerSuite) TestAPINetworkFilter(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux)
|
||||
nr := getNetworkResource(c, getNetworkIDByName(c, "bridge"))
|
||||
assert.Equal(c, nr.Name, "bridge")
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestAPINetworkInspectBridge(c *check.C) {
|
||||
func (s *DockerSuite) TestAPINetworkInspectBridge(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux)
|
||||
// Inspect default bridge network
|
||||
nr := getNetworkResource(c, "bridge")
|
||||
@@ -96,7 +96,7 @@ func (s *DockerSuite) TestAPINetworkInspectBridge(c *check.C) {
|
||||
assert.Equal(c, ip.String(), containerIP)
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestAPINetworkInspectUserDefinedNetwork(c *check.C) {
|
||||
func (s *DockerSuite) TestAPINetworkInspectUserDefinedNetwork(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux)
|
||||
// IPAM configuration inspect
|
||||
ipam := &network.IPAM{
|
||||
@@ -127,7 +127,7 @@ func (s *DockerSuite) TestAPINetworkInspectUserDefinedNetwork(c *check.C) {
|
||||
assert.Assert(c, !isNetworkAvailable(c, "br0"))
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestAPINetworkConnectDisconnect(c *check.C) {
|
||||
func (s *DockerSuite) TestAPINetworkConnectDisconnect(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux)
|
||||
// Create test network
|
||||
name := "testnetwork"
|
||||
@@ -169,7 +169,7 @@ func (s *DockerSuite) TestAPINetworkConnectDisconnect(c *check.C) {
|
||||
deleteNetwork(c, nr.ID, true)
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestAPINetworkIPAMMultipleBridgeNetworks(c *check.C) {
|
||||
func (s *DockerSuite) TestAPINetworkIPAMMultipleBridgeNetworks(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux)
|
||||
// test0 bridge network
|
||||
ipam0 := &network.IPAM{
|
||||
@@ -238,14 +238,14 @@ func (s *DockerSuite) TestAPINetworkIPAMMultipleBridgeNetworks(c *check.C) {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestAPICreateDeletePredefinedNetworks(c *check.C) {
|
||||
func (s *DockerSuite) TestAPICreateDeletePredefinedNetworks(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux, SwarmInactive)
|
||||
createDeletePredefinedNetwork(c, "bridge")
|
||||
createDeletePredefinedNetwork(c, "none")
|
||||
createDeletePredefinedNetwork(c, "host")
|
||||
}
|
||||
|
||||
func createDeletePredefinedNetwork(c *check.C, name string) {
|
||||
func createDeletePredefinedNetwork(c *testing.T, name string) {
|
||||
// Create pre-defined network
|
||||
config := types.NetworkCreateRequest{
|
||||
Name: name,
|
||||
@@ -267,7 +267,7 @@ func createDeletePredefinedNetwork(c *check.C, name string) {
|
||||
deleteNetwork(c, name, false)
|
||||
}
|
||||
|
||||
func isNetworkAvailable(c *check.C, name string) bool {
|
||||
func isNetworkAvailable(c *testing.T, name string) bool {
|
||||
resp, body, err := request.Get("/networks")
|
||||
assert.NilError(c, err)
|
||||
defer resp.Body.Close()
|
||||
@@ -285,7 +285,7 @@ func isNetworkAvailable(c *check.C, name string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func getNetworkIDByName(c *check.C, name string) string {
|
||||
func getNetworkIDByName(c *testing.T, name string) string {
|
||||
var (
|
||||
v = url.Values{}
|
||||
filterArgs = filters.NewArgs()
|
||||
@@ -314,7 +314,7 @@ func getNetworkIDByName(c *check.C, name string) string {
|
||||
return res
|
||||
}
|
||||
|
||||
func getNetworkResource(c *check.C, id string) *types.NetworkResource {
|
||||
func getNetworkResource(c *testing.T, id string) *types.NetworkResource {
|
||||
_, obj, err := request.Get("/networks/" + id)
|
||||
assert.NilError(c, err)
|
||||
|
||||
@@ -325,7 +325,7 @@ func getNetworkResource(c *check.C, id string) *types.NetworkResource {
|
||||
return &nr
|
||||
}
|
||||
|
||||
func createNetwork(c *check.C, config types.NetworkCreateRequest, expectedStatusCode int) string {
|
||||
func createNetwork(c *testing.T, config types.NetworkCreateRequest, expectedStatusCode int) string {
|
||||
resp, body, err := request.Post("/networks/create", request.JSONBody(config))
|
||||
assert.NilError(c, err)
|
||||
defer resp.Body.Close()
|
||||
@@ -346,7 +346,7 @@ func createNetwork(c *check.C, config types.NetworkCreateRequest, expectedStatus
|
||||
return ""
|
||||
}
|
||||
|
||||
func connectNetwork(c *check.C, nid, cid string) {
|
||||
func connectNetwork(c *testing.T, nid, cid string) {
|
||||
config := types.NetworkConnect{
|
||||
Container: cid,
|
||||
}
|
||||
@@ -356,7 +356,7 @@ func connectNetwork(c *check.C, nid, cid string) {
|
||||
assert.NilError(c, err)
|
||||
}
|
||||
|
||||
func disconnectNetwork(c *check.C, nid, cid string) {
|
||||
func disconnectNetwork(c *testing.T, nid, cid string) {
|
||||
config := types.NetworkConnect{
|
||||
Container: cid,
|
||||
}
|
||||
@@ -366,7 +366,7 @@ func disconnectNetwork(c *check.C, nid, cid string) {
|
||||
assert.NilError(c, err)
|
||||
}
|
||||
|
||||
func deleteNetwork(c *check.C, id string, shouldSucceed bool) {
|
||||
func deleteNetwork(c *testing.T, id string, shouldSucceed bool) {
|
||||
resp, _, err := request.Delete("/networks/" + id)
|
||||
assert.NilError(c, err)
|
||||
defer resp.Body.Close()
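The network test helpers above are parameterized on `*testing.T` plus an expected HTTP status, so the same helper serves both the success and the conflict cases. A sketch of the `createNetwork`-style shape, assuming the internal `request` helper shown in the hunks; decoding the created network ID from the body is elided:

```go
package main

import (
	"testing"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/internal/test/request"
	"gotest.tools/assert"
)

func createNetworkSketch(t *testing.T, config types.NetworkCreateRequest, expectedStatusCode int) {
	resp, body, err := request.Post("/networks/create", request.JSONBody(config))
	assert.NilError(t, err)
	defer resp.Body.Close()

	assert.Equal(t, resp.StatusCode, expectedStatusCode)
	_ = body // on 201 Created the network ID would be decoded from body
}
```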
@@ -10,19 +10,19 @@ import (
"strconv"
"strings"
"sync"
"testing"
"time"

"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/client"
"github.com/docker/docker/internal/test/request"
"github.com/go-check/check"
"gotest.tools/assert"
)

var expectedNetworkInterfaceStats = strings.Split("rx_bytes rx_dropped rx_errors rx_packets tx_bytes tx_dropped tx_errors tx_packets", " ")
|
||||
|
||||
func (s *DockerSuite) TestAPIStatsNoStreamGetCpu(c *check.C) {
|
||||
func (s *DockerSuite) TestAPIStatsNoStreamGetCpu(c *testing.T) {
|
||||
out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "while true;usleep 100; do echo 'Hello'; done")
|
||||
|
||||
id := strings.TrimSpace(out)
|
||||
@@ -62,7 +62,7 @@ func (s *DockerSuite) TestAPIStatsNoStreamGetCpu(c *check.C) {
|
||||
assert.Assert(c, cpuPercent != 0.0, "docker stats with no-stream get cpu usage failed: was %v", cpuPercent)
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestAPIStatsStoppedContainerInGoroutines(c *check.C) {
|
||||
func (s *DockerSuite) TestAPIStatsStoppedContainerInGoroutines(c *testing.T) {
|
||||
out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "echo 1")
|
||||
id := strings.TrimSpace(out)
|
||||
|
||||
@@ -97,7 +97,7 @@ func (s *DockerSuite) TestAPIStatsStoppedContainerInGoroutines(c *check.C) {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestAPIStatsNetworkStats(c *check.C) {
|
||||
func (s *DockerSuite) TestAPIStatsNetworkStats(c *testing.T) {
|
||||
testRequires(c, testEnv.IsLocalDaemon)
|
||||
|
||||
out := runSleepingContainer(c)
|
||||
@@ -162,7 +162,7 @@ func (s *DockerSuite) TestAPIStatsNetworkStats(c *check.C) {
|
||||
assert.Assert(c, postRxPackets >= expRxPkts, "Reported less RxPackets than expected. Expected >= %d. Found %d. %s", expRxPkts, postRxPackets, pingouts)
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestAPIStatsNetworkStatsVersioning(c *check.C) {
|
||||
func (s *DockerSuite) TestAPIStatsNetworkStatsVersioning(c *testing.T) {
|
||||
// Windows doesn't support API versions less than 1.25, so no point testing 1.17 .. 1.21
|
||||
testRequires(c, testEnv.IsLocalDaemon, DaemonIsLinux)
|
||||
|
||||
@@ -187,7 +187,7 @@ func (s *DockerSuite) TestAPIStatsNetworkStatsVersioning(c *check.C) {
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func getNetworkStats(c *check.C, id string) map[string]types.NetworkStats {
|
||||
func getNetworkStats(c *testing.T, id string) map[string]types.NetworkStats {
|
||||
var st *types.StatsJSON
|
||||
|
||||
_, body, err := request.Get(fmt.Sprintf("/containers/%s/stats?stream=false", id))
|
||||
@@ -204,7 +204,7 @@ func getNetworkStats(c *check.C, id string) map[string]types.NetworkStats {
|
||||
// container with id using an API call with version apiVersion. Since the
|
||||
// stats result type differs between API versions, we simply return
|
||||
// map[string]interface{}.
|
||||
func getVersionedStats(c *check.C, id string, apiVersion string) map[string]interface{} {
|
||||
func getVersionedStats(c *testing.T, id string, apiVersion string) map[string]interface{} {
|
||||
stats := make(map[string]interface{})
|
||||
|
||||
_, body, err := request.Get(fmt.Sprintf("/%s/containers/%s/stats?stream=false", apiVersion, id))
|
||||
@@ -257,7 +257,7 @@ func jsonBlobHasGTE121NetworkStats(blob map[string]interface{}) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestAPIStatsContainerNotFound(c *check.C) {
|
||||
func (s *DockerSuite) TestAPIStatsContainerNotFound(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux)
|
||||
cli, err := client.NewClientWithOpts(client.FromEnv)
|
||||
assert.NilError(c, err)
|
||||
@@ -271,7 +271,7 @@ func (s *DockerSuite) TestAPIStatsContainerNotFound(c *check.C) {
|
||||
assert.ErrorContains(c, err, expected)
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestAPIStatsNoStreamConnectedContainers(c *check.C) {
|
||||
func (s *DockerSuite) TestAPIStatsNoStreamConnectedContainers(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux)
|
||||
|
||||
out1 := runSleepingContainer(c)
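The stats tests above read the same endpoint under several API versions, so the versioned helper decodes the payload into a generic map rather than a typed struct. A sketch of that shape, assuming the internal `request` helper from the imports:

```go
package main

import (
	"encoding/json"
	"fmt"
	"testing"

	"github.com/docker/docker/internal/test/request"
	"gotest.tools/assert"
)

// getVersionedStats-style helper: the stats result type differs between API
// versions, so the body is decoded into map[string]interface{}.
func getVersionedStatsSketch(t *testing.T, id, apiVersion string) map[string]interface{} {
	stats := make(map[string]interface{})

	_, body, err := request.Get(fmt.Sprintf("/%s/containers/%s/stats?stream=false", apiVersion, id))
	assert.NilError(t, err)
	defer body.Close()

	assert.NilError(t, json.NewDecoder(body).Decode(&stats))
	return stats
}
```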
@@ -3,21 +3,24 @@
package main

import (
"fmt"
"testing"
"time"

"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/integration-cli/checker"
"github.com/docker/docker/integration-cli/daemon"
"github.com/go-check/check"
"gotest.tools/assert"
"gotest.tools/poll"
)

func (s *DockerSwarmSuite) TestAPISwarmListNodes(c *check.C) {
|
||||
func (s *DockerSwarmSuite) TestAPISwarmListNodes(c *testing.T) {
|
||||
d1 := s.AddDaemon(c, true, true)
|
||||
d2 := s.AddDaemon(c, true, false)
|
||||
d3 := s.AddDaemon(c, true, false)
|
||||
|
||||
nodes := d1.ListNodes(c)
c.Assert(len(nodes), checker.Equals, 3, check.Commentf("nodes: %#v", nodes))
assert.Equal(c, len(nodes), 3, fmt.Sprintf("nodes: %#v", nodes))

loop0:
|
||||
for _, n := range nodes {
|
||||
@@ -30,7 +33,7 @@ loop0:
|
||||
}
|
||||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestAPISwarmNodeUpdate(c *check.C) {
|
||||
func (s *DockerSwarmSuite) TestAPISwarmNodeUpdate(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
|
||||
nodes := d.ListNodes(c)
|
||||
@@ -40,17 +43,17 @@ func (s *DockerSwarmSuite) TestAPISwarmNodeUpdate(c *check.C) {
|
||||
})
|
||||
|
||||
n := d.GetNode(c, nodes[0].ID)
|
||||
c.Assert(n.Spec.Availability, checker.Equals, swarm.NodeAvailabilityPause)
|
||||
assert.Equal(c, n.Spec.Availability, swarm.NodeAvailabilityPause)
|
||||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestAPISwarmNodeRemove(c *check.C) {
|
||||
func (s *DockerSwarmSuite) TestAPISwarmNodeRemove(c *testing.T) {
|
||||
testRequires(c, Network)
|
||||
d1 := s.AddDaemon(c, true, true)
|
||||
d2 := s.AddDaemon(c, true, false)
|
||||
_ = s.AddDaemon(c, true, false)
|
||||
|
||||
nodes := d1.ListNodes(c)
|
||||
c.Assert(len(nodes), checker.Equals, 3, check.Commentf("nodes: %#v", nodes))
|
||||
assert.Equal(c, len(nodes), 3, fmt.Sprintf("nodes: %#v", nodes))
|
||||
|
||||
// Getting the info so we can take the NodeID
|
||||
d2Info := d2.SwarmInfo(c)
|
||||
@@ -59,7 +62,7 @@ func (s *DockerSwarmSuite) TestAPISwarmNodeRemove(c *check.C) {
|
||||
d1.RemoveNode(c, d2Info.NodeID, true)
|
||||
|
||||
nodes = d1.ListNodes(c)
|
||||
c.Assert(len(nodes), checker.Equals, 2, check.Commentf("nodes: %#v", nodes))
|
||||
assert.Equal(c, len(nodes), 2, fmt.Sprintf("nodes: %#v", nodes))
|
||||
|
||||
// Restart the node that was removed
|
||||
d2.RestartNode(c)
|
||||
@@ -69,10 +72,10 @@ func (s *DockerSwarmSuite) TestAPISwarmNodeRemove(c *check.C) {
|
||||
|
||||
// Make sure the node didn't rejoin
|
||||
nodes = d1.ListNodes(c)
|
||||
c.Assert(len(nodes), checker.Equals, 2, check.Commentf("nodes: %#v", nodes))
|
||||
assert.Equal(c, len(nodes), 2, fmt.Sprintf("nodes: %#v", nodes))
|
||||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestAPISwarmNodeDrainPause(c *check.C) {
|
||||
func (s *DockerSwarmSuite) TestAPISwarmNodeDrainPause(c *testing.T) {
|
||||
d1 := s.AddDaemon(c, true, true)
|
||||
d2 := s.AddDaemon(c, true, false)
|
||||
|
||||
@@ -82,16 +85,16 @@ func (s *DockerSwarmSuite) TestAPISwarmNodeDrainPause(c *check.C) {
|
||||
instances := 2
|
||||
id := d1.CreateService(c, simpleTestService, setInstances(instances))
|
||||
|
||||
waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.GreaterThan, 0)
|
||||
waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.GreaterThan, 0)
|
||||
waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals, instances)
|
||||
poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount, checker.GreaterThan(0)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount, checker.GreaterThan(0)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
// drain d2, all containers should move to d1
|
||||
d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) {
|
||||
n.Spec.Availability = swarm.NodeAvailabilityDrain
|
||||
})
|
||||
waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, instances)
|
||||
waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, 0)
|
||||
poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount, checker.Equals(0)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
// set d2 back to active
|
||||
d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) {
|
||||
@@ -100,15 +103,15 @@ func (s *DockerSwarmSuite) TestAPISwarmNodeDrainPause(c *check.C) {
|
||||
|
||||
instances = 1
|
||||
d1.UpdateService(c, d1.GetService(c, id), setInstances(instances))
|
||||
waitAndAssert(c, defaultReconciliationTimeout*2, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals, instances)
|
||||
poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout*2))
|
||||
|
||||
instances = 2
|
||||
d1.UpdateService(c, d1.GetService(c, id), setInstances(instances))
|
||||
|
||||
// drained node first so we don't get any old containers
|
||||
waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.GreaterThan, 0)
|
||||
waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.GreaterThan, 0)
|
||||
waitAndAssert(c, defaultReconciliationTimeout*2, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals, instances)
|
||||
poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount, checker.GreaterThan(0)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount, checker.GreaterThan(0)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout*2))
|
||||
|
||||
d2ContainerCount := len(d2.ActiveContainers(c))
|
||||
|
||||
@@ -119,7 +122,7 @@ func (s *DockerSwarmSuite) TestAPISwarmNodeDrainPause(c *check.C) {
|
||||
|
||||
instances = 4
|
||||
d1.UpdateService(c, d1.GetService(c, id), setInstances(instances))
|
||||
waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, instances-d2ContainerCount)
|
||||
waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, d2ContainerCount)
|
||||
poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount, checker.Equals(instances-d2ContainerCount)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount, checker.Equals(d2ContainerCount)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
}
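Throughout the swarm hunks above, `waitAndAssert(c, timeout, check, matcher, expected)` is replaced by `poll.WaitOn(c, pollCheck(c, check, matcher(expected)), poll.WithTimeout(timeout))`, where `pollCheck` and `checker` are the suite's own adapters. A self-contained illustration of the underlying `gotest.tools/poll` API, with an artificial condition in place of the daemon checks:

```go
package main

import (
	"testing"
	"time"

	"gotest.tools/poll"
)

// A check function returns poll.Continue until its condition holds, then
// poll.Success; WaitOn retries it until success or the configured timeout.
func TestPollWaitOnSketch(t *testing.T) {
	start := time.Now()

	check := func(poll.LogT) poll.Result {
		if time.Since(start) > 50*time.Millisecond {
			return poll.Success()
		}
		return poll.Continue("condition not met yet")
	}

	poll.WaitOn(t, check, poll.WithTimeout(30*time.Second), poll.WithDelay(10*time.Millisecond))
}
```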
@@ -7,6 +7,7 @@ import (
"fmt"
"strconv"
"strings"
"testing"
"time"

"github.com/docker/docker/api/types"
@@ -16,10 +17,10 @@ import (
"github.com/docker/docker/integration-cli/cli/build"
"github.com/docker/docker/integration-cli/daemon"
testdaemon "github.com/docker/docker/internal/test/daemon"
"github.com/go-check/check"
"golang.org/x/sys/unix"
"gotest.tools/assert"
"gotest.tools/icmd"
"gotest.tools/poll"
)

func setPortConfig(portConfig []swarm.PortConfig) testdaemon.ServiceConstructor {
|
||||
@@ -31,13 +32,13 @@ func setPortConfig(portConfig []swarm.PortConfig) testdaemon.ServiceConstructor
|
||||
}
|
||||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestAPIServiceUpdatePort(c *check.C) {
|
||||
func (s *DockerSwarmSuite) TestAPIServiceUpdatePort(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
|
||||
// Create a service with a port mapping of 8080:8081.
|
||||
portConfig := []swarm.PortConfig{{TargetPort: 8081, PublishedPort: 8080}}
|
||||
serviceID := d.CreateService(c, simpleTestService, setInstances(1), setPortConfig(portConfig))
|
||||
waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1)
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
// Update the service: changed the port mapping from 8080:8081 to 8082:8083.
|
||||
updatedPortConfig := []swarm.PortConfig{{TargetPort: 8083, PublishedPort: 8082}}
|
||||
@@ -52,7 +53,7 @@ func (s *DockerSwarmSuite) TestAPIServiceUpdatePort(c *check.C) {
|
||||
assert.Equal(c, updatedService.Spec.EndpointSpec.Ports[0].PublishedPort, uint32(8082))
|
||||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestAPISwarmServicesEmptyList(c *check.C) {
|
||||
func (s *DockerSwarmSuite) TestAPISwarmServicesEmptyList(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
|
||||
services := d.ListServices(c)
|
||||
@@ -60,12 +61,12 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesEmptyList(c *check.C) {
|
||||
assert.Assert(c, len(services) == 0, "services: %#v", services)
|
||||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestAPISwarmServicesCreate(c *check.C) {
|
||||
func (s *DockerSwarmSuite) TestAPISwarmServicesCreate(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
|
||||
instances := 2
|
||||
id := d.CreateService(c, simpleTestService, setInstances(instances))
|
||||
waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
client := d.NewClientT(c)
|
||||
defer client.Close()
|
||||
@@ -87,13 +88,13 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesCreate(c *check.C) {
|
||||
service := d.GetService(c, id)
|
||||
instances = 5
|
||||
d.UpdateService(c, service, setInstances(instances))
|
||||
waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
d.RemoveService(c, service.ID)
|
||||
waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 0)
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(0)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestAPISwarmServicesMultipleAgents(c *check.C) {
|
||||
func (s *DockerSwarmSuite) TestAPISwarmServicesMultipleAgents(c *testing.T) {
|
||||
d1 := s.AddDaemon(c, true, true)
|
||||
d2 := s.AddDaemon(c, true, false)
|
||||
d3 := s.AddDaemon(c, true, false)
|
||||
@@ -103,50 +104,50 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesMultipleAgents(c *check.C) {
|
||||
instances := 9
|
||||
id := d1.CreateService(c, simpleTestService, setInstances(instances))
|
||||
|
||||
waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.GreaterThan, 0)
|
||||
waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.GreaterThan, 0)
|
||||
waitAndAssert(c, defaultReconciliationTimeout, d3.CheckActiveContainerCount, checker.GreaterThan, 0)
|
||||
poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount, checker.GreaterThan(0)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount, checker.GreaterThan(0)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d3.CheckActiveContainerCount, checker.GreaterThan(0)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances)
|
||||
poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
// reconciliation on d2 node down
|
||||
d2.Stop(c)
|
||||
|
||||
waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances)
|
||||
poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
// test downscaling
|
||||
instances = 5
|
||||
d1.UpdateService(c, d1.GetService(c, id), setInstances(instances))
|
||||
waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances)
|
||||
poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestAPISwarmServicesCreateGlobal(c *check.C) {
|
||||
func (s *DockerSwarmSuite) TestAPISwarmServicesCreateGlobal(c *testing.T) {
|
||||
d1 := s.AddDaemon(c, true, true)
|
||||
d2 := s.AddDaemon(c, true, false)
|
||||
d3 := s.AddDaemon(c, true, false)
|
||||
|
||||
d1.CreateService(c, simpleTestService, setGlobalMode)
|
||||
|
||||
waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, 1)
|
||||
waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, 1)
|
||||
waitAndAssert(c, defaultReconciliationTimeout, d3.CheckActiveContainerCount, checker.Equals, 1)
|
||||
poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d3.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
d4 := s.AddDaemon(c, true, false)
|
||||
d5 := s.AddDaemon(c, true, false)
|
||||
|
||||
waitAndAssert(c, defaultReconciliationTimeout, d4.CheckActiveContainerCount, checker.Equals, 1)
|
||||
waitAndAssert(c, defaultReconciliationTimeout, d5.CheckActiveContainerCount, checker.Equals, 1)
|
||||
poll.WaitOn(c, pollCheck(c, d4.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
poll.WaitOn(c, pollCheck(c, d5.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestAPISwarmServicesUpdate(c *check.C) {
|
||||
func (s *DockerSwarmSuite) TestAPISwarmServicesUpdate(c *testing.T) {
|
||||
const nodeCount = 3
|
||||
var daemons [nodeCount]*daemon.Daemon
|
||||
for i := 0; i < nodeCount; i++ {
|
||||
daemons[i] = s.AddDaemon(c, true, i == 0)
|
||||
}
|
||||
// wait for nodes ready
|
||||
waitAndAssert(c, 5*time.Second, daemons[0].CheckNodeReadyCount, checker.Equals, nodeCount)
|
||||
poll.WaitOn(c, pollCheck(c, daemons[0].CheckNodeReadyCount, checker.Equals(nodeCount)), poll.WithTimeout(5*time.Second))
|
||||
|
||||
// service image at start
|
||||
image1 := "busybox:latest"
|
||||
@@ -166,24 +167,20 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesUpdate(c *check.C) {
|
||||
id := daemons[0].CreateService(c, serviceForUpdate, setInstances(instances))
|
||||
|
||||
// wait for tasks ready
|
||||
waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals,
|
||||
map[string]int{image1: instances})
|
||||
poll.WaitOn(c, pollCheck(c, daemons[0].CheckRunningTaskImages, checker.DeepEquals(map[string]int{image1: instances})), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
// issue service update
|
||||
service := daemons[0].GetService(c, id)
|
||||
daemons[0].UpdateService(c, service, setImage(image2))
|
||||
|
||||
// first batch
|
||||
waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals,
|
||||
map[string]int{image1: instances - parallelism, image2: parallelism})
|
||||
poll.WaitOn(c, pollCheck(c, daemons[0].CheckRunningTaskImages, checker.DeepEquals(map[string]int{image1: instances - parallelism, image2: parallelism})), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
// 2nd batch
|
||||
waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals,
|
||||
map[string]int{image1: instances - 2*parallelism, image2: 2 * parallelism})
|
||||
poll.WaitOn(c, pollCheck(c, daemons[0].CheckRunningTaskImages, checker.DeepEquals(map[string]int{image1: instances - 2*parallelism, image2: 2 * parallelism})), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
// 3nd batch
|
||||
waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals,
|
||||
map[string]int{image2: instances})
|
||||
poll.WaitOn(c, pollCheck(c, daemons[0].CheckRunningTaskImages, checker.DeepEquals(map[string]int{image2: instances})), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
// Roll back to the previous version. This uses the CLI because
|
||||
// rollback used to be a client-side operation.
|
||||
@@ -191,15 +188,14 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesUpdate(c *check.C) {
|
||||
assert.NilError(c, err, out)
|
||||
|
||||
// first batch
|
||||
waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals,
|
||||
map[string]int{image2: instances - rollbackParallelism, image1: rollbackParallelism})
|
||||
poll.WaitOn(c, pollCheck(c, daemons[0].CheckRunningTaskImages, checker.DeepEquals(map[string]int{image2: instances - rollbackParallelism, image1: rollbackParallelism})), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
// 2nd batch
|
||||
waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals,
|
||||
map[string]int{image1: instances})
|
||||
poll.WaitOn(c, pollCheck(c, daemons[0].CheckRunningTaskImages, checker.DeepEquals(map[string]int{image1: instances})), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateStartFirst(c *check.C) {
|
||||
func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateStartFirst(c *testing.T) {
|
||||
d := s.AddDaemon(c, true, true)
|
||||
|
||||
// service image at start
|
||||
@@ -223,7 +219,7 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateStartFirst(c *check.C) {
|
||||
|
||||
checkStartingTasks := func(expected int) []swarm.Task {
|
||||
var startingTasks []swarm.Task
|
||||
waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
|
||||
poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
|
||||
tasks := d.GetServiceTasks(c, id)
|
||||
startingTasks = nil
|
||||
for _, t := range tasks {
|
||||
@@ -231,8 +227,8 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateStartFirst(c *check.C) {
|
||||
startingTasks = append(startingTasks, t)
|
||||
}
|
||||
}
|
||||
return startingTasks, nil
|
||||
}, checker.HasLen, expected)
|
||||
return startingTasks, ""
|
||||
}, checker.HasLen(expected)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
return startingTasks
|
||||
}
|
||||
@@ -245,8 +241,7 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateStartFirst(c *check.C) {
|
||||
}
|
||||
|
||||
// wait for tasks ready
|
||||
waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskImages, checker.DeepEquals,
|
||||
map[string]int{image1: instances})
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskImages, checker.DeepEquals(map[string]int{image1: instances})), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
// issue service update
|
||||
service := d.GetService(c, id)
|
||||
@@ -257,42 +252,36 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateStartFirst(c *check.C) {
|
||||
// The old tasks should be running, and the new ones should be starting.
|
||||
startingTasks := checkStartingTasks(parallelism)
|
||||
|
||||
waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskImages, checker.DeepEquals,
|
||||
map[string]int{image1: instances})
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskImages, checker.DeepEquals(map[string]int{image1: instances})), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
// make it healthy
|
||||
makeTasksHealthy(startingTasks)
|
||||
|
||||
waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskImages, checker.DeepEquals,
|
||||
map[string]int{image1: instances - parallelism, image2: parallelism})
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskImages, checker.DeepEquals(map[string]int{image1: instances - parallelism, image2: parallelism})), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
// 2nd batch
|
||||
|
||||
// The old tasks should be running, and the new ones should be starting.
|
||||
startingTasks = checkStartingTasks(parallelism)
|
||||
|
||||
waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskImages, checker.DeepEquals,
|
||||
map[string]int{image1: instances - parallelism, image2: parallelism})
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskImages, checker.DeepEquals(map[string]int{image1: instances - parallelism, image2: parallelism})), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
// make it healthy
|
||||
makeTasksHealthy(startingTasks)
|
||||
|
||||
waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskImages, checker.DeepEquals,
|
||||
map[string]int{image1: instances - 2*parallelism, image2: 2 * parallelism})
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskImages, checker.DeepEquals(map[string]int{image1: instances - 2*parallelism, image2: 2 * parallelism})), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
// 3nd batch
|
||||
|
||||
// The old tasks should be running, and the new ones should be starting.
|
||||
startingTasks = checkStartingTasks(1)
|
||||
|
||||
waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskImages, checker.DeepEquals,
|
||||
map[string]int{image1: instances - 2*parallelism, image2: 2 * parallelism})
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskImages, checker.DeepEquals(map[string]int{image1: instances - 2*parallelism, image2: 2 * parallelism})), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
// make it healthy
|
||||
makeTasksHealthy(startingTasks)
|
||||
|
||||
waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskImages, checker.DeepEquals,
|
||||
map[string]int{image2: instances})
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskImages, checker.DeepEquals(map[string]int{image2: instances})), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
// Roll back to the previous version. This uses the CLI because
|
||||
// rollback is a client-side operation.
|
||||
@@ -300,22 +289,21 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateStartFirst(c *check.C) {
|
||||
assert.NilError(c, err, out)
|
||||
|
||||
// first batch
|
||||
waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskImages, checker.DeepEquals,
|
||||
map[string]int{image2: instances - rollbackParallelism, image1: rollbackParallelism})
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskImages, checker.DeepEquals(map[string]int{image2: instances - rollbackParallelism, image1: rollbackParallelism})), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
// 2nd batch
|
||||
waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskImages, checker.DeepEquals,
|
||||
map[string]int{image1: instances})
|
||||
poll.WaitOn(c, pollCheck(c, d.CheckRunningTaskImages, checker.DeepEquals(map[string]int{image1: instances})), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestAPISwarmServicesFailedUpdate(c *check.C) {
|
||||
func (s *DockerSwarmSuite) TestAPISwarmServicesFailedUpdate(c *testing.T) {
|
||||
const nodeCount = 3
|
||||
var daemons [nodeCount]*daemon.Daemon
|
||||
for i := 0; i < nodeCount; i++ {
|
||||
daemons[i] = s.AddDaemon(c, true, i == 0)
|
||||
}
|
||||
// wait for nodes ready
|
||||
waitAndAssert(c, 5*time.Second, daemons[0].CheckNodeReadyCount, checker.Equals, nodeCount)
|
||||
poll.WaitOn(c, pollCheck(c, daemons[0].CheckNodeReadyCount, checker.Equals(nodeCount)), poll.WithTimeout(5*time.Second))
|
||||
|
||||
// service image at start
|
||||
image1 := "busybox:latest"
|
||||
@@ -327,15 +315,14 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesFailedUpdate(c *check.C) {
|
||||
id := daemons[0].CreateService(c, serviceForUpdate, setInstances(instances))
|
||||
|
||||
// wait for tasks ready
|
||||
waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals,
|
||||
map[string]int{image1: instances})
|
||||
poll.WaitOn(c, pollCheck(c, daemons[0].CheckRunningTaskImages, checker.DeepEquals(map[string]int{image1: instances})), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
// issue service update
|
||||
service := daemons[0].GetService(c, id)
|
||||
daemons[0].UpdateService(c, service, setImage(image2), setFailureAction(swarm.UpdateFailureActionPause), setMaxFailureRatio(0.25), setParallelism(1))
|
||||
|
||||
// should update 2 tasks and then pause
|
||||
waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceUpdateState(id), checker.Equals, swarm.UpdateStatePaused)
|
||||
poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceUpdateState(id), checker.Equals(swarm.UpdateStatePaused)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
v, _ := daemons[0].CheckServiceRunningTasks(id)(c)
|
||||
assert.Assert(c, v == instances-2)
|
||||
|
||||
@@ -344,25 +331,25 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesFailedUpdate(c *check.C) {
|
||||
out, err := daemons[0].Cmd("service", "update", "--detach", "--rollback", id)
|
||||
assert.NilError(c, err, out)
|
||||
|
||||
waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals,
|
||||
map[string]int{image1: instances})
|
||||
poll.WaitOn(c, pollCheck(c, daemons[0].CheckRunningTaskImages, checker.DeepEquals(map[string]int{image1: instances})), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
|
||||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintRole(c *check.C) {
|
||||
func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintRole(c *testing.T) {
|
||||
const nodeCount = 3
|
||||
var daemons [nodeCount]*daemon.Daemon
|
||||
for i := 0; i < nodeCount; i++ {
|
||||
daemons[i] = s.AddDaemon(c, true, i == 0)
|
||||
}
|
||||
// wait for nodes ready
|
||||
waitAndAssert(c, 5*time.Second, daemons[0].CheckNodeReadyCount, checker.Equals, nodeCount)
|
||||
poll.WaitOn(c, pollCheck(c, daemons[0].CheckNodeReadyCount, checker.Equals(nodeCount)), poll.WithTimeout(5*time.Second))
|
||||
|
||||
// create service
|
||||
constraints := []string{"node.role==worker"}
|
||||
instances := 3
|
||||
id := daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances))
|
||||
// wait for tasks ready
|
||||
waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceRunningTasks(id), checker.Equals, instances)
|
||||
poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceRunningTasks(id), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
// validate tasks are running on worker nodes
|
||||
tasks := daemons[0].GetServiceTasks(c, id)
|
||||
for _, task := range tasks {
|
||||
@@ -376,7 +363,7 @@ func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintRole(c *check.C) {
|
||||
constraints = []string{"node.role!=worker"}
|
||||
id = daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances))
|
||||
// wait for tasks ready
|
||||
waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceRunningTasks(id), checker.Equals, instances)
|
||||
poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceRunningTasks(id), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
tasks = daemons[0].GetServiceTasks(c, id)
|
||||
// validate tasks are running on manager nodes
|
||||
for _, task := range tasks {
|
||||
@@ -390,7 +377,7 @@ func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintRole(c *check.C) {
|
||||
constraints = []string{"node.role==nosuchrole"}
|
||||
id = daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances))
|
||||
// wait for tasks created
|
||||
waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceTasks(id), checker.Equals, instances)
|
||||
poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceTasks(id), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
// let scheduler try
|
||||
time.Sleep(250 * time.Millisecond)
|
||||
// validate tasks are not assigned to any node
|
||||
@@ -400,14 +387,14 @@ func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintRole(c *check.C) {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintLabel(c *check.C) {
|
||||
func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintLabel(c *testing.T) {
|
||||
const nodeCount = 3
|
||||
var daemons [nodeCount]*daemon.Daemon
|
||||
for i := 0; i < nodeCount; i++ {
|
||||
daemons[i] = s.AddDaemon(c, true, i == 0)
|
||||
}
|
||||
// wait for nodes ready
|
||||
waitAndAssert(c, 5*time.Second, daemons[0].CheckNodeReadyCount, checker.Equals, nodeCount)
|
||||
poll.WaitOn(c, pollCheck(c, daemons[0].CheckNodeReadyCount, checker.Equals(nodeCount)), poll.WithTimeout(5*time.Second))
|
||||
nodes := daemons[0].ListNodes(c)
|
||||
assert.Equal(c, len(nodes), nodeCount)
|
||||
|
||||
@@ -430,7 +417,7 @@ func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintLabel(c *check.C) {
|
||||
constraints := []string{"node.labels.security==high"}
|
||||
id := daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances))
|
||||
// wait for tasks ready
|
||||
waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceRunningTasks(id), checker.Equals, instances)
|
||||
poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceRunningTasks(id), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
tasks := daemons[0].GetServiceTasks(c, id)
|
||||
// validate all tasks are running on nodes[0]
|
||||
for _, task := range tasks {
|
||||
@@ -443,7 +430,7 @@ func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintLabel(c *check.C) {
|
||||
constraints = []string{"node.labels.security!=high"}
|
||||
id = daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances))
|
||||
// wait for tasks ready
|
||||
waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceRunningTasks(id), checker.Equals, instances)
|
||||
poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceRunningTasks(id), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
tasks = daemons[0].GetServiceTasks(c, id)
|
||||
// validate all tasks are NOT running on nodes[0]
|
||||
for _, task := range tasks {
|
||||
@@ -455,7 +442,7 @@ func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintLabel(c *check.C) {
|
||||
constraints = []string{"node.labels.security==medium"}
|
||||
id = daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances))
|
||||
// wait for tasks created
|
||||
waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceTasks(id), checker.Equals, instances)
|
||||
poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceTasks(id), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
// let scheduler try
|
||||
time.Sleep(250 * time.Millisecond)
|
||||
tasks = daemons[0].GetServiceTasks(c, id)
|
||||
@@ -473,7 +460,7 @@ func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintLabel(c *check.C) {
|
||||
}
|
||||
id = daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances))
|
||||
// wait for tasks created
|
||||
waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceTasks(id), checker.Equals, instances)
|
||||
poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceTasks(id), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
// let scheduler try
|
||||
time.Sleep(250 * time.Millisecond)
|
||||
tasks = daemons[0].GetServiceTasks(c, id)
|
||||
@@ -488,21 +475,21 @@ func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintLabel(c *check.C) {
|
||||
}
|
||||
})
|
||||
// wait for tasks ready
|
||||
waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceRunningTasks(id), checker.Equals, instances)
|
||||
poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceRunningTasks(id), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
|
||||
tasks = daemons[0].GetServiceTasks(c, id)
|
||||
for _, task := range tasks {
|
||||
assert.Assert(c, task.NodeID == nodes[1].ID)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestAPISwarmServicePlacementPrefs(c *check.C) {
|
||||
func (s *DockerSwarmSuite) TestAPISwarmServicePlacementPrefs(c *testing.T) {
|
||||
const nodeCount = 3
|
||||
var daemons [nodeCount]*daemon.Daemon
|
||||
for i := 0; i < nodeCount; i++ {
|
||||
daemons[i] = s.AddDaemon(c, true, i == 0)
|
||||
}
|
||||
// wait for nodes ready
|
||||
waitAndAssert(c, 5*time.Second, daemons[0].CheckNodeReadyCount, checker.Equals, nodeCount)
poll.WaitOn(c, pollCheck(c, daemons[0].CheckNodeReadyCount, checker.Equals(nodeCount)), poll.WithTimeout(5*time.Second))
nodes := daemons[0].ListNodes(c)
assert.Equal(c, len(nodes), nodeCount)

@@ -525,7 +512,7 @@ func (s *DockerSwarmSuite) TestAPISwarmServicePlacementPrefs(c *check.C) {
prefs := []swarm.PlacementPreference{{Spread: &swarm.SpreadOver{SpreadDescriptor: "node.labels.rack"}}}
id := daemons[0].CreateService(c, simpleTestService, setPlacementPrefs(prefs), setInstances(instances))
// wait for tasks ready
waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceRunningTasks(id), checker.Equals, instances)
poll.WaitOn(c, pollCheck(c, daemons[0].CheckServiceRunningTasks(id), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
tasks := daemons[0].GetServiceTasks(c, id)
// validate all tasks are running on nodes[0]
tasksOnNode := make(map[string]int)
@@ -537,7 +524,7 @@ func (s *DockerSwarmSuite) TestAPISwarmServicePlacementPrefs(c *check.C) {
assert.Assert(c, tasksOnNode[nodes[2].ID] == 1)
}

func (s *DockerSwarmSuite) TestAPISwarmServicesStateReporting(c *check.C) {
func (s *DockerSwarmSuite) TestAPISwarmServicesStateReporting(c *testing.T) {
testRequires(c, testEnv.IsLocalDaemon)
testRequires(c, DaemonIsLinux)

@@ -550,7 +537,7 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesStateReporting(c *check.C) {
instances := 9
d1.CreateService(c, simpleTestService, setInstances(instances))

waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances)
poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))

getContainers := func() map[string]*daemon.Daemon {
m := make(map[string]*daemon.Daemon)
@@ -572,7 +559,7 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesStateReporting(c *check.C) {
_, err := containers[toRemove].Cmd("stop", toRemove)
assert.NilError(c, err)

waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances)
poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))

containers2 := getContainers()
assert.Assert(c, len(containers2) == instances)
@@ -598,7 +585,7 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesStateReporting(c *check.C) {

time.Sleep(time.Second) // give some time to handle the signal

waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances)
poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))

containers2 = getContainers()
assert.Assert(c, len(containers2) == instances)

@@ -12,6 +12,7 @@ import (
"runtime"
"strings"
"sync"
"testing"
"time"

"github.com/cloudflare/cfssl/csr"
@@ -26,14 +27,15 @@ import (
testdaemon "github.com/docker/docker/internal/test/daemon"
"github.com/docker/docker/internal/test/request"
"github.com/docker/swarmkit/ca"
"github.com/go-check/check"
"github.com/pkg/errors"
"gotest.tools/assert"
is "gotest.tools/assert/cmp"
"gotest.tools/poll"
)

var defaultReconciliationTimeout = 30 * time.Second

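Note on the conversion pattern: throughout these hunks, `waitAndAssert(c, timeout, check, comparison, expected)` becomes `poll.WaitOn(c, pollCheck(c, check, comparison(expected)), poll.WithTimeout(timeout))`. The repository's own `checkF` and `pollCheck` helpers are defined outside this diff and are not shown here; the sketch below is only a hedged illustration of how such an adapter could bridge the old check signature onto `gotest.tools/poll`, assuming a check returns an observed value plus a comment and the comparison factory returns a `cmp.Comparison`.

```go
// Illustrative sketch only: the real checkF and pollCheck helpers live in the
// integration-cli package and may differ in detail.
package example

import (
	"testing"

	"gotest.tools/assert/cmp"
	"gotest.tools/poll"
)

// checkF is the shape used by the converted tests: it returns the observed
// value and a comment to show while the condition is not yet met.
type checkF func(t *testing.T) (interface{}, string)

// pollCheck adapts a checkF plus a comparison factory (in the spirit of
// checker.Equals(expected)) into a poll.Check for use with poll.WaitOn.
func pollCheck(t *testing.T, f checkF, compare func(x interface{}) cmp.Comparison) poll.Check {
	return func(poll.LogT) poll.Result {
		t.Helper()
		v, comment := f(t)
		// Run the comparison against the observed value; stop polling on success.
		if compare(v)().Success() {
			return poll.Success()
		}
		return poll.Continue("waiting for condition: %s", comment)
	}
}
```

Under those assumptions, a call such as `poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))` polls the check until the comparison succeeds or the timeout expires.
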
func (s *DockerSwarmSuite) TestAPISwarmInit(c *check.C) {
func (s *DockerSwarmSuite) TestAPISwarmInit(c *testing.T) {
// todo: should find a better way to verify that components are running than /info
d1 := s.AddDaemon(c, true, true)
info := d1.SwarmInfo(c)
@@ -79,7 +81,7 @@ func (s *DockerSwarmSuite) TestAPISwarmInit(c *check.C) {
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
}

func (s *DockerSwarmSuite) TestAPISwarmJoinToken(c *check.C) {
func (s *DockerSwarmSuite) TestAPISwarmJoinToken(c *testing.T) {
d1 := s.AddDaemon(c, false, false)
d1.SwarmInit(c, swarm.InitRequest{})

@@ -157,7 +159,7 @@ func (s *DockerSwarmSuite) TestAPISwarmJoinToken(c *check.C) {
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)
}

func (s *DockerSwarmSuite) TestUpdateSwarmAddExternalCA(c *check.C) {
func (s *DockerSwarmSuite) TestUpdateSwarmAddExternalCA(c *testing.T) {
d1 := s.AddDaemon(c, false, false)
d1.SwarmInit(c, swarm.InitRequest{})
d1.UpdateSwarm(c, func(s *swarm.Spec) {
@@ -179,7 +181,7 @@ func (s *DockerSwarmSuite) TestUpdateSwarmAddExternalCA(c *check.C) {
assert.Equal(c, info.Cluster.Spec.CAConfig.ExternalCAs[1].CACert, "cacert")
}

func (s *DockerSwarmSuite) TestAPISwarmCAHash(c *check.C) {
func (s *DockerSwarmSuite) TestAPISwarmCAHash(c *testing.T) {
d1 := s.AddDaemon(c, true, true)
d2 := s.AddDaemon(c, false, false)
splitToken := strings.Split(d1.JoinTokens(c).Worker, "-")
@@ -194,7 +196,7 @@ func (s *DockerSwarmSuite) TestAPISwarmCAHash(c *check.C) {
assert.ErrorContains(c, err, "remote CA does not match fingerprint")
}

func (s *DockerSwarmSuite) TestAPISwarmPromoteDemote(c *check.C) {
func (s *DockerSwarmSuite) TestAPISwarmPromoteDemote(c *testing.T) {
d1 := s.AddDaemon(c, false, false)
d1.SwarmInit(c, swarm.InitRequest{})
d2 := s.AddDaemon(c, true, false)
@@ -207,13 +209,13 @@ func (s *DockerSwarmSuite) TestAPISwarmPromoteDemote(c *check.C) {
n.Spec.Role = swarm.NodeRoleManager
})

waitAndAssert(c, defaultReconciliationTimeout, d2.CheckControlAvailable, checker.True)
poll.WaitOn(c, pollCheck(c, d2.CheckControlAvailable, checker.True()), poll.WithTimeout(defaultReconciliationTimeout))

d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) {
n.Spec.Role = swarm.NodeRoleWorker
})

waitAndAssert(c, defaultReconciliationTimeout, d2.CheckControlAvailable, checker.False)
poll.WaitOn(c, pollCheck(c, d2.CheckControlAvailable, checker.False()), poll.WithTimeout(defaultReconciliationTimeout))

// Wait for the role to change to worker in the cert. This is partially
// done because it's something worth testing in its own right, and
@@ -221,17 +223,17 @@ func (s *DockerSwarmSuite) TestAPISwarmPromoteDemote(c *check.C) {
// back to manager quickly might cause the node to pause for awhile
// while waiting for the role to change to worker, and the test can
// time out during this interval.
waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
certBytes, err := ioutil.ReadFile(filepath.Join(d2.Folder, "root", "swarm", "certificates", "swarm-node.crt"))
if err != nil {
return "", check.Commentf("error: %v", err)
return "", fmt.Sprintf("error: %v", err)
}
certs, err := helpers.ParseCertificatesPEM(certBytes)
if err == nil && len(certs) > 0 && len(certs[0].Subject.OrganizationalUnit) > 0 {
return certs[0].Subject.OrganizationalUnit[0], nil
return certs[0].Subject.OrganizationalUnit[0], ""
}
return "", check.Commentf("could not get organizational unit from certificate")
}, checker.Equals, "swarm-worker")
return "", "could not get organizational unit from certificate"
}, checker.Equals("swarm-worker")), poll.WithTimeout(defaultReconciliationTimeout))

// Demoting last node should fail
node := d1.GetNode(c, d1.NodeID())
@@ -261,10 +263,10 @@ func (s *DockerSwarmSuite) TestAPISwarmPromoteDemote(c *check.C) {
n.Spec.Role = swarm.NodeRoleManager
})

waitAndAssert(c, defaultReconciliationTimeout, d2.CheckControlAvailable, checker.True)
poll.WaitOn(c, pollCheck(c, d2.CheckControlAvailable, checker.True()), poll.WithTimeout(defaultReconciliationTimeout))
}

func (s *DockerSwarmSuite) TestAPISwarmLeaderProxy(c *check.C) {
func (s *DockerSwarmSuite) TestAPISwarmLeaderProxy(c *testing.T) {
// add three managers, one of these is leader
d1 := s.AddDaemon(c, true, true)
d2 := s.AddDaemon(c, true, true)
@@ -289,7 +291,7 @@ func (s *DockerSwarmSuite) TestAPISwarmLeaderProxy(c *check.C) {
}
}

func (s *DockerSwarmSuite) TestAPISwarmLeaderElection(c *check.C) {
func (s *DockerSwarmSuite) TestAPISwarmLeaderElection(c *testing.T) {
if runtime.GOARCH == "s390x" {
c.Skip("Disabled on s390x")
}
@@ -313,13 +315,24 @@ func (s *DockerSwarmSuite) TestAPISwarmLeaderElection(c *check.C) {
leader *daemon.Daemon // keep track of leader
followers []*daemon.Daemon // keep track of followers
)
var lastErr error
checkLeader := func(nodes ...*daemon.Daemon) checkF {
return func(c *check.C) (interface{}, check.CommentInterface) {
return func(c *testing.T) (interface{}, string) {
// clear these out before each run
leader = nil
followers = nil
for _, d := range nodes {
if d.GetNode(c, d.NodeID()).ManagerStatus.Leader {
n := d.GetNode(c, d.NodeID(), func(err error) bool {
if strings.Contains(errors.Cause(err).Error(), context.DeadlineExceeded.Error()) || strings.Contains(err.Error(), "swarm does not have a leader") {
lastErr = err
return true
}
return false
})
if n == nil {
return false, fmt.Sprintf("failed to get node: %v", lastErr)
}
if n.ManagerStatus.Leader {
leader = d
} else {
followers = append(followers, d)
@@ -327,16 +340,16 @@ func (s *DockerSwarmSuite) TestAPISwarmLeaderElection(c *check.C) {
}

if leader == nil {
return false, check.Commentf("no leader elected")
return false, "no leader elected"
}

return true, check.Commentf("elected %v", leader.ID())
return true, fmt.Sprintf("elected %v", leader.ID())
}
}

// wait for an election to occur
c.Logf("Waiting for election to occur...")
waitAndAssert(c, defaultReconciliationTimeout, checkLeader(d2, d3), checker.True)
poll.WaitOn(c, pollCheck(c, checkLeader(d2, d3), checker.True()), poll.WithTimeout(defaultReconciliationTimeout))

// assert that we have a new leader
assert.Assert(c, leader != nil)
@@ -349,7 +362,7 @@ func (s *DockerSwarmSuite) TestAPISwarmLeaderElection(c *check.C) {

// wait for possible election
c.Logf("Waiting for possible election...")
waitAndAssert(c, defaultReconciliationTimeout, checkLeader(d1, d2, d3), checker.True)
poll.WaitOn(c, pollCheck(c, checkLeader(d1, d2, d3), checker.True()), poll.WithTimeout(defaultReconciliationTimeout))
// pick out the leader and the followers again

// verify that we still only have 1 leader and 2 followers
@@ -359,7 +372,7 @@ func (s *DockerSwarmSuite) TestAPISwarmLeaderElection(c *check.C) {
assert.Equal(c, leader.NodeID(), stableleader.NodeID())
}

func (s *DockerSwarmSuite) TestAPISwarmRaftQuorum(c *check.C) {
func (s *DockerSwarmSuite) TestAPISwarmRaftQuorum(c *testing.T) {
if runtime.GOARCH == "s390x" {
c.Skip("Disabled on s390x")
}
@@ -376,7 +389,7 @@ func (s *DockerSwarmSuite) TestAPISwarmRaftQuorum(c *check.C) {
d2.Stop(c)

// make sure there is a leader
waitAndAssert(c, defaultReconciliationTimeout, d1.CheckLeader, checker.IsNil)
poll.WaitOn(c, pollCheck(c, d1.CheckLeader, checker.IsNil()), poll.WithTimeout(defaultReconciliationTimeout))

d1.CreateService(c, simpleTestService, func(s *swarm.Service) {
s.Spec.Name = "top1"
@@ -391,22 +404,22 @@ func (s *DockerSwarmSuite) TestAPISwarmRaftQuorum(c *check.C) {
defer cli.Close()

// d1 will eventually step down from leader because there is no longer an active quorum, wait for that to happen
waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) {
_, err := cli.ServiceCreate(context.Background(), service.Spec, types.ServiceCreateOptions{})
return err.Error(), nil
}, checker.Contains, "Make sure more than half of the managers are online.")
return err.Error(), ""
}, checker.Contains("Make sure more than half of the managers are online.")), poll.WithTimeout(defaultReconciliationTimeout*2))

d2.StartNode(c)

// make sure there is a leader
waitAndAssert(c, defaultReconciliationTimeout, d1.CheckLeader, checker.IsNil)
poll.WaitOn(c, pollCheck(c, d1.CheckLeader, checker.IsNil()), poll.WithTimeout(defaultReconciliationTimeout))

d1.CreateService(c, simpleTestService, func(s *swarm.Service) {
s.Spec.Name = "top3"
})
}

func (s *DockerSwarmSuite) TestAPISwarmLeaveRemovesContainer(c *check.C) {
func (s *DockerSwarmSuite) TestAPISwarmLeaveRemovesContainer(c *testing.T) {
d := s.AddDaemon(c, true, true)

instances := 2
@@ -416,12 +429,12 @@ func (s *DockerSwarmSuite) TestAPISwarmLeaveRemovesContainer(c *check.C) {
assert.NilError(c, err, id)
id = strings.TrimSpace(id)

waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances+1)
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(instances+1)), poll.WithTimeout(defaultReconciliationTimeout))

assert.ErrorContains(c, d.SwarmLeave(c, false), "")
assert.NilError(c, d.SwarmLeave(c, true))

waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1)
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))

id2, err := d.Cmd("ps", "-q")
assert.NilError(c, err, id2)
@@ -429,7 +442,7 @@ func (s *DockerSwarmSuite) TestAPISwarmLeaveRemovesContainer(c *check.C) {
}

// #23629
func (s *DockerSwarmSuite) TestAPISwarmLeaveOnPendingJoin(c *check.C) {
func (s *DockerSwarmSuite) TestAPISwarmLeaveOnPendingJoin(c *testing.T) {
testRequires(c, Network)
s.AddDaemon(c, true, true)
d2 := s.AddDaemon(c, false, false)
@@ -450,7 +463,7 @@ func (s *DockerSwarmSuite) TestAPISwarmLeaveOnPendingJoin(c *check.C) {

assert.NilError(c, d2.SwarmLeave(c, true))

waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, 1)
poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))

id2, err := d2.Cmd("ps", "-q")
assert.NilError(c, err, id2)
@@ -458,7 +471,7 @@ func (s *DockerSwarmSuite) TestAPISwarmLeaveOnPendingJoin(c *check.C) {
}

// #23705
func (s *DockerSwarmSuite) TestAPISwarmRestoreOnPendingJoin(c *check.C) {
func (s *DockerSwarmSuite) TestAPISwarmRestoreOnPendingJoin(c *testing.T) {
testRequires(c, Network)
d := s.AddDaemon(c, false, false)
client := d.NewClientT(c)
@@ -468,7 +481,7 @@ func (s *DockerSwarmSuite) TestAPISwarmRestoreOnPendingJoin(c *check.C) {
})
assert.ErrorContains(c, err, "Timeout was reached")

waitAndAssert(c, defaultReconciliationTimeout, d.CheckLocalNodeState, checker.Equals, swarm.LocalNodeStatePending)
poll.WaitOn(c, pollCheck(c, d.CheckLocalNodeState, checker.Equals(swarm.LocalNodeStatePending)), poll.WithTimeout(defaultReconciliationTimeout))

d.RestartNode(c)

@@ -476,7 +489,7 @@ func (s *DockerSwarmSuite) TestAPISwarmRestoreOnPendingJoin(c *check.C) {
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)
}

func (s *DockerSwarmSuite) TestAPISwarmManagerRestore(c *check.C) {
func (s *DockerSwarmSuite) TestAPISwarmManagerRestore(c *testing.T) {
d1 := s.AddDaemon(c, true, true)

instances := 2
@@ -503,17 +516,17 @@ func (s *DockerSwarmSuite) TestAPISwarmManagerRestore(c *check.C) {
d3.GetService(c, id)
}

func (s *DockerSwarmSuite) TestAPISwarmScaleNoRollingUpdate(c *check.C) {
func (s *DockerSwarmSuite) TestAPISwarmScaleNoRollingUpdate(c *testing.T) {
d := s.AddDaemon(c, true, true)

instances := 2
id := d.CreateService(c, simpleTestService, setInstances(instances))

waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
containers := d.ActiveContainers(c)
instances = 4
d.UpdateService(c, d.GetService(c, id), setInstances(instances))
waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
containers2 := d.ActiveContainers(c)

loop0:
@@ -527,7 +540,7 @@ loop0:
}
}

func (s *DockerSwarmSuite) TestAPISwarmInvalidAddress(c *check.C) {
func (s *DockerSwarmSuite) TestAPISwarmInvalidAddress(c *testing.T) {
d := s.AddDaemon(c, false, false)
req := swarm.InitRequest{
ListenAddr: "",
@@ -545,20 +558,20 @@ func (s *DockerSwarmSuite) TestAPISwarmInvalidAddress(c *check.C) {
assert.Equal(c, res.StatusCode, http.StatusBadRequest)
}

func (s *DockerSwarmSuite) TestAPISwarmForceNewCluster(c *check.C) {
func (s *DockerSwarmSuite) TestAPISwarmForceNewCluster(c *testing.T) {
d1 := s.AddDaemon(c, true, true)
d2 := s.AddDaemon(c, true, true)

instances := 2
id := d1.CreateService(c, simpleTestService, setInstances(instances))
waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals, instances)
poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))

// drain d2, all containers should move to d1
d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) {
n.Spec.Availability = swarm.NodeAvailabilityDrain
})
waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, instances)
waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, 0)
poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount, checker.Equals(0)), poll.WithTimeout(defaultReconciliationTimeout))

d2.Stop(c)

@@ -567,7 +580,7 @@ func (s *DockerSwarmSuite) TestAPISwarmForceNewCluster(c *check.C) {
Spec: swarm.Spec{},
})

waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, instances)
poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))

d3 := s.AddDaemon(c, true, true)
info := d3.SwarmInfo(c)
@@ -577,12 +590,12 @@ func (s *DockerSwarmSuite) TestAPISwarmForceNewCluster(c *check.C) {
instances = 4
d3.UpdateService(c, d3.GetService(c, id), setInstances(instances))

waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances)
poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
}

func simpleTestService(s *swarm.Service) {
ureplicas := uint64(1)
restartDelay := time.Duration(100 * time.Millisecond)
restartDelay := 100 * time.Millisecond

s.Spec = swarm.ServiceSpec{
TaskTemplate: swarm.TaskSpec{
@@ -605,7 +618,7 @@ func simpleTestService(s *swarm.Service) {

func serviceForUpdate(s *swarm.Service) {
ureplicas := uint64(1)
restartDelay := time.Duration(100 * time.Millisecond)
restartDelay := 100 * time.Millisecond

s.Spec = swarm.ServiceSpec{
TaskTemplate: swarm.TaskSpec{
@@ -716,7 +729,7 @@ func setGlobalMode(s *swarm.Service) {
}
}

func checkClusterHealth(c *check.C, cl []*daemon.Daemon, managerCount, workerCount int) {
func checkClusterHealth(c *testing.T, cl []*daemon.Daemon, managerCount, workerCount int) {
var totalMCount, totalWCount int

for _, d := range cl {
@@ -725,13 +738,13 @@ func checkClusterHealth(c *check.C, cl []*daemon.Daemon, managerCount, workerCou
)

// check info in a waitAndAssert, because if the cluster doesn't have a leader, `info` will return an error
checkInfo := func(c *check.C) (interface{}, check.CommentInterface) {
checkInfo := func(c *testing.T) (interface{}, string) {
client := d.NewClientT(c)
daemonInfo, err := client.Info(context.Background())
info = daemonInfo.Swarm
return err, check.Commentf("cluster not ready in time")
return err, "cluster not ready in time"
}
waitAndAssert(c, defaultReconciliationTimeout, checkInfo, checker.IsNil)
poll.WaitOn(c, pollCheck(c, checkInfo, checker.IsNil()), poll.WithTimeout(defaultReconciliationTimeout))
if !info.ControlAvailable {
totalWCount++
continue
@@ -742,25 +755,25 @@ func checkClusterHealth(c *check.C, cl []*daemon.Daemon, managerCount, workerCou
var mCount, wCount int

for _, n := range d.ListNodes(c) {
waitReady := func(c *check.C) (interface{}, check.CommentInterface) {
waitReady := func(c *testing.T) (interface{}, string) {
if n.Status.State == swarm.NodeStateReady {
return true, nil
return true, ""
}
nn := d.GetNode(c, n.ID)
n = *nn
return n.Status.State == swarm.NodeStateReady, check.Commentf("state of node %s, reported by %s", n.ID, d.NodeID())
return n.Status.State == swarm.NodeStateReady, fmt.Sprintf("state of node %s, reported by %s", n.ID, d.NodeID())
}
waitAndAssert(c, defaultReconciliationTimeout, waitReady, checker.True)
poll.WaitOn(c, pollCheck(c, waitReady, checker.True()), poll.WithTimeout(defaultReconciliationTimeout))

waitActive := func(c *check.C) (interface{}, check.CommentInterface) {
waitActive := func(c *testing.T) (interface{}, string) {
if n.Spec.Availability == swarm.NodeAvailabilityActive {
return true, nil
return true, ""
}
nn := d.GetNode(c, n.ID)
n = *nn
return n.Spec.Availability == swarm.NodeAvailabilityActive, check.Commentf("availability of node %s, reported by %s", n.ID, d.NodeID())
return n.Spec.Availability == swarm.NodeAvailabilityActive, fmt.Sprintf("availability of node %s, reported by %s", n.ID, d.NodeID())
}
waitAndAssert(c, defaultReconciliationTimeout, waitActive, checker.True)
poll.WaitOn(c, pollCheck(c, waitActive, checker.True()), poll.WithTimeout(defaultReconciliationTimeout))

if n.Spec.Role == swarm.NodeRoleManager {
assert.Assert(c, n.ManagerStatus != nil, "manager status of node %s (manager), reported by %s", n.ID, d.NodeID())
@@ -781,7 +794,7 @@ func checkClusterHealth(c *check.C, cl []*daemon.Daemon, managerCount, workerCou
assert.Equal(c, totalWCount, workerCount)
}

func (s *DockerSwarmSuite) TestAPISwarmRestartCluster(c *check.C) {
func (s *DockerSwarmSuite) TestAPISwarmRestartCluster(c *testing.T) {
mCount, wCount := 5, 1

var nodes []*daemon.Daemon
@@ -846,12 +859,12 @@ func (s *DockerSwarmSuite) TestAPISwarmRestartCluster(c *check.C) {
checkClusterHealth(c, nodes, mCount, wCount)
}

func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateWithName(c *check.C) {
func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateWithName(c *testing.T) {
d := s.AddDaemon(c, true, true)

instances := 2
id := d.CreateService(c, simpleTestService, setInstances(instances))
waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))

service := d.GetService(c, id)
instances = 5
@@ -861,18 +874,18 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateWithName(c *check.C) {
defer cli.Close()
_, err := cli.ServiceUpdate(context.Background(), service.Spec.Name, service.Version, service.Spec, types.ServiceUpdateOptions{})
assert.NilError(c, err)
waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
}

// Unlocking an unlocked swarm results in an error
func (s *DockerSwarmSuite) TestAPISwarmUnlockNotLocked(c *check.C) {
func (s *DockerSwarmSuite) TestAPISwarmUnlockNotLocked(c *testing.T) {
d := s.AddDaemon(c, true, true)
err := d.SwarmUnlock(c, swarm.UnlockRequest{UnlockKey: "wrong-key"})
assert.ErrorContains(c, err, "swarm is not locked")
}

// #29885
func (s *DockerSwarmSuite) TestAPISwarmErrorHandling(c *check.C) {
func (s *DockerSwarmSuite) TestAPISwarmErrorHandling(c *testing.T) {
ln, err := net.Listen("tcp", fmt.Sprintf(":%d", defaultSwarmPort))
assert.NilError(c, err)
defer ln.Close()
@@ -887,7 +900,7 @@ func (s *DockerSwarmSuite) TestAPISwarmErrorHandling(c *check.C) {
// Test case for 30242, where duplicate networks, with different drivers `bridge` and `overlay`,
// caused both scopes to be `swarm` for `docker network inspect` and `docker network ls`.
// This test makes sure the fixes correctly output scopes instead.
func (s *DockerSwarmSuite) TestAPIDuplicateNetworks(c *check.C) {
func (s *DockerSwarmSuite) TestAPIDuplicateNetworks(c *testing.T) {
d := s.AddDaemon(c, true, true)
cli := d.NewClientT(c)
defer cli.Close()
@@ -917,7 +930,7 @@ func (s *DockerSwarmSuite) TestAPIDuplicateNetworks(c *check.C) {
}

// Test case for 30178
func (s *DockerSwarmSuite) TestAPISwarmHealthcheckNone(c *check.C) {
func (s *DockerSwarmSuite) TestAPISwarmHealthcheckNone(c *testing.T) {
// Issue #36386 can be a independent one, which is worth further investigation.
c.Skip("Root cause of Issue #36386 is needed")
d := s.AddDaemon(c, true, true)
@@ -936,7 +949,7 @@ func (s *DockerSwarmSuite) TestAPISwarmHealthcheckNone(c *check.C) {
}
})

waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))

containers := d.ActiveContainers(c)

@@ -944,7 +957,7 @@ func (s *DockerSwarmSuite) TestAPISwarmHealthcheckNone(c *check.C) {
assert.NilError(c, err, out)
}

func (s *DockerSwarmSuite) TestSwarmRepeatedRootRotation(c *check.C) {
func (s *DockerSwarmSuite) TestSwarmRepeatedRootRotation(c *testing.T) {
m := s.AddDaemon(c, true, true)
w := s.AddDaemon(c, true, false)

@@ -1013,7 +1026,7 @@ func (s *DockerSwarmSuite) TestSwarmRepeatedRootRotation(c *check.C) {
}
}

func (s *DockerSwarmSuite) TestAPINetworkInspectWithScope(c *check.C) {
func (s *DockerSwarmSuite) TestAPINetworkInspectWithScope(c *testing.T) {
d := s.AddDaemon(c, true, true)

name := "test-scoped-network"

@@ -7,21 +7,21 @@ import (
"runtime"
"strconv"
"strings"
"testing"

"github.com/docker/docker/api"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/internal/test/request"
"github.com/go-check/check"
"gotest.tools/assert"
)

func (s *DockerSuite) TestAPIOptionsRoute(c *check.C) {
func (s *DockerSuite) TestAPIOptionsRoute(c *testing.T) {
resp, _, err := request.Do("/", request.Method(http.MethodOptions))
assert.NilError(c, err)
assert.Equal(c, resp.StatusCode, http.StatusOK)
}

func (s *DockerSuite) TestAPIGetEnabledCORS(c *check.C) {
func (s *DockerSuite) TestAPIGetEnabledCORS(c *testing.T) {
res, body, err := request.Get("/version")
assert.NilError(c, err)
assert.Equal(c, res.StatusCode, http.StatusOK)
@@ -33,7 +33,7 @@ func (s *DockerSuite) TestAPIGetEnabledCORS(c *check.C) {
//assert.Equal(c, res.Header.Get("Access-Control-Allow-Headers"), "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth")
}

func (s *DockerSuite) TestAPIClientVersionOldNotSupported(c *check.C) {
func (s *DockerSuite) TestAPIClientVersionOldNotSupported(c *testing.T) {
if testEnv.OSType != runtime.GOOS {
c.Skip("Daemon platform doesn't match test platform")
}
@@ -57,7 +57,7 @@ func (s *DockerSuite) TestAPIClientVersionOldNotSupported(c *check.C) {
assert.Equal(c, strings.TrimSpace(string(content)), expected)
}

func (s *DockerSuite) TestAPIErrorJSON(c *check.C) {
func (s *DockerSuite) TestAPIErrorJSON(c *testing.T) {
httpResp, body, err := request.Post("/containers/create", request.JSONBody(struct{}{}))
assert.NilError(c, err)
if versions.LessThan(testEnv.DaemonAPIVersion(), "1.32") {
@@ -71,7 +71,7 @@ func (s *DockerSuite) TestAPIErrorJSON(c *check.C) {
assert.Equal(c, getErrorMessage(c, b), "Config cannot be empty in order to create a container")
}

func (s *DockerSuite) TestAPIErrorPlainText(c *check.C) {
func (s *DockerSuite) TestAPIErrorPlainText(c *testing.T) {
// Windows requires API 1.25 or later. This test is validating a behaviour which was present
// in v1.23, but changed in 1.24, hence not applicable on Windows. See apiVersionSupportsJSONErrors
testRequires(c, DaemonIsLinux)
@@ -88,7 +88,7 @@ func (s *DockerSuite) TestAPIErrorPlainText(c *check.C) {
assert.Equal(c, strings.TrimSpace(string(b)), "Config cannot be empty in order to create a container")
}

func (s *DockerSuite) TestAPIErrorNotFoundJSON(c *check.C) {
func (s *DockerSuite) TestAPIErrorNotFoundJSON(c *testing.T) {
// 404 is a different code path to normal errors, so test separately
httpResp, body, err := request.Get("/notfound", request.JSON)
assert.NilError(c, err)
@@ -99,7 +99,7 @@ func (s *DockerSuite) TestAPIErrorNotFoundJSON(c *check.C) {
assert.Equal(c, getErrorMessage(c, b), "page not found")
}

func (s *DockerSuite) TestAPIErrorNotFoundPlainText(c *check.C) {
func (s *DockerSuite) TestAPIErrorNotFoundPlainText(c *testing.T) {
httpResp, body, err := request.Get("/v1.23/notfound", request.JSON)
assert.NilError(c, err)
assert.Equal(c, httpResp.StatusCode, http.StatusNotFound)

@@ -8,17 +8,17 @@ import (
"runtime"
"strings"
"sync"
"testing"
"time"

"github.com/docker/docker/integration-cli/cli"
"github.com/go-check/check"
"gotest.tools/assert"
"gotest.tools/icmd"
)

const attachWait = 5 * time.Second

func (s *DockerSuite) TestAttachMultipleAndRestart(c *check.C) {
func (s *DockerSuite) TestAttachMultipleAndRestart(c *testing.T) {
endGroup := &sync.WaitGroup{}
startGroup := &sync.WaitGroup{}
endGroup.Add(3)
@@ -88,7 +88,7 @@ func (s *DockerSuite) TestAttachMultipleAndRestart(c *check.C) {
}
}

func (s *DockerSuite) TestAttachTTYWithoutStdin(c *check.C) {
func (s *DockerSuite) TestAttachTTYWithoutStdin(c *testing.T) {
// TODO @jhowardmsft. Figure out how to get this running again reliable on Windows.
// It works by accident at the moment. Sometimes. I've gone back to v1.13.0 and see the same.
// On Windows, docker run -d -ti busybox causes the container to exit immediately.
@@ -133,7 +133,7 @@ func (s *DockerSuite) TestAttachTTYWithoutStdin(c *check.C) {
}
}

func (s *DockerSuite) TestAttachDisconnect(c *check.C) {
func (s *DockerSuite) TestAttachDisconnect(c *testing.T) {
testRequires(c, DaemonIsLinux)
out, _ := dockerCmd(c, "run", "-di", "busybox", "/bin/cat")
id := strings.TrimSpace(out)
@@ -147,7 +147,7 @@ func (s *DockerSuite) TestAttachDisconnect(c *check.C) {
stdout, err := cmd.StdoutPipe()
assert.NilError(c, err)
defer stdout.Close()
c.Assert(cmd.Start(), check.IsNil)
assert.Assert(c, cmd.Start() == nil)
defer func() {
cmd.Process.Kill()
cmd.Wait()
@@ -157,16 +157,16 @@ func (s *DockerSuite) TestAttachDisconnect(c *check.C) {
assert.NilError(c, err)
out, err = bufio.NewReader(stdout).ReadString('\n')
assert.NilError(c, err)
c.Assert(strings.TrimSpace(out), check.Equals, "hello")
assert.Equal(c, strings.TrimSpace(out), "hello")

c.Assert(stdin.Close(), check.IsNil)
assert.Assert(c, stdin.Close() == nil)

// Expect container to still be running after stdin is closed
running := inspectField(c, id, "State.Running")
c.Assert(running, check.Equals, "true")
assert.Equal(c, running, "true")
}

func (s *DockerSuite) TestAttachPausedContainer(c *check.C) {
func (s *DockerSuite) TestAttachPausedContainer(c *testing.T) {
testRequires(c, IsPausable)
runSleepingContainer(c, "-d", "--name=test")
dockerCmd(c, "pause", "test")
Some files were not shown because too many files have changed in this diff.