Mirror of https://github.com/moby/moby.git (synced 2026-01-13 11:42:02 +00:00)

Compare commits: 23.0...v19.03.2-b

225 Commits
| SHA1 |
|---|
| 93c18c73a3 |
| cad2cd71b7 |
| 96e086dc33 |
| cddce2dfa7 |
| 65f964aa6b |
| 8fca769bd5 |
| ef5dd6e46d |
| 8533594ad6 |
| 32802bc7d9 |
| 4bed01298c |
| 56ca630f27 |
| a02539b3e8 |
| 0dc7bdc325 |
| b61ee6e4af |
| 56784591bf |
| 6eeb9ec3d6 |
| dd7ef76474 |
| 0375566412 |
| 3678438dd8 |
| 1cc7b3881d |
| 03b1b078f9 |
| 5067389c36 |
| 6d98ef8c69 |
| d5088c1488 |
| df3689f8d0 |
| 3fd0be03f0 |
| 37d9901e0f |
| 29fe4e58c6 |
| 685565ad18 |
| 305b2416ea |
| f5b64c3ffe |
| 9f9dab03c1 |
| c7139be62b |
| b0ef7422b0 |
| 1fbed3ffc9 |
| dd85af0e12 |
| 3bbf7b0d4d |
| bc9183ba0e |
| 47517880ec |
| 7b0cf8b16d |
| 47a7f762d3 |
| 8ba31dccd1 |
| 80376f9e13 |
| ee64eae903 |
| ff0a0e364b |
| 791aa3c338 |
| 6e9aba883c |
| 2f1984c6df |
| 640193b2bb |
| 97ca6434e0 |
| c364e5d1ba |
| 3bf3a1ae65 |
| 439ed140ee |
| a50d77700e |
| 6b7330dcd4 |
| 8ecf5409e9 |
| 6efcd74c6b |
| eaa83640fa |
| cbdf487768 |
| b0f01be33f |
| 80e2871d21 |
| 4ef8f6d323 |
| 56ff8ccc91 |
| e01625bc70 |
| fa8dd90ceb |
| 509a793378 |
| 705d9623b7 |
| c687381870 |
| 1eadbf1bd0 |
| 685f13f3fd |
| 638cf86cbe |
| d27a919cd2 |
| a69cd8239f |
| 8a2f96096a |
| b07f53d0a4 |
| e61e107040 |
| 023166b530 |
| 884c9e268f |
| 99678a93ed |
| 99cd23cefd |
| 4d3dfd24ec |
| 21ae66c664 |
| da6dddcd04 |
| d1b0475d89 |
| 42757e8794 |
| 3452f743ab |
| b9cd7b59b6 |
| 8f4b96f19e |
| 186afe3ce3 |
| a0063c534a |
| 9b97965f22 |
| e3f83e7aa7 |
| 44023afb7d |
| 29ff2800c3 |
| d44a48835f |
| 275bf7ec03 |
| de45ce73eb |
| ceb773e1ff |
| 60013ba69b |
| 96df6d4d0b |
| a33a82b42f |
| 367870a4d5 |
| 175013d0cb |
| a6905fa2e5 |
| 510e79ebe9 |
| a8d1b4a1ab |
| 88374fa982 |
| 049a1090c3 |
| 020bb75219 |
| a24b9087ce |
| 48786ba842 |
| dde48c6715 |
| e7c02a0508 |
| 31722d3f5a |
| a81278befe |
| cad766f6c7 |
| f0f7020b5d |
| 65ba452bb0 |
| 76d936ae76 |
| d1eae89590 |
| 7d1414ec3e |
| 5fbc0a16e2 |
| 0678d71038 |
| 746dce1994 |
| 36f0fe6524 |
| 737d57bad6 |
| 287240a965 |
| ca602fa7c6 |
| 36324c3bbd |
| 21c33eb7e3 |
| feb373a216 |
| d7080a7a2e |
| b915ec1e7b |
| 2de4afdee5 |
| 26a35ddcd1 |
| d575af39ac |
| 57b59f876e |
| 3e057d527d |
| 9781cceb09 |
| d0f4f42bd4 |
| d59fb97c5b |
| e1e47d090d |
| a62d9b9c21 |
| a004854097 |
| 5925508b31 |
| 5051fe047c |
| 57a9697161 |
| 936432326a |
| 9eeb2b5ef0 |
| eaa3e69d14 |
| 80a35e0bd4 |
| cdeef06801 |
| 181a64a5aa |
| 63eecadf82 |
| 2b216674da |
| 868d87b08e |
| a7e03f69be |
| 96daf37c83 |
| 3dec835d84 |
| 4da607559f |
| 8dd7bd9981 |
| 7cc3681ad6 |
| e205cd89cd |
| c56df1abf3 |
| d8185417d9 |
| 7cb78b6259 |
| 79ac8f95af |
| 1c346f16a3 |
| d347049802 |
| 939aa52465 |
| 29c50668b3 |
| 55c5381584 |
| 750e0ace06 |
| 29498693dd |
| 56e92239a6 |
| 11319732ab |
| 853816ae79 |
| 8f61032ec4 |
| bff7e300e6 |
| ff44133643 |
| 9fdccf6a47 |
| 3f4657f6db |
| dcc05fcf3e |
| 03ce4080a4 |
| 61828453db |
| d371b283c3 |
| 4784740273 |
| 31b0688de7 |
| 6896305b57 |
| 931c4c1023 |
| 6cc14f5854 |
| 1910607215 |
| dfa1031015 |
| 790388a8c5 |
| ea09008423 |
| 8d8904f02b |
| 2a7513a972 |
| c47f2a4a1a |
| 526a72fd77 |
| f76879dd64 |
| e7a837120d |
| 04c51495da |
| 02baf07d77 |
| 6d0823af0a |
| 8493fb18ae |
| e8b9a752d3 |
| 14bb71d508 |
| 2e95499142 |
| 8d428458a2 |
| 5f60a56544 |
| a3b4e92d66 |
| cedf201aef |
| 545bc6b4d8 |
| 620d9d3c75 |
| e1b045c25e |
| 11e2802015 |
| cb8d67505d |
| 7d3405b4ba |
| d36c7de19e |
| 6605a26c75 |
| ce9cabf0f0 |
| dc6d1ac663 |
| 1fdd24579c |
| 3afbf83cc5 |
| 61a234d562 |
Dockerfile (21 changes)
@@ -25,11 +25,12 @@
#

ARG CROSS="false"
ARG GO_VERSION=1.12.8

FROM golang:1.12.4 AS base
# allow replacing httpredir or deb mirror
ARG APT_MIRROR=deb.debian.org
RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list
FROM golang:${GO_VERSION}-stretch AS base
ARG APT_MIRROR
RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \
&& sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list

FROM base AS criu
# Install CRIU for checkpoint/restore support

@@ -51,6 +52,11 @@ RUN apt-get update && apt-get install -y \
&& make PREFIX=/build/ install-criu

FROM base AS registry
# Install two versions of the registry. The first is an older version that
# only supports schema1 manifests. The second is a newer version that supports
# both. This allows integration-cli tests to cover push/pull with both schema1
# and schema2 manifests.
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd
ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827
RUN set -x \
&& export GOPATH="$(mktemp -d)" \

@@ -58,6 +64,13 @@ RUN set -x \
&& (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \
&& GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \
&& case $(dpkg --print-architecture) in \
amd64|ppc64*|s390x) \
(cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1"); \
GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \
go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \
;; \
esac \
&& rm -rf "$GOPATH"
@@ -1,5 +1,6 @@
## Step 1: Build tests
FROM golang:1.12.4-alpine as builder
ARG GO_VERSION=1.12.8

FROM golang:${GO_VERSION}-alpine AS base

RUN apk --no-cache add \
bash \

@@ -9,37 +10,55 @@ RUN apk --no-cache add \
lvm2-dev \
jq

RUN mkdir -p /build/
RUN mkdir -p /go/src/github.com/docker/docker/
WORKDIR /go/src/github.com/docker/docker/

# Generate frozen images
COPY contrib/download-frozen-image-v2.sh contrib/download-frozen-image-v2.sh
RUN contrib/download-frozen-image-v2.sh /output/docker-frozen-images \
FROM base AS frozen-images
# Get useful and necessary Hub images so we can "docker load" locally instead of pulling
COPY contrib/download-frozen-image-v2.sh /
RUN /download-frozen-image-v2.sh /build \
buildpack-deps:jessie@sha256:dd86dced7c9cd2a724e779730f0a53f93b7ef42228d4344b25ce9a42a1486251 \
busybox:latest@sha256:bbc3a03235220b170ba48a157dd097dd1379299370e1ed99ce976df0355d24f0 \
busybox:glibc@sha256:0b55a30394294ab23b9afd58fab94e61a923f5834fba7ddbae7f8e0c11ba85e6 \
debian:jessie@sha256:287a20c5f73087ab406e6b364833e3fb7b3ae63ca0eb3486555dc27ed32c6e60 \
hello-world:latest@sha256:be0cd392e45be79ffeffa6b05338b98ebb16c87b255f48e297ec7f98e123905c
# See also ensureFrozenImagesLinux() in "integration-cli/fixtures_linux_daemon_test.go" (which needs to be updated when adding images to this list)

# Install dockercli
# Please edit hack/dockerfile/install/<name>.installer to update them.
COPY hack/dockerfile/install hack/dockerfile/install
RUN ./hack/dockerfile/install/install.sh dockercli

# Set tag and add sources
ARG DOCKER_GITCOMMIT
ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT:-undefined}
ADD . .
FROM base AS dockercli
ENV INSTALL_BINARY_NAME=dockercli
COPY hack/dockerfile/install/install.sh ./install.sh
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME

# Build DockerSuite.TestBuild* dependency
RUN CGO_ENABLED=0 go build -buildmode=pie -o /output/httpserver github.com/docker/docker/contrib/httpserver
FROM base AS contrib
COPY contrib/syscall-test /build/syscall-test
COPY contrib/httpserver/Dockerfile /build/httpserver/Dockerfile
COPY contrib/httpserver contrib/httpserver
RUN CGO_ENABLED=0 go build -buildmode=pie -o /build/httpserver/httpserver github.com/docker/docker/contrib/httpserver

# Build the integration tests and copy the resulting binaries to /output/tests
# Build the integration tests and copy the resulting binaries to /build/tests
FROM base AS builder

# Set tag and add sources
COPY . .
# Copy test sources tests that use assert can print errors
RUN mkdir -p /build${PWD} && find integration integration-cli -name \*_test.go -exec cp --parents '{}' /build${PWD} \;
# Build and install test binaries
ARG DOCKER_GITCOMMIT=undefined
RUN hack/make.sh build-integration-test-binary
RUN mkdir -p /output/tests && find . -name test.main -exec cp --parents '{}' /output/tests \;
RUN mkdir -p /build/tests && find . -name test.main -exec cp --parents '{}' /build/tests \;

## Step 2: Generate testing image
FROM alpine:3.8 as runner
## Generate testing image
FROM alpine:3.9 as runner

ENV DOCKER_REMOTE_DAEMON=1
ENV DOCKER_INTEGRATION_DAEMON_DEST=/
ENTRYPOINT ["/scripts/run.sh"]

# Add an unprivileged user to be used for tests which need it
RUN addgroup docker && adduser -D -G docker unprivilegeduser -s /bin/ash

# GNU tar is used for generating the emptyfs image
RUN apk --no-cache add \

@@ -52,21 +71,14 @@ RUN apk --no-cache add \
tar \
xz

# Add an unprivileged user to be used for tests which need it
RUN addgroup docker && adduser -D -G docker unprivilegeduser -s /bin/ash
COPY hack/test/e2e-run.sh /scripts/run.sh
COPY hack/make/.ensure-emptyfs /scripts/ensure-emptyfs.sh

COPY contrib/httpserver/Dockerfile /tests/contrib/httpserver/Dockerfile
COPY contrib/syscall-test /tests/contrib/syscall-test
COPY integration-cli/fixtures /tests/integration-cli/fixtures
COPY integration/testdata /tests/integration/testdata
COPY integration/build/testdata /tests/integration/build/testdata
COPY integration-cli/fixtures /tests/integration-cli/fixtures

COPY hack/test/e2e-run.sh /scripts/run.sh
COPY hack/make/.ensure-emptyfs /scripts/ensure-emptyfs.sh

COPY --from=builder /output/docker-frozen-images /docker-frozen-images
COPY --from=builder /output/httpserver /tests/contrib/httpserver/httpserver
COPY --from=builder /output/tests /tests
COPY --from=builder /usr/local/bin/docker /usr/bin/docker

ENV DOCKER_REMOTE_DAEMON=1 DOCKER_INTEGRATION_DAEMON_DEST=/

ENTRYPOINT ["/scripts/run.sh"]
COPY --from=frozen-images /build/ /docker-frozen-images
COPY --from=dockercli /build/ /usr/bin/
COPY --from=contrib /build/ /tests/contrib/
COPY --from=builder /build/ /
@@ -5,7 +5,9 @@

# This represents the bare minimum required to build and test Docker.

FROM golang:1.12.4
ARG GO_VERSION=1.12.8

FROM golang:${GO_VERSION}-stretch

# allow replacing httpredir or deb mirror
ARG APT_MIRROR=deb.debian.org
@@ -165,10 +165,12 @@ FROM microsoft/windowsservercore
# Use PowerShell as the default shell
SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop'; $ProgressPreference = 'SilentlyContinue';"]

ARG GO_VERSION=1.12.8

# Environment variable notes:
# - GO_VERSION must be consistent with 'Dockerfile' used by Linux.
# - FROM_DOCKERFILE is used for detection of building within a container.
ENV GO_VERSION=1.12.4 `
ENV GO_VERSION=${GO_VERSION} `
GIT_VERSION=2.11.1 `
GOPATH=C:\go `
FROM_DOCKERFILE=1
Makefile (2 changes)
@@ -150,7 +150,7 @@ build: DOCKER_BUILD_ARGS += --build-arg=CROSS=$(DOCKER_CROSS)
build: DOCKER_BUILDKIT ?= 1
build: bundles
$(warning The docker client CLI has moved to github.com/docker/cli. For a dev-test cycle involving the CLI, run:${\n} DOCKER_CLI_PATH=/host/path/to/cli/binary make shell ${\n} then change the cli and compile into a binary at the same location.${\n})
DOCKER_BUILDKIT="${DOCKER_BUILDKIT}" docker build ${BUILD_APT_MIRROR} ${DOCKER_BUILD_ARGS} ${DOCKER_BUILD_OPTS} -t "$(DOCKER_IMAGE)" -f "$(DOCKERFILE)" .
DOCKER_BUILDKIT="${DOCKER_BUILDKIT}" docker build --build-arg=GO_VERSION ${BUILD_APT_MIRROR} ${DOCKER_BUILD_ARGS} ${DOCKER_BUILD_OPTS} -t "$(DOCKER_IMAGE)" -f "$(DOCKERFILE)" .

bundles:
mkdir bundles
@@ -87,3 +87,10 @@ To run the integration test suite:
```
make test-integration
```

You can change a version of golang used for building stuff that is being tested
by setting `GO_VERSION` variable, for example:

```
make GO_VERSION=1.12.8 test
```
@@ -91,7 +91,9 @@ func (b *Backend) Build(ctx context.Context, config backend.BuildConfig) (string
stdout := config.ProgressWriter.StdoutFormatter
fmt.Fprintf(stdout, "Successfully built %s\n", stringid.TruncateID(imageID))
}
err = tagger.TagImages(image.ID(imageID))
if imageID != "" {
err = tagger.TagImages(image.ID(imageID))
}
return imageID, err
}
@@ -41,7 +41,7 @@ func DebugRequestMiddleware(handler func(ctx context.Context, w http.ResponseWri

var postForm map[string]interface{}
if err := json.Unmarshal(b, &postForm); err == nil {
maskSecretKeys(postForm, r.RequestURI)
maskSecretKeys(postForm)
formStr, errMarshal := json.Marshal(postForm)
if errMarshal == nil {
logrus.Debugf("form data: %s", string(formStr))

@@ -54,41 +54,37 @@ func DebugRequestMiddleware(handler func(ctx context.Context, w http.ResponseWri
}
}

func maskSecretKeys(inp interface{}, path string) {
// Remove any query string from the path
idx := strings.Index(path, "?")
if idx != -1 {
path = path[:idx]
}
// Remove trailing / characters
path = strings.TrimRight(path, "/")

func maskSecretKeys(inp interface{}) {
if arr, ok := inp.([]interface{}); ok {
for _, f := range arr {
maskSecretKeys(f, path)
maskSecretKeys(f)
}
return
}

if form, ok := inp.(map[string]interface{}); ok {
scrub := []string{
// Note: The Data field contains the base64-encoded secret in 'secret'
// and 'config' create and update requests. Currently, no other POST
// API endpoints use a data field, so we scrub this field unconditionally.
// Change this handling to be conditional if a new endpoint is added
// in future where this field should not be scrubbed.
"data",
"jointoken",
"password",
"secret",
"signingcakey",
"unlockkey",
}
loop0:
for k, v := range form {
for _, m := range []string{"password", "secret", "jointoken", "unlockkey", "signingcakey"} {
for _, m := range scrub {
if strings.EqualFold(m, k) {
form[k] = "*****"
continue loop0
}
}
maskSecretKeys(v, path)
}

// Route-specific redactions
if strings.HasSuffix(path, "/secrets/create") {
for k := range form {
if k == "Data" {
form[k] = "*****"
}
}
maskSecretKeys(v)
}
}
}
@@ -9,31 +9,25 @@ import (

func TestMaskSecretKeys(t *testing.T) {
tests := []struct {
path string
doc string
input map[string]interface{}
expected map[string]interface{}
}{
{
path: "/v1.30/secrets/create",
doc: "secret/config create and update requests",
input: map[string]interface{}{"Data": "foo", "Name": "name", "Labels": map[string]interface{}{}},
expected: map[string]interface{}{"Data": "*****", "Name": "name", "Labels": map[string]interface{}{}},
},
{
path: "/v1.30/secrets/create//",
input: map[string]interface{}{"Data": "foo", "Name": "name", "Labels": map[string]interface{}{}},
expected: map[string]interface{}{"Data": "*****", "Name": "name", "Labels": map[string]interface{}{}},
},

{
path: "/secrets/create?key=val",
input: map[string]interface{}{"Data": "foo", "Name": "name", "Labels": map[string]interface{}{}},
expected: map[string]interface{}{"Data": "*****", "Name": "name", "Labels": map[string]interface{}{}},
},
{
path: "/v1.30/some/other/path",
doc: "masking other fields (recursively)",
input: map[string]interface{}{
"password": "pass",
"password": "pass",
"secret": "secret",
"jointoken": "jointoken",
"unlockkey": "unlockkey",
"signingcakey": "signingcakey",
"other": map[string]interface{}{
"password": "pass",
"secret": "secret",
"jointoken": "jointoken",
"unlockkey": "unlockkey",

@@ -41,8 +35,13 @@ func TestMaskSecretKeys(t *testing.T) {
},
},
expected: map[string]interface{}{
"password": "*****",
"password": "*****",
"secret": "*****",
"jointoken": "*****",
"unlockkey": "*****",
"signingcakey": "*****",
"other": map[string]interface{}{
"password": "*****",
"secret": "*****",
"jointoken": "*****",
"unlockkey": "*****",

@@ -50,10 +49,27 @@ func TestMaskSecretKeys(t *testing.T) {
},
},
},
{
doc: "case insensitive field matching",
input: map[string]interface{}{
"PASSWORD": "pass",
"other": map[string]interface{}{
"PASSWORD": "pass",
},
},
expected: map[string]interface{}{
"PASSWORD": "*****",
"other": map[string]interface{}{
"PASSWORD": "*****",
},
},
},
}

for _, testcase := range tests {
maskSecretKeys(testcase.input, testcase.path)
assert.Check(t, is.DeepEqual(testcase.expected, testcase.input))
t.Run(testcase.doc, func(t *testing.T) {
maskSecretKeys(testcase.input)
assert.Check(t, is.DeepEqual(testcase.expected, testcase.input))
})
}
}
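To see the rewritten masking behavior outside the test table, here is a minimal stand-alone sketch; it is not the daemon's package, and everything but the scrub list (copied from the hunk above) is illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// scrub mirrors the key list introduced in the diff above; matching is case-insensitive.
var scrub = []string{"data", "jointoken", "password", "secret", "signingcakey", "unlockkey"}

// maskSecretKeys walks maps and slices recursively and overwrites the value of
// any key in the scrub list, so secrets nested inside request bodies are masked too.
func maskSecretKeys(inp interface{}) {
	if arr, ok := inp.([]interface{}); ok {
		for _, f := range arr {
			maskSecretKeys(f)
		}
		return
	}
	if form, ok := inp.(map[string]interface{}); ok {
	loop0:
		for k, v := range form {
			for _, m := range scrub {
				if strings.EqualFold(m, k) {
					form[k] = "*****"
					continue loop0
				}
			}
			maskSecretKeys(v)
		}
	}
}

func main() {
	var body map[string]interface{}
	_ = json.Unmarshal([]byte(`{"Name":"app","Data":"c2VjcmV0","Spec":{"PASSWORD":"hunter2"}}`), &body)
	maskSecretKeys(body)
	out, _ := json.Marshal(body)
	fmt.Println(string(out)) // {"Data":"*****","Name":"app","Spec":{"PASSWORD":"*****"}}
}
```

Unlike the old path-based variant, nothing depends on the request URI, which is why the route-specific "/secrets/create" redaction could be dropped.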
@@ -463,10 +463,6 @@ definitions:
type: "array"
items:
$ref: "#/definitions/DeviceRequest"
DiskQuota:
description: "Disk limit (in bytes)."
type: "integer"
format: "int64"
KernelMemory:
description: "Kernel memory limit in bytes."
type: "integer"

@@ -1145,6 +1141,7 @@ definitions:
type: "object"
additionalProperties:
type: "array"
x-nullable: true
items:
$ref: "#/definitions/PortBinding"
example:

@@ -1169,7 +1166,6 @@ definitions:
PortBinding represents a binding between a host IP address and a host
port.
type: "object"
x-nullable: true
properties:
HostIp:
description: "Host IP address that the container's port is mapped to."

@@ -3809,7 +3805,7 @@ definitions:
description: |
The driver to use for managing cgroups.
type: "string"
enum: ["cgroupfs", "systemd"]
enum: ["cgroupfs", "systemd", "none"]
default: "cgroupfs"
example: "cgroupfs"
NEventsListener:

@@ -4044,7 +4040,7 @@ definitions:
SecurityOptions:
description: |
List of security features that are enabled on the daemon, such as
apparmor, seccomp, SELinux, and user-namespaces (userns).
apparmor, seccomp, SELinux, user-namespaces (userns), and rootless.

Additional configuration options for each security feature may
be present, and are included as a comma-separated list of key/value

@@ -4057,6 +4053,7 @@ definitions:
- "name=seccomp,profile=default"
- "name=selinux"
- "name=userns"
- "name=rootless"
ProductLicense:
description: |
Reports a summary of the product license on the daemon.

@@ -5466,7 +5463,7 @@ paths:
/containers/{id}/resize:
post:
summary: "Resize a container TTY"
description: "Resize the TTY for a container. You must restart the container for the resize to take effect."
description: "Resize the TTY for a container."
operationId: "ContainerResize"
consumes:
- "application/octet-stream"

@@ -6220,12 +6217,17 @@ paths:
in: "query"
description: "If “1”, “true”, or “True” then it will be an error if unpacking the given content would cause an existing directory to be replaced with a non-directory and vice versa."
type: "string"
- name: "copyUIDGID"
in: "query"
description: "If “1”, “true”, then it will copy UID/GID maps to the dest file or dir"
type: "string"
- name: "inputStream"
in: "body"
required: true
description: "The input stream must be a tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz."
schema:
type: "string"
format: "binary"
tags: ["Container"]
/containers/prune:
post:
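The new `copyUIDGID` query parameter documented above is exposed in the Go client as a boolean option on `CopyToContainer`. A hedged sketch; the container ID ("mycontainer") and archive path ("app.tar") are placeholders, and the imports assume the `github.com/docker/docker` client SDK of this era:

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		log.Fatal(err)
	}
	// app.tar is a hypothetical tar archive to upload into the container.
	archive, err := os.Open("app.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer archive.Close()

	// CopyUIDGID maps to the `copyUIDGID` query parameter of
	// PUT /containers/{id}/archive described above.
	err = cli.CopyToContainer(context.Background(), "mycontainer", "/srv",
		archive, types.CopyToContainerOptions{CopyUIDGID: true})
	if err != nil {
		log.Fatal(err)
	}
}
```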
@@ -9110,7 +9112,9 @@ paths:
type: "string"
RemoteAddrs:
description: "Addresses of manager nodes already participating in the swarm."
type: "string"
type: "array"
items:
type: "string"
JoinToken:
description: "Secret token for joining this swarm."
type: "string"
@@ -1,4 +1,4 @@
package {{ .Package }}
package {{ .Package }} // import "github.com/docker/docker/api/types/{{ .Package }}"

// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE

@@ -1,4 +1,4 @@
package container
package container // import "github.com/docker/docker/api/types/container"

// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE

@@ -1,4 +1,4 @@
package container
package container // import "github.com/docker/docker/api/types/container"

// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE

@@ -1,4 +1,4 @@
package container
package container // import "github.com/docker/docker/api/types/container"

// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE

@@ -1,4 +1,4 @@
package container
package container // import "github.com/docker/docker/api/types/container"

// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE

@@ -1,4 +1,4 @@
package container
package container // import "github.com/docker/docker/api/types/container"

// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE

@@ -338,7 +338,6 @@ type Resources struct {
Devices []DeviceMapping // List of devices to map inside the container
DeviceCgroupRules []string // List of rule to be added to the device cgroup
DeviceRequests []DeviceRequest // List of device requests for device drivers
DiskQuota int64 // Disk limit (in bytes)
KernelMemory int64 // Kernel memory limit (in bytes)
KernelMemoryTCP int64 // Hard limit for kernel TCP buffer memory (in bytes)
MemoryReservation int64 // Memory soft limit (in bytes)

@@ -1,4 +1,4 @@
package image
package image // import "github.com/docker/docker/api/types/image"

// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE

@@ -1,4 +1,4 @@
package volume
package volume // import "github.com/docker/docker/api/types/volume"

// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE

@@ -1,4 +1,4 @@
package volume
package volume // import "github.com/docker/docker/api/types/volume"

// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE
@@ -135,7 +135,7 @@ func (is *imageSource) resolveRemote(ctx context.Context, ref string, platform *
dt []byte
}
res, err := is.g.Do(ctx, ref, func(ctx context.Context) (interface{}, error) {
dgst, dt, err := imageutil.Config(ctx, ref, is.getResolver(ctx, is.ResolverOpt, ref, sm), is.ContentStore, platform)
dgst, dt, err := imageutil.Config(ctx, ref, is.getResolver(ctx, is.ResolverOpt, ref, sm), is.ContentStore, nil, platform)
if err != nil {
return nil, err
}

@@ -545,10 +545,10 @@ func (p *puller) Snapshot(ctx context.Context) (cache.ImmutableRef, error) {

r := image.NewRootFS()
rootFS, release, err := p.is.DownloadManager.Download(ctx, *r, runtime.GOOS, layers, pkgprogress.ChanOutput(pchan))
stopProgress()
if err != nil {
return nil, err
}
stopProgress()

ref, err := p.is.CacheAccessor.GetFromSnapshotter(ctx, string(rootFS.ChainID()), cache.WithDescription(fmt.Sprintf("pulled from %s", p.ref)))
release()

@@ -842,7 +842,7 @@ func (r *resolverCache) Add(ctx context.Context, ref string, resolver remotes.Re
r.mu.Lock()
defer r.mu.Unlock()

ref = r.domain(ref) + "-" + session.FromContext(ctx)
ref = r.repo(ref) + "-" + session.FromContext(ctx)

cr, ok := r.m[ref]
cr.timeout = time.Now().Add(time.Minute)

@@ -855,19 +855,19 @@ func (r *resolverCache) Add(ctx context.Context, ref string, resolver remotes.Re
return &cr
}

func (r *resolverCache) domain(refStr string) string {
func (r *resolverCache) repo(refStr string) string {
ref, err := distreference.ParseNormalizedNamed(refStr)
if err != nil {
return refStr
}
return distreference.Domain(ref)
return ref.Name()
}

func (r *resolverCache) Get(ctx context.Context, ref string) remotes.Resolver {
r.mu.Lock()
defer r.mu.Unlock()

ref = r.domain(ref) + "-" + session.FromContext(ctx)
ref = r.repo(ref) + "-" + session.FromContext(ctx)

cr, ok := r.m[ref]
if !ok {
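The `domain` to `repo` rename above changes the resolver-cache key from the registry host to the fully-qualified repository name, so different repositories on the same registry no longer share a cached resolver. Assuming `distreference` aliases `github.com/docker/distribution/reference` (as is conventional in this package), the difference between the two keys is easy to see:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	for _, refStr := range []string{"ubuntu", "myregistry.example.com/team/app"} {
		ref, err := reference.ParseNormalizedNamed(refStr)
		if err != nil {
			panic(err)
		}
		// Old key: only the registry domain (e.g. "docker.io").
		// New key: the full repository name (e.g. "docker.io/library/ubuntu").
		fmt.Printf("%-35s domain=%-25s name=%s\n",
			refStr, reference.Domain(ref), ref.Name())
	}
}
```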
@@ -26,9 +26,10 @@ var keySize = []byte("size")

// Opt defines options for creating the snapshotter
type Opt struct {
GraphDriver graphdriver.Driver
LayerStore layer.Store
Root string
GraphDriver graphdriver.Driver
LayerStore layer.Store
Root string
IdentityMapping *idtools.IdentityMapping
}

type graphIDRegistrar interface {

@@ -74,8 +75,12 @@ func NewSnapshotter(opt Opt) (snapshot.SnapshotterBase, error) {
return s, nil
}

func (s *snapshotter) Name() string {
return "default"
}

func (s *snapshotter) IdentityMapping() *idtools.IdentityMapping {
return nil
return s.opt.IdentityMapping
}

func (s *snapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) error {

@@ -249,6 +254,7 @@ func (s *snapshotter) Mounts(ctx context.Context, key string) (snapshot.Mountabl
id := identity.NewID()
var rwlayer layer.RWLayer
return &mountable{
idmap: s.opt.IdentityMapping,
acquire: func() ([]mount.Mount, error) {
rwlayer, err = s.opt.LayerStore.CreateRWLayer(id, l.ChainID(), nil)
if err != nil {

@@ -274,6 +280,7 @@ func (s *snapshotter) Mounts(ctx context.Context, key string) (snapshot.Mountabl
id, _ := s.getGraphDriverID(key)

return &mountable{
idmap: s.opt.IdentityMapping,
acquire: func() ([]mount.Mount, error) {
rootfs, err := s.opt.GraphDriver.Get(id, "")
if err != nil {

@@ -436,6 +443,7 @@ type mountable struct {
acquire func() ([]mount.Mount, error)
release func() error
refCount int
idmap *idtools.IdentityMapping
}

func (m *mountable) Mount() ([]mount.Mount, error) {

@@ -472,9 +480,12 @@ func (m *mountable) Release() error {
}

m.mounts = nil
defer func() {
m.release = nil
}()
return m.release()
}

func (m *mountable) IdentityMapping() *idtools.IdentityMapping {
return nil
return m.idmap
}
@@ -17,6 +17,7 @@ import (
"github.com/docker/docker/builder"
"github.com/docker/docker/daemon/config"
"github.com/docker/docker/daemon/images"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/streamformatter"
"github.com/docker/docker/pkg/system"
"github.com/docker/libnetwork"

@@ -73,6 +74,8 @@ type Opt struct {
ResolverOpt resolver.ResolveOptionsFunc
BuilderConfig config.BuilderConfig
Rootless bool
IdentityMapping *idtools.IdentityMapping
DNSConfig config.DNSConfig
}

// Builder can build using BuildKit backend

@@ -88,6 +91,10 @@ type Builder struct {
func New(opt Opt) (*Builder, error) {
reqHandler := newReqBodyHandler(tracing.DefaultTransport)

if opt.IdentityMapping != nil && opt.IdentityMapping.Empty() {
opt.IdentityMapping = nil
}

c, err := newController(reqHandler, opt)
if err != nil {
return nil, err
@@ -38,7 +38,7 @@ import (
)

func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
if err := os.MkdirAll(opt.Root, 0700); err != nil {
if err := os.MkdirAll(opt.Root, 0711); err != nil {
return nil, err
}

@@ -55,9 +55,10 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
}

sbase, err := snapshot.NewSnapshotter(snapshot.Opt{
GraphDriver: driver,
LayerStore: dist.LayerStore,
Root: root,
GraphDriver: driver,
LayerStore: dist.LayerStore,
Root: root,
IdentityMapping: opt.IdentityMapping,
})
if err != nil {
return nil, err

@@ -112,7 +113,9 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
return nil, err
}

exec, err := newExecutor(root, opt.DefaultCgroupParent, opt.NetworkController, opt.Rootless)
dns := getDNSConfig(opt.DNSConfig)

exec, err := newExecutor(root, opt.DefaultCgroupParent, opt.NetworkController, dns, opt.Rootless, opt.IdentityMapping)
if err != nil {
return nil, err
}
@@ -8,8 +8,11 @@ import (
"strconv"
"sync"

"github.com/docker/docker/daemon/config"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/libnetwork"
"github.com/moby/buildkit/executor"
"github.com/moby/buildkit/executor/oci"
"github.com/moby/buildkit/executor/runcexecutor"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/solver/pb"

@@ -20,7 +23,7 @@ import (

const networkName = "bridge"

func newExecutor(root, cgroupParent string, net libnetwork.NetworkController, rootless bool) (executor.Executor, error) {
func newExecutor(root, cgroupParent string, net libnetwork.NetworkController, dnsConfig *oci.DNSConfig, rootless bool, idmap *idtools.IdentityMapping) (executor.Executor, error) {
networkProviders := map[pb.NetMode]network.Provider{
pb.NetMode_UNSET: &bridgeProvider{NetworkController: net, Root: filepath.Join(root, "net")},
pb.NetMode_HOST: network.NewHostProvider(),

@@ -31,6 +34,9 @@ func newExecutor(root, cgroupParent string, net libnetwork.NetworkController, ro
CommandCandidates: []string{"runc"},
DefaultCgroupParent: cgroupParent,
Rootless: rootless,
NoPivot: os.Getenv("DOCKER_RAMDISK") != "",
IdentityMapping: idmap,
DNS: dnsConfig,
}, networkProviders)
}

@@ -114,3 +120,14 @@ func (iface *lnInterface) Close() error {
}
return iface.err
}

func getDNSConfig(cfg config.DNSConfig) *oci.DNSConfig {
if cfg.DNS != nil || cfg.DNSSearch != nil || cfg.DNSOptions != nil {
return &oci.DNSConfig{
Nameservers: cfg.DNS,
SearchDomains: cfg.DNSSearch,
Options: cfg.DNSOptions,
}
}
return nil
}
@@ -5,12 +5,15 @@ import (
"errors"
"io"

"github.com/docker/docker/daemon/config"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/libnetwork"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/executor"
"github.com/moby/buildkit/executor/oci"
)

func newExecutor(_, _ string, _ libnetwork.NetworkController, _ bool) (executor.Executor, error) {
func newExecutor(_, _ string, _ libnetwork.NetworkController, _ *oci.DNSConfig, _ bool, _ *idtools.IdentityMapping) (executor.Executor, error) {
return &winExecutor{}, nil
}

@@ -20,3 +23,7 @@ type winExecutor struct {
func (e *winExecutor) Exec(ctx context.Context, meta executor.Meta, rootfs cache.Mountable, mounts []executor.Mount, stdin io.ReadCloser, stdout, stderr io.WriteCloser) error {
return errors.New("buildkit executor not implemented for windows")
}

func getDNSConfig(config.DNSConfig) *oci.DNSConfig {
return nil
}
@@ -137,6 +137,37 @@ func normalizeLayersAndHistory(diffs []digest.Digest, history []ocispec.History,
history[i] = h
}

// Find the first new layer time. Otherwise, the history item for a first
// metadata command would be the creation time of a base image layer.
// If there is no such then the last layer with timestamp.
var created *time.Time
var noCreatedTime bool
for _, h := range history {
if h.Created != nil {
created = h.Created
if noCreatedTime {
break
}
} else {
noCreatedTime = true
}
}

// Fill in created times for all history items to be either the first new
// layer time or the previous layer.
noCreatedTime = false
for i, h := range history {
if h.Created != nil {
if noCreatedTime {
created = h.Created
}
} else {
noCreatedTime = true
h.Created = created
}
history[i] = h
}

return diffs, history
}
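The two passes added above are easier to follow on a toy history. This runnable miniature applies the same logic to bare `*time.Time` values (the timestamps are made up); entries without a time inherit either the first new-layer time or the most recent preceding one:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	t1 := time.Date(2019, 8, 1, 0, 0, 0, 0, time.UTC)
	t2 := t1.Add(time.Hour)
	// A history where the middle entries (metadata-only commands) carry no
	// timestamp: [t1, nil, nil, t2].
	history := []*time.Time{&t1, nil, nil, &t2}

	// Pass 1: find the first timestamp that follows a missing one, so
	// metadata commands are not attributed to the base image's creation time.
	var created *time.Time
	var noCreatedTime bool
	for _, h := range history {
		if h != nil {
			created = h
			if noCreatedTime {
				break
			}
		} else {
			noCreatedTime = true
		}
	}

	// Pass 2: fill the gaps with that time (or the previous layer's time).
	noCreatedTime = false
	for i, h := range history {
		if h != nil {
			if noCreatedTime {
				created = h
			}
		} else {
			noCreatedTime = true
			history[i] = created
		}
	}

	for _, h := range history {
		fmt.Println(h.Format(time.RFC3339)) // t1, then t2 three times
	}
}
```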
@@ -7,6 +7,7 @@ import (
"io/ioutil"
nethttp "net/http"
"runtime"
"strings"
"time"

"github.com/containerd/containerd/content"

@@ -43,6 +44,7 @@ import (
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
bolt "go.etcd.io/bbolt"
)

const labelCreatedAt = "buildkit/createdat"

@@ -257,6 +259,47 @@ func (w *Worker) GetRemote(ctx context.Context, ref cache.ImmutableRef, createIf
}, nil
}

// PruneCacheMounts removes the current cache snapshots for specified IDs
func (w *Worker) PruneCacheMounts(ctx context.Context, ids []string) error {
mu := ops.CacheMountsLocker()
mu.Lock()
defer mu.Unlock()

for _, id := range ids {
id = "cache-dir:" + id
sis, err := w.MetadataStore.Search(id)
if err != nil {
return err
}
for _, si := range sis {
for _, k := range si.Indexes() {
if k == id || strings.HasPrefix(k, id+":") {
if siCached := w.CacheManager.Metadata(si.ID()); siCached != nil {
si = siCached
}
if err := cache.CachePolicyDefault(si); err != nil {
return err
}
si.Queue(func(b *bolt.Bucket) error {
return si.SetValue(b, k, nil)
})
if err := si.Commit(); err != nil {
return err
}
// if ref is unused try to clean it up right away by releasing it
if mref, err := w.CacheManager.GetMutable(ctx, si.ID()); err == nil {
go mref.Release(context.TODO())
}
break
}
}
}
}

ops.ClearActiveCacheMounts()
return nil
}

// FromRemote converts a remote snapshot reference to a local one
func (w *Worker) FromRemote(ctx context.Context, remote *solver.Remote) (cache.ImmutableRef, error) {
rootfs, err := getLayers(ctx, remote.Descriptors)
@@ -556,13 +556,15 @@ func copyFile(archiver Archiver, source, dest *copyEndpoint, identity *idtools.I
return errors.Wrapf(err, "failed to create new directory")
}
} else {
// Normal containers
if identity == nil {
if err := os.MkdirAll(filepath.Dir(dest.path), 0755); err != nil {
// Use system.MkdirAll here, which is a custom version of os.MkdirAll
// modified for use on Windows to handle volume GUID paths (\\?\{dae8d3ac-b9a1-11e9-88eb-e8554b2ba1db}\path\)
if err := system.MkdirAll(filepath.Dir(dest.path), 0755, ""); err != nil {
return err
}
} else {
if err := idtools.MkdirAllAndChownNew(filepath.Dir(dest.path), 0755, *identity); err != nil {
// Normal containers
return errors.Wrapf(err, "failed to create new directory")
}
}

@@ -31,6 +31,8 @@ func (cli *Client) Ping(ctx context.Context) (types.Ping, error) {
// Server handled the request, so parse the response
return parsePingResponse(cli, serverResp)
}
} else if IsErrConnectionFailed(err) {
return ping, err
}

req, err = cli.buildRequest("GET", path.Join(cli.basePath, "/_ping"), nil, nil)
@@ -12,6 +12,8 @@ import (
const (
// defaultShutdownTimeout is the default shutdown timeout for the daemon
defaultShutdownTimeout = 15
// defaultTrustKeyFile is the default filename for the trust key
defaultTrustKeyFile = "key.json"
)

// installCommonConfigFlags adds flags to the pflag.FlagSet to configure the daemon

@@ -9,12 +9,11 @@ import (
"github.com/docker/docker/daemon/config"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/homedir"
"github.com/docker/docker/rootless"
"github.com/spf13/pflag"
)

func getDefaultPidFile() (string, error) {
if !rootless.RunningWithNonRootUsername() {
if !honorXDG {
return "/var/run/docker.pid", nil
}
runtimeDir, err := homedir.GetRuntimeDir()

@@ -25,7 +24,7 @@ func getDefaultPidFile() (string, error) {
}

func getDefaultDataRoot() (string, error) {
if !rootless.RunningWithNonRootUsername() {
if !honorXDG {
return "/var/lib/docker", nil
}
dataHome, err := homedir.GetDataHome()

@@ -36,7 +35,7 @@ func getDefaultDataRoot() (string, error) {
}

func getDefaultExecRoot() (string, error) {
if !rootless.RunningWithNonRootUsername() {
if !honorXDG {
return "/var/run/docker", nil
}
runtimeDir, err := homedir.GetRuntimeDir()
@@ -3,10 +3,13 @@
package main

import (
"os/exec"

"github.com/docker/docker/daemon/config"
"github.com/docker/docker/opts"
"github.com/docker/docker/rootless"
"github.com/docker/go-units"
"github.com/pkg/errors"
"github.com/spf13/pflag"
)

@@ -35,7 +38,16 @@ func installConfigFlags(conf *config.Config, flags *pflag.FlagSet) error {
flags.BoolVar(&conf.BridgeConfig.EnableIPv6, "ipv6", false, "Enable IPv6 networking")
flags.StringVar(&conf.BridgeConfig.FixedCIDRv6, "fixed-cidr-v6", "", "IPv6 subnet for fixed IPs")
flags.BoolVar(&conf.BridgeConfig.EnableUserlandProxy, "userland-proxy", true, "Use userland proxy for loopback traffic")
flags.StringVar(&conf.BridgeConfig.UserlandProxyPath, "userland-proxy-path", "", "Path to the userland proxy binary")
defaultUserlandProxyPath := ""
if rootless.RunningWithRootlessKit() {
var err error
// use rootlesskit-docker-proxy for exposing the ports in RootlessKit netns to the initial namespace.
defaultUserlandProxyPath, err = exec.LookPath(rootless.RootlessKitDockerProxyBinary)
if err != nil {
return errors.Wrapf(err, "running with RootlessKit, but %s not installed", rootless.RootlessKitDockerProxyBinary)
}
}
flags.StringVar(&conf.BridgeConfig.UserlandProxyPath, "userland-proxy-path", defaultUserlandProxyPath, "Path to the userland proxy binary")
flags.StringVar(&conf.CgroupParent, "cgroup-parent", "", "Set parent cgroup for all containers")
flags.StringVar(&conf.RemappedRoot, "userns-remap", "", "User/Group setting for user namespaces")
flags.BoolVar(&conf.LiveRestoreEnabled, "live-restore", false, "Enable live restore of docker when containers are still running")

@@ -49,7 +61,8 @@ func installConfigFlags(conf *config.Config, flags *pflag.FlagSet) error {
flags.BoolVar(&conf.NoNewPrivileges, "no-new-privileges", false, "Set no-new-privileges by default for new containers")
flags.StringVar(&conf.IpcMode, "default-ipc-mode", config.DefaultIpcMode, `Default mode for containers ipc ("shareable" | "private")`)
flags.Var(&conf.NetworkConfig.DefaultAddressPools, "default-address-pool", "Default address pools for node specific local networks")
// Mostly users don't need to set this flag explicitly.
flags.BoolVar(&conf.Rootless, "rootless", rootless.RunningWithNonRootUsername(), "Enable rootless mode (experimental)")
// rootless needs to be explicitly specified for running "rootful" dockerd in rootless dockerd (#38702)
// Note that defaultUserlandProxyPath and honorXDG are configured according to the value of rootless.RunningWithRootlessKit, not the value of --rootless.
flags.BoolVar(&conf.Rootless, "rootless", rootless.RunningWithRootlessKit(), "Enable rootless mode; typically used with RootlessKit (experimental)")
return nil
}
@@ -91,6 +91,8 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
return err
}

logrus.Info("Starting up")

cli.configFile = &opts.configFile
cli.flags = opts.flags

@@ -103,6 +105,12 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
if cli.Config.IsRootless() {
logrus.Warn("Running in rootless mode. Cgroups, AppArmor, and CRIU are disabled.")
}
if rootless.RunningWithRootlessKit() {
logrus.Info("Running with RootlessKit integration")
if !cli.Config.IsRootless() {
return fmt.Errorf("rootless mode needs to be enabled for running with RootlessKit")
}
}
} else {
if cli.Config.IsRootless() {
return fmt.Errorf("rootless mode is supported only when running in experimental mode")

@@ -260,6 +268,7 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
return errors.Wrap(errAPI, "shutting down due to ServeAPI error")
}

logrus.Info("Daemon shutdown complete")
return nil
}

@@ -309,6 +318,8 @@ func newRouterOptions(config *config.Config, d *daemon.Daemon) (routerOptions, e
ResolverOpt: d.NewResolveOptionsFunc(),
BuilderConfig: config.Builder,
Rootless: d.Rootless(),
IdentityMapping: d.IdentityMapping(),
DNSConfig: config.DNSConfig,
})
if err != nil {
return opts, err

@@ -415,6 +426,14 @@ func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) {
conf.CommonTLSOptions.KeyFile = opts.TLSOptions.KeyFile
}

if conf.TrustKeyPath == "" {
daemonConfDir, err := getDaemonConfDir(conf.Root)
if err != nil {
return nil, err
}
conf.TrustKeyPath = filepath.Join(daemonConfDir, defaultTrustKeyFile)
}

if flags.Changed("graph") && flags.Changed("data-root") {
return nil, errors.New(`cannot specify both "--graph" and "--data-root" option`)
}

@@ -591,7 +610,7 @@ func loadListeners(cli *DaemonCli, serverConfig *apiserver.Config) ([]string, er
var hosts []string
for i := 0; i < len(cli.Config.Hosts); i++ {
var err error
if cli.Config.Hosts[i], err = dopts.ParseHost(cli.Config.TLS, rootless.RunningWithNonRootUsername(), cli.Config.Hosts[i]); err != nil {
if cli.Config.Hosts[i], err = dopts.ParseHost(cli.Config.TLS, honorXDG, cli.Config.Hosts[i]); err != nil {
return nil, errors.Wrapf(err, "error parsing -H %s", cli.Config.Hosts[i])
}

@@ -668,9 +687,9 @@ func validateAuthzPlugins(requestedPlugins []string, pg plugingetter.PluginGette
return nil
}

func systemContainerdRunning(isRootless bool) (string, bool, error) {
func systemContainerdRunning(honorXDG bool) (string, bool, error) {
addr := containerddefaults.DefaultAddress
if isRootless {
if honorXDG {
runtimeDir, err := homedir.GetRuntimeDir()
if err != nil {
return "", false, err
@@ -18,14 +18,14 @@ import (
"github.com/docker/docker/daemon/config"
"github.com/docker/docker/libcontainerd/supervisor"
"github.com/docker/docker/pkg/homedir"
"github.com/docker/docker/rootless"
"github.com/docker/libnetwork/portallocator"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)

func getDefaultDaemonConfigDir() (string, error) {
if !rootless.RunningWithNonRootUsername() {
if !honorXDG {
return "/etc/docker", nil
}
// NOTE: CLI uses ~/.docker while the daemon uses ~/.config/docker, because

@@ -58,6 +58,10 @@ func setDefaultUmask() error {
return nil
}

func getDaemonConfDir(_ string) (string, error) {
return getDefaultDaemonConfigDir()
}

func (cli *DaemonCli) getPlatformContainerdDaemonOpts() ([]supervisor.DaemonOpt, error) {
opts := []supervisor.DaemonOpt{
supervisor.WithOOMScore(cli.Config.OOMScoreAdjust),

@@ -148,11 +152,12 @@ func newCgroupParent(config *config.Config) string {
func (cli *DaemonCli) initContainerD(ctx context.Context) (func(time.Duration) error, error) {
var waitForShutdown func(time.Duration) error
if cli.Config.ContainerdAddr == "" {
systemContainerdAddr, ok, err := systemContainerdRunning(cli.Config.IsRootless())
systemContainerdAddr, ok, err := systemContainerdRunning(honorXDG)
if err != nil {
return nil, errors.Wrap(err, "could not determine whether the system containerd is running")
}
if !ok {
logrus.Debug("Containerd not running, starting daemon managed containerd")
opts, err := cli.getContainerdDaemonOpts()
if err != nil {
return nil, errors.Wrap(err, "failed to generate containerd options")

@@ -162,6 +167,7 @@ func (cli *DaemonCli) initContainerD(ctx context.Context) (func(time.Duration) e
if err != nil {
return nil, errors.Wrap(err, "failed to start containerd")
}
logrus.Debug("Started daemon managed containerd")
cli.Config.ContainerdAddr = r.Address()

// Try to wait for containerd to shutdown
@@ -5,6 +5,7 @@ import (
"fmt"
"net"
"os"
"path/filepath"
"time"

"github.com/docker/docker/daemon/config"

@@ -23,6 +24,10 @@ func setDefaultUmask() error {
return nil
}

func getDaemonConfDir(root string) (string, error) {
return filepath.Join(root, `\config`), nil
}

// preNotifySystem sends a message to the host when the API is active, but before the daemon is
func preNotifySystem() {
// start the service now to prevent timeouts waiting for daemon to start
@@ -10,11 +10,16 @@ import (
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/pkg/reexec"
"github.com/docker/docker/pkg/term"
"github.com/docker/docker/rootless"
"github.com/moby/buildkit/util/apicaps"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)

var (
honorXDG bool
)

func newDaemonCommand() (*cobra.Command, error) {
opts := newDaemonOptions(config.New())

@@ -53,6 +58,14 @@ func init() {
if dockerversion.ProductName != "" {
apicaps.ExportedProduct = dockerversion.ProductName
}
// When running with RootlessKit, $XDG_RUNTIME_DIR, $XDG_DATA_HOME, and $XDG_CONFIG_HOME needs to be
// honored as the default dirs, because we are unlikely to have permissions to access the system-wide
// directories.
//
// Note that even running with --rootless, when not running with RootlessKit, honorXDG needs to be kept false,
// because the system-wide directories in the current mount namespace are expected to be accessible.
// ("rootful" dockerd in rootless dockerd, #38702)
honorXDG = rootless.RunningWithRootlessKit()
}

func main() {
@@ -136,7 +136,7 @@ func (container *Container) CopyImagePathContent(v volume.Volume, destination st
return err
}

id := stringid.GenerateNonCryptoID()
id := stringid.GenerateRandomID()
path, err := v.Mount(id)
if err != nil {
return err

@@ -153,7 +153,6 @@ func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfi
resources.CpusetMems != "" ||
len(resources.Devices) != 0 ||
len(resources.DeviceCgroupRules) != 0 ||
resources.DiskQuota != 0 ||
resources.KernelMemory != 0 ||
resources.MemoryReservation != 0 ||
resources.MemorySwap != 0 ||
@@ -9,7 +9,9 @@
# External dependencies:
# * newuidmap and newgidmap needs to be installed.
# * /etc/subuid and /etc/subgid needs to be configured for the current user.
# * Either slirp4netns (v0.3+) or VPNKit needs to be installed.
# * Either one of slirp4netns (v0.3+), VPNKit, lxc-user-nic needs to be installed.
# slirp4netns is used by default if installed. Otherwise fallsback to VPNKit.
# The default value can be overridden with $DOCKERD_ROOTLESS_ROOTLESSKIT_NET=(slirp4netns|vpnkit|lxc-user-nic)
#
# See the documentation for the further information.

@@ -35,24 +37,32 @@ if [ -z $rootlesskit ]; then
exit 1
fi

net=""
mtu=""
if which slirp4netns >/dev/null 2>&1; then
if slirp4netns --help | grep -- --disable-host-loopback; then
net=slirp4netns
mtu=65520
else
echo "slirp4netns does not support --disable-host-loopback. Falling back to VPNKit."
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_NET:=}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_MTU:=}"
net=$DOCKERD_ROOTLESS_ROOTLESSKIT_NET
mtu=$DOCKERD_ROOTLESS_ROOTLESSKIT_MTU
if [ -z $net ]; then
if which slirp4netns >/dev/null 2>&1; then
if slirp4netns --help | grep -- --disable-host-loopback; then
net=slirp4netns
if [ -z $mtu ]; then
mtu=65520
fi
else
echo "slirp4netns does not support --disable-host-loopback. Falling back to VPNKit."
fi
fi
if [ -z $net ]; then
if which vpnkit >/dev/null 2>&1; then
net=vpnkit
else
echo "Either slirp4netns (v0.3+) or vpnkit needs to be installed"
exit 1
fi
fi
fi
if [ -z $net ]; then
if which vpnkit >/dev/null 2>&1; then
net=vpnkit
mtu=1500
else
echo "Either slirp4netns (v0.3+) or vpnkit needs to be installed"
exit 1
fi
if [ -z $mtu ]; then
mtu=1500
fi

if [ -z $_DOCKERD_ROOTLESS_CHILD ]; then

@@ -65,8 +75,9 @@ if [ -z $_DOCKERD_ROOTLESS_CHILD ]; then
# namespace from being unexpectedly unmounted when `/etc/resolv.conf` is recreated on the host
# (by either systemd-networkd or NetworkManager)
# * /run: copy-up is required so that we can create /run/docker (hardcoded for plugins) in our namespace
$rootlesskit \
--net=$net --mtu=$mtu --disable-host-loopback --port-driver=builtin \
exec $rootlesskit \
--net=$net --mtu=$mtu \
--disable-host-loopback --port-driver=builtin \
--copy-up=/etc --copy-up=/run \
$DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS \
$0 $@

@@ -75,5 +86,5 @@ else
# remove the symlinks for the existing files in the parent namespace if any,
# so that we can create our own files in our mount namespace.
rm -f /run/docker /run/xtables.lock
dockerd $@
exec dockerd $@
fi
@@ -31,18 +31,19 @@ type archiver interface {
|
||||
}
|
||||
|
||||
// helper functions to extract or archive
|
||||
func extractArchive(i interface{}, src io.Reader, dst string, opts *archive.TarOptions) error {
|
||||
func extractArchive(i interface{}, src io.Reader, dst string, opts *archive.TarOptions, root string) error {
|
||||
if ea, ok := i.(extractor); ok {
|
||||
return ea.ExtractArchive(src, dst, opts)
|
||||
}
|
||||
return chrootarchive.Untar(src, dst, opts)
|
||||
|
||||
return chrootarchive.UntarWithRoot(src, dst, opts, root)
|
||||
}
|
||||
|
||||
func archivePath(i interface{}, src string, opts *archive.TarOptions) (io.ReadCloser, error) {
|
||||
func archivePath(i interface{}, src string, opts *archive.TarOptions, root string) (io.ReadCloser, error) {
|
||||
if ap, ok := i.(archiver); ok {
|
||||
return ap.ArchivePath(src, opts)
|
||||
}
|
||||
return archive.TarWithOptions(src, opts)
|
||||
return chrootarchive.Tar(src, opts, root)
|
||||
}
|
||||
|
||||
// ContainerCopy performs a deprecated operation of archiving the resource at
|
||||
@@ -235,10 +236,16 @@ func (daemon *Daemon) containerArchivePath(container *container.Container, path
|
||||
if driver.Base(resolvedPath) == "." {
|
||||
resolvedPath += string(driver.Separator()) + "."
|
||||
}
|
||||
sourceDir, sourceBase := driver.Dir(resolvedPath), driver.Base(resolvedPath)
|
||||
|
||||
sourceDir := resolvedPath
|
||||
sourceBase := "."
|
||||
|
||||
if stat.Mode&os.ModeDir == 0 { // not dir
|
||||
sourceDir, sourceBase = driver.Split(resolvedPath)
|
||||
}
|
||||
opts := archive.TarResourceRebaseOpts(sourceBase, driver.Base(absPath))
|
||||
|
||||
data, err := archivePath(driver, sourceDir, opts)
|
||||
data, err := archivePath(driver, sourceDir, opts, container.BaseFS.Path())
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@@ -367,7 +374,7 @@ func (daemon *Daemon) containerExtractToDir(container *container.Container, path
|
||||
}
|
||||
}
|
||||
|
||||
if err := extractArchive(driver, content, resolvedPath, options); err != nil {
|
||||
if err := extractArchive(driver, content, resolvedPath, options, container.BaseFS.Path()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -425,14 +432,11 @@ func (daemon *Daemon) containerCopy(container *container.Container, resource str
|
||||
d, f := driver.Split(basePath)
|
||||
basePath = d
|
||||
filter = []string{f}
|
||||
} else {
|
||||
filter = []string{driver.Base(basePath)}
|
||||
basePath = driver.Dir(basePath)
|
||||
}
|
||||
archive, err := archivePath(driver, basePath, &archive.TarOptions{
|
||||
Compression: archive.Uncompressed,
|
||||
IncludeFiles: filter,
|
||||
})
|
||||
}, container.BaseFS.Path())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
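Editor's note: the extra `root` argument threaded through `extractArchive`/`archivePath` above is what lets `chrootarchive` confine tar and untar to the container's rootfs instead of trusting an already-resolved path. A minimal sketch of the path-containment half of that idea, under stated assumptions (`resolveUnderRoot` is a hypothetical helper; the real code additionally chroots so symlink races cannot escape either):

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// resolveUnderRoot joins a requested path onto an explicit root after
// cleaning it, so "../" sequences cannot climb out of the root. This is
// only the containment idea; chrootarchive also chroots into root so
// symlinks inside the rootfs cannot point back out.
func resolveUnderRoot(root, req string) (string, error) {
	p := filepath.Join(root, filepath.Clean("/"+req))
	if p != root && !strings.HasPrefix(p, root+string(filepath.Separator)) {
		return "", fmt.Errorf("%q escapes root %q", req, root)
	}
	return p, nil
}

func main() {
	// "../../etc/passwd" is neutralized to <root>/etc/passwd.
	fmt.Println(resolveUnderRoot("/var/lib/docker/rootfs", "../../etc/passwd"))
}
```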
@@ -58,6 +58,7 @@ import (
swarmnode "github.com/docker/swarmkit/node"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"google.golang.org/grpc"
)

const swarmDirName = "swarm"
@@ -399,7 +400,10 @@ func (c *Cluster) Cleanup() {
func managerStats(client swarmapi.ControlClient, currentNodeID string) (current bool, reachable int, unreachable int, err error) {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
nodes, err := client.ListNodes(ctx, &swarmapi.ListNodesRequest{})
nodes, err := client.ListNodes(
ctx, &swarmapi.ListNodesRequest{},
grpc.MaxCallRecvMsgSize(defaultRecvSizeForListResponse),
)
if err != nil {
return false, 0, 0, err
}
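Editor's note: `grpc.MaxCallRecvMsgSize` lifts gRPC's 4 MiB default receive cap for a single call, which the swarm `List*` responses can exceed on large clusters. A hedged sketch of the two ways to apply it; `defaultRecvSizeForListResponse` is defined elsewhere in this changeset, and the 32 MiB value below is an illustrative assumption, not the value the engine uses:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc"
)

func main() {
	const recvLimit = 32 * 1024 * 1024 // assumed value, for illustration only

	// Per-RPC, as the diff does: pass the CallOption to each List* call.
	perCall := grpc.MaxCallRecvMsgSize(recvLimit)

	// Or once per connection, so every call on it inherits the limit.
	perConn := grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(recvLimit))

	fmt.Printf("call option: %T, dial option: %T\n", perCall, perConn)
}
```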
@@ -6,7 +6,6 @@ import (
"net"
"strconv"
"strings"
"time"

"github.com/sirupsen/logrus"

@@ -31,10 +30,6 @@ import (
)

const (
// Explicitly use the kernel's default setting for CPU quota of 100ms.
// https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt
cpuQuotaPeriod = 100 * time.Millisecond

// systemLabelPrefix represents the reserved namespace for system labels.
systemLabelPrefix = "com.docker.swarm"
)
@@ -451,9 +446,7 @@ func (c *containerConfig) resources() enginecontainer.Resources {
}

if r.Limits.NanoCPUs > 0 {
// CPU Period must be set in microseconds.
resources.CPUPeriod = int64(cpuQuotaPeriod / time.Microsecond)
resources.CPUQuota = r.Limits.NanoCPUs * resources.CPUPeriod / 1e9
resources.NanoCPUs = r.Limits.NanoCPUs
}

return resources
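Editor's note: the removed lines above are the CFS translation this change pushes down into the engine: Swarm expresses CPU limits in NanoCPUs (1e9 equals one CPU), while the kernel wants a period/quota pair. A worked example of the deleted arithmetic, assuming the kernel's default 100ms period:

```go
package main

import "fmt"

func main() {
	const cpuQuotaPeriod = 100000 // 100ms period, in microseconds
	nanoCPUs := int64(2500000000) // a 2.5-CPU limit

	// The removed code computed quota = NanoCPUs * period / 1e9.
	quota := nanoCPUs * cpuQuotaPeriod / 1e9 // 250000us
	fmt.Printf("period=%dus quota=%dus => %.1f CPUs\n",
		cpuQuotaPeriod, quota, float64(quota)/cpuQuotaPeriod)
}
```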
@@ -369,11 +369,17 @@ func (r *controller) Shutdown(ctx context.Context) error {
}

if err := r.adapter.shutdown(ctx); err != nil {
if isUnknownContainer(err) || isStoppedContainer(err) {
return nil
if !(isUnknownContainer(err) || isStoppedContainer(err)) {
return err
}
}

return err
// Try removing networks referenced in this task in case this
// task is the last one referencing it
if err := r.adapter.removeNetworks(ctx); err != nil {
if !isUnknownContainer(err) {
return err
}
}

return nil
@@ -419,15 +425,6 @@ func (r *controller) Remove(ctx context.Context) error {
log.G(ctx).WithError(err).Debug("shutdown failed on removal")
}

// Try removing networks referenced in this task in case this
// task is the last one referencing it
if err := r.adapter.removeNetworks(ctx); err != nil {
if isUnknownContainer(err) {
return nil
}
return err
}

if err := r.adapter.remove(ctx); err != nil {
if isUnknownContainer(err) {
return nil

@@ -8,6 +8,7 @@ import (
"github.com/docker/docker/daemon/cluster/convert"
"github.com/docker/docker/errdefs"
swarmapi "github.com/docker/swarmkit/api"
"google.golang.org/grpc"
)

// GetNodes returns a list of all nodes known to a cluster.
@@ -30,7 +31,9 @@ func (c *Cluster) GetNodes(options apitypes.NodeListOptions) ([]types.Node, erro

r, err := state.controlClient.ListNodes(
ctx,
&swarmapi.ListNodesRequest{Filters: filters})
&swarmapi.ListNodesRequest{Filters: filters},
grpc.MaxCallRecvMsgSize(defaultRecvSizeForListResponse),
)
if err != nil {
return nil, err
}

@@ -7,6 +7,7 @@ import (
types "github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/daemon/cluster/convert"
swarmapi "github.com/docker/swarmkit/api"
"google.golang.org/grpc"
)

// GetSecret returns a secret from a managed swarm cluster
@@ -44,7 +45,9 @@ func (c *Cluster) GetSecrets(options apitypes.SecretListOptions) ([]types.Secret
defer cancel()

r, err := state.controlClient.ListSecrets(ctx,
&swarmapi.ListSecretsRequest{Filters: filters})
&swarmapi.ListSecretsRequest{Filters: filters},
grpc.MaxCallRecvMsgSize(defaultRecvSizeForListResponse),
)
if err != nil {
return nil, err
}

@@ -19,6 +19,7 @@ import (
swarmnode "github.com/docker/swarmkit/node"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"google.golang.org/grpc"
)

// Init initializes new cluster from user provided request.
@@ -449,7 +450,10 @@ func (c *Cluster) Info() types.Info {

info.Cluster = &swarm.ClusterInfo

if r, err := state.controlClient.ListNodes(ctx, &swarmapi.ListNodesRequest{}); err != nil {
if r, err := state.controlClient.ListNodes(
ctx, &swarmapi.ListNodesRequest{},
grpc.MaxCallRecvMsgSize(defaultRecvSizeForListResponse),
); err != nil {
info.Error = err.Error()
} else {
info.Nodes = len(r.Nodes)

@@ -63,6 +63,8 @@ var flatOptions = map[string]bool{
var skipValidateOptions = map[string]bool{
"features": true,
"builder": true,
// Corresponding flag has been removed because it was already unusable
"deprecated-key-path": true,
}

// skipDuplicates contains configuration keys that
@@ -107,6 +109,13 @@ type CommonTLSOptions struct {
KeyFile string `json:"tlskey,omitempty"`
}

// DNSConfig defines the DNS configurations.
type DNSConfig struct {
DNS []string `json:"dns,omitempty"`
DNSOptions []string `json:"dns-opts,omitempty"`
DNSSearch []string `json:"dns-search,omitempty"`
}

// CommonConfig defines the configuration of a docker daemon which is
// common across platforms.
// It includes json tags to deserialize configuration from a file
@@ -117,9 +126,6 @@ type CommonConfig struct {
AutoRestart bool `json:"-"`
Context map[string][]string `json:"-"`
DisableBridge bool `json:"-"`
DNS []string `json:"dns,omitempty"`
DNSOptions []string `json:"dns-opts,omitempty"`
DNSSearch []string `json:"dns-search,omitempty"`
ExecOptions []string `json:"exec-opts,omitempty"`
GraphDriver string `json:"storage-driver,omitempty"`
GraphOptions []string `json:"storage-opts,omitempty"`
@@ -134,6 +140,12 @@ type CommonConfig struct {
SocketGroup string `json:"group,omitempty"`
CorsHeaders string `json:"api-cors-header,omitempty"`

// TrustKeyPath is used to generate the daemon ID and for signing schema 1 manifests
// when pushing to a registry which does not support schema 2. This field is marked as
// deprecated because schema 1 manifests are deprecated in favor of schema 2 and the
// daemon ID will use a dedicated identifier not shared with exported signatures.
TrustKeyPath string `json:"deprecated-key-path,omitempty"`

// LiveRestoreEnabled determines whether we should keep containers
// alive upon daemon shutdown/start
LiveRestoreEnabled bool `json:"live-restore,omitempty"`
@@ -192,6 +204,7 @@ type CommonConfig struct {

MetricsAddress string `json:"metrics-addr"`

DNSConfig
LogConfig
BridgeConfig // bridgeConfig holds bridge network specific configuration.
NetworkConfig
@@ -239,7 +252,6 @@ func New() *Config {
config := Config{}
config.LogConfig.Config = make(map[string]string)
config.ClusterOpts = make(map[string]string)

return &config
}

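Editor's note: because `DNSConfig` is embedded in `CommonConfig` (see the `DNSConfig` line added next to `LogConfig` above), the flat `dns`/`dns-opts`/`dns-search` keys in daemon.json keep deserializing exactly as before. A minimal sketch of that field promotion, with trimmed-down stand-in structs:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Embedded anonymous structs are flattened by encoding/json, so moving
// the DNS fields into DNSConfig does not change the wire format.
type DNSConfig struct {
	DNS       []string `json:"dns,omitempty"`
	DNSSearch []string `json:"dns-search,omitempty"`
}

type CommonConfig struct {
	DNSConfig
	GraphDriver string `json:"storage-driver,omitempty"`
}

func main() {
	var c CommonConfig
	raw := []byte(`{"dns":["1.1.1.1"],"storage-driver":"overlay2"}`)
	if err := json.Unmarshal(raw, &c); err != nil {
		panic(err)
	}
	fmt.Println(c.DNS, c.GraphDriver) // promoted: c.DNS works unchanged
}
```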
@@ -244,28 +244,36 @@ func TestValidateConfigurationErrors(t *testing.T) {
{
config: &Config{
CommonConfig: CommonConfig{
DNS: []string{"1.1.1.1o"},
DNSConfig: DNSConfig{
DNS: []string{"1.1.1.1o"},
},
},
},
},
{
config: &Config{
CommonConfig: CommonConfig{
DNS: []string{"2.2.2.2", "1.1.1.1o"},
DNSConfig: DNSConfig{
DNS: []string{"2.2.2.2", "1.1.1.1o"},
},
},
},
},
{
config: &Config{
CommonConfig: CommonConfig{
DNSSearch: []string{"123456"},
DNSConfig: DNSConfig{
DNSSearch: []string{"123456"},
},
},
},
},
{
config: &Config{
CommonConfig: CommonConfig{
DNSSearch: []string{"a.b.c", "123456"},
DNSConfig: DNSConfig{
DNSSearch: []string{"a.b.c", "123456"},
},
},
},
},
@@ -329,14 +337,18 @@ func TestValidateConfiguration(t *testing.T) {
{
config: &Config{
CommonConfig: CommonConfig{
DNS: []string{"1.1.1.1"},
DNSConfig: DNSConfig{
DNS: []string{"1.1.1.1"},
},
},
},
},
{
config: &Config{
CommonConfig: CommonConfig{
DNSSearch: []string{"a.b.c"},
DNSConfig: DNSConfig{
DNSSearch: []string{"a.b.c"},
},
},
},
},

@@ -237,7 +237,10 @@ func (daemon *Daemon) buildSandboxOptions(container *container.Container) ([]lib

func (daemon *Daemon) updateNetworkSettings(container *container.Container, n libnetwork.Network, endpointConfig *networktypes.EndpointSettings) error {
if container.NetworkSettings == nil {
container.NetworkSettings = &network.Settings{Networks: make(map[string]*network.EndpointSettings)}
container.NetworkSettings = &network.Settings{}
}
if container.NetworkSettings.Networks == nil {
container.NetworkSettings.Networks = make(map[string]*network.EndpointSettings)
}

if !container.HostConfig.NetworkMode.IsHost() && containertypes.NetworkMode(n.Type()).IsHost() {
@@ -354,6 +357,15 @@ func (daemon *Daemon) findAndAttachNetwork(container *container.Container, idOrN
if container.Managed || !n.Info().Dynamic() {
return n, nil, nil
}
// Throw an error if the container is already attached to the network
if container.NetworkSettings.Networks != nil {
networkName := n.Name()
containerName := strings.TrimPrefix(container.Name, "/")
if network, ok := container.NetworkSettings.Networks[networkName]; ok && network.EndpointID != "" {
err := fmt.Errorf("%s is already attached to network %s", containerName, networkName)
return n, nil, errdefs.Conflict(err)
}
}
}

var addresses []string

@@ -41,7 +41,7 @@ func (daemon *Daemon) createContainerOSSpecificSettings(container *container.Con
}

for spec := range config.Volumes {
name := stringid.GenerateNonCryptoID()
name := stringid.GenerateRandomID()
destination := filepath.Clean(spec)

// Skip volumes for which we already have something mounted on that

@@ -38,7 +38,7 @@ func (daemon *Daemon) createContainerOSSpecificSettings(container *container.Con

// If the mountpoint doesn't have a name, generate one.
if len(mp.Name) == 0 {
mp.Name = stringid.GenerateNonCryptoID()
mp.Name = stringid.GenerateRandomID()
}

// Skip volumes for which we already have something mounted on that

@@ -335,7 +335,7 @@ func (daemon *Daemon) restore() error {
}
if !alive && process != nil {
ec, exitedAt, err = process.Delete(context.Background())
if err != nil {
if err != nil && !errdefs.IsNotFound(err) {
logrus.WithError(err).Errorf("Failed to delete container %s from containerd", c.ID)
return
}
@@ -960,7 +960,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
return nil, err
}

uuid, err := loadOrCreateUUID(filepath.Join(config.Root, "engine_uuid"))
trustKey, err := loadOrCreateTrustKey(config.TrustKeyPath)
if err != nil {
return nil, err
}
@@ -1005,7 +1005,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
return nil, errors.New("Devices cgroup isn't mounted")
}

d.ID = uuid
d.ID = trustKey.PublicKey().KeyID()
d.repository = daemonRepo
d.containers = container.NewMemoryStore()
if d.containersReplica, err = container.NewViewDB(); err != nil {
@@ -1036,6 +1036,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
MaxConcurrentUploads: *config.MaxConcurrentUploads,
ReferenceStore: rs,
RegistryService: registryService,
TrustKey: trustKey,
})

go d.execCommandGC()

@@ -9,18 +9,13 @@ import (
"strings"

"github.com/docker/docker/daemon/config"
"github.com/docker/docker/internal/procfs"
"github.com/docker/docker/pkg/fileutils"
"github.com/docker/docker/pkg/mount"
"github.com/docker/libnetwork/resolvconf"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

const (
defaultResolvConf = "/etc/resolv.conf"
alternateResolvConf = "/run/systemd/resolve/resolv.conf"
)

// On Linux, plugins use a static path for storing execution state,
// instead of deriving path from daemon's exec-root. This is because
// plugin socket files are created here and they cannot exceed max
@@ -148,20 +143,5 @@ func setupResolvConf(config *config.Config) {
if config.ResolvConf != "" {
return
}

config.ResolvConf = defaultResolvConf
pids, err := procfs.PidOf("systemd-resolved")
if err != nil {
logrus.Errorf("unable to check systemd-resolved status: %s", err)
return
}
if len(pids) > 0 && pids[0] > 0 {
_, err := os.Stat(alternateResolvConf)
if err == nil {
logrus.Infof("systemd-resolved is running, so using resolvconf: %s", alternateResolvConf)
config.ResolvConf = alternateResolvConf
return
}
logrus.Infof("systemd-resolved is running, but %s is not present, fallback to %s", alternateResolvConf, defaultResolvConf)
}
config.ResolvConf = resolvconf.Path()
}

@@ -73,6 +73,7 @@ const (
// constant for cgroup drivers
cgroupFsDriver = "cgroupfs"
cgroupSystemdDriver = "systemd"
cgroupNoneDriver = "none"

// DefaultRuntimeName is the default runtime to be used by
// containerd if none is specified
@@ -191,8 +192,8 @@ func getBlkioWeightDevices(config containertypes.Resources) ([]specs.LinuxWeight
}
weight := weightDevice.Weight
d := specs.LinuxWeightDevice{Weight: &weight}
d.Major = int64(stat.Rdev / 256)
d.Minor = int64(stat.Rdev % 256)
d.Major = int64(unix.Major(stat.Rdev))
d.Minor = int64(unix.Minor(stat.Rdev))
blkioWeightDevices = append(blkioWeightDevices, d)
}

@@ -262,8 +263,8 @@ func getBlkioThrottleDevices(devs []*blkiodev.ThrottleDevice) ([]specs.LinuxThro
return nil, err
}
d := specs.LinuxThrottleDevice{Rate: d.Rate}
d.Major = int64(stat.Rdev / 256)
d.Minor = int64(stat.Rdev % 256)
d.Major = int64(unix.Major(stat.Rdev))
d.Minor = int64(unix.Minor(stat.Rdev))
throttleDevices = append(throttleDevices, d)
}

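Editor's note: `Rdev/256` and `Rdev%256` only decode the historical 8-bit major/minor split, so devices with major numbers above 255 (common with NVMe and device-mapper) were misreported. `unix.Major`/`unix.Minor` decode the full Linux dev_t layout; a small sketch round-tripping the same values the new tests below exercise:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Major 509 overflows the old /256 decoding; Mkdev packs the full layout.
	dev := unix.Mkdev(509, 304)
	fmt.Printf("major=%d minor=%d\n", unix.Major(dev), unix.Minor(dev))
	fmt.Printf("old math: major=%d minor=%d (wrong)\n", dev/256, dev%256)
}
```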
@@ -575,6 +576,9 @@ func verifyPlatformContainerResources(resources *containertypes.Resources, sysIn
}

func (daemon *Daemon) getCgroupDriver() string {
if daemon.Rootless() {
return cgroupNoneDriver
}
cgroupDriver := cgroupFsDriver

if UsingSystemd(daemon.configStore) {
@@ -601,6 +605,9 @@ func VerifyCgroupDriver(config *config.Config) error {
if cd == "" || cd == cgroupFsDriver || cd == cgroupSystemdDriver {
return nil
}
if cd == cgroupNoneDriver {
return fmt.Errorf("native.cgroupdriver option %s is internally used and cannot be specified manually", cd)
}
return fmt.Errorf("native.cgroupdriver option %s not supported", cd)
}


@@ -6,12 +6,15 @@ import (
"errors"
"io/ioutil"
"os"
"path/filepath"
"testing"

"github.com/docker/docker/api/types/blkiodev"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/container"
"github.com/docker/docker/daemon/config"
"github.com/docker/docker/pkg/sysinfo"
"golang.org/x/sys/unix"
"gotest.tools/assert"
is "gotest.tools/assert/cmp"
)
@@ -376,3 +379,61 @@ func sysInfo(t *testing.T, opts ...func(*sysinfo.SysInfo)) sysinfo.SysInfo {
}
return si
}

const (
// prepare major 0x1FD(509 in decimal) and minor 0x130(304)
DEVNO = 0x11FD30
MAJOR = 509
MINOR = 304
WEIGHT = 1024
)

func deviceTypeMock(t *testing.T, testAndCheck func(string)) {
if os.Getuid() != 0 {
t.Skip("root required") // for mknod
}

t.Parallel()

tempDir, err := ioutil.TempDir("", "tempDevDir"+t.Name())
assert.NilError(t, err, "create temp file")
tempFile := filepath.Join(tempDir, "dev")

defer os.RemoveAll(tempDir)

if err = unix.Mknod(tempFile, unix.S_IFCHR, DEVNO); err != nil {
t.Fatalf("mknod error %s(%x): %v", tempFile, DEVNO, err)
}

testAndCheck(tempFile)
}

func TestGetBlkioWeightDevices(t *testing.T) {
deviceTypeMock(t, func(tempFile string) {
mockResource := containertypes.Resources{
BlkioWeightDevice: []*blkiodev.WeightDevice{{Path: tempFile, Weight: WEIGHT}},
}

weightDevs, err := getBlkioWeightDevices(mockResource)

assert.NilError(t, err, "getBlkioWeightDevices")
assert.Check(t, is.Len(weightDevs, 1), "getBlkioWeightDevices")
assert.Check(t, weightDevs[0].Major == MAJOR, "get major device type")
assert.Check(t, weightDevs[0].Minor == MINOR, "get minor device type")
assert.Check(t, *weightDevs[0].Weight == WEIGHT, "get device weight")
})
}

func TestGetBlkioThrottleDevices(t *testing.T) {
deviceTypeMock(t, func(tempFile string) {
mockDevs := []*blkiodev.ThrottleDevice{{Path: tempFile, Rate: WEIGHT}}

retDevs, err := getBlkioThrottleDevices(mockDevs)

assert.NilError(t, err, "getBlkioThrottleDevices")
assert.Check(t, is.Len(retDevs, 1), "getBlkioThrottleDevices")
assert.Check(t, retDevs[0].Major == MAJOR, "get major device type")
assert.Check(t, retDevs[0].Minor == MINOR, "get minor device type")
assert.Check(t, retDevs[0].Rate == WEIGHT, "get device rate")
})
}

@@ -39,7 +39,7 @@ type Config struct {
// NewConfig initializes the a new exec configuration
func NewConfig() *Config {
return &Config{
ID: stringid.GenerateNonCryptoID(),
ID: stringid.GenerateRandomID(),
StreamConfig: stream.NewConfig(),
Started: make(chan struct{}),
}

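Editor's note: the recurring `GenerateNonCryptoID` to `GenerateRandomID` rename in this changeset reflects that engine IDs are now always drawn from a cryptographic source. A sketch of what such a helper plausibly does, assuming the moby convention of 64 hex characters (the real `stringid` helper also re-rolls IDs whose truncated prefix is purely numeric; omitted here):

```go
package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
)

// generateRandomID returns 32 random bytes hex-encoded into the familiar
// 64-character container/exec ID. Illustrative stand-in, not moby's code.
func generateRandomID() string {
	b := make([]byte, 32)
	if _, err := rand.Read(b); err != nil {
		panic(err) // no entropy available: not recoverable
	}
	return hex.EncodeToString(b)
}

func main() {
	fmt.Println(generateRandomID())
}
```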
@@ -70,7 +70,7 @@ func (daemon *Daemon) containerExport(container *container.Container) (arch io.R
Compression: archive.Uncompressed,
UIDMaps: daemon.idMapping.UIDs(),
GIDMaps: daemon.idMapping.GIDs(),
})
}, basefs.Path())
if err != nil {
rwlayer.Unmount()
return nil, err

@@ -72,7 +72,6 @@ func init() {

// Driver contains information about the filesystem mounted.
type Driver struct {
sync.Mutex
root string
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
@@ -81,6 +80,7 @@ type Driver struct {
pathCache map[string]string
naiveDiff graphdriver.DiffDriver
locker *locker.Locker
mntL sync.Mutex
}

// Init returns a new AUFS driver.
@@ -327,11 +327,11 @@ func (a *Driver) Remove(id string) error {
break
}

if err != unix.EBUSY {
return errors.Wrapf(err, "aufs: unmount error: %s", mountpoint)
if errors.Cause(err) != unix.EBUSY {
return errors.Wrap(err, "aufs: unmount error")
}
if retries >= 5 {
return errors.Wrapf(err, "aufs: unmount error after retries: %s", mountpoint)
return errors.Wrap(err, "aufs: unmount error after retries")
}
// If unmount returns EBUSY, it could be a transient error. Sleep and retry.
retries++
@@ -437,7 +437,7 @@ func (a *Driver) Put(id string) error {

err := a.unmount(m)
if err != nil {
logger.Debugf("Failed to unmount %s aufs: %v", id, err)
logger.WithError(err).WithField("method", "Put()").Warn()
}
return err
}
@@ -547,9 +547,6 @@ func (a *Driver) getParentLayerPaths(id string) ([]string, error) {
}

func (a *Driver) mount(id string, target string, mountLabel string, layers []string) error {
a.Lock()
defer a.Unlock()

// If the id is mounted or we get an error return
if mounted, err := a.mounted(target); err != nil || mounted {
return err
@@ -564,9 +561,6 @@ func (a *Driver) mount(id string, target string, mountLabel string, layers []str
}

func (a *Driver) unmount(mountPath string) error {
a.Lock()
defer a.Unlock()

if mounted, err := a.mounted(mountPath); err != nil || !mounted {
return err
}
@@ -579,23 +573,20 @@ func (a *Driver) mounted(mountpoint string) (bool, error) {

// Cleanup aufs and unmount all mountpoints
func (a *Driver) Cleanup() error {
var dirs []string
if err := filepath.Walk(a.mntPath(), func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() {
return nil
}
dirs = append(dirs, path)
return nil
}); err != nil {
return err
dir := a.mntPath()
files, err := ioutil.ReadDir(dir)
if err != nil {
return errors.Wrap(err, "aufs readdir error")
}
for _, f := range files {
if !f.IsDir() {
continue
}

m := path.Join(dir, f.Name())

for _, m := range dirs {
if err := a.unmount(m); err != nil {
logger.Debugf("error unmounting %s: %s", m, err)
logger.WithError(err).WithField("method", "Cleanup()").Warn()
}
}
return mount.RecursiveUnmount(a.root)
@@ -604,7 +595,7 @@ func (a *Driver) Cleanup() error {
func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err error) {
defer func() {
if err != nil {
Unmount(target)
mount.Unmount(target)
}
}()

@@ -632,14 +623,29 @@ func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err erro
opts += ",dirperm1"
}
data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), mountLabel)
if err = unix.Mount("none", target, "aufs", 0, data); err != nil {
a.mntL.Lock()
err = unix.Mount("none", target, "aufs", 0, data)
a.mntL.Unlock()
if err != nil {
err = errors.Wrap(err, "mount target="+target+" data="+data)
return
}

for ; index < len(ro); index++ {
layer := fmt.Sprintf(":%s=ro+wh", ro[index])
data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), mountLabel)
if err = unix.Mount("none", target, "aufs", unix.MS_REMOUNT, data); err != nil {
for index < len(ro) {
bp = 0
for ; index < len(ro); index++ {
layer := fmt.Sprintf("append:%s=ro+wh,", ro[index])
if bp+len(layer) > len(b) {
break
}
bp += copy(b[bp:], layer)
}
data := label.FormatMountLabel(string(b[:bp]), mountLabel)
a.mntL.Lock()
err = unix.Mount("none", target, "aufs", unix.MS_REMOUNT, data)
a.mntL.Unlock()
if err != nil {
err = errors.Wrap(err, "mount target="+target+" flags=MS_REMOUNT data="+data)
return
}
}

@@ -731,7 +731,7 @@ func BenchmarkConcurrentAccess(b *testing.B) {
// create a bunch of ids
var ids []string
for i := 0; i < numConcurrent; i++ {
ids = append(ids, stringid.GenerateNonCryptoID())
ids = append(ids, stringid.GenerateRandomID())
}

if err := d.Create(ids[0], "", nil); err != nil {

@@ -4,14 +4,38 @@ package aufs // import "github.com/docker/docker/daemon/graphdriver/aufs"

import (
"os/exec"
"syscall"

"golang.org/x/sys/unix"
"github.com/docker/docker/pkg/mount"
)

// Unmount the target specified.
func Unmount(target string) error {
if err := exec.Command("auplink", target, "flush").Run(); err != nil {
logger.WithError(err).Warnf("Couldn't run auplink before unmount %s", target)
const (
EINVAL = 22 // if auplink returns this,
retries = 3 // retry a few times
)

for i := 0; ; i++ {
out, err := exec.Command("auplink", target, "flush").CombinedOutput()
if err == nil {
break
}
rc := 0
if exiterr, ok := err.(*exec.ExitError); ok {
if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
rc = status.ExitStatus()
}
}
if i >= retries || rc != EINVAL {
logger.WithError(err).WithField("method", "Unmount").Warnf("auplink flush failed: %s", out)
break
}
// auplink failed to find target in /proc/self/mounts because
// kernel can't guarantee continuity while reading from it
// while mounts table is being changed
logger.Debugf("auplink flush error (retrying %d/%d): %s", i+1, retries, out)
}
return unix.Unmount(target, 0)

return mount.Unmount(target)
}

@@ -2,14 +2,6 @@

package copy // import "github.com/docker/docker/daemon/graphdriver/copy"

/*
#include <linux/fs.h>

#ifndef FICLONE
#define FICLONE _IOW(0x94, 9, int)
#endif
*/
import "C"
import (
"container/list"
"fmt"
@@ -50,7 +42,7 @@ func copyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRang
defer dstFile.Close()

if *copyWithFileClone {
_, _, err = unix.Syscall(unix.SYS_IOCTL, dstFile.Fd(), C.FICLONE, srcFile.Fd())
err = fiClone(srcFile, dstFile)
if err == nil {
return nil
}

daemon/graphdriver/copy/copy_cgo.go (new file, +22)
@@ -0,0 +1,22 @@
// +build linux,cgo

package copy // import "github.com/docker/docker/daemon/graphdriver/copy"

/*
#include <linux/fs.h>

#ifndef FICLONE
#define FICLONE _IOW(0x94, 9, int)
#endif
*/
import "C"
import (
"os"

"golang.org/x/sys/unix"
)

func fiClone(srcFile, dstFile *os.File) error {
_, _, err := unix.Syscall(unix.SYS_IOCTL, dstFile.Fd(), C.FICLONE, srcFile.Fd())
return err
}
daemon/graphdriver/copy/copy_nocgo.go (new file, +13)
@@ -0,0 +1,13 @@
// +build linux,!cgo

package copy // import "github.com/docker/docker/daemon/graphdriver/copy"

import (
"os"

"golang.org/x/sys/unix"
)

func fiClone(srcFile, dstFile *os.File) error {
return unix.ENOSYS
}
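Editor's note: the `!cgo` stub returning `unix.ENOSYS` keeps non-cgo builds compiling while signalling "reflink unavailable". A sketch of the assumed caller-side contract (the helper names below are stand-ins, not moby's exact `copyRegular`): try the clone once, and on any error disable cloning and fall back to a plain byte copy.

```go
package main

import (
	"fmt"
	"io"
	"os"

	"golang.org/x/sys/unix"
)

// fiCloneStub mimics the non-cgo build above: clone is never available.
func fiCloneStub(src, dst *os.File) error { return unix.ENOSYS }

// copyOnce sketches the assumed fallback: a failed clone disables the
// fast path for subsequent files and degrades to a byte copy.
func copyOnce(src, dst *os.File, withClone *bool) error {
	if *withClone {
		if err := fiCloneStub(src, dst); err == nil {
			return nil // reflinked: no data copied
		}
		*withClone = false // stop trying on later files
	}
	_, err := io.Copy(dst, src)
	return err
}

func main() {
	src, _ := os.CreateTemp("", "src")
	dst, _ := os.CreateTemp("", "dst")
	defer os.Remove(src.Name())
	defer os.Remove(dst.Name())

	fmt.Fprint(src, "hello")
	src.Seek(0, 0)

	withClone := true
	if err := copyOnce(src, dst, &withClone); err != nil {
		panic(err)
	}
	fmt.Println("clone still enabled:", withClone) // false: fell back
}
```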
@@ -1,4 +1,4 @@
// +build linux
// +build linux,!exclude_disk_quota

//
// projectquota.go - implements XFS project quota controls
@@ -63,19 +63,6 @@ import (
"golang.org/x/sys/unix"
)

// Quota limit params - currently we only control blocks hard limit
type Quota struct {
Size uint64
}

// Control - Context to be used by storage driver (e.g. overlay)
// who wants to apply project quotas to container dirs
type Control struct {
backingFsBlockDev string
nextProjectID uint32
quotas map[string]uint32
}

// NewControl - initialize project quota support.
// Test to make sure that quota can be set on a test dir and find
// the first project id to be used for the next container create.
@@ -166,9 +153,11 @@ func NewControl(basePath string) (*Control, error) {
// SetQuota - assign a unique project id to directory and set the quota limits
// for that project id
func (q *Control) SetQuota(targetPath string, quota Quota) error {

q.RLock()
projectID, ok := q.quotas[targetPath]
q.RUnlock()
if !ok {
q.Lock()
projectID = q.nextProjectID

//
@@ -176,11 +165,12 @@ func (q *Control) SetQuota(targetPath string, quota Quota) error {
//
err := setProjectID(targetPath, projectID)
if err != nil {
q.Unlock()
return err
}

q.quotas[targetPath] = projectID
q.nextProjectID++
q.Unlock()
}

//
@@ -217,8 +207,9 @@ func setProjectQuota(backingFsBlockDev string, projectID uint32, quota Quota) er

// GetQuota - get the quota limits of a directory that was configured with SetQuota
func (q *Control) GetQuota(targetPath string, quota *Quota) error {

q.RLock()
projectID, ok := q.quotas[targetPath]
q.RUnlock()
if !ok {
return errors.Errorf("quota not found for path: %s", targetPath)
}
@@ -289,6 +280,8 @@ func setProjectID(targetPath string, projectID uint32) error {
// findNextProjectID - find the next project id to be used for containers
// by scanning driver home directory to find used project ids
func (q *Control) findNextProjectID(home string) error {
q.Lock()
defer q.Unlock()
files, err := ioutil.ReadDir(home)
if err != nil {
return errors.Errorf("read directory failed: %s", home)

daemon/graphdriver/quota/projectquota_unsupported.go (new file, +18)
@@ -0,0 +1,18 @@
// +build linux,exclude_disk_quota

package quota // import "github.com/docker/docker/daemon/graphdriver/quota"

func NewControl(basePath string) (*Control, error) {
return nil, ErrQuotaNotSupported
}

// SetQuota - assign a unique project id to directory and set the quota limits
// for that project id
func (q *Control) SetQuota(targetPath string, quota Quota) error {
return ErrQuotaNotSupported
}

// GetQuota - get the quota limits of a directory that was configured with SetQuota
func (q *Control) GetQuota(targetPath string, quota *Quota) error {
return ErrQuotaNotSupported
}
daemon/graphdriver/quota/types.go (new file, +19)
@@ -0,0 +1,19 @@
// +build linux

package quota // import "github.com/docker/docker/daemon/graphdriver/quota"

import "sync"

// Quota limit params - currently we only control blocks hard limit
type Quota struct {
Size uint64
}

// Control - Context to be used by storage driver (e.g. overlay)
// who wants to apply project quotas to container dirs
type Control struct {
backingFsBlockDev string
sync.RWMutex // protect nextProjectID and quotas map
nextProjectID uint32
quotas map[string]uint32
}
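Editor's note: embedding `sync.RWMutex` in `Control` enables the locking shape `SetQuota`/`GetQuota` adopt above: a shared read lock for the common "project id already assigned" lookup, escalating to the write lock only when a new id must be allocated. A condensed sketch of the pattern (this version adds a post-`Lock` re-check for safety, which the sketch assumes rather than copies from moby):

```go
package main

import (
	"fmt"
	"sync"
)

type control struct {
	sync.RWMutex
	next   uint32
	quotas map[string]uint32
}

func (c *control) projectID(path string) uint32 {
	c.RLock()
	id, ok := c.quotas[path]
	c.RUnlock()
	if ok {
		return id // fast path: shared lock only
	}
	c.Lock()
	defer c.Unlock()
	if id, ok := c.quotas[path]; ok {
		return id // lost the race to another writer
	}
	id = c.next
	c.quotas[path] = id
	c.next++
	return id
}

func main() {
	c := &control{next: 2, quotas: map[string]uint32{}}
	fmt.Println(c.projectID("/var/lib/docker/overlay2/abc")) // 2
	fmt.Println(c.projectID("/var/lib/docker/overlay2/abc")) // 2 again, read path
}
```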
@@ -338,11 +338,14 @@ func (d *Driver) Remove(id string) error {
// If permission denied, it's possible that the scratch is still mounted, an
// artifact after a hard daemon crash for example. Worth a shot to try detaching it
// before retrying the rename.
if detachErr := vhd.DetachVhd(filepath.Join(layerPath, "sandbox.vhdx")); detachErr != nil {
return errors.Wrapf(err, "failed to detach VHD: %s", detachErr)
}
if renameErr := os.Rename(layerPath, tmpLayerPath); renameErr != nil && !os.IsNotExist(renameErr) {
return errors.Wrapf(err, "second rename attempt following detach failed: %s", renameErr)
sandbox := filepath.Join(layerPath, "sandbox.vhdx")
if _, statErr := os.Stat(sandbox); statErr == nil {
if detachErr := vhd.DetachVhd(sandbox); detachErr != nil {
return errors.Wrapf(err, "failed to detach VHD: %s", detachErr)
}
if renameErr := os.Rename(layerPath, tmpLayerPath); renameErr != nil && !os.IsNotExist(renameErr) {
return errors.Wrapf(err, "second rename attempt following detach failed: %s", renameErr)
}
}
}
if err := hcsshim.DestroyLayer(d.info, tmpID); err != nil {

@@ -54,6 +54,7 @@ func (i *ImageService) PushImage(ctx context.Context, image, tag string, metaHea
},
ConfigMediaType: schema2.MediaTypeImageConfig,
LayerStores: distribution.NewLayerProvidersFromStores(i.layerStores),
TrustKey: i.trustKey,
UploadManager: i.uploadManager,
}


@@ -14,6 +14,7 @@ import (
"github.com/docker/docker/layer"
dockerreference "github.com/docker/docker/reference"
"github.com/docker/docker/registry"
"github.com/docker/libtrust"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -39,6 +40,7 @@ type ImageServiceConfig struct {
MaxConcurrentUploads int
ReferenceStore dockerreference.Store
RegistryService registry.Service
TrustKey libtrust.PrivateKey
}

// NewImageService returns a new ImageService from a configuration
@@ -54,6 +56,7 @@ func NewImageService(config ImageServiceConfig) *ImageService {
layerStores: config.LayerStores,
referenceStore: config.ReferenceStore,
registryService: config.RegistryService,
trustKey: config.TrustKey,
uploadManager: xfer.NewLayerUploadManager(config.MaxConcurrentUploads),
}
}
@@ -69,6 +72,7 @@ type ImageService struct {
pruneRunning int32
referenceStore dockerreference.Store
registryService registry.Service
trustKey libtrust.PrivateKey
uploadManager *xfer.LayerUploadManager
}


@@ -64,8 +64,6 @@ func (daemon *Daemon) killWithSignal(container *containerpkg.Container, sig int)
container.Lock()
defer container.Unlock()

daemon.stopHealthchecks(container)

if !container.Running {
return errNotRunning(container.ID)
}

@@ -21,7 +21,6 @@ type journald struct {
mu sync.Mutex
vars map[string]string // additional variables and values to send to the journal along with the log message
readers map[*logger.LogWatcher]struct{}
closed bool
}

func init() {

@@ -101,56 +101,10 @@ package journald // import "github.com/docker/docker/daemon/logger/journald"
// }
// return rc;
//}
//static int wait_for_data_cancelable(sd_journal *j, int pipefd)
//{
// struct pollfd fds[2];
// uint64_t when = 0;
// int timeout, jevents, i;
// struct timespec ts;
// uint64_t now;
//
// memset(&fds, 0, sizeof(fds));
// fds[0].fd = pipefd;
// fds[0].events = POLLHUP;
// fds[1].fd = sd_journal_get_fd(j);
// if (fds[1].fd < 0) {
// return fds[1].fd;
// }
//
// do {
// jevents = sd_journal_get_events(j);
// if (jevents < 0) {
// return jevents;
// }
// fds[1].events = jevents;
// sd_journal_get_timeout(j, &when);
// if (when == -1) {
// timeout = -1;
// } else {
// clock_gettime(CLOCK_MONOTONIC, &ts);
// now = (uint64_t) ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
// timeout = when > now ? (int) ((when - now + 999) / 1000) : 0;
// }
// i = poll(fds, 2, timeout);
// if ((i == -1) && (errno != EINTR)) {
// /* An unexpected error. */
// return (errno != 0) ? -errno : -EINTR;
// }
// if (fds[0].revents & POLLHUP) {
// /* The close notification pipe was closed. */
// return 0;
// }
// if (sd_journal_process(j) == SD_JOURNAL_APPEND) {
// /* Data, which we might care about, was appended. */
// return 1;
// }
// } while ((fds[0].revents & POLLHUP) == 0);
// return 0;
//}
import "C"

import (
"fmt"
"errors"
"strings"
"time"
"unsafe"
@@ -163,22 +117,29 @@ import (

func (s *journald) Close() error {
s.mu.Lock()
s.closed = true
for r := range s.readers {
r.ProducerGone()
delete(s.readers, r)

}
s.mu.Unlock()
return nil
}

func (s *journald) drainJournal(logWatcher *logger.LogWatcher, j *C.sd_journal, oldCursor *C.char, untilUnixMicro uint64) (*C.char, bool) {
var msg, data, cursor *C.char
var length C.size_t
var stamp C.uint64_t
var priority, partial C.int
var done bool
// convert error code returned from a sd_journal_* function
// (which returns -errno) to a string
func CErr(ret C.int) string {
return C.GoString(C.strerror(C.int(-ret)))
}

func (s *journald) drainJournal(logWatcher *logger.LogWatcher, j *C.sd_journal, oldCursor *C.char, untilUnixMicro uint64) (*C.char, bool, int) {
var (
msg, data, cursor *C.char
length C.size_t
stamp C.uint64_t
priority, partial C.int
done bool
shown int
)

// Walk the journal from here forward until we run out of new entries
// or we reach the until value (if provided).
@@ -216,12 +177,12 @@ drain:
// the stream that we would have
// assigned that value.
source := ""
if C.get_priority(j, &priority) != 0 {
source = ""
} else if priority == C.int(journal.PriErr) {
source = "stderr"
} else if priority == C.int(journal.PriInfo) {
source = "stdout"
if C.get_priority(j, &priority) == 0 {
if priority == C.int(journal.PriErr) {
source = "stderr"
} else if priority == C.int(journal.PriInfo) {
source = "stdout"
}
}
// Retrieve the values of any variables we're adding to the journal.
var attrs []backend.LogAttr
@@ -230,12 +191,29 @@ drain:
kv := strings.SplitN(C.GoStringN(data, C.int(length)), "=", 2)
attrs = append(attrs, backend.LogAttr{Key: kv[0], Value: kv[1]})
}
// Send the log message.
logWatcher.Msg <- &logger.Message{

// Send the log message, unless the consumer is gone
select {
case <-logWatcher.WatchConsumerGone():
done = true // we won't be able to write anything anymore
break drain
case logWatcher.Msg <- &logger.Message{
Line: line,
Source: source,
Timestamp: timestamp.In(time.UTC),
Attrs: attrs,
}:
shown++
}
// Call sd_journal_process() periodically during the processing loop
// to close any opened file descriptors for rotated (deleted) journal files.
if shown%1024 == 0 {
if ret := C.sd_journal_process(j); ret < 0 {
// log a warning but ignore it for now
logrus.WithField("container", s.vars["CONTAINER_ID_FULL"]).
WithField("error", CErr(ret)).
Warn("journald: error processing journal")
}
}
}
// If we're at the end of the journal, we're done (for now).
@@ -250,104 +228,93 @@ drain:
// ensure that we won't be freeing an address that's invalid
cursor = nil
}
return cursor, done
return cursor, done, shown
}

func (s *journald) followJournal(logWatcher *logger.LogWatcher, j *C.sd_journal, pfd [2]C.int, cursor *C.char, untilUnixMicro uint64) *C.char {
func (s *journald) followJournal(logWatcher *logger.LogWatcher, j *C.sd_journal, cursor *C.char, untilUnixMicro uint64) *C.char {
s.mu.Lock()
s.readers[logWatcher] = struct{}{}
if s.closed {
// the journald Logger is closed, presumably because the container has been
// reset. So we shouldn't follow, because we'll never be woken up. But we
// should make one more drainJournal call to be sure we've got all the logs.
// Close pfd[1] so that one drainJournal happens, then cleanup, then return.
C.close(pfd[1])
}
s.mu.Unlock()

newCursor := make(chan *C.char)
waitTimeout := C.uint64_t(250000) // 0.25s

go func() {
for {
// Keep copying journal data out until we're notified to stop
// or we hit an error.
status := C.wait_for_data_cancelable(j, pfd[0])
if status < 0 {
cerrstr := C.strerror(C.int(-status))
errstr := C.GoString(cerrstr)
fmtstr := "error %q while attempting to follow journal for container %q"
logrus.Errorf(fmtstr, errstr, s.vars["CONTAINER_ID_FULL"])
break
}

var done bool
cursor, done = s.drainJournal(logWatcher, j, cursor, untilUnixMicro)

if status != 1 || done {
// We were notified to stop
break
for {
status := C.sd_journal_wait(j, waitTimeout)
if status < 0 {
logWatcher.Err <- errors.New("error waiting for journal: " + CErr(status))
goto cleanup
}
select {
case <-logWatcher.WatchConsumerGone():
goto cleanup // won't be able to write anything anymore
case <-logWatcher.WatchProducerGone():
// container is gone, drain journal
default:
// container is still alive
if status == C.SD_JOURNAL_NOP {
// no new data -- keep waiting
continue
}
}

// Clean up.
C.close(pfd[0])
s.mu.Lock()
delete(s.readers, logWatcher)
s.mu.Unlock()
close(logWatcher.Msg)
newCursor <- cursor
}()

// Wait until we're told to stop.
select {
case cursor = <-newCursor:
case <-logWatcher.WatchConsumerGone():
// Notify the other goroutine that its work is done.
C.close(pfd[1])
cursor = <-newCursor
newCursor, done, recv := s.drainJournal(logWatcher, j, cursor, untilUnixMicro)
cursor = newCursor
if done || (status == C.SD_JOURNAL_NOP && recv == 0) {
break
}
}

cleanup:
s.mu.Lock()
delete(s.readers, logWatcher)
s.mu.Unlock()
close(logWatcher.Msg)
return cursor
}

func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) {
var j *C.sd_journal
var cmatch, cursor *C.char
var stamp C.uint64_t
var sinceUnixMicro uint64
var untilUnixMicro uint64
var pipes [2]C.int
var (
j *C.sd_journal
cmatch, cursor *C.char
stamp C.uint64_t
sinceUnixMicro uint64
untilUnixMicro uint64
)

// Get a handle to the journal.
rc := C.sd_journal_open(&j, C.int(0))
if rc != 0 {
logWatcher.Err <- fmt.Errorf("error opening journal")
if rc := C.sd_journal_open(&j, C.int(0)); rc != 0 {
logWatcher.Err <- errors.New("error opening journal: " + CErr(rc))
close(logWatcher.Msg)
return
}
if config.Follow {
// Initialize library inotify watches early
if rc := C.sd_journal_get_fd(j); rc < 0 {
logWatcher.Err <- errors.New("error getting journald fd: " + CErr(rc))
close(logWatcher.Msg)
return
}
}
// If we end up following the log, we can set the journal context
// pointer and the channel pointer to nil so that we won't close them
// here, potentially while the goroutine that uses them is still
// running. Otherwise, close them when we return from this function.
following := false
defer func(pfollowing *bool) {
if !*pfollowing {
defer func() {
if !following {
close(logWatcher.Msg)
}
C.sd_journal_close(j)
}(&following)
}()
// Remove limits on the size of data items that we'll retrieve.
rc = C.sd_journal_set_data_threshold(j, C.size_t(0))
if rc != 0 {
logWatcher.Err <- fmt.Errorf("error setting journal data threshold")
if rc := C.sd_journal_set_data_threshold(j, C.size_t(0)); rc != 0 {
logWatcher.Err <- errors.New("error setting journal data threshold: " + CErr(rc))
return
}
// Add a match to have the library do the searching for us.
cmatch = C.CString("CONTAINER_ID_FULL=" + s.vars["CONTAINER_ID_FULL"])
defer C.free(unsafe.Pointer(cmatch))
rc = C.sd_journal_add_match(j, unsafe.Pointer(cmatch), C.strlen(cmatch))
if rc != 0 {
logWatcher.Err <- fmt.Errorf("error setting journal match")
if rc := C.sd_journal_add_match(j, unsafe.Pointer(cmatch), C.strlen(cmatch)); rc != 0 {
logWatcher.Err <- errors.New("error setting journal match: " + CErr(rc))
return
}
// If we have a cutoff time, convert it to Unix time once.
@@ -360,76 +327,53 @@ func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadCon
nano := config.Until.UnixNano()
untilUnixMicro = uint64(nano / 1000)
}
if config.Tail > 0 {
lines := config.Tail
if config.Tail >= 0 {
// If until time provided, start from there.
// Otherwise start at the end of the journal.
if untilUnixMicro != 0 && C.sd_journal_seek_realtime_usec(j, C.uint64_t(untilUnixMicro)) < 0 {
logWatcher.Err <- fmt.Errorf("error seeking provided until value")
return
} else if C.sd_journal_seek_tail(j) < 0 {
logWatcher.Err <- fmt.Errorf("error seeking to end of journal")
return
}
if C.sd_journal_previous(j) < 0 {
logWatcher.Err <- fmt.Errorf("error backtracking to previous journal entry")
return
}
// Walk backward.
for lines > 0 {
// Stop if the entry time is before our cutoff.
// We'll need the entry time if it isn't, so go
// ahead and parse it now.
if C.sd_journal_get_realtime_usec(j, &stamp) != 0 {
break
} else {
// Compare the timestamp on the entry to our threshold value.
if sinceUnixMicro != 0 && sinceUnixMicro > uint64(stamp) {
break
}
if untilUnixMicro != 0 {
if rc := C.sd_journal_seek_realtime_usec(j, C.uint64_t(untilUnixMicro)); rc != 0 {
logWatcher.Err <- errors.New("error seeking provided until value: " + CErr(rc))
return
}
lines--
// If we're at the start of the journal, or
// don't need to back up past any more entries,
// stop.
if lines == 0 || C.sd_journal_previous(j) <= 0 {
break
} else if rc := C.sd_journal_seek_tail(j); rc != 0 {
logWatcher.Err <- errors.New("error seeking to end of journal: " + CErr(rc))
return
}
// (Try to) skip backwards by the requested number of lines...
if C.sd_journal_previous_skip(j, C.uint64_t(config.Tail)) >= 0 {
// ...but not before "since"
if sinceUnixMicro != 0 &&
C.sd_journal_get_realtime_usec(j, &stamp) == 0 &&
uint64(stamp) < sinceUnixMicro {
C.sd_journal_seek_realtime_usec(j, C.uint64_t(sinceUnixMicro))
}
}
} else {
// Start at the beginning of the journal.
if C.sd_journal_seek_head(j) < 0 {
logWatcher.Err <- fmt.Errorf("error seeking to start of journal")
if rc := C.sd_journal_seek_head(j); rc != 0 {
logWatcher.Err <- errors.New("error seeking to start of journal: " + CErr(rc))
return
}
// If we have a cutoff date, fast-forward to it.
if sinceUnixMicro != 0 && C.sd_journal_seek_realtime_usec(j, C.uint64_t(sinceUnixMicro)) != 0 {
logWatcher.Err <- fmt.Errorf("error seeking to start time in journal")
return
if sinceUnixMicro != 0 {
if rc := C.sd_journal_seek_realtime_usec(j, C.uint64_t(sinceUnixMicro)); rc != 0 {
logWatcher.Err <- errors.New("error seeking to start time in journal: " + CErr(rc))
return
}
}
if C.sd_journal_next(j) < 0 {
logWatcher.Err <- fmt.Errorf("error skipping to next journal entry")
if rc := C.sd_journal_next(j); rc < 0 {
logWatcher.Err <- errors.New("error skipping to next journal entry: " + CErr(rc))
return
}
}
cursor, _ = s.drainJournal(logWatcher, j, nil, untilUnixMicro)
if config.Tail != 0 { // special case for --tail 0
cursor, _, _ = s.drainJournal(logWatcher, j, nil, untilUnixMicro)
}
if config.Follow {
// Allocate a descriptor for following the journal, if we'll
// need one. Do it here so that we can report if it fails.
if fd := C.sd_journal_get_fd(j); fd < C.int(0) {
logWatcher.Err <- fmt.Errorf("error opening journald follow descriptor: %q", C.GoString(C.strerror(-fd)))
} else {
// Create a pipe that we can poll at the same time as
// the journald descriptor.
if C.pipe(&pipes[0]) == C.int(-1) {
logWatcher.Err <- fmt.Errorf("error opening journald close notification pipe")
} else {
cursor = s.followJournal(logWatcher, j, pipes, cursor, untilUnixMicro)
// Let followJournal handle freeing the journal context
// object and closing the channel.
following = true
}
}
cursor = s.followJournal(logWatcher, j, cursor, untilUnixMicro)
// Let followJournal handle freeing the journal context
// object and closing the channel.
following = true
}

C.free(unsafe.Pointer(cursor))
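Editor's note: the reworked follow path above drops the C self-pipe in favor of `sd_journal_wait` plus two Go-side channels: `WatchConsumerGone` (the log reader went away) and `WatchProducerGone` (the container exited). The core send-or-bail shape, reduced to plain Go with stand-in types:

```go
package main

import "fmt"

type message struct{ line string }

// forward sends one message unless the consumer has gone away, mirroring
// the select inside drainJournal above (types here are stand-ins).
func forward(msgs chan<- message, consumerGone <-chan struct{}, line string) bool {
	select {
	case <-consumerGone:
		return false // reader is gone: stop draining instead of blocking
	case msgs <- message{line}:
		return true
	}
}

func main() {
	msgs := make(chan message, 1)
	gone := make(chan struct{})

	fmt.Println(forward(msgs, gone, "hello")) // true: buffered send succeeds
	close(gone)
	fmt.Println(forward(msgs, gone, "world")) // false: buffer full, consumer gone
}
```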
@@ -81,7 +81,7 @@ func makePluginCreator(name string, l logPlugin, scopePath func(s string) string
return nil, err
}

id := stringid.GenerateNonCryptoID()
id := stringid.GenerateRandomID()
a := &pluginAdapter{
driverName: name,
id: id,

@@ -38,7 +38,7 @@ func (daemon *Daemon) registerName(container *container.Container) error {
func (daemon *Daemon) generateIDAndName(name string) (string, string, error) {
var (
err error
id = stringid.GenerateNonCryptoID()
id = stringid.GenerateRandomID()
)

if name == "" {

@@ -9,13 +9,9 @@ import (
"strings"

"github.com/opencontainers/runc/libcontainer/system"
"golang.org/x/sys/unix"
)

/*
#include <unistd.h>
*/
import "C"

// platformNewStatsCollector performs platform specific initialisation of the
// Collector structure.
func platformNewStatsCollector(s *Collector) {
@@ -70,13 +66,10 @@ func (s *Collector) getSystemCPUUsage() (uint64, error) {
}

func (s *Collector) getNumberOnlineCPUs() (uint32, error) {
i, err := C.sysconf(C._SC_NPROCESSORS_ONLN)
// According to POSIX - errno is undefined after successful
// sysconf, and can be non-zero in several cases, so look for
// error in returned value not in errno.
// (https://sourceware.org/bugzilla/show_bug.cgi?id=21536)
if i == -1 {
var cpuset unix.CPUSet
err := unix.SchedGetaffinity(0, &cpuset)
if err != nil {
return 0, err
}
return uint32(i), nil
return uint32(cpuset.Count()), nil
}

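Editor's note: the replacement above swaps a cgo `sysconf(_SC_NPROCESSORS_ONLN)` call for the `sched_getaffinity` syscall: pid 0 means "this process", and counting the bits in the returned CPU mask gives the CPUs the daemon may actually run on, which also respects taskset/cpuset restrictions, unlike the raw online-CPU count. A self-contained sketch:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	var set unix.CPUSet
	// pid 0 queries the affinity mask of the calling process.
	if err := unix.SchedGetaffinity(0, &set); err != nil {
		panic(err)
	}
	fmt.Println("usable CPUs:", set.Count())
}
```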
daemon/trustkey.go (new file, +57)
@@ -0,0 +1,57 @@
package daemon // import "github.com/docker/docker/daemon"

import (
"encoding/json"
"encoding/pem"
"fmt"
"os"
"path/filepath"

"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/system"
"github.com/docker/libtrust"
)

// LoadOrCreateTrustKey attempts to load the libtrust key at the given path,
// otherwise generates a new one
// TODO: this should use more of libtrust.LoadOrCreateTrustKey which may need
// a refactor or this function to be moved into libtrust
func loadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) {
err := system.MkdirAll(filepath.Dir(trustKeyPath), 0755, "")
if err != nil {
return nil, err
}
trustKey, err := libtrust.LoadKeyFile(trustKeyPath)
if err == libtrust.ErrKeyFileDoesNotExist {
trustKey, err = libtrust.GenerateECP256PrivateKey()
if err != nil {
return nil, fmt.Errorf("Error generating key: %s", err)
}
encodedKey, err := serializePrivateKey(trustKey, filepath.Ext(trustKeyPath))
if err != nil {
return nil, fmt.Errorf("Error serializing key: %s", err)
}
if err := ioutils.AtomicWriteFile(trustKeyPath, encodedKey, os.FileMode(0600)); err != nil {
return nil, fmt.Errorf("Error saving key file: %s", err)
}
} else if err != nil {
return nil, fmt.Errorf("Error loading key file %s: %s", trustKeyPath, err)
}
return trustKey, nil
}

func serializePrivateKey(key libtrust.PrivateKey, ext string) (encoded []byte, err error) {
if ext == ".json" || ext == ".jwk" {
encoded, err = json.Marshal(key)
if err != nil {
return nil, fmt.Errorf("unable to encode private key JWK: %s", err)
}
} else {
pemBlock, err := key.PEMBlock()
if err != nil {
return nil, fmt.Errorf("unable to encode private key PEM: %s", err)
}
encoded = pem.EncodeToMemory(pemBlock)
}
return
}
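Editor's note: with `loadOrCreateTrustKey` in place, the daemon ID set earlier in this changeset (`d.ID = trustKey.PublicKey().KeyID()`) becomes a fingerprint of a persisted key rather than a random UUID, so it stays stable across restarts as long as the key file survives. A sketch using libtrust directly, generating a throwaway key instead of loading one from disk:

```go
package main

import (
	"fmt"

	"github.com/docker/libtrust"
)

func main() {
	// Stand-in for the key loadOrCreateTrustKey would persist and reload.
	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		panic(err)
	}
	// KeyID is a fingerprint of the public key, e.g. "AWX2:I27X:...".
	fmt.Println("daemon ID would be:", key.PublicKey().KeyID())
}
```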
daemon/trustkey_test.go (new file, 71 lines)
@@ -0,0 +1,71 @@
package daemon // import "github.com/docker/docker/daemon"

import (
	"io/ioutil"
	"os"
	"path/filepath"
	"testing"

	"gotest.tools/assert"
	is "gotest.tools/assert/cmp"
	"gotest.tools/fs"
)

// LoadOrCreateTrustKey
func TestLoadOrCreateTrustKeyInvalidKeyFile(t *testing.T) {
	tmpKeyFolderPath, err := ioutil.TempDir("", "api-trustkey-test")
	assert.NilError(t, err)
	defer os.RemoveAll(tmpKeyFolderPath)

	tmpKeyFile, err := ioutil.TempFile(tmpKeyFolderPath, "keyfile")
	assert.NilError(t, err)

	_, err = loadOrCreateTrustKey(tmpKeyFile.Name())
	assert.Check(t, is.ErrorContains(err, "Error loading key file"))
}

func TestLoadOrCreateTrustKeyCreateKeyWhenFileDoesNotExist(t *testing.T) {
	tmpKeyFolderPath := fs.NewDir(t, "api-trustkey-test")
	defer tmpKeyFolderPath.Remove()

	// Without the need to create the folder hierarchy
	tmpKeyFile := tmpKeyFolderPath.Join("keyfile")

	key, err := loadOrCreateTrustKey(tmpKeyFile)
	assert.NilError(t, err)
	assert.Check(t, key != nil)

	_, err = os.Stat(tmpKeyFile)
	assert.NilError(t, err, "key file doesn't exist")
}

func TestLoadOrCreateTrustKeyCreateKeyWhenDirectoryDoesNotExist(t *testing.T) {
	tmpKeyFolderPath := fs.NewDir(t, "api-trustkey-test")
	defer tmpKeyFolderPath.Remove()
	tmpKeyFile := tmpKeyFolderPath.Join("folder/hierarchy/keyfile")

	key, err := loadOrCreateTrustKey(tmpKeyFile)
	assert.NilError(t, err)
	assert.Check(t, key != nil)

	_, err = os.Stat(tmpKeyFile)
	assert.NilError(t, err, "key file doesn't exist")
}

func TestLoadOrCreateTrustKeyCreateKeyNoPath(t *testing.T) {
	defer os.Remove("keyfile")
	key, err := loadOrCreateTrustKey("keyfile")
	assert.NilError(t, err)
	assert.Check(t, key != nil)

	_, err = os.Stat("keyfile")
	assert.NilError(t, err, "key file doesn't exist")
}

func TestLoadOrCreateTrustKeyLoadValidKey(t *testing.T) {
	tmpKeyFile := filepath.Join("testdata", "keyfile")
	key, err := loadOrCreateTrustKey(tmpKeyFile)
	assert.NilError(t, err)
	expected := "AWX2:I27X:WQFX:IOMK:CNAK:O7PW:VYNB:ZLKC:CVAE:YJP2:SI4A:XXAY"
	assert.Check(t, is.Contains(key.String(), expected))
}

@@ -1,35 +0,0 @@
package daemon // import "github.com/docker/docker/daemon"

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"

	"github.com/docker/docker/pkg/ioutils"
	"github.com/google/uuid"
)

func loadOrCreateUUID(path string) (string, error) {
	err := os.MkdirAll(filepath.Dir(path), 0755)
	if err != nil {
		return "", err
	}
	var id string
	idb, err := ioutil.ReadFile(path)
	if os.IsNotExist(err) {
		id = uuid.New().String()
		if err := ioutils.AtomicWriteFile(path, []byte(id), os.FileMode(0600)); err != nil {
			return "", fmt.Errorf("Error saving uuid file: %s", err)
		}
	} else if err != nil {
		return "", fmt.Errorf("Error loading uuid file %s: %s", path, err)
	} else {
		idp, err := uuid.Parse(string(idb))
		if err != nil {
			return "", fmt.Errorf("Error parsing uuid in file %s: %s", path, err)
		}
		id = idp.String()
	}
	return id, nil
}

@@ -18,6 +18,7 @@ import (
	"github.com/docker/docker/pkg/system"
	refstore "github.com/docker/docker/reference"
	"github.com/docker/docker/registry"
+	"github.com/docker/libtrust"
	"github.com/opencontainers/go-digest"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
)
@@ -72,6 +73,9 @@ type ImagePushConfig struct {
	ConfigMediaType string
	// LayerStores (indexed by operating system) manages layers.
	LayerStores map[string]PushLayerProvider
+	// TrustKey is the private key for legacy signatures. This is typically
+	// an ephemeral key, since these signatures are no longer verified.
+	TrustKey libtrust.PrivateKey
	// UploadManager dispatches uploads.
	UploadManager *xfer.LayerUploadManager
}

@@ -39,12 +39,7 @@ func newPuller(endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo,
			repoInfo: repoInfo,
		}, nil
	case registry.APIVersion1:
-		return &v1Puller{
-			v1IDService: metadata.NewV1IDService(imagePullConfig.MetadataStore),
-			endpoint:    endpoint,
-			config:      imagePullConfig,
-			repoInfo:    repoInfo,
-		}, nil
+		return nil, fmt.Errorf("protocol version %d no longer supported. Please contact admins of registry %s", endpoint.Version, endpoint.URL)
	}
	return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL)
}

@@ -1,365 +0,0 @@
package distribution // import "github.com/docker/docker/distribution"

import (
	"context"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"net"
	"net/url"
	"os"
	"strings"
	"time"

	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/client/transport"
	"github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/dockerversion"
	"github.com/docker/docker/image"
	"github.com/docker/docker/image/v1"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/registry"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/sirupsen/logrus"
)

type v1Puller struct {
	v1IDService *metadata.V1IDService
	endpoint    registry.APIEndpoint
	config      *ImagePullConfig
	repoInfo    *registry.RepositoryInfo
	session     *registry.Session
}

func (p *v1Puller) Pull(ctx context.Context, ref reference.Named, _ *specs.Platform) error {
	if _, isCanonical := ref.(reference.Canonical); isCanonical {
		// Allowing fallback, because HTTPS v1 is before HTTP v2
		return fallbackError{err: ErrNoSupport{Err: errors.New("Cannot pull by digest with v1 registry")}}
	}

	tlsConfig, err := p.config.RegistryService.TLSConfig(p.repoInfo.Index.Name)
	if err != nil {
		return err
	}
	// Adds Docker-specific headers as well as user-specified headers (metaHeaders)
	tr := transport.NewTransport(
		// TODO(tiborvass): was ReceiveTimeout
		registry.NewTransport(tlsConfig),
		registry.Headers(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)...,
	)
	client := registry.HTTPClient(tr)
	v1Endpoint := p.endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)
	p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint)
	if err != nil {
		// TODO(dmcgowan): Check if should fallback
		logrus.Debugf("Fallback from error: %s", err)
		return fallbackError{err: err}
	}
	if err := p.pullRepository(ctx, ref); err != nil {
		// TODO(dmcgowan): Check if should fallback
		return err
	}
	progress.Message(p.config.ProgressOutput, "", p.repoInfo.Name.Name()+": this image was pulled from a legacy registry. Important: This registry version will not be supported in future versions of docker.")

	return nil
}

func (p *v1Puller) pullRepository(ctx context.Context, ref reference.Named) error {
	progress.Message(p.config.ProgressOutput, "", "Pulling repository "+p.repoInfo.Name.Name())

	tagged, isTagged := ref.(reference.NamedTagged)

	repoData, err := p.session.GetRepositoryData(p.repoInfo.Name)
	if err != nil {
		if strings.Contains(err.Error(), "HTTP code: 404") {
			if isTagged {
				return fmt.Errorf("Error: image %s:%s not found", reference.Path(p.repoInfo.Name), tagged.Tag())
			}
			return fmt.Errorf("Error: image %s not found", reference.Path(p.repoInfo.Name))
		}
		// Unexpected HTTP error
		return err
	}

	logrus.Debug("Retrieving the tag list")
	var tagsList map[string]string
	if !isTagged {
		tagsList, err = p.session.GetRemoteTags(repoData.Endpoints, p.repoInfo.Name)
	} else {
		var tagID string
		tagsList = make(map[string]string)
		tagID, err = p.session.GetRemoteTag(repoData.Endpoints, p.repoInfo.Name, tagged.Tag())
		if err == registry.ErrRepoNotFound {
			return fmt.Errorf("Tag %s not found in repository %s", tagged.Tag(), p.repoInfo.Name.Name())
		}
		tagsList[tagged.Tag()] = tagID
	}
	if err != nil {
		logrus.Errorf("unable to get remote tags: %s", err)
		return err
	}

	for tag, id := range tagsList {
		repoData.ImgList[id] = &registry.ImgData{
			ID:       id,
			Tag:      tag,
			Checksum: "",
		}
	}

	layersDownloaded := false
	for _, imgData := range repoData.ImgList {
		if isTagged && imgData.Tag != tagged.Tag() {
			continue
		}

		err := p.downloadImage(ctx, repoData, imgData, &layersDownloaded)
		if err != nil {
			return err
		}
	}

	writeStatus(reference.FamiliarString(ref), p.config.ProgressOutput, layersDownloaded)
	return nil
}

func (p *v1Puller) downloadImage(ctx context.Context, repoData *registry.RepositoryData, img *registry.ImgData, layersDownloaded *bool) error {
	if img.Tag == "" {
		logrus.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID)
		return nil
	}

	localNameRef, err := reference.WithTag(p.repoInfo.Name, img.Tag)
	if err != nil {
		retErr := fmt.Errorf("Image (id: %s) has invalid tag: %s", img.ID, img.Tag)
		logrus.Debug(retErr.Error())
		return retErr
	}

	if err := v1.ValidateID(img.ID); err != nil {
		return err
	}

	progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s", img.Tag, p.repoInfo.Name.Name())
	success := false
	var lastErr error
	for _, ep := range p.repoInfo.Index.Mirrors {
		ep += "v1/"
		progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, p.repoInfo.Name.Name(), ep))
		if err = p.pullImage(ctx, img.ID, ep, localNameRef, layersDownloaded); err != nil {
			// Don't report errors when pulling from mirrors.
			logrus.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, p.repoInfo.Name.Name(), ep, err)
			continue
		}
		success = true
		break
	}
	if !success {
		for _, ep := range repoData.Endpoints {
			progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s, endpoint: %s", img.Tag, p.repoInfo.Name.Name(), ep)
			if err = p.pullImage(ctx, img.ID, ep, localNameRef, layersDownloaded); err != nil {
				// It's not ideal that only the last error is returned; it would be better to concatenate the errors.
				// As the error is also written to the output stream, the user will see it.
				lastErr = err
				progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, p.repoInfo.Name.Name(), ep, err)
				continue
			}
			success = true
			break
		}
	}
	if !success {
		err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, p.repoInfo.Name.Name(), lastErr)
		progress.Update(p.config.ProgressOutput, stringid.TruncateID(img.ID), err.Error())
		return err
	}
	return nil
}

func (p *v1Puller) pullImage(ctx context.Context, v1ID, endpoint string, localNameRef reference.Named, layersDownloaded *bool) (err error) {
	var history []string
	history, err = p.session.GetRemoteHistory(v1ID, endpoint)
	if err != nil {
		return err
	}
	if len(history) < 1 {
		return fmt.Errorf("empty history for image %s", v1ID)
	}
	progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1ID), "Pulling dependent layers")

	var (
		descriptors []xfer.DownloadDescriptor
		newHistory  []image.History
		imgJSON     []byte
		imgSize     int64
	)

	// Iterate over layers, in order from bottom-most to top-most. Download
	// config for all layers and create descriptors.
	for i := len(history) - 1; i >= 0; i-- {
		v1LayerID := history[i]
		imgJSON, imgSize, err = p.downloadLayerConfig(v1LayerID, endpoint)
		if err != nil {
			return err
		}

		// Create a new-style config from the legacy configs
		h, err := v1.HistoryFromConfig(imgJSON, false)
		if err != nil {
			return err
		}
		newHistory = append(newHistory, h)

		layerDescriptor := &v1LayerDescriptor{
			v1LayerID:        v1LayerID,
			indexName:        p.repoInfo.Index.Name,
			endpoint:         endpoint,
			v1IDService:      p.v1IDService,
			layersDownloaded: layersDownloaded,
			layerSize:        imgSize,
			session:          p.session,
		}

		descriptors = append(descriptors, layerDescriptor)
	}

	rootFS := image.NewRootFS()
	resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, "", descriptors, p.config.ProgressOutput)
	if err != nil {
		return err
	}
	defer release()

	config, err := v1.MakeConfigFromV1Config(imgJSON, &resultRootFS, newHistory)
	if err != nil {
		return err
	}

	imageID, err := p.config.ImageStore.Put(config)
	if err != nil {
		return err
	}

	if p.config.ReferenceStore != nil {
		if err := p.config.ReferenceStore.AddTag(localNameRef, imageID, true); err != nil {
			return err
		}
	}

	return nil
}

func (p *v1Puller) downloadLayerConfig(v1LayerID, endpoint string) (imgJSON []byte, imgSize int64, err error) {
	progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1LayerID), "Pulling metadata")

	retries := 5
	for j := 1; j <= retries; j++ {
		imgJSON, imgSize, err := p.session.GetRemoteImageJSON(v1LayerID, endpoint)
		if err != nil && j == retries {
			progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1LayerID), "Error pulling layer metadata")
			return nil, 0, err
		} else if err != nil {
			time.Sleep(time.Duration(j) * 500 * time.Millisecond)
			continue
		}

		return imgJSON, imgSize, nil
	}

	// not reached
	return nil, 0, nil
}

type v1LayerDescriptor struct {
	v1LayerID        string
	indexName        string
	endpoint         string
	v1IDService      *metadata.V1IDService
	layersDownloaded *bool
	layerSize        int64
	session          *registry.Session
	tmpFile          *os.File
}

func (ld *v1LayerDescriptor) Key() string {
	return "v1:" + ld.v1LayerID
}

func (ld *v1LayerDescriptor) ID() string {
	return stringid.TruncateID(ld.v1LayerID)
}

func (ld *v1LayerDescriptor) DiffID() (layer.DiffID, error) {
	return ld.v1IDService.Get(ld.v1LayerID, ld.indexName)
}

func (ld *v1LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) {
	progress.Update(progressOutput, ld.ID(), "Pulling fs layer")
	layerReader, err := ld.session.GetRemoteImageLayer(ld.v1LayerID, ld.endpoint, ld.layerSize)
	if err != nil {
		progress.Update(progressOutput, ld.ID(), "Error pulling dependent layers")
		if uerr, ok := err.(*url.Error); ok {
			err = uerr.Err
		}
		if terr, ok := err.(net.Error); ok && terr.Timeout() {
			return nil, 0, err
		}
		return nil, 0, xfer.DoNotRetry{Err: err}
	}
	*ld.layersDownloaded = true

	ld.tmpFile, err = ioutil.TempFile("", "GetImageBlob")
	if err != nil {
		layerReader.Close()
		return nil, 0, err
	}

	reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerReader), progressOutput, ld.layerSize, ld.ID(), "Downloading")
	defer reader.Close()

	_, err = io.Copy(ld.tmpFile, reader)
	if err != nil {
		ld.Close()
		return nil, 0, err
	}

	progress.Update(progressOutput, ld.ID(), "Download complete")

	logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), ld.tmpFile.Name())

	ld.tmpFile.Seek(0, 0)

	// hand off the temporary file to the download manager, so it will only
	// be closed once
	tmpFile := ld.tmpFile
	ld.tmpFile = nil

	return ioutils.NewReadCloserWrapper(tmpFile, func() error {
		tmpFile.Close()
		err := os.RemoveAll(tmpFile.Name())
		if err != nil {
			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
		}
		return err
	}), ld.layerSize, nil
}

func (ld *v1LayerDescriptor) Close() {
	if ld.tmpFile != nil {
		ld.tmpFile.Close()
		if err := os.RemoveAll(ld.tmpFile.Name()); err != nil {
			logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name())
		}
		ld.tmpFile = nil
	}
}

func (ld *v1LayerDescriptor) Registered(diffID layer.DiffID) {
	// Cache mapping from this layer's DiffID to the blobsum
	ld.v1IDService.Set(ld.v1LayerID, ld.indexName, diffID)
}

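The retry loop in `downloadLayerConfig` above is a reusable pattern: up to five attempts with a linearly growing sleep between failures, returning the last error once retries are exhausted. As a hedged, self-contained sketch of the same idea (the helper name `withRetries` is invented for illustration):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// withRetries distills the loop used above: try op up to 5 times, sleeping
// 500ms, 1s, 1.5s, ... between failed attempts, and surface the final error.
func withRetries(op func() error) error {
	const retries = 5
	for j := 1; j <= retries; j++ {
		err := op()
		if err == nil {
			return nil
		}
		if j == retries {
			return err
		}
		time.Sleep(time.Duration(j) * 500 * time.Millisecond)
	}
	return nil // not reached
}

func main() {
	attempts := 0
	err := withRetries(func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient failure")
		}
		return nil
	})
	fmt.Println("attempts:", attempts, "err:", err)
}
```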
@@ -23,7 +23,7 @@ import (
	"github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/image"
-	"github.com/docker/docker/image/v1"
+	v1 "github.com/docker/docker/image/v1"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/progress"
@@ -392,6 +392,10 @@ func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named, platform
		if p.config.RequireSchema2 {
			return false, fmt.Errorf("invalid manifest: not schema2")
		}
+		msg := schema1DeprecationMessage(ref)
+		logrus.Warn(msg)
+		progress.Message(p.config.ProgressOutput, "", msg)
+
		id, manifestDigest, err = p.pullSchema1(ctx, ref, v, platform)
		if err != nil {
			return false, err
@@ -787,6 +791,10 @@ func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mf

	switch v := manifest.(type) {
	case *schema1.SignedManifest:
+		msg := schema1DeprecationMessage(ref)
+		logrus.Warn(msg)
+		progress.Message(p.config.ProgressOutput, "", msg)
+
		platform := toOCIPlatform(manifestMatches[0].Platform)
		id, _, err = p.pullSchema1(ctx, manifestRef, v, &platform)
		if err != nil {

@@ -41,13 +41,7 @@ func NewPusher(ref reference.Named, endpoint registry.APIEndpoint, repoInfo *reg
			config: imagePushConfig,
		}, nil
	case registry.APIVersion1:
-		return &v1Pusher{
-			v1IDService: metadata.NewV1IDService(imagePushConfig.MetadataStore),
-			ref:         ref,
-			endpoint:    endpoint,
-			repoInfo:    repoInfo,
-			config:      imagePushConfig,
-		}, nil
+		return nil, fmt.Errorf("protocol version %d no longer supported. Please contact admins of registry %s", endpoint.Version, endpoint.URL)
	}
	return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL)
}

@@ -1,457 +0,0 @@
package distribution // import "github.com/docker/docker/distribution"

import (
	"context"
	"fmt"
	"sync"

	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/client/transport"
	"github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/dockerversion"
	"github.com/docker/docker/image"
	"github.com/docker/docker/image/v1"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/pkg/system"
	"github.com/docker/docker/registry"
	"github.com/opencontainers/go-digest"
	"github.com/sirupsen/logrus"
)

type v1Pusher struct {
	v1IDService *metadata.V1IDService
	endpoint    registry.APIEndpoint
	ref         reference.Named
	repoInfo    *registry.RepositoryInfo
	config      *ImagePushConfig
	session     *registry.Session
}

func (p *v1Pusher) Push(ctx context.Context) error {
	tlsConfig, err := p.config.RegistryService.TLSConfig(p.repoInfo.Index.Name)
	if err != nil {
		return err
	}
	// Adds Docker-specific headers as well as user-specified headers (metaHeaders)
	tr := transport.NewTransport(
		// TODO(tiborvass): was NoTimeout
		registry.NewTransport(tlsConfig),
		registry.Headers(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)...,
	)
	client := registry.HTTPClient(tr)
	v1Endpoint := p.endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)
	p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint)
	if err != nil {
		// TODO(dmcgowan): Check if should fallback
		return fallbackError{err: err}
	}
	if err := p.pushRepository(ctx); err != nil {
		// TODO(dmcgowan): Check if should fallback
		return err
	}
	return nil
}

// v1Image exposes the configuration, filesystem layer ID, and a v1 ID for an
// image being pushed to a v1 registry.
type v1Image interface {
	Config() []byte
	Layer() layer.Layer
	V1ID() string
}

type v1ImageCommon struct {
	layer  layer.Layer
	config []byte
	v1ID   string
}

func (common *v1ImageCommon) Config() []byte {
	return common.config
}

func (common *v1ImageCommon) V1ID() string {
	return common.v1ID
}

func (common *v1ImageCommon) Layer() layer.Layer {
	return common.layer
}

// v1TopImage defines a runnable (top layer) image being pushed to a v1
// registry.
type v1TopImage struct {
	v1ImageCommon
	imageID image.ID
}

func newV1TopImage(imageID image.ID, img *image.Image, l layer.Layer, parent *v1DependencyImage) (*v1TopImage, error) {
	v1ID := imageID.Digest().Hex()
	parentV1ID := ""
	if parent != nil {
		parentV1ID = parent.V1ID()
	}

	config, err := v1.MakeV1ConfigFromConfig(img, v1ID, parentV1ID, false)
	if err != nil {
		return nil, err
	}

	return &v1TopImage{
		v1ImageCommon: v1ImageCommon{
			v1ID:   v1ID,
			config: config,
			layer:  l,
		},
		imageID: imageID,
	}, nil
}

// v1DependencyImage defines a dependency layer being pushed to a v1 registry.
type v1DependencyImage struct {
	v1ImageCommon
}

func newV1DependencyImage(l layer.Layer, parent *v1DependencyImage) *v1DependencyImage {
	v1ID := digest.Digest(l.ChainID()).Hex()

	var config string
	if parent != nil {
		config = fmt.Sprintf(`{"id":"%s","parent":"%s"}`, v1ID, parent.V1ID())
	} else {
		config = fmt.Sprintf(`{"id":"%s"}`, v1ID)
	}
	return &v1DependencyImage{
		v1ImageCommon: v1ImageCommon{
			v1ID:   v1ID,
			config: []byte(config),
			layer:  l,
		},
	}
}

// Retrieve all the images to be uploaded, in the correct order
func (p *v1Pusher) getImageList() (imageList []v1Image, tagsByImage map[image.ID][]string, referencedLayers []PushLayer, err error) {
	tagsByImage = make(map[image.ID][]string)

	// Ignore digest references
	if _, isCanonical := p.ref.(reference.Canonical); isCanonical {
		return
	}

	tagged, isTagged := p.ref.(reference.NamedTagged)
	if isTagged {
		// Push a specific tag
		var imgID image.ID
		var dgst digest.Digest
		dgst, err = p.config.ReferenceStore.Get(p.ref)
		if err != nil {
			return
		}
		imgID = image.IDFromDigest(dgst)

		imageList, err = p.imageListForTag(imgID, nil, &referencedLayers)
		if err != nil {
			return
		}

		tagsByImage[imgID] = []string{tagged.Tag()}

		return
	}

	imagesSeen := make(map[digest.Digest]struct{})
	dependenciesSeen := make(map[layer.ChainID]*v1DependencyImage)

	associations := p.config.ReferenceStore.ReferencesByName(p.ref)
	for _, association := range associations {
		if tagged, isTagged = association.Ref.(reference.NamedTagged); !isTagged {
			// Ignore digest references.
			continue
		}

		imgID := image.IDFromDigest(association.ID)
		tagsByImage[imgID] = append(tagsByImage[imgID], tagged.Tag())

		if _, present := imagesSeen[association.ID]; present {
			// Skip generating image list for already-seen image
			continue
		}
		imagesSeen[association.ID] = struct{}{}

		imageListForThisTag, err := p.imageListForTag(imgID, dependenciesSeen, &referencedLayers)
		if err != nil {
			return nil, nil, nil, err
		}

		// append to main image list
		imageList = append(imageList, imageListForThisTag...)
	}
	if len(imageList) == 0 {
		return nil, nil, nil, fmt.Errorf("No images found for the requested repository / tag")
	}
	logrus.Debugf("Image list: %v", imageList)
	logrus.Debugf("Tags by image: %v", tagsByImage)

	return
}

func (p *v1Pusher) imageListForTag(imgID image.ID, dependenciesSeen map[layer.ChainID]*v1DependencyImage, referencedLayers *[]PushLayer) (imageListForThisTag []v1Image, err error) {
	ics, ok := p.config.ImageStore.(*imageConfigStore)
	if !ok {
		return nil, fmt.Errorf("only image store images supported for v1 push")
	}
	img, err := ics.Store.Get(imgID)
	if err != nil {
		return nil, err
	}

	topLayerID := img.RootFS.ChainID()

	if !system.IsOSSupported(img.OperatingSystem()) {
		return nil, system.ErrNotSupportedOperatingSystem
	}
	pl, err := p.config.LayerStores[img.OperatingSystem()].Get(topLayerID)
	*referencedLayers = append(*referencedLayers, pl)
	if err != nil {
		return nil, fmt.Errorf("failed to get top layer from image: %v", err)
	}

	// V1 push is deprecated, only support existing layerstore layers
	lsl, ok := pl.(*storeLayer)
	if !ok {
		return nil, fmt.Errorf("only layer store layers supported for v1 push")
	}
	l := lsl.Layer

	dependencyImages, parent := generateDependencyImages(l.Parent(), dependenciesSeen)

	topImage, err := newV1TopImage(imgID, img, l, parent)
	if err != nil {
		return nil, err
	}

	imageListForThisTag = append(dependencyImages, topImage)

	return
}

func generateDependencyImages(l layer.Layer, dependenciesSeen map[layer.ChainID]*v1DependencyImage) (imageListForThisTag []v1Image, parent *v1DependencyImage) {
	if l == nil {
		return nil, nil
	}

	imageListForThisTag, parent = generateDependencyImages(l.Parent(), dependenciesSeen)

	if dependenciesSeen != nil {
		if dependencyImage, present := dependenciesSeen[l.ChainID()]; present {
			// This layer is already on the list, we can ignore it
			// and all its parents.
			return imageListForThisTag, dependencyImage
		}
	}

	dependencyImage := newV1DependencyImage(l, parent)
	imageListForThisTag = append(imageListForThisTag, dependencyImage)

	if dependenciesSeen != nil {
		dependenciesSeen[l.ChainID()] = dependencyImage
	}

	return imageListForThisTag, dependencyImage
}

// createImageIndex returns an index of an image's layer IDs and tags.
func createImageIndex(images []v1Image, tags map[image.ID][]string) []*registry.ImgData {
	var imageIndex []*registry.ImgData
	for _, img := range images {
		v1ID := img.V1ID()

		if topImage, isTopImage := img.(*v1TopImage); isTopImage {
			if tags, hasTags := tags[topImage.imageID]; hasTags {
				// If an image has tags you must add an entry in the image index
				// for each tag
				for _, tag := range tags {
					imageIndex = append(imageIndex, &registry.ImgData{
						ID:  v1ID,
						Tag: tag,
					})
				}
				continue
			}
		}

		// If the image does not have a tag it still needs to be sent to the
		// registry with an empty tag so that it is associated with the repository
		imageIndex = append(imageIndex, &registry.ImgData{
			ID:  v1ID,
			Tag: "",
		})
	}
	return imageIndex
}

// lookupImageOnEndpoint checks the specified endpoint to see if an image exists
// and if it is absent then it sends the image id to the channel to be pushed.
func (p *v1Pusher) lookupImageOnEndpoint(wg *sync.WaitGroup, endpoint string, images chan v1Image, imagesToPush chan string) {
	defer wg.Done()
	for image := range images {
		v1ID := image.V1ID()
		truncID := stringid.TruncateID(image.Layer().DiffID().String())
		if err := p.session.LookupRemoteImage(v1ID, endpoint); err != nil {
			logrus.Errorf("Error in LookupRemoteImage: %s", err)
			imagesToPush <- v1ID
			progress.Update(p.config.ProgressOutput, truncID, "Waiting")
		} else {
			progress.Update(p.config.ProgressOutput, truncID, "Already exists")
		}
	}
}

func (p *v1Pusher) pushImageToEndpoint(ctx context.Context, endpoint string, imageList []v1Image, tags map[image.ID][]string, repo *registry.RepositoryData) error {
	workerCount := len(imageList)
	// start a maximum of 5 workers to check if images exist on the specified endpoint.
	if workerCount > 5 {
		workerCount = 5
	}
	var (
		wg           = &sync.WaitGroup{}
		imageData    = make(chan v1Image, workerCount*2)
		imagesToPush = make(chan string, workerCount*2)
		pushes       = make(chan map[string]struct{}, 1)
	)
	for i := 0; i < workerCount; i++ {
		wg.Add(1)
		go p.lookupImageOnEndpoint(wg, endpoint, imageData, imagesToPush)
	}
	// start a go routine that consumes the images to push
	go func() {
		shouldPush := make(map[string]struct{})
		for id := range imagesToPush {
			shouldPush[id] = struct{}{}
		}
		pushes <- shouldPush
	}()
	for _, v1Image := range imageList {
		imageData <- v1Image
	}
	// close the channel to notify the workers that there will be no more images to check.
	close(imageData)
	wg.Wait()
	close(imagesToPush)
	// wait for all the images that require pushes to be collected into a consumable map.
	shouldPush := <-pushes
	// Finish by pushing any images and tags to the endpoint. The order in which
	// the images are pushed is very important, which is why we still iterate
	// over the ordered list of image IDs.
	for _, img := range imageList {
		v1ID := img.V1ID()
		if _, push := shouldPush[v1ID]; push {
			if _, err := p.pushImage(ctx, img, endpoint); err != nil {
				// FIXME: Continue on error?
				return err
			}
		}
		if topImage, isTopImage := img.(*v1TopImage); isTopImage {
			for _, tag := range tags[topImage.imageID] {
				progress.Messagef(p.config.ProgressOutput, "", "Pushing tag for rev [%s] on {%s}", stringid.TruncateID(v1ID), endpoint+"repositories/"+reference.Path(p.repoInfo.Name)+"/tags/"+tag)
				if err := p.session.PushRegistryTag(p.repoInfo.Name, v1ID, tag, endpoint); err != nil {
					return err
				}
			}
		}
	}
	return nil
}

// pushRepository pushes layers that do not already exist on the registry.
func (p *v1Pusher) pushRepository(ctx context.Context) error {
	imgList, tags, referencedLayers, err := p.getImageList()
	defer func() {
		for _, l := range referencedLayers {
			l.Release()
		}
	}()
	if err != nil {
		return err
	}

	imageIndex := createImageIndex(imgList, tags)
	for _, data := range imageIndex {
		logrus.Debugf("Pushing ID: %s with Tag: %s", data.ID, data.Tag)
	}

	// Register all the images in a repository with the registry
	// If an image is not in this list it will not be associated with the repository
	repoData, err := p.session.PushImageJSONIndex(p.repoInfo.Name, imageIndex, false, nil)
	if err != nil {
		return err
	}
	// push the repository to each of the endpoints only if it does not exist.
	for _, endpoint := range repoData.Endpoints {
		if err := p.pushImageToEndpoint(ctx, endpoint, imgList, tags, repoData); err != nil {
			return err
		}
	}
	_, err = p.session.PushImageJSONIndex(p.repoInfo.Name, imageIndex, true, repoData.Endpoints)
	return err
}

func (p *v1Pusher) pushImage(ctx context.Context, v1Image v1Image, ep string) (checksum string, err error) {
	l := v1Image.Layer()
	v1ID := v1Image.V1ID()
	truncID := stringid.TruncateID(l.DiffID().String())

	jsonRaw := v1Image.Config()
	progress.Update(p.config.ProgressOutput, truncID, "Pushing")

	// General rule is to use ID for graph accesses and compatibilityID for
	// calls to session.registry()
	imgData := &registry.ImgData{
		ID: v1ID,
	}

	// Send the json
	if err := p.session.PushImageJSONRegistry(imgData, jsonRaw, ep); err != nil {
		if err == registry.ErrAlreadyExists {
			progress.Update(p.config.ProgressOutput, truncID, "Image already pushed, skipping")
			return "", nil
		}
		return "", err
	}

	arch, err := l.TarStream()
	if err != nil {
		return "", err
	}
	defer arch.Close()

	// don't care if this fails; best effort
	size, _ := l.DiffSize()

	// Send the layer
	logrus.Debugf("rendered layer for %s of [%d] size", v1ID, size)

	reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, arch), p.config.ProgressOutput, size, truncID, "Pushing")
	defer reader.Close()

	checksum, checksumPayload, err := p.session.PushImageLayerRegistry(v1ID, reader, ep, jsonRaw)
	if err != nil {
		return "", err
	}
	imgData.Checksum = checksum
	imgData.ChecksumPayload = checksumPayload
	// Send the checksum
	if err := p.session.PushImageChecksumRegistry(imgData, ep); err != nil {
		return "", err
	}

	if err := p.v1IDService.Set(v1ID, p.repoInfo.Index.Name, l.DiffID()); err != nil {
		logrus.Warnf("Could not set v1 ID mapping: %v", err)
	}

	progress.Update(p.config.ProgressOutput, truncID, "Image successfully pushed")
	return imgData.Checksum, nil
}

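The `pushImageToEndpoint` function above caps concurrency at five workers feeding from a channel, with a separate collector goroutine gathering results. A minimal standalone sketch of that same pattern (the names and data are made up for the example):

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	items := []string{"a", "b", "c", "d", "e", "f", "g"}

	// Cap the worker count at 5, as the pusher does.
	workerCount := len(items)
	if workerCount > 5 {
		workerCount = 5
	}

	var (
		wg      sync.WaitGroup
		work    = make(chan string, workerCount*2)
		results = make(chan string, workerCount*2)
	)

	for i := 0; i < workerCount; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for item := range work {
				results <- item // a real worker would do a remote lookup here
			}
		}()
	}

	// Collector goroutine drains results until the channel is closed.
	done := make(chan []string, 1)
	go func() {
		var all []string
		for r := range results {
			all = append(all, r)
		}
		done <- all
	}()

	for _, it := range items {
		work <- it
	}
	close(work)    // no more work: workers drain and exit
	wg.Wait()      // all workers have finished producing
	close(results) // now the collector can finish
	fmt.Println(<-done)
}
```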
@@ -5,6 +5,7 @@ import (
	"errors"
	"fmt"
	"io"
+	"runtime"
	"sort"
	"strings"
	"sync"
@@ -180,8 +181,30 @@ func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, id

	putOptions := []distribution.ManifestServiceOption{distribution.WithTag(ref.Tag())}
	if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil {
-		logrus.Warnf("failed to upload schema2 manifest: %v", err)
-		return err
+		if runtime.GOOS == "windows" || p.config.TrustKey == nil || p.config.RequireSchema2 {
+			logrus.Warnf("failed to upload schema2 manifest: %v", err)
+			return err
+		}
+
+		logrus.Warnf("failed to upload schema2 manifest: %v - falling back to schema1", err)
+
+		msg := schema1DeprecationMessage(ref)
+		logrus.Warn(msg)
+		progress.Message(p.config.ProgressOutput, "", msg)
+
+		manifestRef, err := reference.WithTag(p.repo.Named(), ref.Tag())
+		if err != nil {
+			return err
+		}
+		builder = schema1.NewConfigManifestBuilder(p.repo.Blobs(ctx), p.config.TrustKey, manifestRef, imgConfig)
+		manifest, err = manifestFromBuilder(ctx, builder, descriptors)
+		if err != nil {
+			return err
+		}
+
+		if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil {
+			return err
+		}
	}

	var canonicalManifest []byte

@@ -156,3 +156,7 @@ func (th *existingTokenHandler) AuthorizeRequest(req *http.Request, params map[s
	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", th.token))
	return nil
}
+
+func schema1DeprecationMessage(ref reference.Named) string {
+	return fmt.Sprintf("[DEPRECATION NOTICE] registry v2 schema1 support will be removed in an upcoming release. Please contact admins of the %s registry NOW to avoid future disruption.", reference.Domain(ref))
+}

@@ -49,6 +49,11 @@ keywords: "API, Docker, rcli, REST, documentation"
* `GET /info` now returns information about the `DataPathPort` that is currently used in swarm
* `GET /info` now returns a `PidsLimit` boolean to indicate if the host kernel has
  PID limit support enabled.
+* `GET /info` now includes `name=rootless` in `SecurityOptions` when the daemon is running in
+  rootless mode. This change is not versioned, and affects all API versions if the daemon has
+  this patch.
+* `GET /info` now returns `none` as `CgroupDriver` when the daemon is running in rootless mode.
+  This change is not versioned, and affects all API versions if the daemon has this patch.
* `POST /containers/create` now accepts `DeviceRequests` as part of `HostConfig`.
  These can be used to request NVIDIA GPUs (see the hedged sketch after this list).
* `GET /swarm` endpoint now returns DataPathPort info
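As a hedged sketch of the new `DeviceRequests` field from the Go client side — the image, container name, and NVIDIA specifics are assumptions for the example, while the five-argument `ContainerCreate` matches the client usage elsewhere in this diff:

```go
package main

import (
	"context"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	hostConfig := &container.HostConfig{
		Resources: container.Resources{
			DeviceRequests: []container.DeviceRequest{{
				Driver:       "nvidia", // assumes the NVIDIA container runtime hook is installed
				Count:        -1,       // -1 requests all available GPUs
				Capabilities: [][]string{{"gpu"}},
			}},
		},
	}

	// "gpu-test" is a hypothetical container name for this sketch.
	_, err = cli.ContainerCreate(context.Background(),
		&container.Config{Image: "busybox"}, hostConfig, nil, "gpu-test")
	if err != nil {
		panic(err)
	}
}
```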
@@ -215,6 +220,7 @@ keywords: "API, Docker, rcli, REST, documentation"
* `GET /events` now supports service, node and secret events, which are emitted when users create, update and remove services, nodes and secrets
* `GET /events` now supports the network remove event, which is emitted when users remove a swarm scoped network
* `GET /events` now supports a filter type `scope`, whose supported values are `swarm` and `local`
+* `PUT /containers/(name)/archive` now accepts a `copyUIDGID` parameter to allow copying the UID/GID of the source to the destination file or directory.

## v1.29 API changes

@@ -20,7 +20,6 @@ $ grep ^$(whoami): /etc/subgid
penguin:231072:65536
```

-* Either [slirp4netns](https://github.com/rootless-containers/slirp4netns) (v0.3+) or [VPNKit](https://github.com/moby/vpnkit) needs to be installed. slirp4netns is preferred for the best performance.

### Distribution-specific hint

@@ -55,10 +54,9 @@ penguin:231072:65536
You need to run `dockerd-rootless.sh` instead of `dockerd`.

```console
-$ dockerd-rootless.sh --experimental --userland-proxy --userland-proxy-path=$(which rootlesskit-docker-proxy)"
+$ dockerd-rootless.sh --experimental
```
As Rootless mode is experimental, you currently always need to run `dockerd-rootless.sh` with `--experimental`.
-Also, to expose ports, you need to set `--userland-proxy-path` to the path of the `rootlesskit-docker-proxy` binary.

Remarks:
* The socket path is set to `$XDG_RUNTIME_DIR/docker.sock` by default. `$XDG_RUNTIME_DIR` is typically set to `/run/user/$UID`.
@@ -66,6 +64,8 @@ Remarks:
* The exec dir is set to `$XDG_RUNTIME_DIR/docker` by default.
* The daemon config dir is set to `~/.config/docker` (not `~/.docker`, which is used by the client) by default.
* The `dockerd-rootless.sh` script executes `dockerd` in its own user, mount, and network namespaces. You can enter the namespaces by running `nsenter -U --preserve-credentials -n -m -t $(cat $XDG_RUNTIME_DIR/docker.pid)`.
+* `docker info` shows `rootless` in `SecurityOptions`
+* `docker info` shows `none` as `Cgroup Driver`

### Client

@@ -75,6 +75,17 @@ You can just use the upstream Docker client but you need to set the socket path
$ docker -H unix://$XDG_RUNTIME_DIR/docker.sock run -d nginx
```

### Expose Docker API socket via TCP

To expose the Docker API socket via TCP, you need to launch `dockerd-rootless.sh` with `DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS="-p 0.0.0.0:2376:2376/tcp"`.

```console
$ DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS="-p 0.0.0.0:2376:2376/tcp" \
  dockerd-rootless.sh --experimental \
  -H tcp://0.0.0.0:2376 \
  --tlsverify --tlscacert=ca.pem --tlscert=cert.pem --tlskey=key.pem
```
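For example, a remote client could then connect along these lines (the host name and certificate paths are placeholders):

```console
$ docker --tlsverify --tlscacert=ca.pem --tlscert=cert.pem --tlskey=key.pem \
  -H tcp://rootless-host.example.com:2376 info
```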
### Routing ping packets

To route ping packets, you need to set up `net.ipv4.ping_group_range` properly as root.
@@ -82,3 +93,12 @@ To route ping packets, you need to set up `net.ipv4.ping_group_range` properly a
```console
$ sudo sh -c "echo 0 2147483647 > /proc/sys/net/ipv4/ping_group_range"
```

### Changing network stack

`dockerd-rootless.sh` uses [slirp4netns](https://github.com/rootless-containers/slirp4netns) (if installed) or [VPNKit](https://github.com/moby/vpnkit) as the network stack by default.
These network stacks run in userspace and might have performance overhead. See [RootlessKit documentation](https://github.com/rootless-containers/rootlesskit/tree/v0.6.0#network-drivers) for further information.

Optionally, you can use `lxc-user-nic` instead for the best performance.
To use `lxc-user-nic`, you need to edit [`/etc/lxc/lxc-usernet`](https://github.com/rootless-containers/rootlesskit/tree/v0.6.0#--netlxc-user-nic-experimental) and set `$DOCKERD_ROOTLESS_ROOTLESSKIT_NET=lxc-user-nic`.

@@ -119,6 +119,7 @@ $FinallyColour="Cyan"
#$env:INTEGRATION_IN_CONTAINER="yes"
#$env:WINDOWS_BASE_IMAGE=""
#$env:SKIP_COPY_GO="yes"
+#$env:INTEGRATION_TESTFLAGS="-test.v"

Function Nuke-Everything {
    $ErrorActionPreference = 'SilentlyContinue'
@@ -409,7 +410,7 @@ Try {
    # Redirect to a temporary location.
    $TEMPORIG=$env:TEMP
    $env:TEMP="$env:TESTRUN_DRIVE`:\$env:TESTRUN_SUBDIR\CI-$COMMITHASH"
-    $env:LOCALAPPDATA="$TEMP\localappdata"
+    $env:LOCALAPPDATA="$env:TEMP\localappdata"
    $errorActionPreference='Stop'
    New-Item -ItemType Directory "$env:TEMP" -ErrorAction SilentlyContinue | Out-Null
    New-Item -ItemType Directory "$env:TEMP\userprofile" -ErrorAction SilentlyContinue | Out-Null
@@ -425,20 +426,9 @@ Try {
    Write-Host -ForegroundColor Green "INFO: Location for testing is $env:TEMP"

    # CI Integrity check - ensure Dockerfile.windows and Dockerfile go versions match
-    $goVersionDockerfileWindows=$(Get-Content ".\Dockerfile.windows" | Select-String "^ENV GO_VERSION" | Select-object -First 1).ToString().Replace("ENV GO_VERSION=","").Replace("\","").Replace("``","").Trim()
-    $goVersionDockerfile=$(Get-Content ".\Dockerfile" | Select-String "^ENV GO_VERSION" | Select-object -First 1)
-
-    # As of go 1.11, Dockerfile changed to be in the format like "FROM golang:1.11.0 AS base".
-    # If a version number ends with .0 (as in 1.11.0, a convention used in golang docker
-    # image versions), it needs to be removed (i.e. "1.11.0" becomes "1.11").
-    if ($null -eq $goVersionDockerfile) {
-        $goVersionDockerfile=$(Get-Content ".\Dockerfile" | Select-String "^FROM golang:" | Select-object -First 1)
-        if ($null -ne $goVersionDockerfile) {
-            $goVersionDockerfile = $goVersionDockerfile.ToString().Split(" ")[1].Split(":")[1] -replace '\.0$',''
-        }
-    } else {
-        $goVersionDockerfile = $goVersionDockerfile.ToString().Split(" ")[2]
-    }
+    $goVersionDockerfileWindows=(Select-String -Path ".\Dockerfile.windows" -Pattern "^ARG[\s]+GO_VERSION=(.*)$").Matches.groups[1].Value
+    $goVersionDockerfile=(Select-String -Path ".\Dockerfile" -Pattern "^ARG[\s]+GO_VERSION=(.*)$").Matches.groups[1].Value

    if ($null -eq $goVersionDockerfile) {
        Throw "ERROR: Failed to extract golang version from Dockerfile"
    }
@@ -452,7 +442,7 @@ Try {
    Write-Host -ForegroundColor Cyan "`n`nINFO: Building the image from Dockerfile.windows at $(Get-Date)..."
    Write-Host
    $ErrorActionPreference = "SilentlyContinue"
-    $Duration=$(Measure-Command { docker build -t docker -f Dockerfile.windows . | Out-Host })
+    $Duration=$(Measure-Command { docker build --build-arg=GO_VERSION -t docker -f Dockerfile.windows . | Out-Host })
    $ErrorActionPreference = "Stop"
    if (-not($LastExitCode -eq 0)) {
        Throw "ERROR: Failed to build image from Dockerfile.windows"
@@ -825,18 +815,32 @@ Try {
                docker `
                "`$env`:PATH`='c`:\target;'+`$env:PATH`; `$env:DOCKER_HOST`='tcp`://'+(ipconfig | select -last 1).Substring(39)+'`:2357'; c:\target\runIntegrationCLI.ps1" | Out-Host } )
        } else {
            Write-Host -ForegroundColor Green "INFO: Integration tests being run from the host:"
            Set-Location "$env:SOURCES_DRIVE`:\$env:SOURCES_SUBDIR\src\github.com\docker\docker\integration-cli"
            $env:DOCKER_HOST=$DASHH_CUT
            $env:PATH="$env:TEMP\binary;$env:PATH;" # Force to use the test binaries, not the host ones.
            Write-Host -ForegroundColor Green "INFO: $c"
            Write-Host -ForegroundColor Green "INFO: DOCKER_HOST at $DASHH_CUT"

            $ErrorActionPreference = "SilentlyContinue"
            Write-Host -ForegroundColor Cyan "INFO: Integration API tests being run from the host:"
            if (!($env:INTEGRATION_TESTFLAGS)) {
                $env:INTEGRATION_TESTFLAGS = "-test.v"
            }
            Set-Location "$env:SOURCES_DRIVE`:\$env:SOURCES_SUBDIR\src\github.com\docker\docker"
            $start=(Get-Date); Invoke-Expression ".\hack\make.ps1 -TestIntegration"; $Duration=New-Timespan -Start $start -End (Get-Date)
            $ErrorActionPreference = "Stop"
            if (-not($LastExitCode -eq 0)) {
                Throw "ERROR: Integration API tests failed at $(Get-Date). Duration`:$Duration"
            }

            $ErrorActionPreference = "SilentlyContinue"
            Write-Host -ForegroundColor Green "INFO: Integration CLI tests being run from the host:"
            Write-Host -ForegroundColor Green "INFO: $c"
            Set-Location "$env:SOURCES_DRIVE`:\$env:SOURCES_SUBDIR\src\github.com\docker\docker\integration-cli"
            # Explicitly avoid Measure-Command here, otherwise output is not streamed as the tests run
            $start=(Get-Date); Invoke-Expression $c; $Duration=New-Timespan -Start $start -End (Get-Date)
        }
        $ErrorActionPreference = "Stop"
        if (-not($LastExitCode -eq 0)) {
-            Throw "ERROR: Integration tests failed at $(Get-Date). Duration`:$Duration"
+            Throw "ERROR: Integration CLI tests failed at $(Get-Date). Duration`:$Duration"
        }
        Write-Host -ForegroundColor Green "INFO: Integration tests ended at $(Get-Date). Duration`:$Duration"
    } else {

@@ -3,7 +3,7 @@
# LIBNETWORK_COMMIT is used to build the docker-userland-proxy binary. When
# updating the binary version, consider updating github.com/docker/libnetwork
# in vendor.conf accordingly
-LIBNETWORK_COMMIT=9ff9b57c344df5cd47443ad9e65702ec85c5aeb0
+LIBNETWORK_COMMIT=fc5a7d91d54cc98f64fc28f9e288b46a0bee756c

install_proxy() {
	case "$1" in

@@ -1,7 +1,7 @@
#!/bin/sh

-# v0.3.0
-ROOTLESSKIT_COMMIT=70e0502f328bc5ffb14692a7ea41abb77196043b
+# v0.6.0
+ROOTLESSKIT_COMMIT=2fcff6ceae968a1d895e6205e5154b107247356f

install_rootlesskit() {
	case "$1" in

@@ -4,7 +4,7 @@
# The version of runc should match the version that is used by the containerd
# version that is used. If you need to update runc, open a pull request in
# the containerd project first, and update both after that is merged.
-RUNC_COMMIT=029124da7af7360afa781a0234d1b083550f797c # v1.0.0-rc7-6-g029124da
+RUNC_COMMIT=425e105d5a03fabd737a126ad93d62a9eeede87f # v1.0.0-rc8

install_runc() {
	# If using RHEL7 kernels (3.10.0 el7), disable kmem accounting/limiting

@@ -1,6 +1,6 @@
#!/bin/sh

-VNDR_COMMIT=81cb8916aad3c8d06193f008dba3e16f82851f52
+VNDR_COMMIT=f5ab8fc5fb64d66b5c6e55a0bcb58b2e92362fa0

install_vndr() {
	echo "Install vndr version $VNDR_COMMIT"

@@ -60,6 +60,9 @@
.PARAMETER TestUnit
    Runs unit tests.

.PARAMETER TestIntegration
    Runs integration tests.

.PARAMETER All
    Runs everything this script knows about that can run in a container.

@@ -84,6 +87,7 @@ param(
    [Parameter(Mandatory=$False)][switch]$PkgImports,
    [Parameter(Mandatory=$False)][switch]$GoFormat,
    [Parameter(Mandatory=$False)][switch]$TestUnit,
+    [Parameter(Mandatory=$False)][switch]$TestIntegration,
    [Parameter(Mandatory=$False)][switch]$All
)

@@ -130,7 +134,7 @@ Function Check-InContainer() {
# outside of a container where it may be out of date with master.
Function Verify-GoVersion() {
    Try {
-        $goVersionDockerfile=(Select-String -Path ".\Dockerfile" -Pattern "^FROM golang:").ToString().Split(" ")[1].SubString(7) -replace '\.0$',''
+        $goVersionDockerfile=(Select-String -Path ".\Dockerfile" -Pattern "^ARG[\s]+GO_VERSION=(.*)$").Matches.groups[1].Value.TrimEnd(".0")
        $goVersionInstalled=(go version).ToString().Split(" ")[2].SubString(2)
    }
    Catch [Exception] {
@@ -320,6 +324,40 @@ Function Run-UnitTests() {
    if ($LASTEXITCODE -ne 0) { Throw "Unit tests failed" }
}

# Run the integration tests
Function Run-IntegrationTests() {
    $env:DOCKER_INTEGRATION_DAEMON_DEST = $root + "\bundles\tmp"
    $dirs = Get-ChildItem -Path integration -Directory -Recurse
    $integration_api_dirs = @()
    ForEach($dir in $dirs) {
        $RelativePath = "." + $dir.FullName -replace "$($PWD.Path -replace "\\","\\")",""
        If ($RelativePath -notmatch '(^.\\integration($|\\internal)|\\testdata)') {
            $integration_api_dirs += $dir
            Write-Host "Building test suite binary $RelativePath"
            go test -c -o "$RelativePath\test.exe" $RelativePath
        }
    }

    ForEach($dir in $integration_api_dirs) {
        Set-Location $dir.FullName
        Write-Host "Running $($PWD.Path)"
        $pinfo = New-Object System.Diagnostics.ProcessStartInfo
        $pinfo.FileName = "$($PWD.Path)\test.exe"
        $pinfo.WorkingDirectory = "$($PWD.Path)"
        $pinfo.RedirectStandardError = $true
        $pinfo.UseShellExecute = $false
        $pinfo.Arguments = $env:INTEGRATION_TESTFLAGS
        $p = New-Object System.Diagnostics.Process
        $p.StartInfo = $pinfo
        $p.Start() | Out-Null
        $p.WaitForExit()
        $err = $p.StandardError.ReadToEnd()
        if (($LASTEXITCODE -ne 0) -and ($err -notlike "*warning: no tests to run*")) {
            Throw "Integration tests failed: $err"
        }
    }
}

# Start of main code.
Try {
    Write-Host -ForegroundColor Cyan "INFO: make.ps1 starting at $(Get-Date)"
@@ -331,13 +369,13 @@ Try {
    # Handle the "-All" shortcut to turn on all things we can handle.
    # Note we expressly only include the items which can run in a container - the validations tests cannot
    # as they require the .git directory which is excluded from the image by .dockerignore
-    if ($All) { $Client=$True; $Daemon=$True; $TestUnit=$True }
+    if ($All) { $Client=$True; $Daemon=$True; $TestUnit=$True; }

    # Handle the "-Binary" shortcut to build both client and daemon.
    if ($Binary) { $Client = $True; $Daemon = $True }

    # Default to building the daemon if not asked for anything explicitly.
-    if (-not($Client) -and -not($Daemon) -and -not($DCO) -and -not($PkgImports) -and -not($GoFormat) -and -not($TestUnit)) { $Daemon=$True }
+    if (-not($Client) -and -not($Daemon) -and -not($DCO) -and -not($PkgImports) -and -not($GoFormat) -and -not($TestUnit) -and -not($TestIntegration)) { $Daemon=$True }

    # Verify git is installed
    if ($(Get-Command git -ErrorAction SilentlyContinue) -eq $nil) { Throw "Git does not appear to be installed" }
@@ -425,6 +463,9 @@ Try {
    # Run unit tests
    if ($TestUnit) { Run-UnitTests }

    # Run integration tests
    if ($TestIntegration) { Run-IntegrationTests }

    # Gratuitous ASCII art.
    if ($Daemon -or $Client) {
        Write-Host

@@ -14,4 +14,4 @@ export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
. ${SCRIPTDIR}/toml
. ${SCRIPTDIR}/changelog-well-formed
. ${SCRIPTDIR}/changelog-date-descending
-. ${SCRIPTDIR}/deprecate-integration-cli
+#. ${SCRIPTDIR}/deprecate-integration-cli

@@ -141,6 +141,39 @@ func (s *DockerRegistrySuite) TearDownTest(c *check.C) {
|
||||
s.ds.TearDownTest(c)
|
||||
}
|
||||
|
||||
func init() {
|
||||
check.Suite(&DockerSchema1RegistrySuite{
|
||||
ds: &DockerSuite{},
|
||||
})
|
||||
}
|
||||
|
||||
type DockerSchema1RegistrySuite struct {
|
||||
ds *DockerSuite
|
||||
reg *registry.V2
|
||||
d *daemon.Daemon
|
||||
}
|
||||
|
||||
func (s *DockerSchema1RegistrySuite) OnTimeout(c *check.C) {
|
||||
s.d.DumpStackAndQuit()
|
||||
}
|
||||
|
||||
func (s *DockerSchema1RegistrySuite) SetUpTest(c *check.C) {
|
||||
testRequires(c, DaemonIsLinux, RegistryHosting, NotArm64, testEnv.IsLocalDaemon)
|
||||
s.reg = registry.NewV2(c, registry.Schema1)
|
||||
s.reg.WaitReady(c)
|
||||
s.d = daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution))
|
||||
}
|
||||
|
||||
func (s *DockerSchema1RegistrySuite) TearDownTest(c *check.C) {
|
||||
if s.reg != nil {
|
||||
s.reg.Close()
|
||||
}
|
||||
if s.d != nil {
|
||||
s.d.Stop(c)
|
||||
}
|
||||
s.ds.TearDownTest(c)
|
||||
}
|
||||
|
||||
func init() {
|
||||
check.Suite(&DockerRegistryAuthHtpasswdSuite{
|
||||
ds: &DockerSuite{},
|
||||
|
||||

@@ -623,29 +623,6 @@ func (s *DockerSuite) TestContainerAPICreateMultipleNetworksConfig(c *check.C) {
	c.Assert(msg, checker.Contains, "net3")
}

-func (s *DockerSuite) TestContainerAPICreateWithHostName(c *check.C) {
-	domainName := "test-domain"
-	hostName := "test-hostname"
-	config := containertypes.Config{
-		Image:      "busybox",
-		Hostname:   hostName,
-		Domainname: domainName,
-	}
-
-	cli, err := client.NewClientWithOpts(client.FromEnv)
-	assert.NilError(c, err)
-	defer cli.Close()
-
-	container, err := cli.ContainerCreate(context.Background(), &config, &containertypes.HostConfig{}, &networktypes.NetworkingConfig{}, "")
-	assert.NilError(c, err)
-
-	containerJSON, err := cli.ContainerInspect(context.Background(), container.ID)
-	assert.NilError(c, err)
-
-	c.Assert(containerJSON.Config.Hostname, checker.Equals, hostName, check.Commentf("Mismatched Hostname"))
-	c.Assert(containerJSON.Config.Domainname, checker.Equals, domainName, check.Commentf("Mismatched Domainname"))
-}
-
func (s *DockerSuite) TestContainerAPICreateBridgeNetworkMode(c *check.C) {
	// Windows does not support bridge
	testRequires(c, DaemonIsLinux)

@@ -3,9 +3,12 @@ package main
import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
	"regexp"
	"strings"

	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/distribution/manifest/schema2"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/integration-cli/checker"
@@ -77,6 +80,10 @@ func (s *DockerRegistrySuite) TestPullByTagDisplaysDigest(c *check.C) {
	testPullByTagDisplaysDigest(c)
}

+func (s *DockerSchema1RegistrySuite) TestPullByTagDisplaysDigest(c *check.C) {
+	testPullByTagDisplaysDigest(c)
+}
+
func testPullByDigest(c *check.C) {
	testRequires(c, DaemonIsLinux)
	pushDigest, err := setupImage(c)
@@ -99,6 +106,10 @@ func (s *DockerRegistrySuite) TestPullByDigest(c *check.C) {
	testPullByDigest(c)
}

+func (s *DockerSchema1RegistrySuite) TestPullByDigest(c *check.C) {
+	testPullByDigest(c)
+}
+
func testPullByDigestNoFallback(c *check.C) {
	testRequires(c, DaemonIsLinux)
	// pull from the registry using the <name>@<digest> reference
@@ -112,6 +123,10 @@ func (s *DockerRegistrySuite) TestPullByDigestNoFallback(c *check.C) {
	testPullByDigestNoFallback(c)
}

+func (s *DockerSchema1RegistrySuite) TestPullByDigestNoFallback(c *check.C) {
+	testPullByDigestNoFallback(c)
+}
+
func (s *DockerRegistrySuite) TestCreateByDigest(c *check.C) {
	pushDigest, err := setupImage(c)
	assert.NilError(c, err, "error setting up image")
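
The shape repeated throughout these hunks is one unexported test body shared by both registry flavors. Schematically (names hypothetical, not from this diff):

    // One shared body holds the assertions...
    func testPullSomething(c *check.C) {
        testRequires(c, DaemonIsLinux)
        // ...shared pull assertions...
    }

    // ...and each suite adds only a thin delegating method, so the identical
    // scenario runs against both a schema2 and a schema1 registry.
    func (s *DockerRegistrySuite) TestPullSomething(c *check.C)        { testPullSomething(c) }
    func (s *DockerSchema1RegistrySuite) TestPullSomething(c *check.C) { testPullSomething(c) }
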
@@ -546,3 +561,131 @@ func (s *DockerRegistrySuite) TestPullFailsWithAlteredManifest(c *check.C) {
	expectedErrorMsg := fmt.Sprintf("manifest verification failed for digest %s", manifestDigest)
	assert.Assert(c, is.Contains(out, expectedErrorMsg))
}
+
+// TestPullFailsWithAlteredManifest tests that a `docker pull` fails when
+// we have modified a manifest blob and its digest cannot be verified.
+// This is the schema1 version of the test.
+func (s *DockerSchema1RegistrySuite) TestPullFailsWithAlteredManifest(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	manifestDigest, err := setupImage(c)
+	c.Assert(err, checker.IsNil, check.Commentf("error setting up image"))
+
+	// Load the target manifest blob.
+	manifestBlob := s.reg.ReadBlobContents(c, manifestDigest)
+
+	var imgManifest schema1.Manifest
+	err = json.Unmarshal(manifestBlob, &imgManifest)
+	c.Assert(err, checker.IsNil, check.Commentf("unable to decode image manifest from blob"))
+
+	// Change a layer in the manifest.
+	imgManifest.FSLayers[0] = schema1.FSLayer{
+		BlobSum: digest.Digest("sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"),
+	}
+
+	// Move the existing data file aside, so that we can replace it with a
+	// malicious blob of data. NOTE: we defer the returned undo func.
+	undo := s.reg.TempMoveBlobData(c, manifestDigest)
+	defer undo()
+
+	alteredManifestBlob, err := json.MarshalIndent(imgManifest, "", " ")
+	c.Assert(err, checker.IsNil, check.Commentf("unable to encode altered image manifest to JSON"))
+
+	s.reg.WriteBlobContents(c, manifestDigest, alteredManifestBlob)
+
+	// Now try pulling that image by digest. We should get an error about
+	// digest verification for the manifest digest.
+
+	// Pull from the registry using the <name>@<digest> reference.
+	imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest)
+	out, exitStatus, _ := dockerCmdWithError("pull", imageReference)
+	c.Assert(exitStatus, checker.Not(check.Equals), 0)
+
+	expectedErrorMsg := fmt.Sprintf("image verification failed for digest %s", manifestDigest)
+	c.Assert(out, checker.Contains, expectedErrorMsg)
+}
+
+// TestPullFailsWithAlteredLayer tests that a `docker pull` fails when
+// we have modified a layer blob and its digest cannot be verified.
+// This is the schema2 version of the test.
+func (s *DockerRegistrySuite) TestPullFailsWithAlteredLayer(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	manifestDigest, err := setupImage(c)
+	c.Assert(err, checker.IsNil)
+
+	// Load the target manifest blob.
+	manifestBlob := s.reg.ReadBlobContents(c, manifestDigest)
+
+	var imgManifest schema2.Manifest
+	err = json.Unmarshal(manifestBlob, &imgManifest)
+	c.Assert(err, checker.IsNil)
+
+	// Next, get the digest of one of the layers from the manifest.
+	targetLayerDigest := imgManifest.Layers[0].Digest
+
+	// Move the existing data file aside, so that we can replace it with a
+	// malicious blob of data. NOTE: we defer the returned undo func.
+	undo := s.reg.TempMoveBlobData(c, targetLayerDigest)
+	defer undo()
+
+	// Now make a fake data blob in this directory.
+	s.reg.WriteBlobContents(c, targetLayerDigest, []byte("This is not the data you are looking for."))
+
+	// Now try pulling that image by digest. We should get an error about
+	// digest verification for the target layer digest.
+
+	// Remove distribution cache to force a re-pull of the blobs
+	if err := os.RemoveAll(filepath.Join(testEnv.DaemonInfo.DockerRootDir, "image", s.d.StorageDriver(), "distribution")); err != nil {
+		c.Fatalf("error clearing distribution cache: %v", err)
+	}
+
+	// Pull from the registry using the <name>@<digest> reference.
+	imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest)
+	out, exitStatus, _ := dockerCmdWithError("pull", imageReference)
+	c.Assert(exitStatus, checker.Not(check.Equals), 0, check.Commentf("expected a non-zero exit status"))
+
+	expectedErrorMsg := fmt.Sprintf("filesystem layer verification failed for digest %s", targetLayerDigest)
+	c.Assert(out, checker.Contains, expectedErrorMsg, check.Commentf("expected error message in output: %s", out))
+}
+
+// TestPullFailsWithAlteredLayer tests that a `docker pull` fails when
+// we have modified a layer blob and its digest cannot be verified.
+// This is the schema1 version of the test.
+func (s *DockerSchema1RegistrySuite) TestPullFailsWithAlteredLayer(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	manifestDigest, err := setupImage(c)
+	c.Assert(err, checker.IsNil)
+
+	// Load the target manifest blob.
+	manifestBlob := s.reg.ReadBlobContents(c, manifestDigest)
+
+	var imgManifest schema1.Manifest
+	err = json.Unmarshal(manifestBlob, &imgManifest)
+	c.Assert(err, checker.IsNil)
+
+	// Next, get the digest of one of the layers from the manifest.
+	targetLayerDigest := imgManifest.FSLayers[0].BlobSum
+
+	// Move the existing data file aside, so that we can replace it with a
+	// malicious blob of data. NOTE: we defer the returned undo func.
+	undo := s.reg.TempMoveBlobData(c, targetLayerDigest)
+	defer undo()
+
+	// Now make a fake data blob in this directory.
+	s.reg.WriteBlobContents(c, targetLayerDigest, []byte("This is not the data you are looking for."))
+
+	// Now try pulling that image by digest. We should get an error about
+	// digest verification for the target layer digest.
+
+	// Remove distribution cache to force a re-pull of the blobs
+	if err := os.RemoveAll(filepath.Join(testEnv.DaemonInfo.DockerRootDir, "image", s.d.StorageDriver(), "distribution")); err != nil {
+		c.Fatalf("error clearing distribution cache: %v", err)
+	}
+
+	// Pull from the registry using the <name>@<digest> reference.
+	imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest)
+	out, exitStatus, _ := dockerCmdWithError("pull", imageReference)
+	c.Assert(exitStatus, checker.Not(check.Equals), 0, check.Commentf("expected a non-zero exit status"))
+
+	expectedErrorMsg := fmt.Sprintf("filesystem layer verification failed for digest %s", targetLayerDigest)
+	c.Assert(out, checker.Contains, expectedErrorMsg, check.Commentf("expected error message in output: %s", out))
+}
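
All four tamper tests lean on the same invariant: registry blobs are content-addressed, so the digest recorded in the manifest is simply the SHA-256 of the blob's bytes, and any modification breaks the match. A stand-alone illustration of that check (this is a sketch of the concept, not the daemon's actual verification code); the expected digest below is the same placeholder value the test injects:

    package main

    import (
        "crypto/sha256"
        "fmt"
    )

    func main() {
        // The altered blob the test writes into the registry's data directory.
        blob := []byte("This is not the data you are looking for.")

        // The digest the manifest still advertises for that blob.
        expected := "sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"

        // Content addressing: recompute the digest and compare.
        actual := fmt.Sprintf("sha256:%x", sha256.Sum256(blob))
        if actual != expected {
            fmt.Printf("filesystem layer verification failed for digest %s (got %s)\n", expected, actual)
        }
    }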

@@ -35,6 +35,7 @@ import (
	"github.com/docker/docker/pkg/mount"
	"github.com/docker/go-units"
	"github.com/docker/libnetwork/iptables"
+	"github.com/docker/libtrust"
	"github.com/go-check/check"
	"github.com/kr/pty"
	"golang.org/x/sys/unix"
@@ -550,6 +551,23 @@ func (s *DockerDaemonSuite) TestDaemonAllocatesListeningPort(c *check.C) {
	}
}

+func (s *DockerDaemonSuite) TestDaemonKeyGeneration(c *check.C) {
+	// TODO: skip or update for Windows daemon
+	os.Remove("/etc/docker/key.json")
+	s.d.Start(c)
+	s.d.Stop(c)
+
+	k, err := libtrust.LoadKeyFile("/etc/docker/key.json")
+	if err != nil {
+		c.Fatalf("Error opening key file")
+	}
+	kid := k.KeyID()
+	// Test Key ID is a valid fingerprint (e.g. QQXN:JY5W:TBXI:MK3X:GX6P:PD5D:F56N:NHCS:LVRZ:JA46:R24J:XEFF)
+	if len(kid) != 59 {
+		c.Fatalf("Bad key ID: %s", kid)
+	}
+}
+
// GH#11320 - verify that the daemon exits on failure properly
// Note that this explicitly tests the conflict of {-b,--bridge} and {--bip} options as the means
// to get a daemon init failure; no other tests for -b/--bip conflict are therefore required
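
The len(kid) != 59 assertion is a cheap proxy for the fingerprint's shape: twelve colon-separated groups of four base32 characters, so 12×4 + 11 = 59. A stricter check, sketched here purely for illustration (assuming libtrust's base32 alphabet of A-Z and 2-7, which the example fingerprint in the comment above is consistent with):

    package main

    import (
        "fmt"
        "regexp"
    )

    // Twelve groups of four base32 characters separated by colons, e.g.
    // QQXN:JY5W:TBXI:MK3X:GX6P:PD5D:F56N:NHCS:LVRZ:JA46:R24J:XEFF.
    var keyIDPattern = regexp.MustCompile(`^([A-Z2-7]{4}:){11}[A-Z2-7]{4}$`)

    func main() {
        kid := "QQXN:JY5W:TBXI:MK3X:GX6P:PD5D:F56N:NHCS:LVRZ:JA46:R24J:XEFF"
        fmt.Println(len(kid))                      // 59
        fmt.Println(keyIDPattern.MatchString(kid)) // true
    }
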
@@ -1174,6 +1192,59 @@ func (s *DockerDaemonSuite) TestDaemonUnixSockCleanedUp(c *check.C) {
	}
}

+func (s *DockerDaemonSuite) TestDaemonWithWrongkey(c *check.C) {
+	type Config struct {
+		Crv string `json:"crv"`
+		D   string `json:"d"`
+		Kid string `json:"kid"`
+		Kty string `json:"kty"`
+		X   string `json:"x"`
+		Y   string `json:"y"`
+	}
+
+	os.Remove("/etc/docker/key.json")
+	s.d.Start(c)
+	s.d.Stop(c)
+
+	config := &Config{}
+	bytes, err := ioutil.ReadFile("/etc/docker/key.json")
+	if err != nil {
+		c.Fatalf("Error reading key.json file: %s", err)
+	}
+
+	// byte[] to Data-Struct
+	if err := json.Unmarshal(bytes, &config); err != nil {
+		c.Fatalf("Error Unmarshal: %s", err)
+	}
+
+	// replace config.Kid with the fake value
+	config.Kid = "VSAJ:FUYR:X3H2:B2VZ:KZ6U:CJD5:K7BX:ZXHY:UZXT:P4FT:MJWG:HRJ4"
+
+	// NEW Data-Struct to byte[]
+	newBytes, err := json.Marshal(&config)
+	if err != nil {
+		c.Fatalf("Error Marshal: %s", err)
+	}
+
+	// write back
+	if err := ioutil.WriteFile("/etc/docker/key.json", newBytes, 0400); err != nil {
+		c.Fatalf("Error ioutil.WriteFile: %s", err)
+	}
+
+	defer os.Remove("/etc/docker/key.json")
+
+	if err := s.d.StartWithError(); err == nil {
+		c.Fatalf("It should not be successful to start daemon with wrong key: %v", err)
+	}
+
+	content, err := s.d.ReadLogFile()
+	c.Assert(err, checker.IsNil)
+
+	if !strings.Contains(string(content), "Public Key ID does not match") {
+		c.Fatalf("Missing KeyID message from daemon logs: %s", string(content))
+	}
+}
+
func (s *DockerDaemonSuite) TestDaemonRestartKillWait(c *check.C) {
	s.d.StartWithBusybox(c)
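
The middle of this test is a plain JSON read-modify-write. Condensed, the tamper step looks like the sketch below, using a generic map rather than the test's dedicated struct (the path and the mismatched key ID are the ones from the test; the rest is an illustrative reduction, not the test code):

    package main

    import (
        "encoding/json"
        "io/ioutil"
        "log"
    )

    func main() {
        raw, err := ioutil.ReadFile("/etc/docker/key.json")
        if err != nil {
            log.Fatal(err)
        }

        var key map[string]interface{}
        if err := json.Unmarshal(raw, &key); err != nil {
            log.Fatal(err)
        }

        // Swap in a key ID that cannot match the stored private key material,
        // so the daemon's "Public Key ID does not match" check trips on start.
        key["kid"] = "VSAJ:FUYR:X3H2:B2VZ:KZ6U:CJD5:K7BX:ZXHY:UZXT:P4FT:MJWG:HRJ4"

        out, err := json.Marshal(key)
        if err != nil {
            log.Fatal(err)
        }
        // 0400, matching the restrictive mode the test writes with.
        if err := ioutil.WriteFile("/etc/docker/key.json", out, 0400); err != nil {
            log.Fatal(err)
        }
    }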

@@ -1715,7 +1786,7 @@ func (s *DockerDaemonSuite) TestDaemonNoSpaceLeftOnDeviceError(c *check.C) {
	dockerCmd(c, "run", "--rm", "-v", testDir+":/test", "busybox", "sh", "-c", "dd of=/test/testfs.img bs=1M seek=3 count=0")
	icmd.RunCommand("mkfs.ext4", "-F", filepath.Join(testDir, "testfs.img")).Assert(c, icmd.Success)

-	dockerCmd(c, "run", "--privileged", "--rm", "-v", testDir+":/test:shared", "busybox", "sh", "-c", "mkdir -p /test/test-mount/vfs && mount -n /test/testfs.img /test/test-mount/vfs")
+	dockerCmd(c, "run", "--privileged", "--rm", "-v", testDir+":/test:shared", "busybox", "sh", "-c", "mkdir -p /test/test-mount/vfs && mount -n -t ext4 /test/testfs.img /test/test-mount/vfs")
	defer mount.Unmount(filepath.Join(testDir, "test-mount"))

	s.d.Start(c, "--storage-driver", "vfs", "--data-root", filepath.Join(testDir, "test-mount"))