Mirror of https://github.com/moby/moby.git (synced 2026-01-15 18:02:03 +00:00)

Compare commits: 290 commits
Commit SHA1s included in this comparison:

3f46cadf39 b57d41c4bf 29edd17909 fd550344b1 2834da861b 1f43ad3a16 b8067d159f a38ca9a548 9c9a6cb941 380ded6309
c58a765707 ae0331d8f5 b2d06baf9b 57bf7a8c70 bb8fcf3031 c49ce64514 aa51a79ef9 ecde7b9b7c 7eda35fd05 0462b5e318
b23d267cb5 0dd5959eeb 0bb761698c 8e1c366773 1fa6a46c5d 89604f1df1 14623770e1 250792c1a5 eacbbdeec6 73520a5ab7
36f87754c2 ece7e02b86 adb9e9135a f3d6830d27 cdad178d02 fb2983ab0d a6928fd396 511cd1c0a7 e2b95a9525 099d3ee008
0c5e816638 3fc5bfd565 f96dc9d1a5 4d35864c3d 5d2006256f 499e15d4ab 325076df0c 97688e8d06 3e993060ee 91ba210bc8
bfbd1004f4 5f9d99b4cc ea6c76ee03 fab94808f5 6df89e7961 466dda63dc 6c73266a71 ae3a1ac602 cd89a35ea0 da039bf8e6
58eabf4b36 888c14749d 7435e4a1be ba03cd7a63 27e7c650b8 d72e434d30 224b393eb3 b1ac2a53ed a8af27bbae 7d49b014b6
9d04c28def cdb3590e1a 970fc1b6f7 d8a5e8928b d96d20d45f 29e0db25e7 bcbcbb73fa 5172617617 45daa6de06 241d685574
489cd7edfc 31ac5cb6d9 667c7d70b3 eaae4b5fb6 78be7ebad7 f8806f2b80 d0154d3e59 45f9d679f8 57a042b77c 564abf9157
ba12b2d0bc cf1001d555 1b187e0959 f410dbda88 c3fa7c1779 dbea045e0d a527e5a546 9ba5c5d70e 3509feb1a5 185651d26b
7d9c50db2b ebc6c065d2 83278485c0 a0a86d0982 25a80bd48e d7d91b6bc5 96601d1211 4eebd2c920 e22d04e8a9 40650c6982
1c79c893b1 d9fd0c2db4 8a5f141b0e 9d8c8382d3 8b920b2812 4642704ed7 7acef8101e da3b31fb2c 4f7588f6ad c95e17638f
b3bafd4b78 e7ab601ab9 b3791dea92 27568e54ce a427477220 a91bcc677b 2c54f6f316 6cbca96bfa 79b22645fc 6422ff2804
872b6d5df9 d7b743b856 43b7c78cbd 5ea21c927c f1ec5bf14f 3fc36bcac4 b5c99c0e95 0a83a476d8 2a272a0c5d f158d2e809
11bf52e9e3 51d7f95c4b 68451d3c99 d377cd3810 cc8bd2016e 4ad6854eb3 c6cdfbf495 00c988caa4 ad386f64e5 ec82bc35c3
f3aebbf9d8 f80feba181 26e516dff4 7d742ebf75 59169d0f97 d4e70f6325 74b71c41ac fc58c829e8 44b7a42fc6 4f65e35f02
a7daab5df4 36295bb9ed 2e92272753 5b8a41934b 795461eceb e1e58409a1 b5b6e1b24c db275ddbc1 a9e22ee5e7 25905ab6c6
749e35cf5e 094df015b1 843e51459f b5280352e9 7243860557 f01a2eb710 4390ab275a 67670ddc80 0cabd9dfba 0b5e1f904a
725e699741 c5c9dc0376 31f9ae0d19 cb0a9d713c 6b258ce567 83aaa3428f 6439824449 09ee47de39 37f866285a adfed82ab8
409707b633 4cc249d7e3 81e267c013 ec7fe73690 b1c526b4a9 0d95e1680a 6c643bc366 a18dae049f abcc70b9ef 071d8b21e9
8b6a045aa4 8653af5854 20a2807caa f1ecce6877 d6afe88b3c 03918c5b07 c91318e6c0 2e8bf8b0ab a420005d4e d8fa2f8071
0ab6f07c31 2d643b6835 4fbfb618c3 b8323abe0a 86eff82789 dc963a00c1 b8cacdf324 cea56c1d9c 6de8ba3bc5 8f506a51e5
264fc2fac8 849c723078 82b25f0947 7b2e47846c 8d9e3502ab fbe2330989 f5d84a45cc 072ea62fcc 454a7a7358 a60603bfa3
2be7f48561 6430e49a55 cc90726fb8 19a0f886da f14c23a90f 82ec984d10 003bf197d7 444a1597ff 330857ad0f 2fce935df2
ecb03c4cda accda3119d 7c09e4e607 99356b6e17 6a0f71cac9 e2011affd4 3e957c6240 4db84b197d 697956a8c7 c1cd4e5eb4
f6a642f588 69a307600d 34bc972519 06e319e210 108d7d1004 c02ca31fbc d01ee23c15 081987b647 af14f3e7d3 d0b4bdbd25
73aa7e933c d494520aa0 183ca46099 c164eec7e9 f7853799fc 80bf93c9d7 e1e6d35277 daeb6fb0b7 eeee17eaad c1c5f16b8b
be6e92a57b ef56b83597 452ff75159 fa21996da5 126d4cf672 230f178f8b c7fbe1c2ba 1c00755826 7b964974e7 e4079dbbf1
770200d154 8be5696c37 485cb90b77 fce915897c 1daeaec333 084b7cec1a ff061e28c1 88a5bca43c 09e804f570 0823d76ec5
**.github/workflows/.test.yml** (25 changed lines)

```
@@ -21,7 +21,7 @@ on:
default: "graphdriver"
env:
GO_VERSION: "1.23.7"
GO_VERSION: "1.23.8"
GOTESTLIST_VERSION: v0.3.1
TESTSTAT_VERSION: v0.1.25
ITG_CLI_MATRIX_SIZE: 6
@@ -47,7 +47,6 @@ jobs:
script: |
let includes = [
{ mode: '' },
{ mode: 'rootless' },
{ mode: 'systemd' },
];
if ("${{ inputs.storage }}" == "snapshotter") {
@@ -84,9 +83,13 @@ jobs:
run: |
CACHE_DEV_SCOPE=dev
if [[ "${{ matrix.mode }}" == *"firewalld"* ]]; then
echo "DOCKER_FIREWALLD=true" >> $GITHUB_ENV
echo "FIREWALLD=true" >> $GITHUB_ENV
CACHE_DEV_SCOPE="${CACHE_DEV_SCOPE}firewalld"
fi
if [[ "${{ matrix.mode }}" == *"systemd"* ]]; then
echo "SYSTEMD=true" >> $GITHUB_ENV
CACHE_DEV_SCOPE="${CACHE_DEV_SCOPE}systemd"
fi
echo "CACHE_DEV_SCOPE=${CACHE_DEV_SCOPE}" >> $GITHUB_ENV
-
name: Set up Docker Buildx
@@ -219,7 +222,7 @@ jobs:
retention-days: 1
integration-flaky:
runs-on: ubuntu-20.04
runs-on: ubuntu-24.04
timeout-minutes: 120 # guardrails timeout for the whole job
continue-on-error: ${{ github.event_name != 'pull_request' }}
steps:
@@ -264,14 +267,12 @@ jobs:
with:
script: |
let includes = [
{ os: 'ubuntu-20.04', mode: '' },
{ os: 'ubuntu-20.04', mode: 'rootless' },
{ os: 'ubuntu-20.04', mode: 'systemd' },
{ os: 'ubuntu-24.04', mode: '' },
{ os: 'ubuntu-22.04', mode: '' },
{ os: 'ubuntu-22.04', mode: 'rootless' },
{ os: 'ubuntu-22.04', mode: 'systemd' },
{ os: 'ubuntu-24.04', mode: '' },
// { os: 'ubuntu-24.04', mode: 'rootless' }, // FIXME: https://github.com/moby/moby/pull/49579#issuecomment-2698622223
{ os: 'ubuntu-24.04', mode: 'systemd' },
// { os: 'ubuntu-20.04', mode: 'rootless-systemd' }, // FIXME: https://github.com/moby/moby/issues/44084
// { os: 'ubuntu-24.04', mode: 'rootless-systemd' }, // FIXME: https://github.com/moby/moby/issues/44084
];
if ("${{ inputs.storage }}" == "snapshotter") {
@@ -318,7 +319,7 @@ jobs:
CACHE_DEV_SCOPE="${CACHE_DEV_SCOPE}systemd"
fi
if [[ "${{ matrix.mode }}" == *"firewalld"* ]]; then
echo "DOCKER_FIREWALLD=true" >> $GITHUB_ENV
echo "FIREWALLD=true" >> $GITHUB_ENV
CACHE_DEV_SCOPE="${CACHE_DEV_SCOPE}firewalld"
fi
echo "CACHE_DEV_SCOPE=${CACHE_DEV_SCOPE}" >> $GITHUB_ENV
@@ -485,7 +486,7 @@ jobs:
echo ${{ steps.set.outputs.matrix }}
integration-cli:
runs-on: ubuntu-20.04
runs-on: ubuntu-24.04
timeout-minutes: 120 # guardrails timeout for the whole job
continue-on-error: ${{ github.event_name != 'pull_request' }}
needs:
@@ -508,7 +509,7 @@ jobs:
run: |
CACHE_DEV_SCOPE=dev
if [[ "${{ matrix.mode }}" == *"firewalld"* ]]; then
echo "DOCKER_FIREWALLD=true" >> $GITHUB_ENV
echo "FIREWALLD=true" >> $GITHUB_ENV
CACHE_DEV_SCOPE="${CACHE_DEV_SCOPE}firewalld"
fi
echo "CACHE_DEV_SCOPE=${CACHE_DEV_SCOPE}" >> $GITHUB_ENV
```
**.github/workflows/.windows.yml** (2 changed lines)

```
@@ -28,7 +28,7 @@ on:
default: false
env:
GO_VERSION: "1.23.7"
GO_VERSION: "1.23.8"
GOTESTLIST_VERSION: v0.3.1
TESTSTAT_VERSION: v0.1.25
WINDOWS_BASE_IMAGE: mcr.microsoft.com/windows/servercore
```
**.github/workflows/arm64.yml** (2 changed lines)

```
@@ -23,7 +23,7 @@ on:
pull_request:
env:
GO_VERSION: "1.23.7"
GO_VERSION: "1.23.8"
TESTSTAT_VERSION: v0.1.25
DESTDIR: ./build
SETUP_BUILDX_VERSION: edge
```
**.github/workflows/buildkit.yml** (2 changed lines)

```
@@ -23,7 +23,7 @@ on:
pull_request:
env:
GO_VERSION: "1.23.7"
GO_VERSION: "1.23.8"
DESTDIR: ./build
SETUP_BUILDX_VERSION: edge
SETUP_BUILDKIT_IMAGE: moby/buildkit:latest
```
**.github/workflows/ci.yml** (20 changed lines)

```
@@ -154,3 +154,23 @@ jobs:
uses: github/codeql-action/upload-sarif@v3
with:
sarif_file: ${{ env.DESTDIR }}/govulncheck.out
build-dind:
runs-on: ubuntu-24.04
needs:
- validate-dco
steps:
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build dind image
uses: docker/bake-action@v6
with:
targets: dind
set: |
*.output=type=cacheonly
```
**.github/workflows/codeql.yml** (2 changed lines)

```
@@ -58,7 +58,7 @@ jobs:
- name: Update Go
uses: actions/setup-go@v5
with:
go-version: "1.23.7"
go-version: "1.23.8"
- name: Initialize CodeQL
uses: github/codeql-action/init@v3
with:
```
**.github/workflows/test.yml** (2 changed lines)

```
@@ -23,7 +23,7 @@ on:
pull_request:
env:
GO_VERSION: "1.23.7"
GO_VERSION: "1.23.8"
GIT_PAGER: "cat"
PAGER: "cat"
SETUP_BUILDX_VERSION: edge
```
```
@@ -40,7 +40,7 @@ linters:
run:
# prevent golangci-lint from deducting the go version to lint for through go.mod,
# which causes it to fallback to go1.17 semantics.
go: "1.23.7"
go: "1.23.8"
concurrency: 2
# Only supported with go modules enabled (build flag -mod=vendor only valid when using modules)
# modules-download-mode: vendor
@@ -212,6 +212,11 @@ issues:
linters:
- staticcheck
# FIXME(thaJeztah): ignoring these transitional utilities until BuildKit is vendored with https://github.com/moby/moby/pull/49743
- text: "SA1019: idtools\\.(ToUserIdentityMapping|FromUserIdentityMapping) is deprecated"
linters:
- staticcheck
# Ignore "nested context in function literal (fatcontext)" as we intentionally set up tracing on a base-context for tests.
# FIXME(thaJeztah): see if there's a more iodiomatic way to do this.
- text: 'nested context in function literal'
```
**AUTHORS** (4 changed lines)

```
@@ -293,6 +293,7 @@ Brandon Liu <bdon@bdon.org>
Brandon Philips <brandon.philips@coreos.com>
Brandon Rhodes <brandon@rhodesmill.org>
Brendan Dixon <brendand@microsoft.com>
Brendon Smith <bws@bws.bio>
Brennan Kinney <5098581+polarathene@users.noreply.github.com>
Brent Salisbury <brent.salisbury@docker.com>
Brett Higgins <brhiggins@arbor.net>
@@ -347,6 +348,7 @@ Casey Bisson <casey.bisson@joyent.com>
Catalin Pirvu <pirvu.catalin94@gmail.com>
Ce Gao <ce.gao@outlook.com>
Cedric Davies <cedricda@microsoft.com>
Cesar Talledo <cesar.talledo@docker.com>
Cezar Sa Espinola <cezarsa@gmail.com>
Chad Swenson <chadswen@gmail.com>
Chance Zibolski <chance.zibolski@gmail.com>
@@ -1281,6 +1283,7 @@ Krasi Georgiev <krasi@vip-consult.solutions>
Krasimir Georgiev <support@vip-consult.co.uk>
Kris-Mikael Krister <krismikael@protonmail.com>
Kristian Haugene <kristian.haugene@capgemini.com>
Kristian Heljas <kristian@kristian.ee>
Kristina Zabunova <triara.xiii@gmail.com>
Krystian Wojcicki <kwojcicki@sympatico.ca>
Kunal Kushwaha <kushwaha_kunal_v7@lab.ntt.co.jp>
@@ -1712,6 +1715,7 @@ Patrick Hemmer <patrick.hemmer@gmail.com>
Patrick St. laurent <patrick@saint-laurent.us>
Patrick Stapleton <github@gdi2290.com>
Patrik Cyvoct <patrik@ptrk.io>
Patrik Leifert <patrikleifert@hotmail.com>
pattichen <craftsbear@gmail.com>
Paul "TBBle" Hampson <Paul.Hampson@Pobox.com>
Paul <paul9869@gmail.com>
```
**Dockerfile** (34 changed lines)

```
@@ -1,6 +1,6 @@
# syntax=docker/dockerfile:1.7
ARG GO_VERSION=1.23.7
ARG GO_VERSION=1.23.8
ARG BASE_DEBIAN_DISTRO="bookworm"
ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}"
ARG XX_VERSION=1.6.1
@@ -13,7 +13,7 @@ ARG DOCKERCLI_REPOSITORY="https://github.com/docker/cli.git"
# cli version used for integration-cli tests
ARG DOCKERCLI_INTEGRATION_REPOSITORY="https://github.com/docker/cli.git"
ARG DOCKERCLI_INTEGRATION_VERSION=v17.06.2-ce
ARG DOCKERCLI_INTEGRATION_VERSION=v18.06.3-ce
# BUILDX_VERSION is the version of buildx to install in the dev container.
ARG BUILDX_VERSION=0.20.1
ARG COMPOSE_VERSION=v2.33.1
@@ -26,11 +26,16 @@ ARG DOCKER_STATIC=1
# https://hub.docker.com/r/distribution/distribution. This version of
# the registry is used to test schema 2 manifests. Generally, the version
# specified here should match a current release.
ARG REGISTRY_VERSION=3.0.0-rc.1
ARG REGISTRY_VERSION=3.0.0
# delve is currently only supported on linux/amd64 and linux/arm64;
# https://github.com/go-delve/delve/blob/v1.8.1/pkg/proc/native/support_sentinel.go#L1-L6
ARG DELVE_SUPPORTED=${TARGETPLATFORM#linux/amd64} DELVE_SUPPORTED=${DELVE_SUPPORTED#linux/arm64}
# https://github.com/go-delve/delve/blob/v1.24.1/pkg/proc/native/support_sentinel.go#L1
# https://github.com/go-delve/delve/blob/v1.24.1/pkg/proc/native/support_sentinel_linux.go#L1
#
# ppc64le support was added in v1.21.1, but is still experimental, and requires
# the "-tags exp.linuxppc64le" build-tag to be set:
# https://github.com/go-delve/delve/commit/71f12207175a1cc09668f856340d8a543c87dcca
ARG DELVE_SUPPORTED=${TARGETPLATFORM#linux/amd64} DELVE_SUPPORTED=${DELVE_SUPPORTED#linux/arm64} DELVE_SUPPORTED=${DELVE_SUPPORTED#linux/ppc64le}
ARG DELVE_SUPPORTED=${DELVE_SUPPORTED:+"unsupported"}
ARG DELVE_SUPPORTED=${DELVE_SUPPORTED:-"supported"}
@@ -47,6 +52,11 @@ COPY --from=build-dummy /build /build
# base
FROM --platform=$BUILDPLATFORM ${GOLANG_IMAGE} AS base
COPY --from=xx / /
# Disable collecting local telemetry, as collected by Go and Delve;
#
# - https://github.com/go-delve/delve/blob/v1.24.1/CHANGELOG.md#1231-2024-09-23
# - https://go.dev/doc/telemetry#background
RUN go telemetry off && [ "$(go telemetry)" = "off" ] || { echo "Failed to disable Go telemetry"; exit 1; }
RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
RUN apt-get update && apt-get install --no-install-recommends -y file
ENV GO111MODULE=off
@@ -153,7 +163,7 @@ RUN git init . && git remote add origin "https://github.com/go-delve/delve.git"
# from the https://github.com/go-delve/delve repository.
# It can be used to run Docker with a possibility of
# attaching debugger to it.
ARG DELVE_VERSION=v1.23.0
ARG DELVE_VERSION=v1.24.1
RUN git fetch -q --depth 1 origin "${DELVE_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD
FROM base AS delve-supported
@@ -279,7 +289,7 @@ RUN git init . && git remote add origin "https://github.com/opencontainers/runc.
# that is used. If you need to update runc, open a pull request in the containerd
# project first, and update both after that is merged. When updating RUNC_VERSION,
# consider updating runc in vendor.mod accordingly.
ARG RUNC_VERSION=v1.2.5
ARG RUNC_VERSION=v1.2.6
RUN git fetch -q --depth 1 origin "${RUNC_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD
FROM base AS runc-build
@@ -498,7 +508,6 @@ RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
firewalld
RUN sed -i 's/FirewallBackend=nftables/FirewallBackend=iptables/' /etc/firewalld/firewalld.conf
FROM dev-firewalld-${FIREWALLD} AS dev-base
RUN groupadd -r docker
@@ -648,6 +657,15 @@ FROM dev-base AS devcontainer
COPY --link . .
COPY --link --from=gopls /build/ /usr/local/bin/
# usage:
# > docker buildx bake dind
# > docker run -d --restart always --privileged --name devdind -p 12375:2375 docker-dind --debug --host=tcp://0.0.0.0:2375 --tlsverify=false
FROM docker:dind AS dind
COPY --link --from=dockercli /build/docker /usr/local/bin/
COPY --link --from=buildx /buildx /usr/local/libexec/docker/cli-plugins/docker-buildx
COPY --link --from=compose /docker-compose /usr/local/libexec/docker/cli-plugins/docker-compose
COPY --link --from=all / /usr/local/bin/
# usage:
# > make shell
# > SYSTEMD=true make shell
```
```
@@ -5,7 +5,7 @@
# This represents the bare minimum required to build and test Docker.
ARG GO_VERSION=1.23.7
ARG GO_VERSION=1.23.8
ARG BASE_DEBIAN_DISTRO="bookworm"
ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}"
@@ -35,10 +35,10 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
vim-common \
&& rm -rf /var/lib/apt/lists/*
# Install runc, containerd, tini and docker-proxy
# Install runc, containerd, and tini
# Please edit hack/dockerfile/install/<name>.installer to update them.
COPY hack/dockerfile/install hack/dockerfile/install
RUN for i in runc containerd tini proxy dockercli; \
RUN set -e; for i in runc containerd tini dockercli; \
do hack/dockerfile/install/install.sh $i; \
done
ENV PATH=/usr/local/cli:$PATH
@@ -161,7 +161,7 @@ FROM ${WINDOWS_BASE_IMAGE}:${WINDOWS_BASE_IMAGE_TAG}
# Use PowerShell as the default shell
SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop'; $ProgressPreference = 'SilentlyContinue';"]
ARG GO_VERSION=1.23.7
ARG GO_VERSION=1.23.8
ARG GOTESTSUM_VERSION=v1.12.0
ARG GOWINRES_VERSION=v0.3.1
ARG CONTAINERD_VERSION=v1.7.27
```
**Makefile** (5 changed lines)

```
@@ -38,7 +38,6 @@ DOCKER_ENVS := \
-e DOCKERCLI_INTEGRATION_REPOSITORY \
-e DOCKER_DEBUG \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_FIREWALLD \
-e DOCKER_GITCOMMIT \
-e DOCKER_GRAPHDRIVER \
-e DOCKER_LDFLAGS \
@@ -50,6 +49,7 @@ DOCKER_ENVS := \
-e DOCKER_USERLANDPROXY \
-e DOCKERD_ARGS \
-e DELVE_PORT \
-e FIREWALLD \
-e GITHUB_ACTIONS \
-e TEST_FORCE_VALIDATE \
-e TEST_INTEGRATION_DIR \
@@ -57,7 +57,6 @@ DOCKER_ENVS := \
-e TEST_INTEGRATION_FAIL_FAST \
-e TEST_SKIP_INTEGRATION \
-e TEST_SKIP_INTEGRATION_CLI \
-e TEST_IGNORE_CGROUP_CHECK \
-e TESTCOVERAGE \
-e TESTDEBUG \
-e TESTDIRS \
@@ -150,7 +149,7 @@ DOCKER_BUILD_ARGS += --build-arg=DOCKERCLI_INTEGRATION_REPOSITORY
ifdef DOCKER_SYSTEMD
DOCKER_BUILD_ARGS += --build-arg=SYSTEMD=true
endif
ifdef DOCKER_FIREWALLD
ifdef FIREWALLD
DOCKER_BUILD_ARGS += --build-arg=FIREWALLD=true
endif
```
```
@@ -3,7 +3,7 @@ package api // import "github.com/docker/docker/api"
// Common constants for daemon and client.
const (
// DefaultVersion of the current REST API.
DefaultVersion = "1.48"
DefaultVersion = "1.49"
// MinSupportedAPIVersion is the minimum API version that can be supported
// by the API server, specified as "major.minor". Note that the daemon
```
```
@@ -17,7 +17,7 @@ import (
// DebugRequestMiddleware dumps the request to logger
func DebugRequestMiddleware(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) (retErr error) {
return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
logger := log.G(ctx)
// Use a variable for fields to prevent overhead of repeatedly
@@ -28,25 +28,27 @@ func DebugRequestMiddleware(handler func(ctx context.Context, w http.ResponseWri
"request-url": r.RequestURI,
"vars": vars,
}
logger.WithFields(fields).Debugf("handling %s request", r.Method)
defer func() {
if retErr != nil {
handleWithLogs := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
logger.WithFields(fields).Debugf("handling %s request", r.Method)
err := handler(ctx, w, r, vars)
if err != nil {
// TODO(thaJeztah): unify this with Server.makeHTTPHandler, which also logs internal server errors as error-log. See https://github.com/moby/moby/pull/48740#discussion_r1816675574
fields["error-response"] = retErr
fields["status"] = httpstatus.FromError(retErr)
fields["error-response"] = err
fields["status"] = httpstatus.FromError(err)
logger.WithFields(fields).Debugf("error response for %s request", r.Method)
}
}()
return err
}
if r.Method != http.MethodPost {
return handler(ctx, w, r, vars)
return handleWithLogs(ctx, w, r, vars)
}
if err := httputils.CheckForJSON(r); err != nil {
return handler(ctx, w, r, vars)
return handleWithLogs(ctx, w, r, vars)
}
maxBodySize := 4096 // 4KB
if r.ContentLength > int64(maxBodySize) {
return handler(ctx, w, r, vars)
return handleWithLogs(ctx, w, r, vars)
}
body := r.Body
@@ -56,7 +58,7 @@ func DebugRequestMiddleware(handler func(ctx context.Context, w http.ResponseWri
b, err := bufReader.Peek(maxBodySize)
if err != io.EOF {
// either there was an error reading, or the buffer is full (in which case the request is too large)
return handler(ctx, w, r, vars)
return handleWithLogs(ctx, w, r, vars)
}
var postForm map[string]interface{}
@@ -74,7 +76,7 @@ func DebugRequestMiddleware(handler func(ctx context.Context, w http.ResponseWri
}
}
return handler(ctx, w, r, vars)
return handleWithLogs(ctx, w, r, vars)
}
}
```
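The change above swaps a named return plus a deferred log for an inner `handleWithLogs` closure that calls the wrapped handler and logs any error before returning it. A minimal, self-contained sketch of that wrap-and-log pattern, using only `net/http` and the standard logger (the `apiFunc` type and field names here are illustrative, not moby's actual types):

```go
package main

import (
	"context"
	"log"
	"net/http"
)

// apiFunc mirrors the shape of a versioned API handler (illustrative).
type apiFunc func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error

// withErrorLog wraps a handler so any error it returns is logged together
// with the request method before being propagated to the caller.
func withErrorLog(handler apiFunc) apiFunc {
	return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
		log.Printf("handling %s request for %s", r.Method, r.RequestURI)
		err := handler(ctx, w, r, vars)
		if err != nil {
			log.Printf("error response for %s request: %v", r.Method, err)
		}
		return err
	}
}
```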
```
@@ -8,7 +8,7 @@ import (
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
containerpkg "github.com/docker/docker/container"
"github.com/docker/docker/pkg/archive"
"github.com/moby/go-archive"
)
// execBackend includes functions to implement to provide exec functionality.
```
```
@@ -960,21 +960,17 @@ func (c *containerRouter) postContainersResize(ctx context.Context, w http.Respo
}
func (c *containerRouter) postContainersAttach(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
err := httputils.ParseForm(r)
if err != nil {
if err := httputils.ParseForm(r); err != nil {
return err
}
containerName := vars["name"]
_, upgrade := r.Header["Upgrade"]
detachKeys := r.FormValue("detachKeys")
hijacker, ok := w.(http.Hijacker)
if !ok {
return errdefs.InvalidParameter(errors.Errorf("error attaching to container %s, hijack connection missing", containerName))
}
contentType := types.MediaTypeRawStream
_, upgrade := r.Header["Upgrade"]
setupStreams := func(multiplexed bool, cancel func()) (io.ReadCloser, io.Writer, io.Writer, error) {
conn, _, err := hijacker.Hijack()
if err != nil {
@@ -1004,18 +1000,16 @@ func (c *containerRouter) postContainersAttach(ctx context.Context, w http.Respo
return ioutils.NewReadCloserWrapper(conn, closer), conn, conn, nil
}
attachConfig := &backend.ContainerAttachConfig{
if err := c.backend.ContainerAttach(containerName, &backend.ContainerAttachConfig{
GetStreams: setupStreams,
UseStdin: httputils.BoolValue(r, "stdin"),
UseStdout: httputils.BoolValue(r, "stdout"),
UseStderr: httputils.BoolValue(r, "stderr"),
Logs: httputils.BoolValue(r, "logs"),
Stream: httputils.BoolValue(r, "stream"),
DetachKeys: detachKeys,
DetachKeys: r.FormValue("detachKeys"),
MuxStreams: true,
}
if err = c.backend.ContainerAttach(containerName, attachConfig); err != nil {
}); err != nil {
log.G(ctx).WithError(err).Errorf("Handler for %s %s returned error", r.Method, r.URL.Path)
// Remember to close stream if error happens
conn, _, errHijack := hijacker.Hijack()
@@ -1037,9 +1031,6 @@ func (c *containerRouter) wsContainersAttach(ctx context.Context, w http.Respons
}
containerName := vars["name"]
var err error
detachKeys := r.FormValue("detachKeys")
done := make(chan struct{})
started := make(chan struct{})
@@ -1076,18 +1067,16 @@ func (c *containerRouter) wsContainersAttach(ctx context.Context, w http.Respons
useStderr = httputils.BoolValue(r, "stderr")
}
attachConfig := &backend.ContainerAttachConfig{
err := c.backend.ContainerAttach(containerName, &backend.ContainerAttachConfig{
GetStreams: setupStreams,
UseStdin: useStdin,
UseStdout: useStdout,
UseStderr: useStderr,
Logs: httputils.BoolValue(r, "logs"),
Stream: httputils.BoolValue(r, "stream"),
DetachKeys: detachKeys,
DetachKeys: r.FormValue("detachKeys"),
MuxStreams: false, // never multiplex, as we rely on websocket to manage distinct streams
}
err = c.backend.ContainerAttach(containerName, attachConfig)
})
close(done)
select {
case <-started:
```
```
@@ -341,8 +341,22 @@ func (ir *imageRouter) getImagesByName(ctx context.Context, w http.ResponseWrite
manifests = httputils.BoolValue(r, "manifests")
}
var platform *ocispec.Platform
if r.Form.Get("platform") != "" && versions.GreaterThanOrEqualTo(httputils.VersionFromContext(ctx), "1.49") {
p, err := httputils.DecodePlatform(r.Form.Get("platform"))
if err != nil {
return errdefs.InvalidParameter(err)
}
platform = p
}
if manifests && platform != nil {
return errdefs.InvalidParameter(errors.New("conflicting options: manifests and platform options cannot both be set"))
}
imageInspect, err := ir.backend.ImageInspect(ctx, vars["name"], backend.ImageInspectOpts{
Manifests: manifests,
Platform: platform,
})
if err != nil {
return err
```
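The handler above reads a JSON-encoded OCI platform from the `platform` query parameter on API 1.49 and up. A small sketch of that decoding step using only the standard library (the `platform` struct and `decodePlatformParam` helper are illustrative stand-ins, not moby's `httputils.DecodePlatform`):

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/url"
)

// platform mirrors the subset of an OCI platform descriptor carried by the
// "platform" query parameter (illustrative).
type platform struct {
	OS           string `json:"os"`
	Architecture string `json:"architecture"`
	Variant      string `json:"variant,omitempty"`
}

// decodePlatformParam parses the JSON-encoded "platform" query parameter,
// returning nil when the parameter is absent.
func decodePlatformParam(query url.Values) (*platform, error) {
	raw := query.Get("platform")
	if raw == "" {
		return nil, nil
	}
	var p platform
	if err := json.Unmarshal([]byte(raw), &p); err != nil {
		return nil, fmt.Errorf("invalid platform parameter: %w", err)
	}
	return &p, nil
}

func main() {
	q := url.Values{}
	q.Set("platform", `{"os":"linux","architecture":"arm","variant":"v5"}`)
	p, err := decodePlatformParam(q)
	fmt.Println(p, err) // &{linux arm v5} <nil>
}
```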
```
@@ -12,7 +12,7 @@ import (
// to provide network specific functionality.
type Backend interface {
GetNetworks(filters.Args, backend.NetworkListConfig) ([]network.Inspect, error)
CreateNetwork(nc network.CreateRequest) (*network.CreateResponse, error)
CreateNetwork(ctx context.Context, nc network.CreateRequest) (*network.CreateResponse, error)
ConnectContainerToNetwork(ctx context.Context, containerName, networkName string, endpointConfig *network.EndpointSettings) error
DisconnectContainerFromNetwork(containerName string, networkName string, force bool) error
DeleteNetwork(networkID string) error
@@ -223,7 +223,7 @@ func (n *networkRouter) postNetworkCreate(ctx context.Context, w http.ResponseWr
// validate the configuration. The network will not be created but, if the
// configuration is valid, ManagerRedirectError will be returned and handled
// below.
nw, err := n.backend.CreateNetwork(create)
nw, err := n.backend.CreateNetwork(ctx, create)
if err != nil {
if _, ok := err.(libnetwork.ManagerRedirectError); !ok {
return err
```
```
@@ -1,3 +1,6 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
//go:build go1.22
package system // import "github.com/docker/docker/api/server/router/system"
import (
@@ -103,15 +106,23 @@ func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *ht
if versions.LessThan(version, "1.47") {
// Field is omitted in API 1.48 and up, but should still be included
// in older versions, even if no values are set.
info.RegistryConfig.AllowNondistributableArtifactsCIDRs = []*registry.NetIPNet{}
info.RegistryConfig.AllowNondistributableArtifactsHostnames = []string{}
info.RegistryConfig.ExtraFields = map[string]any{
"AllowNondistributableArtifactsCIDRs": json.RawMessage(nil),
"AllowNondistributableArtifactsHostnames": json.RawMessage(nil),
}
}
if versions.LessThan(version, "1.49") {
// FirewallBackend field introduced in API v1.49.
info.FirewallBackend = nil
}
// TODO(thaJeztah): Expected commits are deprecated, and should no longer be set in API 1.49.
info.ContainerdCommit.Expected = info.ContainerdCommit.ID //nolint:staticcheck // ignore SA1019: field is deprecated, but still used on API < v1.49.
info.RuncCommit.Expected = info.RuncCommit.ID //nolint:staticcheck // ignore SA1019: field is deprecated, but still used on API < v1.49.
info.InitCommit.Expected = info.InitCommit.ID //nolint:staticcheck // ignore SA1019: field is deprecated, but still used on API < v1.49.
if versions.LessThan(version, "1.49") {
// Expected commits are omitted in API 1.49, but should still be
// included in older versions.
info.ContainerdCommit.Expected = info.ContainerdCommit.ID //nolint:staticcheck // ignore SA1019: field is deprecated, but still used on API < v1.49.
info.RuncCommit.Expected = info.RuncCommit.ID //nolint:staticcheck // ignore SA1019: field is deprecated, but still used on API < v1.49.
info.InitCommit.Expected = info.InitCommit.ID //nolint:staticcheck // ignore SA1019: field is deprecated, but still used on API < v1.49.
}
if versions.GreaterThanOrEqualTo(version, "1.42") {
info.KernelMemory = false
}
```
```
@@ -10,9 +10,12 @@ import (
"github.com/docker/docker/api/server/middleware"
"github.com/docker/docker/api/server/router"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/dockerversion"
"github.com/docker/docker/internal/otelutil"
"github.com/gorilla/mux"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
"go.opentelemetry.io/otel/baggage"
)
// versionMatcher defines a variable matcher to be parsed by the router
@@ -42,7 +45,10 @@ func (s *Server) makeHTTPHandler(handler httputils.APIFunc, operation string) ht
// use intermediate variable to prevent "should not use basic type
// string as key in context.WithValue" golint errors
ctx := context.WithValue(r.Context(), dockerversion.UAStringKey{}, r.Header.Get("User-Agent"))
ua := r.Header.Get("User-Agent")
ctx := baggage.ContextWithBaggage(context.WithValue(r.Context(), dockerversion.UAStringKey{}, ua), otelutil.MustNewBaggage(
otelutil.MustNewMemberRaw(otelutil.TriggerKey, "api"),
))
r = r.WithContext(ctx)
handlerFunc := s.handlerWithGlobalMiddlewares(handler)
@@ -57,9 +63,20 @@ func (s *Server) makeHTTPHandler(handler httputils.APIFunc, operation string) ht
if statusCode >= 500 {
log.G(ctx).Errorf("Handler for %s %s returned error: %v", r.Method, r.URL.Path, err)
}
_ = httputils.WriteJSON(w, statusCode, &types.ErrorResponse{
Message: err.Error(),
})
// While we no longer support API versions older 1.24 [api.MinSupportedAPIVersion],
// a client may try to connect using an older version and expect a plain-text error
// instead of a JSON error. This would result in an "API version too old" error
// formatted in JSON being printed as-is.
//
// Let's be nice, and return errors in plain-text to provide a more readable error
// to help the user understand the API version they're using is no longer supported.
if v := vars["version"]; v != "" && versions.LessThan(v, "1.24") {
http.Error(w, err.Error(), statusCode)
} else {
_ = httputils.WriteJSON(w, statusCode, &types.ErrorResponse{
Message: err.Error(),
})
}
}
}), operation).ServeHTTP
}
```
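The error-writing branch above falls back to a plain-text response when the requested API version is older than 1.24, and keeps the JSON error body otherwise. A minimal sketch of that version-gated fallback, with a simplified numeric "major.minor" comparison standing in for the real `versions.LessThan` helper (all names here are illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// errorResponse mirrors the JSON error body shape used by the API (illustrative).
type errorResponse struct {
	Message string `json:"message"`
}

// lessThan compares two "major.minor" API versions numerically.
func lessThan(a, b string) bool {
	var amaj, amin, bmaj, bmin int
	fmt.Sscanf(a, "%d.%d", &amaj, &amin)
	fmt.Sscanf(b, "%d.%d", &bmaj, &bmin)
	return amaj < bmaj || (amaj == bmaj && amin < bmin)
}

// writeAPIError sends the error as plain text for very old requested API
// versions and as a JSON body otherwise.
func writeAPIError(w http.ResponseWriter, requestedVersion string, status int, err error) {
	if requestedVersion != "" && lessThan(requestedVersion, "1.24") {
		http.Error(w, err.Error(), status)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(status)
	_ = json.NewEncoder(w).Encode(errorResponse{Message: err.Error()})
}
```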
```
@@ -19,10 +19,10 @@ produces:
consumes:
- "application/json"
- "text/plain"
basePath: "/v1.48"
basePath: "/v1.49"
info:
title: "Docker Engine API"
version: "1.48"
version: "1.49"
x-logo:
url: "https://docs.docker.com/assets/images/logo-docker-main.png"
description: |
@@ -55,8 +55,8 @@ info:
the URL is not supported by the daemon, a HTTP `400 Bad Request` error message
is returned.
If you omit the version-prefix, the current version of the API (v1.48) is used.
For example, calling `/info` is the same as calling `/v1.48/info`. Using the
If you omit the version-prefix, the current version of the API (v1.49) is used.
For example, calling `/info` is the same as calling `/v1.49/info`. Using the
API without a version-prefix is deprecated and will be removed in a future release.
Engine releases in the near future should support this version of the API,
@@ -6856,6 +6856,8 @@ definitions:
description: "The network pool size"
type: "integer"
example: "24"
FirewallBackend:
$ref: "#/definitions/FirewallInfo"
Warnings:
description: |
List of warnings / informational messages about missing features, or
@@ -6939,6 +6941,37 @@ definitions:
default: "plugins.moby"
example: "plugins.moby"
FirewallInfo:
description: |
Information about the daemon's firewalling configuration.
This field is currently only used on Linux, and omitted on other platforms.
type: "object"
x-nullable: true
properties:
Driver:
description: |
The name of the firewall backend driver.
type: "string"
example: "nftables"
Info:
description: |
Information about the firewall backend, provided as
"label" / "value" pairs.
<p><br /></p>
> **Note**: The information returned in this field, including the
> formatting of values and labels, should not be considered stable,
> and may change without notice.
type: "array"
items:
type: "array"
items:
type: "string"
example:
- ["ReloadedAt", "2025-01-01T00:00:00Z"]
# PluginsInfo is a temp struct holding Plugins name
# registered with docker daemon. It is used by Info struct
PluginsInfo:
@@ -6984,32 +7017,6 @@ definitions:
type: "object"
x-nullable: true
properties:
AllowNondistributableArtifactsCIDRs:
description: |
List of IP ranges to which nondistributable artifacts can be pushed,
using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/4632).
<p><br /></p>
> **Deprecated**: Pushing nondistributable artifacts is now always enabled
> and this field is always `null`. This field will be removed in a API v1.49.
type: "array"
items:
type: "string"
example: []
AllowNondistributableArtifactsHostnames:
description: |
List of registry hostnames to which nondistributable artifacts can be
pushed, using the format `<hostname>[:<port>]` or `<IP address>[:<port>]`.
<p><br /></p>
> **Deprecated**: Pushing nondistributable artifacts is now always enabled
> and this field is always `null`. This field will be removed in a API v1.49.
type: "array"
items:
type: "string"
example: []
InsecureRegistryCIDRs:
description: |
List of IP ranges of insecure registries, using the CIDR syntax
@@ -7179,13 +7186,6 @@ definitions:
description: "Actual commit ID of external tool."
type: "string"
example: "cfb82a876ecc11b5ca0977d1733adbe58599088a"
Expected:
description: |
Commit ID of external tool expected by dockerd as set at build time.
**Deprecated**: This field is deprecated and will be omitted in a API v1.49.
type: "string"
example: "2d41c047c83e09a6d61d464906feb2a2f3c52aa4"
SwarmInfo:
description: |
@@ -10491,13 +10491,9 @@ paths:
### Image tarball format
An image tarball contains one directory per image layer (named using its long ID), each containing these files:
An image tarball contains [Content as defined in the OCI Image Layout Specification](https://github.com/opencontainers/image-spec/blob/v1.1.1/image-layout.md#content).
- `VERSION`: currently `1.0` - the file format version
- `json`: detailed layer information, similar to `docker inspect layer_id`
- `layer.tar`: A tarfile containing the filesystem changes in this layer
The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions.
Additionally, includes the manifest.json file associated with a backwards compatible docker save format.
If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs.
@@ -10537,6 +10533,7 @@ paths:
If not provided, the full multi-platform image will be saved.
Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}`
tags: ["Image"]
/images/get:
get:
summary: "Export several images"
@@ -10571,6 +10568,16 @@ paths:
type: "array"
items:
type: "string"
- name: "platform"
type: "string"
in: "query"
description: |
JSON encoded OCI platform describing a platform which will be used
to select a platform-specific image to be saved if the image is
multi-platform.
If not provided, the full multi-platform image will be saved.
Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}`
tags: ["Image"]
/images/load:
post:
```
```
@@ -153,6 +153,7 @@ type GetImageOpts struct {
// ImageInspectOpts holds parameters to inspect an image.
type ImageInspectOpts struct {
Manifests bool
Platform *ocispec.Platform
}
// CommitConfig is the configuration for creating an image as part of a build.
```
```
@@ -128,11 +128,12 @@ type InspectResponse struct {
// compatibility.
Descriptor *ocispec.Descriptor `json:"Descriptor,omitempty"`
// Manifests is a list of image manifests available in this image. It
// Manifests is a list of image manifests available in this image. It
// provides a more detailed view of the platform-specific image manifests or
// other image-attached data like build attestations.
//
// Only available if the daemon provides a multi-platform image store.
// Only available if the daemon provides a multi-platform image store, the client
// requests manifests AND does not request a specific platform.
//
// WARNING: This is experimental and may change at any time without any backward
// compatibility.
```
|
||||
type InspectOptions struct {
|
||||
// Manifests returns the image manifests.
|
||||
Manifests bool
|
||||
|
||||
// Platform selects the specific platform of a multi-platform image to inspect.
|
||||
//
|
||||
// This option is only available for API version 1.49 and up.
|
||||
Platform *ocispec.Platform
|
||||
}
|
||||
|
||||
// SaveOptions holds parameters to save images.
|
||||
|
||||
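A short sketch of how a caller would fill in the new `Platform` option to request one variant of a multi-platform image. The options struct is re-declared locally so the example compiles on its own; only the `Manifests` and `Platform` fields shown in the diff are assumed:

```go
package main

import (
	"fmt"

	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// inspectOptions mirrors the options struct above (illustrative local copy).
type inspectOptions struct {
	Manifests bool
	Platform  *ocispec.Platform
}

func main() {
	// Request the linux/arm64 variant; per the API change, Manifests and
	// Platform cannot both be set on the same request.
	opts := inspectOptions{
		Platform: &ocispec.Platform{OS: "linux", Architecture: "arm64"},
	}
	fmt.Printf("inspect with platform %s/%s\n", opts.Platform.OS, opts.Platform.Architecture)
}
```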
```
@@ -1,3 +1,6 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
//go:build go1.22
package registry // import "github.com/docker/docker/api/types/registry"
import (
@@ -15,23 +18,26 @@ type ServiceConfig struct {
InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"`
IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"`
Mirrors []string
// ExtraFields is for internal use to include deprecated fields on older API versions.
ExtraFields map[string]any `json:"-"`
}
// MarshalJSON implements a custom marshaler to include legacy fields
// in API responses.
func (sc ServiceConfig) MarshalJSON() ([]byte, error) {
tmp := map[string]interface{}{
"InsecureRegistryCIDRs": sc.InsecureRegistryCIDRs,
"IndexConfigs": sc.IndexConfigs,
"Mirrors": sc.Mirrors,
func (sc *ServiceConfig) MarshalJSON() ([]byte, error) {
type tmp ServiceConfig
base, err := json.Marshal((*tmp)(sc))
if err != nil {
return nil, err
}
if sc.AllowNondistributableArtifactsCIDRs != nil {
tmp["AllowNondistributableArtifactsCIDRs"] = nil
var merged map[string]any
_ = json.Unmarshal(base, &merged)
for k, v := range sc.ExtraFields {
merged[k] = v
}
if sc.AllowNondistributableArtifactsHostnames != nil {
tmp["AllowNondistributableArtifactsHostnames"] = nil
}
return json.Marshal(tmp)
return json.Marshal(merged)
}
// NetIPNet is the net.IPNet type, which can be marshalled and
```
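The new `MarshalJSON` follows a common Go pattern: marshal the struct through a type alias (so the custom marshaler does not recurse into itself), unmarshal the result into a map, and merge in extra, dynamically-named fields. A self-contained sketch of the pattern with a stand-in `config` struct (not moby's `ServiceConfig`):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// config is a stand-in struct used to demonstrate the alias-and-merge
// marshaling pattern shown in the diff above.
type config struct {
	Mirrors     []string       `json:"Mirrors"`
	ExtraFields map[string]any `json:"-"`
}

func (c *config) MarshalJSON() ([]byte, error) {
	type alias config // the alias type has no methods, so no recursion
	base, err := json.Marshal((*alias)(c))
	if err != nil {
		return nil, err
	}
	var merged map[string]any
	if err := json.Unmarshal(base, &merged); err != nil {
		return nil, err
	}
	for k, v := range c.ExtraFields {
		merged[k] = v
	}
	return json.Marshal(merged)
}

func main() {
	b, _ := json.Marshal(&config{
		Mirrors:     []string{"https://mirror.example.com"},
		ExtraFields: map[string]any{"LegacyField": nil},
	})
	fmt.Println(string(b)) // {"LegacyField":null,"Mirrors":["https://mirror.example.com"]}
}
```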
```
@@ -1,3 +1,6 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
//go:build go1.22
package registry
import (
@@ -20,8 +23,10 @@ func TestServiceConfigMarshalLegacyFields(t *testing.T) {
// used for API versions < 1.49.
t.Run("with legacy fields", func(t *testing.T) {
b, err := json.Marshal(&ServiceConfig{
AllowNondistributableArtifactsCIDRs: []*NetIPNet{},
AllowNondistributableArtifactsHostnames: []string{},
ExtraFields: map[string]any{
"AllowNondistributableArtifactsCIDRs": json.RawMessage(nil),
"AllowNondistributableArtifactsHostnames": json.RawMessage(nil),
},
})
assert.NilError(t, err)
const expected = `{"AllowNondistributableArtifactsCIDRs":null,"AllowNondistributableArtifactsHostnames":null,"IndexConfigs":null,"InsecureRegistryCIDRs":null,"Mirrors":null}`
```
```
@@ -73,6 +73,7 @@ type Info struct {
SecurityOptions []string
ProductLicense string `json:",omitempty"`
DefaultAddressPools []NetworkAddressPool `json:",omitempty"`
FirewallBackend *FirewallInfo `json:"FirewallBackend,omitempty"`
CDISpecDirs []string
Containerd *ContainerdInfo `json:",omitempty"`
@@ -143,7 +144,7 @@ type Commit struct {
// Expected is the commit ID of external tool expected by dockerd as set at build time.
//
// Deprecated: this field is no longer used in API v1.49, but kept for backward-compatibility with older API versions.
Expected string
Expected string `json:",omitempty"`
}
// NetworkAddressPool is a temp struct used by [Info] struct.
@@ -151,3 +152,11 @@ type NetworkAddressPool struct {
Base string
Size int
}
// FirewallInfo describes the firewall backend.
type FirewallInfo struct {
// Driver is the name of the firewall backend driver.
Driver string `json:"Driver"`
// Info is a list of label/value pairs, containing information related to the firewall.
Info [][2]string `json:"Info,omitempty"`
}
```
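To illustrate the wire shape of the new `FirewallBackend` field, here is a small sketch that re-declares the `FirewallInfo` fields locally and marshals them; the `ReloadedAt` value matches the swagger example above and is not a stable contract:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// firewallInfo mirrors the FirewallInfo type added above, re-declared so the
// example compiles on its own.
type firewallInfo struct {
	Driver string      `json:"Driver"`
	Info   [][2]string `json:"Info,omitempty"`
}

func main() {
	// Info holds free-form label/value pairs; formatting may change without notice.
	fw := firewallInfo{
		Driver: "nftables",
		Info:   [][2]string{{"ReloadedAt", "2025-01-01T00:00:00Z"}},
	}
	b, _ := json.Marshal(fw)
	fmt.Println(string(b)) // {"Driver":"nftables","Info":[["ReloadedAt","2025-01-01T00:00:00Z"]]}
}
```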
```
@@ -13,11 +13,11 @@ import (
cerrdefs "github.com/containerd/errdefs"
"github.com/docker/docker/daemon/graphdriver"
"github.com/docker/docker/layer"
"github.com/docker/docker/pkg/idtools"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/snapshot"
"github.com/moby/buildkit/util/leaseutil"
"github.com/moby/locker"
"github.com/moby/sys/user"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
bolt "go.etcd.io/bbolt"
@@ -36,7 +36,7 @@ type Opt struct {
GraphDriver graphdriver.Driver
LayerStore layer.Store
Root string
IdentityMapping idtools.IdentityMapping
IdentityMapping user.IdentityMapping
}
type graphIDRegistrar interface {
@@ -106,7 +106,7 @@ func (s *snapshotter) Name() string {
return "default"
}
func (s *snapshotter) IdentityMapping() *idtools.IdentityMapping {
func (s *snapshotter) IdentityMapping() *user.IdentityMapping {
// Returning a non-nil but empty *IdentityMapping breaks BuildKit:
// https://github.com/moby/moby/pull/39444
if s.opt.IdentityMapping.Empty() {
@@ -494,7 +494,7 @@ type mountable struct {
acquire func() ([]mount.Mount, func() error, error)
release func() error
refCount int
idmap idtools.IdentityMapping
idmap user.IdentityMapping
}
func (m *mountable) Mount() ([]mount.Mount, func() error, error) {
@@ -538,7 +538,7 @@ func (m *mountable) releaseMount() error {
return m.release()
}
func (m *mountable) IdentityMapping() *idtools.IdentityMapping {
func (m *mountable) IdentityMapping() *user.IdentityMapping {
// Returning a non-nil but empty *IdentityMapping breaks BuildKit:
// https://github.com/moby/moby/pull/39444
if m.idmap.Empty() {
```
```
@@ -26,7 +26,6 @@ import (
"github.com/docker/docker/errdefs"
"github.com/docker/docker/libnetwork"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/streamformatter"
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/moby/buildkit/client"
@@ -35,6 +34,7 @@ import (
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/util/entitlements"
"github.com/moby/buildkit/util/tracing"
"github.com/moby/sys/user"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc"
@@ -89,7 +89,7 @@ type Opt struct {
RegistryHosts docker.RegistryHosts
BuilderConfig config.BuilderConfig
Rootless bool
IdentityMapping idtools.IdentityMapping
IdentityMapping user.IdentityMapping
DNSConfig config.DNSConfig
ApparmorProfile string
UseSnapshotter bool
```
```
@@ -11,7 +11,6 @@ import (
"github.com/containerd/log"
"github.com/docker/docker/daemon/config"
"github.com/docker/docker/libnetwork"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/stringid"
"github.com/moby/buildkit/executor"
"github.com/moby/buildkit/executor/oci"
@@ -22,12 +21,13 @@ import (
"github.com/moby/buildkit/solver/llbsolver/cdidevices"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/network"
"github.com/moby/sys/user"
"github.com/opencontainers/runtime-spec/specs-go"
)
const networkName = "bridge"
func newExecutor(root, cgroupParent string, net *libnetwork.Controller, dnsConfig *oci.DNSConfig, rootless bool, idmap idtools.IdentityMapping, apparmorProfile string, cdiManager *cdidevices.Manager) (executor.Executor, error) {
func newExecutor(root, cgroupParent string, net *libnetwork.Controller, dnsConfig *oci.DNSConfig, rootless bool, idmap user.IdentityMapping, apparmorProfile string, cdiManager *cdidevices.Manager) (executor.Executor, error) {
netRoot := filepath.Join(root, "net")
networkProviders := map[pb.NetMode]network.Provider{
pb.NetMode_UNSET: &bridgeProvider{Controller: net, Root: netRoot},
@@ -9,14 +9,14 @@ import (
"github.com/docker/docker/daemon/config"
"github.com/docker/docker/libnetwork"
"github.com/docker/docker/pkg/idtools"
"github.com/moby/buildkit/executor"
"github.com/moby/buildkit/executor/oci"
resourcetypes "github.com/moby/buildkit/executor/resources/types"
"github.com/moby/buildkit/solver/llbsolver/cdidevices"
"github.com/moby/sys/user"
)
func newExecutor(_, _ string, _ *libnetwork.Controller, _ *oci.DNSConfig, _ bool, _ idtools.IdentityMapping, _ string, _ *cdidevices.Manager) (executor.Executor, error) {
func newExecutor(_, _ string, _ *libnetwork.Controller, _ *oci.DNSConfig, _ bool, _ user.IdentityMapping, _ string, _ *cdidevices.Manager) (executor.Executor, error) {
return &stubExecutor{}, nil
}
```
```
@@ -52,6 +52,7 @@ func (e *imageExporterMobyWrapper) Resolve(ctx context.Context, id int, exporter
if _, has := exporterAttrs[string(exptypes.OptKeyDanglingPrefix)]; !has {
exporterAttrs[string(exptypes.OptKeyDanglingPrefix)] = "moby-dangling"
}
exporterAttrs[string(exptypes.OptKeyDanglingEmptyOnly)] = "true"
inst, err := e.exp.Resolve(ctx, id, exporterAttrs)
if err != nil {
@@ -82,21 +83,33 @@ func (i *imageExporterInstanceWrapper) Export(ctx context.Context, src *exporter
}
if i.callbacks.Named != nil {
for _, name := range strings.Split(out[string(exptypes.OptKeyName)], ",") {
ref, err := reference.ParseNormalizedNamed(name)
if err != nil {
// Shouldn't happen, but log if it does and continue.
log.G(ctx).WithFields(log.Fields{
"name": name,
"error": err,
}).Warn("image named with invalid reference produced by buildkit")
continue
}
namedTagged := reference.TagNameOnly(ref).(reference.NamedTagged)
i.callbacks.Named(ctx, namedTagged, desc)
}
i.processNamedCallback(ctx, out, desc)
}
return out, ref, nil
}
func (i *imageExporterInstanceWrapper) processNamedCallback(ctx context.Context, out map[string]string, desc ocispec.Descriptor) {
// TODO(vvoland): Change to exptypes.ExporterImageNameKey when BuildKit v0.21 is vendored.
imageName := out["image.name"]
if imageName == "" {
log.G(ctx).Warn("image named with empty image.name produced by buildkit")
return
}
for _, name := range strings.Split(imageName, ",") {
ref, err := reference.ParseNormalizedNamed(name)
if err != nil {
// Shouldn't happen, but log if it does and continue.
log.G(ctx).WithFields(log.Fields{
"name": name,
"error": err,
}).Warn("image named with invalid reference produced by buildkit")
continue
}
if namedTagged, ok := reference.TagNameOnly(ref).(reference.NamedTagged); ok {
i.callbacks.Named(ctx, namedTagged, desc)
}
}
}
```
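The `processNamedCallback` helper above normalizes each image name reported by BuildKit and guarantees it carries a tag before invoking the callback. A small standalone sketch of that normalize-then-tag step, assuming the standalone github.com/distribution/reference module (which may differ from the exact package vendored here):

```go
package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	// Normalize a short image name, then ensure it carries a tag;
	// TagNameOnly adds ":latest" when no tag or digest is present.
	ref, err := reference.ParseNormalizedNamed("library/alpine")
	if err != nil {
		fmt.Println("invalid reference:", err)
		return
	}
	if namedTagged, ok := reference.TagNameOnly(ref).(reference.NamedTagged); ok {
		fmt.Println(namedTagged.Name(), namedTagged.Tag()) // docker.io/library/alpine latest
	}
}
```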
```
@@ -16,12 +16,12 @@ import (
"github.com/docker/docker/builder"
"github.com/docker/docker/builder/remotecontext"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/streamformatter"
"github.com/docker/docker/pkg/stringid"
"github.com/moby/buildkit/frontend/dockerfile/instructions"
"github.com/moby/buildkit/frontend/dockerfile/parser"
"github.com/moby/buildkit/frontend/dockerfile/shell"
"github.com/moby/sys/user"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"golang.org/x/sync/syncmap"
@@ -47,13 +47,13 @@ const (
// BuildManager is shared across all Builder objects
type BuildManager struct {
idMapping idtools.IdentityMapping
idMapping user.IdentityMapping
backend builder.Backend
pathCache pathCache // TODO: make this persistent
}
// NewBuildManager creates a BuildManager
func NewBuildManager(b builder.Backend, identityMapping idtools.IdentityMapping) (*BuildManager, error) {
func NewBuildManager(b builder.Backend, identityMapping user.IdentityMapping) (*BuildManager, error) {
bm := &BuildManager{
backend: b,
pathCache: &syncmap.Map{},
@@ -103,7 +103,7 @@ type builderOptions struct {
Backend builder.Backend
ProgressWriter backend.ProgressWriter
PathCache pathCache
IDMapping idtools.IdentityMapping
IDMapping user.IdentityMapping
}
// Builder is a Dockerfile builder
@@ -118,7 +118,7 @@ type Builder struct {
docker builder.Backend
idMapping idtools.IdentityMapping
idMapping user.IdentityMapping
disableCommit bool
imageSources *imageSources
pathCache pathCache
```
||||
```
@@ -17,14 +17,14 @@ import (
"github.com/docker/docker/builder"
"github.com/docker/docker/builder/remotecontext"
"github.com/docker/docker/builder/remotecontext/urlutil"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/longpath"
"github.com/docker/docker/pkg/progress"
"github.com/docker/docker/pkg/streamformatter"
"github.com/docker/docker/pkg/system"
"github.com/moby/buildkit/frontend/dockerfile/instructions"
"github.com/moby/go-archive"
"github.com/moby/sys/symlink"
"github.com/moby/sys/user"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
@@ -446,9 +446,15 @@ func downloadSource(output io.Writer, stdout io.Writer, srcURL string) (remote b
return lc, filename, err
}
type identity struct {
UID int
GID int
SID string
}
type copyFileOptions struct {
decompress bool
identity *idtools.Identity
identity *identity
archiver *archive.Archiver
}
@@ -498,7 +504,7 @@ func performCopyForInfo(dest copyInfo, source copyInfo, options copyFileOptions)
return copyFile(archiver, srcPath, destPath, options.identity)
}
func copyDirectory(archiver *archive.Archiver, source, dest string, identity *idtools.Identity) error {
func copyDirectory(archiver *archive.Archiver, source, dest string, identity *identity) error {
destExists, err := isExistingDirectory(dest)
if err != nil {
return errors.Wrapf(err, "failed to query destination path")
@@ -513,13 +519,13 @@ func copyDirectory(archiver *archive.Archiver, source, dest string, identity *id
return nil
}
func copyFile(archiver *archive.Archiver, source, dest string, identity *idtools.Identity) error {
func copyFile(archiver *archive.Archiver, source, dest string, identity *identity) error {
if identity == nil {
if err := os.MkdirAll(filepath.Dir(dest), 0o755); err != nil {
return err
}
} else {
if err := idtools.MkdirAllAndChownNew(filepath.Dir(dest), 0o755, *identity); err != nil {
if err := user.MkdirAllAndChown(filepath.Dir(dest), 0o755, identity.UID, identity.GID, user.WithOnlyNew); err != nil {
return errors.Wrapf(err, "failed to create new directory")
}
}
```
||||
@@ -7,11 +7,9 @@ import (
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
)
|
||||
|
||||
func fixPermissions(source, destination string, identity idtools.Identity, overrideSkip bool) error {
|
||||
func fixPermissions(source, destination string, id identity, overrideSkip bool) error {
|
||||
var (
|
||||
skipChownRoot bool
|
||||
err error
|
||||
@@ -39,7 +37,7 @@ func fixPermissions(source, destination string, identity idtools.Identity, overr
|
||||
}
|
||||
|
||||
fullpath = filepath.Join(destination, cleaned)
|
||||
return os.Lchown(fullpath, identity.UID, identity.GID)
|
||||
return os.Lchown(fullpath, id.UID, id.GID)
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
|
||||
winio "github.com/Microsoft/go-winio"
|
||||
"github.com/docker/docker/internal/usergroup"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/pkg/system"
|
||||
"github.com/moby/sys/reexec"
|
||||
"github.com/pkg/errors"
|
||||
@@ -24,12 +23,12 @@ func init() {
|
||||
reexec.Register("windows-fix-permissions", fixPermissionsReexec)
|
||||
}
|
||||
|
||||
func fixPermissions(source, destination string, identity idtools.Identity, _ bool) error {
|
||||
if identity.SID == "" {
|
||||
func fixPermissions(source, destination string, id identity, _ bool) error {
|
||||
if id.SID == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
cmd := reexec.Command("windows-fix-permissions", source, destination, identity.SID)
|
||||
cmd := reexec.Command("windows-fix-permissions", source, destination, id.SID)
|
||||
output, err := cmd.CombinedOutput()
|
||||
|
||||
return errors.Wrapf(err, "failed to exec windows-fix-permissions: %s", output)
|
||||
|
||||
@@ -7,8 +7,8 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/builder/remotecontext"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/moby/buildkit/frontend/dockerfile/instructions"
|
||||
"github.com/moby/go-archive"
|
||||
"github.com/moby/sys/reexec"
|
||||
"gotest.tools/v3/assert"
|
||||
is "gotest.tools/v3/assert/cmp"
|
||||
|
||||
@@ -19,10 +19,10 @@ import (
|
||||
"github.com/docker/docker/builder"
|
||||
networkSettings "github.com/docker/docker/daemon/network"
|
||||
"github.com/docker/docker/image"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/chrootarchive"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
"github.com/docker/go-connections/nat"
|
||||
"github.com/moby/go-archive"
|
||||
"github.com/moby/go-archive/chrootarchive"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
@@ -152,12 +152,13 @@ func (b *Builder) performCopy(ctx context.Context, req dispatchRequest, inst cop
|
||||
return err
|
||||
}
|
||||
|
||||
identity := b.idMapping.RootPair()
|
||||
uid, gid := b.idMapping.RootPair()
|
||||
id := identity{UID: uid, GID: gid}
|
||||
// if a chown was requested, perform the steps to get the uid, gid
|
||||
// translated (if necessary because of user namespaces), and replace
|
||||
// the root pair with the chown pair for copy operations
|
||||
if inst.chownStr != "" {
|
||||
identity, err = parseChownFlag(ctx, b, state, inst.chownStr, destInfo.root, b.idMapping)
|
||||
id, err = parseChownFlag(ctx, b, state, inst.chownStr, destInfo.root, b.idMapping)
|
||||
if err != nil {
|
||||
if b.options.Platform != "windows" {
|
||||
return errors.Wrapf(err, "unable to convert uid/gid chown string to host mapping")
|
||||
@@ -173,7 +174,7 @@ func (b *Builder) performCopy(ctx context.Context, req dispatchRequest, inst cop
|
||||
archiver: b.getArchiver(),
|
||||
}
|
||||
if !inst.preserveOwnership {
|
||||
opts.identity = &identity
|
||||
opts.identity = &id
|
||||
}
|
||||
if err := performCopyForInfo(destInfo, info, opts); err != nil {
|
||||
return errors.Wrapf(err, "failed to copy files")
|
||||
|
||||
@@ -6,17 +6,16 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/moby/sys/symlink"
|
||||
"github.com/moby/sys/user"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func parseChownFlag(ctx context.Context, builder *Builder, state *dispatchState, chown, ctrRootPath string, identityMapping idtools.IdentityMapping) (idtools.Identity, error) {
|
||||
func parseChownFlag(ctx context.Context, builder *Builder, state *dispatchState, chown, ctrRootPath string, identityMapping user.IdentityMapping) (identity, error) {
|
||||
var userStr, grpStr string
|
||||
parts := strings.Split(chown, ":")
|
||||
if len(parts) > 2 {
|
||||
return idtools.Identity{}, errors.New("invalid chown string format: " + chown)
|
||||
return identity{}, errors.New("invalid chown string format: " + chown)
|
||||
}
|
||||
if len(parts) == 1 {
|
||||
// if no group specified, use the user spec as group as well
|
||||
@@ -27,27 +26,27 @@ func parseChownFlag(ctx context.Context, builder *Builder, state *dispatchState,
|
||||
|
||||
passwdPath, err := symlink.FollowSymlinkInScope(filepath.Join(ctrRootPath, "etc", "passwd"), ctrRootPath)
|
||||
if err != nil {
|
||||
return idtools.Identity{}, errors.Wrap(err, "can't resolve /etc/passwd path in container rootfs")
|
||||
return identity{}, errors.Wrap(err, "can't resolve /etc/passwd path in container rootfs")
|
||||
}
|
||||
groupPath, err := symlink.FollowSymlinkInScope(filepath.Join(ctrRootPath, "etc", "group"), ctrRootPath)
|
||||
if err != nil {
|
||||
return idtools.Identity{}, errors.Wrap(err, "can't resolve /etc/group path in container rootfs")
|
||||
return identity{}, errors.Wrap(err, "can't resolve /etc/group path in container rootfs")
|
||||
}
|
||||
uid, err := lookupUser(userStr, passwdPath)
|
||||
if err != nil {
|
||||
return idtools.Identity{}, errors.Wrap(err, "can't find uid for user "+userStr)
|
||||
return identity{}, errors.Wrap(err, "can't find uid for user "+userStr)
|
||||
}
|
||||
gid, err := lookupGroup(grpStr, groupPath)
|
||||
if err != nil {
|
||||
return idtools.Identity{}, errors.Wrap(err, "can't find gid for group "+grpStr)
|
||||
return identity{}, errors.Wrap(err, "can't find gid for group "+grpStr)
|
||||
}
|
||||
|
||||
// convert as necessary because of user namespaces
|
||||
chownPair, err := identityMapping.ToHost(idtools.Identity{UID: uid, GID: gid})
|
||||
uid, gid, err = identityMapping.ToHost(uid, gid)
|
||||
if err != nil {
|
||||
return idtools.Identity{}, errors.Wrap(err, "unable to convert uid/gid to host mapping")
|
||||
return identity{}, errors.Wrap(err, "unable to convert uid/gid to host mapping")
|
||||
}
|
||||
return chownPair, nil
|
||||
return identity{UID: uid, GID: gid}, nil
|
||||
}
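
A side note on the `identityMapping.ToHost(uid, gid)` call above: the remapping it performs is plain range arithmetic over the UID/GID maps. Below is a minimal, self-contained sketch of that arithmetic; the `idMap` type here is illustrative only (the real code uses `user.IDMap`, whose `ID`/`ParentID`/`Count` fields appear in the test changes further down), and the numbers match the "remapped" test cases.

```go
package main

import "fmt"

// idMap mirrors the fields used by the tests below (ID, ParentID, Count);
// it is a local illustration, not the moby/sys/user type itself.
type idMap struct {
	ID       int // first ID inside the user namespace
	ParentID int // first ID on the host
	Count    int // size of the mapped range
}

// toHost sketches what identityMapping.ToHost does: shift a container ID
// into the host range that covers it, or fail if no range matches.
func toHost(containerID int, maps []idMap) (int, error) {
	if len(maps) == 0 {
		return containerID, nil // no remapping configured
	}
	for _, m := range maps {
		if containerID >= m.ID && containerID < m.ID+m.Count {
			return m.ParentID + (containerID - m.ID), nil
		}
	}
	return -1, fmt.Errorf("container ID %d is not mapped", containerID)
}

func main() {
	maps := []idMap{{ID: 0, ParentID: 100000, Count: 65536}}
	uid, _ := toHost(1, maps)
	gid, _ := toHost(33, maps)
	fmt.Println(uid, gid) // 100001 100033, as in the remapped test expectations
}
```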
|
||||
|
||||
func lookupUser(userStr, filepath string) (int, error) {
|
||||
|
||||
@@ -7,7 +7,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/moby/sys/user"
|
||||
"gotest.tools/v3/assert"
|
||||
is "gotest.tools/v3/assert/cmp"
|
||||
)
|
||||
@@ -28,15 +28,15 @@ othergrp:x:6666:
|
||||
`,
|
||||
}
|
||||
// test mappings for validating use of maps
|
||||
idMaps := []idtools.IDMap{
|
||||
idMaps := []user.IDMap{
|
||||
{
|
||||
ContainerID: 0,
|
||||
HostID: 100000,
|
||||
Size: 65536,
|
||||
ID: 0,
|
||||
ParentID: 100000,
|
||||
Count: 65536,
|
||||
},
|
||||
}
|
||||
remapped := idtools.IdentityMapping{UIDMaps: idMaps, GIDMaps: idMaps}
|
||||
unmapped := idtools.IdentityMapping{}
|
||||
remapped := user.IdentityMapping{UIDMaps: idMaps, GIDMaps: idMaps}
|
||||
unmapped := user.IdentityMapping{}
|
||||
|
||||
contextDir, cleanup := createTestTempDir(t, "", "builder-chown-parse-test")
|
||||
defer cleanup()
|
||||
@@ -54,9 +54,9 @@ othergrp:x:6666:
|
||||
builder *Builder
|
||||
name string
|
||||
chownStr string
|
||||
idMapping idtools.IdentityMapping
|
||||
idMapping user.IdentityMapping
|
||||
state *dispatchState
|
||||
expected idtools.Identity
|
||||
expected identity
|
||||
}{
|
||||
{
|
||||
builder: &Builder{options: &types.ImageBuildOptions{Platform: "linux"}},
|
||||
@@ -64,7 +64,7 @@ othergrp:x:6666:
|
||||
chownStr: "1",
|
||||
idMapping: unmapped,
|
||||
state: &dispatchState{},
|
||||
expected: idtools.Identity{UID: 1, GID: 1},
|
||||
expected: identity{UID: 1, GID: 1},
|
||||
},
|
||||
{
|
||||
builder: &Builder{options: &types.ImageBuildOptions{Platform: "linux"}},
|
||||
@@ -72,7 +72,7 @@ othergrp:x:6666:
|
||||
chownStr: "0:1",
|
||||
idMapping: unmapped,
|
||||
state: &dispatchState{},
|
||||
expected: idtools.Identity{UID: 0, GID: 1},
|
||||
expected: identity{UID: 0, GID: 1},
|
||||
},
|
||||
{
|
||||
builder: &Builder{options: &types.ImageBuildOptions{Platform: "linux"}},
|
||||
@@ -80,7 +80,7 @@ othergrp:x:6666:
|
||||
chownStr: "0",
|
||||
idMapping: remapped,
|
||||
state: &dispatchState{},
|
||||
expected: idtools.Identity{UID: 100000, GID: 100000},
|
||||
expected: identity{UID: 100000, GID: 100000},
|
||||
},
|
||||
{
|
||||
builder: &Builder{options: &types.ImageBuildOptions{Platform: "linux"}},
|
||||
@@ -88,7 +88,7 @@ othergrp:x:6666:
|
||||
chownStr: "1:33",
|
||||
idMapping: remapped,
|
||||
state: &dispatchState{},
|
||||
expected: idtools.Identity{UID: 100001, GID: 100033},
|
||||
expected: identity{UID: 100001, GID: 100033},
|
||||
},
|
||||
{
|
||||
builder: &Builder{options: &types.ImageBuildOptions{Platform: "linux"}},
|
||||
@@ -96,7 +96,7 @@ othergrp:x:6666:
|
||||
chownStr: "bin:5555",
|
||||
idMapping: unmapped,
|
||||
state: &dispatchState{},
|
||||
expected: idtools.Identity{UID: 1, GID: 5555},
|
||||
expected: identity{UID: 1, GID: 5555},
|
||||
},
|
||||
{
|
||||
builder: &Builder{options: &types.ImageBuildOptions{Platform: "linux"}},
|
||||
@@ -104,7 +104,7 @@ othergrp:x:6666:
|
||||
chownStr: "0:unicorn",
|
||||
idMapping: remapped,
|
||||
state: &dispatchState{},
|
||||
expected: idtools.Identity{UID: 100000, GID: 101002},
|
||||
expected: identity{UID: 100000, GID: 101002},
|
||||
},
|
||||
{
|
||||
builder: &Builder{options: &types.ImageBuildOptions{Platform: "linux"}},
|
||||
@@ -112,7 +112,7 @@ othergrp:x:6666:
|
||||
chownStr: "unicorn",
|
||||
idMapping: remapped,
|
||||
state: &dispatchState{},
|
||||
expected: idtools.Identity{UID: 101001, GID: 101002},
|
||||
expected: identity{UID: 101001, GID: 101002},
|
||||
},
|
||||
} {
|
||||
t.Run(testcase.name, func(t *testing.T) {
|
||||
@@ -127,7 +127,7 @@ othergrp:x:6666:
|
||||
builder *Builder
|
||||
name string
|
||||
chownStr string
|
||||
idMapping idtools.IdentityMapping
|
||||
idMapping user.IdentityMapping
|
||||
state *dispatchState
|
||||
descr string
|
||||
}{
|
||||
|
||||
@@ -14,8 +14,8 @@ import (
|
||||
"github.com/docker/docker/builder/remotecontext"
|
||||
"github.com/docker/docker/image"
|
||||
"github.com/docker/docker/layer"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/go-connections/nat"
|
||||
"github.com/moby/go-archive"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"gotest.tools/v3/assert"
|
||||
is "gotest.tools/v3/assert/cmp"
|
||||
|
||||
@@ -12,27 +12,28 @@ import (
|
||||
"github.com/docker/docker/api/types/mount"
|
||||
"github.com/docker/docker/errdefs"
|
||||
"github.com/docker/docker/internal/usergroup"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/pkg/jsonmessage"
|
||||
"github.com/moby/sys/user"
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
func parseChownFlag(ctx context.Context, builder *Builder, state *dispatchState, chown, ctrRootPath string, identityMapping idtools.IdentityMapping) (idtools.Identity, error) {
|
||||
func parseChownFlag(ctx context.Context, builder *Builder, state *dispatchState, chown, ctrRootPath string, identityMapping user.IdentityMapping) (identity, error) {
|
||||
if builder.options.Platform == "windows" {
|
||||
return getAccountIdentity(ctx, builder, chown, ctrRootPath, state)
|
||||
}
|
||||
|
||||
return identityMapping.RootPair(), nil
|
||||
uid, gid := identityMapping.RootPair()
|
||||
return identity{UID: uid, GID: gid}, nil
|
||||
}
|
||||
|
||||
func getAccountIdentity(ctx context.Context, builder *Builder, accountName string, ctrRootPath string, state *dispatchState) (idtools.Identity, error) {
|
||||
func getAccountIdentity(ctx context.Context, builder *Builder, accountName string, ctrRootPath string, state *dispatchState) (identity, error) {
|
||||
// If this is potentially a string SID then attempt to convert it to verify
|
||||
// this, otherwise continue looking for the account.
|
||||
if strings.HasPrefix(accountName, "S-") || strings.HasPrefix(accountName, "s-") {
|
||||
sid, err := windows.StringToSid(accountName)
|
||||
|
||||
if err == nil {
|
||||
return idtools.Identity{SID: sid.String()}, nil
|
||||
return identity{SID: sid.String()}, nil
|
||||
}
|
||||
}
|
||||
|
||||
@@ -41,14 +42,14 @@ func getAccountIdentity(ctx context.Context, builder *Builder, accountName strin
|
||||
|
||||
// If this is a SID that is built-in and hence the same across all systems then use that.
|
||||
if err == nil && (accType == windows.SidTypeAlias || accType == windows.SidTypeWellKnownGroup) {
|
||||
return idtools.Identity{SID: sid.String()}, nil
|
||||
return identity{SID: sid.String()}, nil
|
||||
}
|
||||
|
||||
// Check if the account name is one unique to containers.
|
||||
if strings.EqualFold(accountName, "ContainerAdministrator") {
|
||||
return idtools.Identity{SID: usergroup.ContainerAdministratorSidString}, nil
|
||||
return identity{SID: usergroup.ContainerAdministratorSidString}, nil
|
||||
} else if strings.EqualFold(accountName, "ContainerUser") {
|
||||
return idtools.Identity{SID: usergroup.ContainerUserSidString}, nil
|
||||
return identity{SID: usergroup.ContainerUserSidString}, nil
|
||||
}
|
||||
|
||||
// All other lookups failed, so therefore determine if the account in
|
||||
@@ -56,7 +57,7 @@ func getAccountIdentity(ctx context.Context, builder *Builder, accountName strin
|
||||
return lookupNTAccount(ctx, builder, accountName, state)
|
||||
}
|
||||
|
||||
func lookupNTAccount(ctx context.Context, builder *Builder, accountName string, state *dispatchState) (idtools.Identity, error) {
|
||||
func lookupNTAccount(ctx context.Context, builder *Builder, accountName string, state *dispatchState) (identity, error) {
|
||||
source, _ := filepath.Split(os.Args[0])
|
||||
|
||||
target := "C:\\Docker"
|
||||
@@ -64,7 +65,7 @@ func lookupNTAccount(ctx context.Context, builder *Builder, accountName string,
|
||||
|
||||
optionsPlatform, err := platforms.Parse(builder.options.Platform)
|
||||
if err != nil {
|
||||
return idtools.Identity{}, errdefs.InvalidParameter(err)
|
||||
return identity{}, errdefs.InvalidParameter(err)
|
||||
}
|
||||
|
||||
runConfig := copyRunConfig(state.runConfig,
|
||||
@@ -85,7 +86,7 @@ func lookupNTAccount(ctx context.Context, builder *Builder, accountName string,
|
||||
|
||||
container, err := builder.containerManager.Create(ctx, runConfig, hostConfig)
|
||||
if err != nil {
|
||||
return idtools.Identity{}, err
|
||||
return identity{}, err
|
||||
}
|
||||
|
||||
stdout := new(bytes.Buffer)
|
||||
@@ -93,15 +94,15 @@ func lookupNTAccount(ctx context.Context, builder *Builder, accountName string,
|
||||
|
||||
if err := builder.containerManager.Run(ctx, container.ID, stdout, stderr); err != nil {
|
||||
if err, ok := err.(*statusCodeError); ok {
|
||||
return idtools.Identity{}, &jsonmessage.JSONError{
|
||||
return identity{}, &jsonmessage.JSONError{
|
||||
Message: stderr.String(),
|
||||
Code: err.StatusCode(),
|
||||
}
|
||||
}
|
||||
return idtools.Identity{}, err
|
||||
return identity{}, err
|
||||
}
|
||||
|
||||
accountSid := stdout.String()
|
||||
|
||||
return idtools.Identity{SID: accountSid}, nil
|
||||
return identity{SID: accountSid}, nil
|
||||
}
|
||||
|
||||
@@ -6,11 +6,11 @@ import (
|
||||
"path/filepath"
|
||||
|
||||
"github.com/docker/docker/builder"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/chrootarchive"
|
||||
"github.com/docker/docker/pkg/longpath"
|
||||
"github.com/docker/docker/pkg/system"
|
||||
"github.com/docker/docker/pkg/tarsum"
|
||||
"github.com/moby/go-archive/chrootarchive"
|
||||
"github.com/moby/go-archive/compression"
|
||||
"github.com/moby/sys/symlink"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
@@ -66,7 +66,7 @@ func FromArchive(tarStream io.Reader) (builder.Source, error) {
|
||||
}
|
||||
}()
|
||||
|
||||
decompressedStream, err := archive.DecompressStream(tarStream)
|
||||
decompressedStream, err := compression.DecompressStream(tarStream)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -6,8 +6,8 @@ import (
|
||||
"hash"
|
||||
"os"
|
||||
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/tarsum"
|
||||
"github.com/moby/go-archive"
|
||||
)
|
||||
|
||||
// NewFileHash returns new hash that is used for the builder cache keys
|
||||
|
||||
@@ -7,7 +7,7 @@ import (
|
||||
"github.com/containerd/log"
|
||||
"github.com/docker/docker/builder"
|
||||
"github.com/docker/docker/builder/remotecontext/git"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/moby/go-archive"
|
||||
)
|
||||
|
||||
// MakeGitContext returns a Context from gitURL that is cloned in a temporary directory.
|
||||
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/builder"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/moby/go-archive"
|
||||
"github.com/moby/sys/reexec"
|
||||
"github.com/pkg/errors"
|
||||
"gotest.tools/v3/skip"
|
||||
|
||||
@@ -32,7 +32,7 @@ func (cli *Client) ContainerCommit(ctx context.Context, containerID string, opti
|
||||
if tagged, ok := ref.(reference.Tagged); ok {
|
||||
tag = tagged.Tag()
|
||||
}
|
||||
repository = reference.FamiliarName(ref)
|
||||
repository = ref.Name()
|
||||
}
|
||||
|
||||
query := url.Values{}
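
This hunk, like the image create/pull/push/tag hunks that follow, swaps `reference.FamiliarName(ref)` for `ref.Name()`, so the client now sends fully-qualified references to the daemon. A minimal sketch of the normalization the updated tests rely on (the import path is an assumption; these files already use the distribution reference package):

```go
package main

import (
	"fmt"

	"github.com/distribution/reference" // assumed import path
)

func main() {
	// ParseNormalizedNamed expands short names against the Docker Hub defaults.
	ref, err := reference.ParseNormalizedNamed("myimage")
	if err != nil {
		panic(err)
	}
	fmt.Println(reference.FamiliarName(ref)) // "myimage" (old behaviour: familiar name)
	fmt.Println(ref.Name())                  // "docker.io/library/myimage" (new behaviour)

	// TagNameOnly adds the default "latest" tag when none was given.
	fmt.Println(reference.TagNameOnly(ref).String()) // "docker.io/library/myimage:latest"
}
```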
|
||||
|
||||
@@ -33,13 +33,15 @@ func TestContainerCommitError(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestContainerCommit(t *testing.T) {
|
||||
expectedURL := "/commit"
|
||||
expectedContainerID := "container_id"
|
||||
specifiedReference := "repository_name:tag"
|
||||
expectedRepositoryName := "repository_name"
|
||||
expectedTag := "tag"
|
||||
expectedComment := "comment"
|
||||
expectedAuthor := "author"
|
||||
const (
|
||||
expectedURL = "/commit"
|
||||
expectedContainerID = "container_id"
|
||||
specifiedReference = "repository_name:tag"
|
||||
expectedRepositoryName = "docker.io/library/repository_name"
|
||||
expectedTag = "tag"
|
||||
expectedComment = "comment"
|
||||
expectedAuthor = "author"
|
||||
)
|
||||
expectedChanges := []string{"change1", "change2"}
|
||||
|
||||
client := &Client{
|
||||
|
||||
@@ -21,7 +21,7 @@ func (cli *Client) ImageCreate(ctx context.Context, parentReference string, opti
|
||||
}
|
||||
|
||||
query := url.Values{}
|
||||
query.Set("fromImage", reference.FamiliarName(ref))
|
||||
query.Set("fromImage", ref.Name())
|
||||
query.Set("tag", getAPITagFromNamedRef(ref))
|
||||
if options.Platform != "" {
|
||||
query.Set("platform", strings.ToLower(options.Platform))
|
||||
|
||||
@@ -25,11 +25,14 @@ func TestImageCreateError(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestImageCreate(t *testing.T) {
|
||||
expectedURL := "/images/create"
|
||||
expectedImage := "test:5000/my_image"
|
||||
expectedTag := "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
|
||||
expectedReference := fmt.Sprintf("%s@%s", expectedImage, expectedTag)
|
||||
expectedRegistryAuth := "eyJodHRwczovL2luZGV4LmRvY2tlci5pby92MS8iOnsiYXV0aCI6ImRHOTBid289IiwiZW1haWwiOiJqb2huQGRvZS5jb20ifX0="
|
||||
const (
|
||||
expectedURL = "/images/create"
|
||||
expectedImage = "docker.io/test/my_image"
|
||||
expectedTag = "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
|
||||
specifiedReference = "test/my_image:latest@" + expectedTag
|
||||
expectedRegistryAuth = "eyJodHRwczovL2luZGV4LmRvY2tlci5pby92MS8iOnsiYXV0aCI6ImRHOTBid289IiwiZW1haWwiOiJqb2huQGRvZS5jb20ifX0="
|
||||
)
|
||||
|
||||
client := &Client{
|
||||
client: newMockClient(func(r *http.Request) (*http.Response, error) {
|
||||
if !strings.HasPrefix(r.URL.Path, expectedURL) {
|
||||
@@ -58,7 +61,7 @@ func TestImageCreate(t *testing.T) {
|
||||
}),
|
||||
}
|
||||
|
||||
createResponse, err := client.ImageCreate(context.Background(), expectedReference, image.CreateOptions{
|
||||
createResponse, err := client.ImageCreate(context.Background(), specifiedReference, image.CreateOptions{
|
||||
RegistryAuth: expectedRegistryAuth,
|
||||
})
|
||||
if err != nil {
|
||||
|
||||
@@ -32,6 +32,17 @@ func (cli *Client) ImageInspect(ctx context.Context, imageID string, inspectOpts
|
||||
query.Set("manifests", "1")
|
||||
}
|
||||
|
||||
if opts.apiOptions.Platform != nil {
|
||||
if err := cli.NewVersionError(ctx, "1.49", "platform"); err != nil {
|
||||
return image.InspectResponse{}, err
|
||||
}
|
||||
platform, err := encodePlatform(opts.apiOptions.Platform)
|
||||
if err != nil {
|
||||
return image.InspectResponse{}, err
|
||||
}
|
||||
query.Set("platform", platform)
|
||||
}
|
||||
|
||||
resp, err := cli.get(ctx, "/images/"+imageID+"/json", query, nil)
|
||||
defer ensureReaderClosed(resp)
|
||||
if err != nil {
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"bytes"
|
||||
|
||||
"github.com/docker/docker/api/types/image"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
// ImageInspectOption is a type representing functional options for the image inspect operation.
|
||||
@@ -36,6 +37,17 @@ func ImageInspectWithManifests(manifests bool) ImageInspectOption {
|
||||
})
|
||||
}
|
||||
|
||||
// ImageInspectWithPlatform sets platform API option for the image inspect operation.
|
||||
// This option is only available for API version 1.49 and up.
|
||||
// With this option set, the image inspect operation will return information for the
|
||||
// specified platform variant of the multi-platform image.
|
||||
func ImageInspectWithPlatform(platform *ocispec.Platform) ImageInspectOption {
|
||||
return imageInspectOptionFunc(func(clientOpts *imageInspectOpts) error {
|
||||
clientOpts.apiOptions.Platform = platform
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// ImageInspectWithAPIOpts sets the API options for the image inspect operation.
|
||||
func ImageInspectWithAPIOpts(opts image.InspectOptions) ImageInspectOption {
|
||||
return imageInspectOptionFunc(func(clientOpts *imageInspectOpts) error {
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
|
||||
"github.com/docker/docker/api/types/image"
|
||||
"github.com/docker/docker/errdefs"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"gotest.tools/v3/assert"
|
||||
is "gotest.tools/v3/assert/cmp"
|
||||
)
|
||||
@@ -79,3 +80,47 @@ func TestImageInspect(t *testing.T) {
|
||||
t.Fatalf("expected `%v`, got %v", expectedTags, imageInspect.RepoTags)
|
||||
}
|
||||
}
|
||||
|
||||
func TestImageInspectWithPlatform(t *testing.T) {
|
||||
expectedURL := "/images/image_id/json"
|
||||
requestedPlatform := &ocispec.Platform{
|
||||
OS: "linux",
|
||||
Architecture: "arm64",
|
||||
}
|
||||
|
||||
expectedPlatform, err := encodePlatform(requestedPlatform)
|
||||
assert.NilError(t, err)
|
||||
|
||||
client := &Client{
|
||||
client: newMockClient(func(req *http.Request) (*http.Response, error) {
|
||||
if !strings.HasPrefix(req.URL.Path, expectedURL) {
|
||||
return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL)
|
||||
}
|
||||
|
||||
// Check if platform parameter is passed correctly
|
||||
platform := req.URL.Query().Get("platform")
|
||||
if platform != expectedPlatform {
|
||||
return nil, fmt.Errorf("Expected platform '%s', got '%s'", expectedPlatform, platform)
|
||||
}
|
||||
|
||||
content, err := json.Marshal(image.InspectResponse{
|
||||
ID: "image_id",
|
||||
Architecture: "arm64",
|
||||
Os: "linux",
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(bytes.NewReader(content)),
|
||||
}, nil
|
||||
}),
|
||||
}
|
||||
|
||||
imageInspect, err := client.ImageInspect(context.Background(), "image_id", ImageInspectWithPlatform(requestedPlatform))
|
||||
assert.NilError(t, err)
|
||||
assert.Equal(t, imageInspect.ID, "image_id")
|
||||
assert.Equal(t, imageInspect.Architecture, "arm64")
|
||||
assert.Equal(t, imageInspect.Os, "linux")
|
||||
}
|
||||
|
||||
@@ -26,7 +26,7 @@ func (cli *Client) ImagePull(ctx context.Context, refStr string, options image.P
|
||||
}
|
||||
|
||||
query := url.Values{}
|
||||
query.Set("fromImage", reference.FamiliarName(ref))
|
||||
query.Set("fromImage", ref.Name())
|
||||
if !options.All {
|
||||
query.Set("tag", getAPITagFromNamedRef(ref))
|
||||
}
|
||||
|
||||
@@ -74,7 +74,7 @@ func TestImagePullWithUnauthorizedErrorAndAnotherUnauthorizedError(t *testing.T)
|
||||
}
|
||||
|
||||
func TestImagePullWithPrivilegedFuncNoError(t *testing.T) {
|
||||
expectedURL := "/images/create"
|
||||
const expectedURL = "/images/create"
|
||||
client := &Client{
|
||||
client: newMockClient(func(req *http.Request) (*http.Response, error) {
|
||||
if !strings.HasPrefix(req.URL.Path, expectedURL) {
|
||||
@@ -92,8 +92,8 @@ func TestImagePullWithPrivilegedFuncNoError(t *testing.T) {
|
||||
}
|
||||
query := req.URL.Query()
|
||||
fromImage := query.Get("fromImage")
|
||||
if fromImage != "myimage" {
|
||||
return nil, fmt.Errorf("fromimage not set in URL query properly. Expected '%s', got %s", "myimage", fromImage)
|
||||
if fromImage != "docker.io/library/myimage" {
|
||||
return nil, fmt.Errorf("fromimage not set in URL query properly. Expected '%s', got %s", "docker.io/library/myimage", fromImage)
|
||||
}
|
||||
tag := query.Get("tag")
|
||||
if tag != "latest" {
|
||||
@@ -125,8 +125,10 @@ func TestImagePullWithPrivilegedFuncNoError(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestImagePullWithoutErrors(t *testing.T) {
|
||||
expectedURL := "/images/create"
|
||||
expectedOutput := "hello world"
|
||||
const (
|
||||
expectedURL = "/images/create"
|
||||
expectedOutput = "hello world"
|
||||
)
|
||||
pullCases := []struct {
|
||||
all bool
|
||||
reference string
|
||||
@@ -136,61 +138,88 @@ func TestImagePullWithoutErrors(t *testing.T) {
|
||||
{
|
||||
all: false,
|
||||
reference: "myimage",
|
||||
expectedImage: "myimage",
|
||||
expectedImage: "docker.io/library/myimage",
|
||||
expectedTag: "latest",
|
||||
},
|
||||
{
|
||||
all: false,
|
||||
reference: "myimage:tag",
|
||||
expectedImage: "myimage",
|
||||
expectedImage: "docker.io/library/myimage",
|
||||
expectedTag: "tag",
|
||||
},
|
||||
{
|
||||
all: true,
|
||||
reference: "myimage",
|
||||
expectedImage: "myimage",
|
||||
expectedImage: "docker.io/library/myimage",
|
||||
expectedTag: "",
|
||||
},
|
||||
{
|
||||
all: true,
|
||||
reference: "myimage:anything",
|
||||
expectedImage: "myimage",
|
||||
expectedImage: "docker.io/library/myimage",
|
||||
expectedTag: "",
|
||||
},
|
||||
{
|
||||
reference: "myname/myimage",
|
||||
expectedImage: "docker.io/myname/myimage",
|
||||
expectedTag: "latest",
|
||||
},
|
||||
{
|
||||
reference: "docker.io/myname/myimage",
|
||||
expectedImage: "docker.io/myname/myimage",
|
||||
expectedTag: "latest",
|
||||
},
|
||||
{
|
||||
reference: "index.docker.io/myname/myimage:tag",
|
||||
expectedImage: "docker.io/myname/myimage",
|
||||
expectedTag: "tag",
|
||||
},
|
||||
{
|
||||
reference: "localhost/myname/myimage",
|
||||
expectedImage: "localhost/myname/myimage",
|
||||
expectedTag: "latest",
|
||||
},
|
||||
{
|
||||
reference: "registry.example.com:5000/myimage:tag",
|
||||
expectedImage: "registry.example.com:5000/myimage",
|
||||
expectedTag: "tag",
|
||||
},
|
||||
}
|
||||
for _, pullCase := range pullCases {
|
||||
client := &Client{
|
||||
client: newMockClient(func(req *http.Request) (*http.Response, error) {
|
||||
if !strings.HasPrefix(req.URL.Path, expectedURL) {
|
||||
return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL)
|
||||
}
|
||||
query := req.URL.Query()
|
||||
fromImage := query.Get("fromImage")
|
||||
if fromImage != pullCase.expectedImage {
|
||||
return nil, fmt.Errorf("fromimage not set in URL query properly. Expected '%s', got %s", pullCase.expectedImage, fromImage)
|
||||
}
|
||||
tag := query.Get("tag")
|
||||
if tag != pullCase.expectedTag {
|
||||
return nil, fmt.Errorf("tag not set in URL query properly. Expected '%s', got %s", pullCase.expectedTag, tag)
|
||||
}
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(bytes.NewReader([]byte(expectedOutput))),
|
||||
}, nil
|
||||
}),
|
||||
}
|
||||
resp, err := client.ImagePull(context.Background(), pullCase.reference, image.PullOptions{
|
||||
All: pullCase.all,
|
||||
t.Run(pullCase.reference, func(t *testing.T) {
|
||||
client := &Client{
|
||||
client: newMockClient(func(req *http.Request) (*http.Response, error) {
|
||||
if !strings.HasPrefix(req.URL.Path, expectedURL) {
|
||||
return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL)
|
||||
}
|
||||
query := req.URL.Query()
|
||||
fromImage := query.Get("fromImage")
|
||||
if fromImage != pullCase.expectedImage {
|
||||
return nil, fmt.Errorf("fromimage not set in URL query properly. Expected '%s', got %s", pullCase.expectedImage, fromImage)
|
||||
}
|
||||
tag := query.Get("tag")
|
||||
if tag != pullCase.expectedTag {
|
||||
return nil, fmt.Errorf("tag not set in URL query properly. Expected '%s', got %s", pullCase.expectedTag, tag)
|
||||
}
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(bytes.NewReader([]byte(expectedOutput))),
|
||||
}, nil
|
||||
}),
|
||||
}
|
||||
resp, err := client.ImagePull(context.Background(), pullCase.reference, image.PullOptions{
|
||||
All: pullCase.all,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
body, err := io.ReadAll(resp)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if string(body) != expectedOutput {
|
||||
t.Fatalf("expected '%s', got %s", expectedOutput, string(body))
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
body, err := io.ReadAll(resp)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if string(body) != expectedOutput {
|
||||
t.Fatalf("expected '%s', got %s", expectedOutput, string(body))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -29,7 +29,6 @@ func (cli *Client) ImagePush(ctx context.Context, image string, options image.Pu
|
||||
return nil, errors.New("cannot push a digest reference")
|
||||
}
|
||||
|
||||
name := reference.FamiliarName(ref)
|
||||
query := url.Values{}
|
||||
if !options.All {
|
||||
ref = reference.TagNameOnly(ref)
|
||||
@@ -52,13 +51,13 @@ func (cli *Client) ImagePush(ctx context.Context, image string, options image.Pu
|
||||
query.Set("platform", string(pJson))
|
||||
}
|
||||
|
||||
resp, err := cli.tryImagePush(ctx, name, query, options.RegistryAuth)
|
||||
resp, err := cli.tryImagePush(ctx, ref.Name(), query, options.RegistryAuth)
|
||||
if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil {
|
||||
newAuthHeader, privilegeErr := options.PrivilegeFunc(ctx)
|
||||
if privilegeErr != nil {
|
||||
return nil, privilegeErr
|
||||
}
|
||||
resp, err = cli.tryImagePush(ctx, name, query, newAuthHeader)
|
||||
resp, err = cli.tryImagePush(ctx, ref.Name(), query, newAuthHeader)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -79,7 +79,7 @@ func TestImagePushWithUnauthorizedErrorAndAnotherUnauthorizedError(t *testing.T)
|
||||
}
|
||||
|
||||
func TestImagePushWithPrivilegedFuncNoError(t *testing.T) {
|
||||
expectedURL := "/images/myimage/push"
|
||||
const expectedURL = "/images/docker.io/myname/myimage/push"
|
||||
client := &Client{
|
||||
client: newMockClient(func(req *http.Request) (*http.Response, error) {
|
||||
if !strings.HasPrefix(req.URL.Path, expectedURL) {
|
||||
@@ -109,7 +109,7 @@ func TestImagePushWithPrivilegedFuncNoError(t *testing.T) {
|
||||
privilegeFunc := func(_ context.Context) (string, error) {
|
||||
return "IAmValid", nil
|
||||
}
|
||||
resp, err := client.ImagePush(context.Background(), "myimage:tag", image.PushOptions{
|
||||
resp, err := client.ImagePush(context.Background(), "myname/myimage:tag", image.PushOptions{
|
||||
RegistryAuth: "NotValid",
|
||||
PrivilegeFunc: privilegeFunc,
|
||||
})
|
||||
@@ -126,8 +126,10 @@ func TestImagePushWithPrivilegedFuncNoError(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestImagePushWithoutErrors(t *testing.T) {
|
||||
expectedOutput := "hello world"
|
||||
expectedURLFormat := "/images/%s/push"
|
||||
const (
|
||||
expectedURLFormat = "/images/%s/push"
|
||||
expectedOutput = "hello world"
|
||||
)
|
||||
testCases := []struct {
|
||||
all bool
|
||||
reference string
|
||||
@@ -137,27 +139,52 @@ func TestImagePushWithoutErrors(t *testing.T) {
|
||||
{
|
||||
all: false,
|
||||
reference: "myimage",
|
||||
expectedImage: "myimage",
|
||||
expectedImage: "docker.io/library/myimage",
|
||||
expectedTag: "latest",
|
||||
},
|
||||
{
|
||||
all: false,
|
||||
reference: "myimage:tag",
|
||||
expectedImage: "myimage",
|
||||
expectedImage: "docker.io/library/myimage",
|
||||
expectedTag: "tag",
|
||||
},
|
||||
{
|
||||
all: true,
|
||||
reference: "myimage",
|
||||
expectedImage: "myimage",
|
||||
expectedImage: "docker.io/library/myimage",
|
||||
expectedTag: "",
|
||||
},
|
||||
{
|
||||
all: true,
|
||||
reference: "myimage:anything",
|
||||
expectedImage: "myimage",
|
||||
expectedImage: "docker.io/library/myimage",
|
||||
expectedTag: "",
|
||||
},
|
||||
{
|
||||
reference: "myname/myimage",
|
||||
expectedImage: "docker.io/myname/myimage",
|
||||
expectedTag: "latest",
|
||||
},
|
||||
{
|
||||
reference: "docker.io/myname/myimage",
|
||||
expectedImage: "docker.io/myname/myimage",
|
||||
expectedTag: "latest",
|
||||
},
|
||||
{
|
||||
reference: "index.docker.io/myname/myimage:tag",
|
||||
expectedImage: "docker.io/myname/myimage",
|
||||
expectedTag: "tag",
|
||||
},
|
||||
{
|
||||
reference: "localhost/myname/myimage",
|
||||
expectedImage: "localhost/myname/myimage",
|
||||
expectedTag: "latest",
|
||||
},
|
||||
{
|
||||
reference: "registry.example.com:5000/myimage:tag",
|
||||
expectedImage: "registry.example.com:5000/myimage",
|
||||
expectedTag: "tag",
|
||||
},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
t.Run(fmt.Sprintf("%s,all-tags=%t", tc.reference, tc.all), func(t *testing.T) {
|
||||
|
||||
@@ -26,7 +26,7 @@ func (cli *Client) ImageTag(ctx context.Context, source, target string) error {
|
||||
ref = reference.TagNameOnly(ref)
|
||||
|
||||
query := url.Values{}
|
||||
query.Set("repo", reference.FamiliarName(ref))
|
||||
query.Set("repo", ref.Name())
|
||||
if tagged, ok := ref.(reference.Tagged); ok {
|
||||
query.Set("tag", tagged.Tag())
|
||||
}
|
||||
|
||||
@@ -95,7 +95,7 @@ func TestImageTagHexSource(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestImageTag(t *testing.T) {
|
||||
expectedURL := "/images/image_id/tag"
|
||||
const expectedURL = "/images/image_id/tag"
|
||||
tagCases := []struct {
|
||||
reference string
|
||||
expectedQueryParams map[string]string
|
||||
@@ -103,37 +103,37 @@ func TestImageTag(t *testing.T) {
|
||||
{
|
||||
reference: "repository:tag1",
|
||||
expectedQueryParams: map[string]string{
|
||||
"repo": "repository",
|
||||
"repo": "docker.io/library/repository",
|
||||
"tag": "tag1",
|
||||
},
|
||||
}, {
|
||||
reference: "another_repository:latest",
|
||||
expectedQueryParams: map[string]string{
|
||||
"repo": "another_repository",
|
||||
"repo": "docker.io/library/another_repository",
|
||||
"tag": "latest",
|
||||
},
|
||||
}, {
|
||||
reference: "another_repository",
|
||||
expectedQueryParams: map[string]string{
|
||||
"repo": "another_repository",
|
||||
"repo": "docker.io/library/another_repository",
|
||||
"tag": "latest",
|
||||
},
|
||||
}, {
|
||||
reference: "test/another_repository",
|
||||
expectedQueryParams: map[string]string{
|
||||
"repo": "test/another_repository",
|
||||
"repo": "docker.io/test/another_repository",
|
||||
"tag": "latest",
|
||||
},
|
||||
}, {
|
||||
reference: "test/another_repository:tag1",
|
||||
expectedQueryParams: map[string]string{
|
||||
"repo": "test/another_repository",
|
||||
"repo": "docker.io/test/another_repository",
|
||||
"tag": "tag1",
|
||||
},
|
||||
}, {
|
||||
reference: "test/test/another_repository:tag1",
|
||||
expectedQueryParams: map[string]string{
|
||||
"repo": "test/test/another_repository",
|
||||
"repo": "docker.io/test/test/another_repository",
|
||||
"tag": "tag1",
|
||||
},
|
||||
}, {
|
||||
|
||||
@@ -237,7 +237,7 @@ func (cli *Client) checkResponseErr(serverResp *http.Response) (retErr error) {
|
||||
}
|
||||
|
||||
var daemonErr error
|
||||
if serverResp.Header.Get("Content-Type") == "application/json" && (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) {
|
||||
if serverResp.Header.Get("Content-Type") == "application/json" {
|
||||
var errorResponse types.ErrorResponse
|
||||
if err := json.Unmarshal(body, &errorResponse); err != nil {
|
||||
return errors.Wrap(err, "Error reading JSON")
|
||||
|
||||
@@ -132,12 +132,15 @@ func TestResponseErrors(t *testing.T) {
|
||||
expected: `Error response from daemon: Some error occurred`,
|
||||
},
|
||||
{
|
||||
// API versions before 1.24 did not support JSON errors, and return response as-is.
|
||||
// API versions before 1.24 did not support JSON errors. Technically,
|
||||
// we no longer downgrade to older API versions, but we make an
|
||||
// exception for errors so that older clients would print a more
|
||||
// readable error.
|
||||
doc: "JSON error on old API",
|
||||
apiVersion: "1.23",
|
||||
contentType: "application/json",
|
||||
response: `{"message":"Some error occurred"}`,
|
||||
expected: `Error response from daemon: {"message":"Some error occurred"}`,
|
||||
contentType: "text/plain; charset=utf-8",
|
||||
response: `client version 1.10 is too old. Minimum supported API version is 1.24, please upgrade your client to a newer version`,
|
||||
expected: `Error response from daemon: client version 1.10 is too old. Minimum supported API version is 1.24, please upgrade your client to a newer version`,
|
||||
},
|
||||
{
|
||||
doc: "plain-text error",
|
||||
|
||||
@@ -13,12 +13,9 @@ import (
|
||||
// installCommonConfigFlags adds flags to the pflag.FlagSet to configure the daemon
|
||||
func installCommonConfigFlags(conf *config.Config, flags *pflag.FlagSet) {
|
||||
var (
|
||||
allowNonDistributable = opts.NewNamedListOptsRef("allow-nondistributable-artifacts", &conf.AllowNondistributableArtifacts, registry.ValidateIndexName)
|
||||
registryMirrors = opts.NewNamedListOptsRef("registry-mirrors", &conf.Mirrors, registry.ValidateMirror)
|
||||
insecureRegistries = opts.NewNamedListOptsRef("insecure-registries", &conf.InsecureRegistries, registry.ValidateIndexName)
|
||||
registryMirrors = opts.NewNamedListOptsRef("registry-mirrors", &conf.Mirrors, registry.ValidateMirror)
|
||||
insecureRegistries = opts.NewNamedListOptsRef("insecure-registries", &conf.InsecureRegistries, registry.ValidateIndexName)
|
||||
)
|
||||
flags.Var(allowNonDistributable, "allow-nondistributable-artifacts", "Allow push of nondistributable artifacts to registry")
|
||||
_ = flags.MarkDeprecated("allow-nondistributable-artifacts", "Pushing nondistributable artifacts is now enabled by default. ")
|
||||
flags.Var(registryMirrors, "registry-mirror", "Preferred Docker registry mirror")
|
||||
flags.Var(insecureRegistries, "insecure-registry", "Enable insecure registry communication")
|
||||
|
||||
@@ -77,6 +74,9 @@ func installCommonConfigFlags(conf *config.Config, flags *pflag.FlagSet) {
|
||||
flags.Var(opts.NewNamedListOptsRef("cdi-spec-dirs", &conf.CDISpecDirs, nil), "cdi-spec-dir", "CDI specification directories to use")
|
||||
|
||||
// Deprecated flags / options
|
||||
allowNonDistributable := opts.NewNamedListOptsRef("allow-nondistributable-artifacts", &([]string{}), registry.ValidateIndexName)
|
||||
flags.Var(allowNonDistributable, "allow-nondistributable-artifacts", "Allow push of nondistributable artifacts to registry")
|
||||
_ = flags.MarkDeprecated("allow-nondistributable-artifacts", "Pushing nondistributable artifacts is now enabled by default. ")
|
||||
|
||||
// TODO(thaJeztah): option is used to produce error when used; remove in next release
|
||||
flags.StringVar(&conf.CorsHeaders, "api-cors-header", "", "Set CORS headers in the Engine API; deprecated: this feature was deprecated in 27.0, and now removed")
|
||||
|
||||
@@ -661,11 +661,6 @@ func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(conf.AllowNondistributableArtifacts) > 0 {
|
||||
// TODO(thaJeztah): move to config.Validate and change into an error for v29.0 and remove in v30.0.
|
||||
log.G(context.TODO()).Warn(`DEPRECATED: The "allow-nondistributable-artifacts" config parameter is deprecated and always enabled; this option will be removed in the next release`)
|
||||
}
|
||||
|
||||
return conf, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"path/filepath"
|
||||
|
||||
containertypes "github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/moby/go-archive"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
|
||||
@@ -30,15 +30,15 @@ import (
|
||||
"github.com/docker/docker/image"
|
||||
libcontainerdtypes "github.com/docker/docker/libcontainerd/types"
|
||||
"github.com/docker/docker/oci"
|
||||
"github.com/docker/docker/pkg/atomicwriter"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/restartmanager"
|
||||
"github.com/docker/docker/volume"
|
||||
volumemounts "github.com/docker/docker/volume/mounts"
|
||||
"github.com/docker/go-units"
|
||||
agentexec "github.com/moby/swarmkit/v2/agent/exec"
|
||||
"github.com/moby/sys/atomicwriter"
|
||||
"github.com/moby/sys/signal"
|
||||
"github.com/moby/sys/symlink"
|
||||
"github.com/moby/sys/user"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
"go.opentelemetry.io/otel"
|
||||
@@ -320,7 +320,7 @@ func (container *Container) CommitInMemory(store *ViewDB) error {
|
||||
}
|
||||
|
||||
// SetupWorkingDirectory sets up the container's working directory as set in container.Config.WorkingDir
|
||||
func (container *Container) SetupWorkingDirectory(rootIdentity idtools.Identity) error {
|
||||
func (container *Container) SetupWorkingDirectory(uid int, gid int) error {
|
||||
if container.Config.WorkingDir == "" {
|
||||
return nil
|
||||
}
|
||||
@@ -331,7 +331,7 @@ func (container *Container) SetupWorkingDirectory(rootIdentity idtools.Identity)
|
||||
return err
|
||||
}
|
||||
|
||||
if err := idtools.MkdirAllAndChownNew(pth, 0o755, rootIdentity); err != nil {
|
||||
if err := user.MkdirAllAndChown(pth, 0o755, uid, gid, user.WithOnlyNew); err != nil {
|
||||
pthInfo, err2 := os.Stat(pth)
|
||||
if err2 == nil && pthInfo != nil && !pthInfo.IsDir() {
|
||||
return errors.Errorf("Cannot mkdir: %s is not a directory", container.Config.WorkingDir)
|
||||
|
||||
@@ -435,7 +435,7 @@ type containerByIDIndexer struct{}
|
||||
const terminator = "\x00"
|
||||
|
||||
// FromObject implements the memdb.SingleIndexer interface for Container objects
|
||||
func (e *containerByIDIndexer) FromObject(obj interface{}) (bool, []byte, error) {
|
||||
func (e *containerByIDIndexer) FromObject(obj any) (bool, []byte, error) {
|
||||
c, ok := obj.(*Container)
|
||||
if !ok {
|
||||
return false, nil, fmt.Errorf("%T is not a Container", obj)
|
||||
@@ -445,7 +445,7 @@ func (e *containerByIDIndexer) FromObject(obj interface{}) (bool, []byte, error)
|
||||
}
|
||||
|
||||
// FromArgs implements the memdb.Indexer interface
|
||||
func (e *containerByIDIndexer) FromArgs(args ...interface{}) ([]byte, error) {
|
||||
func (e *containerByIDIndexer) FromArgs(args ...any) ([]byte, error) {
|
||||
if len(args) != 1 {
|
||||
return nil, fmt.Errorf("must provide only a single argument")
|
||||
}
|
||||
@@ -457,7 +457,7 @@ func (e *containerByIDIndexer) FromArgs(args ...interface{}) ([]byte, error) {
|
||||
return []byte(arg + terminator), nil
|
||||
}
|
||||
|
||||
func (e *containerByIDIndexer) PrefixFromArgs(args ...interface{}) ([]byte, error) {
|
||||
func (e *containerByIDIndexer) PrefixFromArgs(args ...any) ([]byte, error) {
|
||||
val, err := e.FromArgs(args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -470,7 +470,7 @@ func (e *containerByIDIndexer) PrefixFromArgs(args ...interface{}) ([]byte, erro
|
||||
// namesByNameIndexer is used to index container name associations by name.
|
||||
type namesByNameIndexer struct{}
|
||||
|
||||
func (e *namesByNameIndexer) FromObject(obj interface{}) (bool, []byte, error) {
|
||||
func (e *namesByNameIndexer) FromObject(obj any) (bool, []byte, error) {
|
||||
n, ok := obj.(nameAssociation)
|
||||
if !ok {
|
||||
return false, nil, fmt.Errorf(`%T does not have type "nameAssociation"`, obj)
|
||||
@@ -480,7 +480,7 @@ func (e *namesByNameIndexer) FromObject(obj interface{}) (bool, []byte, error) {
|
||||
return true, []byte(n.name + terminator), nil
|
||||
}
|
||||
|
||||
func (e *namesByNameIndexer) FromArgs(args ...interface{}) ([]byte, error) {
|
||||
func (e *namesByNameIndexer) FromArgs(args ...any) ([]byte, error) {
|
||||
if len(args) != 1 {
|
||||
return nil, fmt.Errorf("must provide only a single argument")
|
||||
}
|
||||
@@ -495,7 +495,7 @@ func (e *namesByNameIndexer) FromArgs(args ...interface{}) ([]byte, error) {
|
||||
// namesByContainerIDIndexer is used to index container names by container ID.
|
||||
type namesByContainerIDIndexer struct{}
|
||||
|
||||
func (e *namesByContainerIDIndexer) FromObject(obj interface{}) (bool, []byte, error) {
|
||||
func (e *namesByContainerIDIndexer) FromObject(obj any) (bool, []byte, error) {
|
||||
n, ok := obj.(nameAssociation)
|
||||
if !ok {
|
||||
return false, nil, fmt.Errorf(`%T does not have type "nameAssociation"`, obj)
|
||||
@@ -505,7 +505,7 @@ func (e *namesByContainerIDIndexer) FromObject(obj interface{}) (bool, []byte, e
|
||||
return true, []byte(n.containerID + terminator), nil
|
||||
}
|
||||
|
||||
func (e *namesByContainerIDIndexer) FromArgs(args ...interface{}) ([]byte, error) {
|
||||
func (e *namesByContainerIDIndexer) FromArgs(args ...any) ([]byte, error) {
|
||||
if len(args) != 1 {
|
||||
return nil, fmt.Errorf("must provide only a single argument")
|
||||
}
|
||||
|
||||
@@ -143,7 +143,15 @@ init() {
|
||||
|
||||
# instruction: iptables dependency check
|
||||
faced_iptables_error=""
|
||||
if ! command -v iptables > /dev/null 2>&1 && [ ! -f /sbin/iptables ] && [ ! -f /usr/sbin/iptables ]; then
|
||||
# Many OSs now use iptables-nft by default so, check for module nf_tables by default. But,
|
||||
# if "iptables --version" worked and reported "legacy", check for module ip_tables instead.
|
||||
iptables_module="nf_tables"
|
||||
iptables_command=$(PATH=$PATH:/sbin:/usr/sbin command -v iptables 2> /dev/null) || :
|
||||
if [ -n "$iptables_command" ]; then
|
||||
iptables_version=$($iptables_command --version 2> /dev/null) || :
|
||||
case $iptables_version in
|
||||
*legacy*) iptables_module="ip_tables" ;;
|
||||
esac
|
||||
faced_iptables_error=1
|
||||
if [ -z "$OPT_SKIP_IPTABLES" ]; then
|
||||
if command -v apt-get > /dev/null 2>&1; then
|
||||
@@ -178,14 +186,14 @@ init() {
|
||||
fi
|
||||
|
||||
# instruction: ip_tables module dependency check
|
||||
if ! grep -q ip_tables /proc/modules 2> /dev/null && ! grep -q ip_tables /lib/modules/$(uname -r)/modules.builtin 2> /dev/null; then
|
||||
if ! grep -q $iptables_module /proc/modules 2> /dev/null && ! grep -q $iptables_module /lib/modules/$(uname -r)/modules.builtin 2> /dev/null; then
|
||||
faced_iptables_error=1
|
||||
if [ -z "$OPT_SKIP_IPTABLES" ]; then
|
||||
instructions=$(
|
||||
cat <<- EOI
|
||||
${instructions}
|
||||
# Load ip_tables module
|
||||
modprobe ip_tables
|
||||
# Load $iptables_module module
|
||||
modprobe $iptables_module
|
||||
EOI
|
||||
)
|
||||
fi
|
||||
|
||||
contrib/otel/README.md (new file, 28 lines)
@@ -0,0 +1,28 @@
# Sample stack for testing OTEL functionality with moby

To test the OTEL functionality present in moby, you can spin up a small demo compose stack that includes:
- an OTEL collector container;
- a Jaeger container to visualize traces;
- an alternative Aspire Dashboard container to visualize traces.

The OTEL collector is configured to export traces to both the Jaeger and Aspire containers.

The `contrib/otel` directory contains the compose file with the services configured, along with a basic configuration file for the OTEL collector.

## How can I use it?

1. Export the env var used to override the OTLP endpoint:
   `export OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4318` (if running in a devcontainer or in other ways, you might have to change how you pass this env var to the daemon);
2. Start the moby engine you want to get traces from (make sure it gets the env var declared above);
3. Start the otel compose stack by running `docker compose up -d` in the `contrib/otel/` directory;
4. Make some calls to the engine using the Docker CLI to send some traces to the endpoint;
5. Browse Jaeger at `http://localhost:16686` or the Aspire Dashboard at `http://localhost:18888/traces`;
6. To see some traces from the engine, select `dockerd` in the top-left dropdown.

> **Note**: The precise steps may vary based on how you're working on the codebase (building a binary and executing it natively, running/debugging in a devcontainer, etc.).

## Cleanup?

Simply run `docker compose down` in the `contrib/otel/` directory.

You can also run `unset OTEL_EXPORTER_OTLP_ENDPOINT` to remove the OTLP env var from your environment.
contrib/otel/compose.yaml (new file, 31 lines)
@@ -0,0 +1,31 @@
name: moby-otel

services:

  jaeger:
    image: jaegertracing/all-in-one:latest
    restart: always
    ports:
      - 16686:16686

  aspire-dashboard:
    image: mcr.microsoft.com/dotnet/nightly/aspire-dashboard
    restart: always
    ports:
      - 0.0.0.0:18888:18888
    environment:
      DOTNET_DASHBOARD_UNSECURED_ALLOW_ANONYMOUS: 'true'

  otelcol:
    image: otel/opentelemetry-collector-contrib:latest
    restart: always
    depends_on:
      - jaeger
      - aspire-dashboard
    ports:
      - 4318:4318 # default otlp http port
    develop:
      watch:
        - action: sync+restart
          path: ./otelcol.yaml
          target: /etc/otelcol-contrib/config.yaml
|
||||
contrib/otel/otelcol.yaml (new file, 23 lines)
@@ -0,0 +1,23 @@
# Receive signals over gRPC and HTTP
# moby currently uses http
receivers:
  otlp:
    protocols:
      grpc:
        endpoint: 0.0.0.0:4317
      http:
        endpoint: 0.0.0.0:4318

exporters:
  otlp/jaeger:
    endpoint: jaeger:4317
    tls:
      insecure: true
  otlp/aspire:
    endpoint: aspire-dashboard:18889
    tls:
      insecure: true

service:
  pipelines:
    traces:
      receivers: [otlp]
      exporters: [otlp/jaeger, otlp/aspire]
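
Not part of the diff, but one possible way to sanity-check this collector config end to end is a small Go program that sends a single span to the OTLP HTTP endpoint the compose stack publishes on `localhost:4318`. This is a sketch assuming the standard OpenTelemetry Go SDK packages; adjust the endpoint env var as in the README above.

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	ctx := context.Background()

	// Honors OTEL_EXPORTER_OTLP_ENDPOINT; WithInsecure matches the plain-HTTP
	// listener configured above on 0.0.0.0:4318.
	exporter, err := otlptracehttp.New(ctx, otlptracehttp.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}

	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exporter))
	defer func() { _ = tp.Shutdown(ctx) }()
	otel.SetTracerProvider(tp)

	_, span := otel.Tracer("otel-smoke-test").Start(ctx, "hello-from-contrib")
	span.End() // the span should show up in Jaeger and the Aspire dashboard
}
```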
|
||||
@@ -1,7 +1,7 @@
|
||||
package daemon // import "github.com/docker/docker/daemon"
|
||||
|
||||
import (
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/moby/go-archive"
|
||||
)
|
||||
|
||||
// defaultTarCopyOptions is the setting that is used when unpacking an archive
|
||||
|
||||
@@ -10,8 +10,7 @@ import (
|
||||
|
||||
"github.com/docker/docker/container"
|
||||
"github.com/docker/docker/errdefs"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/moby/go-archive"
|
||||
"github.com/moby/sys/user"
|
||||
)
|
||||
|
||||
@@ -27,7 +26,7 @@ func (daemon *Daemon) tarCopyOptions(ctr *container.Container, noOverwriteDirNon
|
||||
|
||||
return &archive.TarOptions{
|
||||
NoOverwriteDirNonDir: noOverwriteDirNonDir,
|
||||
ChownOpts: &idtools.Identity{UID: uid, GID: gid},
|
||||
ChownOpts: &archive.ChownOpts{UID: uid, GID: gid},
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -12,9 +12,9 @@ import (
|
||||
"github.com/docker/docker/api/types/events"
|
||||
"github.com/docker/docker/container"
|
||||
"github.com/docker/docker/errdefs"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/ioutils"
|
||||
volumemounts "github.com/docker/docker/volume/mounts"
|
||||
"github.com/moby/go-archive"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
|
||||
@@ -10,9 +10,9 @@ import (
|
||||
"github.com/docker/docker/api/types/events"
|
||||
"github.com/docker/docker/container"
|
||||
"github.com/docker/docker/errdefs"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/chrootarchive"
|
||||
"github.com/docker/docker/pkg/ioutils"
|
||||
"github.com/moby/go-archive"
|
||||
"github.com/moby/go-archive/chrootarchive"
|
||||
)
|
||||
|
||||
// containerStatPath stats the filesystem resource at the specified path in this
|
||||
|
||||
@@ -33,12 +33,10 @@ func (daemon *Daemon) ContainerAttach(prefixOrName string, req *backend.Containe
|
||||
return err
|
||||
}
|
||||
if ctr.IsPaused() {
|
||||
err := fmt.Errorf("container %s is paused, unpause the container before attach", prefixOrName)
|
||||
return errdefs.Conflict(err)
|
||||
return errdefs.Conflict(fmt.Errorf("container %s is paused, unpause the container before attach", prefixOrName))
|
||||
}
|
||||
if ctr.IsRestarting() {
|
||||
err := fmt.Errorf("container %s is restarting, wait until the container is running", prefixOrName)
|
||||
return errdefs.Conflict(err)
|
||||
return errdefs.Conflict(fmt.Errorf("container %s is restarting, wait until the container is running", prefixOrName))
|
||||
}
|
||||
|
||||
cfg := stream.AttachConfig{
|
||||
@@ -51,8 +49,6 @@ func (daemon *Daemon) ContainerAttach(prefixOrName string, req *backend.Containe
|
||||
}
|
||||
ctr.StreamConfig.AttachStreams(&cfg)
|
||||
|
||||
multiplexed := !ctr.Config.Tty && req.MuxStreams
|
||||
|
||||
clientCtx, closeNotify := context.WithCancel(context.Background())
|
||||
defer closeNotify()
|
||||
go func() {
|
||||
@@ -68,6 +64,7 @@ func (daemon *Daemon) ContainerAttach(prefixOrName string, req *backend.Containe
|
||||
}
|
||||
}()
|
||||
|
||||
multiplexed := !ctr.Config.Tty && req.MuxStreams
|
||||
inStream, outStream, errStream, err := req.GetStreams(multiplexed, closeNotify)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -91,7 +88,7 @@ func (daemon *Daemon) ContainerAttach(prefixOrName string, req *backend.Containe
|
||||
}
|
||||
|
||||
if err := daemon.containerAttach(ctr, &cfg, req.Logs, req.Stream); err != nil {
|
||||
fmt.Fprintf(outStream, "Error attaching: %s\n", err)
|
||||
_, _ = fmt.Fprintln(outStream, "Error attaching:", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -124,16 +121,19 @@ func (daemon *Daemon) ContainerAttachRaw(prefixOrName string, stdin io.ReadClose
|
||||
return daemon.containerAttach(ctr, &cfg, false, doStream)
|
||||
}
|
||||
|
||||
func (daemon *Daemon) containerAttach(c *container.Container, cfg *stream.AttachConfig, logs, doStream bool) error {
|
||||
if logs {
|
||||
logDriver, logCreated, err := daemon.getLogger(c)
|
||||
func (daemon *Daemon) containerAttach(ctr *container.Container, cfg *stream.AttachConfig, enableLogs, doStream bool) error {
|
||||
if enableLogs {
|
||||
logDriver, logCreated, err := daemon.getLogger(ctr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if logCreated {
|
||||
defer func() {
|
||||
if err = logDriver.Close(); err != nil {
|
||||
log.G(context.TODO()).Errorf("Error closing logger: %v", err)
|
||||
log.G(context.TODO()).WithFields(log.Fields{
|
||||
"error": err,
|
||||
"container": ctr.ID,
|
||||
}).Error("Error closing logger")
|
||||
}
|
||||
}()
|
||||
}
|
||||
@@ -141,13 +141,13 @@ func (daemon *Daemon) containerAttach(c *container.Container, cfg *stream.Attach
|
||||
if !ok {
|
||||
return logger.ErrReadLogsNotSupported{}
|
||||
}
|
||||
logs := cLog.ReadLogs(context.TODO(), logger.ReadConfig{Tail: -1})
|
||||
defer logs.ConsumerGone()
|
||||
logWatcher := cLog.ReadLogs(context.TODO(), logger.ReadConfig{Tail: -1})
|
||||
defer logWatcher.ConsumerGone()
|
||||
|
||||
LogLoop:
|
||||
for {
|
||||
select {
|
||||
case msg, ok := <-logs.Msg:
|
||||
case msg, ok := <-logWatcher.Msg:
|
||||
if !ok {
|
||||
break LogLoop
|
||||
}
|
||||
@@ -157,14 +157,17 @@ func (daemon *Daemon) containerAttach(c *container.Container, cfg *stream.Attach
|
||||
if msg.Source == "stderr" && cfg.Stderr != nil {
|
||||
cfg.Stderr.Write(msg.Line)
|
||||
}
|
||||
case err := <-logs.Err:
|
||||
log.G(context.TODO()).Errorf("Error streaming logs: %v", err)
|
||||
case err := <-logWatcher.Err:
|
||||
log.G(context.TODO()).WithFields(log.Fields{
|
||||
"error": err,
|
||||
"container": ctr.ID,
|
||||
}).Error("Error streaming logs")
|
||||
break LogLoop
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
daemon.LogContainerEvent(c, events.ActionAttach)
|
||||
daemon.LogContainerEvent(ctr, events.ActionAttach)
|
||||
|
||||
if !doStream {
|
||||
return nil
|
||||
@@ -173,33 +176,38 @@ func (daemon *Daemon) containerAttach(c *container.Container, cfg *stream.Attach
|
||||
if cfg.Stdin != nil {
|
||||
r, w := io.Pipe()
|
||||
go func(stdin io.ReadCloser) {
|
||||
defer w.Close()
|
||||
defer log.G(context.TODO()).Debug("Closing buffered stdin pipe")
|
||||
io.Copy(w, stdin)
|
||||
log.G(context.TODO()).WithFields(log.Fields{
|
||||
"container": ctr.ID,
|
||||
}).Debug("Closing buffered stdin pipe")
|
||||
w.Close()
|
||||
}(cfg.Stdin)
|
||||
cfg.Stdin = r
|
||||
}
|
||||
|
||||
if !c.Config.OpenStdin {
|
||||
if !ctr.Config.OpenStdin {
|
||||
cfg.Stdin = nil
|
||||
}
|
||||
|
||||
if c.Config.StdinOnce && !c.Config.Tty {
|
||||
if ctr.Config.StdinOnce && !ctr.Config.Tty {
|
||||
// Wait for the container to stop before returning.
|
||||
waitChan := c.Wait(context.Background(), container.WaitConditionNotRunning)
|
||||
waitChan := ctr.Wait(context.Background(), container.WaitConditionNotRunning)
|
||||
defer func() {
|
||||
<-waitChan // Ignore returned exit code.
|
||||
}()
|
||||
}
|
||||
|
||||
ctx := c.AttachContext()
|
||||
err := <-c.StreamConfig.CopyStreams(ctx, cfg)
|
||||
ctx := ctr.AttachContext()
|
||||
err := <-ctr.StreamConfig.CopyStreams(ctx, cfg)
|
||||
if err != nil {
|
||||
var ierr term.EscapeError
|
||||
if errors.Is(err, context.Canceled) || errors.As(err, &ierr) {
|
||||
daemon.LogContainerEvent(c, events.ActionDetach)
|
||||
daemon.LogContainerEvent(ctr, events.ActionDetach)
|
||||
} else {
|
||||
log.G(ctx).Errorf("attach failed with error: %v", err)
|
||||
log.G(ctx).WithFields(log.Fields{
|
||||
"error": err,
|
||||
"container": ctr.ID,
|
||||
}).Error("attach failed with error")
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/internal/metrics"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/moby/go-archive"
|
||||
)
|
||||
|
||||
// ContainerChanges returns a list of container fs changes
|
||||
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/pkg/atomicwriter"
|
||||
"github.com/moby/sys/atomicwriter"
|
||||
)
|
||||
|
||||
// convertKVStringsToMap converts ["key=value"] to {"key":"value"}
|
||||
|
||||
@@ -91,8 +91,10 @@ var flatOptions = map[string]bool{
|
||||
var skipValidateOptions = map[string]bool{
|
||||
"features": true,
|
||||
"builder": true,
|
||||
// Corresponding flag has been removed because it was already unusable
|
||||
"deprecated-key-path": true,
|
||||
|
||||
// Deprecated options that are safe to ignore if present.
|
||||
"deprecated-key-path": true,
|
||||
"allow-nondistributable-artifacts": true,
|
||||
}
|
||||
|
||||
// skipDuplicates contains configuration keys that
|
||||
|
||||
@@ -365,3 +365,34 @@ func TestDaemonConfigurationHostGatewayIP(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestDaemonLegacyOptions verifies that loading config files containing
// deprecated / legacy options does not prevent the daemon from loading
// the config (some options may have an explicit error, and can be tested
// separately).
func TestDaemonLegacyOptions(t *testing.T) {
	tests := []struct {
		name       string
		configJSON string
	}{
		{
			name:       "deprecated-key-path",
			configJSON: `{"deprecated-key-path": "/etc/docker/key.json"}`,
		},
		{
			name:       "allow-nondistributable-artifacts",
			configJSON: `{"allow-nondistributable-artifacts": ["127.0.0.0/8", "10.10.1.11:5000", "10.10.1.22:5000", "registry.example.com", "registry.example.com"]}`,
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			configFile := makeConfigFile(t, tc.configJSON)
			flags := pflag.NewFlagSet("test", pflag.ContinueOnError)
			c, err := New()
			assert.NilError(t, err)
			_, err = MergeDaemonConfigurations(c, flags, configFile)
			assert.NilError(t, err)
		})
	}
}
|
||||
|
||||
@@ -19,10 +19,10 @@ import (
|
||||
"github.com/docker/docker/errdefs"
|
||||
"github.com/docker/docker/libnetwork"
|
||||
"github.com/docker/docker/libnetwork/drivers/bridge"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/pkg/process"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
"github.com/moby/sys/mount"
|
||||
"github.com/moby/sys/user"
|
||||
"github.com/opencontainers/selinux/go-selinux/label"
|
||||
"github.com/pkg/errors"
|
||||
"go.opentelemetry.io/otel"
|
||||
@@ -251,14 +251,14 @@ func (daemon *Daemon) setupIPCDirs(ctr *container.Container) error {
|
||||
fallthrough
|
||||
|
||||
case ipcMode.IsShareable():
|
||||
rootIDs := daemon.idMapping.RootPair()
|
||||
uid, gid := daemon.idMapping.RootPair()
|
||||
if !ctr.HasMountFor("/dev/shm") {
|
||||
shmPath, err := ctr.ShmResourcePath()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := idtools.MkdirAllAndChown(shmPath, 0o700, rootIDs); err != nil {
|
||||
if err := user.MkdirAllAndChown(shmPath, 0o700, uid, gid); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -266,7 +266,7 @@ func (daemon *Daemon) setupIPCDirs(ctr *container.Container) error {
|
||||
if err := unix.Mount("shm", shmPath, "tmpfs", uintptr(unix.MS_NOEXEC|unix.MS_NOSUID|unix.MS_NODEV), label.FormatMountLabel(shmproperty, ctr.GetMountLabel())); err != nil {
|
||||
return fmt.Errorf("mounting shm tmpfs: %s", err)
|
||||
}
|
||||
if err := os.Chown(shmPath, rootIDs.UID, rootIDs.GID); err != nil {
|
||||
if err := os.Chown(shmPath, uid, gid); err != nil {
|
||||
return err
|
||||
}
|
||||
ctr.ShmPath = shmPath
|
||||
@@ -298,7 +298,7 @@ func (daemon *Daemon) setupSecretDir(ctr *container.Container) (setupErr error)
|
||||
}
|
||||
|
||||
// retrieve possible remapped range start for root UID, GID
|
||||
rootIDs := daemon.idMapping.RootPair()
|
||||
ruid, rgid := daemon.idMapping.RootPair()
|
||||
|
||||
for _, s := range ctr.SecretReferences {
|
||||
// TODO (ehazlett): use type switch when more are supported
|
||||
@@ -313,7 +313,7 @@ func (daemon *Daemon) setupSecretDir(ctr *container.Container) (setupErr error)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error getting secret file path")
|
||||
}
|
||||
if err := idtools.MkdirAllAndChown(filepath.Dir(fPath), 0o700, rootIDs); err != nil {
|
||||
if err := user.MkdirAllAndChown(filepath.Dir(fPath), 0o700, ruid, rgid); err != nil {
|
||||
return errors.Wrap(err, "error creating secret mount path")
|
||||
}
|
||||
|
||||
@@ -338,7 +338,7 @@ func (daemon *Daemon) setupSecretDir(ctr *container.Container) (setupErr error)
|
||||
return err
|
||||
}
|
||||
|
||||
if err := os.Chown(fPath, rootIDs.UID+uid, rootIDs.GID+gid); err != nil {
|
||||
if err := os.Chown(fPath, ruid+uid, rgid+gid); err != nil {
|
||||
return errors.Wrap(err, "error setting ownership for secret")
|
||||
}
|
||||
if err := os.Chmod(fPath, s.File.Mode); err != nil {
|
||||
@@ -364,7 +364,7 @@ func (daemon *Daemon) setupSecretDir(ctr *container.Container) (setupErr error)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error getting config file path for container")
|
||||
}
|
||||
if err := idtools.MkdirAllAndChown(filepath.Dir(fPath), 0o700, rootIDs); err != nil {
|
||||
if err := user.MkdirAllAndChown(filepath.Dir(fPath), 0o700, ruid, rgid); err != nil {
|
||||
return errors.Wrap(err, "error creating config mount path")
|
||||
}
|
||||
|
||||
@@ -389,7 +389,7 @@ func (daemon *Daemon) setupSecretDir(ctr *container.Container) (setupErr error)
|
||||
return err
|
||||
}
|
||||
|
||||
if err := os.Chown(fPath, rootIDs.UID+uid, rootIDs.GID+gid); err != nil {
|
||||
if err := os.Chown(fPath, ruid+uid, rgid+gid); err != nil {
|
||||
return errors.Wrap(err, "error setting ownership for config")
|
||||
}
|
||||
if err := os.Chmod(fPath, configRef.File.Mode); err != nil {
|
||||
@@ -404,18 +404,18 @@ func (daemon *Daemon) setupSecretDir(ctr *container.Container) (setupErr error)
|
||||
// In practice this is using a tmpfs mount and is used for both "configs" and "secrets"
|
||||
func (daemon *Daemon) createSecretsDir(ctr *container.Container) error {
|
||||
// retrieve possible remapped range start for root UID, GID
|
||||
rootIDs := daemon.idMapping.RootPair()
|
||||
uid, gid := daemon.idMapping.RootPair()
|
||||
dir, err := ctr.SecretMountPath()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error getting container secrets dir")
|
||||
}
|
||||
|
||||
// create tmpfs
|
||||
if err := idtools.MkdirAllAndChown(dir, 0o700, rootIDs); err != nil {
|
||||
if err := user.MkdirAllAndChown(dir, 0o700, uid, gid); err != nil {
|
||||
return errors.Wrap(err, "error creating secret local mount path")
|
||||
}
|
||||
|
||||
tmpfsOwnership := fmt.Sprintf("uid=%d,gid=%d", rootIDs.UID, rootIDs.GID)
|
||||
tmpfsOwnership := fmt.Sprintf("uid=%d,gid=%d", uid, gid)
|
||||
if err := mount.Mount("tmpfs", dir, "tmpfs", "nodev,nosuid,noexec,"+tmpfsOwnership); err != nil {
|
||||
return errors.Wrap(err, "unable to setup secret mount")
|
||||
}
|
||||
@@ -430,8 +430,8 @@ func (daemon *Daemon) remountSecretDir(ctr *container.Container) error {
|
||||
if err := label.Relabel(dir, ctr.MountLabel, false); err != nil {
|
||||
log.G(context.TODO()).WithError(err).WithField("dir", dir).Warn("Error while attempting to set selinux label")
|
||||
}
|
||||
rootIDs := daemon.idMapping.RootPair()
|
||||
tmpfsOwnership := fmt.Sprintf("uid=%d,gid=%d", rootIDs.UID, rootIDs.GID)
|
||||
uid, gid := daemon.idMapping.RootPair()
|
||||
tmpfsOwnership := fmt.Sprintf("uid=%d,gid=%d", uid, gid)
|
||||
|
||||
// remount secrets ro
|
||||
if err := mount.Mount("tmpfs", dir, "tmpfs", "remount,ro,"+tmpfsOwnership); err != nil {
|
||||
@@ -577,5 +577,6 @@ func (daemon *Daemon) setupContainerMountsRoot(ctr *container.Container) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return idtools.MkdirAllAndChown(p, 0o710, idtools.Identity{UID: idtools.CurrentIdentity().UID, GID: daemon.IdentityMapping().RootPair().GID})
|
||||
_, gid := daemon.IdentityMapping().RootPair()
|
||||
return user.MkdirAllAndChown(p, 0o710, os.Getuid(), gid)
|
||||
}
|
||||
|
||||
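Several hunks above (and more below, in daemon.go) repeat one mechanical migration: github.com/docker/docker/pkg/idtools is replaced by github.com/moby/sys/user, RootPair() now returns plain (uid, gid) ints instead of an idtools.Identity, and MkdirAllAndChown takes those ints directly. A minimal sketch of the resulting call shape, with a made-up directory and an empty mapping standing in for daemon.idMapping (assumed, like the idtools mapping it replaces, to map root to uid 0/gid 0 when no userns remapping is configured):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/moby/sys/user"
)

// ensureOwnedDir follows the post-migration pattern: take the (uid, gid)
// pair from the identity mapping and hand it straight to MkdirAllAndChown.
func ensureOwnedDir(idMapping user.IdentityMapping, dir string) error {
	uid, gid := idMapping.RootPair()
	return user.MkdirAllAndChown(dir, 0o710, uid, gid)
}

func main() {
	var idm user.IdentityMapping // zero value: no userns remapping assumed
	dir := filepath.Join(os.TempDir(), "example-mounts-root")
	if err := ensureOwnedDir(idm, dir); err != nil {
		// Chowning to root's uid/gid fails for unprivileged users; the call
		// shape, not the outcome, is what this sketch illustrates.
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("created", dir)
}
```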
@@ -30,12 +30,11 @@ import (
|
||||
"github.com/docker/docker/image"
|
||||
dimage "github.com/docker/docker/image"
|
||||
"github.com/docker/docker/layer"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/progress"
|
||||
"github.com/docker/docker/pkg/streamformatter"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
registrypkg "github.com/docker/docker/registry"
|
||||
imagespec "github.com/moby/docker-image-spec/specs-go/v1"
|
||||
"github.com/moby/go-archive"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"github.com/opencontainers/image-spec/identity"
|
||||
"github.com/opencontainers/image-spec/specs-go"
|
||||
@@ -144,12 +143,7 @@ func (i *ImageService) pullForBuilder(ctx context.Context, name string, authConf
|
||||
pullRegistryAuth := ®istry.AuthConfig{}
|
||||
if len(authConfigs) > 0 {
|
||||
// The request came with a full auth config, use it
|
||||
repoInfo, err := i.registryService.ResolveRepository(ref)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resolvedConfig := registrypkg.ResolveAuthConfig(authConfigs, repoInfo.Index)
|
||||
resolvedConfig := i.registryService.ResolveAuthConfig(authConfigs, ref)
|
||||
pullRegistryAuth = &resolvedConfig
|
||||
}
|
||||
|
||||
|
||||
@@ -7,8 +7,8 @@ import (
|
||||
"github.com/containerd/containerd/v2/core/mount"
|
||||
"github.com/containerd/log"
|
||||
"github.com/docker/docker/container"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
"github.com/moby/go-archive"
|
||||
)
|
||||
|
||||
func (i *ImageService) Changes(ctx context.Context, ctr *container.Container) ([]archive.Change, error) {
|
||||
|
||||
@@ -18,8 +18,8 @@ import (
|
||||
"github.com/containerd/log"
|
||||
"github.com/docker/docker/api/types/backend"
|
||||
"github.com/docker/docker/image"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
imagespec "github.com/moby/docker-image-spec/specs-go/v1"
|
||||
"github.com/moby/go-archive"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"github.com/opencontainers/image-spec/identity"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
@@ -152,8 +152,8 @@ func (i *ImageService) createDiff(ctx context.Context, name string, sn snapshots
|
||||
if !i.idMapping.Empty() {
|
||||
// The rootfs of the container is remapped if an id mapping exists, we
|
||||
// need to "unremap" it before committing the snapshot
|
||||
rootPair := i.idMapping.RootPair()
|
||||
usernsID := fmt.Sprintf("%s-%d-%d-%s", name, rootPair.UID, rootPair.GID, uniquePart())
|
||||
uid, gid := i.idMapping.RootPair()
|
||||
usernsID := fmt.Sprintf("%s-%d-%d-%s", name, uid, gid, uniquePart())
|
||||
remappedID := usernsID + remapSuffix
|
||||
baseName := name
|
||||
|
||||
|
||||
@@ -18,8 +18,8 @@ import (
|
||||
"github.com/docker/docker/api/types/events"
|
||||
"github.com/docker/docker/daemon/images"
|
||||
"github.com/docker/docker/errdefs"
|
||||
dockerarchive "github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/streamformatter"
|
||||
"github.com/moby/go-archive/compression"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
@@ -230,7 +230,7 @@ func (i *ImageService) leaseContent(ctx context.Context, store content.Store, de
|
||||
// complement of ExportImage. The input stream is an uncompressed tar
|
||||
// ball containing images and metadata.
|
||||
func (i *ImageService) LoadImage(ctx context.Context, inTar io.ReadCloser, platform *ocispec.Platform, outStream io.Writer, quiet bool) error {
|
||||
decompressed, err := dockerarchive.DecompressStream(inTar)
|
||||
decompressed, err := compression.DecompressStream(inTar)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to decompress input tar archive")
|
||||
}
|
||||
@@ -248,8 +248,21 @@ func (i *ImageService) LoadImage(ctx context.Context, inTar io.ReadCloser, platf
|
||||
if nameFromArchive == "" {
|
||||
return false
|
||||
}
|
||||
_, err := reference.ParseNormalizedNamed(nameFromArchive)
|
||||
return err == nil
|
||||
|
||||
ref, err := reference.ParseNormalizedNamed(nameFromArchive)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// Look up if there is an existing image with this name and ensure a dangling image exists.
|
||||
if img, err := i.images.Get(ctx, ref.String()); err == nil {
|
||||
if err := i.ensureDanglingImage(ctx, img); err != nil {
|
||||
log.G(ctx).WithError(err).Warnf("failed to keep the previous image for %s as dangling", img.Name)
|
||||
}
|
||||
} else if !errdefs.IsNotFound(err) {
|
||||
log.G(ctx).WithError(err).Warn("failed to retrieve image: %w", err)
|
||||
}
|
||||
return true
|
||||
}),
|
||||
}
|
||||
|
||||
|
||||
@@ -20,10 +20,10 @@ import (
|
||||
"github.com/docker/docker/builder/dockerfile"
|
||||
"github.com/docker/docker/errdefs"
|
||||
"github.com/docker/docker/image"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/pools"
|
||||
"github.com/google/uuid"
|
||||
imagespec "github.com/moby/docker-image-spec/specs-go/v1"
|
||||
"github.com/moby/go-archive/compression"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"github.com/opencontainers/image-spec/specs-go"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
@@ -168,17 +168,17 @@ func saveArchive(ctx context.Context, cs content.Store, layerReader io.Reader) (
|
||||
bufRd := p.Get(layerReader)
|
||||
defer p.Put(bufRd)
|
||||
|
||||
compression, err := detectCompression(bufRd)
|
||||
comp, err := detectCompression(bufRd)
|
||||
if err != nil {
|
||||
return "", "", "", err
|
||||
}
|
||||
|
||||
var uncompressedReader io.Reader = bufRd
|
||||
switch compression {
|
||||
case archive.Gzip, archive.Zstd:
|
||||
switch comp {
|
||||
case compression.Gzip, compression.Zstd:
|
||||
// If the input is already a compressed layer, just save it as is.
|
||||
mediaType := ocispec.MediaTypeImageLayerGzip
|
||||
if compression == archive.Zstd {
|
||||
if comp == compression.Zstd {
|
||||
mediaType = ocispec.MediaTypeImageLayerZstd
|
||||
}
|
||||
|
||||
@@ -188,19 +188,17 @@ func saveArchive(ctx context.Context, cs content.Store, layerReader io.Reader) (
|
||||
}
|
||||
|
||||
return compressedDigest, uncompressedDigest, mediaType, nil
|
||||
case archive.Bzip2, archive.Xz:
|
||||
r, err := archive.DecompressStream(bufRd)
|
||||
case compression.Bzip2, compression.Xz:
|
||||
r, err := compression.DecompressStream(bufRd)
|
||||
if err != nil {
|
||||
return "", "", "", errdefs.InvalidParameter(err)
|
||||
}
|
||||
defer r.Close()
|
||||
uncompressedReader = r
|
||||
fallthrough
|
||||
case archive.Uncompressed:
|
||||
case compression.None:
|
||||
mediaType := ocispec.MediaTypeImageLayerGzip
|
||||
compression := archive.Gzip
|
||||
|
||||
compressedDigest, uncompressedDigest, err := compressAndWriteBlob(ctx, cs, compression, mediaType, uncompressedReader)
|
||||
compressedDigest, uncompressedDigest, err := compressAndWriteBlob(ctx, cs, compression.Gzip, mediaType, uncompressedReader)
|
||||
if err != nil {
|
||||
return "", "", "", err
|
||||
}
|
||||
@@ -228,7 +226,7 @@ func writeCompressedBlob(ctx context.Context, cs content.Store, mediaType string
|
||||
digester := digest.Canonical.Digester()
|
||||
|
||||
// Decompress the piped blob.
|
||||
decompressedStream, err := archive.DecompressStream(pr)
|
||||
decompressedStream, err := compression.DecompressStream(pr)
|
||||
if err == nil {
|
||||
// Feed the digester with decompressed data.
|
||||
_, err = io.Copy(digester.Hash(), decompressedStream)
|
||||
@@ -249,12 +247,12 @@ func writeCompressedBlob(ctx context.Context, cs content.Store, mediaType string
|
||||
}
|
||||
|
||||
// compressAndWriteBlob compresses the uncompressedReader and stores it in the content store.
|
||||
func compressAndWriteBlob(ctx context.Context, cs content.Store, compression archive.Compression, mediaType string, uncompressedLayerReader io.Reader) (digest.Digest, digest.Digest, error) {
|
||||
func compressAndWriteBlob(ctx context.Context, cs content.Store, comp compression.Compression, mediaType string, uncompressedLayerReader io.Reader) (digest.Digest, digest.Digest, error) {
|
||||
pr, pw := io.Pipe()
|
||||
defer pr.Close()
|
||||
defer pw.Close()
|
||||
|
||||
compressor, err := archive.CompressStream(pw, compression)
|
||||
compressor, err := compression.CompressStream(pw, comp)
|
||||
if err != nil {
|
||||
return "", "", errdefs.InvalidParameter(err)
|
||||
}
|
||||
@@ -312,7 +310,7 @@ func (i *ImageService) unpackImage(ctx context.Context, snapshotter string, img
|
||||
}
|
||||
|
||||
// detectCompression detects the reader compression type.
|
||||
func detectCompression(bufRd *bufio.Reader) (archive.Compression, error) {
|
||||
func detectCompression(bufRd *bufio.Reader) (compression.Compression, error) {
|
||||
bs, err := bufRd.Peek(10)
|
||||
if err != nil && err != io.EOF {
|
||||
// Note: we'll ignore any io.EOF error because there are some odd
|
||||
@@ -321,10 +319,10 @@ func detectCompression(bufRd *bufio.Reader) (archive.Compression, error) {
|
||||
// cases we'll just treat it as a non-compressed stream and
|
||||
// that means just create an empty layer.
|
||||
// See Issue 18170
|
||||
return archive.Uncompressed, errdefs.Unknown(err)
|
||||
return compression.None, errdefs.Unknown(err)
|
||||
}
|
||||
|
||||
return archive.DetectCompression(bs), nil
|
||||
return compression.Detect(bs), nil
|
||||
}
|
||||
|
||||
// fillUncompressedLabel sets the uncompressed digest label on the compressed blob metadata
|
||||
|
||||
@@ -23,8 +23,7 @@ import (
|
||||
)
|
||||
|
||||
func (i *ImageService) ImageInspect(ctx context.Context, refOrID string, opts backend.ImageInspectOpts) (*imagetypes.InspectResponse, error) {
|
||||
// TODO: Pass in opts
|
||||
var requestedPlatform *ocispec.Platform
|
||||
requestedPlatform := opts.Platform
|
||||
|
||||
c8dImg, err := i.resolveImage(ctx, refOrID)
|
||||
if err != nil {
|
||||
@@ -60,7 +59,6 @@ func (i *ImageService) ImageInspect(ctx context.Context, refOrID string, opts ba
|
||||
return nil, err
|
||||
}
|
||||
|
||||
//nolint:govet // TODO: requestedPlatform is always nil, but should be passed by the caller
|
||||
if multi.Best == nil && requestedPlatform != nil {
|
||||
return nil, &errPlatformNotFound{
|
||||
imageRef: refOrID,
|
||||
@@ -87,6 +85,10 @@ func (i *ImageService) ImageInspect(ctx context.Context, refOrID string, opts ba
|
||||
|
||||
repoTags, repoDigests := collectRepoTagsAndDigests(ctx, tagged)
|
||||
|
||||
if requestedPlatform != nil {
|
||||
target = multi.Best.Target()
|
||||
}
|
||||
|
||||
resp := &imagetypes.InspectResponse{
|
||||
ID: target.Digest.String(),
|
||||
RepoTags: repoTags,
|
||||
|
||||
@@ -61,4 +61,38 @@ func TestImageInspect(t *testing.T) {
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("inspect image with platform parameter", func(t *testing.T) {
|
||||
ctx := logtest.WithT(ctx, t)
|
||||
service := fakeImageService(t, ctx, cs)
|
||||
|
||||
multiPlatformImage := toContainerdImage(t, func(dir string) (*ocispec.Index, error) {
|
||||
idx, _, err := specialimage.MultiPlatform(dir, "multiplatform:latest", []ocispec.Platform{
|
||||
{OS: "linux", Architecture: "amd64"},
|
||||
{OS: "linux", Architecture: "arm64"},
|
||||
})
|
||||
return idx, err
|
||||
})
|
||||
|
||||
_, err := service.images.Create(ctx, multiPlatformImage)
|
||||
assert.NilError(t, err)
|
||||
|
||||
// Test with amd64 platform
|
||||
amd64Platform := &ocispec.Platform{OS: "linux", Architecture: "amd64"}
|
||||
inspectAmd64, err := service.ImageInspect(ctx, multiPlatformImage.Name, backend.ImageInspectOpts{
|
||||
Platform: amd64Platform,
|
||||
})
|
||||
assert.NilError(t, err)
|
||||
assert.Equal(t, inspectAmd64.Architecture, "amd64")
|
||||
assert.Equal(t, inspectAmd64.Os, "linux")
|
||||
|
||||
// Test with arm64 platform
|
||||
arm64Platform := &ocispec.Platform{OS: "linux", Architecture: "arm64"}
|
||||
inspectArm64, err := service.ImageInspect(ctx, multiPlatformImage.Name, backend.ImageInspectOpts{
|
||||
Platform: arm64Platform,
|
||||
})
|
||||
assert.NilError(t, err)
|
||||
assert.Equal(t, inspectArm64.Architecture, "arm64")
|
||||
assert.Equal(t, inspectArm64.Os, "linux")
|
||||
})
|
||||
}
|
||||
|
||||
@@ -130,11 +130,13 @@ func (i *ImageService) Images(ctx context.Context, opts imagetypes.ListOptions)
|
||||
}
|
||||
|
||||
dgst := img.Target.Digest
|
||||
uniqueImages[dgst] = img
|
||||
|
||||
if isDangling {
|
||||
if _, ok := uniqueImages[dgst]; !ok {
|
||||
uniqueImages[dgst] = img
|
||||
}
|
||||
continue
|
||||
}
|
||||
uniqueImages[dgst] = img
|
||||
|
||||
ref, err := reference.ParseNormalizedNamed(img.Name)
|
||||
if err != nil {
|
||||
|
||||
@@ -14,7 +14,7 @@ import (
|
||||
"github.com/containerd/platforms"
|
||||
"github.com/docker/docker/errdefs"
|
||||
"github.com/docker/docker/internal/testutils/specialimage"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/moby/go-archive"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"gotest.tools/v3/assert"
|
||||
is "gotest.tools/v3/assert/cmp"
|
||||
|
||||
@@ -117,13 +117,12 @@ func (i *ImageService) pullTag(ctx context.Context, ref reference.Named, platfor
|
||||
}
|
||||
|
||||
jobs := newJobs()
|
||||
h := c8dimages.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
|
||||
if c8dimages.IsLayerType(desc.MediaType) {
|
||||
opts = append(opts, containerd.WithImageHandler(c8dimages.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
|
||||
if showBlobProgress(desc) {
|
||||
jobs.Add(desc)
|
||||
}
|
||||
return nil, nil
|
||||
})
|
||||
opts = append(opts, containerd.WithImageHandler(h))
|
||||
})))
|
||||
|
||||
pp := &pullProgress{
|
||||
store: i.content,
|
||||
@@ -229,7 +228,7 @@ func (i *ImageService) pullTag(ctx context.Context, ref reference.Named, platfor
|
||||
return errdefs.NotFound(fmt.Errorf("no matching manifest for %s in the manifest list entries: %w", platformStr, err))
|
||||
}
|
||||
}
|
||||
return err
|
||||
return translateRegistryError(ctx, err)
|
||||
}
|
||||
|
||||
logger := log.G(ctx).WithFields(log.Fields{
|
||||
|
||||
@@ -149,19 +149,13 @@ func (i *ImageService) pushRef(ctx context.Context, targetRef reference.Named, p
|
||||
return err
|
||||
}
|
||||
|
||||
addLayerJobs := c8dimages.HandlerFunc(
|
||||
func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
|
||||
switch {
|
||||
case c8dimages.IsIndexType(desc.MediaType),
|
||||
c8dimages.IsManifestType(desc.MediaType),
|
||||
c8dimages.IsConfigType(desc.MediaType):
|
||||
default:
|
||||
jobsQueue.Add(desc)
|
||||
}
|
||||
addLayerJobs := c8dimages.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
|
||||
if showBlobProgress(desc) {
|
||||
jobsQueue.Add(desc)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
},
|
||||
)
|
||||
return nil, nil
|
||||
})
|
||||
|
||||
handlerWrapper := func(h c8dimages.Handler) c8dimages.Handler {
|
||||
return c8dimages.Handlers(addLayerJobs, h)
|
||||
@@ -197,7 +191,7 @@ func (i *ImageService) pushRef(ctx context.Context, targetRef reference.Named, p
|
||||
|
||||
if err != nil {
|
||||
if !cerrdefs.IsNotFound(err) {
|
||||
return errdefs.System(err)
|
||||
return translateRegistryError(ctx, err)
|
||||
}
|
||||
progress.Aux(out, auxprogress.ContentMissing{
|
||||
ContentMissing: true,
|
||||
|
||||
@@ -13,7 +13,6 @@ import (
|
||||
"github.com/containerd/containerd/v2/core/snapshots"
|
||||
"github.com/containerd/continuity/fs"
|
||||
"github.com/containerd/continuity/sysx"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -54,12 +53,12 @@ func (i *ImageService) remapRootFS(ctx context.Context, mounts []mount.Mount) er
|
||||
return fmt.Errorf("cannot get underlying data for %s", path)
|
||||
}
|
||||
|
||||
ids, err := i.idMapping.ToHost(idtools.Identity{UID: int(stat.Uid), GID: int(stat.Gid)})
|
||||
uid, gid, err := i.idMapping.ToHost(int(stat.Uid), int(stat.Gid))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return chownWithCaps(path, ids.UID, ids.GID)
|
||||
return chownWithCaps(path, uid, gid)
|
||||
})
|
||||
})
|
||||
}
|
||||
@@ -82,7 +81,7 @@ func (i *ImageService) copyAndUnremapRootFS(ctx context.Context, dst, src []moun
|
||||
return fmt.Errorf("cannot get underlying data for %s", path)
|
||||
}
|
||||
|
||||
uid, gid, err := i.idMapping.ToContainer(idtools.Identity{UID: int(stat.Uid), GID: int(stat.Gid)})
|
||||
uid, gid, err := i.idMapping.ToContainer(int(stat.Uid), int(stat.Gid))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -105,7 +104,7 @@ func (i *ImageService) unremapRootFS(ctx context.Context, mounts []mount.Mount)
|
||||
return fmt.Errorf("cannot get underlying data for %s", path)
|
||||
}
|
||||
|
||||
uid, gid, err := i.idMapping.ToContainer(idtools.Identity{UID: int(stat.Uid), GID: int(stat.Gid)})
|
||||
uid, gid, err := i.idMapping.ToContainer(int(stat.Uid), int(stat.Gid))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -337,3 +337,25 @@ func (combined combinedProgress) UpdateProgress(ctx context.Context, ongoing *jo
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// showBlobProgress determines if the progress of pulling/pushing blob should be shown.
|
||||
// Only indexes, manifests, and configs are hidden to align with the pre-containerd behavior.
|
||||
// They are small enough JSON files so it's fine to not show them.
|
||||
// We mostly care about bigger content like layers or other blobs.
|
||||
func showBlobProgress(desc ocispec.Descriptor) bool {
|
||||
switch {
|
||||
case c8dimages.IsLayerType(desc.MediaType):
|
||||
// Fast path: we always show progress for layers.
|
||||
//
|
||||
// Note: We can't just plainly check for c8dimages.IsLayerType alone
|
||||
// because it wouldn't account for other potentially big blobs like
|
||||
// artifacts or non-standard images.
|
||||
return true
|
||||
case c8dimages.IsIndexType(desc.MediaType),
|
||||
c8dimages.IsManifestType(desc.MediaType),
|
||||
c8dimages.IsConfigType(desc.MediaType):
|
||||
return false
|
||||
default:
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
78
daemon/containerd/registry_errors.go
Normal file
@@ -0,0 +1,78 @@
|
||||
package containerd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/containerd/containerd/v2/core/remotes/docker"
|
||||
remoteerrors "github.com/containerd/containerd/v2/core/remotes/errors"
|
||||
cerrdefs "github.com/containerd/errdefs"
|
||||
"github.com/containerd/log"
|
||||
)
|
||||
|
||||
func translateRegistryError(ctx context.Context, err error) error {
|
||||
// Check for registry specific error
|
||||
var derrs docker.Errors
|
||||
if !errors.As(err, &derrs) {
|
||||
var remoteErr remoteerrors.ErrUnexpectedStatus
|
||||
if errors.As(err, &remoteErr) {
|
||||
if jerr := json.Unmarshal(remoteErr.Body, &derrs); jerr != nil {
|
||||
log.G(ctx).WithError(derrs).Debug("unable to unmarshal registry error")
|
||||
return fmt.Errorf("%w: %w", cerrdefs.ErrUnknown, err)
|
||||
}
|
||||
} else {
|
||||
var derr docker.Error
|
||||
if errors.As(err, &derr) {
|
||||
derrs = append(derrs, derr)
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
var errs []error
|
||||
for _, err := range derrs {
|
||||
var derr docker.Error
|
||||
if errors.As(err, &derr) {
|
||||
var message string
|
||||
|
||||
if derr.Message != "" {
|
||||
message = derr.Message
|
||||
} else {
|
||||
message = derr.Code.Message()
|
||||
}
|
||||
|
||||
if detail, ok := derr.Detail.(string); ok {
|
||||
message = fmt.Sprintf("%s - %s", message, detail)
|
||||
}
|
||||
|
||||
switch derr.Code {
|
||||
case docker.ErrorCodeUnsupported:
|
||||
err = cerrdefs.ErrNotImplemented.WithMessage(message)
|
||||
case docker.ErrorCodeUnauthorized:
|
||||
err = cerrdefs.ErrUnauthenticated.WithMessage(message)
|
||||
case docker.ErrorCodeDenied:
|
||||
err = cerrdefs.ErrPermissionDenied.WithMessage(message)
|
||||
case docker.ErrorCodeUnavailable:
|
||||
err = cerrdefs.ErrUnavailable.WithMessage(message)
|
||||
case docker.ErrorCodeTooManyRequests:
|
||||
err = cerrdefs.ErrResourceExhausted.WithMessage(message)
|
||||
default:
|
||||
err = cerrdefs.ErrUnknown.WithMessage(message)
|
||||
}
|
||||
} else {
|
||||
errs = append(errs, cerrdefs.ErrUnknown.WithMessage(err.Error()))
|
||||
}
|
||||
errs = append(errs, err)
|
||||
}
|
||||
switch len(errs) {
|
||||
case 0:
|
||||
err = cerrdefs.ErrUnknown.WithMessage(err.Error())
|
||||
case 1:
|
||||
err = errs[0]
|
||||
default:
|
||||
err = errors.Join(errs...)
|
||||
}
|
||||
return fmt.Errorf("error from registry: %w", err)
|
||||
}
|
||||
@@ -14,14 +14,13 @@ import (
|
||||
cerrdefs "github.com/containerd/errdefs"
|
||||
"github.com/containerd/log"
|
||||
"github.com/containerd/platforms"
|
||||
"github.com/distribution/reference"
|
||||
"github.com/docker/docker/container"
|
||||
daemonevents "github.com/docker/docker/daemon/events"
|
||||
dimages "github.com/docker/docker/daemon/images"
|
||||
"github.com/docker/docker/daemon/snapshotter"
|
||||
"github.com/docker/docker/distribution"
|
||||
"github.com/docker/docker/errdefs"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/registry"
|
||||
"github.com/moby/sys/user"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
@@ -34,32 +33,25 @@ type ImageService struct {
|
||||
snapshotterServices map[string]snapshots.Snapshotter
|
||||
snapshotter string
|
||||
registryHosts docker.RegistryHosts
|
||||
registryService registryResolver
|
||||
registryService distribution.RegistryResolver
|
||||
eventsService *daemonevents.Events
|
||||
pruneRunning atomic.Bool
|
||||
refCountMounter snapshotter.Mounter
|
||||
idMapping idtools.IdentityMapping
|
||||
idMapping user.IdentityMapping
|
||||
|
||||
// defaultPlatformOverride is used in tests to override the host platform.
|
||||
defaultPlatformOverride platforms.MatchComparer
|
||||
}
|
||||
|
||||
type registryResolver interface {
|
||||
IsInsecureRegistry(host string) bool
|
||||
ResolveRepository(name reference.Named) (*registry.RepositoryInfo, error)
|
||||
LookupPullEndpoints(hostname string) ([]registry.APIEndpoint, error)
|
||||
LookupPushEndpoints(hostname string) ([]registry.APIEndpoint, error)
|
||||
}
|
||||
|
||||
type ImageServiceConfig struct {
|
||||
Client *containerd.Client
|
||||
Containers container.Store
|
||||
Snapshotter string
|
||||
RegistryHosts docker.RegistryHosts
|
||||
Registry registryResolver
|
||||
Registry distribution.RegistryResolver
|
||||
EventsService *daemonevents.Events
|
||||
RefCountMounter snapshotter.Mounter
|
||||
IDMapping idtools.IdentityMapping
|
||||
IDMapping user.IdentityMapping
|
||||
}
|
||||
|
||||
// NewService creates a new ImageService.
|
||||
|
||||
@@ -1,9 +1,14 @@
|
||||
// TODO(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
|
||||
//go:build go1.22
|
||||
|
||||
package daemon // import "github.com/docker/docker/daemon"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/log"
|
||||
@@ -19,11 +24,15 @@ import (
|
||||
"github.com/docker/docker/image"
|
||||
"github.com/docker/docker/internal/metrics"
|
||||
"github.com/docker/docker/internal/multierror"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/internal/otelutil"
|
||||
"github.com/docker/docker/runconfig"
|
||||
"github.com/moby/sys/user"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/opencontainers/selinux/go-selinux"
|
||||
"github.com/tonistiigi/go-archvariant"
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
)
|
||||
|
||||
type createOpts struct {
|
||||
@@ -56,7 +65,15 @@ func (daemon *Daemon) ContainerCreateIgnoreImagesArgsEscaped(ctx context.Context
|
||||
})
|
||||
}
|
||||
|
||||
func (daemon *Daemon) containerCreate(ctx context.Context, daemonCfg *configStore, opts createOpts) (containertypes.CreateResponse, error) {
|
||||
func (daemon *Daemon) containerCreate(ctx context.Context, daemonCfg *configStore, opts createOpts) (_ containertypes.CreateResponse, retErr error) {
|
||||
ctx, span := otel.Tracer("").Start(ctx, "daemon.containerCreate", trace.WithAttributes(
|
||||
labelsAsOTelAttributes(opts.params.Config.Labels)...,
|
||||
))
|
||||
defer func() {
|
||||
otelutil.RecordStatus(span, retErr)
|
||||
span.End()
|
||||
}()
|
||||
|
||||
start := time.Now()
|
||||
if opts.params.Config == nil {
|
||||
return containertypes.CreateResponse{}, errdefs.InvalidParameter(runconfig.ErrEmptyConfig)
|
||||
@@ -122,6 +139,31 @@ func (daemon *Daemon) containerCreate(ctx context.Context, daemonCfg *configStor
|
||||
return containertypes.CreateResponse{ID: ctr.ID, Warnings: warnings}, nil
|
||||
}
|
||||
|
||||
var (
|
||||
containerLabelsFilter []string
|
||||
containerLabelsFilterOnce sync.Once
|
||||
)
|
||||
|
||||
func labelsAsOTelAttributes(labels map[string]string) []attribute.KeyValue {
|
||||
containerLabelsFilterOnce.Do(func() {
|
||||
containerLabelsFilter = strings.Split(os.Getenv("DOCKER_CONTAINER_LABELS_FILTER"), ",")
|
||||
})
|
||||
|
||||
// This env var is a comma-separated list of labels to be included in the
|
||||
// OTel span attributes. The labels are prefixed with "label." to avoid
|
||||
// collision with other attributes.
|
||||
//
|
||||
// Note that, this is an experimental env var that might be removed
|
||||
// unceremoniously at any point in time.
|
||||
attrs := make([]attribute.KeyValue, 0, len(containerLabelsFilter))
|
||||
for _, k := range containerLabelsFilter {
|
||||
if v, ok := labels[k]; ok {
|
||||
attrs = append(attrs, attribute.String("label."+k, v))
|
||||
}
|
||||
}
|
||||
return attrs
|
||||
}
|
||||
|
||||
// Create creates a new container from the given configuration with a given name.
|
||||
func (daemon *Daemon) create(ctx context.Context, daemonCfg *config.Config, opts createOpts) (retC *container.Container, retErr error) {
|
||||
var (
|
||||
@@ -186,17 +228,18 @@ func (daemon *Daemon) create(ctx context.Context, daemonCfg *config.Config, opts
|
||||
ctr.ImageManifest = imgManifest
|
||||
|
||||
// Set RWLayer for container after mount labels have been set
|
||||
rwLayer, err := daemon.imageService.CreateLayer(ctr, setupInitLayer(daemon.idMapping))
|
||||
rwLayer, err := daemon.imageService.CreateLayer(ctr, setupInitLayer(daemon.idMapping.RootPair()))
|
||||
if err != nil {
|
||||
return nil, errdefs.System(err)
|
||||
}
|
||||
ctr.RWLayer = rwLayer
|
||||
|
||||
current := idtools.CurrentIdentity()
|
||||
if err := idtools.MkdirAndChown(ctr.Root, 0o710, idtools.Identity{UID: current.UID, GID: daemon.IdentityMapping().RootPair().GID}); err != nil {
|
||||
cuid := os.Getuid()
|
||||
_, gid := daemon.IdentityMapping().RootPair()
|
||||
if err := user.MkdirAndChown(ctr.Root, 0o710, cuid, gid); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := idtools.MkdirAndChown(ctr.CheckpointDir(), 0o700, current); err != nil {
|
||||
if err := user.MkdirAndChown(ctr.CheckpointDir(), 0o700, cuid, os.Getegid()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
"github.com/docker/docker/container"
|
||||
"github.com/docker/docker/errdefs"
|
||||
"github.com/docker/docker/oci"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
volumemounts "github.com/docker/docker/volume/mounts"
|
||||
volumeopts "github.com/docker/docker/volume/service/opts"
|
||||
"github.com/opencontainers/selinux/go-selinux/label"
|
||||
@@ -27,8 +28,7 @@ func (daemon *Daemon) createContainerOSSpecificSettings(ctx context.Context, con
|
||||
}
|
||||
defer daemon.Unmount(container)
|
||||
|
||||
rootIDs := daemon.idMapping.RootPair()
|
||||
if err := container.SetupWorkingDirectory(rootIDs); err != nil {
|
||||
if err := container.SetupWorkingDirectory(daemon.idMapping.RootPair()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -107,8 +107,8 @@ func (daemon *Daemon) populateVolume(ctx context.Context, c *container.Container
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
volumePath, cleanup, err := mnt.Setup(ctx, c.MountLabel, daemon.idMapping.RootPair(), nil)
|
||||
uid, gid := daemon.idMapping.RootPair()
|
||||
volumePath, cleanup, err := mnt.Setup(ctx, c.MountLabel, idtools.Identity{UID: uid, GID: gid}, nil)
|
||||
if err != nil {
|
||||
if errdefs.IsNotFound(err) {
|
||||
return nil
|
||||
|
||||
@@ -75,6 +75,7 @@ import (
|
||||
"github.com/moby/buildkit/util/grpcerrors"
|
||||
"github.com/moby/buildkit/util/tracing"
|
||||
"github.com/moby/locker"
|
||||
"github.com/moby/sys/user"
|
||||
"github.com/moby/sys/userns"
|
||||
"github.com/pkg/errors"
|
||||
"go.etcd.io/bbolt"
|
||||
@@ -113,7 +114,7 @@ type Daemon struct {
|
||||
sysInfoOnce sync.Once
|
||||
sysInfo *sysinfo.SysInfo
|
||||
shutdown bool
|
||||
idMapping idtools.IdentityMapping
|
||||
idMapping user.IdentityMapping
|
||||
PluginStore *plugin.Store // TODO: remove
|
||||
pluginManager *plugin.Manager
|
||||
linkIndex *linkIndex
|
||||
@@ -263,7 +264,7 @@ func (daemon *Daemon) restore(cfg *configStore) error {
|
||||
|
||||
removeContainers := make(map[string]*container.Container)
|
||||
restartContainers := make(map[*container.Container]chan struct{})
|
||||
activeSandboxes := make(map[string]interface{})
|
||||
activeSandboxes := make(map[string]any)
|
||||
|
||||
for _, c := range containers {
|
||||
group.Add(1)
|
||||
@@ -791,7 +792,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rootIDs := idMapping.RootPair()
|
||||
uid, gid := idMapping.RootPair()
|
||||
|
||||
// set up the tmpDir to use a canonical path
|
||||
tmp, err := prepareTempDir(config.Root)
|
||||
@@ -878,10 +879,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
|
||||
}
|
||||
|
||||
daemonRepo := filepath.Join(cfgStore.Root, "containers")
|
||||
if err := idtools.MkdirAllAndChown(daemonRepo, 0o710, idtools.Identity{
|
||||
UID: idtools.CurrentIdentity().UID,
|
||||
GID: rootIDs.GID,
|
||||
}); err != nil {
|
||||
if err := user.MkdirAllAndChown(daemonRepo, 0o710, os.Getuid(), gid); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -967,7 +965,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
|
||||
|
||||
var (
|
||||
shim string
|
||||
shimOpts interface{}
|
||||
shimOpts any
|
||||
)
|
||||
if runtime.GOOS != "windows" {
|
||||
shim, shimOpts, err = rts.Get("")
|
||||
@@ -999,7 +997,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
|
||||
}
|
||||
log.G(ctx).Debugf("Using default logging driver %s", d.defaultLogConfig.Type)
|
||||
|
||||
d.volumes, err = volumesservice.NewVolumeService(cfgStore.Root, d.PluginStore, rootIDs, d)
|
||||
d.volumes, err = volumesservice.NewVolumeService(cfgStore.Root, d.PluginStore, idtools.Identity{UID: uid, GID: gid}, d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1423,7 +1421,7 @@ func prepareTempDir(rootDir string) (string, error) {
|
||||
}
|
||||
}
|
||||
}
|
||||
return tmpDir, idtools.MkdirAllAndChown(tmpDir, 0o700, idtools.CurrentIdentity())
|
||||
return tmpDir, user.MkdirAllAndChown(tmpDir, 0o700, os.Getuid(), os.Getegid())
|
||||
}
|
||||
|
||||
func (daemon *Daemon) setGenericResources(conf *config.Config) error {
|
||||
@@ -1446,7 +1444,7 @@ func isBridgeNetworkDisabled(conf *config.Config) bool {
|
||||
return conf.BridgeConfig.Iface == config.DisableNetworkBridge
|
||||
}
|
||||
|
||||
func (daemon *Daemon) networkOptions(conf *config.Config, pg plugingetter.PluginGetter, hostID string, activeSandboxes map[string]interface{}) ([]nwconfig.Option, error) {
|
||||
func (daemon *Daemon) networkOptions(conf *config.Config, pg plugingetter.PluginGetter, hostID string, activeSandboxes map[string]any) ([]nwconfig.Option, error) {
|
||||
options := []nwconfig.Option{
|
||||
nwconfig.OptionDataDir(filepath.Join(conf.Root, config.LibnetDataPath)),
|
||||
nwconfig.OptionExecRoot(conf.GetExecRoot()),
|
||||
@@ -1545,7 +1543,8 @@ func CreateDaemonRoot(config *config.Config) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return setupDaemonRoot(config, realRoot, idMapping.RootPair())
|
||||
uid, gid := idMapping.RootPair()
|
||||
return setupDaemonRoot(config, realRoot, uid, gid)
|
||||
}
|
||||
|
||||
// RemapContainerdNamespaces returns the right containerd namespaces to use:
|
||||
@@ -1561,16 +1560,16 @@ func RemapContainerdNamespaces(config *config.Config) (ns string, pluginNs strin
|
||||
if idMapping.Empty() {
|
||||
return config.ContainerdNamespace, config.ContainerdPluginNamespace, nil
|
||||
}
|
||||
root := idMapping.RootPair()
|
||||
uid, gid := idMapping.RootPair()
|
||||
|
||||
ns = config.ContainerdNamespace
|
||||
if _, ok := config.ValuesSet["containerd-namespace"]; !ok {
|
||||
ns = fmt.Sprintf("%s-%d.%d", config.ContainerdNamespace, root.UID, root.GID)
|
||||
ns = fmt.Sprintf("%s-%d.%d", config.ContainerdNamespace, uid, gid)
|
||||
}
|
||||
|
||||
pluginNs = config.ContainerdPluginNamespace
|
||||
if _, ok := config.ValuesSet["containerd-plugin-namespace"]; !ok {
|
||||
pluginNs = fmt.Sprintf("%s-%d.%d", config.ContainerdPluginNamespace, root.UID, root.GID)
|
||||
pluginNs = fmt.Sprintf("%s-%d.%d", config.ContainerdPluginNamespace, uid, gid)
|
||||
}
|
||||
|
||||
return ns, pluginNs, nil
|
||||
@@ -1600,7 +1599,7 @@ func (daemon *Daemon) GetAttachmentStore() *network.AttachmentStore {
|
||||
}
|
||||
|
||||
// IdentityMapping returns uid/gid mapping or a SID (in the case of Windows) for the builder
|
||||
func (daemon *Daemon) IdentityMapping() idtools.IdentityMapping {
|
||||
func (daemon *Daemon) IdentityMapping() user.IdentityMapping {
|
||||
return daemon.idMapping
|
||||
}
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.