Mirror of https://github.com/moby/moby.git (synced 2026-01-12 03:01:38 +00:00)
Compare commits
189 Commits
Commit SHA1s in this compare range (author and date columns did not survive extraction):

061aa95809 d0d85f6438 5d6679345c ef1fa235cd 0451b287dc d27fe2558d 77de535364 9e526bc394 2d347024d1 51e876cd96
3fa0cedce3 4e7d8531ed f66b5f642e 41fde13f64 0db1c6d8bb 33a29c0135 30545de83e fa4ea308f0 d66e0fb7b1 30ecc0ea8a
06767446fe 7048a63686 81fb7f9986 b77bb69f87 7a4abb8c77 81a83f0544 abcd6f8a46 f7be6dcba6 10609544e5 be59afce2d
97951c39fb 2001813571 8e3bcf1974 27f36f42a4 1ae019fca2 c761353e7c 00b2e1072b 10bc347b03 6d675b429e 9f1b47c597
94137f6df5 dd5faa9d4f 012bfd33e5 3ec1946ce1 200a2c3576 cb66214dfd 70c05fe10c e85cef89fa a72294a668 9ee331235a
6fb71a9764 5d9e13bc84 36d02bf488 bb66c3ca04 fa3a64f2bc f417435e5f acd023d42b 7a075cacf9 aff7177ee7 ed7c26339e
74e3b4fb2e 4cc0416534 f9f9e7ff9a 5fb4eb941d 67e9aa6d4d 61b82be580 0227d95f99 fa9c5c55e1 df96d8d0bd 1652559be4
ab29279200 147b5388dd 60103717bc 45dede440e ba4a2dab16 51133117fb 341a7978a5 10e3bfd0ac 269a0d8feb 876b1d1dcd
0bcd64689b 8d454710cd 6cf694fe70 c12bbf549b 1ae115175c a7f9907f5f 9150d0115e 9af7c8ec0a 3344c502da 6c9fafdda7
f8a8cdaf9e 7a659049b8 0ccf1c2a93 28c1a8bc2b 5b5a58b2cd 282891f70c bbe6f09afc 5b13a38144 990e95dcf0 a140d0d95f
91a8312fb7 cf03e96354 c48b67160d 225e043196 78174d2e74 622e66684a 85f4e6151a 3e358447f5 dd4de8f388 f5ef4e76b3
6c5e5271c1 693fca6199 49487e996a 0358f31dc2 081cffb3fa 9de19554c7 2a80b8a7b2 61ffecfa3b 02cd8dec03 1d7df5ecc0
4e68a265ed e437f890ba 5a0015f72c 5babfee371 fce6e0ca9b d838e68300 fa0d4159c7 06e22dce46 b73ee94289 fd6a419ad5
13ce91825f 4b63c47c1e 4edb71bb83 667bc3f803 1b47bfac02 f2d0d87c46 6ac38cdbeb d7bf237e29 f41b342cbe f413ba6fdb
c2ef38f790 d5eebf9e19 f3f5327b48 05a370f52f be7b60ef05 6d05b9b65b c01bbbddeb 32635850ed 2cf1c762f8 71fa3ab079
5295e88ceb 6eef840b8a e2ab4718c8 3de920a0b1 a445aa95e5 cb77e48229 e8801fbe26 613b6a12c1 1b6738369f b8cc2e8c66
fcccfeb811 f8eaa14a18 ac76925ff2 c7a1d928c0 2672baefd7 ff15b49b47 c0573b133f c7466c0b52 dde33d0dfe 39fedb254b
f0f5fc974a 7c185a1e40 2b036fb1da 1f24da70d8 358fecb566 f030b25770 e07aed0f77 cdf3611cff 05267e9e8c e5edf62bca
e14d121d49 e0acf1cd70 c2847b2eb2 0894f7fe69 d25aa32c21 1e335cfa74 4d287e9267 0240f5675b 13964248f1
.github/workflows/.dco.yml | 6
@@ -15,19 +15,19 @@ jobs:
    steps:
      -
        name: Checkout
-       uses: actions/checkout@v3
+       uses: actions/checkout@v4
        with:
          fetch-depth: 0
      -
        name: Dump context
-       uses: actions/github-script@v6
+       uses: actions/github-script@v7
        with:
          script: |
            console.log(JSON.stringify(context, null, 2));
      -
        name: Get base ref
        id: base-ref
-       uses: actions/github-script@v6
+       uses: actions/github-script@v7
        with:
          result-encoding: string
          script: |
.github/workflows/.test-prepare.yml | 4
@@ -18,11 +18,11 @@ jobs:
    steps:
      -
        name: Checkout
-       uses: actions/checkout@v3
+       uses: actions/checkout@v4
      -
        name: Create matrix
        id: set
-       uses: actions/github-script@v6
+       uses: actions/github-script@v7
        with:
          script: |
            let matrix = ['graphdriver'];
.github/workflows/.test.yml | 101
@@ -12,9 +12,9 @@ on:
        default: "graphdriver"

env:
-  GO_VERSION: "1.21.6"
+  GO_VERSION: "1.21.8"
  GOTESTLIST_VERSION: v0.3.1
-  TESTSTAT_VERSION: v0.1.3
+  TESTSTAT_VERSION: v0.1.25
  ITG_CLI_MATRIX_SIZE: 6
  DOCKER_EXPERIMENTAL: 1
  DOCKER_GRAPHDRIVER: ${{ inputs.storage == 'snapshotter' && 'overlayfs' || 'overlay2' }}
@@ -28,16 +28,16 @@ jobs:
    steps:
      -
        name: Checkout
-       uses: actions/checkout@v3
+       uses: actions/checkout@v4
      -
        name: Set up runner
        uses: ./.github/actions/setup-runner
      -
        name: Set up Docker Buildx
-       uses: docker/setup-buildx-action@v2
+       uses: docker/setup-buildx-action@v3
      -
        name: Build dev image
-       uses: docker/bake-action@v2
+       uses: docker/bake-action@v4
        with:
          targets: dev
          set: |
@@ -57,17 +57,18 @@ jobs:
          tree -nh /tmp/reports
      -
        name: Send to Codecov
-       uses: codecov/codecov-action@v3
+       uses: codecov/codecov-action@v4
        with:
          directory: ./bundles
          env_vars: RUNNER_OS
          flags: unit
+         token: ${{ secrets.CODECOV_TOKEN }} # used to upload coverage reports: https://github.com/moby/buildkit/pull/4660#issue-2142122533
      -
        name: Upload reports
        if: always()
-       uses: actions/upload-artifact@v3
+       uses: actions/upload-artifact@v4
        with:
-         name: ${{ inputs.storage }}-unit-reports
+         name: test-reports-unit-${{ inputs.storage }}
          path: /tmp/reports/*

  unit-report:
@@ -80,14 +81,14 @@ jobs:
    steps:
      -
        name: Set up Go
-       uses: actions/setup-go@v3
+       uses: actions/setup-go@v5
        with:
          go-version: ${{ env.GO_VERSION }}
      -
        name: Download reports
-       uses: actions/download-artifact@v3
+       uses: actions/download-artifact@v4
        with:
-         name: ${{ inputs.storage }}-unit-reports
+         name: test-reports-unit-${{ inputs.storage }}
          path: /tmp/reports
      -
        name: Install teststat
@@ -96,7 +97,7 @@ jobs:
      -
        name: Create summary
        run: |
-         teststat -markdown $(find /tmp/reports -type f -name '*.json' -print0 | xargs -0) >> $GITHUB_STEP_SUMMARY
+         find /tmp/reports -type f -name '*-go-test-report.json' -exec teststat -markdown {} \+ >> $GITHUB_STEP_SUMMARY

  docker-py:
    runs-on: ubuntu-20.04
@@ -105,7 +106,7 @@ jobs:
    steps:
      -
        name: Checkout
-       uses: actions/checkout@v3
+       uses: actions/checkout@v4
      -
        name: Set up runner
        uses: ./.github/actions/setup-runner
@@ -114,10 +115,10 @@ jobs:
        uses: ./.github/actions/setup-tracing
      -
        name: Set up Docker Buildx
-       uses: docker/setup-buildx-action@v2
+       uses: docker/setup-buildx-action@v3
      -
        name: Build dev image
-       uses: docker/bake-action@v2
+       uses: docker/bake-action@v4
        with:
          targets: dev
          set: |
@@ -145,9 +146,9 @@ jobs:
      -
        name: Upload reports
        if: always()
-       uses: actions/upload-artifact@v3
+       uses: actions/upload-artifact@v4
        with:
-         name: ${{ inputs.storage }}-docker-py-reports
+         name: test-reports-docker-py-${{ inputs.storage }}
          path: /tmp/reports/*

  integration-flaky:
@@ -157,16 +158,16 @@ jobs:
    steps:
      -
        name: Checkout
-       uses: actions/checkout@v3
+       uses: actions/checkout@v4
      -
        name: Set up runner
        uses: ./.github/actions/setup-runner
      -
        name: Set up Docker Buildx
-       uses: docker/setup-buildx-action@v2
+       uses: docker/setup-buildx-action@v3
      -
        name: Build dev image
-       uses: docker/bake-action@v2
+       uses: docker/bake-action@v4
        with:
          targets: dev
          set: |
@@ -196,7 +197,7 @@ jobs:
    steps:
      -
        name: Checkout
-       uses: actions/checkout@v3
+       uses: actions/checkout@v4
      -
        name: Set up runner
        uses: ./.github/actions/setup-runner
@@ -217,10 +218,10 @@ jobs:
          echo "CACHE_DEV_SCOPE=${CACHE_DEV_SCOPE}" >> $GITHUB_ENV
      -
        name: Set up Docker Buildx
-       uses: docker/setup-buildx-action@v2
+       uses: docker/setup-buildx-action@v3
      -
        name: Build dev image
-       uses: docker/bake-action@v2
+       uses: docker/bake-action@v4
        with:
          targets: dev
          set: |
@@ -236,10 +237,13 @@ jobs:
        name: Prepare reports
        if: always()
        run: |
-         reportsPath="/tmp/reports/${{ matrix.os }}"
+         reportsName=${{ matrix.os }}
          if [ -n "${{ matrix.mode }}" ]; then
-           reportsPath="$reportsPath-${{ matrix.mode }}"
+           reportsName="$reportsName-${{ matrix.mode }}"
          fi
+         reportsPath="/tmp/reports/$reportsName"
+         echo "TESTREPORTS_NAME=$reportsName" >> $GITHUB_ENV
+
          mkdir -p bundles $reportsPath
          find bundles -path '*/root/*overlay2' -prune -o -type f \( -name '*-report.json' -o -name '*.log' -o -name '*.out' -o -name '*.prof' -o -name '*-report.xml' \) -print | xargs sudo tar -czf /tmp/reports.tar.gz
          tar -xzf /tmp/reports.tar.gz -C $reportsPath
@@ -249,11 +253,12 @@ jobs:
          curl -sSLf localhost:16686/api/traces?service=integration-test-client > $reportsPath/jaeger-trace.json
      -
        name: Send to Codecov
-       uses: codecov/codecov-action@v3
+       uses: codecov/codecov-action@v4
        with:
          directory: ./bundles/test-integration
          env_vars: RUNNER_OS
          flags: integration,${{ matrix.mode }}
+         token: ${{ secrets.CODECOV_TOKEN }} # used to upload coverage reports: https://github.com/moby/buildkit/pull/4660#issue-2142122533
      -
        name: Test daemon logs
        if: always()
@@ -262,9 +267,9 @@ jobs:
      -
        name: Upload reports
        if: always()
-       uses: actions/upload-artifact@v3
+       uses: actions/upload-artifact@v4
        with:
-         name: ${{ inputs.storage }}-integration-reports
+         name: test-reports-integration-${{ inputs.storage }}-${{ env.TESTREPORTS_NAME }}
          path: /tmp/reports/*

  integration-report:
@@ -277,15 +282,16 @@ jobs:
    steps:
      -
        name: Set up Go
-       uses: actions/setup-go@v3
+       uses: actions/setup-go@v5
        with:
          go-version: ${{ env.GO_VERSION }}
      -
        name: Download reports
-       uses: actions/download-artifact@v3
+       uses: actions/download-artifact@v4
        with:
-         name: ${{ inputs.storage }}-integration-reports
          path: /tmp/reports
+         pattern: test-reports-integration-${{ inputs.storage }}-*
+         merge-multiple: true
      -
        name: Install teststat
        run: |
@@ -293,7 +299,7 @@ jobs:
      -
        name: Create summary
        run: |
-         teststat -markdown $(find /tmp/reports -type f -name '*.json' -print0 | xargs -0) >> $GITHUB_STEP_SUMMARY
+         find /tmp/reports -type f -name '*-go-test-report.json' -exec teststat -markdown {} \+ >> $GITHUB_STEP_SUMMARY

  integration-cli-prepare:
    runs-on: ubuntu-20.04
@@ -303,10 +309,10 @@ jobs:
    steps:
      -
        name: Checkout
-       uses: actions/checkout@v3
+       uses: actions/checkout@v4
      -
        name: Set up Go
-       uses: actions/setup-go@v3
+       uses: actions/setup-go@v5
        with:
          go-version: ${{ env.GO_VERSION }}
      -
@@ -343,7 +349,7 @@ jobs:
    steps:
      -
        name: Checkout
-       uses: actions/checkout@v3
+       uses: actions/checkout@v4
      -
        name: Set up runner
        uses: ./.github/actions/setup-runner
@@ -352,10 +358,10 @@ jobs:
        uses: ./.github/actions/setup-tracing
      -
        name: Set up Docker Buildx
-       uses: docker/setup-buildx-action@v2
+       uses: docker/setup-buildx-action@v3
      -
        name: Build dev image
-       uses: docker/bake-action@v2
+       uses: docker/bake-action@v4
        with:
          targets: dev
          set: |
@@ -372,7 +378,10 @@ jobs:
        name: Prepare reports
        if: always()
        run: |
-         reportsPath=/tmp/reports/$(echo -n "${{ matrix.test }}" | sha256sum | cut -d " " -f 1)
+         reportsName=$(echo -n "${{ matrix.test }}" | sha256sum | cut -d " " -f 1)
+         reportsPath=/tmp/reports/$reportsName
+         echo "TESTREPORTS_NAME=$reportsName" >> $GITHUB_ENV
+
          mkdir -p bundles $reportsPath
          echo "${{ matrix.test }}" | tr -s '|' '\n' | tee -a "$reportsPath/tests.txt"
          find bundles -path '*/root/*overlay2' -prune -o -type f \( -name '*-report.json' -o -name '*.log' -o -name '*.out' -o -name '*.prof' -o -name '*-report.xml' \) -print | xargs sudo tar -czf /tmp/reports.tar.gz
@@ -383,11 +392,12 @@ jobs:
          curl -sSLf localhost:16686/api/traces?service=integration-test-client > $reportsPath/jaeger-trace.json
      -
        name: Send to Codecov
-       uses: codecov/codecov-action@v3
+       uses: codecov/codecov-action@v4
        with:
          directory: ./bundles/test-integration
          env_vars: RUNNER_OS
          flags: integration-cli
+         token: ${{ secrets.CODECOV_TOKEN }} # used to upload coverage reports: https://github.com/moby/buildkit/pull/4660#issue-2142122533
      -
        name: Test daemon logs
        if: always()
@@ -396,9 +406,9 @@ jobs:
      -
        name: Upload reports
        if: always()
-       uses: actions/upload-artifact@v3
+       uses: actions/upload-artifact@v4
        with:
-         name: ${{ inputs.storage }}-integration-cli-reports
+         name: test-reports-integration-cli-${{ inputs.storage }}-${{ env.TESTREPORTS_NAME }}
          path: /tmp/reports/*

  integration-cli-report:
@@ -411,15 +421,16 @@ jobs:
    steps:
      -
        name: Set up Go
-       uses: actions/setup-go@v3
+       uses: actions/setup-go@v5
        with:
          go-version: ${{ env.GO_VERSION }}
      -
        name: Download reports
-       uses: actions/download-artifact@v3
+       uses: actions/download-artifact@v4
        with:
-         name: ${{ inputs.storage }}-integration-cli-reports
          path: /tmp/reports
+         pattern: test-reports-integration-cli-${{ inputs.storage }}-*
+         merge-multiple: true
      -
        name: Install teststat
        run: |
@@ -427,4 +438,4 @@ jobs:
      -
        name: Create summary
        run: |
-         teststat -markdown $(find /tmp/reports -type f -name '*.json' -print0 | xargs -0) >> $GITHUB_STEP_SUMMARY
+         find /tmp/reports -type f -name '*-go-test-report.json' -exec teststat -markdown {} \+ >> $GITHUB_STEP_SUMMARY
.github/workflows/.windows.yml | 59
@@ -19,9 +19,9 @@ on:
        default: false

env:
-  GO_VERSION: "1.21.6"
+  GO_VERSION: "1.21.8"
  GOTESTLIST_VERSION: v0.3.1
-  TESTSTAT_VERSION: v0.1.3
+  TESTSTAT_VERSION: v0.1.25
  WINDOWS_BASE_IMAGE: mcr.microsoft.com/windows/servercore
  WINDOWS_BASE_TAG_2019: ltsc2019
  WINDOWS_BASE_TAG_2022: ltsc2022
@@ -43,7 +43,7 @@ jobs:
    steps:
      -
        name: Checkout
-       uses: actions/checkout@v3
+       uses: actions/checkout@v4
        with:
          path: ${{ env.GOPATH }}/src/github.com/docker/docker
      -
@@ -62,7 +62,7 @@ jobs:
          }
      -
        name: Cache
-       uses: actions/cache@v3
+       uses: actions/cache@v4
        with:
          path: |
            ~\AppData\Local\go-build
@@ -103,7 +103,7 @@ jobs:
          docker cp "${{ env.TEST_CTN_NAME }}`:c`:\containerd\bin\containerd-shim-runhcs-v1.exe" ${{ env.BIN_OUT }}\
      -
        name: Upload artifacts
-       uses: actions/upload-artifact@v3
+       uses: actions/upload-artifact@v4
        with:
          name: build-${{ inputs.storage }}-${{ inputs.os }}
          path: ${{ env.BIN_OUT }}/*
@@ -122,7 +122,7 @@ jobs:
    steps:
      -
        name: Checkout
-       uses: actions/checkout@v3
+       uses: actions/checkout@v4
        with:
          path: ${{ env.GOPATH }}/src/github.com/docker/docker
      -
@@ -142,7 +142,7 @@ jobs:
          }
      -
        name: Cache
-       uses: actions/cache@v3
+       uses: actions/cache@v4
        with:
          path: |
            ~\AppData\Local\go-build
@@ -176,16 +176,17 @@ jobs:
      -
        name: Send to Codecov
        if: inputs.send_coverage
-       uses: codecov/codecov-action@v3
+       uses: codecov/codecov-action@v4
        with:
          working-directory: ${{ env.GOPATH }}\src\github.com\docker\docker
          directory: bundles
          env_vars: RUNNER_OS
          flags: unit
+         token: ${{ secrets.CODECOV_TOKEN }} # used to upload coverage reports: https://github.com/moby/buildkit/pull/4660#issue-2142122533
      -
        name: Upload reports
        if: always()
-       uses: actions/upload-artifact@v3
+       uses: actions/upload-artifact@v4
        with:
          name: ${{ inputs.os }}-${{ inputs.storage }}-unit-reports
          path: ${{ env.GOPATH }}\src\github.com\docker\docker\bundles\*
@@ -198,12 +199,12 @@ jobs:
    steps:
      -
        name: Set up Go
-       uses: actions/setup-go@v3
+       uses: actions/setup-go@v5
        with:
          go-version: ${{ env.GO_VERSION }}
      -
        name: Download artifacts
-       uses: actions/download-artifact@v3
+       uses: actions/download-artifact@v4
        with:
          name: ${{ inputs.os }}-${{ inputs.storage }}-unit-reports
          path: /tmp/artifacts
@@ -214,7 +215,7 @@ jobs:
      -
        name: Create summary
        run: |
-         teststat -markdown $(find /tmp/artifacts -type f -name '*.json' -print0 | xargs -0) >> $GITHUB_STEP_SUMMARY
+         find /tmp/artifacts -type f -name '*-go-test-report.json' -exec teststat -markdown {} \+ >> $GITHUB_STEP_SUMMARY

  integration-test-prepare:
    runs-on: ubuntu-latest
@@ -223,10 +224,10 @@ jobs:
    steps:
      -
        name: Checkout
-       uses: actions/checkout@v3
+       uses: actions/checkout@v4
      -
        name: Set up Go
-       uses: actions/setup-go@v3
+       uses: actions/setup-go@v5
        with:
          go-version: ${{ env.GO_VERSION }}
      -
@@ -278,10 +279,11 @@ jobs:
    steps:
      -
        name: Checkout
-       uses: actions/checkout@v3
+       uses: actions/checkout@v4
        with:
          path: ${{ env.GOPATH }}/src/github.com/docker/docker
      -
        name: Set up Jaeger
        run: |
          # Jaeger is set up on Linux through the setup-tracing action. If you update Jaeger here, don't forget to
          # update the version set in .github/actions/setup-tracing/action.yml.
@@ -296,7 +298,7 @@ jobs:
          Get-ChildItem Env: | Out-String
      -
        name: Download artifacts
-       uses: actions/download-artifact@v3
+       uses: actions/download-artifact@v4
        with:
          name: build-${{ inputs.storage }}-${{ inputs.os }}
          path: ${{ env.BIN_OUT }}
@@ -310,6 +312,9 @@ jobs:
            echo "WINDOWS_BASE_IMAGE_TAG=${{ env.WINDOWS_BASE_TAG_2022 }}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
          }
          Write-Output "${{ env.BIN_OUT }}" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
+
+         $testName = ([System.BitConverter]::ToString((New-Object System.Security.Cryptography.SHA256Managed).ComputeHash([System.Text.Encoding]::UTF8.GetBytes("${{ matrix.test }}"))) -replace '-').ToLower()
+         echo "TESTREPORTS_NAME=$testName" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
      -
        # removes docker service that is currently installed on the runner. we
        # could use Uninstall-Package but not yet available on Windows runners.
@@ -420,7 +425,7 @@ jobs:
          DOCKER_HOST: npipe:////./pipe/docker_engine
      -
        name: Set up Go
-       uses: actions/setup-go@v3
+       uses: actions/setup-go@v5
        with:
          go-version: ${{ env.GO_VERSION }}
      -
@@ -445,12 +450,13 @@ jobs:
      -
        name: Send to Codecov
        if: inputs.send_coverage
-       uses: codecov/codecov-action@v3
+       uses: codecov/codecov-action@v4
        with:
          working-directory: ${{ env.GOPATH }}\src\github.com\docker\docker
          directory: bundles
          env_vars: RUNNER_OS
          flags: integration,${{ matrix.runtime }}
+         token: ${{ secrets.CODECOV_TOKEN }} # used to upload coverage reports: https://github.com/moby/buildkit/pull/4660#issue-2142122533
      -
        name: Docker info
        run: |
@@ -498,9 +504,9 @@ jobs:
      -
        name: Upload reports
        if: always()
-       uses: actions/upload-artifact@v3
+       uses: actions/upload-artifact@v4
        with:
-         name: ${{ inputs.os }}-${{ inputs.storage }}-integration-reports-${{ matrix.runtime }}
+         name: ${{ inputs.os }}-${{ inputs.storage }}-integration-reports-${{ matrix.runtime }}-${{ env.TESTREPORTS_NAME }}
          path: ${{ env.GOPATH }}\src\github.com\docker\docker\bundles\*

  integration-test-report:
@@ -523,15 +529,16 @@ jobs:
    steps:
      -
        name: Set up Go
-       uses: actions/setup-go@v3
+       uses: actions/setup-go@v5
        with:
          go-version: ${{ env.GO_VERSION }}
      -
-       name: Download artifacts
-       uses: actions/download-artifact@v3
+       name: Download reports
+       uses: actions/download-artifact@v4
        with:
-         name: ${{ inputs.os }}-${{ inputs.storage }}-integration-reports-${{ matrix.runtime }}
-         path: /tmp/artifacts
+         path: /tmp/reports
+         pattern: ${{ inputs.os }}-${{ inputs.storage }}-integration-reports-${{ matrix.runtime }}-*
+         merge-multiple: true
      -
        name: Install teststat
        run: |
@@ -539,4 +546,4 @@ jobs:
      -
        name: Create summary
        run: |
-         teststat -markdown $(find /tmp/artifacts -type f -name '*.json' -print0 | xargs -0) >> $GITHUB_STEP_SUMMARY
+         find /tmp/reports -type f -name '*-go-test-report.json' -exec teststat -markdown {} \+ >> $GITHUB_STEP_SUMMARY
.github/workflows/bin-image.yml | 42
@@ -34,11 +34,11 @@ jobs:
    steps:
      -
        name: Checkout
-       uses: actions/checkout@v3
+       uses: actions/checkout@v4
      -
        name: Docker meta
        id: meta
-       uses: docker/metadata-action@v4
+       uses: docker/metadata-action@v5
        with:
          images: |
            ${{ env.MOBYBIN_REPO_SLUG }}
@@ -61,11 +61,13 @@ jobs:
            type=sha
      -
        name: Rename meta bake definition file
+       # see https://github.com/docker/metadata-action/issues/381#issuecomment-1918607161
        run: |
-         mv "${{ steps.meta.outputs.bake-file }}" "/tmp/bake-meta.json"
+         bakeFile="${{ steps.meta.outputs.bake-file }}"
+         mv "${bakeFile#cwd://}" "/tmp/bake-meta.json"
      -
        name: Upload meta bake definition
-       uses: actions/upload-artifact@v3
+       uses: actions/upload-artifact@v4
        with:
          name: bake-meta
          path: /tmp/bake-meta.json
@@ -88,34 +90,39 @@ jobs:
      matrix:
        platform: ${{ fromJson(needs.prepare.outputs.platforms) }}
    steps:
+     -
+       name: Prepare
+       run: |
+         platform=${{ matrix.platform }}
+         echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
      -
        name: Checkout
-       uses: actions/checkout@v3
+       uses: actions/checkout@v4
        with:
          fetch-depth: 0
      -
        name: Download meta bake definition
-       uses: actions/download-artifact@v3
+       uses: actions/download-artifact@v4
        with:
          name: bake-meta
          path: /tmp
      -
        name: Set up QEMU
-       uses: docker/setup-qemu-action@v2
+       uses: docker/setup-qemu-action@v3
      -
        name: Set up Docker Buildx
-       uses: docker/setup-buildx-action@v2
+       uses: docker/setup-buildx-action@v3
      -
        name: Login to Docker Hub
        if: github.event_name != 'pull_request' && github.repository == 'moby/moby'
-       uses: docker/login-action@v2
+       uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_MOBYBIN_USERNAME }}
          password: ${{ secrets.DOCKERHUB_MOBYBIN_TOKEN }}
      -
        name: Build
        id: bake
-       uses: docker/bake-action@v3
+       uses: docker/bake-action@v4
        with:
          files: |
            ./docker-bake.hcl
@@ -135,9 +142,9 @@ jobs:
      -
        name: Upload digest
        if: github.event_name != 'pull_request' && github.repository == 'moby/moby'
-       uses: actions/upload-artifact@v3
+       uses: actions/upload-artifact@v4
        with:
-         name: digests
+         name: digests-${{ env.PLATFORM_PAIR }}
          path: /tmp/digests/*
          if-no-files-found: error
          retention-days: 1
@@ -150,22 +157,23 @@ jobs:
    steps:
      -
        name: Download meta bake definition
-       uses: actions/download-artifact@v3
+       uses: actions/download-artifact@v4
        with:
          name: bake-meta
          path: /tmp
      -
        name: Download digests
-       uses: actions/download-artifact@v3
+       uses: actions/download-artifact@v4
        with:
-         name: digests
          path: /tmp/digests
+         pattern: digests-*
+         merge-multiple: true
      -
        name: Set up Docker Buildx
-       uses: docker/setup-buildx-action@v2
+       uses: docker/setup-buildx-action@v3
      -
        name: Login to Docker Hub
-       uses: docker/login-action@v2
+       uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_MOBYBIN_USERNAME }}
          password: ${{ secrets.DOCKERHUB_MOBYBIN_TOKEN }}
.github/workflows/buildkit.yml | 22
@@ -13,7 +13,7 @@ on:
  pull_request:

env:
-  GO_VERSION: "1.21.6"
+  GO_VERSION: "1.21.8"
  DESTDIR: ./build

jobs:
@@ -27,18 +27,18 @@ jobs:
    steps:
      -
        name: Checkout
-       uses: actions/checkout@v3
+       uses: actions/checkout@v4
      -
        name: Set up Docker Buildx
-       uses: docker/setup-buildx-action@v2
+       uses: docker/setup-buildx-action@v3
      -
        name: Build
-       uses: docker/bake-action@v2
+       uses: docker/bake-action@v4
        with:
          targets: binary
      -
        name: Upload artifacts
-       uses: actions/upload-artifact@v3
+       uses: actions/upload-artifact@v4
        with:
          name: binary
          path: ${{ env.DESTDIR }}
@@ -78,10 +78,10 @@ jobs:
        # https://github.com/moby/buildkit/blob/567a99433ca23402d5e9b9f9124005d2e59b8861/client/client_test.go#L5407-L5411
      -
        name: Expose GitHub Runtime
-       uses: crazy-max/ghaction-github-runtime@v2
+       uses: crazy-max/ghaction-github-runtime@v3
      -
        name: Checkout
-       uses: actions/checkout@v3
+       uses: actions/checkout@v4
        with:
          path: moby
      -
@@ -91,20 +91,20 @@ jobs:
        working-directory: moby
      -
        name: Checkout BuildKit ${{ env.BUILDKIT_REF }}
-       uses: actions/checkout@v3
+       uses: actions/checkout@v4
        with:
          repository: ${{ env.BUILDKIT_REPO }}
          ref: ${{ env.BUILDKIT_REF }}
          path: buildkit
      -
        name: Set up QEMU
-       uses: docker/setup-qemu-action@v2
+       uses: docker/setup-qemu-action@v3
      -
        name: Set up Docker Buildx
-       uses: docker/setup-buildx-action@v2
+       uses: docker/setup-buildx-action@v3
      -
        name: Download binary artifacts
-       uses: actions/download-artifact@v3
+       uses: actions/download-artifact@v4
        with:
          name: binary
          path: ./buildkit/build/moby/
.github/workflows/ci.yml | 18
@@ -32,15 +32,15 @@ jobs:
    steps:
      -
        name: Checkout
-       uses: actions/checkout@v3
+       uses: actions/checkout@v4
        with:
          fetch-depth: 0
      -
        name: Set up Docker Buildx
-       uses: docker/setup-buildx-action@v2
+       uses: docker/setup-buildx-action@v3
      -
        name: Build
-       uses: docker/bake-action@v2
+       uses: docker/bake-action@v4
        with:
          targets: ${{ matrix.target }}
      -
@@ -53,7 +53,7 @@ jobs:
          find ${{ env.DESTDIR }} -type f -exec file -e ascii -- {} +
      -
        name: Upload artifacts
-       uses: actions/upload-artifact@v3
+       uses: actions/upload-artifact@v4
        with:
          name: ${{ matrix.target }}
          path: ${{ env.DESTDIR }}
@@ -69,7 +69,7 @@ jobs:
    steps:
      -
        name: Checkout
-       uses: actions/checkout@v3
+       uses: actions/checkout@v4
      -
        name: Create matrix
        id: platforms
@@ -93,7 +93,7 @@ jobs:
    steps:
      -
        name: Checkout
-       uses: actions/checkout@v3
+       uses: actions/checkout@v4
        with:
          fetch-depth: 0
      -
@@ -103,10 +103,10 @@ jobs:
          echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
      -
        name: Set up Docker Buildx
-       uses: docker/setup-buildx-action@v2
+       uses: docker/setup-buildx-action@v3
      -
        name: Build
-       uses: docker/bake-action@v2
+       uses: docker/bake-action@v4
        with:
          targets: all
          set: |
@@ -121,7 +121,7 @@ jobs:
          find ${{ env.DESTDIR }} -type f -exec file -e ascii -- {} +
      -
        name: Upload artifacts
-       uses: actions/upload-artifact@v3
+       uses: actions/upload-artifact@v4
        with:
          name: cross-${{ env.PLATFORM_PAIR }}
          path: ${{ env.DESTDIR }}
.github/workflows/test.yml | 27
@@ -13,7 +13,7 @@ on:
  pull_request:

env:
-  GO_VERSION: "1.21.6"
+  GO_VERSION: "1.21.8"

jobs:
  validate-dco:
@@ -38,13 +38,13 @@ jobs:
          fi
      -
        name: Checkout
-       uses: actions/checkout@v3
+       uses: actions/checkout@v4
      -
        name: Set up Docker Buildx
-       uses: docker/setup-buildx-action@v2
+       uses: docker/setup-buildx-action@v3
      -
        name: Build dev image
-       uses: docker/bake-action@v2
+       uses: docker/bake-action@v4
        with:
          targets: dev
          set: |
@@ -57,6 +57,7 @@ jobs:
      - build-dev
      - validate-dco
    uses: ./.github/workflows/.test.yml
+   secrets: inherit
    strategy:
      fail-fast: false
      matrix:
@@ -75,7 +76,7 @@ jobs:
    steps:
      -
        name: Checkout
-       uses: actions/checkout@v3
+       uses: actions/checkout@v4
      -
        name: Create matrix
        id: scripts
@@ -100,7 +101,7 @@ jobs:
    steps:
      -
        name: Checkout
-       uses: actions/checkout@v3
+       uses: actions/checkout@v4
        with:
          fetch-depth: 0
      -
@@ -108,10 +109,10 @@ jobs:
        uses: ./.github/actions/setup-runner
      -
        name: Set up Docker Buildx
-       uses: docker/setup-buildx-action@v2
+       uses: docker/setup-buildx-action@v3
      -
        name: Build dev image
-       uses: docker/bake-action@v2
+       uses: docker/bake-action@v4
        with:
          targets: dev
          set: |
@@ -130,7 +131,7 @@ jobs:
    steps:
      -
        name: Checkout
-       uses: actions/checkout@v3
+       uses: actions/checkout@v4
      -
        name: Create matrix
        id: platforms
@@ -153,7 +154,7 @@ jobs:
    steps:
      -
        name: Checkout
-       uses: actions/checkout@v3
+       uses: actions/checkout@v4
      -
        name: Prepare
        run: |
@@ -161,13 +162,13 @@ jobs:
          echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
      -
        name: Set up QEMU
-       uses: docker/setup-qemu-action@v2
+       uses: docker/setup-qemu-action@v3
      -
        name: Set up Docker Buildx
-       uses: docker/setup-buildx-action@v2
+       uses: docker/setup-buildx-action@v3
      -
        name: Test
-       uses: docker/bake-action@v2
+       uses: docker/bake-action@v4
        with:
          targets: binary-smoketest
          set: |
.github/workflows/windows-2019.yml | 1
@@ -22,6 +22,7 @@ jobs:
    needs:
      - test-prepare
    uses: ./.github/workflows/.windows.yml
+   secrets: inherit
    strategy:
      fail-fast: false
      matrix:
.github/workflows/windows-2022.yml | 1
@@ -25,6 +25,7 @@ jobs:
    needs:
      - test-prepare
    uses: ./.github/workflows/.windows.yml
+   secrets: inherit
    strategy:
      fail-fast: false
      matrix:
Dockerfile | 12
@@ -1,6 +1,6 @@
# syntax=docker/dockerfile:1

-ARG GO_VERSION=1.21.6
+ARG GO_VERSION=1.21.8
ARG BASE_DEBIAN_DISTRO="bookworm"
ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}"
ARG XX_VERSION=1.2.1
@@ -8,12 +8,12 @@ ARG XX_VERSION=1.2.1
ARG VPNKIT_VERSION=0.5.0

ARG DOCKERCLI_REPOSITORY="https://github.com/docker/cli.git"
-ARG DOCKERCLI_VERSION=v24.0.2
+ARG DOCKERCLI_VERSION=v25.0.2
# cli version used for integration-cli tests
ARG DOCKERCLI_INTEGRATION_REPOSITORY="https://github.com/docker/cli.git"
ARG DOCKERCLI_INTEGRATION_VERSION=v17.06.2-ce
ARG BUILDX_VERSION=0.12.1
-ARG COMPOSE_VERSION=v2.24.0
+ARG COMPOSE_VERSION=v2.24.5

ARG SYSTEMD="false"
ARG DOCKER_STATIC=1
@@ -198,7 +198,7 @@ RUN git init . && git remote add origin "https://github.com/containerd/container
# When updating the binary version you may also need to update the vendor
# version to pick up bug fixes or new APIs, however, usually the Go packages
# are built from a commit from the master branch.
-ARG CONTAINERD_VERSION=v1.7.12
+ARG CONTAINERD_VERSION=v1.7.13
RUN git fetch -q --depth 1 origin "${CONTAINERD_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD

FROM base AS containerd-build
@@ -283,7 +283,7 @@ RUN git init . && git remote add origin "https://github.com/opencontainers/runc.
# that is used. If you need to update runc, open a pull request in the containerd
# project first, and update both after that is merged. When updating RUNC_VERSION,
# consider updating runc in vendor.mod accordingly.
-ARG RUNC_VERSION=v1.1.11
+ARG RUNC_VERSION=v1.1.12
RUN git fetch -q --depth 1 origin "${RUNC_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD

FROM base AS runc-build
@@ -352,7 +352,7 @@ FROM base AS rootlesskit-src
WORKDIR /usr/src/rootlesskit
RUN git init . && git remote add origin "https://github.com/rootless-containers/rootlesskit.git"
# When updating, also update vendor.mod and hack/dockerfile/install/rootlesskit.installer accordingly.
-ARG ROOTLESSKIT_VERSION=v2.0.0
+ARG ROOTLESSKIT_VERSION=v2.0.2
RUN git fetch -q --depth 1 origin "${ROOTLESSKIT_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD

FROM base AS rootlesskit-build
@@ -5,7 +5,7 @@

# This represents the bare minimum required to build and test Docker.

-ARG GO_VERSION=1.21.6
+ARG GO_VERSION=1.21.8

ARG BASE_DEBIAN_DISTRO="bookworm"
ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}"

@@ -161,10 +161,10 @@ FROM ${WINDOWS_BASE_IMAGE}:${WINDOWS_BASE_IMAGE_TAG}
# Use PowerShell as the default shell
SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop'; $ProgressPreference = 'SilentlyContinue';"]

-ARG GO_VERSION=1.21.6
+ARG GO_VERSION=1.21.8
ARG GOTESTSUM_VERSION=v1.8.2
ARG GOWINRES_VERSION=v0.3.1
-ARG CONTAINERD_VERSION=v1.7.12
+ARG CONTAINERD_VERSION=v1.7.13

# Environment variable notes:
# - GO_VERSION must be consistent with 'Dockerfile' used by Linux.
Makefile | 3
@@ -250,6 +250,9 @@ swagger-docs: ## preview the API documentation
.PHONY: generate-files
generate-files:
	$(eval $@_TMP_OUT := $(shell mktemp -d -t moby-output.XXXXXXXXXX))
+ifeq ($($@_TMP_OUT),)
+	$(error Could not create temp directory.)
+endif
	$(BUILD_CMD) --target "update" \
		--output "type=local,dest=$($@_TMP_OUT)" \
		--file "./hack/dockerfiles/generate-files.Dockerfile" .
@@ -602,17 +602,27 @@ func (s *containerRouter) postContainersCreate(ctx context.Context, w http.Respo
		hostConfig.Annotations = nil
	}

+	defaultReadOnlyNonRecursive := false
	if versions.LessThan(version, "1.44") {
		if config.Healthcheck != nil {
			// StartInterval was added in API 1.44
			config.Healthcheck.StartInterval = 0
		}

+		// Set ReadOnlyNonRecursive to true because it was added in API 1.44
+		// Before that all read-only mounts were non-recursive.
+		// Keep that behavior for clients on older APIs.
+		defaultReadOnlyNonRecursive = true
+
		for _, m := range hostConfig.Mounts {
-			if m.BindOptions != nil {
-				// Ignore ReadOnlyNonRecursive because it was added in API 1.44.
-				m.BindOptions.ReadOnlyNonRecursive = false
-				if m.BindOptions.ReadOnlyForceRecursive {
+			if m.Type == mount.TypeBind {
+				if m.BindOptions != nil && m.BindOptions.ReadOnlyForceRecursive {
					// NOTE: that technically this is a breaking change for older
					// API versions, and we should ignore the new field.
					// However, this option may be incorrectly set by a client with
					// the expectation that the failing to apply recursive read-only
					// is enforced, so we decided to produce an error instead,
					// instead of silently ignoring.
					return errdefs.InvalidParameter(errors.New("BindOptions.ReadOnlyForceRecursive needs API v1.44 or newer"))
				}
			}
@@ -644,12 +654,13 @@ func (s *containerRouter) postContainersCreate(ctx context.Context, w http.Respo
	}

	ccr, err := s.backend.ContainerCreate(ctx, backend.ContainerCreateConfig{
-		Name:             name,
-		Config:           config,
-		HostConfig:       hostConfig,
-		NetworkingConfig: networkingConfig,
-		AdjustCPUShares:  adjustCPUShares,
-		Platform:         platform,
+		Name:                        name,
+		Config:                      config,
+		HostConfig:                  hostConfig,
+		NetworkingConfig:            networkingConfig,
+		AdjustCPUShares:             adjustCPUShares,
+		Platform:                    platform,
+		DefaultReadOnlyNonRecursive: defaultReadOnlyNonRecursive,
	})
	if err != nil {
		return err
@@ -662,42 +673,73 @@ func (s *containerRouter) postContainersCreate(ctx context.Context, w http.Respo
// networkingConfig to set the endpoint-specific MACAddress field introduced in API v1.44. It returns a warning message
// or an error if the container-wide field was specified for API >= v1.44.
func handleMACAddressBC(config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, version string) (string, error) {
-	if config.MacAddress == "" { //nolint:staticcheck // ignore SA1019: field is deprecated, but still used on API < v1.44.
-		return "", nil
-	}
-
	deprecatedMacAddress := config.MacAddress //nolint:staticcheck // ignore SA1019: field is deprecated, but still used on API < v1.44.

	// For older versions of the API, migrate the container-wide MAC address to EndpointsConfig.
	if versions.LessThan(version, "1.44") {
-		// The container-wide MacAddress parameter is deprecated and should now be specified in EndpointsConfig.
-		if hostConfig.NetworkMode.IsDefault() || hostConfig.NetworkMode.IsBridge() || hostConfig.NetworkMode.IsUserDefined() {
-			nwName := hostConfig.NetworkMode.NetworkName()
-			if _, ok := networkingConfig.EndpointsConfig[nwName]; !ok {
-				networkingConfig.EndpointsConfig[nwName] = &network.EndpointSettings{}
+		if deprecatedMacAddress == "" {
+			// If a MAC address is supplied in EndpointsConfig, discard it because the old API
+			// would have ignored it.
+			for _, ep := range networkingConfig.EndpointsConfig {
+				ep.MacAddress = ""
			}
-			// Overwrite the config: either the endpoint's MacAddress was set by the user on API < v1.44, which
-			// must be ignored, or migrate the top-level MacAddress to the endpoint's config.
-			networkingConfig.EndpointsConfig[nwName].MacAddress = deprecatedMacAddress
+			return "", nil
		}
		if !hostConfig.NetworkMode.IsDefault() && !hostConfig.NetworkMode.IsBridge() && !hostConfig.NetworkMode.IsUserDefined() {
			return "", runconfig.ErrConflictContainerNetworkAndMac
		}
+
+		// There cannot be more than one entry in EndpointsConfig with API < 1.44.
+
+		// If there's no EndpointsConfig, create a place to store the configured address. It is
+		// safe to use NetworkMode as the network name, whether it's a name or id/short-id, as
+		// it will be normalised later and there is no other EndpointSettings object that might
+		// refer to this network/endpoint.
+		if len(networkingConfig.EndpointsConfig) == 0 {
+			nwName := hostConfig.NetworkMode.NetworkName()
+			networkingConfig.EndpointsConfig[nwName] = &network.EndpointSettings{}
+		}
+		// There's exactly one network in EndpointsConfig, either from the API or just-created.
+		// Migrate the container-wide setting to it.
+		// No need to check for a match between NetworkMode and the names/ids in EndpointsConfig,
+		// the old version of the API would have applied the address to this network anyway.
+		for _, ep := range networkingConfig.EndpointsConfig {
+			ep.MacAddress = deprecatedMacAddress
+		}
		return "", nil
	}

	// The container-wide MacAddress parameter is deprecated and should now be specified in EndpointsConfig.
+	if deprecatedMacAddress == "" {
+		return "", nil
+	}
	var warning string
	if hostConfig.NetworkMode.IsDefault() || hostConfig.NetworkMode.IsBridge() || hostConfig.NetworkMode.IsUserDefined() {
		nwName := hostConfig.NetworkMode.NetworkName()
-		if _, ok := networkingConfig.EndpointsConfig[nwName]; !ok {
-			networkingConfig.EndpointsConfig[nwName] = &network.EndpointSettings{}
-		}
-
-		ep := networkingConfig.EndpointsConfig[nwName]
-		if ep.MacAddress == "" {
-			ep.MacAddress = deprecatedMacAddress
-		} else if ep.MacAddress != deprecatedMacAddress {
-			return "", errdefs.InvalidParameter(errors.New("the container-wide MAC address should match the endpoint-specific MAC address for the main network or should be left empty"))
+		// If there's no endpoint config, create a place to store the configured address.
+		if len(networkingConfig.EndpointsConfig) == 0 {
+			networkingConfig.EndpointsConfig[nwName] = &network.EndpointSettings{
+				MacAddress: deprecatedMacAddress,
+			}
+		} else {
+			// There is existing endpoint config - if it's not indexed by NetworkMode.Name(), we
+			// can't tell which network the container-wide settings was intended for. NetworkMode,
+			// the keys in EndpointsConfig and the NetworkID in EndpointsConfig may mix network
+			// name/id/short-id. It's not safe to create EndpointsConfig under the NetworkMode
+			// name to store the container-wide MAC address, because that may result in two sets
+			// of EndpointsConfig for the same network and one set will be discarded later. So,
+			// reject the request ...
+			ep, ok := networkingConfig.EndpointsConfig[nwName]
+			if !ok {
+				return "", errdefs.InvalidParameter(errors.New("if a container-wide MAC address is supplied, HostConfig.NetworkMode must match the identity of a network in NetworkSettings.Networks"))
+			}
+			// ep is the endpoint that needs the container-wide MAC address; migrate the address
+			// to it, or bail out if there's a mismatch.
+			if ep.MacAddress == "" {
+				ep.MacAddress = deprecatedMacAddress
+			} else if ep.MacAddress != deprecatedMacAddress {
+				return "", errdefs.InvalidParameter(errors.New("the container-wide MAC address must match the endpoint-specific MAC address for the main network, or be left empty"))
+			}
		}
	}
	warning = "The container-wide MacAddress field is now deprecated. It should be specified in EndpointsConfig instead."
api/server/router/container/container_routes_test.go | 160 (new file)
@@ -0,0 +1,160 @@
package container

import (
	"testing"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/network"
	"gotest.tools/v3/assert"
	is "gotest.tools/v3/assert/cmp"
)

func TestHandleMACAddressBC(t *testing.T) {
	testcases := []struct {
		name                string
		apiVersion          string
		ctrWideMAC          string
		networkMode         container.NetworkMode
		epConfig            map[string]*network.EndpointSettings
		expEpWithCtrWideMAC string
		expEpWithNoMAC      string
		expCtrWideMAC       string
		expWarning          string
		expError            string
	}{
		{
			name:                "old api ctr-wide mac mix id and name",
			apiVersion:          "1.43",
			ctrWideMAC:          "11:22:33:44:55:66",
			networkMode:         "aNetId",
			epConfig:            map[string]*network.EndpointSettings{"aNetName": {}},
			expEpWithCtrWideMAC: "aNetName",
			expCtrWideMAC:       "11:22:33:44:55:66",
		},
		{
			name:           "old api clear ep mac",
			apiVersion:     "1.43",
			networkMode:    "aNetId",
			epConfig:       map[string]*network.EndpointSettings{"aNetName": {MacAddress: "11:22:33:44:55:66"}},
			expEpWithNoMAC: "aNetName",
		},
		{
			name:          "old api no-network ctr-wide mac",
			apiVersion:    "1.43",
			networkMode:   "none",
			ctrWideMAC:    "11:22:33:44:55:66",
			expError:      "conflicting options: mac-address and the network mode",
			expCtrWideMAC: "11:22:33:44:55:66",
		},
		{
			name:                "old api create ep",
			apiVersion:          "1.43",
			networkMode:         "aNetId",
			ctrWideMAC:          "11:22:33:44:55:66",
			epConfig:            map[string]*network.EndpointSettings{},
			expEpWithCtrWideMAC: "aNetId",
			expCtrWideMAC:       "11:22:33:44:55:66",
		},
		{
			name:                "old api migrate ctr-wide mac",
			apiVersion:          "1.43",
			ctrWideMAC:          "11:22:33:44:55:66",
			networkMode:         "aNetName",
			epConfig:            map[string]*network.EndpointSettings{"aNetName": {}},
			expEpWithCtrWideMAC: "aNetName",
			expCtrWideMAC:       "11:22:33:44:55:66",
		},
		{
			name:        "new api no macs",
			apiVersion:  "1.44",
			networkMode: "aNetId",
			epConfig:    map[string]*network.EndpointSettings{"aNetName": {}},
		},
		{
			name:        "new api ep specific mac",
			apiVersion:  "1.44",
			networkMode: "aNetName",
			epConfig:    map[string]*network.EndpointSettings{"aNetName": {MacAddress: "11:22:33:44:55:66"}},
		},
		{
			name:                "new api migrate ctr-wide mac to new ep",
			apiVersion:          "1.44",
			ctrWideMAC:          "11:22:33:44:55:66",
			networkMode:         "aNetName",
			epConfig:            map[string]*network.EndpointSettings{},
			expEpWithCtrWideMAC: "aNetName",
			expWarning:          "The container-wide MacAddress field is now deprecated",
			expCtrWideMAC:       "",
		},
		{
			name:                "new api migrate ctr-wide mac to existing ep",
			apiVersion:          "1.44",
			ctrWideMAC:          "11:22:33:44:55:66",
			networkMode:         "aNetName",
			epConfig:            map[string]*network.EndpointSettings{"aNetName": {}},
			expEpWithCtrWideMAC: "aNetName",
			expWarning:          "The container-wide MacAddress field is now deprecated",
			expCtrWideMAC:       "",
		},
		{
			name:          "new api mode vs name mismatch",
			apiVersion:    "1.44",
			ctrWideMAC:    "11:22:33:44:55:66",
			networkMode:   "aNetId",
			epConfig:      map[string]*network.EndpointSettings{"aNetName": {}},
			expError:      "if a container-wide MAC address is supplied, HostConfig.NetworkMode must match the identity of a network in NetworkSettings.Networks",
			expCtrWideMAC: "11:22:33:44:55:66",
		},
		{
			name:          "new api mac mismatch",
			apiVersion:    "1.44",
			ctrWideMAC:    "11:22:33:44:55:66",
			networkMode:   "aNetName",
			epConfig:      map[string]*network.EndpointSettings{"aNetName": {MacAddress: "00:11:22:33:44:55"}},
			expError:      "the container-wide MAC address must match the endpoint-specific MAC address",
			expCtrWideMAC: "11:22:33:44:55:66",
		},
	}

	for _, tc := range testcases {
		t.Run(tc.name, func(t *testing.T) {
			cfg := &container.Config{
				MacAddress: tc.ctrWideMAC, //nolint:staticcheck // ignore SA1019: field is deprecated, but still used on API < v1.44.
			}
			hostCfg := &container.HostConfig{
				NetworkMode: tc.networkMode,
			}
			epConfig := make(map[string]*network.EndpointSettings, len(tc.epConfig))
			for k, v := range tc.epConfig {
				v := v
				epConfig[k] = v
			}
			netCfg := &network.NetworkingConfig{
				EndpointsConfig: epConfig,
			}

			warning, err := handleMACAddressBC(cfg, hostCfg, netCfg, tc.apiVersion)

			if tc.expError == "" {
				assert.Check(t, err)
			} else {
				assert.Check(t, is.ErrorContains(err, tc.expError))
			}
			if tc.expWarning == "" {
				assert.Check(t, is.Equal(warning, ""))
			} else {
				assert.Check(t, is.Contains(warning, tc.expWarning))
			}
			if tc.expEpWithCtrWideMAC != "" {
				got := netCfg.EndpointsConfig[tc.expEpWithCtrWideMAC].MacAddress
				assert.Check(t, is.Equal(got, tc.ctrWideMAC))
			}
			if tc.expEpWithNoMAC != "" {
				got := netCfg.EndpointsConfig[tc.expEpWithNoMAC].MacAddress
				assert.Check(t, is.Equal(got, ""))
			}
			gotCtrWideMAC := cfg.MacAddress //nolint:staticcheck // ignore SA1019: field is deprecated, but still used on API < v1.44.
			assert.Check(t, is.Equal(gotCtrWideMAC, tc.expCtrWideMAC))
		})
	}
}
@@ -298,6 +298,12 @@ func (ir *imageRouter) getImagesByName(ctx context.Context, w http.ResponseWrite
	version := httputils.VersionFromContext(ctx)
	if versions.LessThan(version, "1.44") {
		imageInspect.VirtualSize = imageInspect.Size //nolint:staticcheck // ignore SA1019: field is deprecated, but still set on API < v1.44.
+
+		if imageInspect.Created == "" {
+			// backwards compatibility for Created not existing returning "0001-01-01T00:00:00Z"
+			// https://github.com/moby/moby/issues/47368
+			imageInspect.Created = time.Time{}.Format(time.RFC3339Nano)
+		}
	}
	return httputils.WriteJSON(w, http.StatusOK, imageInspect)
}
@@ -391,7 +391,11 @@ definitions:
        ReadOnlyNonRecursive:
          description: |
            Make the mount non-recursively read-only, but still leave the mount recursive
-           (unless NonRecursive is set to true in conjunction).
+           (unless NonRecursive is set to `true` in conjunction).
+
+           Addded in v1.44, before that version all read-only mounts were
+           non-recursive by default. To match the previous behaviour this
+           will default to `true` for clients on versions prior to v1.44.
          type: "boolean"
          default: false
        ReadOnlyForceRecursive:
@@ -1743,8 +1747,12 @@ definitions:
        description: |
          Date and time at which the image was created, formatted in
          [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
+
+         This information is only available if present in the image,
+         and omitted otherwise.
        type: "string"
-       x-nullable: false
        format: "dateTime"
+       x-nullable: true
        example: "2022-02-04T21:20:12.497794809Z"
    Container:
      description: |
@@ -8327,6 +8335,16 @@ paths:
          description: "BuildKit output configuration"
          type: "string"
          default: ""
+       - name: "version"
+         in: "query"
+         type: "string"
+         default: "1"
+         enum: ["1", "2"]
+         description: |
+           Version of the builder backend to use.
+
+           - `1` is the first generation classic (deprecated) builder in the Docker daemon (default)
+           - `2` is [BuildKit](https://github.com/moby/buildkit)
      responses:
        200:
          description: "no error"
@@ -13,12 +13,13 @@ import (

// ContainerCreateConfig is the parameter set to ContainerCreate()
type ContainerCreateConfig struct {
-	Name             string
-	Config           *container.Config
-	HostConfig       *container.HostConfig
-	NetworkingConfig *network.NetworkingConfig
-	Platform         *ocispec.Platform
-	AdjustCPUShares  bool
+	Name                        string
+	Config                      *container.Config
+	HostConfig                  *container.HostConfig
+	NetworkingConfig            *network.NetworkingConfig
+	Platform                    *ocispec.Platform
+	AdjustCPUShares             bool
+	DefaultReadOnlyNonRecursive bool
}

// ContainerRmConfig holds arguments for the container remove
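The new DefaultReadOnlyNonRecursive field lets the API router tell the backend how older clients expect read-only bind mounts to behave. A minimal sketch of how a caller might set it from the negotiated API version, assuming the moby packages shown in the surrounding hunks (the wrapper function itself is hypothetical, not part of the diff):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/backend"
	"github.com/docker/docker/api/types/versions"
)

// createConfigForVersion is an illustrative helper, not code from the commit.
func createConfigForVersion(apiVersion string) backend.ContainerCreateConfig {
	cfg := backend.ContainerCreateConfig{Name: "example"}
	// Clients older than API 1.44 expect read-only mounts to stay non-recursive,
	// so the router asks the backend to keep that default for them.
	if versions.LessThan(apiVersion, "1.44") {
		cfg.DefaultReadOnlyNonRecursive = true
	}
	return cfg
}

func main() {
	fmt.Println(createConfigForVersion("1.43").DefaultReadOnlyNonRecursive) // true
	fmt.Println(createConfigForVersion("1.44").DefaultReadOnlyNonRecursive) // false
}
```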
@@ -14,6 +14,9 @@ type EndpointSettings struct {
	IPAMConfig *EndpointIPAMConfig
	Links      []string
	Aliases    []string // Aliases holds the list of extra, user-specified DNS names for this endpoint.
+	// MacAddress may be used to specify a MAC address when the container is created.
+	// Once the container is running, it becomes operational data (it may contain a
+	// generated address).
	MacAddress string
	// Operational data
	NetworkID string
@@ -30,30 +30,9 @@
	ip6 ipFamily = "IPv6"
)

-// HasIPv6Subnets checks whether there's any IPv6 subnets in the ipam parameter. It ignores any invalid Subnet and nil
-// ipam.
-func HasIPv6Subnets(ipam *IPAM) bool {
-	if ipam == nil {
-		return false
-	}
-
-	for _, cfg := range ipam.Config {
-		subnet, err := netip.ParsePrefix(cfg.Subnet)
-		if err != nil {
-			continue
-		}
-
-		if subnet.Addr().Is6() {
-			return true
-		}
-	}
-
-	return false
-}
-
// ValidateIPAM checks whether the network's IPAM passed as argument is valid. It returns a joinError of the list of
// errors found.
-func ValidateIPAM(ipam *IPAM) error {
+func ValidateIPAM(ipam *IPAM, enableIPv6 bool) error {
	if ipam == nil {
		return nil
	}
@@ -70,6 +49,10 @@ func ValidateIPAM(ipam *IPAM) error {
			subnetFamily = ip6
		}

+		if !enableIPv6 && subnetFamily == ip6 {
+			continue
+		}
+
		if subnet != subnet.Masked() {
			errs = append(errs, fmt.Errorf("invalid subnet %s: it should be %s", subnet, subnet.Masked()))
		}
@@ -30,6 +30,12 @@ func TestNetworkWithInvalidIPAM(t *testing.T) {
				"invalid auxiliary address DefaultGatewayIPv4: parent subnet is an IPv4 block",
			},
		},
+		{
+			// Regression test for https://github.com/moby/moby/issues/47202
+			name: "IPv6 subnet is discarded with no error when IPv6 is disabled",
+			ipam: IPAM{Config: []IPAMConfig{{Subnet: "2001:db8::/32"}}},
+			ipv6: false,
+		},
		{
			name: "Invalid data - Subnet",
			ipam: IPAM{Config: []IPAMConfig{{Subnet: "foobar"}}},
@@ -122,7 +128,7 @@ func TestNetworkWithInvalidIPAM(t *testing.T) {
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()

-			errs := ValidateIPAM(&tc.ipam)
+			errs := ValidateIPAM(&tc.ipam, tc.ipv6)
			if tc.expectedErrors == nil {
				assert.NilError(t, errs)
				return
@@ -72,7 +72,10 @@ type ImageInspect struct {

// Created is the date and time at which the image was created, formatted in
// RFC 3339 nano-seconds (time.RFC3339Nano).
Created string
//
// This information is only available if present in the image,
// and omitted otherwise.
Created string `json:",omitempty"`

// Container is the ID of the container that was used to create the image.
//
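Because `Created` now carries `json:",omitempty"`, consumers should treat the field as optional. A minimal stdlib sketch of parsing it defensively:

```go
// Sketch: ImageInspect.Created may be absent; parse it only when present.
// time.RFC3339Nano matches the documented format.
package main

import (
	"fmt"
	"time"
)

func parseCreated(created string) (time.Time, bool) {
	if created == "" {
		return time.Time{}, false // not recorded in the image
	}
	t, err := time.Parse(time.RFC3339Nano, created)
	if err != nil {
		return time.Time{}, false
	}
	return t, true
}

func main() {
	if t, ok := parseCreated("2022-02-04T21:20:12.497794809Z"); ok {
		fmt.Println("created:", t.UTC())
	}
	if _, ok := parseCreated(""); !ok {
		fmt.Println("no creation time recorded")
	}
}
```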
@@ -142,8 +142,8 @@ func newSnapshotterController(ctx context.Context, rt http.RoundTripper, opt Opt
|
||||
return nil, err
|
||||
}
|
||||
frontends := map[string]frontend.Frontend{
|
||||
"dockerfile.v0": forwarder.NewGatewayForwarder(wc, dockerfile.Build),
|
||||
"gateway.v0": gateway.NewGatewayFrontend(wc),
|
||||
"dockerfile.v0": forwarder.NewGatewayForwarder(wc.Infos(), dockerfile.Build),
|
||||
"gateway.v0": gateway.NewGatewayFrontend(wc.Infos()),
|
||||
}
|
||||
|
||||
return control.NewController(control.Opt{
|
||||
@@ -364,8 +364,8 @@ func newGraphDriverController(ctx context.Context, rt http.RoundTripper, opt Opt
|
||||
wc.Add(w)
|
||||
|
||||
frontends := map[string]frontend.Frontend{
|
||||
"dockerfile.v0": forwarder.NewGatewayForwarder(wc, dockerfile.Build),
|
||||
"gateway.v0": gateway.NewGatewayFrontend(wc),
|
||||
"dockerfile.v0": forwarder.NewGatewayForwarder(wc.Infos(), dockerfile.Build),
|
||||
"gateway.v0": gateway.NewGatewayFrontend(wc.Infos()),
|
||||
}
|
||||
|
||||
return control.NewController(control.Opt{
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
"github.com/docker/docker/image"
"github.com/docker/docker/layer"
"github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

const (
@@ -85,7 +86,7 @@ type ImageCacheBuilder interface {
type ImageCache interface {
// GetCache returns a reference to a cached image whose parent equals `parent`
// and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error.
GetCache(parentID string, cfg *container.Config) (imageID string, err error)
GetCache(parentID string, cfg *container.Config, platform ocispec.Platform) (imageID string, err error)
}

// Image represents a Docker image used by the builder.
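With the extra `ocispec.Platform` argument, cache lookups become platform-aware. As a hedged illustration only (the real implementations in the daemon compare configs and walk image parents), a toy implementation of the updated interface might look like this; everything except the method signature is an assumption for demonstration purposes.

```go
// Toy in-memory implementation of the updated GetCache signature, keyed by
// parent ID and platform. It ignores the config, which a real cache must
// compare; a miss is an empty ID with a nil error.
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

type memoryCache struct {
	entries map[string]string // key -> cached image ID
}

func key(parentID string, platform ocispec.Platform) string {
	return fmt.Sprintf("%s|%s/%s/%s", parentID, platform.OS, platform.Architecture, platform.Variant)
}

func (c *memoryCache) GetCache(parentID string, _ *container.Config, platform ocispec.Platform) (string, error) {
	return c.entries[key(parentID, platform)], nil
}

func main() {
	c := &memoryCache{entries: map[string]string{
		key("sha256:parent", ocispec.Platform{OS: "linux", Architecture: "arm64"}): "sha256:cached",
	}}
	id, _ := c.GetCache("sha256:parent", nil, ocispec.Platform{OS: "linux", Architecture: "arm64"})
	fmt.Println("cache hit:", id)
}
```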
@@ -8,7 +8,6 @@ import (
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -74,7 +73,7 @@ type copier struct {
|
||||
source builder.Source
|
||||
pathCache pathCache
|
||||
download sourceDownloader
|
||||
platform *ocispec.Platform
|
||||
platform ocispec.Platform
|
||||
// for cleanup. TODO: having copier.cleanup() is error prone and hard to
|
||||
// follow. Code calling performCopy should manage the lifecycle of its params.
|
||||
// Copier should take override source as input, not imageMount.
|
||||
@@ -83,19 +82,7 @@ type copier struct {
|
||||
}
|
||||
|
||||
func copierFromDispatchRequest(req dispatchRequest, download sourceDownloader, imageSource *imageMount) copier {
|
||||
platform := req.builder.platform
|
||||
if platform == nil {
|
||||
// May be nil if not explicitly set in API/dockerfile
|
||||
platform = &ocispec.Platform{}
|
||||
}
|
||||
if platform.OS == "" {
|
||||
// Default to the dispatch requests operating system if not explicit in API/dockerfile
|
||||
platform.OS = req.state.operatingSystem
|
||||
}
|
||||
if platform.OS == "" {
|
||||
// This is a failsafe just in case. Shouldn't be hit.
|
||||
platform.OS = runtime.GOOS
|
||||
}
|
||||
platform := req.builder.getPlatform(req.state)
|
||||
|
||||
return copier{
|
||||
source: req.source,
|
||||
@@ -472,7 +459,16 @@ func performCopyForInfo(dest copyInfo, source copyInfo, options copyFileOptions)
|
||||
return copyDirectory(archiver, srcPath, destPath, options.identity)
|
||||
}
|
||||
if options.decompress && archive.IsArchivePath(srcPath) && !source.noDecompress {
|
||||
return archiver.UntarPath(srcPath, destPath)
|
||||
f, err := os.Open(srcPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
options := &archive.TarOptions{
|
||||
IDMap: archiver.IDMapping,
|
||||
BestEffortXattrs: true,
|
||||
}
|
||||
return archiver.Untar(f, destPath, options)
|
||||
}
|
||||
|
||||
destExistsAsDir, err := isExistingDirectory(destPath)
|
||||
|
||||
@@ -348,9 +348,16 @@ func dispatchRun(ctx context.Context, d dispatchRequest, c *instructions.RunComm
|
||||
saveCmd = prependEnvOnCmd(d.state.buildArgs, buildArgs, cmdFromArgs)
|
||||
}
|
||||
|
||||
cacheArgsEscaped := argsEscaped
|
||||
// ArgsEscaped is not persisted in the committed image on Windows.
|
||||
// Use the original from previous build steps for cache probing.
|
||||
if d.state.operatingSystem == "windows" {
|
||||
cacheArgsEscaped = stateRunConfig.ArgsEscaped
|
||||
}
|
||||
|
||||
runConfigForCacheProbe := copyRunConfig(stateRunConfig,
|
||||
withCmd(saveCmd),
|
||||
withArgsEscaped(argsEscaped),
|
||||
withArgsEscaped(cacheArgsEscaped),
|
||||
withEntrypointOverride(saveCmd, nil))
|
||||
if hit, err := d.builder.probeCache(d.state, runConfigForCacheProbe); err != nil || hit {
|
||||
return err
|
||||
|
||||
@@ -6,13 +6,14 @@ import (
|
||||
"github.com/containerd/log"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/builder"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
// ImageProber exposes an Image cache to the Builder. It supports resetting a
|
||||
// cache.
|
||||
type ImageProber interface {
|
||||
Reset(ctx context.Context) error
|
||||
Probe(parentID string, runConfig *container.Config) (string, error)
|
||||
Probe(parentID string, runConfig *container.Config, platform ocispec.Platform) (string, error)
|
||||
}
|
||||
|
||||
type resetFunc func(context.Context) (builder.ImageCache, error)
|
||||
@@ -51,11 +52,11 @@ func (c *imageProber) Reset(ctx context.Context) error {
|
||||
|
||||
// Probe checks if cache match can be found for current build instruction.
|
||||
// It returns the cachedID if there is a hit, and the empty string on miss
|
||||
func (c *imageProber) Probe(parentID string, runConfig *container.Config) (string, error) {
|
||||
func (c *imageProber) Probe(parentID string, runConfig *container.Config, platform ocispec.Platform) (string, error) {
|
||||
if c.cacheBusted {
|
||||
return "", nil
|
||||
}
|
||||
cacheID, err := c.cache.GetCache(parentID, runConfig)
|
||||
cacheID, err := c.cache.GetCache(parentID, runConfig, platform)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@@ -74,6 +75,6 @@ func (c *nopProber) Reset(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *nopProber) Probe(_ string, _ *container.Config) (string, error) {
|
||||
func (c *nopProber) Probe(_ string, _ *container.Config, _ ocispec.Platform) (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/containerd/log"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/backend"
|
||||
@@ -328,7 +329,7 @@ func getShell(c *container.Config, os string) []string {
|
||||
}
|
||||
|
||||
func (b *Builder) probeCache(dispatchState *dispatchState, runConfig *container.Config) (bool, error) {
|
||||
cachedID, err := b.imageProber.Probe(dispatchState.imageID, runConfig)
|
||||
cachedID, err := b.imageProber.Probe(dispatchState.imageID, runConfig, b.getPlatform(dispatchState))
|
||||
if cachedID == "" || err != nil {
|
||||
return false, err
|
||||
}
|
||||
@@ -388,3 +389,17 @@ func hostConfigFromOptions(options *types.ImageBuildOptions) *container.HostConf
|
||||
}
|
||||
return hc
|
||||
}
|
||||
|
||||
func (b *Builder) getPlatform(state *dispatchState) ocispec.Platform {
|
||||
// May be nil if not explicitly set in API/dockerfile
|
||||
out := platforms.DefaultSpec()
|
||||
if b.platform != nil {
|
||||
out = *b.platform
|
||||
}
|
||||
|
||||
if state.operatingSystem != "" {
|
||||
out.OS = state.operatingSystem
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
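The new `getPlatform` helper replaces the ad-hoc defaulting that `copierFromDispatchRequest` used to do: start from the host default, prefer an explicitly requested platform, then let the dispatch state's operating system override the OS (relevant for Windows stages). A self-contained sketch of that ordering, using only containerd's `platforms` package and the OCI spec types:

```go
// Sketch of the defaulting order in getPlatform: host default, then the
// explicitly requested platform (if any), then the stage's OS wins.
package main

import (
	"fmt"

	"github.com/containerd/containerd/platforms"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func resolvePlatform(requested *ocispec.Platform, stateOS string) ocispec.Platform {
	out := platforms.DefaultSpec() // host platform, e.g. linux/amd64
	if requested != nil {
		out = *requested // explicit --platform / API value
	}
	if stateOS != "" {
		out.OS = stateOS // operating system recorded in the dispatch state
	}
	return out
}

func main() {
	fmt.Println(platforms.Format(resolvePlatform(nil, "")))
	fmt.Println(platforms.Format(resolvePlatform(&ocispec.Platform{OS: "linux", Architecture: "arm64"}, "")))
	fmt.Println(platforms.Format(resolvePlatform(nil, "windows")))
}
```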
@@ -13,6 +13,7 @@ import (
|
||||
"github.com/docker/docker/image"
|
||||
"github.com/docker/docker/layer"
|
||||
"github.com/opencontainers/go-digest"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
// MockBackend implements the builder.Backend interface for unit testing
|
||||
@@ -106,7 +107,7 @@ type mockImageCache struct {
|
||||
getCacheFunc func(parentID string, cfg *container.Config) (string, error)
|
||||
}
|
||||
|
||||
func (mic *mockImageCache) GetCache(parentID string, cfg *container.Config) (string, error) {
|
||||
func (mic *mockImageCache) GetCache(parentID string, cfg *container.Config, _ ocispec.Platform) (string, error) {
|
||||
if mic.getCacheFunc != nil {
|
||||
return mic.getCacheFunc(parentID, cfg)
|
||||
}
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/chrootarchive"
|
||||
"github.com/docker/docker/pkg/longpath"
|
||||
"github.com/docker/docker/pkg/system"
|
||||
"github.com/docker/docker/pkg/tarsum"
|
||||
"github.com/moby/sys/symlink"
|
||||
"github.com/pkg/errors"
|
||||
@@ -24,9 +25,11 @@ func (c *archiveContext) Close() error {
|
||||
}
|
||||
|
||||
func convertPathError(err error, cleanpath string) error {
|
||||
if err, ok := err.(*os.PathError); ok {
|
||||
switch err := err.(type) {
|
||||
case *os.PathError:
|
||||
err.Path = cleanpath
|
||||
case *system.XattrError:
|
||||
err.Path = cleanpath
|
||||
return err
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -12,7 +12,7 @@ import (

// urlPathWithFragmentSuffix matches fragments to use as Git reference and build
// context from the Git repository. See IsGitURL for details.
var urlPathWithFragmentSuffix = regexp.MustCompile(".git(?:#.+)?$")
var urlPathWithFragmentSuffix = regexp.MustCompile(`\.git(?:#.+)?$`)

// IsURL returns true if the provided str is an HTTP(S) URL by checking if it
// has a http:// or https:// scheme. No validation is performed to verify if the
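The unescaped dot made `.git` match any character followed by `git`, so a URL such as `https://github.com/docker/dgit` was wrongly treated as a Git URL; escaping the dot restricts the match to a literal `.git` suffix with an optional `#fragment`. A quick stdlib demonstration:

```go
// Demonstration of the regexp fix: with the unescaped dot, "dgit" matches;
// with `\.git`, only a literal ".git" suffix (optionally followed by a
// "#fragment") does.
package main

import (
	"fmt"
	"regexp"
)

func main() {
	oldRe := regexp.MustCompile(".git(?:#.+)?$")
	newRe := regexp.MustCompile(`\.git(?:#.+)?$`)

	for _, u := range []string{
		"https://github.com/docker/docker.git",
		"https://github.com/docker/docker.git#branch:dir",
		"https://github.com/docker/dgit", // not a Git URL
	} {
		fmt.Printf("%-50s old=%v new=%v\n", u, oldRe.MatchString(u), newRe.MatchString(u))
	}
}
```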
@@ -17,6 +17,7 @@ var (
}
invalidGitUrls = []string{
"http://github.com/docker/docker.git:#branch",
"https://github.com/docker/dgit",
}
)

@@ -265,17 +265,22 @@ func (cli *Client) Close() error {
|
||||
// This allows for version-dependent code to use the same version as will
|
||||
// be negotiated when making the actual requests, and for which cases
|
||||
// we cannot do the negotiation lazily.
|
||||
func (cli *Client) checkVersion(ctx context.Context) {
|
||||
if cli.negotiateVersion && !cli.negotiated {
|
||||
cli.NegotiateAPIVersion(ctx)
|
||||
func (cli *Client) checkVersion(ctx context.Context) error {
|
||||
if !cli.manualOverride && cli.negotiateVersion && !cli.negotiated {
|
||||
ping, err := cli.Ping(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cli.negotiateAPIVersionPing(ping)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// getAPIPath returns the versioned request path to call the API.
|
||||
// It appends the query parameters to the path if they are not empty.
|
||||
func (cli *Client) getAPIPath(ctx context.Context, p string, query url.Values) string {
|
||||
var apiPath string
|
||||
cli.checkVersion(ctx)
|
||||
_ = cli.checkVersion(ctx)
|
||||
if cli.version != "" {
|
||||
v := strings.TrimPrefix(cli.version, "v")
|
||||
apiPath = path.Join(cli.basePath, "/v"+v, p)
|
||||
@@ -307,7 +312,11 @@ func (cli *Client) ClientVersion() string {
|
||||
// added (1.24).
|
||||
func (cli *Client) NegotiateAPIVersion(ctx context.Context) {
|
||||
if !cli.manualOverride {
|
||||
ping, _ := cli.Ping(ctx)
|
||||
ping, err := cli.Ping(ctx)
|
||||
if err != nil {
|
||||
// FIXME(thaJeztah): Ping returns an error when failing to connect to the API; we should not swallow the error here, and instead return it.
|
||||
return
|
||||
}
|
||||
cli.negotiateAPIVersionPing(ping)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -354,6 +354,19 @@ func TestNegotiateAPVersionOverride(t *testing.T) {
|
||||
assert.Equal(t, client.ClientVersion(), expected)
|
||||
}
|
||||
|
||||
// TestNegotiateAPVersionConnectionFailure asserts that we do not modify the
|
||||
// API version when failing to connect.
|
||||
func TestNegotiateAPVersionConnectionFailure(t *testing.T) {
|
||||
const expected = "9.99"
|
||||
|
||||
client, err := NewClientWithOpts(WithHost("tcp://no-such-host.invalid"))
|
||||
assert.NilError(t, err)
|
||||
|
||||
client.version = expected
|
||||
client.NegotiateAPIVersion(context.Background())
|
||||
assert.Equal(t, client.ClientVersion(), expected)
|
||||
}
|
||||
|
||||
func TestNegotiateAPIVersionAutomatic(t *testing.T) {
|
||||
var pingVersion string
|
||||
httpClient := newMockClient(func(req *http.Request) (*http.Response, error) {
|
||||
|
||||
@@ -28,7 +28,9 @@ func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config
|
||||
//
|
||||
// Normally, version-negotiation (if enabled) would not happen until
|
||||
// the API request is made.
|
||||
cli.checkVersion(ctx)
|
||||
if err := cli.checkVersion(ctx); err != nil {
|
||||
return response, err
|
||||
}
|
||||
|
||||
if err := cli.NewVersionError(ctx, "1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil {
|
||||
return response, err
|
||||
|
||||
@@ -113,3 +113,15 @@ func TestContainerCreateAutoRemove(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestContainerCreateConnection verifies that connection errors occurring
|
||||
// during API-version negotiation are not shadowed by API-version errors.
|
||||
//
|
||||
// Regression test for https://github.com/docker/cli/issues/4890
|
||||
func TestContainerCreateConnectionError(t *testing.T) {
|
||||
client, err := NewClientWithOpts(WithAPIVersionNegotiation(), WithHost("tcp://no-such-host.invalid"))
|
||||
assert.NilError(t, err)
|
||||
|
||||
_, err = client.ContainerCreate(context.Background(), nil, nil, nil, nil, "")
|
||||
assert.Check(t, is.ErrorType(err, IsErrConnectionFailed))
|
||||
}
|
||||
|
||||
@@ -18,7 +18,9 @@ func (cli *Client) ContainerExecCreate(ctx context.Context, container string, co
|
||||
//
|
||||
// Normally, version-negotiation (if enabled) would not happen until
|
||||
// the API request is made.
|
||||
cli.checkVersion(ctx)
|
||||
if err := cli.checkVersion(ctx); err != nil {
|
||||
return response, err
|
||||
}
|
||||
|
||||
if err := cli.NewVersionError(ctx, "1.25", "env"); len(config.Env) != 0 && err != nil {
|
||||
return response, err
|
||||
|
||||
@@ -24,6 +24,18 @@ func TestContainerExecCreateError(t *testing.T) {
|
||||
assert.Check(t, is.ErrorType(err, errdefs.IsSystem))
|
||||
}
|
||||
|
||||
// TestContainerExecCreateConnectionError verifies that connection errors occurring
|
||||
// during API-version negotiation are not shadowed by API-version errors.
|
||||
//
|
||||
// Regression test for https://github.com/docker/cli/issues/4890
|
||||
func TestContainerExecCreateConnectionError(t *testing.T) {
|
||||
client, err := NewClientWithOpts(WithAPIVersionNegotiation(), WithHost("tcp://no-such-host.invalid"))
|
||||
assert.NilError(t, err)
|
||||
|
||||
_, err = client.ContainerExecCreate(context.Background(), "", types.ExecConfig{})
|
||||
assert.Check(t, is.ErrorType(err, IsErrConnectionFailed))
|
||||
}
|
||||
|
||||
func TestContainerExecCreate(t *testing.T) {
|
||||
expectedURL := "/containers/container_id/exec"
|
||||
client := &Client{
|
||||
|
||||
@@ -23,7 +23,9 @@ func (cli *Client) ContainerRestart(ctx context.Context, containerID string, opt
|
||||
//
|
||||
// Normally, version-negotiation (if enabled) would not happen until
|
||||
// the API request is made.
|
||||
cli.checkVersion(ctx)
|
||||
if err := cli.checkVersion(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
if versions.GreaterThanOrEqualTo(cli.version, "1.42") {
|
||||
query.Set("signal", options.Signal)
|
||||
}
|
||||
|
||||
@@ -23,6 +23,18 @@ func TestContainerRestartError(t *testing.T) {
|
||||
assert.Check(t, is.ErrorType(err, errdefs.IsSystem))
|
||||
}
|
||||
|
||||
// TestContainerRestartConnectionError verifies that connection errors occurring
|
||||
// during API-version negotiation are not shadowed by API-version errors.
|
||||
//
|
||||
// Regression test for https://github.com/docker/cli/issues/4890
|
||||
func TestContainerRestartConnectionError(t *testing.T) {
|
||||
client, err := NewClientWithOpts(WithAPIVersionNegotiation(), WithHost("tcp://no-such-host.invalid"))
|
||||
assert.NilError(t, err)
|
||||
|
||||
err = client.ContainerRestart(context.Background(), "nothing", container.StopOptions{})
|
||||
assert.Check(t, is.ErrorType(err, IsErrConnectionFailed))
|
||||
}
|
||||
|
||||
func TestContainerRestart(t *testing.T) {
|
||||
const expectedURL = "/v1.42/containers/container_id/restart"
|
||||
client := &Client{
|
||||
|
||||
@@ -27,7 +27,9 @@ func (cli *Client) ContainerStop(ctx context.Context, containerID string, option
|
||||
//
|
||||
// Normally, version-negotiation (if enabled) would not happen until
|
||||
// the API request is made.
|
||||
cli.checkVersion(ctx)
|
||||
if err := cli.checkVersion(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
if versions.GreaterThanOrEqualTo(cli.version, "1.42") {
|
||||
query.Set("signal", options.Signal)
|
||||
}
|
||||
|
||||
@@ -23,6 +23,18 @@ func TestContainerStopError(t *testing.T) {
|
||||
assert.Check(t, is.ErrorType(err, errdefs.IsSystem))
|
||||
}
|
||||
|
||||
// TestContainerStopConnectionError verifies that connection errors occurring
|
||||
// during API-version negotiation are not shadowed by API-version errors.
|
||||
//
|
||||
// Regression test for https://github.com/docker/cli/issues/4890
|
||||
func TestContainerStopConnectionError(t *testing.T) {
|
||||
client, err := NewClientWithOpts(WithAPIVersionNegotiation(), WithHost("tcp://no-such-host.invalid"))
|
||||
assert.NilError(t, err)
|
||||
|
||||
err = client.ContainerStop(context.Background(), "nothing", container.StopOptions{})
|
||||
assert.Check(t, is.ErrorType(err, IsErrConnectionFailed))
|
||||
}
|
||||
|
||||
func TestContainerStop(t *testing.T) {
|
||||
const expectedURL = "/v1.42/containers/container_id/stop"
|
||||
client := &Client{
|
||||
|
||||
@@ -30,19 +30,22 @@ const containerWaitErrorMsgLimit = 2 * 1024 /* Max: 2KiB */
|
||||
// synchronize ContainerWait with other calls, such as specifying a
|
||||
// "next-exit" condition before issuing a ContainerStart request.
|
||||
func (cli *Client) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.WaitResponse, <-chan error) {
|
||||
resultC := make(chan container.WaitResponse)
|
||||
errC := make(chan error, 1)
|
||||
|
||||
// Make sure we negotiated (if the client is configured to do so),
|
||||
// as code below contains API-version specific handling of options.
|
||||
//
|
||||
// Normally, version-negotiation (if enabled) would not happen until
|
||||
// the API request is made.
|
||||
cli.checkVersion(ctx)
|
||||
if err := cli.checkVersion(ctx); err != nil {
|
||||
errC <- err
|
||||
return resultC, errC
|
||||
}
|
||||
if versions.LessThan(cli.ClientVersion(), "1.30") {
|
||||
return cli.legacyContainerWait(ctx, containerID)
|
||||
}
|
||||
|
||||
resultC := make(chan container.WaitResponse)
|
||||
errC := make(chan error, 1)
|
||||
|
||||
query := url.Values{}
|
||||
if condition != "" {
|
||||
query.Set("condition", string(condition))
|
||||
|
||||
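Because `ContainerWait` now creates its result and error channels before calling `checkVersion`, a negotiation or connection failure is delivered on the error channel instead of being silently ignored. A hedged usage sketch (the container ID is a placeholder):

```go
// Usage sketch for the updated ContainerWait: version-negotiation and
// connection errors now arrive on the error channel, so a plain select over
// both channels handles them.
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	waitC, errC := cli.ContainerWait(context.Background(), "my-container", container.WaitConditionNotRunning)
	select {
	case res := <-waitC:
		fmt.Println("exit code:", res.StatusCode)
	case err := <-errC:
		// Includes connection failures surfaced during version negotiation.
		fmt.Println("wait failed:", err)
	}
}
```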
@@ -34,6 +34,23 @@ func TestContainerWaitError(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// TestContainerWaitConnectionError verifies that connection errors occurring
|
||||
// during API-version negotiation are not shadowed by API-version errors.
|
||||
//
|
||||
// Regression test for https://github.com/docker/cli/issues/4890
|
||||
func TestContainerWaitConnectionError(t *testing.T) {
|
||||
client, err := NewClientWithOpts(WithAPIVersionNegotiation(), WithHost("tcp://no-such-host.invalid"))
|
||||
assert.NilError(t, err)
|
||||
|
||||
resultC, errC := client.ContainerWait(context.Background(), "nothing", "")
|
||||
select {
|
||||
case result := <-resultC:
|
||||
t.Fatalf("expected to not get a wait result, got %d", result.StatusCode)
|
||||
case err := <-errC:
|
||||
assert.Check(t, is.ErrorType(err, IsErrConnectionFailed))
|
||||
}
|
||||
}
|
||||
|
||||
func TestContainerWait(t *testing.T) {
|
||||
expectedURL := "/containers/container_id/wait"
|
||||
client := &Client{
|
||||
|
||||
@@ -11,15 +11,16 @@ import (
|
||||
|
||||
// errConnectionFailed implements an error returned when connection failed.
|
||||
type errConnectionFailed struct {
|
||||
host string
|
||||
error
|
||||
}
|
||||
|
||||
// Error returns a string representation of an errConnectionFailed
|
||||
func (err errConnectionFailed) Error() string {
|
||||
if err.host == "" {
|
||||
return "Cannot connect to the Docker daemon. Is the docker daemon running on this host?"
|
||||
}
|
||||
return fmt.Sprintf("Cannot connect to the Docker daemon at %s. Is the docker daemon running?", err.host)
|
||||
func (e errConnectionFailed) Error() string {
|
||||
return e.error.Error()
|
||||
}
|
||||
|
||||
func (e errConnectionFailed) Unwrap() error {
|
||||
return e.error
|
||||
}
|
||||
|
||||
// IsErrConnectionFailed returns true if the error is caused by connection failed.
|
||||
@@ -29,7 +30,13 @@ func IsErrConnectionFailed(err error) bool {
|
||||
|
||||
// ErrorConnectionFailed returns an error with host in the error message when connection to docker daemon failed.
|
||||
func ErrorConnectionFailed(host string) error {
|
||||
return errConnectionFailed{host: host}
|
||||
var err error
|
||||
if host == "" {
|
||||
err = fmt.Errorf("Cannot connect to the Docker daemon. Is the docker daemon running on this host?")
|
||||
} else {
|
||||
err = fmt.Errorf("Cannot connect to the Docker daemon at %s. Is the docker daemon running?", host)
|
||||
}
|
||||
return errConnectionFailed{error: err}
|
||||
}
|
||||
|
||||
// IsErrNotFound returns true if the error is a NotFound error, which is returned
|
||||
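With this change, `errConnectionFailed` wraps the underlying error and implements `Unwrap`, so `IsErrConnectionFailed` keeps working while `errors.Is`/`errors.As` can reach the cause. A short usage sketch against a deliberately unreachable host, mirroring the new tests in this changeset:

```go
// Usage sketch: detecting connection failures with the reworked error type.
// The host is an intentionally unreachable placeholder, as in the tests above.
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(
		client.WithHost("tcp://no-such-host.invalid"),
		client.WithAPIVersionNegotiation(),
	)
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	if _, err := cli.Ping(context.Background()); err != nil {
		// true: the failure is reported as a connection error, and the
		// wrapped cause remains available via errors.Unwrap if needed.
		fmt.Println("connection failed:", client.IsErrConnectionFailed(err))
	}
}
```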
@@ -60,7 +67,9 @@ func (cli *Client) NewVersionError(ctx context.Context, APIrequired, feature str
|
||||
//
|
||||
// Normally, version-negotiation (if enabled) would not happen until
|
||||
// the API request is made.
|
||||
cli.checkVersion(ctx)
|
||||
if err := cli.checkVersion(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
if cli.version != "" && versions.LessThan(cli.version, APIrequired) {
|
||||
return fmt.Errorf("%q requires API version %s, but the Docker daemon API version is %s", feature, APIrequired, cli.version)
|
||||
}
|
||||
|
||||
@@ -13,14 +13,17 @@ import (
|
||||
|
||||
// ImageList returns a list of images in the docker host.
|
||||
func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]image.Summary, error) {
|
||||
var images []image.Summary
|
||||
|
||||
// Make sure we negotiated (if the client is configured to do so),
|
||||
// as code below contains API-version specific handling of options.
|
||||
//
|
||||
// Normally, version-negotiation (if enabled) would not happen until
|
||||
// the API request is made.
|
||||
cli.checkVersion(ctx)
|
||||
if err := cli.checkVersion(ctx); err != nil {
|
||||
return images, err
|
||||
}
|
||||
|
||||
var images []image.Summary
|
||||
query := url.Values{}
|
||||
|
||||
optionFilters := options.Filters
|
||||
|
||||
@@ -28,6 +28,18 @@ func TestImageListError(t *testing.T) {
|
||||
assert.Check(t, is.ErrorType(err, errdefs.IsSystem))
|
||||
}
|
||||
|
||||
// TestImageListConnectionError verifies that connection errors occurring
|
||||
// during API-version negotiation are not shadowed by API-version errors.
|
||||
//
|
||||
// Regression test for https://github.com/docker/cli/issues/4890
|
||||
func TestImageListConnectionError(t *testing.T) {
|
||||
client, err := NewClientWithOpts(WithAPIVersionNegotiation(), WithHost("tcp://no-such-host.invalid"))
|
||||
assert.NilError(t, err)
|
||||
|
||||
_, err = client.ImageList(context.Background(), types.ImageListOptions{})
|
||||
assert.Check(t, is.ErrorType(err, IsErrConnectionFailed))
|
||||
}
|
||||
|
||||
func TestImageList(t *testing.T) {
|
||||
const expectedURL = "/images/json"
|
||||
|
||||
|
||||
@@ -10,12 +10,16 @@ import (
|
||||
|
||||
// NetworkCreate creates a new network in the docker host.
|
||||
func (cli *Client) NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) {
|
||||
var response types.NetworkCreateResponse
|
||||
|
||||
// Make sure we negotiated (if the client is configured to do so),
|
||||
// as code below contains API-version specific handling of options.
|
||||
//
|
||||
// Normally, version-negotiation (if enabled) would not happen until
|
||||
// the API request is made.
|
||||
cli.checkVersion(ctx)
|
||||
if err := cli.checkVersion(ctx); err != nil {
|
||||
return response, err
|
||||
}
|
||||
|
||||
networkCreateRequest := types.NetworkCreateRequest{
|
||||
NetworkCreate: options,
|
||||
@@ -25,7 +29,6 @@ func (cli *Client) NetworkCreate(ctx context.Context, name string, options types
|
||||
networkCreateRequest.CheckDuplicate = true //nolint:staticcheck // ignore SA1019: CheckDuplicate is deprecated since API v1.44.
|
||||
}
|
||||
|
||||
var response types.NetworkCreateResponse
|
||||
serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil)
|
||||
defer ensureReaderClosed(serverResp)
|
||||
if err != nil {
|
||||
|
||||
@@ -25,6 +25,18 @@ func TestNetworkCreateError(t *testing.T) {
|
||||
assert.Check(t, is.ErrorType(err, errdefs.IsSystem))
|
||||
}
|
||||
|
||||
// TestNetworkCreateConnectionError verifies that connection errors occurring
|
||||
// during API-version negotiation are not shadowed by API-version errors.
|
||||
//
|
||||
// Regression test for https://github.com/docker/cli/issues/4890
|
||||
func TestNetworkCreateConnectionError(t *testing.T) {
|
||||
client, err := NewClientWithOpts(WithAPIVersionNegotiation(), WithHost("tcp://no-such-host.invalid"))
|
||||
assert.NilError(t, err)
|
||||
|
||||
_, err = client.NetworkCreate(context.Background(), "mynetwork", types.NetworkCreate{})
|
||||
assert.Check(t, is.ErrorType(err, IsErrConnectionFailed))
|
||||
}
|
||||
|
||||
func TestNetworkCreate(t *testing.T) {
|
||||
expectedURL := "/networks/create"
|
||||
|
||||
|
||||
@@ -14,7 +14,10 @@ import (
|
||||
// Ping pings the server and returns the value of the "Docker-Experimental",
|
||||
// "Builder-Version", "OS-Type" & "API-Version" headers. It attempts to use
|
||||
// a HEAD request on the endpoint, but falls back to GET if HEAD is not supported
|
||||
// by the daemon.
|
||||
// by the daemon. It ignores internal server errors returned by the API, which
|
||||
// may be returned if the daemon is in an unhealthy state, but returns errors
|
||||
// for other non-success status codes, failing to connect to the API, or failing
|
||||
// to parse the API response.
|
||||
func (cli *Client) Ping(ctx context.Context) (types.Ping, error) {
|
||||
var ping types.Ping
|
||||
|
||||
|
||||
@@ -53,18 +53,12 @@ func TestPingFail(t *testing.T) {
|
||||
func TestPingWithError(t *testing.T) {
|
||||
client := &Client{
|
||||
client: newMockClient(func(req *http.Request) (*http.Response, error) {
|
||||
resp := &http.Response{StatusCode: http.StatusInternalServerError}
|
||||
resp.Header = http.Header{}
|
||||
resp.Header.Set("API-Version", "awesome")
|
||||
resp.Header.Set("Docker-Experimental", "true")
|
||||
resp.Header.Set("Swarm", "active/manager")
|
||||
resp.Body = io.NopCloser(strings.NewReader("some error with the server"))
|
||||
return resp, errors.New("some error")
|
||||
return nil, errors.New("some connection error")
|
||||
}),
|
||||
}
|
||||
|
||||
ping, err := client.Ping(context.Background())
|
||||
assert.Check(t, is.ErrorContains(err, "some error"))
|
||||
assert.Check(t, is.ErrorContains(err, "some connection error"))
|
||||
assert.Check(t, is.Equal(false, ping.Experimental))
|
||||
assert.Check(t, is.Equal("", ping.APIVersion))
|
||||
var si *swarm.Status
|
||||
|
||||
@@ -134,17 +134,18 @@ func (cli *Client) sendRequest(ctx context.Context, method, path string, query u
|
||||
return resp, errdefs.FromStatusCode(err, resp.statusCode)
|
||||
}
|
||||
|
||||
// FIXME(thaJeztah): Should this actually return a serverResp when a connection error occurred?
|
||||
func (cli *Client) doRequest(req *http.Request) (serverResponse, error) {
|
||||
serverResp := serverResponse{statusCode: -1, reqURL: req.URL}
|
||||
|
||||
resp, err := cli.client.Do(req)
|
||||
if err != nil {
|
||||
if cli.scheme != "https" && strings.Contains(err.Error(), "malformed HTTP response") {
|
||||
return serverResp, fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err)
|
||||
return serverResp, errConnectionFailed{fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err)}
|
||||
}
|
||||
|
||||
if cli.scheme == "https" && strings.Contains(err.Error(), "bad certificate") {
|
||||
return serverResp, errors.Wrap(err, "the server probably has client authentication (--tlsverify) enabled; check your TLS client certification settings")
|
||||
return serverResp, errConnectionFailed{errors.Wrap(err, "the server probably has client authentication (--tlsverify) enabled; check your TLS client certification settings")}
|
||||
}
|
||||
|
||||
// Don't decorate context sentinel errors; users may be comparing to
|
||||
@@ -156,12 +157,13 @@ func (cli *Client) doRequest(req *http.Request) (serverResponse, error) {
|
||||
if uErr, ok := err.(*url.Error); ok {
|
||||
if nErr, ok := uErr.Err.(*net.OpError); ok {
|
||||
if os.IsPermission(nErr.Err) {
|
||||
return serverResp, errors.Wrapf(err, "permission denied while trying to connect to the Docker daemon socket at %v", cli.host)
|
||||
return serverResp, errConnectionFailed{errors.Wrapf(err, "permission denied while trying to connect to the Docker daemon socket at %v", cli.host)}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if nErr, ok := err.(net.Error); ok {
|
||||
// FIXME(thaJeztah): any net.Error should be considered a connection error (but we should include the original error)?
|
||||
if nErr.Timeout() {
|
||||
return serverResp, ErrorConnectionFailed(cli.host)
|
||||
}
|
||||
@@ -190,7 +192,7 @@ func (cli *Client) doRequest(req *http.Request) (serverResponse, error) {
|
||||
}
|
||||
}
|
||||
|
||||
return serverResp, errors.Wrap(err, "error during connect")
|
||||
return serverResp, errConnectionFailed{errors.Wrap(err, "error during connect")}
|
||||
}
|
||||
|
||||
if resp != nil {
|
||||
|
||||
@@ -25,7 +25,9 @@ func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec,
|
||||
//
|
||||
// Normally, version-negotiation (if enabled) would not happen until
|
||||
// the API request is made.
|
||||
cli.checkVersion(ctx)
|
||||
if err := cli.checkVersion(ctx); err != nil {
|
||||
return response, err
|
||||
}
|
||||
|
||||
// Make sure containerSpec is not nil when no runtime is set or the runtime is set to container
|
||||
if service.TaskTemplate.ContainerSpec == nil && (service.TaskTemplate.Runtime == "" || service.TaskTemplate.Runtime == swarm.RuntimeContainer) {
|
||||
|
||||
@@ -28,6 +28,18 @@ func TestServiceCreateError(t *testing.T) {
|
||||
assert.Check(t, is.ErrorType(err, errdefs.IsSystem))
|
||||
}
|
||||
|
||||
// TestServiceCreateConnectionError verifies that connection errors occurring
|
||||
// during API-version negotiation are not shadowed by API-version errors.
|
||||
//
|
||||
// Regression test for https://github.com/docker/cli/issues/4890
|
||||
func TestServiceCreateConnectionError(t *testing.T) {
|
||||
client, err := NewClientWithOpts(WithAPIVersionNegotiation(), WithHost("tcp://no-such-host.invalid"))
|
||||
assert.NilError(t, err)
|
||||
|
||||
_, err = client.ServiceCreate(context.Background(), swarm.ServiceSpec{}, types.ServiceCreateOptions{})
|
||||
assert.Check(t, is.ErrorType(err, IsErrConnectionFailed))
|
||||
}
|
||||
|
||||
func TestServiceCreate(t *testing.T) {
|
||||
expectedURL := "/services/create"
|
||||
client := &Client{
|
||||
|
||||
@@ -16,18 +16,18 @@ import (
|
||||
// It should be the value as set *before* the update. You can find this value in the Meta field
|
||||
// of swarm.Service, which can be found using ServiceInspectWithRaw.
|
||||
func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (swarm.ServiceUpdateResponse, error) {
|
||||
response := swarm.ServiceUpdateResponse{}
|
||||
|
||||
// Make sure we negotiated (if the client is configured to do so),
|
||||
// as code below contains API-version specific handling of options.
|
||||
//
|
||||
// Normally, version-negotiation (if enabled) would not happen until
|
||||
// the API request is made.
|
||||
cli.checkVersion(ctx)
|
||||
|
||||
var (
|
||||
query = url.Values{}
|
||||
response = swarm.ServiceUpdateResponse{}
|
||||
)
|
||||
if err := cli.checkVersion(ctx); err != nil {
|
||||
return response, err
|
||||
}
|
||||
|
||||
query := url.Values{}
|
||||
if options.RegistryAuthFrom != "" {
|
||||
query.Set("registryAuthFrom", options.RegistryAuthFrom)
|
||||
}
|
||||
|
||||
@@ -25,6 +25,18 @@ func TestServiceUpdateError(t *testing.T) {
|
||||
assert.Check(t, is.ErrorType(err, errdefs.IsSystem))
|
||||
}
|
||||
|
||||
// TestServiceUpdateConnectionError verifies that connection errors occurring
|
||||
// during API-version negotiation are not shadowed by API-version errors.
|
||||
//
|
||||
// Regression test for https://github.com/docker/cli/issues/4890
|
||||
func TestServiceUpdateConnectionError(t *testing.T) {
|
||||
client, err := NewClientWithOpts(WithAPIVersionNegotiation(), WithHost("tcp://no-such-host.invalid"))
|
||||
assert.NilError(t, err)
|
||||
|
||||
_, err = client.ServiceUpdate(context.Background(), "service_id", swarm.Version{}, swarm.ServiceSpec{}, types.ServiceUpdateOptions{})
|
||||
assert.Check(t, is.ErrorType(err, IsErrConnectionFailed))
|
||||
}
|
||||
|
||||
func TestServiceUpdate(t *testing.T) {
|
||||
expectedURL := "/services/service_id/update"
|
||||
|
||||
|
||||
@@ -16,7 +16,9 @@ func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool
|
||||
//
|
||||
// Normally, version-negotiation (if enabled) would not happen until
|
||||
// the API request is made.
|
||||
cli.checkVersion(ctx)
|
||||
if err := cli.checkVersion(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
if versions.GreaterThanOrEqualTo(cli.version, "1.25") {
|
||||
query.Set("force", "1")
|
||||
}
|
||||
|
||||
@@ -23,6 +23,18 @@ func TestVolumeRemoveError(t *testing.T) {
|
||||
assert.Check(t, is.ErrorType(err, errdefs.IsSystem))
|
||||
}
|
||||
|
||||
// TestVolumeRemoveConnectionError verifies that connection errors occurring
|
||||
// during API-version negotiation are not shadowed by API-version errors.
|
||||
//
|
||||
// Regression test for https://github.com/docker/cli/issues/4890
|
||||
func TestVolumeRemoveConnectionError(t *testing.T) {
|
||||
client, err := NewClientWithOpts(WithAPIVersionNegotiation(), WithHost("tcp://no-such-host.invalid"))
|
||||
assert.NilError(t, err)
|
||||
|
||||
err = client.VolumeRemove(context.Background(), "volume_id", false)
|
||||
assert.Check(t, is.ErrorType(err, IsErrConnectionFailed))
|
||||
}
|
||||
|
||||
func TestVolumeRemove(t *testing.T) {
|
||||
expectedURL := "/volumes/volume_id"
|
||||
|
||||
|
||||
@@ -301,7 +301,13 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
|
||||
routerCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
routerOptions, err := newRouterOptions(routerCtx, cli.Config, d)
|
||||
// Get the current daemon config, because the daemon sets up config
// during initialization. We cannot use cli.Config for that, as it only
// holds the config that was set by the user.
|
||||
//
|
||||
// FIXME(thaJeztah): better separate runtime and config data?
|
||||
daemonCfg := d.Config()
|
||||
routerOptions, err := newRouterOptions(routerCtx, &daemonCfg, d)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -269,6 +269,13 @@ init() {
|
||||
# - sysctl: "net.ipv4.ip_unprivileged_port_start"
|
||||
# - external binary: slirp4netns
|
||||
# - external binary: fuse-overlayfs
|
||||
|
||||
# check RootlessKit functionality. RootlessKit will print hints if something is still unsatisfied.
|
||||
# (e.g., `kernel.apparmor_restrict_unprivileged_userns` constraint)
|
||||
if ! rootlesskit true; then
|
||||
ERROR "RootlessKit failed, see the error messages and https://rootlesscontaine.rs/getting-started/common/ ."
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
# CLI subcommand: "check"
|
||||
|
||||
@@ -377,12 +377,14 @@ func (c *containerConfig) healthcheck() *enginecontainer.HealthConfig {
interval, _ := gogotypes.DurationFromProto(hcSpec.Interval)
timeout, _ := gogotypes.DurationFromProto(hcSpec.Timeout)
startPeriod, _ := gogotypes.DurationFromProto(hcSpec.StartPeriod)
startInterval, _ := gogotypes.DurationFromProto(hcSpec.StartInterval)
return &enginecontainer.HealthConfig{
Test: hcSpec.Test,
Interval: interval,
Timeout: timeout,
Retries: int(hcSpec.Retries),
StartPeriod: startPeriod,
Test: hcSpec.Test,
Interval: interval,
Timeout: timeout,
Retries: int(hcSpec.Retries),
StartPeriod: startPeriod,
StartInterval: startInterval,
}
}
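The swarm executor now propagates `StartInterval`, the probe interval used during the start period, into the engine-side health configuration. A small sketch of a `HealthConfig` that uses it (the probe command and durations are illustrative):

```go
// Sketch: a health check that probes every 5s once running, but every 500ms
// during the 30s start period, via the StartInterval field propagated above.
package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/api/types/container"
)

func main() {
	hc := &container.HealthConfig{
		Test:          []string{"CMD-SHELL", "curl -fsS http://localhost:8080/healthz || exit 1"},
		Interval:      5 * time.Second,
		Timeout:       2 * time.Second,
		Retries:       3,
		StartPeriod:   30 * time.Second,
		StartInterval: 500 * time.Millisecond,
	}
	fmt.Printf("%+v\n", hc)
}
```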
@@ -203,10 +203,10 @@ func (daemon *Daemon) setSecurityOptions(cfg *config.Config, container *containe
|
||||
return daemon.parseSecurityOpt(cfg, &container.SecurityOptions, hostConfig)
|
||||
}
|
||||
|
||||
func (daemon *Daemon) setHostConfig(container *container.Container, hostConfig *containertypes.HostConfig) error {
|
||||
func (daemon *Daemon) setHostConfig(container *container.Container, hostConfig *containertypes.HostConfig, defaultReadOnlyNonRecursive bool) error {
|
||||
// Do not lock while creating volumes since this could be calling out to external plugins
|
||||
// Don't want to block other actions, like `docker ps` because we're waiting on an external plugin
|
||||
if err := daemon.registerMountPoints(container, hostConfig); err != nil {
|
||||
if err := daemon.registerMountPoints(container, hostConfig, defaultReadOnlyNonRecursive); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
|
||||
//go:build go1.19
|
||||
|
||||
package daemon // import "github.com/docker/docker/daemon"
|
||||
|
||||
import (
|
||||
@@ -442,6 +445,11 @@ func (daemon *Daemon) updateContainerNetworkSettings(container *container.Contai
|
||||
for name, epConfig := range endpointsConfig {
|
||||
container.NetworkSettings.Networks[name] = &network.EndpointSettings{
|
||||
EndpointSettings: epConfig,
|
||||
// At this point, during container creation, epConfig.MacAddress is the
|
||||
// configured value from the API. If there is no configured value, the
|
||||
// same field will later be used to store a generated MAC address. So,
|
||||
// remember the requested address now.
|
||||
DesiredMacAddress: epConfig.MacAddress,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -508,7 +516,7 @@ func (daemon *Daemon) allocateNetwork(cfg *config.Config, container *container.C
|
||||
defaultNetName := runconfig.DefaultDaemonNetworkMode().NetworkName()
|
||||
if nConf, ok := container.NetworkSettings.Networks[defaultNetName]; ok {
|
||||
cleanOperationalData(nConf)
|
||||
if err := daemon.connectToNetwork(cfg, container, defaultNetName, nConf.EndpointSettings, updateSettings); err != nil {
|
||||
if err := daemon.connectToNetwork(cfg, container, defaultNetName, nConf, updateSettings); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -525,7 +533,7 @@ func (daemon *Daemon) allocateNetwork(cfg *config.Config, container *container.C
|
||||
|
||||
for netName, epConf := range networks {
|
||||
cleanOperationalData(epConf)
|
||||
if err := daemon.connectToNetwork(cfg, container, netName, epConf.EndpointSettings, updateSettings); err != nil {
|
||||
if err := daemon.connectToNetwork(cfg, container, netName, epConf, updateSettings); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -634,18 +642,22 @@ func cleanOperationalData(es *network.EndpointSettings) {
|
||||
es.IPv6Gateway = ""
|
||||
es.GlobalIPv6Address = ""
|
||||
es.GlobalIPv6PrefixLen = 0
|
||||
es.MacAddress = ""
|
||||
if es.IPAMOperational {
|
||||
es.IPAMConfig = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (daemon *Daemon) updateNetworkConfig(container *container.Container, n *libnetwork.Network, endpointConfig *networktypes.EndpointSettings, updateSettings bool) error {
|
||||
if containertypes.NetworkMode(n.Name()).IsUserDefined() {
|
||||
// Set up DNS names for a user defined network, and for the default 'nat'
|
||||
// network on Windows (IsBridge() returns true for nat).
|
||||
if containertypes.NetworkMode(n.Name()).IsUserDefined() ||
|
||||
(serviceDiscoveryOnDefaultNetwork() && containertypes.NetworkMode(n.Name()).IsBridge()) {
|
||||
endpointConfig.DNSNames = buildEndpointDNSNames(container, endpointConfig.Aliases)
|
||||
}
|
||||
|
||||
if err := validateEndpointSettings(n, n.Name(), endpointConfig); err != nil {
|
||||
return err
|
||||
return errdefs.InvalidParameter(err)
|
||||
}
|
||||
|
||||
if updateSettings {
|
||||
@@ -679,7 +691,7 @@ func buildEndpointDNSNames(ctr *container.Container, aliases []string) []string
|
||||
return sliceutil.Dedup(dnsNames)
|
||||
}
|
||||
|
||||
func (daemon *Daemon) connectToNetwork(cfg *config.Config, container *container.Container, idOrName string, endpointConfig *networktypes.EndpointSettings, updateSettings bool) (retErr error) {
|
||||
func (daemon *Daemon) connectToNetwork(cfg *config.Config, container *container.Container, idOrName string, endpointConfig *network.EndpointSettings, updateSettings bool) (retErr error) {
|
||||
start := time.Now()
|
||||
if container.HostConfig.NetworkMode.IsContainer() {
|
||||
return runconfig.ErrConflictSharedNetwork
|
||||
@@ -689,10 +701,12 @@ func (daemon *Daemon) connectToNetwork(cfg *config.Config, container *container.
|
||||
return nil
|
||||
}
|
||||
if endpointConfig == nil {
|
||||
endpointConfig = &networktypes.EndpointSettings{}
|
||||
endpointConfig = &network.EndpointSettings{
|
||||
EndpointSettings: &networktypes.EndpointSettings{},
|
||||
}
|
||||
}
|
||||
|
||||
n, nwCfg, err := daemon.findAndAttachNetwork(container, idOrName, endpointConfig)
|
||||
n, nwCfg, err := daemon.findAndAttachNetwork(container, idOrName, endpointConfig.EndpointSettings)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -707,11 +721,11 @@ func (daemon *Daemon) connectToNetwork(cfg *config.Config, container *container.
|
||||
}
|
||||
}
|
||||
|
||||
var operIPAM bool
|
||||
endpointConfig.IPAMOperational = false
|
||||
if nwCfg != nil {
|
||||
if epConfig, ok := nwCfg.EndpointsConfig[nwName]; ok {
|
||||
if endpointConfig.IPAMConfig == nil || (endpointConfig.IPAMConfig.IPv4Address == "" && endpointConfig.IPAMConfig.IPv6Address == "" && len(endpointConfig.IPAMConfig.LinkLocalIPs) == 0) {
|
||||
operIPAM = true
|
||||
endpointConfig.IPAMOperational = true
|
||||
}
|
||||
|
||||
// copy IPAMConfig and NetworkID from epConfig via AttachNetwork
|
||||
@@ -720,7 +734,7 @@ func (daemon *Daemon) connectToNetwork(cfg *config.Config, container *container.
|
||||
}
|
||||
}
|
||||
|
||||
if err := daemon.updateNetworkConfig(container, n, endpointConfig, updateSettings); err != nil {
|
||||
if err := daemon.updateNetworkConfig(container, n, endpointConfig.EndpointSettings, updateSettings); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -743,10 +757,7 @@ func (daemon *Daemon) connectToNetwork(cfg *config.Config, container *container.
|
||||
}
|
||||
}
|
||||
}()
|
||||
container.NetworkSettings.Networks[nwName] = &network.EndpointSettings{
|
||||
EndpointSettings: endpointConfig,
|
||||
IPAMOperational: operIPAM,
|
||||
}
|
||||
container.NetworkSettings.Networks[nwName] = endpointConfig
|
||||
|
||||
delete(container.NetworkSettings.Networks, n.ID())
|
||||
|
||||
@@ -1050,7 +1061,10 @@ func (daemon *Daemon) ConnectToNetwork(container *container.Container, idOrName
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if err := daemon.connectToNetwork(&daemon.config().Config, container, idOrName, endpointConfig, true); err != nil {
|
||||
epc := &network.EndpointSettings{
|
||||
EndpointSettings: endpointConfig,
|
||||
}
|
||||
if err := daemon.connectToNetwork(&daemon.config().Config, container, idOrName, epc, true); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"github.com/docker/docker/builder"
|
||||
"github.com/docker/docker/errdefs"
|
||||
"github.com/docker/docker/image"
|
||||
"github.com/docker/docker/image/cache"
|
||||
"github.com/opencontainers/go-digest"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
@@ -53,7 +54,7 @@ type localCache struct {
|
||||
imageService *ImageService
|
||||
}
|
||||
|
||||
func (ic *localCache) GetCache(parentID string, cfg *container.Config) (imageID string, err error) {
|
||||
func (ic *localCache) GetCache(parentID string, cfg *container.Config, platform ocispec.Platform) (imageID string, err error) {
|
||||
ctx := context.TODO()
|
||||
|
||||
var children []image.ID
|
||||
@@ -98,9 +99,12 @@ func (ic *localCache) GetCache(parentID string, cfg *container.Config) (imageID
|
||||
return "", err
|
||||
}
|
||||
|
||||
if isMatch(&cc, cfg) {
|
||||
childImage, err := ic.imageService.GetImage(ctx, child.String(), imagetype.GetImageOpts{})
|
||||
if cache.CompareConfig(&cc, cfg) {
|
||||
childImage, err := ic.imageService.GetImage(ctx, child.String(), imagetype.GetImageOpts{Platform: &platform})
|
||||
if err != nil {
|
||||
if errdefs.IsNotFound(err) {
|
||||
continue
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
|
||||
@@ -123,10 +127,10 @@ type imageCache struct {
|
||||
lc *localCache
|
||||
}
|
||||
|
||||
func (ic *imageCache) GetCache(parentID string, cfg *container.Config) (imageID string, err error) {
|
||||
func (ic *imageCache) GetCache(parentID string, cfg *container.Config, platform ocispec.Platform) (imageID string, err error) {
|
||||
ctx := context.TODO()
|
||||
|
||||
imgID, err := ic.lc.GetCache(parentID, cfg)
|
||||
imgID, err := ic.lc.GetCache(parentID, cfg, platform)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@@ -142,7 +146,7 @@ func (ic *imageCache) GetCache(parentID string, cfg *container.Config) (imageID
|
||||
lenHistory := 0
|
||||
|
||||
if parentID != "" {
|
||||
parent, err = ic.imageService.GetImage(ctx, parentID, imagetype.GetImageOpts{})
|
||||
parent, err = ic.imageService.GetImage(ctx, parentID, imagetype.GetImageOpts{Platform: &platform})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@@ -206,61 +210,3 @@ func (ic *imageCache) isParent(ctx context.Context, img *image.Image, parentID i
|
||||
}
|
||||
return ic.isParent(ctx, p, parentID)
|
||||
}
|
||||
|
||||
// compare two Config struct. Do not compare the "Image" nor "Hostname" fields
|
||||
// If OpenStdin is set, then it differs
|
||||
func isMatch(a, b *container.Config) bool {
|
||||
if a == nil || b == nil ||
|
||||
a.OpenStdin || b.OpenStdin {
|
||||
return false
|
||||
}
|
||||
if a.AttachStdout != b.AttachStdout ||
|
||||
a.AttachStderr != b.AttachStderr ||
|
||||
a.User != b.User ||
|
||||
a.OpenStdin != b.OpenStdin ||
|
||||
a.Tty != b.Tty {
|
||||
return false
|
||||
}
|
||||
|
||||
if len(a.Cmd) != len(b.Cmd) ||
|
||||
len(a.Env) != len(b.Env) ||
|
||||
len(a.Labels) != len(b.Labels) ||
|
||||
len(a.ExposedPorts) != len(b.ExposedPorts) ||
|
||||
len(a.Entrypoint) != len(b.Entrypoint) ||
|
||||
len(a.Volumes) != len(b.Volumes) {
|
||||
return false
|
||||
}
|
||||
|
||||
for i := 0; i < len(a.Cmd); i++ {
|
||||
if a.Cmd[i] != b.Cmd[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
for i := 0; i < len(a.Env); i++ {
|
||||
if a.Env[i] != b.Env[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
for k, v := range a.Labels {
|
||||
if v != b.Labels[k] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
for k := range a.ExposedPorts {
|
||||
if _, exists := b.ExposedPorts[k]; !exists {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < len(a.Entrypoint); i++ {
|
||||
if a.Entrypoint[i] != b.Entrypoint[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
for key := range a.Volumes {
|
||||
if _, exists := b.Volumes[key]; !exists {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
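The local `isMatch` helper deleted above is replaced by `cache.CompareConfig` from `github.com/docker/docker/image/cache`, visible in the import and call earlier in this hunk, which performs the same field-by-field comparison. A minimal, hedged usage sketch (the configs are illustrative, and the exact comparison rules are those of the shared helper, not restated here):

```go
// Minimal sketch of the shared comparison now used instead of isMatch.
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/image/cache"
)

func main() {
	a := &container.Config{Cmd: []string{"echo", "hello"}, Env: []string{"FOO=bar"}}
	b := &container.Config{Cmd: []string{"echo", "hello"}, Env: []string{"FOO=bar"}}
	c := &container.Config{Cmd: []string{"echo", "bye"}}

	fmt.Println(cache.CompareConfig(a, b)) // true: same command and environment
	fmt.Println(cache.CompareConfig(a, c)) // false: commands differ
}
```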
@@ -12,7 +12,7 @@ import (
|
||||
|
||||
cerrdefs "github.com/containerd/containerd/errdefs"
|
||||
containerdimages "github.com/containerd/containerd/images"
|
||||
cplatforms "github.com/containerd/containerd/platforms"
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/containerd/log"
|
||||
"github.com/distribution/reference"
|
||||
imagetype "github.com/docker/docker/api/types/image"
|
||||
@@ -20,7 +20,6 @@ import (
|
||||
"github.com/docker/docker/errdefs"
|
||||
"github.com/docker/docker/image"
|
||||
imagespec "github.com/docker/docker/image/spec/specs-go/v1"
|
||||
"github.com/docker/docker/pkg/platforms"
|
||||
"github.com/opencontainers/go-digest"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
@@ -38,49 +37,15 @@ func (i *ImageService) GetImage(ctx context.Context, refOrID string, options ima
|
||||
return nil, err
|
||||
}
|
||||
|
||||
platform := platforms.AllPlatformsWithPreference(cplatforms.Default())
|
||||
platform := matchAllWithPreference(platforms.Default())
|
||||
if options.Platform != nil {
|
||||
platform = cplatforms.OnlyStrict(*options.Platform)
|
||||
platform = platforms.OnlyStrict(*options.Platform)
|
||||
}
|
||||
|
||||
var presentImages []imagespec.DockerOCIImage
|
||||
err = i.walkImageManifests(ctx, desc, func(img *ImageManifest) error {
|
||||
conf, err := img.Config(ctx)
|
||||
if err != nil {
|
||||
if cerrdefs.IsNotFound(err) {
|
||||
log.G(ctx).WithFields(log.Fields{
|
||||
"manifestDescriptor": img.Target(),
|
||||
}).Debug("manifest was present, but accessing its config failed, ignoring")
|
||||
return nil
|
||||
}
|
||||
return errdefs.System(fmt.Errorf("failed to get config descriptor: %w", err))
|
||||
}
|
||||
|
||||
var ociimage imagespec.DockerOCIImage
|
||||
if err := readConfig(ctx, i.content, conf, &ociimage); err != nil {
|
||||
if cerrdefs.IsNotFound(err) {
|
||||
log.G(ctx).WithFields(log.Fields{
|
||||
"manifestDescriptor": img.Target(),
|
||||
"configDescriptor": conf,
|
||||
}).Debug("manifest present, but its config is missing, ignoring")
|
||||
return nil
|
||||
}
|
||||
return errdefs.System(fmt.Errorf("failed to read config of the manifest %v: %w", img.Target().Digest, err))
|
||||
}
|
||||
presentImages = append(presentImages, ociimage)
|
||||
return nil
|
||||
})
|
||||
presentImages, err := i.presentImages(ctx, desc, refOrID, platform)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(presentImages) == 0 {
|
||||
ref, _ := reference.ParseAnyReference(refOrID)
|
||||
return nil, images.ErrImageDoesNotExist{Ref: ref}
|
||||
}
|
||||
|
||||
sort.SliceStable(presentImages, func(i, j int) bool {
|
||||
return platform.Less(presentImages[i].Platform, presentImages[j].Platform)
|
||||
})
|
||||
ociimage := presentImages[0]
|
||||
|
||||
img := dockerOciImageToDockerImagePartial(image.ID(desc.Target.Digest), ociimage)
|
||||
@@ -157,10 +122,60 @@ func (i *ImageService) GetImage(ctx context.Context, refOrID string, options ima
|
||||
return img, nil
|
||||
}
|
||||
|
||||
// presentImages returns the images that are present in the content store,
|
||||
// manifests without a config are ignored.
|
||||
// The images are filtered and sorted by platform preference.
|
||||
func (i *ImageService) presentImages(ctx context.Context, desc containerdimages.Image, refOrID string, platform platforms.MatchComparer) ([]imagespec.DockerOCIImage, error) {
|
||||
var presentImages []imagespec.DockerOCIImage
|
||||
err := i.walkImageManifests(ctx, desc, func(img *ImageManifest) error {
|
||||
conf, err := img.Config(ctx)
|
||||
if err != nil {
|
||||
if cerrdefs.IsNotFound(err) {
|
||||
log.G(ctx).WithFields(log.Fields{
|
||||
"manifestDescriptor": img.Target(),
|
||||
}).Debug("manifest was present, but accessing its config failed, ignoring")
|
||||
return nil
|
||||
}
|
||||
return errdefs.System(fmt.Errorf("failed to get config descriptor: %w", err))
|
||||
}
|
||||
|
||||
var ociimage imagespec.DockerOCIImage
|
||||
if err := readConfig(ctx, i.content, conf, &ociimage); err != nil {
|
||||
if errdefs.IsNotFound(err) {
|
||||
log.G(ctx).WithFields(log.Fields{
|
||||
"manifestDescriptor": img.Target(),
|
||||
"configDescriptor": conf,
|
||||
}).Debug("manifest present, but its config is missing, ignoring")
|
||||
return nil
|
||||
}
|
||||
return errdefs.System(fmt.Errorf("failed to read config of the manifest %v: %w", img.Target().Digest, err))
|
||||
}
|
||||
|
||||
if platform.Match(ociimage.Platform) {
|
||||
presentImages = append(presentImages, ociimage)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(presentImages) == 0 {
|
||||
ref, _ := reference.ParseAnyReference(refOrID)
|
||||
return nil, images.ErrImageDoesNotExist{Ref: ref}
|
||||
}
|
||||
|
||||
sort.SliceStable(presentImages, func(i, j int) bool {
|
||||
return platform.Less(presentImages[i].Platform, presentImages[j].Platform)
|
||||
})
|
||||
|
||||
return presentImages, nil
|
||||
}
|
||||
|
||||
func (i *ImageService) GetImageManifest(ctx context.Context, refOrID string, options imagetype.GetImageOpts) (*ocispec.Descriptor, error) {
|
||||
platform := platforms.AllPlatformsWithPreference(cplatforms.Default())
|
||||
platform := matchAllWithPreference(platforms.Default())
|
||||
if options.Platform != nil {
|
||||
platform = cplatforms.Only(*options.Platform)
|
||||
platform = platforms.Only(*options.Platform)
|
||||
}
|
||||
|
||||
cs := i.client.ContentStore()
|
||||
@@ -188,9 +203,9 @@ func (i *ImageService) GetImageManifest(ctx context.Context, refOrID string, opt
|
||||
|
||||
if options.Platform != nil {
|
||||
if plat == nil {
|
||||
return nil, errdefs.NotFound(errors.Errorf("image with reference %s was found but does not match the specified platform: wanted %s, actual: nil", refOrID, cplatforms.Format(*options.Platform)))
|
||||
return nil, errdefs.NotFound(errors.Errorf("image with reference %s was found but does not match the specified platform: wanted %s, actual: nil", refOrID, platforms.Format(*options.Platform)))
|
||||
} else if !platform.Match(*plat) {
|
||||
return nil, errdefs.NotFound(errors.Errorf("image with reference %s was found but does not match the specified platform: wanted %s, actual: %s", refOrID, cplatforms.Format(*options.Platform), cplatforms.Format(*plat)))
|
||||
return nil, errdefs.NotFound(errors.Errorf("image with reference %s was found but does not match the specified platform: wanted %s, actual: %s", refOrID, platforms.Format(*options.Platform), platforms.Format(*plat)))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -219,7 +234,7 @@ func (i *ImageService) GetImageManifest(ctx context.Context, refOrID string, opt
|
||||
}
|
||||
|
||||
// size returns the total size of the image's packed resources.
|
||||
func (i *ImageService) size(ctx context.Context, desc ocispec.Descriptor, platform cplatforms.MatchComparer) (int64, error) {
|
||||
func (i *ImageService) size(ctx context.Context, desc ocispec.Descriptor, platform platforms.MatchComparer) (int64, error) {
|
||||
var size int64
|
||||
|
||||
cs := i.client.ContentStore()
|
||||
|
||||
@@ -12,7 +12,7 @@ import (
containerdimages "github.com/containerd/containerd/images"
"github.com/containerd/containerd/images/archive"
"github.com/containerd/containerd/leases"
cplatforms "github.com/containerd/containerd/platforms"
"github.com/containerd/containerd/platforms"
"github.com/containerd/log"
"github.com/distribution/reference"
"github.com/docker/docker/api/types/events"
@@ -20,7 +20,6 @@ import (
"github.com/docker/docker/daemon/images"
"github.com/docker/docker/errdefs"
dockerarchive "github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/platforms"
"github.com/docker/docker/pkg/streamformatter"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
@@ -49,7 +48,7 @@ func (i *ImageService) PerformWithBaseFS(ctx context.Context, c *container.Conta
//
// TODO(thaJeztah): produce JSON stream progress response and image events; see https://github.com/moby/moby/issues/43910
func (i *ImageService) ExportImage(ctx context.Context, names []string, outStream io.Writer) error {
platform := platforms.AllPlatformsWithPreference(cplatforms.Default())
platform := matchAllWithPreference(platforms.Default())
opts := []archive.ExportOpt{
archive.WithSkipNonDistributableBlobs(),

@@ -169,6 +168,12 @@ func (i *ImageService) ExportImage(ctx context.Context, names []string, outStrea

ref, refErr := reference.ParseNormalizedNamed(name)

if refErr == nil {
if _, ok := ref.(reference.Digested); ok {
specificDigestResolved = true
}
}

if resolveErr != nil || !specificDigestResolved {
// Name didn't resolve to anything, or name wasn't explicitly referencing a digest
if refErr == nil && reference.IsNameOnly(ref) {
@@ -236,7 +241,7 @@ func (i *ImageService) LoadImage(ctx context.Context, inTar io.ReadCloser, outSt

opts := []containerd.ImportOpt{
// TODO(vvoland): Allow user to pass platform
containerd.WithImportPlatform(cplatforms.All),
containerd.WithImportPlatform(platforms.All),

containerd.WithSkipMissing(),


@@ -2,19 +2,14 @@ package containerd

import (
"context"
"sort"

"github.com/containerd/containerd/images"
containerdimages "github.com/containerd/containerd/images"
cplatforms "github.com/containerd/containerd/platforms"
"github.com/containerd/containerd/platforms"
"github.com/containerd/log"
"github.com/distribution/reference"
imagetype "github.com/docker/docker/api/types/image"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/pkg/platforms"
"github.com/opencontainers/go-digest"
"github.com/opencontainers/image-spec/identity"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)

@@ -26,33 +21,13 @@ func (i *ImageService) ImageHistory(ctx context.Context, name string) ([]*imaget
return nil, err
}

cs := i.client.ContentStore()
// TODO: pass platform in from the CLI
platform := platforms.AllPlatformsWithPreference(cplatforms.Default())
platform := matchAllWithPreference(platforms.Default())

var presentImages []ocispec.Image
err = i.walkImageManifests(ctx, img, func(img *ImageManifest) error {
conf, err := img.Config(ctx)
if err != nil {
return err
}
var ociimage ocispec.Image
if err := readConfig(ctx, cs, conf, &ociimage); err != nil {
return err
}
presentImages = append(presentImages, ociimage)
return nil
})
presentImages, err := i.presentImages(ctx, img, name, platform)
if err != nil {
return nil, err
}
if len(presentImages) == 0 {
return nil, errdefs.NotFound(errors.New("failed to find image manifest"))
}

sort.SliceStable(presentImages, func(i, j int) bool {
return platform.Less(presentImages[i].Platform, presentImages[j].Platform)
})
ociimage := presentImages[0]

var (
@@ -97,7 +72,7 @@ func (i *ImageService) ImageHistory(ctx context.Context, name string) ([]*imaget
}}, history...)
}

findParents := func(img images.Image) []images.Image {
findParents := func(img containerdimages.Image) []containerdimages.Image {
imgs, err := i.getParentsByBuilderLabel(ctx, img)
if err != nil {
log.G(ctx).WithFields(log.Fields{
@@ -142,7 +117,7 @@ func (i *ImageService) ImageHistory(ctx context.Context, name string) ([]*imaget
return history, nil
}

func getImageTags(ctx context.Context, imgs []images.Image) []string {
func getImageTags(ctx context.Context, imgs []containerdimages.Image) []string {
var tags []string
for _, img := range imgs {
if isDanglingImage(img) {

@@ -8,7 +8,7 @@ import (
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/images"
containerdimages "github.com/containerd/containerd/images"
cplatforms "github.com/containerd/containerd/platforms"
"github.com/containerd/containerd/platforms"
"github.com/docker/docker/errdefs"
"github.com/moby/buildkit/util/attestation"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
@@ -65,7 +65,7 @@ func (i *ImageService) NewImageManifest(ctx context.Context, img containerdimage
parent := img.Target
img.Target = manifestDesc

c8dImg := containerd.NewImageWithPlatform(i.client, img, cplatforms.All)
c8dImg := containerd.NewImageWithPlatform(i.client, img, platforms.All)
return &ImageManifest{
Image: c8dImg,
RealTarget: parent,
@@ -122,7 +122,7 @@ func (im *ImageManifest) Manifest(ctx context.Context) (ocispec.Manifest, error)

func (im *ImageManifest) CheckContentAvailable(ctx context.Context) (bool, error) {
// The target is already a platform-specific manifest, so no need to match platform.
pm := cplatforms.All
pm := platforms.All

available, _, _, missing, err := containerdimages.Check(ctx, im.ContentStore(), im.Target(), pm)
if err != nil {

@@ -21,6 +21,7 @@ import (
"github.com/docker/docker/internal/compatcontext"
"github.com/docker/docker/pkg/progress"
"github.com/docker/docker/pkg/streamformatter"
"github.com/docker/docker/pkg/stringid"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
@@ -117,6 +118,10 @@ func (i *ImageService) pullTag(ctx context.Context, ref reference.Named, platfor
progress.Message(out, "", distribution.DeprecatedSchema1ImageMessage(ref))
sentSchema1Deprecation = true
}
if images.IsLayerType(desc.MediaType) {
id := stringid.TruncateID(desc.Digest.String())
progress.Update(out, id, "Pulling fs layer")
}
if images.IsManifestType(desc.MediaType) {
if !sentPullingFrom {
var tagOrDigest string

@@ -28,7 +28,7 @@ func (i *ImageService) PrepareSnapshot(ctx context.Context, id string, parentIma

cs := i.client.ContentStore()

matcher := platforms.Default()
matcher := matchAllWithPreference(platforms.Default())
if platform != nil {
matcher = platforms.Only(*platform)
}

@@ -1,17 +1,17 @@
package platforms
package containerd

import (
cplatforms "github.com/containerd/containerd/platforms"
"github.com/containerd/containerd/platforms"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

type allPlatformsWithPreferenceMatcher struct {
preferred cplatforms.MatchComparer
preferred platforms.MatchComparer
}

// AllPlatformsWithPreference will return a platform matcher that matches all
// matchAllWithPreference will return a platform matcher that matches all
// platforms but will order platforms matching the preferred matcher first.
func AllPlatformsWithPreference(preferred cplatforms.MatchComparer) cplatforms.MatchComparer {
func matchAllWithPreference(preferred platforms.MatchComparer) platforms.MatchComparer {
return allPlatformsWithPreferenceMatcher{
preferred: preferred,
}
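The hunk above moves `AllPlatformsWithPreference` into the containerd image-service package and renames it to the unexported `matchAllWithPreference`. As a rough, self-contained sketch (not the moby code itself), a matcher like this can satisfy containerd's `platforms.MatchComparer` by accepting every platform in `Match` and deferring ordering to the preferred matcher in `Less`:

```go
// Sketch only: illustrates the match-everything-but-prefer-one-platform idea,
// assuming nothing beyond the public containerd platforms API.
package main

import (
	"fmt"

	"github.com/containerd/containerd/platforms"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

type allWithPreference struct {
	preferred platforms.MatchComparer
}

// Match accepts every platform.
func (m allWithPreference) Match(_ ocispec.Platform) bool { return true }

// Less delegates ordering to the preferred matcher, so preferred platforms sort first.
func (m allWithPreference) Less(p1, p2 ocispec.Platform) bool {
	return m.preferred.Less(p1, p2)
}

func main() {
	var matcher platforms.MatchComparer = allWithPreference{preferred: platforms.Default()}
	// Any platform matches, even one that is not the host platform.
	fmt.Println(matcher.Match(ocispec.Platform{OS: "linux", Architecture: "s390x"})) // true
}
```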
@@ -135,6 +135,9 @@ func (p pullProgress) UpdateProgress(ctx context.Context, ongoing *jobs, out pro
}
key := remotes.MakeRefKey(ctx, j)
if info, ok := pulling[key]; ok {
if info.Offset == 0 {
continue
}
out.WriteProgress(progress.Progress{
ID: stringid.TruncateID(j.Digest.Encoded()),
Action: "Downloading",

@@ -218,7 +218,7 @@ func (daemon *Daemon) create(ctx context.Context, daemonCfg *config.Config, opts
return nil, err
}

if err := daemon.setHostConfig(ctr, opts.params.HostConfig); err != nil {
if err := daemon.setHostConfig(ctr, opts.params.HostConfig, opts.params.DefaultReadOnlyNonRecursive); err != nil {
return nil, err
}


@@ -180,6 +180,11 @@ func (daemon *Daemon) config() *configStore {
return cfg
}

// Config returns daemon's config.
func (daemon *Daemon) Config() config.Config {
return daemon.config().Config
}

// HasExperimental returns whether the experimental features of the daemon are enabled or not
func (daemon *Daemon) HasExperimental() bool {
return daemon.config().Experimental

@@ -406,7 +406,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr
return err
}
if lower != "" {
if err := ioutils.AtomicWriteFile(path.Join(dir, lowerFile), []byte(lower), 0o666); err != nil {
if err := ioutils.AtomicWriteFile(path.Join(dir, lowerFile), []byte(lower), 0o644); err != nil {
return err
}
}

@@ -255,6 +255,9 @@ func (i *ImageService) CreateImage(ctx context.Context, config []byte, parent st
return nil, errors.Wrapf(err, "failed to set parent %s", parent)
}
}
if err := i.imageStore.SetBuiltLocally(id); err != nil {
return nil, errors.Wrapf(err, "failed to mark image %s as built locally", id)
}

return i.imageStore.Get(id)
}

@@ -62,6 +62,9 @@ func (i *ImageService) CommitImage(ctx context.Context, c backend.CommitConfig)
if err != nil {
return "", err
}
if err := i.imageStore.SetBuiltLocally(id); err != nil {
return "", err
}

if c.ParentImageID != "" {
if err := i.imageStore.SetParent(id, image.ID(c.ParentImageID)); err != nil {

@@ -1,3 +1,6 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
//go:build go1.19

package daemon // import "github.com/docker/docker/daemon"

import (
@@ -8,6 +11,7 @@ import (

"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
containertypes "github.com/docker/docker/api/types/container"
networktypes "github.com/docker/docker/api/types/network"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/api/types/versions/v1p20"
@@ -36,8 +40,10 @@ func (daemon *Daemon) ContainerInspect(ctx context.Context, name string, size bo
}

shortCID := stringid.TruncateID(ctr.ID)
for _, ep := range ctr.NetworkSettings.Networks {
ep.Aliases = sliceutil.Dedup(append(ep.Aliases, shortCID, ctr.Config.Hostname))
for nwName, ep := range ctr.NetworkSettings.Networks {
if containertypes.NetworkMode(nwName).IsUserDefined() {
ep.Aliases = sliceutil.Dedup(append(ep.Aliases, shortCID, ctr.Config.Hostname))
}
}

return ctr, nil
@@ -159,8 +165,12 @@ func (daemon *Daemon) getInspectData(daemonCfg *config.Config, container *contai
// unversioned API endpoints.
if container.Config != nil && container.Config.MacAddress == "" { //nolint:staticcheck // ignore SA1019: field is deprecated, but still used on API < v1.44.
if nwm := hostConfig.NetworkMode; nwm.IsDefault() || nwm.IsBridge() || nwm.IsUserDefined() {
if epConf, ok := container.NetworkSettings.Networks[nwm.NetworkName()]; ok {
container.Config.MacAddress = epConf.MacAddress //nolint:staticcheck // ignore SA1019: field is deprecated, but still used on API < v1.44.
name := nwm.NetworkName()
if nwm.IsDefault() {
name = daemon.netController.Config().DefaultNetwork
}
if epConf, ok := container.NetworkSettings.Networks[name]; ok {
container.Config.MacAddress = epConf.DesiredMacAddress //nolint:staticcheck // ignore SA1019: field is deprecated, but still used on API < v1.44.
}
}
}

@@ -22,6 +22,7 @@ import (

"code.cloudfoundry.org/clock"
"github.com/coreos/go-systemd/v22/journal"
"github.com/google/uuid"
"gotest.tools/v3/assert"

"github.com/docker/docker/daemon/logger/journald/internal/export"
@@ -67,6 +68,14 @@ type Sender struct {
// timestamp in zero time after the SYSLOG_TIMESTAMP value was set,
// which is higly unrealistic in practice.
AssignEventTimestampFromSyslogTimestamp bool
// Boot ID for journal entries. Required by systemd-journal-remote as of
// https://github.com/systemd/systemd/commit/1eede158519e4e5ed22738c90cb57a91dbecb7f2
// (systemd 255).
BootID uuid.UUID

// When set, Send will act as a test helper and redirect
// systemd-journal-remote command output to the test log.
TB testing.TB
}

// New constructs a new Sender which will write journal entries to outpath. The
@@ -82,6 +91,7 @@ func New(outpath string) (*Sender, error) {
CmdName: p,
OutputPath: outpath,
Clock: clock.NewClock(),
BootID: uuid.New(), // UUIDv4, like systemd itself generates for sd_id128 values.
}
return sender, nil
}
@@ -95,6 +105,7 @@ func NewT(t *testing.T, outpath string) *Sender {
t.Skip(err)
}
assert.NilError(t, err)
s.TB = t
return s
}

@@ -103,6 +114,9 @@ var validVarName = regexp.MustCompile("^[A-Z0-9][A-Z0-9_]*$")
// Send is a drop-in replacement for
// github.com/coreos/go-systemd/v22/journal.Send.
func (s *Sender) Send(message string, priority journal.Priority, vars map[string]string) error {
if s.TB != nil {
s.TB.Helper()
}
var buf bytes.Buffer
// https://systemd.io/JOURNAL_EXPORT_FORMATS/ says "if you are
// generating this format you shouldn’t care about these special
@@ -121,6 +135,9 @@ func (s *Sender) Send(message string, priority journal.Priority, vars map[string
if err := export.WriteField(&buf, "__REALTIME_TIMESTAMP", strconv.FormatInt(ts.UnixMicro(), 10)); err != nil {
return fmt.Errorf("fake: error writing entry to systemd-journal-remote: %w", err)
}
if err := export.WriteField(&buf, "_BOOT_ID", fmt.Sprintf("%x", [16]byte(s.BootID))); err != nil {
return fmt.Errorf("fake: error writing entry to systemd-journal-remote: %w", err)
}
if err := export.WriteField(&buf, "MESSAGE", message); err != nil {
return fmt.Errorf("fake: error writing entry to systemd-journal-remote: %w", err)
}
@@ -143,6 +160,16 @@ func (s *Sender) Send(message string, priority journal.Priority, vars map[string
// has been flushed to disk when Send returns.
cmd := exec.Command(s.CmdName, "--output", s.OutputPath, "-")
cmd.Stdin = &buf

if s.TB != nil {
out, err := cmd.CombinedOutput()
s.TB.Logf("[systemd-journal-remote] %s", out)
var exitErr *exec.ExitError
if errors.As(err, &exitErr) {
s.TB.Logf("systemd-journal-remote exit status: %d", exitErr.ExitCode())
}
return err
}
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
return cmd.Run()

@@ -4,7 +4,6 @@ package journald // import "github.com/docker/docker/daemon/logger/journald"

import (
"context"
"errors"
"runtime"
"strconv"
"sync/atomic"
@@ -17,7 +16,10 @@ import (
"github.com/docker/docker/daemon/logger/journald/internal/sdjournal"
)

const closedDrainTimeout = 5 * time.Second
const (
closedDrainTimeout = 5 * time.Second
waitInterval = 250 * time.Millisecond
)

// Fields which we know are not user-provided attribute fields.
var wellKnownFields = map[string]bool{
@@ -46,13 +48,13 @@ var wellKnownFields = map[string]bool{
}

type reader struct {
s *journald
j *sdjournal.Journal
logWatcher *logger.LogWatcher
config logger.ReadConfig
maxOrdinal uint64
initialized bool
ready chan struct{}
s *journald
j *sdjournal.Journal
logWatcher *logger.LogWatcher
config logger.ReadConfig
maxOrdinal uint64
ready chan struct{}
drainDeadline time.Time
}

func getMessage(d map[string]string) (line []byte, ok bool) {
@@ -99,101 +101,168 @@ func getAttrs(d map[string]string) []backend.LogAttr {
return attrs
}

// errDrainDone is the error returned by drainJournal to signal that there are
// no more log entries to send to the log watcher.
var errDrainDone = errors.New("journald drain done")
// The SeekXYZ() methods all move the journal read pointer to a "conceptual"
// position which does not correspond to any journal entry. A subsequent call to
// Next(), Previous() or similar is necessary to resolve the read pointer to a
// discrete entry.
// https://github.com/systemd/systemd/pull/5930#issuecomment-300878104
// But that's not all! If there is no discrete entry to resolve the position to,
// the call to Next() or Previous() will just leave the read pointer in a
// conceptual position, or do something even more bizarre.
// https://github.com/systemd/systemd/issues/9934

// drainJournal reads and sends log messages from the journal.
//
// drainJournal returns errDrainDone when a terminal stopping condition has been
// reached: the watch consumer is gone, a log entry is read which has a
// timestamp after until (if until is nonzero), or the log driver is closed and
// the last message logged has been sent from the journal. If the end of the
// journal is reached without encountering a terminal stopping condition, a nil
// error is returned.
func (r *reader) drainJournal() error {
if !r.initialized {
defer func() {
r.signalReady()
r.initialized = true
}()
// initialSeekHead positions the journal read pointer at the earliest journal
// entry with a timestamp of at least r.config.Since. It returns true if there
// is an entry to read at the read pointer.
func (r *reader) initialSeekHead() (bool, error) {
var err error
if r.config.Since.IsZero() {
err = r.j.SeekHead()
} else {
err = r.j.SeekRealtime(r.config.Since)
}
if err != nil {
return false, err
}
return r.j.Next()
}

var (
err error
seekedToTail bool
)
if r.config.Tail >= 0 {
if r.config.Until.IsZero() {
err = r.j.SeekTail()
seekedToTail = true
} else {
err = r.j.SeekRealtime(r.config.Until)
}
} else {
if r.config.Since.IsZero() {
err = r.j.SeekHead()
} else {
err = r.j.SeekRealtime(r.config.Since)
}
}
if err != nil {
return err
}

// SeekTail() followed by Next() behaves incorrectly, so we need
// to work around the bug by ensuring the first discrete
// movement of the read pointer is Previous() or PreviousSkip().
// PreviousSkip() is called inside the loop when config.Tail > 0
// so the only special case requiring special handling is
// config.Tail == 0.
// https://github.com/systemd/systemd/issues/9934
if seekedToTail && r.config.Tail == 0 {
// Resolve the read pointer to the last entry in the
// journal so that the call to Next() inside the loop
// advances past it.
if ok, err := r.j.Previous(); err != nil || !ok {
return err
}
}
// initialSeekTail positions the journal read pointer at a journal entry
// relative to the tail of the journal at the time of the call based on the
// specification in r.config. It returns true if there is an entry to read at
// the read pointer. Otherwise the read pointer is set to a conceptual position
// which will be resolved to the desired entry (once written) by advancing
// forward with r.j.Next() or similar.
func (r *reader) initialSeekTail() (bool, error) {
var err error
if r.config.Until.IsZero() {
err = r.j.SeekTail()
} else {
err = r.j.SeekRealtime(r.config.Until)
}
if err != nil {
return false, err
}

for i := 0; ; i++ {
if !r.initialized && i == 0 && r.config.Tail > 0 {
if n, err := r.j.PreviousSkip(uint(r.config.Tail)); err != nil || n == 0 {
return err
var ok bool
if r.config.Tail == 0 {
ok, err = r.j.Previous()
} else {
var n int
n, err = r.j.PreviousSkip(uint(r.config.Tail))
ok = n > 0
}
if err != nil {
return ok, err
}
if !ok {
// The (filtered) journal has no entries. The tail is the head: all new
// entries which get written into the journal from this point forward
// should be read from the journal. However the read pointer is
// positioned at a conceptual position which is not condusive to reading
// those entries. The tail of the journal is resolved to the last entry
// in the journal _at the time of the first successful Previous() call_,
// which means that an arbitrary number of journal entries added in the
// interim may be skipped: race condition. While the realtime conceptual
// position is not so racy, it is also unhelpful: it is the timestamp
// past where reading should stop, so all logs that should be followed
// would be skipped over.
// Reset the read pointer position to avoid these problems.
return r.initialSeekHead()
} else if r.config.Tail == 0 {
// The journal read pointer is positioned at the discrete position of
// the journal entry _before_ the entry to send.
return r.j.Next()
}

// Check if the PreviousSkip went too far back.
timestamp, err := r.j.Realtime()
if err != nil {
return false, err
}
if timestamp.Before(r.config.Since) {
if err := r.j.SeekRealtime(r.config.Since); err != nil {
return false, err
}
return r.j.Next()
}
return true, nil
}

// wait blocks until the journal has new data to read, the reader's drain
// deadline is exceeded, or the log reading consumer is gone.
func (r *reader) wait() (bool, error) {
for {
dur := waitInterval
if !r.drainDeadline.IsZero() {
dur = time.Until(r.drainDeadline)
if dur < 0 {
// Container is gone but we haven't found the end of the
// logs before the deadline. Maybe it was dropped by
// journald, e.g. due to rate-limiting.
return false, nil
} else if dur > waitInterval {
dur = waitInterval
}
} else if ok, err := r.j.Next(); err != nil || !ok {
return err
}

if !r.initialized && i == 0 {
// The cursor is in a position which will be unaffected
// by subsequent logging.
r.signalReady()
status, err := r.j.Wait(dur)
if err != nil {
return false, err
} else if status != sdjournal.StatusNOP {
return true, nil
}
select {
case <-r.logWatcher.WatchConsumerGone():
return false, nil
case <-r.s.closed:
// Container is gone; don't wait indefinitely for journal entries that will never arrive.
if r.maxOrdinal >= atomic.LoadUint64(&r.s.ordinal) {
return false, nil
}
if r.drainDeadline.IsZero() {
r.drainDeadline = time.Now().Add(closedDrainTimeout)
}
default:
}
}
}

// nextWait blocks until there is a new journal entry to read, and advances the
// journal read pointer to it.
func (r *reader) nextWait() (bool, error) {
for {
if ok, err := r.j.Next(); err != nil || ok {
return ok, err
}
if ok, err := r.wait(); err != nil || !ok {
return false, err
}
}
}

// drainJournal reads and sends log messages from the journal, starting from the
// current read pointer, until the end of the journal or a terminal stopping
// condition is reached.
//
// It returns false when a terminal stopping condition has been reached:
// - the watch consumer is gone, or
// - (if until is nonzero) a log entry is read which has a timestamp after
// until
func (r *reader) drainJournal() (bool, error) {
for i := 0; ; i++ {
// Read the entry's timestamp.
timestamp, err := r.j.Realtime()
if err != nil {
return err
}
// Check if the PreviousSkip went too far back. Check only the
// initial position as we are comparing wall-clock timestamps,
// which may not be monotonic. We don't want to skip over
// messages sent later in time just because the clock moved
// backwards.
if !r.initialized && i == 0 && r.config.Tail > 0 && timestamp.Before(r.config.Since) {
r.j.SeekRealtime(r.config.Since)
continue
return true, err
}
if !r.config.Until.IsZero() && r.config.Until.Before(timestamp) {
return errDrainDone
return false, nil
}

// Read and send the logged message, if there is one to read.
data, err := r.j.Data()
if err != nil {
return err
return true, err
}

if data[fieldLogEpoch] == r.s.epoch {
@@ -228,7 +297,7 @@ func (r *reader) drainJournal() error {
*/
select {
case <-r.logWatcher.WatchConsumerGone():
return errDrainDone
return false, nil
case r.logWatcher.Msg <- msg:
}
}
@@ -243,41 +312,28 @@ func (r *reader) drainJournal() error {
Warn("journald: error processing journal")
}
}

if ok, err := r.j.Next(); err != nil || !ok {
return true, err
}
}
}

func (r *reader) readJournal() error {
caughtUp := atomic.LoadUint64(&r.s.ordinal)
if err := r.drainJournal(); err != nil {
if err != errDrainDone {
return err
}
return nil
if more, err := r.drainJournal(); err != nil || !more {
return err
}

var drainTimeout <-chan time.Time
if !r.config.Follow {
if r.s.readSyncTimeout == 0 {
return nil
}
tmr := time.NewTimer(r.s.readSyncTimeout)
defer tmr.Stop()
drainTimeout = tmr.C
r.drainDeadline = time.Now().Add(r.s.readSyncTimeout)
}

for {
status, err := r.j.Wait(250 * time.Millisecond)
if err != nil {
return err
}
select {
case <-r.logWatcher.WatchConsumerGone():
return nil // won't be able to write anything anymore
case <-drainTimeout:
// Container is gone but we haven't found the end of the
// logs within the timeout. Maybe it was dropped by
// journald, e.g. due to rate-limiting.
return nil
case <-r.s.closed:
// container is gone, drain journal
lastSeq := atomic.LoadUint64(&r.s.ordinal)
@@ -285,24 +341,14 @@ func (r *reader) readJournal() error {
// All caught up with the logger!
return nil
}
if drainTimeout == nil {
tmr := time.NewTimer(closedDrainTimeout)
defer tmr.Stop()
drainTimeout = tmr.C
}
default:
// container is still alive
if status == sdjournal.StatusNOP {
// no new data -- keep waiting
continue
}
}
err = r.drainJournal()
if err != nil {
if err != errDrainDone {
return err
}
return nil

if more, err := r.nextWait(); err != nil || !more {
return err
}
if more, err := r.drainJournal(); err != nil || !more {
return err
}
if !r.config.Follow && r.s.readSyncTimeout > 0 && r.maxOrdinal >= caughtUp {
return nil
@@ -361,6 +407,33 @@ func (r *reader) readLogs() {
return
}

var ok bool
if r.config.Tail >= 0 {
ok, err = r.initialSeekTail()
} else {
ok, err = r.initialSeekHead()
}
if err != nil {
r.logWatcher.Err <- err
return
}
r.signalReady()
if !ok {
if !r.config.Follow {
return
}
// Either the read pointer is positioned at a discrete journal entry, in
// which case the position will be unaffected by subsequent logging, or
// the read pointer is in the conceptual position corresponding to the
// first journal entry to send once it is logged in the future.
if more, err := r.nextWait(); err != nil || !more {
if err != nil {
r.logWatcher.Err <- err
}
return
}
}

if err := r.readJournal(); err != nil {
r.logWatcher.Err <- err
return

@@ -3,6 +3,7 @@
package journald // import "github.com/docker/docker/daemon/logger/journald"

import (
"sync"
"testing"
"time"

@@ -46,32 +47,37 @@ func TestLogRead(t *testing.T) {
assert.NilError(t, rotatedJournal.Send("a log message from a totally different process in the active journal", journal.PriInfo, nil))

return func(t *testing.T) logger.Logger {
l, err := new(info)
assert.NilError(t, err)
l.journalReadDir = journalDir
sl := &syncLogger{journald: l, waiters: map[uint64]chan<- struct{}{}}

s := make(chan sendit, 100)
t.Cleanup(func() { close(s) })
go func() {
for m := range s {
<-m.after
activeJournal.Send(m.message, m.priority, m.vars)
if m.sent != nil {
close(m.sent)
sl.mu.Lock()
sl.sent++
if notify, ok := sl.waiters[sl.sent]; ok {
delete(sl.waiters, sl.sent)
close(notify)
}
sl.mu.Unlock()
}
}()
l, err := new(info)
assert.NilError(t, err)
l.journalReadDir = journalDir

sl := &syncLogger{journald: l}
l.sendToJournal = func(message string, priority journal.Priority, vars map[string]string) error {
sent := make(chan struct{})
sl.mu.Lock()
sl.queued++
sl.mu.Unlock()
s <- sendit{
message: message,
priority: priority,
vars: vars,
after: time.After(150 * time.Millisecond),
sent: sent,
}
sl.waitOn = sent
return nil
}
l.readSyncTimeout = 3 * time.Second
@@ -88,17 +94,31 @@ type sendit struct {
priority journal.Priority
vars map[string]string
after <-chan time.Time
sent chan<- struct{}
}

type syncLogger struct {
*journald
waitOn <-chan struct{}

mu sync.Mutex
queued, sent uint64
waiters map[uint64]chan<- struct{}
}

func (l *syncLogger) Sync() error {
if l.waitOn != nil {
<-l.waitOn
l.mu.Lock()
waitFor := l.queued
if l.sent >= l.queued {
l.mu.Unlock()
return nil
}
notify := make(chan struct{})
l.waiters[waitFor] = notify
l.mu.Unlock()
<-notify
return nil
}

func (l *syncLogger) Close() error {
_ = l.Sync()
return l.journald.Close()
}

@@ -1,6 +1,7 @@
package loggertest // import "github.com/docker/docker/daemon/logger/loggertest"

import (
"fmt"
"runtime"
"strings"
"sync"
@@ -10,6 +11,7 @@ import (
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"
"gotest.tools/v3/assert/opt"

"github.com/docker/docker/api/types/backend"
@@ -194,28 +196,31 @@ func (tr Reader) testTailEmptyLogs(t *testing.T, live bool) {
func (tr Reader) TestFollow(t *testing.T) {
// Reader sends all logs and closes after logger is closed
// - Starting from empty log (like run)
t.Run("FromEmptyLog", func(t *testing.T) {
t.Parallel()
l := tr.Factory(t, logger.Info{
ContainerID: "followstart0",
ContainerName: "logloglog",
})(t)
lw := l.(logger.LogReader).ReadLogs(logger.ReadConfig{Tail: -1, Follow: true})
defer lw.ConsumerGone()
for i, tail := range []int{-1, 0, 1, 42} {
i, tail := i, tail
t.Run(fmt.Sprintf("FromEmptyLog/Tail=%d", tail), func(t *testing.T) {
t.Parallel()
l := tr.Factory(t, logger.Info{
ContainerID: fmt.Sprintf("followstart%d", i),
ContainerName: fmt.Sprintf("logloglog%d", i),
})(t)
lw := l.(logger.LogReader).ReadLogs(logger.ReadConfig{Tail: tail, Follow: true})
defer lw.ConsumerGone()

doneReading := make(chan struct{})
var logs []*logger.Message
go func() {
defer close(doneReading)
logs = readAll(t, lw)
}()
doneReading := make(chan struct{})
var logs []*logger.Message
go func() {
defer close(doneReading)
logs = readAll(t, lw)
}()

mm := makeTestMessages()
expected := logMessages(t, l, mm)
assert.NilError(t, l.Close())
<-doneReading
assert.DeepEqual(t, logs, expected, compareLog)
})
mm := makeTestMessages()
expected := logMessages(t, l, mm)
assert.NilError(t, l.Close())
<-doneReading
assert.DeepEqual(t, logs, expected, compareLog)
})
}

t.Run("AttachMidStream", func(t *testing.T) {
t.Parallel()
@@ -433,7 +438,7 @@ func (tr Reader) TestConcurrent(t *testing.T) {
logAll := func(msgs []*logger.Message) {
defer wg.Done()
for _, m := range msgs {
l.Log(copyLogMessage(m))
assert.Check(t, l.Log(copyLogMessage(m)), "failed to log message %+v", m)
}
}

@@ -446,6 +451,15 @@ func (tr Reader) TestConcurrent(t *testing.T) {
defer l.Close()
wg.Wait()
}()
defer func() {
// Make sure log gets closed before we return
// so the temporary dir can be deleted
select {
case <-time.After(10 * time.Second):
t.Fatal("timed out waiting for logger to close")
case <-closed:
}
}()

// Check if the message count, order and content is equal to what was logged
for {
@@ -469,12 +483,8 @@ func (tr Reader) TestConcurrent(t *testing.T) {
*messages = (*messages)[1:]
}

assert.Equal(t, len(stdoutMessages), 0)
assert.Equal(t, len(stderrMessages), 0)

// Make sure log gets closed before we return
// so the temporary dir can be deleted
<-closed
assert.Check(t, is.Len(stdoutMessages, 0), "expected stdout messages were not read")
assert.Check(t, is.Len(stderrMessages, 0), "expected stderr messages were not read")
}

// logMessages logs messages to l and returns a slice of messages as would be

@@ -305,10 +305,6 @@ func (daemon *Daemon) createNetwork(cfg *config.Config, create types.NetworkCrea
return nil, errdefs.Forbidden(errors.New(`This node is not a swarm manager. Use "docker swarm init" or "docker swarm join" to connect this node to swarm and try again.`))
}

if network.HasIPv6Subnets(create.IPAM) {
create.EnableIPv6 = true
}

networkOptions := make(map[string]string)
for k, v := range create.Options {
networkOptions[k] = v
@@ -335,8 +331,28 @@ func (daemon *Daemon) createNetwork(cfg *config.Config, create types.NetworkCrea
nwOptions = append(nwOptions, libnetwork.NetworkOptionConfigOnly())
}

if err := network.ValidateIPAM(create.IPAM); err != nil {
return nil, errdefs.InvalidParameter(err)
if err := network.ValidateIPAM(create.IPAM, create.EnableIPv6); err != nil {
if agent {
// This function is called with agent=false for all networks. For swarm-scoped
// networks, the configuration is validated but ManagerRedirectError is returned
// and the network is not created. Then, each time a swarm-scoped network is
// needed, this function is called again with agent=true.
//
// Non-swarm networks created before ValidateIPAM was introduced continue to work
// as they did before-upgrade, even if they would fail the new checks on creation
// (for example, by having host-bits set in their subnet). Those networks are not
// seen again here.
//
// By dropping errors for agent networks, existing swarm-scoped networks also
// continue to behave as they did before upgrade - but new networks are still
// validated.
log.G(context.TODO()).WithFields(log.Fields{
"error": err,
"network": create.Name,
}).Warn("Continuing with validation errors in agent IPAM")
} else {
return nil, errdefs.InvalidParameter(err)
}
}

if create.IPAM != nil {
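The comment above mentions pre-existing networks "having host-bits set in their subnet" as the kind of configuration the new validation rejects. A hypothetical, standard-library-only illustration of such a check (this is not the moby `network.ValidateIPAM` implementation):

```go
// Sketch only: flags a subnet whose host bits are set instead of silently masking it.
package main

import (
	"fmt"
	"net/netip"
)

func checkSubnet(s string) error {
	p, err := netip.ParsePrefix(s)
	if err != nil {
		return err
	}
	// Masked() zeroes the host bits; a strict validator requires the canonical form.
	if p != p.Masked() {
		return fmt.Errorf("subnet %s has host bits set (canonical form is %s)", s, p.Masked())
	}
	return nil
}

func main() {
	fmt.Println(checkSubnet("192.168.1.0/24")) // <nil>
	fmt.Println(checkSubnet("192.168.1.5/24")) // error: host bits set
}
```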
@@ -788,7 +804,7 @@ func (daemon *Daemon) clearAttachableNetworks() {
}

// buildCreateEndpointOptions builds endpoint options from a given network.
func buildCreateEndpointOptions(c *container.Container, n *libnetwork.Network, epConfig *network.EndpointSettings, sb *libnetwork.Sandbox, daemonDNS []string) ([]libnetwork.EndpointOption, error) {
func buildCreateEndpointOptions(c *container.Container, n *libnetwork.Network, epConfig *internalnetwork.EndpointSettings, sb *libnetwork.Sandbox, daemonDNS []string) ([]libnetwork.EndpointOption, error) {
var createOptions []libnetwork.EndpointOption
var genericOptions = make(options.Generic)

@@ -824,8 +840,8 @@ func buildCreateEndpointOptions(c *container.Container, n *libnetwork.Network, e
createOptions = append(createOptions, libnetwork.EndpointOptionGeneric(options.Generic{k: v}))
}

if epConfig.MacAddress != "" {
mac, err := net.ParseMAC(epConfig.MacAddress)
if epConfig.DesiredMacAddress != "" {
mac, err := net.ParseMAC(epConfig.DesiredMacAddress)
if err != nil {
return nil, err
}

@@ -33,6 +33,9 @@ type Settings struct {
type EndpointSettings struct {
*networktypes.EndpointSettings
IPAMOperational bool
// DesiredMacAddress is the configured value, it's copied from MacAddress (the
// API param field) when the container is created.
DesiredMacAddress string
}

// AttachmentStore stores the load balancer IP address for a network id.

@@ -2,6 +2,7 @@ package daemon // import "github.com/docker/docker/daemon"

import (
"context"
"fmt"
"strings"

"github.com/containerd/log"
@@ -127,9 +128,9 @@ func (daemon *Daemon) ContainerRename(oldName, newName string) (retErr error) {
return err
}

ep, err := nw.EndpointByID(epConfig.EndpointID)
if err != nil {
return err
ep := sb.GetEndpoint(epConfig.EndpointID)
if ep == nil {
return fmt.Errorf("no endpoint attached to network %s found", nw.Name())
}

oldDNSNames := make([]string, len(epConfig.DNSNames))

@@ -68,7 +68,7 @@ func (daemon *Daemon) ContainerStart(ctx context.Context, name string, hostConfi
if err := daemon.mergeAndVerifyLogConfig(&hostConfig.LogConfig); err != nil {
return errdefs.InvalidParameter(err)
}
if err := daemon.setHostConfig(ctr, hostConfig); err != nil {
if err := daemon.setHostConfig(ctr, hostConfig, true); err != nil {
return errdefs.InvalidParameter(err)
}
newNetworkMode := ctr.HostConfig.NetworkMode

@@ -54,7 +54,7 @@ func (m mounts) parts(i int) int {
// 2. Select the volumes mounted from another containers. Overrides previously configured mount point destination.
// 3. Select the bind mounts set by the client. Overrides previously configured mount point destinations.
// 4. Cleanup old volumes that are about to be reassigned.
func (daemon *Daemon) registerMountPoints(container *container.Container, hostConfig *containertypes.HostConfig) (retErr error) {
func (daemon *Daemon) registerMountPoints(container *container.Container, hostConfig *containertypes.HostConfig, defaultReadOnlyNonRecursive bool) (retErr error) {
binds := map[string]bool{}
mountPoints := map[string]*volumemounts.MountPoint{}
parser := volumemounts.NewParser()
@@ -158,6 +158,15 @@ func (daemon *Daemon) registerMountPoints(container *container.Container, hostCo
}
}

if bind.Type == mount.TypeBind && !bind.RW {
if defaultReadOnlyNonRecursive {
if bind.Spec.BindOptions == nil {
bind.Spec.BindOptions = &mounttypes.BindOptions{}
}
bind.Spec.BindOptions.ReadOnlyNonRecursive = true
}
}

binds[bind.Destination] = true
dereferenceIfExists(bind.Destination)
mountPoints[bind.Destination] = bind
@@ -212,8 +221,17 @@ func (daemon *Daemon) registerMountPoints(container *container.Container, hostCo
}
}

if mp.Type == mounttypes.TypeBind && (cfg.BindOptions == nil || !cfg.BindOptions.CreateMountpoint) {
mp.SkipMountpointCreation = true
if mp.Type == mounttypes.TypeBind {
if cfg.BindOptions == nil || !cfg.BindOptions.CreateMountpoint {
mp.SkipMountpointCreation = true
}

if !mp.RW && defaultReadOnlyNonRecursive {
if mp.Spec.BindOptions == nil {
mp.Spec.BindOptions = &mounttypes.BindOptions{}
}
mp.Spec.BindOptions.ReadOnlyNonRecursive = true
}
}

binds[mp.Destination] = true

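The two hunks above default read-only bind mounts to non-recursively read-only when the daemon-wide flag asks for it. A condensed sketch of that defaulting rule, using the same API mount types the diff references (`mounttypes` there is `github.com/docker/docker/api/types/mount`); this mirrors the idea, it is not the daemon code itself:

```go
// Sketch only, under the assumption that the BindOptions.ReadOnlyNonRecursive
// field shown in the diff is what the defaulting logic toggles.
package main

import (
	"fmt"

	mounttypes "github.com/docker/docker/api/types/mount"
)

// applyReadOnlyNonRecursiveDefault marks a read-only bind mount as
// non-recursively read-only when the daemon-wide default requests it.
func applyReadOnlyNonRecursiveDefault(m *mounttypes.Mount, defaultReadOnlyNonRecursive bool) {
	if m.Type != mounttypes.TypeBind || !m.ReadOnly || !defaultReadOnlyNonRecursive {
		return
	}
	if m.BindOptions == nil {
		m.BindOptions = &mounttypes.BindOptions{}
	}
	m.BindOptions.ReadOnlyNonRecursive = true
}

func main() {
	m := mounttypes.Mount{Type: mounttypes.TypeBind, Source: "/src", Target: "/dst", ReadOnly: true}
	applyReadOnlyNonRecursiveDefault(&m, true)
	fmt.Printf("%+v\n", m.BindOptions) // ReadOnlyNonRecursive is now true
}
```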
File diffs suppressed because they are too large (lines changed, file):

2173 docs/api/v1.18.md
2253 docs/api/v1.19.md
2408 docs/api/v1.20.md
2997 docs/api/v1.21.md
3336 docs/api/v1.22.md
3452 docs/api/v1.23.md
@@ -52,18 +52,6 @@ info:

The API uses an open schema model, which means server may add extra properties to responses. Likewise, the server will ignore any extra query parameters and request body properties. When you write clients, you need to ignore additional properties in responses to ensure they do not break when talking to newer Docker daemons.

This documentation is for version 1.25 of the API, which was introduced with Docker 1.13. Use this table to find documentation for previous versions of the API:

Docker version | API version | Changes
----------------|-------------|---------
1.12.x | [1.24](https://docs.docker.com/engine/api/v1.24/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-24-api-changes)
1.11.x | [1.23](https://docs.docker.com/engine/api/v1.23/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-23-api-changes)
1.10.x | [1.22](https://docs.docker.com/engine/api/v1.22/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-22-api-changes)
1.9.x | [1.21](https://docs.docker.com/engine/api/v1.21/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-21-api-changes)
1.8.x | [1.20](https://docs.docker.com/engine/api/v1.20/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-20-api-changes)
1.7.x | [1.19](https://docs.docker.com/engine/api/v1.19/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-19-api-changes)
1.6.x | [1.18](https://docs.docker.com/engine/api/v1.18/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-18-api-changes)

# Authentication

Authentication for registries is handled client side. The client has to send authentication details to various endpoints that need to communicate with registries, such as `POST /images/(name)/push`. These are sent as `X-Registry-Auth` header as a Base64 encoded (JSON) string with the following structure:

@@ -52,19 +52,6 @@ info:

The API uses an open schema model, which means server may add extra properties to responses. Likewise, the server will ignore any extra query parameters and request body properties. When you write clients, you need to ignore additional properties in responses to ensure they do not break when talking to newer Docker daemons.

This documentation is for version 1.26 of the API, which was introduced with Docker 1.13.1. Use this table to find documentation for previous versions of the API:

Docker version | API version | Changes
----------------|-------------|---------
1.13.0 | [1.25](https://docs.docker.com/engine/api/v1.25/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-25-api-changes)
1.12.x | [1.24](https://docs.docker.com/engine/api/v1.24/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-24-api-changes)
1.11.x | [1.23](https://docs.docker.com/engine/api/v1.23/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-23-api-changes)
1.10.x | [1.22](https://docs.docker.com/engine/api/v1.22/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-22-api-changes)
1.9.x | [1.21](https://docs.docker.com/engine/api/v1.21/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-21-api-changes)
1.8.x | [1.20](https://docs.docker.com/engine/api/v1.20/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-20-api-changes)
1.7.x | [1.19](https://docs.docker.com/engine/api/v1.19/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-19-api-changes)
1.6.x | [1.18](https://docs.docker.com/engine/api/v1.18/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-18-api-changes)

# Authentication

Authentication for registries is handled client side. The client has to send authentication details to various endpoints that need to communicate with registries, such as `POST /images/(name)/push`. These are sent as `X-Registry-Auth` header as a Base64 encoded (JSON) string with the following structure:

@@ -52,20 +52,6 @@ info:

The API uses an open schema model, which means server may add extra properties to responses. Likewise, the server will ignore any extra query parameters and request body properties. When you write clients, you need to ignore additional properties in responses to ensure they do not break when talking to newer Docker daemons.

This documentation is for version 1.27 of the API, which was introduced with Docker 17.03.1. Use this table to find documentation for previous versions of the API:

Docker version | API version | Changes
----------------|-------------|---------
1.13.1 & 17.03.0 | [1.26](https://docs.docker.com/engine/api/v1.26/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-26-api-changes)
1.13.0 | [1.25](https://docs.docker.com/engine/api/v1.25/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-25-api-changes)
1.12.x | [1.24](https://docs.docker.com/engine/api/v1.24/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-24-api-changes)
1.11.x | [1.23](https://docs.docker.com/engine/api/v1.23/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-23-api-changes)
1.10.x | [1.22](https://docs.docker.com/engine/api/v1.22/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-22-api-changes)
1.9.x | [1.21](https://docs.docker.com/engine/api/v1.21/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-21-api-changes)
1.8.x | [1.20](https://docs.docker.com/engine/api/v1.20/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-20-api-changes)
1.7.x | [1.19](https://docs.docker.com/engine/api/v1.19/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-19-api-changes)
1.6.x | [1.18](https://docs.docker.com/engine/api/v1.18/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-18-api-changes)

# Authentication

Authentication for registries is handled client side. The client has to send authentication details to various endpoints that need to communicate with registries, such as `POST /images/(name)/push`. These are sent as `X-Registry-Auth` header as a Base64 encoded (JSON) string with the following structure:

@@ -52,21 +52,6 @@ info:

The API uses an open schema model, which means server may add extra properties to responses. Likewise, the server will ignore any extra query parameters and request body properties. When you write clients, you need to ignore additional properties in responses to ensure they do not break when talking to newer Docker daemons.

This documentation is for version 1.28 of the API, which was introduced with Docker 17.04. Use this table to find documentation for previous versions of the API:

Docker version | API version | Changes
----------------|-------------|---------
17.03.1 | [1.27](https://docs.docker.com/engine/api/v1.27/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-27-api-changes)
1.13.1 & 17.03.0 | [1.26](https://docs.docker.com/engine/api/v1.26/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-26-api-changes)
1.13.0 | [1.25](https://docs.docker.com/engine/api/v1.25/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-25-api-changes)
1.12.x | [1.24](https://docs.docker.com/engine/api/v1.24/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-24-api-changes)
1.11.x | [1.23](https://docs.docker.com/engine/api/v1.23/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-23-api-changes)
1.10.x | [1.22](https://docs.docker.com/engine/api/v1.22/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-22-api-changes)
1.9.x | [1.21](https://docs.docker.com/engine/api/v1.21/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-21-api-changes)
1.8.x | [1.20](https://docs.docker.com/engine/api/v1.20/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-20-api-changes)
1.7.x | [1.19](https://docs.docker.com/engine/api/v1.19/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-19-api-changes)
1.6.x | [1.18](https://docs.docker.com/engine/api/v1.18/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-18-api-changes)

# Authentication

Authentication for registries is handled client side. The client has to send authentication details to various endpoints that need to communicate with registries, such as `POST /images/(name)/push`. These are sent as `X-Registry-Auth` header as a Base64 encoded (JSON) string with the following structure:

@@ -52,22 +52,6 @@ info:

The API uses an open schema model, which means server may add extra properties to responses. Likewise, the server will ignore any extra query parameters and request body properties. When you write clients, you need to ignore additional properties in responses to ensure they do not break when talking to newer Docker daemons.

This documentation is for version 1.29 of the API, which was introduced with Docker 17.05. Use this table to find documentation for previous versions of the API:

Docker version | API version | Changes
----------------|-------------|---------
17.04.x | [1.28](https://docs.docker.com/engine/api/v1.28/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-28-api-changes)
17.03.1 | [1.27](https://docs.docker.com/engine/api/v1.27/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-27-api-changes)
1.13.1 & 17.03.0 | [1.26](https://docs.docker.com/engine/api/v1.26/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-26-api-changes)
1.13.0 | [1.25](https://docs.docker.com/engine/api/v1.25/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-25-api-changes)
1.12.x | [1.24](https://docs.docker.com/engine/api/v1.24/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-24-api-changes)
1.11.x | [1.23](https://docs.docker.com/engine/api/v1.23/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-23-api-changes)
1.10.x | [1.22](https://docs.docker.com/engine/api/v1.22/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-22-api-changes)
1.9.x | [1.21](https://docs.docker.com/engine/api/v1.21/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-21-api-changes)
1.8.x | [1.20](https://docs.docker.com/engine/api/v1.20/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-20-api-changes)
1.7.x | [1.19](https://docs.docker.com/engine/api/v1.19/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-19-api-changes)
1.6.x | [1.18](https://docs.docker.com/engine/api/v1.18/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-18-api-changes)

# Authentication

Authentication for registries is handled client side. The client has to send authentication details to various endpoints that need to communicate with registries, such as `POST /images/(name)/push`. These are sent as `X-Registry-Auth` header as a Base64 encoded (JSON) string with the following structure:

@@ -52,23 +52,6 @@ info:

The API uses an open schema model, which means server may add extra properties to responses. Likewise, the server will ignore any extra query parameters and request body properties. When you write clients, you need to ignore additional properties in responses to ensure they do not break when talking to newer Docker daemons.

This documentation is for version 1.30 of the API, which was introduced with Docker 17.06. Use this table to find documentation for previous versions of the API:

Docker version | API version | Changes
----------------|-------------|---------
17.05.x | [1.29](https://docs.docker.com/engine/api/v1.29/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-29-api-changes)
17.04.x | [1.28](https://docs.docker.com/engine/api/v1.28/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-28-api-changes)
17.03.1 | [1.27](https://docs.docker.com/engine/api/v1.27/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-27-api-changes)
1.13.1 & 17.03.0 | [1.26](https://docs.docker.com/engine/api/v1.26/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-26-api-changes)
1.13.0 | [1.25](https://docs.docker.com/engine/api/v1.25/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-25-api-changes)
1.12.x | [1.24](https://docs.docker.com/engine/api/v1.24/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-24-api-changes)
1.11.x | [1.23](https://docs.docker.com/engine/api/v1.23/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-23-api-changes)
1.10.x | [1.22](https://docs.docker.com/engine/api/v1.22/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-22-api-changes)
1.9.x | [1.21](https://docs.docker.com/engine/api/v1.21/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-21-api-changes)
1.8.x | [1.20](https://docs.docker.com/engine/api/v1.20/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-20-api-changes)
1.7.x | [1.19](https://docs.docker.com/engine/api/v1.19/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-19-api-changes)
1.6.x | [1.18](https://docs.docker.com/engine/api/v1.18/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-18-api-changes)

# Authentication

Authentication for registries is handled client side. The client has to send authentication details to various endpoints that need to communicate with registries, such as `POST /images/(name)/push`. These are sent as `X-Registry-Auth` header as a Base64 encoded (JSON) string with the following structure:

Some files were not shown because too many files have changed in this diff.