Mirror of https://github.com/moby/moby.git (synced 2026-01-12 11:11:44 +00:00)
Compare commits
259 Commits
SHA1:
b08a51fe16 d151b0f87f c6ba9a5124 4673a3ca2c 30f8908102 7454d6a2e6 65cc597cea
b722836927 e8ecb9c76d e6cae1f237 8ec448db6b 274310807e 886e726984 a0f0f7e77e
91903e81ca ccfe0a41d4 ed11c9c562 d046451b34 e16a25e442 b1aac1b134 fffbe84ded
c55eeb3cfa 9f6600deed f26fd4a73a 70fe516b46 303e26dce7 f7ce828e9e 085fa9bf66
577ca9b076 98ddccbbfe 03ecc6f5e6 637205391b d16d8bd448 2ebb5ca1c0 3d56d734db
0a2f5085ee 3141ea5c8b 4f25076181 d93cc7edc0 5beae56515 ee5909c2d0 f37d6f5f48
fd828b6766 7ac688aa0f 584a30c772 60605eb1da 71b8e0339c 08e8912d7c aee8b332bf
12c4e03288 e2e670299f f42f65b464 935787c19c bd19301d9e 79c31c12fc 50bd133ad3
e63daec867 817bccb1c6 2a0601e84e 9df9ccc06f a987bc5ad0 20c205fd3a 4be97233cc
7ed7e6caf6 81ad7062f0 02d4ee3f9a 5901652edd 478f6b097d 98b171fd4d d250e13945
061aa95809 d0d85f6438 5d6679345c ef1fa235cd 0451b287dc d27fe2558d 77de535364
9e526bc394 2d347024d1 51e876cd96 3fa0cedce3 4e7d8531ed f66b5f642e 41fde13f64
0db1c6d8bb 33a29c0135 30545de83e fa4ea308f0 d66e0fb7b1 30ecc0ea8a 06767446fe
7048a63686 81fb7f9986 b77bb69f87 7a4abb8c77 81a83f0544 abcd6f8a46 f7be6dcba6
10609544e5 be59afce2d 97951c39fb 2001813571 8e3bcf1974 27f36f42a4 1ae019fca2
c761353e7c 00b2e1072b 10bc347b03 6d675b429e 9f1b47c597 94137f6df5 dd5faa9d4f
012bfd33e5 3ec1946ce1 200a2c3576 cb66214dfd 70c05fe10c e85cef89fa a72294a668
9ee331235a 6fb71a9764 5d9e13bc84 36d02bf488 bb66c3ca04 fa3a64f2bc f417435e5f
acd023d42b 7a075cacf9 aff7177ee7 ed7c26339e 74e3b4fb2e 4cc0416534 f9f9e7ff9a
5fb4eb941d 67e9aa6d4d 61b82be580 0227d95f99 fa9c5c55e1 df96d8d0bd 1652559be4
ab29279200 147b5388dd 60103717bc 45dede440e ba4a2dab16 51133117fb 341a7978a5
10e3bfd0ac 269a0d8feb 876b1d1dcd 0bcd64689b 8d454710cd 6cf694fe70 c12bbf549b
1ae115175c a7f9907f5f 9150d0115e 9af7c8ec0a 3344c502da 6c9fafdda7 f8a8cdaf9e
7a659049b8 0ccf1c2a93 28c1a8bc2b 5b5a58b2cd 282891f70c bbe6f09afc 5b13a38144
990e95dcf0 a140d0d95f 91a8312fb7 cf03e96354 c48b67160d 225e043196 78174d2e74
622e66684a 85f4e6151a 3e358447f5 dd4de8f388 f5ef4e76b3 6c5e5271c1 693fca6199
49487e996a 0358f31dc2 081cffb3fa 9de19554c7 2a80b8a7b2 61ffecfa3b 02cd8dec03
1d7df5ecc0 4e68a265ed e437f890ba 5a0015f72c 5babfee371 fce6e0ca9b d838e68300
fa0d4159c7 06e22dce46 b73ee94289 fd6a419ad5 13ce91825f 4b63c47c1e 4edb71bb83
667bc3f803 1b47bfac02 f2d0d87c46 6ac38cdbeb d7bf237e29 f41b342cbe f413ba6fdb
c2ef38f790 d5eebf9e19 f3f5327b48 05a370f52f be7b60ef05 6d05b9b65b c01bbbddeb
32635850ed 2cf1c762f8 71fa3ab079 5295e88ceb 6eef840b8a e2ab4718c8 3de920a0b1
a445aa95e5 cb77e48229 e8801fbe26 613b6a12c1 1b6738369f b8cc2e8c66 fcccfeb811
f8eaa14a18 ac76925ff2 c7a1d928c0 2672baefd7 ff15b49b47 c0573b133f c7466c0b52
dde33d0dfe 39fedb254b f0f5fc974a 7c185a1e40 2b036fb1da 1f24da70d8 358fecb566
f030b25770 e07aed0f77 cdf3611cff 05267e9e8c e5edf62bca e14d121d49 e0acf1cd70
c2847b2eb2 0894f7fe69 d25aa32c21 1e335cfa74 4d287e9267 0240f5675b 13964248f1
@@ -3,19 +3,11 @@
     "build": {
         "context": "..",
         "dockerfile": "../Dockerfile",
-        "target": "devcontainer"
+        "target": "dev"
     },
     "workspaceFolder": "/go/src/github.com/docker/docker",
     "workspaceMount": "source=${localWorkspaceFolder},target=/go/src/github.com/docker/docker,type=bind,consistency=cached",
 
     "remoteUser": "root",
-    "runArgs": ["--privileged"],
-
-    "customizations": {
-        "vscode": {
-            "extensions": [
-                "golang.go"
-            ]
-        }
-    }
+    "runArgs": ["--privileged"]
 }
9
.github/workflows/.dco.yml
vendored
9
.github/workflows/.dco.yml
vendored
@@ -7,12 +7,11 @@ on:
|
||||
workflow_call:
|
||||
|
||||
env:
|
||||
ALPINE_VERSION: "3.20"
|
||||
ALPINE_VERSION: 3.16
|
||||
|
||||
jobs:
|
||||
run:
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 10 # guardrails timeout for the whole job
|
||||
steps:
|
||||
-
|
||||
name: Checkout
|
||||
@@ -40,12 +39,10 @@ jobs:
|
||||
name: Validate
|
||||
run: |
|
||||
docker run --rm \
|
||||
--quiet \
|
||||
-v ./:/workspace \
|
||||
-w /workspace \
|
||||
-v "$(pwd):/workspace" \
|
||||
-e VALIDATE_REPO \
|
||||
-e VALIDATE_BRANCH \
|
||||
alpine:${{ env.ALPINE_VERSION }} sh -c 'apk add --no-cache -q bash git openssh-client && git config --system --add safe.directory /workspace && hack/validate/dco'
|
||||
alpine:${{ env.ALPINE_VERSION }} sh -c 'apk add --no-cache -q bash git openssh-client && git config --system --add safe.directory /workspace && cd /workspace && hack/validate/dco'
|
||||
env:
|
||||
VALIDATE_REPO: ${{ github.server_url }}/${{ github.repository }}.git
|
||||
VALIDATE_BRANCH: ${{ steps.base-ref.outputs.result }}
|
||||
|
||||
1
.github/workflows/.test-prepare.yml
vendored
1
.github/workflows/.test-prepare.yml
vendored
@@ -13,7 +13,6 @@ on:
|
||||
jobs:
|
||||
run:
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 120 # guardrails timeout for the whole job
|
||||
outputs:
|
||||
matrix: ${{ steps.set.outputs.matrix }}
|
||||
steps:
|
||||
|
||||
19
.github/workflows/.test.yml
vendored
19
.github/workflows/.test.yml
vendored
@@ -12,7 +12,7 @@ on:
|
||||
default: "graphdriver"
|
||||
|
||||
env:
|
||||
GO_VERSION: "1.22.10"
|
||||
GO_VERSION: "1.21.12"
|
||||
GOTESTLIST_VERSION: v0.3.1
|
||||
TESTSTAT_VERSION: v0.1.25
|
||||
ITG_CLI_MATRIX_SIZE: 6
|
||||
@@ -23,8 +23,8 @@ env:
|
||||
jobs:
|
||||
unit:
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 120 # guardrails timeout for the whole job
|
||||
continue-on-error: ${{ github.event_name != 'pull_request' }}
|
||||
timeout-minutes: 120
|
||||
steps:
|
||||
-
|
||||
name: Checkout
|
||||
@@ -74,8 +74,8 @@ jobs:
|
||||
|
||||
unit-report:
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 10
|
||||
continue-on-error: ${{ github.event_name != 'pull_request' }}
|
||||
timeout-minutes: 10
|
||||
if: always()
|
||||
needs:
|
||||
- unit
|
||||
@@ -102,8 +102,8 @@ jobs:
|
||||
|
||||
docker-py:
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 120 # guardrails timeout for the whole job
|
||||
continue-on-error: ${{ github.event_name != 'pull_request' }}
|
||||
timeout-minutes: 120
|
||||
steps:
|
||||
-
|
||||
name: Checkout
|
||||
@@ -155,8 +155,8 @@ jobs:
|
||||
|
||||
integration-flaky:
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 120 # guardrails timeout for the whole job
|
||||
continue-on-error: ${{ github.event_name != 'pull_request' }}
|
||||
timeout-minutes: 120
|
||||
steps:
|
||||
-
|
||||
name: Checkout
|
||||
@@ -183,8 +183,8 @@ jobs:
|
||||
|
||||
integration:
|
||||
runs-on: ${{ matrix.os }}
|
||||
timeout-minutes: 120 # guardrails timeout for the whole job
|
||||
continue-on-error: ${{ github.event_name != 'pull_request' }}
|
||||
timeout-minutes: 120
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
@@ -277,8 +277,8 @@ jobs:
|
||||
|
||||
integration-report:
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 10
|
||||
continue-on-error: ${{ github.event_name != 'pull_request' }}
|
||||
timeout-minutes: 10
|
||||
if: always()
|
||||
needs:
|
||||
- integration
|
||||
@@ -306,7 +306,6 @@ jobs:
|
||||
|
||||
integration-cli-prepare:
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 120 # guardrails timeout for the whole job
|
||||
continue-on-error: ${{ github.event_name != 'pull_request' }}
|
||||
outputs:
|
||||
matrix: ${{ steps.tests.outputs.matrix }}
|
||||
@@ -342,8 +341,8 @@ jobs:
|
||||
|
||||
integration-cli:
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 120 # guardrails timeout for the whole job
|
||||
continue-on-error: ${{ github.event_name != 'pull_request' }}
|
||||
timeout-minutes: 120
|
||||
needs:
|
||||
- integration-cli-prepare
|
||||
strategy:
|
||||
@@ -418,8 +417,8 @@ jobs:
|
||||
|
||||
integration-cli-report:
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 10
|
||||
continue-on-error: ${{ github.event_name != 'pull_request' }}
|
||||
timeout-minutes: 10
|
||||
if: always()
|
||||
needs:
|
||||
- integration-cli
|
||||
|
||||
16
.github/workflows/.windows.yml
vendored
16
.github/workflows/.windows.yml
vendored
@@ -19,7 +19,7 @@ on:
|
||||
default: false
|
||||
|
||||
env:
|
||||
GO_VERSION: "1.22.10"
|
||||
GO_VERSION: "1.21.12"
|
||||
GOTESTLIST_VERSION: v0.3.1
|
||||
TESTSTAT_VERSION: v0.1.25
|
||||
WINDOWS_BASE_IMAGE: mcr.microsoft.com/windows/servercore
|
||||
@@ -33,7 +33,6 @@ env:
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ${{ inputs.os }}
|
||||
timeout-minutes: 120 # guardrails timeout for the whole job
|
||||
env:
|
||||
GOPATH: ${{ github.workspace }}\go
|
||||
GOBIN: ${{ github.workspace }}\go\bin
|
||||
@@ -113,7 +112,7 @@ jobs:
|
||||
|
||||
unit-test:
|
||||
runs-on: ${{ inputs.os }}
|
||||
timeout-minutes: 120 # guardrails timeout for the whole job
|
||||
timeout-minutes: 120
|
||||
env:
|
||||
GOPATH: ${{ github.workspace }}\go
|
||||
GOBIN: ${{ github.workspace }}\go\bin
|
||||
@@ -194,8 +193,7 @@ jobs:
|
||||
retention-days: 1
|
||||
|
||||
unit-test-report:
|
||||
runs-on: ubuntu-24.04
|
||||
timeout-minutes: 120 # guardrails timeout for the whole job
|
||||
runs-on: ubuntu-latest
|
||||
if: always()
|
||||
needs:
|
||||
- unit-test
|
||||
@@ -221,8 +219,7 @@ jobs:
|
||||
find /tmp/artifacts -type f -name '*-go-test-report.json' -exec teststat -markdown {} \+ >> $GITHUB_STEP_SUMMARY
|
||||
|
||||
integration-test-prepare:
|
||||
runs-on: ubuntu-24.04
|
||||
timeout-minutes: 120 # guardrails timeout for the whole job
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
matrix: ${{ steps.tests.outputs.matrix }}
|
||||
steps:
|
||||
@@ -256,8 +253,8 @@ jobs:
|
||||
|
||||
integration-test:
|
||||
runs-on: ${{ inputs.os }}
|
||||
timeout-minutes: 120 # guardrails timeout for the whole job
|
||||
continue-on-error: ${{ inputs.storage == 'snapshotter' && github.event_name != 'pull_request' }}
|
||||
timeout-minutes: 120
|
||||
needs:
|
||||
- build
|
||||
- integration-test-prepare
|
||||
@@ -515,8 +512,7 @@ jobs:
|
||||
retention-days: 1
|
||||
|
||||
integration-test-report:
|
||||
runs-on: ubuntu-24.04
|
||||
timeout-minutes: 120 # guardrails timeout for the whole job
|
||||
runs-on: ubuntu-latest
|
||||
continue-on-error: ${{ inputs.storage == 'snapshotter' && github.event_name != 'pull_request' }}
|
||||
if: always()
|
||||
needs:
|
||||
|
||||
275
.github/workflows/arm64.yml
vendored
275
.github/workflows/arm64.yml
vendored
@@ -1,275 +0,0 @@
|
||||
name: arm64
|
||||
|
||||
# Default to 'contents: read', which grants actions to read commits.
|
||||
#
|
||||
# If any permission is set, any permission not included in the list is
|
||||
# implicitly set to "none".
|
||||
#
|
||||
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
branches:
|
||||
- 'master'
|
||||
- '[0-9]+.[0-9]+'
|
||||
pull_request:
|
||||
|
||||
env:
|
||||
GO_VERSION: "1.22.10"
|
||||
TESTSTAT_VERSION: v0.1.25
|
||||
DESTDIR: ./build
|
||||
SETUP_BUILDX_VERSION: edge
|
||||
SETUP_BUILDKIT_IMAGE: moby/buildkit:latest
|
||||
DOCKER_EXPERIMENTAL: 1
|
||||
|
||||
jobs:
|
||||
validate-dco:
|
||||
uses: ./.github/workflows/.dco.yml
|
||||
|
||||
build:
|
||||
runs-on: ubuntu-22.04-arm
|
||||
timeout-minutes: 20 # guardrails timeout for the whole job
|
||||
needs:
|
||||
- validate-dco
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
target:
|
||||
- binary
|
||||
- dynbinary
|
||||
steps:
|
||||
-
|
||||
name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
with:
|
||||
version: ${{ env.SETUP_BUILDX_VERSION }}
|
||||
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
|
||||
buildkitd-flags: --debug
|
||||
-
|
||||
name: Build
|
||||
uses: docker/bake-action@v6
|
||||
with:
|
||||
targets: ${{ matrix.target }}
|
||||
-
|
||||
name: List artifacts
|
||||
run: |
|
||||
tree -nh ${{ env.DESTDIR }}
|
||||
-
|
||||
name: Check artifacts
|
||||
run: |
|
||||
find ${{ env.DESTDIR }} -type f -exec file -e ascii -- {} +
|
||||
|
||||
build-dev:
|
||||
runs-on: ubuntu-22.04-arm
|
||||
timeout-minutes: 120 # guardrails timeout for the whole job
|
||||
needs:
|
||||
- validate-dco
|
||||
steps:
|
||||
-
|
||||
name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
with:
|
||||
version: ${{ env.SETUP_BUILDX_VERSION }}
|
||||
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
|
||||
buildkitd-flags: --debug
|
||||
-
|
||||
name: Build dev image
|
||||
uses: docker/bake-action@v6
|
||||
with:
|
||||
targets: dev
|
||||
set: |
|
||||
*.cache-from=type=gha,scope=dev-arm64
|
||||
*.cache-to=type=gha,scope=dev-arm64,mode=max
|
||||
*.output=type=cacheonly
|
||||
|
||||
test-unit:
|
||||
runs-on: ubuntu-22.04-arm
|
||||
timeout-minutes: 120 # guardrails timeout for the whole job
|
||||
needs:
|
||||
- build-dev
|
||||
steps:
|
||||
-
|
||||
name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
-
|
||||
name: Set up runner
|
||||
uses: ./.github/actions/setup-runner
|
||||
-
|
||||
name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
with:
|
||||
version: ${{ env.SETUP_BUILDX_VERSION }}
|
||||
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
|
||||
buildkitd-flags: --debug
|
||||
-
|
||||
name: Build dev image
|
||||
uses: docker/bake-action@v6
|
||||
with:
|
||||
targets: dev
|
||||
set: |
|
||||
dev.cache-from=type=gha,scope=dev-arm64
|
||||
-
|
||||
name: Test
|
||||
run: |
|
||||
make -o build test-unit
|
||||
-
|
||||
name: Prepare reports
|
||||
if: always()
|
||||
run: |
|
||||
mkdir -p bundles /tmp/reports
|
||||
find bundles -path '*/root/*overlay2' -prune -o -type f \( -name '*-report.json' -o -name '*.log' -o -name '*.out' -o -name '*.prof' -o -name '*-report.xml' \) -print | xargs sudo tar -czf /tmp/reports.tar.gz
|
||||
tar -xzf /tmp/reports.tar.gz -C /tmp/reports
|
||||
sudo chown -R $(id -u):$(id -g) /tmp/reports
|
||||
tree -nh /tmp/reports
|
||||
-
|
||||
name: Send to Codecov
|
||||
uses: codecov/codecov-action@v4
|
||||
with:
|
||||
directory: ./bundles
|
||||
env_vars: RUNNER_OS
|
||||
flags: unit
|
||||
token: ${{ secrets.CODECOV_TOKEN }} # used to upload coverage reports: https://github.com/moby/buildkit/pull/4660#issue-2142122533
|
||||
-
|
||||
name: Upload reports
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: test-reports-unit-arm64-graphdriver
|
||||
path: /tmp/reports/*
|
||||
retention-days: 1
|
||||
|
||||
test-unit-report:
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 10
|
||||
continue-on-error: ${{ github.event_name != 'pull_request' }}
|
||||
if: always()
|
||||
needs:
|
||||
- test-unit
|
||||
steps:
|
||||
-
|
||||
name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
cache-dependency-path: vendor.sum
|
||||
-
|
||||
name: Download reports
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
pattern: test-reports-unit-arm64-*
|
||||
path: /tmp/reports
|
||||
-
|
||||
name: Install teststat
|
||||
run: |
|
||||
go install github.com/vearutop/teststat@${{ env.TESTSTAT_VERSION }}
|
||||
-
|
||||
name: Create summary
|
||||
run: |
|
||||
find /tmp/reports -type f -name '*-go-test-report.json' -exec teststat -markdown {} \+ >> $GITHUB_STEP_SUMMARY
|
||||
|
||||
test-integration:
|
||||
runs-on: ubuntu-22.04-arm
|
||||
timeout-minutes: 120 # guardrails timeout for the whole job
|
||||
continue-on-error: ${{ github.event_name != 'pull_request' }}
|
||||
needs:
|
||||
- build-dev
|
||||
steps:
|
||||
-
|
||||
name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
-
|
||||
name: Set up runner
|
||||
uses: ./.github/actions/setup-runner
|
||||
-
|
||||
name: Set up tracing
|
||||
uses: ./.github/actions/setup-tracing
|
||||
-
|
||||
name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
with:
|
||||
version: ${{ env.SETUP_BUILDX_VERSION }}
|
||||
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
|
||||
buildkitd-flags: --debug
|
||||
-
|
||||
name: Build dev image
|
||||
uses: docker/bake-action@v6
|
||||
with:
|
||||
targets: dev
|
||||
set: |
|
||||
dev.cache-from=type=gha,scope=dev-arm64
|
||||
-
|
||||
name: Test
|
||||
run: |
|
||||
make -o build test-integration
|
||||
env:
|
||||
TEST_SKIP_INTEGRATION_CLI: 1
|
||||
TESTCOVERAGE: 1
|
||||
-
|
||||
name: Prepare reports
|
||||
if: always()
|
||||
run: |
|
||||
reportsPath="/tmp/reports/arm64-graphdriver"
|
||||
mkdir -p bundles $reportsPath
|
||||
find bundles -path '*/root/*overlay2' -prune -o -type f \( -name '*-report.json' -o -name '*.log' -o -name '*.out' -o -name '*.prof' -o -name '*-report.xml' \) -print | xargs sudo tar -czf /tmp/reports.tar.gz
|
||||
tar -xzf /tmp/reports.tar.gz -C $reportsPath
|
||||
sudo chown -R $(id -u):$(id -g) $reportsPath
|
||||
tree -nh $reportsPath
|
||||
curl -sSLf localhost:16686/api/traces?service=integration-test-client > $reportsPath/jaeger-trace.json
|
||||
-
|
||||
name: Send to Codecov
|
||||
uses: codecov/codecov-action@v4
|
||||
with:
|
||||
directory: ./bundles/test-integration
|
||||
env_vars: RUNNER_OS
|
||||
flags: integration
|
||||
token: ${{ secrets.CODECOV_TOKEN }} # used to upload coverage reports: https://github.com/moby/buildkit/pull/4660#issue-2142122533
|
||||
-
|
||||
name: Test daemon logs
|
||||
if: always()
|
||||
run: |
|
||||
cat bundles/test-integration/docker.log
|
||||
-
|
||||
name: Upload reports
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: test-reports-integration-arm64-graphdriver
|
||||
path: /tmp/reports/*
|
||||
retention-days: 1
|
||||
|
||||
test-integration-report:
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 10
|
||||
continue-on-error: ${{ github.event_name != 'pull_request' }}
|
||||
if: always()
|
||||
needs:
|
||||
- test-integration
|
||||
steps:
|
||||
-
|
||||
name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
cache-dependency-path: vendor.sum
|
||||
-
|
||||
name: Download reports
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
path: /tmp/reports
|
||||
pattern: test-reports-integration-arm64-*
|
||||
merge-multiple: true
|
||||
-
|
||||
name: Install teststat
|
||||
run: |
|
||||
go install github.com/vearutop/teststat@${{ env.TESTSTAT_VERSION }}
|
||||
-
|
||||
name: Create summary
|
||||
run: |
|
||||
find /tmp/reports -type f -name '*-go-test-report.json' -exec teststat -markdown {} \+ >> $GITHUB_STEP_SUMMARY
|
||||
3
.github/workflows/bin-image.yml
vendored
3
.github/workflows/bin-image.yml
vendored
@@ -29,7 +29,6 @@ jobs:
|
||||
|
||||
prepare:
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 20 # guardrails timeout for the whole job
|
||||
outputs:
|
||||
platforms: ${{ steps.platforms.outputs.matrix }}
|
||||
steps:
|
||||
@@ -82,7 +81,6 @@ jobs:
|
||||
|
||||
build:
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 120 # guardrails timeout for the whole job
|
||||
needs:
|
||||
- validate-dco
|
||||
- prepare
|
||||
@@ -153,7 +151,6 @@ jobs:
|
||||
|
||||
merge:
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 120 # guardrails timeout for the whole job
|
||||
needs:
|
||||
- build
|
||||
if: always() && !contains(needs.*.result, 'failure') && !contains(needs.*.result, 'cancelled') && github.event_name != 'pull_request' && github.repository == 'moby/moby'
|
||||
|
||||
22
.github/workflows/buildkit.yml
vendored
22
.github/workflows/buildkit.yml
vendored
@@ -13,7 +13,8 @@ on:
|
||||
pull_request:
|
||||
|
||||
env:
|
||||
GO_VERSION: "1.22.10"
|
||||
GO_VERSION: "1.21.12"
|
||||
ALPINE_VERSION: "3.19"
|
||||
DESTDIR: ./build
|
||||
|
||||
jobs:
|
||||
@@ -22,7 +23,6 @@ jobs:
|
||||
|
||||
build:
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 120 # guardrails timeout for the whole job
|
||||
needs:
|
||||
- validate-dco
|
||||
steps:
|
||||
@@ -48,12 +48,9 @@ jobs:
|
||||
|
||||
test:
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 120 # guardrails timeout for the whole job
|
||||
timeout-minutes: 120
|
||||
needs:
|
||||
- build
|
||||
env:
|
||||
TEST_IMAGE_BUILD: "0"
|
||||
TEST_IMAGE_ID: "buildkit-tests"
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
@@ -88,11 +85,6 @@ jobs:
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
path: moby
|
||||
-
|
||||
name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
-
|
||||
name: BuildKit ref
|
||||
run: |
|
||||
@@ -124,14 +116,6 @@ jobs:
|
||||
sudo service docker restart
|
||||
docker version
|
||||
docker info
|
||||
-
|
||||
name: Build test image
|
||||
uses: docker/bake-action@v4
|
||||
with:
|
||||
workdir: ./buildkit
|
||||
targets: integration-tests
|
||||
set: |
|
||||
*.output=type=docker,name=${{ env.TEST_IMAGE_ID }}
|
||||
-
|
||||
name: Test
|
||||
run: |
|
||||
|
||||
5
.github/workflows/ci.yml
vendored
5
.github/workflows/ci.yml
vendored
@@ -21,7 +21,6 @@ jobs:
|
||||
|
||||
build:
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 20 # guardrails timeout for the whole job
|
||||
needs:
|
||||
- validate-dco
|
||||
strategy:
|
||||
@@ -54,8 +53,7 @@ jobs:
|
||||
find ${{ env.DESTDIR }} -type f -exec file -e ascii -- {} +
|
||||
|
||||
prepare-cross:
|
||||
runs-on: ubuntu-24.04
|
||||
timeout-minutes: 20 # guardrails timeout for the whole job
|
||||
runs-on: ubuntu-latest
|
||||
needs:
|
||||
- validate-dco
|
||||
outputs:
|
||||
@@ -77,7 +75,6 @@ jobs:
|
||||
|
||||
cross:
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 20 # guardrails timeout for the whole job
|
||||
needs:
|
||||
- validate-dco
|
||||
- prepare-cross
|
||||
|
||||
8
.github/workflows/test.yml
vendored
8
.github/workflows/test.yml
vendored
@@ -13,7 +13,7 @@ on:
|
||||
pull_request:
|
||||
|
||||
env:
|
||||
GO_VERSION: "1.22.10"
|
||||
GO_VERSION: "1.21.12"
|
||||
GIT_PAGER: "cat"
|
||||
PAGER: "cat"
|
||||
|
||||
@@ -23,7 +23,6 @@ jobs:
|
||||
|
||||
build-dev:
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 120 # guardrails timeout for the whole job
|
||||
needs:
|
||||
- validate-dco
|
||||
strategy:
|
||||
@@ -72,7 +71,6 @@ jobs:
|
||||
|
||||
validate-prepare:
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 10 # guardrails timeout for the whole job
|
||||
needs:
|
||||
- validate-dco
|
||||
outputs:
|
||||
@@ -94,7 +92,7 @@ jobs:
|
||||
|
||||
validate:
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 30 # guardrails timeout for the whole job
|
||||
timeout-minutes: 120
|
||||
needs:
|
||||
- validate-prepare
|
||||
- build-dev
|
||||
@@ -128,7 +126,6 @@ jobs:
|
||||
|
||||
smoke-prepare:
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 10 # guardrails timeout for the whole job
|
||||
needs:
|
||||
- validate-dco
|
||||
outputs:
|
||||
@@ -150,7 +147,6 @@ jobs:
|
||||
|
||||
smoke:
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 20 # guardrails timeout for the whole job
|
||||
needs:
|
||||
- smoke-prepare
|
||||
strategy:
|
||||
|
||||
3
.github/workflows/validate-pr.yml
vendored
3
.github/workflows/validate-pr.yml
vendored
@@ -7,7 +7,6 @@ on:
|
||||
jobs:
|
||||
check-area-label:
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 120 # guardrails timeout for the whole job
|
||||
steps:
|
||||
- name: Missing `area/` label
|
||||
if: contains(join(github.event.pull_request.labels.*.name, ','), 'impact/') && !contains(join(github.event.pull_request.labels.*.name, ','), 'area/')
|
||||
@@ -20,7 +19,6 @@ jobs:
|
||||
check-changelog:
|
||||
if: contains(join(github.event.pull_request.labels.*.name, ','), 'impact/')
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 120 # guardrails timeout for the whole job
|
||||
env:
|
||||
PR_BODY: |
|
||||
${{ github.event.pull_request.body }}
|
||||
@@ -49,7 +47,6 @@ jobs:
|
||||
|
||||
check-pr-branch:
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 120 # guardrails timeout for the whole job
|
||||
env:
|
||||
PR_TITLE: ${{ github.event.pull_request.title }}
|
||||
steps:
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
linters:
|
||||
enable:
|
||||
- depguard
|
||||
- dupword # Checks for duplicate words in the source code.
|
||||
- goimports
|
||||
- gosec
|
||||
- gosimple
|
||||
@@ -26,11 +25,6 @@ linters:
|
||||
- docs
|
||||
|
||||
linters-settings:
|
||||
dupword:
|
||||
ignore:
|
||||
- "true" # some tests use this as expected output
|
||||
- "false" # some tests use this as expected output
|
||||
- "root" # for tests using "ls" output with files owned by "root:root"
|
||||
importas:
|
||||
# Do not allow unaliased imports of aliased packages.
|
||||
no-unaliased: true
|
||||
@@ -38,34 +32,19 @@ linters-settings:
|
||||
alias:
|
||||
# Enforce alias to prevent it accidentally being used instead of our
|
||||
# own errdefs package (or vice-versa).
|
||||
- pkg: github.com/containerd/errdefs
|
||||
- pkg: github.com/containerd/containerd/errdefs
|
||||
alias: cerrdefs
|
||||
- pkg: github.com/opencontainers/image-spec/specs-go/v1
|
||||
alias: ocispec
|
||||
|
||||
govet:
|
||||
check-shadowing: false
|
||||
|
||||
gosec:
|
||||
excludes:
|
||||
- G115 # FIXME temporarily suppress 'G115: integer overflow conversion': it produces many hits, some of which may be false positives, and need to be looked at; see https://github.com/moby/moby/issues/48358
|
||||
|
||||
depguard:
|
||||
rules:
|
||||
main:
|
||||
deny:
|
||||
- pkg: io/ioutil
|
||||
desc: The io/ioutil package has been deprecated, see https://go.dev/doc/go1.16#ioutil
|
||||
- pkg: "github.com/stretchr/testify/assert"
|
||||
desc: Use "gotest.tools/v3/assert" instead
|
||||
- pkg: "github.com/stretchr/testify/require"
|
||||
desc: Use "gotest.tools/v3/assert" instead
|
||||
- pkg: "github.com/stretchr/testify/suite"
|
||||
desc: Do not use
|
||||
- pkg: github.com/containerd/containerd/errdefs
|
||||
desc: The errdefs package has moved to a separate module, https://github.com/containerd/errdefs
|
||||
- pkg: github.com/containerd/containerd/log
|
||||
desc: The logs package has moved to a separate module, https://github.com/containerd/log
|
||||
revive:
|
||||
rules:
|
||||
# FIXME make sure all packages have a description. Currently, there's many packages without.
|
||||
|
||||
6
.mailmap
6
.mailmap
@@ -173,8 +173,6 @@ Dattatraya Kumbhar <dattatraya.kumbhar@gslab.com>
|
||||
Dave Goodchild <buddhamagnet@gmail.com>
|
||||
Dave Henderson <dhenderson@gmail.com> <Dave.Henderson@ca.ibm.com>
|
||||
Dave Tucker <dt@docker.com> <dave@dtucker.co.uk>
|
||||
David Dooling <dooling@gmail.com>
|
||||
David Dooling <dooling@gmail.com> <david.dooling@docker.com>
|
||||
David M. Karr <davidmichaelkarr@gmail.com>
|
||||
David Sheets <dsheets@docker.com> <sheets@alum.mit.edu>
|
||||
David Sissitka <me@dsissitka.com>
|
||||
@@ -221,8 +219,6 @@ Felix Hupfeld <felix@quobyte.com> <quofelix@users.noreply.github.com>
|
||||
Felix Ruess <felix.ruess@gmail.com> <felix.ruess@roboception.de>
|
||||
Feng Yan <fy2462@gmail.com>
|
||||
Fengtu Wang <wangfengtu@huawei.com> <wangfengtu@huawei.com>
|
||||
Filipe Pina <hzlu1ot0@duck.com>
|
||||
Filipe Pina <hzlu1ot0@duck.com> <636320+fopina@users.noreply.github.com>
|
||||
Francisco Carriedo <fcarriedo@gmail.com>
|
||||
Frank Rosquin <frank.rosquin+github@gmail.com> <frank.rosquin@gmail.com>
|
||||
Frank Yang <yyb196@gmail.com>
|
||||
@@ -274,7 +270,6 @@ Hollie Teal <hollie@docker.com> <hollie.teal@docker.com>
|
||||
Hollie Teal <hollie@docker.com> <hollietealok@users.noreply.github.com>
|
||||
hsinko <21551195@zju.edu.cn> <hsinko@users.noreply.github.com>
|
||||
Hu Keping <hukeping@huawei.com>
|
||||
Huajin Tong <fliterdashen@gmail.com>
|
||||
Hui Kang <hkang.sunysb@gmail.com>
|
||||
Hui Kang <hkang.sunysb@gmail.com> <kangh@us.ibm.com>
|
||||
Huu Nguyen <huu@prismskylabs.com> <whoshuu@gmail.com>
|
||||
@@ -568,7 +563,6 @@ Sebastiaan van Stijn <github@gone.nl> <sebastiaan@ws-key-sebas3.dpi1.dpi>
|
||||
Sebastiaan van Stijn <github@gone.nl> <thaJeztah@users.noreply.github.com>
|
||||
Sebastian Thomschke <sebthom@users.noreply.github.com>
|
||||
Seongyeol Lim <seongyeol37@gmail.com>
|
||||
Serhii Nakon <serhii.n@thescimus.com>
|
||||
Shaun Kaasten <shaunk@gmail.com>
|
||||
Shawn Landden <shawn@churchofgit.com> <shawnlandden@gmail.com>
|
||||
Shengbo Song <thomassong@tencent.com>
|
||||
|
||||
9
AUTHORS
9
AUTHORS
@@ -669,7 +669,6 @@ Erik Hollensbe <github@hollensbe.org>
|
||||
Erik Inge Bolsø <knan@redpill-linpro.com>
|
||||
Erik Kristensen <erik@erikkristensen.com>
|
||||
Erik Sipsma <erik@sipsma.dev>
|
||||
Erik Sjölund <erik.sjolund@gmail.com>
|
||||
Erik St. Martin <alakriti@gmail.com>
|
||||
Erik Weathers <erikdw@gmail.com>
|
||||
Erno Hopearuoho <erno.hopearuoho@gmail.com>
|
||||
@@ -732,7 +731,6 @@ Feroz Salam <feroz.salam@sourcegraph.com>
|
||||
Ferran Rodenas <frodenas@gmail.com>
|
||||
Filipe Brandenburger <filbranden@google.com>
|
||||
Filipe Oliveira <contato@fmoliveira.com.br>
|
||||
Filipe Pina <hzlu1ot0@duck.com>
|
||||
Flavio Castelli <fcastelli@suse.com>
|
||||
Flavio Crisciani <flavio.crisciani@docker.com>
|
||||
Florian <FWirtz@users.noreply.github.com>
|
||||
@@ -877,8 +875,6 @@ Hsing-Yu (David) Chen <davidhsingyuchen@gmail.com>
|
||||
hsinko <21551195@zju.edu.cn>
|
||||
Hu Keping <hukeping@huawei.com>
|
||||
Hu Tao <hutao@cn.fujitsu.com>
|
||||
Huajin Tong <fliterdashen@gmail.com>
|
||||
huang-jl <1046678590@qq.com>
|
||||
HuanHuan Ye <logindaveye@gmail.com>
|
||||
Huanzhong Zhang <zhanghuanzhong90@gmail.com>
|
||||
Huayi Zhang <irachex@gmail.com>
|
||||
@@ -973,7 +969,6 @@ Jannick Fahlbusch <git@jf-projects.de>
|
||||
Januar Wayong <januar@gmail.com>
|
||||
Jared Biel <jared.biel@bolderthinking.com>
|
||||
Jared Hocutt <jaredh@netapp.com>
|
||||
Jaroslav Jindrak <dzejrou@gmail.com>
|
||||
Jaroslaw Zabiello <hipertracker@gmail.com>
|
||||
Jasmine Hegman <jasmine@jhegman.com>
|
||||
Jason A. Donenfeld <Jason@zx2c4.com>
|
||||
@@ -1017,7 +1012,6 @@ Jeffrey Bolle <jeffreybolle@gmail.com>
|
||||
Jeffrey Morgan <jmorganca@gmail.com>
|
||||
Jeffrey van Gogh <jvg@google.com>
|
||||
Jenny Gebske <jennifer@gebske.de>
|
||||
Jeongseok Kang <piono623@naver.com>
|
||||
Jeremy Chambers <jeremy@thehipbot.com>
|
||||
Jeremy Grosser <jeremy@synack.me>
|
||||
Jeremy Huntwork <jhuntwork@lightcubesolutions.com>
|
||||
@@ -1035,7 +1029,6 @@ Jezeniel Zapanta <jpzapanta22@gmail.com>
|
||||
Jhon Honce <jhonce@redhat.com>
|
||||
Ji.Zhilong <zhilongji@gmail.com>
|
||||
Jian Liao <jliao@alauda.io>
|
||||
Jian Zeng <anonymousknight96@gmail.com>
|
||||
Jian Zhang <zhangjian.fnst@cn.fujitsu.com>
|
||||
Jiang Jinyang <jjyruby@gmail.com>
|
||||
Jianyong Wu <jianyong.wu@arm.com>
|
||||
@@ -1974,7 +1967,6 @@ Sergey Evstifeev <sergey.evstifeev@gmail.com>
|
||||
Sergii Kabashniuk <skabashnyuk@codenvy.com>
|
||||
Sergio Lopez <slp@redhat.com>
|
||||
Serhat Gülçiçek <serhat25@gmail.com>
|
||||
Serhii Nakon <serhii.n@thescimus.com>
|
||||
SeungUkLee <lsy931106@gmail.com>
|
||||
Sevki Hasirci <s@sevki.org>
|
||||
Shane Canon <scanon@lbl.gov>
|
||||
@@ -2261,7 +2253,6 @@ VladimirAus <v_roudakov@yahoo.com>
|
||||
Vladislav Kolesnikov <vkolesnikov@beget.ru>
|
||||
Vlastimil Zeman <vlastimil.zeman@diffblue.com>
|
||||
Vojtech Vitek (V-Teq) <vvitek@redhat.com>
|
||||
voloder <110066198+voloder@users.noreply.github.com>
|
||||
Walter Leibbrandt <github@wrl.co.za>
|
||||
Walter Stanish <walter@pratyeka.org>
|
||||
Wang Chao <chao.wang@ucloud.cn>
|
||||
|
||||
@@ -101,7 +101,7 @@ the contributors guide.
|
||||
<td>
|
||||
<p>
|
||||
Register for the Docker Community Slack at
|
||||
<a href="https://dockr.ly/comm-slack" target="_blank">https://dockr.ly/comm-slack</a>.
|
||||
<a href="https://dockr.ly/slack" target="_blank">https://dockr.ly/slack</a>.
|
||||
We use the #moby-project channel for general discussion, and there are separate channels for other Moby projects such as #containerd.
|
||||
</p>
|
||||
</td>
|
||||
|
||||
53
Dockerfile
53
Dockerfile
@@ -1,19 +1,19 @@
|
||||
# syntax=docker/dockerfile:1.7
|
||||
# syntax=docker/dockerfile:1
|
||||
|
||||
ARG GO_VERSION=1.22.10
|
||||
ARG GO_VERSION=1.21.12
|
||||
ARG BASE_DEBIAN_DISTRO="bookworm"
|
||||
ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}"
|
||||
ARG XX_VERSION=1.6.1
|
||||
ARG XX_VERSION=1.2.1
|
||||
|
||||
ARG VPNKIT_VERSION=0.5.0
|
||||
|
||||
ARG DOCKERCLI_REPOSITORY="https://github.com/docker/cli.git"
|
||||
ARG DOCKERCLI_VERSION=v26.0.0
|
||||
ARG DOCKERCLI_VERSION=v25.0.2
|
||||
# cli version used for integration-cli tests
|
||||
ARG DOCKERCLI_INTEGRATION_REPOSITORY="https://github.com/docker/cli.git"
|
||||
ARG DOCKERCLI_INTEGRATION_VERSION=v17.06.2-ce
|
||||
ARG BUILDX_VERSION=0.13.1
|
||||
ARG COMPOSE_VERSION=v2.25.0
|
||||
ARG BUILDX_VERSION=0.12.1
|
||||
ARG COMPOSE_VERSION=v2.24.5
|
||||
|
||||
ARG SYSTEMD="false"
|
||||
ARG DOCKER_STATIC=1
|
||||
@@ -24,12 +24,6 @@ ARG DOCKER_STATIC=1
|
||||
# specified here should match a current release.
|
||||
ARG REGISTRY_VERSION=2.8.3
|
||||
|
||||
# delve is currently only supported on linux/amd64 and linux/arm64;
|
||||
# https://github.com/go-delve/delve/blob/v1.8.1/pkg/proc/native/support_sentinel.go#L1-L6
|
||||
ARG DELVE_SUPPORTED=${TARGETPLATFORM#linux/amd64} DELVE_SUPPORTED=${DELVE_SUPPORTED#linux/arm64}
|
||||
ARG DELVE_SUPPORTED=${DELVE_SUPPORTED:+"unsupported"}
|
||||
ARG DELVE_SUPPORTED=${DELVE_SUPPORTED:-"supported"}
|
||||
|
||||
# cross compilation helper
|
||||
FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx
|
||||
|
||||
@@ -147,10 +141,10 @@ RUN git init . && git remote add origin "https://github.com/go-delve/delve.git"
|
||||
# from the https://github.com/go-delve/delve repository.
|
||||
# It can be used to run Docker with a possibility of
|
||||
# attaching debugger to it.
|
||||
ARG DELVE_VERSION=v1.23.0
|
||||
ARG DELVE_VERSION=v1.21.1
|
||||
RUN git fetch -q --depth 1 origin "${DELVE_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD
|
||||
|
||||
FROM base AS delve-supported
|
||||
FROM base AS delve-build
|
||||
WORKDIR /usr/src/delve
|
||||
ARG TARGETPLATFORM
|
||||
RUN --mount=from=delve-src,src=/usr/src/delve,rw \
|
||||
@@ -161,8 +155,16 @@ RUN --mount=from=delve-src,src=/usr/src/delve,rw \
|
||||
xx-verify /build/dlv
|
||||
EOT
|
||||
|
||||
FROM binary-dummy AS delve-unsupported
|
||||
FROM delve-${DELVE_SUPPORTED} AS delve
|
||||
# delve is currently only supported on linux/amd64 and linux/arm64;
|
||||
# https://github.com/go-delve/delve/blob/v1.8.1/pkg/proc/native/support_sentinel.go#L1-L6
|
||||
FROM binary-dummy AS delve-windows
|
||||
FROM binary-dummy AS delve-linux-arm
|
||||
FROM binary-dummy AS delve-linux-ppc64le
|
||||
FROM binary-dummy AS delve-linux-s390x
|
||||
FROM delve-build AS delve-linux-amd64
|
||||
FROM delve-build AS delve-linux-arm64
|
||||
FROM delve-linux-${TARGETARCH} AS delve-linux
|
||||
FROM delve-${TARGETOS} AS delve
|
||||
|
||||
FROM base AS tomll
|
||||
# GOTOML_VERSION specifies the version of the tomll binary to build and install
|
||||
@@ -196,7 +198,7 @@ RUN git init . && git remote add origin "https://github.com/containerd/container
|
||||
# When updating the binary version you may also need to update the vendor
|
||||
# version to pick up bug fixes or new APIs, however, usually the Go packages
|
||||
# are built from a commit from the master branch.
|
||||
ARG CONTAINERD_VERSION=v1.7.18
|
||||
ARG CONTAINERD_VERSION=v1.7.20
|
||||
RUN git fetch -q --depth 1 origin "${CONTAINERD_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD
|
||||
|
||||
FROM base AS containerd-build
|
||||
@@ -229,7 +231,7 @@ FROM binary-dummy AS containerd-windows
|
||||
FROM containerd-${TARGETOS} AS containerd
|
||||
|
||||
FROM base AS golangci_lint
|
||||
ARG GOLANGCI_LINT_VERSION=v1.60.2
|
||||
ARG GOLANGCI_LINT_VERSION=v1.55.2
|
||||
RUN --mount=type=cache,target=/root/.cache/go-build \
|
||||
--mount=type=cache,target=/go/pkg/mod \
|
||||
GOBIN=/build/ GO111MODULE=on go install "github.com/golangci/golangci-lint/cmd/golangci-lint@${GOLANGCI_LINT_VERSION}" \
|
||||
@@ -243,18 +245,12 @@ RUN --mount=type=cache,target=/root/.cache/go-build \
|
||||
&& /build/gotestsum --version
|
||||
|
||||
FROM base AS shfmt
|
||||
ARG SHFMT_VERSION=v3.8.0
|
||||
ARG SHFMT_VERSION=v3.6.0
|
||||
RUN --mount=type=cache,target=/root/.cache/go-build \
|
||||
--mount=type=cache,target=/go/pkg/mod \
|
||||
GOBIN=/build/ GO111MODULE=on go install "mvdan.cc/sh/v3/cmd/shfmt@${SHFMT_VERSION}" \
|
||||
&& /build/shfmt --version
|
||||
|
||||
FROM base AS gopls
|
||||
RUN --mount=type=cache,target=/root/.cache/go-build \
|
||||
--mount=type=cache,target=/go/pkg/mod \
|
||||
GOBIN=/build/ GO111MODULE=on go install "golang.org/x/tools/gopls@latest" \
|
||||
&& /build/gopls version
|
||||
|
||||
FROM base AS dockercli
|
||||
WORKDIR /go/src/github.com/docker/cli
|
||||
ARG DOCKERCLI_REPOSITORY
|
||||
@@ -287,7 +283,7 @@ RUN git init . && git remote add origin "https://github.com/opencontainers/runc.
|
||||
# that is used. If you need to update runc, open a pull request in the containerd
|
||||
# project first, and update both after that is merged. When updating RUNC_VERSION,
|
||||
# consider updating runc in vendor.mod accordingly.
|
||||
ARG RUNC_VERSION=v1.1.14
|
||||
ARG RUNC_VERSION=v1.1.12
|
||||
RUN git fetch -q --depth 1 origin "${RUNC_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD
|
||||
|
||||
FROM base AS runc-build
|
||||
@@ -659,11 +655,6 @@ RUN <<EOT
|
||||
docker-proxy --version
|
||||
EOT
|
||||
|
||||
# devcontainer is a stage used by .devcontainer/devcontainer.json
|
||||
FROM dev-base AS devcontainer
|
||||
COPY --link . .
|
||||
COPY --link --from=gopls /build/ /usr/local/bin/
|
||||
|
||||
# usage:
|
||||
# > make shell
|
||||
# > SYSTEMD=true make shell
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
|
||||
# This represents the bare minimum required to build and test Docker.
|
||||
|
||||
ARG GO_VERSION=1.22.10
|
||||
ARG GO_VERSION=1.21.12
|
||||
|
||||
ARG BASE_DEBIAN_DISTRO="bookworm"
|
||||
ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}"
|
||||
|
||||
@@ -161,10 +161,10 @@ FROM ${WINDOWS_BASE_IMAGE}:${WINDOWS_BASE_IMAGE_TAG}
|
||||
# Use PowerShell as the default shell
|
||||
SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop'; $ProgressPreference = 'SilentlyContinue';"]
|
||||
|
||||
ARG GO_VERSION=1.22.10
|
||||
ARG GO_VERSION=1.21.12
|
||||
ARG GOTESTSUM_VERSION=v1.8.2
|
||||
ARG GOWINRES_VERSION=v0.3.1
|
||||
ARG CONTAINERD_VERSION=v1.7.18
|
||||
ARG CONTAINERD_VERSION=v1.7.20
|
||||
|
||||
# Environment variable notes:
|
||||
# - GO_VERSION must be consistent with 'Dockerfile' used by Linux.
|
||||
|
||||
165
Jenkinsfile
vendored
Normal file
165
Jenkinsfile
vendored
Normal file
@@ -0,0 +1,165 @@
|
||||
#!groovy
|
||||
pipeline {
|
||||
agent none
|
||||
|
||||
options {
|
||||
buildDiscarder(logRotator(daysToKeepStr: '30'))
|
||||
timeout(time: 2, unit: 'HOURS')
|
||||
timestamps()
|
||||
}
|
||||
parameters {
|
||||
booleanParam(name: 'arm64', defaultValue: true, description: 'ARM (arm64) Build/Test')
|
||||
booleanParam(name: 'dco', defaultValue: true, description: 'Run the DCO check')
|
||||
}
|
||||
environment {
|
||||
DOCKER_BUILDKIT = '1'
|
||||
DOCKER_EXPERIMENTAL = '1'
|
||||
DOCKER_GRAPHDRIVER = 'overlay2'
|
||||
CHECK_CONFIG_COMMIT = '33a3680e08d1007e72c3b3f1454f823d8e9948ee'
|
||||
TESTDEBUG = '0'
|
||||
TIMEOUT = '120m'
|
||||
}
|
||||
stages {
|
||||
stage('pr-hack') {
|
||||
when { changeRequest() }
|
||||
steps {
|
||||
script {
|
||||
echo "Workaround for PR auto-cancel feature. Borrowed from https://issues.jenkins-ci.org/browse/JENKINS-43353"
|
||||
def buildNumber = env.BUILD_NUMBER as int
|
||||
if (buildNumber > 1) milestone(buildNumber - 1)
|
||||
milestone(buildNumber)
|
||||
}
|
||||
}
|
||||
}
|
||||
stage('DCO-check') {
|
||||
when {
|
||||
beforeAgent true
|
||||
expression { params.dco }
|
||||
}
|
||||
agent { label 'arm64 && ubuntu-2004' }
|
||||
steps {
|
||||
sh '''
|
||||
docker run --rm \
|
||||
-v "$WORKSPACE:/workspace" \
|
||||
-e VALIDATE_REPO=${GIT_URL} \
|
||||
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
|
||||
alpine sh -c 'apk add --no-cache -q bash git openssh-client && git config --system --add safe.directory /workspace && cd /workspace && hack/validate/dco'
|
||||
'''
|
||||
}
|
||||
}
|
||||
stage('Build') {
|
||||
parallel {
|
||||
stage('arm64') {
|
||||
when {
|
||||
beforeAgent true
|
||||
expression { params.arm64 }
|
||||
}
|
||||
agent { label 'arm64 && ubuntu-2004' }
|
||||
environment {
|
||||
TEST_SKIP_INTEGRATION_CLI = '1'
|
||||
}
|
||||
|
||||
stages {
|
||||
stage("Print info") {
|
||||
steps {
|
||||
sh 'docker version'
|
||||
sh 'docker info'
|
||||
sh '''
|
||||
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
|
||||
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
|
||||
&& bash ${WORKSPACE}/check-config.sh || true
|
||||
'''
|
||||
}
|
||||
}
|
||||
stage("Build dev image") {
|
||||
steps {
|
||||
sh 'docker build --force-rm -t docker:${GIT_COMMIT} .'
|
||||
}
|
||||
}
|
||||
stage("Unit tests") {
|
||||
steps {
|
||||
sh '''
|
||||
sudo modprobe ip6table_filter
|
||||
'''
|
||||
sh '''
|
||||
docker run --rm -t --privileged \
|
||||
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
|
||||
--name docker-pr$BUILD_NUMBER \
|
||||
-e DOCKER_EXPERIMENTAL \
|
||||
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
|
||||
-e DOCKER_GRAPHDRIVER \
|
||||
-e VALIDATE_REPO=${GIT_URL} \
|
||||
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
|
||||
docker:${GIT_COMMIT} \
|
||||
hack/test/unit
|
||||
'''
|
||||
}
|
||||
post {
|
||||
always {
|
||||
junit testResults: 'bundles/junit-report*.xml', allowEmptyResults: true
|
||||
}
|
||||
}
|
||||
}
|
||||
stage("Integration tests") {
|
||||
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
|
||||
steps {
|
||||
sh '''
|
||||
docker run --rm -t --privileged \
|
||||
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
|
||||
--name docker-pr$BUILD_NUMBER \
|
||||
-e DOCKER_EXPERIMENTAL \
|
||||
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
|
||||
-e DOCKER_GRAPHDRIVER \
|
||||
-e TESTDEBUG \
|
||||
-e TEST_INTEGRATION_USE_SNAPSHOTTER \
|
||||
-e TEST_SKIP_INTEGRATION_CLI \
|
||||
-e TIMEOUT \
|
||||
-e VALIDATE_REPO=${GIT_URL} \
|
||||
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
|
||||
docker:${GIT_COMMIT} \
|
||||
hack/make.sh \
|
||||
dynbinary \
|
||||
test-integration
|
||||
'''
|
||||
}
|
||||
post {
|
||||
always {
|
||||
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
post {
|
||||
always {
|
||||
sh '''
|
||||
echo "Ensuring container killed."
|
||||
docker rm -vf docker-pr$BUILD_NUMBER || true
|
||||
'''
|
||||
|
||||
sh '''
|
||||
echo "Chowning /workspace to jenkins user"
|
||||
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
|
||||
'''
|
||||
|
||||
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
|
||||
sh '''
|
||||
bundleName=arm64-integration
|
||||
echo "Creating ${bundleName}-bundles.tar.gz"
|
||||
# exclude overlay2 directories
|
||||
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
|
||||
'''
|
||||
|
||||
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
|
||||
}
|
||||
}
|
||||
cleanup {
|
||||
sh 'make clean'
|
||||
deleteDir()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -2,17 +2,8 @@ package api // import "github.com/docker/docker/api"
 
 // Common constants for daemon and client.
 const (
-    // DefaultVersion of the current REST API.
-    DefaultVersion = "1.45"
-
-    // MinSupportedAPIVersion is the minimum API version that can be supported
-    // by the API server, specified as "major.minor". Note that the daemon
-    // may be configured with a different minimum API version, as returned
-    // in [github.com/docker/docker/api/types.Version.MinAPIVersion].
-    //
-    // API requests for API versions lower than the configured version produce
-    // an error.
-    MinSupportedAPIVersion = "1.24"
+    // DefaultVersion of Current REST API
+    DefaultVersion = "1.44"
 
     // NoBaseImageSpecifier is the symbol used by the FROM
     // command to specify that no base image is to be used.
api/server/errorhandler.go (new file, 34 lines)
@@ -0,0 +1,34 @@
package server

import (
    "net/http"

    "github.com/docker/docker/api/server/httpstatus"
    "github.com/docker/docker/api/server/httputils"
    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/versions"
    "github.com/gorilla/mux"
    "google.golang.org/grpc/status"
)

// makeErrorHandler makes an HTTP handler that decodes a Docker error and
// returns it in the response.
func makeErrorHandler(err error) http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        statusCode := httpstatus.FromError(err)
        vars := mux.Vars(r)
        if apiVersionSupportsJSONErrors(vars["version"]) {
            response := &types.ErrorResponse{
                Message: err.Error(),
            }
            _ = httputils.WriteJSON(w, statusCode, response)
        } else {
            http.Error(w, status.Convert(err).Message(), statusCode)
        }
    }
}

func apiVersionSupportsJSONErrors(version string) bool {
    const firstAPIVersionWithJSONErrors = "1.23"
    return version == "" || versions.GreaterThan(version, firstAPIVersionWithJSONErrors)
}
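The gate in apiVersionSupportsJSONErrors is what decides whether a client receives a structured JSON error or a bare text body: any request newer than API 1.23, or one with no version in the path, gets JSON. Below is a minimal stand-alone sketch of that decision using only the Go standard library; parseMajorMinor and greaterThan are simplified stand-ins for the github.com/docker/docker/api/types/versions helpers, and the /v1.22 route is purely illustrative, not a real daemon endpoint.

package main

import (
    "encoding/json"
    "errors"
    "net/http"
    "strconv"
    "strings"
)

type errorResponse struct {
    Message string `json:"message"`
}

// parseMajorMinor turns "1.23" into (1, 23); malformed parts parse as 0.
func parseMajorMinor(v string) (int, int) {
    parts := strings.SplitN(v, ".", 2)
    major, _ := strconv.Atoi(parts[0])
    minor := 0
    if len(parts) == 2 {
        minor, _ = strconv.Atoi(parts[1])
    }
    return major, minor
}

// greaterThan reports whether a > b for "major.minor" API version strings.
func greaterThan(a, b string) bool {
    amaj, amin := parseMajorMinor(a)
    bmaj, bmin := parseMajorMinor(b)
    return amaj > bmaj || (amaj == bmaj && amin > bmin)
}

// writeError mirrors the shape of makeErrorHandler: JSON for versions newer
// than 1.23 (or when no version was negotiated), plain text for older clients.
func writeError(w http.ResponseWriter, apiVersion string, statusCode int, err error) {
    if apiVersion == "" || greaterThan(apiVersion, "1.23") {
        w.Header().Set("Content-Type", "application/json")
        w.WriteHeader(statusCode)
        _ = json.NewEncoder(w).Encode(errorResponse{Message: err.Error()})
        return
    }
    http.Error(w, err.Error(), statusCode)
}

func main() {
    // Hypothetical route: the fixed "1.22" stands in for the version segment
    // that the real server extracts from the request path via gorilla/mux.
    http.HandleFunc("/v1.22/containers/json", func(w http.ResponseWriter, r *http.Request) {
        writeError(w, "1.22", http.StatusNotFound, errors.New("no such container"))
    })
    _ = http.ListenAndServe("127.0.0.1:8080", nil)
}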
@@ -5,7 +5,7 @@ import (
     "fmt"
     "net/http"
 
-    cerrdefs "github.com/containerd/errdefs"
+    cerrdefs "github.com/containerd/containerd/errdefs"
     "github.com/containerd/log"
     "github.com/docker/distribution/registry/api/errcode"
     "github.com/docker/docker/errdefs"
@@ -12,4 +12,5 @@ import (
 // container configuration.
 type ContainerDecoder interface {
     DecodeConfig(src io.Reader) (*container.Config, *container.HostConfig, *network.NetworkingConfig, error)
+    DecodeHostConfig(src io.Reader) (*container.HostConfig, error)
 }
@@ -4,7 +4,6 @@ import (
     "context"
     "fmt"
     "io"
-    "net/http"
     "net/url"
     "sort"
 
@@ -17,11 +16,7 @@ import (
 // WriteLogStream writes an encoded byte stream of log messages from the
 // messages channel, multiplexing them with a stdcopy.Writer if mux is true
-func WriteLogStream(_ context.Context, w http.ResponseWriter, msgs <-chan *backend.LogMessage, config *container.LogsOptions, mux bool) {
-    // See https://github.com/moby/moby/issues/47448
-    // Trigger headers to be written immediately.
-    w.WriteHeader(http.StatusOK)
-
+func WriteLogStream(_ context.Context, w io.Writer, msgs <-chan *backend.LogMessage, config *container.LogsOptions, mux bool) {
     wf := ioutils.NewWriteFlusher(w)
     defer wf.Close()
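Both signatures of WriteLogStream stream messages incrementally: the io.Writer variant wraps the writer in ioutils.NewWriteFlusher, and the http.ResponseWriter variant additionally calls w.WriteHeader(http.StatusOK) up front so the client sees response headers before the first log message arrives (the moby/moby#47448 comment). A minimal stand-alone sketch of that flush-as-you-write pattern with plain net/http follows; flushWriter and the /containers/logs route are illustrative assumptions, not moby code.

package main

import (
    "fmt"
    "net/http"
    "time"
)

// flushWriter flushes the ResponseWriter after every Write so each chunk
// reaches the client immediately instead of sitting in a buffer.
type flushWriter struct {
    w http.ResponseWriter
    f http.Flusher
}

func (fw flushWriter) Write(p []byte) (int, error) {
    n, err := fw.w.Write(p)
    if fw.f != nil {
        fw.f.Flush()
    }
    return n, err
}

func logsHandler(w http.ResponseWriter, r *http.Request) {
    // Send headers immediately, before any log message is available, for the
    // same reason the http.ResponseWriter variant above calls
    // w.WriteHeader(http.StatusOK) up front.
    w.WriteHeader(http.StatusOK)

    f, _ := w.(http.Flusher) // nil if the ResponseWriter cannot flush
    out := flushWriter{w: w, f: f}
    for i := 0; i < 3; i++ {
        fmt.Fprintf(out, "log line %d\n", i)
        time.Sleep(100 * time.Millisecond) // stand-in for waiting on a message channel
    }
}

func main() {
    http.HandleFunc("/containers/logs", logsHandler)
    _ = http.ListenAndServe("127.0.0.1:8080", nil)
}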
@@ -6,7 +6,6 @@ import (
     "net/http"
     "runtime"
 
-    "github.com/docker/docker/api"
     "github.com/docker/docker/api/server/httputils"
     "github.com/docker/docker/api/types/versions"
 )
@@ -14,40 +13,19 @@ import (
 // VersionMiddleware is a middleware that
 // validates the client and server versions.
 type VersionMiddleware struct {
-    serverVersion string
-
-    // defaultAPIVersion is the default API version provided by the API server,
-    // specified as "major.minor". It is usually configured to the latest API
-    // version [github.com/docker/docker/api.DefaultVersion].
-    //
-    // API requests for API versions greater than this version are rejected by
-    // the server and produce a [versionUnsupportedError].
-    defaultAPIVersion string
-
-    // minAPIVersion is the minimum API version provided by the API server,
-    // specified as "major.minor".
-    //
-    // API requests for API versions lower than this version are rejected by
-    // the server and produce a [versionUnsupportedError].
-    minAPIVersion string
+    serverVersion  string
+    defaultVersion string
+    minVersion     string
 }
 
-// NewVersionMiddleware creates a VersionMiddleware with the given versions.
-func NewVersionMiddleware(serverVersion, defaultAPIVersion, minAPIVersion string) (*VersionMiddleware, error) {
-    if versions.LessThan(defaultAPIVersion, api.MinSupportedAPIVersion) || versions.GreaterThan(defaultAPIVersion, api.DefaultVersion) {
-        return nil, fmt.Errorf("invalid default API version (%s): must be between %s and %s", defaultAPIVersion, api.MinSupportedAPIVersion, api.DefaultVersion)
+// NewVersionMiddleware creates a new VersionMiddleware
+// with the default versions.
+func NewVersionMiddleware(s, d, m string) VersionMiddleware {
+    return VersionMiddleware{
+        serverVersion:  s,
+        defaultVersion: d,
+        minVersion:     m,
     }
-    if versions.LessThan(minAPIVersion, api.MinSupportedAPIVersion) || versions.GreaterThan(minAPIVersion, api.DefaultVersion) {
-        return nil, fmt.Errorf("invalid minimum API version (%s): must be between %s and %s", minAPIVersion, api.MinSupportedAPIVersion, api.DefaultVersion)
-    }
-    if versions.GreaterThan(minAPIVersion, defaultAPIVersion) {
-        return nil, fmt.Errorf("invalid API version: the minimum API version (%s) is higher than the default version (%s)", minAPIVersion, defaultAPIVersion)
-    }
-    return &VersionMiddleware{
-        serverVersion:     serverVersion,
-        defaultAPIVersion: defaultAPIVersion,
-        minAPIVersion:     minAPIVersion,
-    }, nil
 }
 
 type versionUnsupportedError struct {
@@ -67,18 +45,18 @@ func (e versionUnsupportedError) InvalidParameter() {}
 func (v VersionMiddleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
     return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
         w.Header().Set("Server", fmt.Sprintf("Docker/%s (%s)", v.serverVersion, runtime.GOOS))
-        w.Header().Set("API-Version", v.defaultAPIVersion)
+        w.Header().Set("API-Version", v.defaultVersion)
         w.Header().Set("OSType", runtime.GOOS)
 
         apiVersion := vars["version"]
         if apiVersion == "" {
-            apiVersion = v.defaultAPIVersion
+            apiVersion = v.defaultVersion
         }
-        if versions.LessThan(apiVersion, v.minAPIVersion) {
-            return versionUnsupportedError{version: apiVersion, minVersion: v.minAPIVersion}
+        if versions.LessThan(apiVersion, v.minVersion) {
+            return versionUnsupportedError{version: apiVersion, minVersion: v.minVersion}
         }
-        if versions.GreaterThan(apiVersion, v.defaultAPIVersion) {
-            return versionUnsupportedError{version: apiVersion, maxVersion: v.defaultAPIVersion}
+        if versions.GreaterThan(apiVersion, v.defaultVersion) {
+            return versionUnsupportedError{version: apiVersion, maxVersion: v.defaultVersion}
         }
         ctx = context.WithValue(ctx, httputils.APIVersionKey{}, apiVersion)
         return handler(ctx, w, r, vars)
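In both variants, WrapHandler does three things before the wrapped handler runs: it advertises the server in the Server, API-Version and OSType headers, falls back to the server's default API version when the request carries none, and rejects any version outside the window between the minimum and the default. The stand-alone sketch below reproduces that flow with plain net/http; it parses the version prefix from the URL by hand and uses a small major.minor comparison instead of moby's api/types/versions package, so the names, routes and error texts are illustrative assumptions only.

package main

import (
    "context"
    "fmt"
    "net/http"
    "strconv"
    "strings"
)

type apiVersionKey struct{}

// lessThan compares two "major.minor" strings numerically, so "1.9" < "1.10".
func lessThan(a, b string) bool {
    parse := func(v string) (int, int) {
        p := strings.SplitN(v, ".", 2)
        major, _ := strconv.Atoi(p[0])
        minor := 0
        if len(p) == 2 {
            minor, _ = strconv.Atoi(p[1])
        }
        return major, minor
    }
    amaj, amin := parse(a)
    bmaj, bmin := parse(b)
    return amaj < bmaj || (amaj == bmaj && amin < bmin)
}

type versionMiddleware struct {
    serverVersion, defaultVersion, minVersion string
}

func (v versionMiddleware) wrap(next http.HandlerFunc) http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        w.Header().Set("Server", "Docker/"+v.serverVersion)
        w.Header().Set("API-Version", v.defaultVersion)

        // Take the version from a "/v1.44/..." style prefix; fall back to the
        // server default when the request is unversioned.
        apiVersion := v.defaultVersion
        if rest := strings.TrimPrefix(r.URL.Path, "/v"); rest != r.URL.Path {
            apiVersion = strings.SplitN(rest, "/", 2)[0]
        }
        if lessThan(apiVersion, v.minVersion) {
            http.Error(w, fmt.Sprintf("client version %s is too old; minimum supported API version is %s", apiVersion, v.minVersion), http.StatusBadRequest)
            return
        }
        if lessThan(v.defaultVersion, apiVersion) {
            http.Error(w, fmt.Sprintf("client version %s is too new; maximum supported API version is %s", apiVersion, v.defaultVersion), http.StatusBadRequest)
            return
        }
        // Stash the negotiated version for downstream handlers.
        next(w, r.WithContext(context.WithValue(r.Context(), apiVersionKey{}, apiVersion)))
    }
}

func main() {
    vm := versionMiddleware{serverVersion: "1.2.3", defaultVersion: "1.44", minVersion: "1.24"}
    http.HandleFunc("/", vm.wrap(func(w http.ResponseWriter, r *http.Request) {
        fmt.Fprintf(w, "negotiated API version: %v\n", r.Context().Value(apiVersionKey{}))
    }))
    _ = http.ListenAndServe("127.0.0.1:8080", nil)
}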
@@ -2,82 +2,27 @@ package middleware // import "github.com/docker/docker/api/server/middleware"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/api"
|
||||
"github.com/docker/docker/api/server/httputils"
|
||||
"gotest.tools/v3/assert"
|
||||
is "gotest.tools/v3/assert/cmp"
|
||||
)
|
||||
|
||||
func TestNewVersionMiddlewareValidation(t *testing.T) {
|
||||
tests := []struct {
|
||||
doc, defaultVersion, minVersion, expectedErr string
|
||||
}{
|
||||
{
|
||||
doc: "defaults",
|
||||
defaultVersion: api.DefaultVersion,
|
||||
minVersion: api.MinSupportedAPIVersion,
|
||||
},
|
||||
{
|
||||
doc: "invalid default lower than min",
|
||||
defaultVersion: api.MinSupportedAPIVersion,
|
||||
minVersion: api.DefaultVersion,
|
||||
expectedErr: fmt.Sprintf("invalid API version: the minimum API version (%s) is higher than the default version (%s)", api.DefaultVersion, api.MinSupportedAPIVersion),
|
||||
},
|
||||
{
|
||||
doc: "invalid default too low",
|
||||
defaultVersion: "0.1",
|
||||
minVersion: api.MinSupportedAPIVersion,
|
||||
expectedErr: fmt.Sprintf("invalid default API version (0.1): must be between %s and %s", api.MinSupportedAPIVersion, api.DefaultVersion),
|
||||
},
|
||||
{
|
||||
doc: "invalid default too high",
|
||||
defaultVersion: "9999.9999",
|
||||
minVersion: api.DefaultVersion,
|
||||
expectedErr: fmt.Sprintf("invalid default API version (9999.9999): must be between %s and %s", api.MinSupportedAPIVersion, api.DefaultVersion),
|
||||
},
|
||||
{
|
||||
doc: "invalid minimum too low",
|
||||
defaultVersion: api.MinSupportedAPIVersion,
|
||||
minVersion: "0.1",
|
||||
expectedErr: fmt.Sprintf("invalid minimum API version (0.1): must be between %s and %s", api.MinSupportedAPIVersion, api.DefaultVersion),
|
||||
},
|
||||
{
|
||||
doc: "invalid minimum too high",
|
||||
defaultVersion: api.DefaultVersion,
|
||||
minVersion: "9999.9999",
|
||||
expectedErr: fmt.Sprintf("invalid minimum API version (9999.9999): must be between %s and %s", api.MinSupportedAPIVersion, api.DefaultVersion),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
tc := tc
|
||||
t.Run(tc.doc, func(t *testing.T) {
|
||||
_, err := NewVersionMiddleware("1.2.3", tc.defaultVersion, tc.minVersion)
|
||||
if tc.expectedErr == "" {
|
||||
assert.Check(t, err)
|
||||
} else {
|
||||
assert.Check(t, is.Error(err, tc.expectedErr))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestVersionMiddlewareVersion(t *testing.T) {
|
||||
expectedVersion := "<not set>"
|
||||
defaultVersion := "1.10.0"
|
||||
minVersion := "1.2.0"
|
||||
expectedVersion := defaultVersion
|
||||
handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
v := httputils.VersionFromContext(ctx)
|
||||
assert.Check(t, is.Equal(expectedVersion, v))
|
||||
return nil
|
||||
}
|
||||
|
||||
m, err := NewVersionMiddleware("1.2.3", api.DefaultVersion, api.MinSupportedAPIVersion)
|
||||
assert.NilError(t, err)
|
||||
m := NewVersionMiddleware(defaultVersion, defaultVersion, minVersion)
|
||||
h := m.WrapHandler(handler)
|
||||
|
||||
req, _ := http.NewRequest(http.MethodGet, "/containers/json", nil)
|
||||
@@ -90,19 +35,19 @@ func TestVersionMiddlewareVersion(t *testing.T) {
|
||||
errString string
|
||||
}{
|
||||
{
|
||||
expectedVersion: api.DefaultVersion,
|
||||
expectedVersion: "1.10.0",
|
||||
},
|
||||
{
|
||||
reqVersion: api.MinSupportedAPIVersion,
|
||||
expectedVersion: api.MinSupportedAPIVersion,
|
||||
reqVersion: "1.9.0",
|
||||
expectedVersion: "1.9.0",
|
||||
},
|
||||
{
|
||||
reqVersion: "0.1",
|
||||
errString: fmt.Sprintf("client version 0.1 is too old. Minimum supported API version is %s, please upgrade your client to a newer version", api.MinSupportedAPIVersion),
|
||||
errString: "client version 0.1 is too old. Minimum supported API version is 1.2.0, please upgrade your client to a newer version",
|
||||
},
|
||||
{
|
||||
reqVersion: "9999.9999",
|
||||
errString: fmt.Sprintf("client version 9999.9999 is too new. Maximum supported API version is %s", api.DefaultVersion),
|
||||
errString: "client version 9999.9999 is too new. Maximum supported API version is 1.10.0",
|
||||
},
|
||||
}
|
||||
|
||||
@@ -126,8 +71,9 @@ func TestVersionMiddlewareWithErrorsReturnsHeaders(t *testing.T) {
|
||||
return nil
|
||||
}
|
||||
|
||||
m, err := NewVersionMiddleware("1.2.3", api.DefaultVersion, api.MinSupportedAPIVersion)
|
||||
assert.NilError(t, err)
|
||||
defaultVersion := "1.10.0"
|
||||
minVersion := "1.2.0"
|
||||
m := NewVersionMiddleware(defaultVersion, defaultVersion, minVersion)
|
||||
h := m.WrapHandler(handler)
|
||||
|
||||
req, _ := http.NewRequest(http.MethodGet, "/containers/json", nil)
|
||||
@@ -135,12 +81,12 @@ func TestVersionMiddlewareWithErrorsReturnsHeaders(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
vars := map[string]string{"version": "0.1"}
|
||||
err = h(ctx, resp, req, vars)
|
||||
err := h(ctx, resp, req, vars)
|
||||
assert.Check(t, is.ErrorContains(err, ""))
|
||||
|
||||
hdr := resp.Result().Header
|
||||
assert.Check(t, is.Contains(hdr.Get("Server"), "Docker/1.2.3"))
|
||||
assert.Check(t, is.Contains(hdr.Get("Server"), "Docker/"+defaultVersion))
|
||||
assert.Check(t, is.Contains(hdr.Get("Server"), runtime.GOOS))
|
||||
assert.Check(t, is.Equal(hdr.Get("API-Version"), api.DefaultVersion))
|
||||
assert.Check(t, is.Equal(hdr.Get("API-Version"), defaultVersion))
|
||||
assert.Check(t, is.Equal(hdr.Get("OSType"), runtime.GOOS))
|
||||
}
|
||||
|
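For orientation, a minimal sketch of the ordering check the validation tests above expect from the constructor; the helper name is an assumption, the real code lives in api/server/middleware:

package sketch

import (
	"fmt"

	"github.com/docker/docker/api/types/versions"
)

// validateAPIVersions is an illustrative helper (not the real constructor):
// the configured minimum API version may not exceed the configured default.
func validateAPIVersions(defaultVersion, minVersion string) error {
	if versions.LessThan(defaultVersion, minVersion) {
		return fmt.Errorf("invalid API version: the minimum API version (%s) is higher than the default version (%s)",
			minVersion, defaultVersion)
	}
	return nil
}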
||||
@@ -42,7 +42,6 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBui
|
||||
SuppressOutput: httputils.BoolValue(r, "q"),
|
||||
NoCache: httputils.BoolValue(r, "nocache"),
|
||||
ForceRemove: httputils.BoolValue(r, "forcerm"),
|
||||
PullParent: httputils.BoolValue(r, "pull"),
|
||||
MemorySwap: httputils.Int64ValueOrZero(r, "memswap"),
|
||||
Memory: httputils.Int64ValueOrZero(r, "memory"),
|
||||
CPUShares: httputils.Int64ValueOrZero(r, "cpushares"),
|
||||
@@ -67,14 +66,17 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBui
|
||||
return nil, invalidParam{errors.New("security options are not supported on " + runtime.GOOS)}
|
||||
}
|
||||
|
||||
if httputils.BoolValue(r, "forcerm") {
|
||||
version := httputils.VersionFromContext(ctx)
|
||||
if httputils.BoolValue(r, "forcerm") && versions.GreaterThanOrEqualTo(version, "1.12") {
|
||||
options.Remove = true
|
||||
} else if r.FormValue("rm") == "" {
|
||||
} else if r.FormValue("rm") == "" && versions.GreaterThanOrEqualTo(version, "1.12") {
|
||||
options.Remove = true
|
||||
} else {
|
||||
options.Remove = httputils.BoolValue(r, "rm")
|
||||
}
|
||||
version := httputils.VersionFromContext(ctx)
|
||||
if httputils.BoolValue(r, "pull") && versions.GreaterThanOrEqualTo(version, "1.16") {
|
||||
options.PullParent = true
|
||||
}
|
||||
if versions.GreaterThanOrEqualTo(version, "1.32") {
|
||||
options.Platform = r.FormValue("platform")
|
||||
}
|
||||
|
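newImageBuildOptions above repeatedly gates request parameters on the negotiated API version; a compact sketch of that pattern, with an illustrative helper name:

package sketch

import (
	"net/http"

	"github.com/docker/docker/api/types/versions"
)

// buildPlatform is an illustrative helper: the "platform" form value is only
// honoured when the negotiated API version is 1.32 or newer, mirroring the
// gating in the hunk above.
func buildPlatform(r *http.Request, apiVersion string) string {
	if versions.GreaterThanOrEqualTo(apiVersion, "1.32") {
		return r.FormValue("platform")
	}
	return ""
}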
||||
@@ -24,6 +24,7 @@ type execBackend interface {
|
||||
// copyBackend includes functions to implement to provide container copy functionality.
|
||||
type copyBackend interface {
|
||||
ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error)
|
||||
ContainerCopy(name string, res string) (io.ReadCloser, error)
|
||||
ContainerExport(ctx context.Context, name string, out io.Writer) error
|
||||
ContainerExtractToDir(name, path string, copyUIDGID, noOverwriteDirNonDir bool, content io.Reader) error
|
||||
ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error)
|
||||
@@ -38,7 +39,7 @@ type stateBackend interface {
|
||||
ContainerResize(name string, height, width int) error
|
||||
ContainerRestart(ctx context.Context, name string, options container.StopOptions) error
|
||||
ContainerRm(name string, config *backend.ContainerRmConfig) error
|
||||
ContainerStart(ctx context.Context, name string, checkpoint string, checkpointDir string) error
|
||||
ContainerStart(ctx context.Context, name string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error
|
||||
ContainerStop(ctx context.Context, name string, options container.StopOptions) error
|
||||
ContainerUnpause(name string) error
|
||||
ContainerUpdate(name string, hostConfig *container.HostConfig) (container.ContainerUpdateOKBody, error)
|
||||
|
||||
@@ -56,6 +56,7 @@ func (r *containerRouter) initRoutes() {
|
||||
router.NewPostRoute("/containers/{name:.*}/wait", r.postContainersWait),
|
||||
router.NewPostRoute("/containers/{name:.*}/resize", r.postContainersResize),
|
||||
router.NewPostRoute("/containers/{name:.*}/attach", r.postContainersAttach),
|
||||
router.NewPostRoute("/containers/{name:.*}/copy", r.postContainersCopy), // Deprecated since 1.8 (API v1.20), errors out since 1.12 (API v1.24)
|
||||
router.NewPostRoute("/containers/{name:.*}/exec", r.postContainerExecCreate),
|
||||
router.NewPostRoute("/exec/{name:.*}/start", r.postContainerExecStart),
|
||||
router.NewPostRoute("/exec/{name:.*}/resize", r.postContainerExecResize),
|
||||
|
||||
@@ -39,6 +39,13 @@ func (s *containerRouter) postCommit(ctx context.Context, w http.ResponseWriter,
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO: remove pause arg, and always pause in backend
|
||||
pause := httputils.BoolValue(r, "pause")
|
||||
version := httputils.VersionFromContext(ctx)
|
||||
if r.FormValue("pause") == "" && versions.GreaterThanOrEqualTo(version, "1.13") {
|
||||
pause = true
|
||||
}
|
||||
|
||||
config, _, _, err := s.decoder.DecodeConfig(r.Body)
|
||||
if err != nil && !errors.Is(err, io.EOF) { // Do not fail if body is empty.
|
||||
return err
|
||||
@@ -50,7 +57,7 @@ func (s *containerRouter) postCommit(ctx context.Context, w http.ResponseWriter,
|
||||
}
|
||||
|
||||
imgID, err := s.backend.CreateImageFromContainer(ctx, r.Form.Get("container"), &backend.CreateImageConfig{
|
||||
Pause: httputils.BoolValueOrDefault(r, "pause", true), // TODO(dnephin): remove pause arg, and always pause in backend
|
||||
Pause: pause,
|
||||
Tag: ref,
|
||||
Author: r.Form.Get("author"),
|
||||
Comment: r.Form.Get("comment"),
|
||||
@@ -111,20 +118,14 @@ func (s *containerRouter) getContainersStats(ctx context.Context, w http.Respons
|
||||
oneShot = httputils.BoolValueOrDefault(r, "one-shot", false)
|
||||
}
|
||||
|
||||
return s.backend.ContainerStats(ctx, vars["name"], &backend.ContainerStatsConfig{
|
||||
Stream: stream,
|
||||
OneShot: oneShot,
|
||||
OutStream: func() io.Writer {
|
||||
// Assume that when this is called the request is OK.
|
||||
w.WriteHeader(http.StatusOK)
|
||||
if !stream {
|
||||
return w
|
||||
}
|
||||
wf := ioutils.NewWriteFlusher(w)
|
||||
wf.Flush()
|
||||
return wf
|
||||
},
|
||||
})
|
||||
config := &backend.ContainerStatsConfig{
|
||||
Stream: stream,
|
||||
OneShot: oneShot,
|
||||
OutStream: w,
|
||||
Version: httputils.VersionFromContext(ctx),
|
||||
}
|
||||
|
||||
return s.backend.ContainerStats(ctx, vars["name"], config)
|
||||
}
|
||||
|
||||
func (s *containerRouter) getContainersLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
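The two variants of the getContainersStats hunk above differ in where the stream writer is prepared: one builds a WriteFlusher in the handler, the other passes the ResponseWriter straight to the backend. A self-contained sketch of the flushing setup, assuming the handler side does the wrapping (helper name is illustrative):

package sketch

import (
	"io"
	"net/http"

	"github.com/docker/docker/pkg/ioutils"
)

// statsWriter mirrors the streaming setup shown above: when streaming, the
// response writer is wrapped in a WriteFlusher so each sample reaches the
// client immediately. Callers are expected to close the returned WriteFlusher.
func statsWriter(w http.ResponseWriter, stream bool) io.Writer {
	// Assume the request has already been validated.
	w.WriteHeader(http.StatusOK)
	if !stream {
		return w
	}
	wf := ioutils.NewWriteFlusher(w)
	wf.Flush()
	return wf
}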
||||
@@ -177,6 +178,14 @@ func (s *containerRouter) getContainersExport(ctx context.Context, w http.Respon
|
||||
return s.backend.ContainerExport(ctx, vars["name"], w)
|
||||
}
|
||||
|
||||
type bodyOnStartError struct{}
|
||||
|
||||
func (bodyOnStartError) Error() string {
|
||||
return "starting container with non-empty request body was deprecated since API v1.22 and removed in v1.24"
|
||||
}
|
||||
|
||||
func (bodyOnStartError) InvalidParameter() {}
|
||||
|
||||
func (s *containerRouter) postContainersStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
// If contentLength is -1, we can assume chunked encoding
|
||||
// or more technically that the length is unknown
|
||||
@@ -184,17 +193,33 @@ func (s *containerRouter) postContainersStart(ctx context.Context, w http.Respon
|
||||
// net/http otherwise seems to swallow any headers related to chunked encoding
|
||||
// including r.TransferEncoding
|
||||
// allow a nil body for backwards compatibility
|
||||
//
|
||||
|
||||
version := httputils.VersionFromContext(ctx)
|
||||
var hostConfig *container.HostConfig
|
||||
// A non-nil json object is at least 7 characters.
|
||||
if r.ContentLength > 7 || r.ContentLength == -1 {
|
||||
return errdefs.InvalidParameter(errors.New("starting container with non-empty request body was deprecated since API v1.22 and removed in v1.24"))
|
||||
if versions.GreaterThanOrEqualTo(version, "1.24") {
|
||||
return bodyOnStartError{}
|
||||
}
|
||||
|
||||
if err := httputils.CheckForJSON(r); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c, err := s.decoder.DecodeHostConfig(r.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hostConfig = c
|
||||
}
|
||||
|
||||
if err := httputils.ParseForm(r); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s.backend.ContainerStart(ctx, vars["name"], r.Form.Get("checkpoint"), r.Form.Get("checkpoint-dir")); err != nil {
|
||||
checkpoint := r.Form.Get("checkpoint")
|
||||
checkpointDir := r.Form.Get("checkpoint-dir")
|
||||
if err := s.backend.ContainerStart(ctx, vars["name"], hostConfig, checkpoint, checkpointDir); err != nil {
|
||||
return err
|
||||
}
|
||||
|
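The postContainersStart hunk above gates the request body on the negotiated API version: only clients older than API 1.24 may still send a HostConfig on start. A compact sketch of that decision, with an illustrative helper name:

package sketch

import (
	"errors"
	"net/http"

	"github.com/docker/docker/api/types/versions"
)

// allowStartBody is an illustrative helper for the decision above: a request
// body on container start is only accepted from clients older than API 1.24.
// The length heuristic mirrors the handler (a non-empty JSON object is at
// least 7 bytes; -1 means the length is unknown / chunked).
func allowStartBody(r *http.Request, apiVersion string) error {
	hasBody := r.ContentLength > 7 || r.ContentLength == -1
	if hasBody && versions.GreaterThanOrEqualTo(apiVersion, "1.24") {
		return errors.New("starting container with non-empty request body was deprecated since API v1.22 and removed in v1.24")
	}
	return nil
}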
||||
@@ -230,14 +255,25 @@ func (s *containerRouter) postContainersStop(ctx context.Context, w http.Respons
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *containerRouter) postContainersKill(_ context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
func (s *containerRouter) postContainersKill(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
if err := httputils.ParseForm(r); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
name := vars["name"]
|
||||
if err := s.backend.ContainerKill(name, r.Form.Get("signal")); err != nil {
|
||||
return errors.Wrapf(err, "cannot kill container: %s", name)
|
||||
var isStopped bool
|
||||
if errdefs.IsConflict(err) {
|
||||
isStopped = true
|
||||
}
|
||||
|
||||
// Return error that's not caused because the container is stopped.
|
||||
// Return error if the container is not running and the api is >= 1.20
|
||||
// to keep backwards compatibility.
|
||||
version := httputils.VersionFromContext(ctx)
|
||||
if versions.GreaterThanOrEqualTo(version, "1.20") || !isStopped {
|
||||
return errors.Wrapf(err, "Cannot kill container: %s", name)
|
||||
}
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
@@ -465,29 +501,18 @@ func (s *containerRouter) postContainersCreate(ctx context.Context, w http.Respo
|
||||
if hostConfig == nil {
|
||||
hostConfig = &container.HostConfig{}
|
||||
}
|
||||
if hostConfig.NetworkMode == "" {
|
||||
hostConfig.NetworkMode = "default"
|
||||
}
|
||||
if networkingConfig == nil {
|
||||
networkingConfig = &network.NetworkingConfig{}
|
||||
}
|
||||
if networkingConfig.EndpointsConfig == nil {
|
||||
networkingConfig.EndpointsConfig = make(map[string]*network.EndpointSettings)
|
||||
}
|
||||
// The NetworkMode "default" is used as a way to express a container should
|
||||
// be attached to the OS-dependent default network, in an OS-independent
|
||||
// way. Doing this conversion as soon as possible ensures we have less
|
||||
// NetworkMode to handle down the path (including in the
|
||||
// backward-compatibility layer we have just below).
|
||||
//
|
||||
// Note that this is not the only place where this conversion has to be
|
||||
// done (as there are various other places where containers get created).
|
||||
if hostConfig.NetworkMode == "" || hostConfig.NetworkMode.IsDefault() {
|
||||
hostConfig.NetworkMode = runconfig.DefaultDaemonNetworkMode()
|
||||
if nw, ok := networkingConfig.EndpointsConfig[network.NetworkDefault]; ok {
|
||||
networkingConfig.EndpointsConfig[hostConfig.NetworkMode.NetworkName()] = nw
|
||||
delete(networkingConfig.EndpointsConfig, network.NetworkDefault)
|
||||
}
|
||||
}
|
||||
|
||||
version := httputils.VersionFromContext(ctx)
|
||||
adjustCPUShares := versions.LessThan(version, "1.19")
|
||||
|
||||
// When using API 1.24 and under, the client is responsible for removing the container
|
||||
if versions.LessThan(version, "1.25") {
|
||||
@@ -613,14 +638,6 @@ func (s *containerRouter) postContainersCreate(ctx context.Context, w http.Respo
|
||||
}
|
||||
}
|
||||
|
||||
if versions.LessThan(version, "1.45") {
|
||||
for _, m := range hostConfig.Mounts {
|
||||
if m.VolumeOptions != nil && m.VolumeOptions.Subpath != "" {
|
||||
return errdefs.InvalidParameter(errors.New("VolumeOptions.Subpath needs API v1.45 or newer"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var warnings []string
|
||||
if warn, err := handleMACAddressBC(config, hostConfig, networkingConfig, version); err != nil {
|
||||
return err
|
||||
@@ -641,6 +658,7 @@ func (s *containerRouter) postContainersCreate(ctx context.Context, w http.Respo
|
||||
Config: config,
|
||||
HostConfig: hostConfig,
|
||||
NetworkingConfig: networkingConfig,
|
||||
AdjustCPUShares: adjustCPUShares,
|
||||
Platform: platform,
|
||||
DefaultReadOnlyNonRecursive: defaultReadOnlyNonRecursive,
|
||||
})
|
||||
@@ -667,7 +685,7 @@ func handleMACAddressBC(config *container.Config, hostConfig *container.HostConf
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
if !hostConfig.NetworkMode.IsBridge() && !hostConfig.NetworkMode.IsUserDefined() {
|
||||
if !hostConfig.NetworkMode.IsDefault() && !hostConfig.NetworkMode.IsBridge() && !hostConfig.NetworkMode.IsUserDefined() {
|
||||
return "", runconfig.ErrConflictContainerNetworkAndMac
|
||||
}
|
||||
|
||||
@@ -696,7 +714,7 @@ func handleMACAddressBC(config *container.Config, hostConfig *container.HostConf
|
||||
return "", nil
|
||||
}
|
||||
var warning string
|
||||
if hostConfig.NetworkMode.IsBridge() || hostConfig.NetworkMode.IsUserDefined() {
|
||||
if hostConfig.NetworkMode.IsDefault() || hostConfig.NetworkMode.IsBridge() || hostConfig.NetworkMode.IsUserDefined() {
|
||||
nwName := hostConfig.NetworkMode.NetworkName()
|
||||
// If there's no endpoint config, create a place to store the configured address.
|
||||
if len(networkingConfig.EndpointsConfig) == 0 {
|
||||
@@ -797,11 +815,9 @@ func (s *containerRouter) postContainersAttach(ctx context.Context, w http.Respo
|
||||
if multiplexed && versions.GreaterThanOrEqualTo(httputils.VersionFromContext(ctx), "1.42") {
|
||||
contentType = types.MediaTypeMultiplexedStream
|
||||
}
|
||||
// FIXME(thaJeztah): we should not ignore errors here; see https://github.com/moby/moby/pull/48359#discussion_r1725562802
|
||||
fmt.Fprintf(conn, "HTTP/1.1 101 UPGRADED\r\nContent-Type: %v\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n", contentType)
|
||||
fmt.Fprintf(conn, "HTTP/1.1 101 UPGRADED\r\nContent-Type: "+contentType+"\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n")
|
||||
} else {
|
||||
// FIXME(thaJeztah): we should not ignore errors here; see https://github.com/moby/moby/pull/48359#discussion_r1725562802
|
||||
fmt.Fprint(conn, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n")
|
||||
fmt.Fprintf(conn, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n")
|
||||
}
|
||||
|
||||
closer := func() error {
|
||||
|
||||
@@ -11,10 +11,49 @@ import (
|
||||
|
||||
"github.com/docker/docker/api/server/httputils"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/versions"
|
||||
gddohttputil "github.com/golang/gddo/httputil"
|
||||
)
|
||||
|
||||
// setContainerPathStatHeader encodes the stat to JSON, base64-encodes it, and places it in a header.
|
||||
type pathError struct{}
|
||||
|
||||
func (pathError) Error() string {
|
||||
return "Path cannot be empty"
|
||||
}
|
||||
|
||||
func (pathError) InvalidParameter() {}
|
||||
|
||||
// postContainersCopy is deprecated in favor of getContainersArchive.
|
||||
//
|
||||
// Deprecated since 1.8 (API v1.20), errors out since 1.12 (API v1.24)
|
||||
func (s *containerRouter) postContainersCopy(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
version := httputils.VersionFromContext(ctx)
|
||||
if versions.GreaterThanOrEqualTo(version, "1.24") {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
return nil
|
||||
}
|
||||
|
||||
cfg := types.CopyConfig{}
|
||||
if err := httputils.ReadJSON(r, &cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if cfg.Resource == "" {
|
||||
return pathError{}
|
||||
}
|
||||
|
||||
data, err := s.backend.ContainerCopy(vars["name"], cfg.Resource)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer data.Close()
|
||||
|
||||
w.Header().Set("Content-Type", "application/x-tar")
|
||||
_, err = io.Copy(w, data)
|
||||
return err
|
||||
}
|
||||
|
||||
// Encode the stat to JSON, base64 encode, and place in a header.
|
||||
func setContainerPathStatHeader(stat *types.ContainerPathStat, header http.Header) error {
|
||||
statJSON, err := json.Marshal(stat)
|
||||
if err != nil {
|
||||
|
||||
@@ -71,6 +71,15 @@ func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.Res
|
||||
return err
|
||||
}
|
||||
|
||||
version := httputils.VersionFromContext(ctx)
|
||||
if versions.LessThan(version, "1.22") {
|
||||
// API versions before 1.22 did not enforce application/json content-type.
|
||||
// Allow older clients to work by patching the content-type.
|
||||
if r.Header.Get("Content-Type") != "application/json" {
|
||||
r.Header.Set("Content-Type", "application/json")
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
execName = vars["name"]
|
||||
stdin, inStream io.ReadCloser
|
||||
@@ -87,8 +96,6 @@ func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.Res
|
||||
}
|
||||
|
||||
if execStartCheck.ConsoleSize != nil {
|
||||
version := httputils.VersionFromContext(ctx)
|
||||
|
||||
// Not supported before 1.42
|
||||
if versions.LessThan(version, "1.42") {
|
||||
execStartCheck.ConsoleSize = nil
|
||||
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"os"
|
||||
|
||||
"github.com/distribution/reference"
|
||||
"github.com/docker/distribution"
|
||||
@@ -13,7 +12,6 @@ import (
|
||||
"github.com/docker/distribution/manifest/schema2"
|
||||
"github.com/docker/docker/api/server/httputils"
|
||||
"github.com/docker/docker/api/types/registry"
|
||||
distributionpkg "github.com/docker/docker/distribution"
|
||||
"github.com/docker/docker/errdefs"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
@@ -26,10 +24,10 @@ func (s *distributionRouter) getDistributionInfo(ctx context.Context, w http.Res
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
imgName := vars["name"]
|
||||
image := vars["name"]
|
||||
|
||||
// TODO why is reference.ParseAnyReference() / reference.ParseNormalizedNamed() not using the reference.ErrTagInvalidFormat (and so on) errors?
|
||||
ref, err := reference.ParseAnyReference(imgName)
|
||||
ref, err := reference.ParseAnyReference(image)
|
||||
if err != nil {
|
||||
return errdefs.InvalidParameter(err)
|
||||
}
|
||||
@@ -39,7 +37,7 @@ func (s *distributionRouter) getDistributionInfo(ctx context.Context, w http.Res
|
||||
// full image ID
|
||||
return errors.Errorf("no manifest found for full image ID")
|
||||
}
|
||||
return errdefs.InvalidParameter(errors.Errorf("unknown image reference format: %s", imgName))
|
||||
return errdefs.InvalidParameter(errors.Errorf("unknown image reference format: %s", image))
|
||||
}
|
||||
|
||||
// For a search it is not an error if no auth was given. Ignore invalid
|
||||
@@ -155,9 +153,6 @@ func (s *distributionRouter) fetchManifest(ctx context.Context, distrepo distrib
|
||||
}
|
||||
}
|
||||
case *schema1.SignedManifest:
|
||||
if os.Getenv("DOCKER_ENABLE_DEPRECATED_PULL_SCHEMA_1_IMAGE") == "" {
|
||||
return registry.DistributionInspect{}, distributionpkg.DeprecatedSchema1ImageError(namedRef)
|
||||
}
|
||||
platform := ocispec.Platform{
|
||||
Architecture: mnfstObj.Architecture,
|
||||
OS: "linux",
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
|
||||
"github.com/distribution/reference"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/backend"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/api/types/image"
|
||||
"github.com/docker/docker/api/types/registry"
|
||||
@@ -25,8 +24,8 @@ type Backend interface {
|
||||
type imageBackend interface {
|
||||
ImageDelete(ctx context.Context, imageRef string, force, prune bool) ([]image.DeleteResponse, error)
|
||||
ImageHistory(ctx context.Context, imageName string) ([]*image.HistoryResponseItem, error)
|
||||
Images(ctx context.Context, opts image.ListOptions) ([]*image.Summary, error)
|
||||
GetImage(ctx context.Context, refOrID string, options backend.GetImageOpts) (*dockerimage.Image, error)
|
||||
Images(ctx context.Context, opts types.ImageListOptions) ([]*image.Summary, error)
|
||||
GetImage(ctx context.Context, refOrID string, options image.GetImageOpts) (*dockerimage.Image, error)
|
||||
TagImage(ctx context.Context, id dockerimage.ID, newRef reference.Named) error
|
||||
ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*types.ImagesPruneReport, error)
|
||||
}
|
||||
|
||||
@@ -15,9 +15,8 @@ import (
|
||||
"github.com/docker/docker/api"
|
||||
"github.com/docker/docker/api/server/httputils"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/backend"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
imagetypes "github.com/docker/docker/api/types/image"
|
||||
opts "github.com/docker/docker/api/types/image"
|
||||
"github.com/docker/docker/api/types/registry"
|
||||
"github.com/docker/docker/api/types/versions"
|
||||
"github.com/docker/docker/builder/remotecontext"
|
||||
@@ -73,9 +72,9 @@ func (ir *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWrit
|
||||
// Special case: "pull -a" may send an image name with a
|
||||
// trailing :. This is ugly, but let's not break API
|
||||
// compatibility.
|
||||
imgName := strings.TrimSuffix(img, ":")
|
||||
image := strings.TrimSuffix(img, ":")
|
||||
|
||||
ref, err := reference.ParseNormalizedNamed(imgName)
|
||||
ref, err := reference.ParseNormalizedNamed(image)
|
||||
if err != nil {
|
||||
return errdefs.InvalidParameter(err)
|
||||
}
|
||||
@@ -142,7 +141,7 @@ func (ir *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWrit
|
||||
id, progressErr = ir.backend.ImportImage(ctx, tagRef, platform, comment, layerReader, r.Form["changes"])
|
||||
|
||||
if progressErr == nil {
|
||||
_, _ = output.Write(streamformatter.FormatStatus("", "%v", id.String()))
|
||||
output.Write(streamformatter.FormatStatus("", id.String()))
|
||||
}
|
||||
}
|
||||
if progressErr != nil {
|
||||
@@ -190,7 +189,7 @@ func (ir *imageRouter) postImagesPush(ctx context.Context, w http.ResponseWriter
|
||||
|
||||
var ref reference.Named
|
||||
|
||||
// Tag is empty only in case PushOptions.All is true.
|
||||
// Tag is empty only in case ImagePushOptions.All is true.
|
||||
if tag != "" {
|
||||
r, err := httputils.RepoTagReference(img, tag)
|
||||
if err != nil {
|
||||
@@ -286,7 +285,7 @@ func (ir *imageRouter) deleteImages(ctx context.Context, w http.ResponseWriter,
|
||||
}
|
||||
|
||||
func (ir *imageRouter) getImagesByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
img, err := ir.backend.GetImage(ctx, vars["name"], backend.GetImageOpts{Details: true})
|
||||
img, err := ir.backend.GetImage(ctx, vars["name"], opts.GetImageOpts{Details: true})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -306,10 +305,6 @@ func (ir *imageRouter) getImagesByName(ctx context.Context, w http.ResponseWrite
|
||||
imageInspect.Created = time.Time{}.Format(time.RFC3339Nano)
|
||||
}
|
||||
}
|
||||
if versions.GreaterThanOrEqualTo(version, "1.45") {
|
||||
imageInspect.Container = "" //nolint:staticcheck // ignore SA1019: field is deprecated, but still set on API < v1.45.
|
||||
imageInspect.ContainerConfig = nil //nolint:staticcheck // ignore SA1019: field is deprecated, but still set on API < v1.45.
|
||||
}
|
||||
return httputils.WriteJSON(w, http.StatusOK, imageInspect)
|
||||
}
|
||||
|
||||
@@ -364,7 +359,7 @@ func (ir *imageRouter) toImageInspect(img *image.Image) (*types.ImageInspect, er
|
||||
Data: img.Details.Metadata,
|
||||
},
|
||||
RootFS: rootFSToAPIType(img.RootFS),
|
||||
Metadata: imagetypes.Metadata{
|
||||
Metadata: opts.Metadata{
|
||||
LastTagTime: img.Details.LastUpdated,
|
||||
},
|
||||
}, nil
|
||||
@@ -406,7 +401,7 @@ func (ir *imageRouter) getImagesJSON(ctx context.Context, w http.ResponseWriter,
|
||||
sharedSize = httputils.BoolValue(r, "shared-size")
|
||||
}
|
||||
|
||||
images, err := ir.backend.Images(ctx, imagetypes.ListOptions{
|
||||
images, err := ir.backend.Images(ctx, types.ImageListOptions{
|
||||
All: httputils.BoolValue(r, "all"),
|
||||
Filters: imageFilters,
|
||||
SharedSize: sharedSize,
|
||||
@@ -463,7 +458,7 @@ func (ir *imageRouter) postImagesTag(ctx context.Context, w http.ResponseWriter,
|
||||
return errdefs.InvalidParameter(errors.New("refusing to create an ambiguous tag using digest algorithm as name"))
|
||||
}
|
||||
|
||||
img, err := ir.backend.GetImage(ctx, vars["name"], backend.GetImageOpts{})
|
||||
img, err := ir.backend.GetImage(ctx, vars["name"], opts.GetImageOpts{})
|
||||
if err != nil {
|
||||
return errdefs.NotFound(err)
|
||||
}
|
||||
|
||||
@@ -213,10 +213,6 @@ func (n *networkRouter) postNetworkCreate(ctx context.Context, w http.ResponseWr
|
||||
return libnetwork.NetworkNameError(create.Name)
|
||||
}
|
||||
|
||||
// For a Swarm-scoped network, this call to backend.CreateNetwork is used to
|
||||
// validate the configuration. The network will not be created but, if the
|
||||
// configuration is valid, ManagerRedirectError will be returned and handled
|
||||
// below.
|
||||
nw, err := n.backend.CreateNetwork(create)
|
||||
if err != nil {
|
||||
if _, ok := err.(libnetwork.ManagerRedirectError); !ok {
|
||||
|
||||
@@ -224,6 +224,14 @@ func (sr *swarmRouter) createService(ctx context.Context, w http.ResponseWriter,
|
||||
adjustForAPIVersion(v, &service)
|
||||
}
|
||||
|
||||
version := httputils.VersionFromContext(ctx)
|
||||
if versions.LessThan(version, "1.44") {
|
||||
if service.TaskTemplate.ContainerSpec != nil && service.TaskTemplate.ContainerSpec.Healthcheck != nil {
|
||||
// StartInterval was added in API 1.44
|
||||
service.TaskTemplate.ContainerSpec.Healthcheck.StartInterval = 0
|
||||
}
|
||||
}
|
||||
|
||||
resp, err := sr.backend.CreateService(service, encodedAuth, queryRegistry)
|
||||
if err != nil {
|
||||
log.G(ctx).WithFields(log.Fields{
|
||||
|
||||
@@ -121,17 +121,11 @@ func adjustForAPIVersion(cliVersion string, service *swarm.ServiceSpec) {
|
||||
}
|
||||
|
||||
if versions.LessThan(cliVersion, "1.44") {
|
||||
if service.TaskTemplate.ContainerSpec != nil {
|
||||
// seccomp, apparmor, and no_new_privs were added in 1.44.
|
||||
if service.TaskTemplate.ContainerSpec.Privileges != nil {
|
||||
service.TaskTemplate.ContainerSpec.Privileges.Seccomp = nil
|
||||
service.TaskTemplate.ContainerSpec.Privileges.AppArmor = nil
|
||||
service.TaskTemplate.ContainerSpec.Privileges.NoNewPrivileges = false
|
||||
}
|
||||
if service.TaskTemplate.ContainerSpec.Healthcheck != nil {
|
||||
// StartInterval was added in API 1.44
|
||||
service.TaskTemplate.ContainerSpec.Healthcheck.StartInterval = 0
|
||||
}
|
||||
// seccomp, apparmor, and no_new_privs were added in 1.44.
|
||||
if service.TaskTemplate.ContainerSpec != nil && service.TaskTemplate.ContainerSpec.Privileges != nil {
|
||||
service.TaskTemplate.ContainerSpec.Privileges.Seccomp = nil
|
||||
service.TaskTemplate.ContainerSpec.Privileges.AppArmor = nil
|
||||
service.TaskTemplate.ContainerSpec.Privileges.NoNewPrivileges = false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
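adjustForAPIVersion above zeroes fields that older clients cannot know about. A minimal sketch of the healthcheck part, using the same swarm types (the helper name is an assumption):

package sketch

import (
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/api/types/versions"
)

// downgradeHealthcheck is an illustrative helper for the shim above:
// Healthcheck.StartInterval only exists from API 1.44, so it is cleared
// before returning a spec to an older client.
func downgradeHealthcheck(cliVersion string, spec *swarm.ServiceSpec) {
	if versions.LessThan(cliVersion, "1.44") &&
		spec.TaskTemplate.ContainerSpec != nil &&
		spec.TaskTemplate.ContainerSpec.Healthcheck != nil {
		spec.TaskTemplate.ContainerSpec.Healthcheck.StartInterval = 0
	}
}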
||||
@@ -263,7 +263,6 @@ func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
output := ioutils.NewWriteFlusher(w)
|
||||
defer output.Close()
|
||||
output.Flush()
|
||||
|
||||
@@ -10,7 +10,6 @@ import (
|
||||
"github.com/docker/docker/api/server/middleware"
|
||||
"github.com/docker/docker/api/server/router"
|
||||
"github.com/docker/docker/api/server/router/debug"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/dockerversion"
|
||||
"github.com/gorilla/mux"
|
||||
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
|
||||
@@ -58,13 +57,19 @@ func (s *Server) makeHTTPHandler(handler httputils.APIFunc, operation string) ht
|
||||
if statusCode >= 500 {
|
||||
log.G(ctx).Errorf("Handler for %s %s returned error: %v", r.Method, r.URL.Path, err)
|
||||
}
|
||||
_ = httputils.WriteJSON(w, statusCode, &types.ErrorResponse{
|
||||
Message: err.Error(),
|
||||
})
|
||||
makeErrorHandler(err)(w, r)
|
||||
}
|
||||
}), operation).ServeHTTP
|
||||
}
|
||||
|
||||
type pageNotFoundError struct{}
|
||||
|
||||
func (pageNotFoundError) Error() string {
|
||||
return "page not found"
|
||||
}
|
||||
|
||||
func (pageNotFoundError) NotFound() {}
|
||||
|
||||
// CreateMux returns a new mux with all the routers registered.
|
||||
func (s *Server) CreateMux(routers ...router.Router) *mux.Router {
|
||||
m := mux.NewRouter()
|
||||
@@ -86,12 +91,7 @@ func (s *Server) CreateMux(routers ...router.Router) *mux.Router {
|
||||
m.Path("/debug" + r.Path()).Handler(f)
|
||||
}
|
||||
|
||||
notFoundHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
_ = httputils.WriteJSON(w, http.StatusNotFound, &types.ErrorResponse{
|
||||
Message: "page not found",
|
||||
})
|
||||
})
|
||||
|
||||
notFoundHandler := makeErrorHandler(pageNotFoundError{})
|
||||
m.HandleFunc(versionMatcher+"/{path:.*}", notFoundHandler)
|
||||
m.NotFoundHandler = notFoundHandler
|
||||
m.MethodNotAllowedHandler = notFoundHandler
|
||||
|
||||
@@ -15,11 +15,8 @@ import (
|
||||
func TestMiddlewares(t *testing.T) {
|
||||
srv := &Server{}
|
||||
|
||||
m, err := middleware.NewVersionMiddleware("0.1omega2", api.DefaultVersion, api.MinSupportedAPIVersion)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
srv.UseMiddleware(*m)
|
||||
const apiMinVersion = "1.12"
|
||||
srv.UseMiddleware(middleware.NewVersionMiddleware("0.1omega2", api.DefaultVersion, apiMinVersion))
|
||||
|
||||
req, _ := http.NewRequest(http.MethodGet, "/containers/json", nil)
|
||||
resp := httptest.NewRecorder()
|
||||
|
||||
260  api/swagger.yaml
@@ -19,10 +19,10 @@ produces:
|
||||
consumes:
|
||||
- "application/json"
|
||||
- "text/plain"
|
||||
basePath: "/v1.45"
|
||||
basePath: "/v1.44"
|
||||
info:
|
||||
title: "Docker Engine API"
|
||||
version: "1.45"
|
||||
version: "1.44"
|
||||
x-logo:
|
||||
url: "https://docs.docker.com/assets/images/logo-docker-main.png"
|
||||
description: |
|
||||
@@ -55,8 +55,8 @@ info:
|
||||
the URL is not supported by the daemon, a HTTP `400 Bad Request` error message
|
||||
is returned.
|
||||
|
||||
If you omit the version-prefix, the current version of the API (v1.45) is used.
|
||||
For example, calling `/info` is the same as calling `/v1.45/info`. Using the
|
||||
If you omit the version-prefix, the current version of the API (v1.44) is used.
|
||||
For example, calling `/info` is the same as calling `/v1.44/info`. Using the
|
||||
API without a version-prefix is deprecated and will be removed in a future release.
|
||||
|
||||
Engine releases in the near future should support this version of the API,
|
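The version-prefix convention described above can be checked directly against a daemon; a small sketch, assuming an (illustrative) daemon reachable on tcp://localhost:2375:

package main

import (
	"fmt"
	"net/http"
)

// Query /info with and without the version prefix, as described above.
// The daemon address is an assumption for illustration.
func main() {
	for _, path := range []string{"/v1.44/info", "/info"} {
		resp, err := http.Get("http://localhost:2375" + path)
		if err != nil {
			fmt.Println(path, "error:", err)
			continue
		}
		fmt.Println(path, "->", resp.Status, "API-Version:", resp.Header.Get("API-Version"))
		resp.Body.Close()
	}
}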
||||
@@ -427,10 +427,6 @@ definitions:
|
||||
type: "object"
|
||||
additionalProperties:
|
||||
type: "string"
|
||||
Subpath:
|
||||
description: "Source path inside the volume. Must be relative without any back traversals."
|
||||
type: "string"
|
||||
example: "dir-inside-volume/subdirectory"
|
||||
TmpfsOptions:
|
||||
description: "Optional configuration for the `tmpfs` type."
|
||||
type: "object"
|
||||
@@ -1198,6 +1194,13 @@ definitions:
|
||||
ContainerConfig:
|
||||
description: |
|
||||
Configuration for a container that is portable between hosts.
|
||||
|
||||
When used as `ContainerConfig` field in an image, `ContainerConfig` is an
|
||||
optional field containing the configuration of the container that was last
|
||||
committed when creating the image.
|
||||
|
||||
Previous versions of Docker builder used this field to store build cache,
|
||||
and it is not in active use anymore.
|
||||
type: "object"
|
||||
properties:
|
||||
Hostname:
|
||||
@@ -1751,6 +1754,21 @@ definitions:
|
||||
format: "dateTime"
|
||||
x-nullable: true
|
||||
example: "2022-02-04T21:20:12.497794809Z"
|
||||
Container:
|
||||
description: |
|
||||
The ID of the container that was used to create the image.
|
||||
|
||||
Depending on how the image was created, this field may be empty.
|
||||
|
||||
**Deprecated**: this field is kept for backward compatibility, but
|
||||
will be removed in API v1.45.
|
||||
type: "string"
|
||||
example: "65974bc86f1770ae4bff79f651ebdbce166ae9aada632ee3fa9af3a264911735"
|
||||
ContainerConfig:
|
||||
description: |
|
||||
**Deprecated**: this field is kept for backward compatibility, but
|
||||
will be removed in API v1.45.
|
||||
$ref: "#/definitions/ContainerConfig"
|
||||
DockerVersion:
|
||||
description: |
|
||||
The version of Docker that was used to build the image.
|
||||
@@ -2157,129 +2175,72 @@ definitions:
|
||||
type: "object"
|
||||
properties:
|
||||
Name:
|
||||
description: |
|
||||
Name of the network.
|
||||
type: "string"
|
||||
example: "my_network"
|
||||
Id:
|
||||
description: |
|
||||
ID that uniquely identifies a network on a single machine.
|
||||
type: "string"
|
||||
example: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99"
|
||||
Created:
|
||||
description: |
|
||||
Date and time at which the network was created in
|
||||
[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
|
||||
type: "string"
|
||||
format: "dateTime"
|
||||
example: "2016-10-19T04:33:30.360899459Z"
|
||||
Scope:
|
||||
description: |
|
||||
The level at which the network exists (e.g. `swarm` for cluster-wide
|
||||
or `local` for machine level)
|
||||
type: "string"
|
||||
example: "local"
|
||||
Driver:
|
||||
description: |
|
||||
The name of the driver used to create the network (e.g. `bridge`,
|
||||
`overlay`).
|
||||
type: "string"
|
||||
example: "overlay"
|
||||
EnableIPv6:
|
||||
description: |
|
||||
Whether the network was created with IPv6 enabled.
|
||||
type: "boolean"
|
||||
example: false
|
||||
IPAM:
|
||||
$ref: "#/definitions/IPAM"
|
||||
Internal:
|
||||
description: |
|
||||
Whether the network is created to only allow internal networking
|
||||
connectivity.
|
||||
type: "boolean"
|
||||
default: false
|
||||
example: false
|
||||
Attachable:
|
||||
description: |
|
||||
Whether a global / swarm scope network is manually attachable by regular
|
||||
containers from workers in swarm mode.
|
||||
type: "boolean"
|
||||
default: false
|
||||
example: false
|
||||
Ingress:
|
||||
description: |
|
||||
Whether the network is providing the routing-mesh for the swarm cluster.
|
||||
type: "boolean"
|
||||
default: false
|
||||
example: false
|
||||
ConfigFrom:
|
||||
$ref: "#/definitions/ConfigReference"
|
||||
ConfigOnly:
|
||||
description: |
|
||||
Whether the network is a config-only network. Config-only networks are
|
||||
placeholder networks for network configurations to be used by other
|
||||
networks. Config-only networks cannot be used directly to run containers
|
||||
or services.
|
||||
type: "boolean"
|
||||
default: false
|
||||
Containers:
|
||||
description: |
|
||||
Contains endpoints attached to the network.
|
||||
type: "object"
|
||||
additionalProperties:
|
||||
$ref: "#/definitions/NetworkContainer"
|
||||
example:
|
||||
19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c:
|
||||
Name: "test"
|
||||
EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a"
|
||||
MacAddress: "02:42:ac:13:00:02"
|
||||
IPv4Address: "172.19.0.2/16"
|
||||
IPv6Address: ""
|
||||
Options:
|
||||
description: |
|
||||
Network-specific options used when creating the network.
|
||||
type: "object"
|
||||
additionalProperties:
|
||||
type: "string"
|
||||
example:
|
||||
com.docker.network.bridge.default_bridge: "true"
|
||||
com.docker.network.bridge.enable_icc: "true"
|
||||
com.docker.network.bridge.enable_ip_masquerade: "true"
|
||||
com.docker.network.bridge.host_binding_ipv4: "0.0.0.0"
|
||||
com.docker.network.bridge.name: "docker0"
|
||||
com.docker.network.driver.mtu: "1500"
|
||||
Labels:
|
||||
description: "User-defined key/value metadata."
|
||||
type: "object"
|
||||
additionalProperties:
|
||||
type: "string"
|
||||
example:
|
||||
com.example.some-label: "some-value"
|
||||
com.example.some-other-label: "some-other-value"
|
||||
Peers:
|
||||
description: |
|
||||
List of peer nodes for an overlay network. This field is only present
|
||||
for overlay networks, and omitted for other network types.
|
||||
type: "array"
|
||||
items:
|
||||
$ref: "#/definitions/PeerInfo"
|
||||
x-nullable: true
|
||||
# TODO: Add Services (only present when "verbose" is set).
|
||||
|
||||
ConfigReference:
|
||||
description: |
|
||||
The config-only network source to provide the configuration for
|
||||
this network.
|
||||
type: "object"
|
||||
properties:
|
||||
Network:
|
||||
description: |
|
||||
The name of the config-only network that provides the network's
|
||||
configuration. The specified network must be an existing config-only
|
||||
network. Only network names are allowed, not network IDs.
|
||||
type: "string"
|
||||
example: "config_only_network_01"
|
||||
|
||||
example:
|
||||
Name: "net01"
|
||||
Id: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99"
|
||||
Created: "2016-10-19T04:33:30.360899459Z"
|
||||
Scope: "local"
|
||||
Driver: "bridge"
|
||||
EnableIPv6: false
|
||||
IPAM:
|
||||
Driver: "default"
|
||||
Config:
|
||||
- Subnet: "172.19.0.0/16"
|
||||
Gateway: "172.19.0.1"
|
||||
Options:
|
||||
foo: "bar"
|
||||
Internal: false
|
||||
Attachable: false
|
||||
Ingress: false
|
||||
Containers:
|
||||
19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c:
|
||||
Name: "test"
|
||||
EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a"
|
||||
MacAddress: "02:42:ac:13:00:02"
|
||||
IPv4Address: "172.19.0.2/16"
|
||||
IPv6Address: ""
|
||||
Options:
|
||||
com.docker.network.bridge.default_bridge: "true"
|
||||
com.docker.network.bridge.enable_icc: "true"
|
||||
com.docker.network.bridge.enable_ip_masquerade: "true"
|
||||
com.docker.network.bridge.host_binding_ipv4: "0.0.0.0"
|
||||
com.docker.network.bridge.name: "docker0"
|
||||
com.docker.network.driver.mtu: "1500"
|
||||
Labels:
|
||||
com.example.some-label: "some-value"
|
||||
com.example.some-other-label: "some-other-value"
|
||||
IPAM:
|
||||
type: "object"
|
||||
properties:
|
||||
@@ -2287,7 +2248,6 @@ definitions:
|
||||
description: "Name of the IPAM driver to use."
|
||||
type: "string"
|
||||
default: "default"
|
||||
example: "default"
|
||||
Config:
|
||||
description: |
|
||||
List of IPAM configuration options, specified as a map:
|
||||
@@ -2303,21 +2263,16 @@ definitions:
|
||||
type: "object"
|
||||
additionalProperties:
|
||||
type: "string"
|
||||
example:
|
||||
foo: "bar"
|
||||
|
||||
IPAMConfig:
|
||||
type: "object"
|
||||
properties:
|
||||
Subnet:
|
||||
type: "string"
|
||||
example: "172.20.0.0/16"
|
||||
IPRange:
|
||||
type: "string"
|
||||
example: "172.20.10.0/24"
|
||||
Gateway:
|
||||
type: "string"
|
||||
example: "172.20.10.11"
|
||||
AuxiliaryAddresses:
|
||||
type: "object"
|
||||
additionalProperties:
|
||||
@@ -2328,35 +2283,14 @@ definitions:
|
||||
properties:
|
||||
Name:
|
||||
type: "string"
|
||||
example: "container_1"
|
||||
EndpointID:
|
||||
type: "string"
|
||||
example: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a"
|
||||
MacAddress:
|
||||
type: "string"
|
||||
example: "02:42:ac:13:00:02"
|
||||
IPv4Address:
|
||||
type: "string"
|
||||
example: "172.19.0.2/16"
|
||||
IPv6Address:
|
||||
type: "string"
|
||||
example: ""
|
||||
|
||||
PeerInfo:
|
||||
description: |
|
||||
PeerInfo represents one peer of an overlay network.
|
||||
type: "object"
|
||||
properties:
|
||||
Name:
|
||||
description:
|
||||
ID of the peer-node in the Swarm cluster.
|
||||
type: "string"
|
||||
example: "6869d7c1732b"
|
||||
IP:
|
||||
description:
|
||||
IP-address of the peer-node in the Swarm cluster.
|
||||
type: "string"
|
||||
example: "10.133.77.91"
|
||||
|
||||
BuildInfo:
|
||||
type: "object"
|
||||
@@ -5004,7 +4938,7 @@ definitions:
|
||||
The version Go used to compile the daemon, and the version of the Go
|
||||
runtime in use.
|
||||
type: "string"
|
||||
example: "go1.22.7"
|
||||
example: "go1.21.12"
|
||||
Os:
|
||||
description: |
|
||||
The operating system that the daemon is running on ("linux" or "windows")
|
||||
@@ -8836,7 +8770,8 @@ paths:
|
||||
|
||||
<p><br /></p>
|
||||
|
||||
> **Deprecated**: This field is deprecated and will always be "false".
|
||||
> **Deprecated**: This field is deprecated and will always
|
||||
> be "false" in future.
|
||||
type: "boolean"
|
||||
example: false
|
||||
name:
|
||||
@@ -8879,8 +8814,13 @@ paths:
|
||||
description: |
|
||||
A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters:
|
||||
|
||||
- `is-automated=(true|false)` (deprecated, see below)
|
||||
- `is-official=(true|false)`
|
||||
- `stars=<number>` Matches images that has at least 'number' stars.
|
||||
|
||||
The `is-automated` filter is deprecated. The `is_automated` field has
|
||||
been deprecated by Docker Hub's search API. Consequently, searching
|
||||
for `is-automated=true` will yield no results.
|
||||
type: "string"
|
||||
tags: ["Image"]
|
||||
/images/prune:
|
||||
@@ -10166,22 +10106,14 @@ paths:
|
||||
Name:
|
||||
description: "The network's name."
|
||||
type: "string"
|
||||
example: "my_network"
|
||||
CheckDuplicate:
|
||||
description: |
|
||||
Deprecated: CheckDuplicate is now always enabled.
|
||||
type: "boolean"
|
||||
example: true
|
||||
Driver:
|
||||
description: "Name of the network driver plugin to use."
|
||||
type: "string"
|
||||
default: "bridge"
|
||||
example: "bridge"
|
||||
Scope:
|
||||
description: |
|
||||
The level at which the network exists (e.g. `swarm` for cluster-wide
|
||||
or `local` for machine level).
|
||||
type: "string"
|
||||
Internal:
|
||||
description: "Restrict external access to the network."
|
||||
type: "boolean"
|
||||
@@ -10190,55 +10122,55 @@ paths:
|
||||
Globally scoped network is manually attachable by regular
|
||||
containers from workers in swarm mode.
|
||||
type: "boolean"
|
||||
example: true
|
||||
Ingress:
|
||||
description: |
|
||||
Ingress network is the network which provides the routing-mesh
|
||||
in swarm mode.
|
||||
type: "boolean"
|
||||
example: false
|
||||
ConfigOnly:
|
||||
description: |
|
||||
Creates a config-only network. Config-only networks are placeholder
|
||||
networks for network configurations to be used by other networks.
|
||||
Config-only networks cannot be used directly to run containers
|
||||
or services.
|
||||
type: "boolean"
|
||||
default: false
|
||||
example: false
|
||||
ConfigFrom:
|
||||
description: |
|
||||
Specifies the source which will provide the configuration for
|
||||
this network. The specified network must be an existing
|
||||
config-only network; see ConfigOnly.
|
||||
$ref: "#/definitions/ConfigReference"
|
||||
IPAM:
|
||||
description: "Optional custom IP scheme for the network."
|
||||
$ref: "#/definitions/IPAM"
|
||||
EnableIPv6:
|
||||
description: "Enable IPv6 on the network."
|
||||
type: "boolean"
|
||||
example: true
|
||||
Options:
|
||||
description: "Network specific options to be used by the drivers."
|
||||
type: "object"
|
||||
additionalProperties:
|
||||
type: "string"
|
||||
example:
|
||||
com.docker.network.bridge.default_bridge: "true"
|
||||
com.docker.network.bridge.enable_icc: "true"
|
||||
com.docker.network.bridge.enable_ip_masquerade: "true"
|
||||
com.docker.network.bridge.host_binding_ipv4: "0.0.0.0"
|
||||
com.docker.network.bridge.name: "docker0"
|
||||
com.docker.network.driver.mtu: "1500"
|
||||
Labels:
|
||||
description: "User-defined key/value metadata."
|
||||
type: "object"
|
||||
additionalProperties:
|
||||
type: "string"
|
||||
example:
|
||||
com.example.some-label: "some-value"
|
||||
com.example.some-other-label: "some-other-value"
|
||||
example:
|
||||
Name: "isolated_nw"
|
||||
CheckDuplicate: false
|
||||
Driver: "bridge"
|
||||
EnableIPv6: true
|
||||
IPAM:
|
||||
Driver: "default"
|
||||
Config:
|
||||
- Subnet: "172.20.0.0/16"
|
||||
IPRange: "172.20.10.0/24"
|
||||
Gateway: "172.20.10.11"
|
||||
- Subnet: "2001:db8:abcd::/64"
|
||||
Gateway: "2001:db8:abcd::1011"
|
||||
Options:
|
||||
foo: "bar"
|
||||
Internal: true
|
||||
Attachable: false
|
||||
Ingress: false
|
||||
Options:
|
||||
com.docker.network.bridge.default_bridge: "true"
|
||||
com.docker.network.bridge.enable_icc: "true"
|
||||
com.docker.network.bridge.enable_ip_masquerade: "true"
|
||||
com.docker.network.bridge.host_binding_ipv4: "0.0.0.0"
|
||||
com.docker.network.bridge.name: "docker0"
|
||||
com.docker.network.driver.mtu: "1500"
|
||||
Labels:
|
||||
com.example.some-label: "some-value"
|
||||
com.example.some-other-label: "some-other-value"
|
||||
tags: ["Network"]
|
||||
|
||||
/networks/{id}/connect:
|
||||
|
||||
@@ -18,6 +18,7 @@ type ContainerCreateConfig struct {
|
||||
HostConfig *container.HostConfig
|
||||
NetworkingConfig *network.NetworkingConfig
|
||||
Platform *ocispec.Platform
|
||||
AdjustCPUShares bool
|
||||
DefaultReadOnlyNonRecursive bool
|
||||
}
|
||||
|
||||
@@ -89,7 +90,8 @@ type LogSelector struct {
|
||||
type ContainerStatsConfig struct {
|
||||
Stream bool
|
||||
OneShot bool
|
||||
OutStream func() io.Writer
|
||||
OutStream io.Writer
|
||||
Version string
|
||||
}
|
||||
|
||||
// ExecInspect holds information about a running process started
|
||||
@@ -129,13 +131,6 @@ type CreateImageConfig struct {
|
||||
Changes []string
|
||||
}
|
||||
|
||||
// GetImageOpts holds parameters to retrieve image information
|
||||
// from the backend.
|
||||
type GetImageOpts struct {
|
||||
Platform *ocispec.Platform
|
||||
Details bool
|
||||
}
|
||||
|
||||
// CommitConfig is the configuration for creating an image as part of a build.
|
||||
type CommitConfig struct {
|
||||
Author string
|
||||
|
||||
@@ -157,12 +157,42 @@ type ImageBuildResponse struct {
|
||||
OSType string
|
||||
}
|
||||
|
||||
// ImageCreateOptions holds information to create images.
|
||||
type ImageCreateOptions struct {
|
||||
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry.
|
||||
Platform string // Platform is the target platform of the image if it needs to be pulled from the registry.
|
||||
}
|
||||
|
||||
// ImageImportSource holds source information for ImageImport
|
||||
type ImageImportSource struct {
|
||||
Source io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this.
|
||||
SourceName string // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute.
|
||||
}
|
||||
|
||||
// ImageImportOptions holds information to import images from the client host.
|
||||
type ImageImportOptions struct {
|
||||
Tag string // Tag is the name to tag this image with. This attribute is deprecated.
|
||||
Message string // Message is the message to tag the image with
|
||||
Changes []string // Changes are the raw changes to apply to this image
|
||||
Platform string // Platform is the target platform of the image
|
||||
}
|
||||
|
||||
// ImageListOptions holds parameters to list images with.
|
||||
type ImageListOptions struct {
|
||||
// All controls whether all images in the graph are filtered, or just
|
||||
// the heads.
|
||||
All bool
|
||||
|
||||
// Filters is a JSON-encoded set of filter arguments.
|
||||
Filters filters.Args
|
||||
|
||||
// SharedSize indicates whether the shared size of images should be computed.
|
||||
SharedSize bool
|
||||
|
||||
// ContainerCount indicates whether container count should be computed.
|
||||
ContainerCount bool
|
||||
}
|
||||
|
||||
// ImageLoadResponse returns information to the client about a load process.
|
||||
type ImageLoadResponse struct {
|
||||
// Body must be closed to avoid a resource leak
|
||||
@@ -170,6 +200,14 @@ type ImageLoadResponse struct {
|
||||
JSON bool
|
||||
}
|
||||
|
||||
// ImagePullOptions holds information to pull images.
|
||||
type ImagePullOptions struct {
|
||||
All bool
|
||||
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
|
||||
PrivilegeFunc RequestPrivilegeFunc
|
||||
Platform string
|
||||
}
|
||||
|
||||
// RequestPrivilegeFunc is a function interface that
|
||||
// clients can supply to retry operations after
|
||||
// getting an authorization error.
|
||||
@@ -178,6 +216,15 @@ type ImageLoadResponse struct {
|
||||
// if the privilege request fails.
|
||||
type RequestPrivilegeFunc func() (string, error)
|
||||
|
||||
// ImagePushOptions holds information to push images.
|
||||
type ImagePushOptions ImagePullOptions
|
||||
|
||||
// ImageRemoveOptions holds parameters to remove images.
|
||||
type ImageRemoveOptions struct {
|
||||
Force bool
|
||||
PruneChildren bool
|
||||
}
|
||||
|
||||
// ImageSearchOptions holds parameters to search images with.
|
||||
type ImageSearchOptions struct {
|
||||
RegistryAuth string
|
||||
|
||||
@@ -5,8 +5,8 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types/strslice"
|
||||
dockerspec "github.com/docker/docker/image/spec/specs-go/v1"
|
||||
"github.com/docker/go-connections/nat"
|
||||
dockerspec "github.com/moby/docker-image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
// MinimumDuration puts a minimum on user configured duration.
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package container // import "github.com/docker/docker/api/types/container"
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
@@ -326,12 +325,12 @@ func ValidateRestartPolicy(policy RestartPolicy) error {
|
||||
if policy.MaximumRetryCount < 0 {
|
||||
msg += " and cannot be negative"
|
||||
}
|
||||
return &errInvalidParameter{errors.New(msg)}
|
||||
return &errInvalidParameter{fmt.Errorf(msg)}
|
||||
}
|
||||
return nil
|
||||
case RestartPolicyOnFailure:
|
||||
if policy.MaximumRetryCount < 0 {
|
||||
return &errInvalidParameter{errors.New("invalid restart policy: maximum retry count cannot be negative")}
|
||||
return &errInvalidParameter{fmt.Errorf("invalid restart policy: maximum retry count cannot be negative")}
|
||||
}
|
||||
return nil
|
||||
case "":
|
||||
|
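The hunk above swaps between errors.New(msg) and fmt.Errorf(msg); with no formatting verbs involved, errors.New is the safer form, since a stray '%' in the message would otherwise be treated as a directive. A tiny illustration:

package main

import (
	"errors"
	"fmt"
)

func main() {
	// A non-constant message passed as a format string risks being mangled:
	// any '%' in it is interpreted as a formatting directive.
	msg := "disk is 100% full"
	fmt.Println(fmt.Errorf(msg)) // '%' is treated as a verb; output is mangled
	fmt.Println(errors.New(msg)) // message preserved verbatim
}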
||||
@@ -1,57 +1,9 @@
|
||||
package image
|
||||
|
||||
import "github.com/docker/docker/api/types/filters"
|
||||
import ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
|
||||
// ImportOptions holds information to import images from the client host.
|
||||
type ImportOptions struct {
|
||||
Tag string // Tag is the name to tag this image with. This attribute is deprecated.
|
||||
Message string // Message is the message to tag the image with
|
||||
Changes []string // Changes are the raw changes to apply to this image
|
||||
Platform string // Platform is the target platform of the image
|
||||
}
|
||||
|
||||
// CreateOptions holds information to create images.
|
||||
type CreateOptions struct {
|
||||
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry.
|
||||
Platform string // Platform is the target platform of the image if it needs to be pulled from the registry.
|
||||
}
|
||||
|
||||
// PullOptions holds information to pull images.
|
||||
type PullOptions struct {
|
||||
All bool
|
||||
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
|
||||
|
||||
// PrivilegeFunc is a function that clients can supply to retry operations
|
||||
// after getting an authorization error. This function returns the registry
|
||||
// authentication header value in base64 encoded format, or an error if the
|
||||
// privilege request fails.
|
||||
//
|
||||
// Also see [github.com/docker/docker/api/types.RequestPrivilegeFunc].
|
||||
PrivilegeFunc func() (string, error)
|
||||
Platform string
|
||||
}
|
||||
|
||||
// PushOptions holds information to push images.
|
||||
type PushOptions PullOptions
|
||||
|
||||
// ListOptions holds parameters to list images with.
|
||||
type ListOptions struct {
|
||||
// All controls whether all images in the graph are filtered, or just
|
||||
// the heads.
|
||||
All bool
|
||||
|
||||
// Filters is a JSON-encoded set of filter arguments.
|
||||
Filters filters.Args
|
||||
|
||||
// SharedSize indicates whether the shared size of images should be computed.
|
||||
SharedSize bool
|
||||
|
||||
// ContainerCount indicates whether container count should be computed.
|
||||
ContainerCount bool
|
||||
}
|
||||
|
||||
// RemoveOptions holds parameters to remove images.
|
||||
type RemoveOptions struct {
|
||||
Force bool
|
||||
PruneChildren bool
|
||||
// GetImageOpts holds parameters to inspect an image.
|
||||
type GetImageOpts struct {
|
||||
Platform *ocispec.Platform
|
||||
Details bool
|
||||
}
|
||||
|
||||
@@ -96,7 +96,6 @@ type BindOptions struct {
|
||||
type VolumeOptions struct {
|
||||
NoCopy bool `json:",omitempty"`
|
||||
Labels map[string]string `json:",omitempty"`
|
||||
Subpath string `json:",omitempty"`
|
||||
DriverConfig *Driver `json:",omitempty"`
|
||||
}
|
||||
|
||||
|
||||
@@ -94,7 +94,7 @@ type SearchResult struct {
|
||||
Name string `json:"name"`
|
||||
// IsAutomated indicates whether the result is automated.
|
||||
//
|
||||
// Deprecated: the "is_automated" field is deprecated and will always be "false".
|
||||
// Deprecated: the "is_automated" field is deprecated and will always be "false" in the future.
|
||||
IsAutomated bool `json:"is_automated"`
|
||||
// Description is a textual description of the repository
|
||||
Description string `json:"description"`
|
||||
|
||||
@@ -82,7 +82,7 @@ type ImageInspect struct {
|
||||
// Depending on how the image was created, this field may be empty.
|
||||
//
|
||||
// Deprecated: this field is omitted in API v1.45, but kept for backward compatibility.
|
||||
Container string `json:",omitempty"`
|
||||
Container string
|
||||
|
||||
// ContainerConfig is an optional field containing the configuration of the
|
||||
// container that was last committed when creating the image.
|
||||
@@ -91,7 +91,7 @@ type ImageInspect struct {
|
||||
// and it is not in active use anymore.
|
||||
//
|
||||
// Deprecated: this field is omitted in API v1.45, but kept for backward compatibility.
|
||||
ContainerConfig *container.Config `json:",omitempty"`
|
||||
ContainerConfig *container.Config
|
||||
|
||||
// DockerVersion is the version of Docker that was used to build the image.
|
||||
//
|
||||
@@ -457,24 +457,24 @@ type EndpointResource struct {
|
||||
type NetworkCreate struct {
|
||||
// Deprecated: CheckDuplicate is deprecated since API v1.44, but it defaults to true when sent by the client
|
||||
// package to older daemons.
|
||||
CheckDuplicate bool `json:",omitempty"`
|
||||
Driver string // Driver is the driver-name used to create the network (e.g. `bridge`, `overlay`)
|
||||
Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level).
|
||||
EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6.
|
||||
IPAM *network.IPAM // IPAM is the network's IP Address Management.
|
||||
Internal bool // Internal represents if the network is used internal only.
|
||||
Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode.
|
||||
Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster.
|
||||
ConfigOnly bool // ConfigOnly creates a config-only network. Config-only networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services.
|
||||
ConfigFrom *network.ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network. The specified network must be a config-only network; see [NetworkCreate.ConfigOnly].
|
||||
Options map[string]string // Options specifies the network-specific options to use for when creating the network.
|
||||
Labels map[string]string // Labels holds metadata specific to the network being created.
|
||||
CheckDuplicate bool `json:",omitempty"`
|
||||
Driver string
|
||||
Scope string
|
||||
EnableIPv6 bool
|
||||
IPAM *network.IPAM
|
||||
Internal bool
|
||||
Attachable bool
|
||||
Ingress bool
|
||||
ConfigOnly bool
|
||||
ConfigFrom *network.ConfigReference
|
||||
Options map[string]string
|
||||
Labels map[string]string
|
||||
}
|
||||
|
||||
// NetworkCreateRequest is the request message sent to the server for network create call.
|
||||
type NetworkCreateRequest struct {
|
||||
NetworkCreate
|
||||
Name string // Name is the requested name of the network.
|
||||
Name string
|
||||
}
|
||||
|
||||
// NetworkCreateResponse is the response message sent by the server for network create call
|
||||
|
||||
@@ -1,35 +1,138 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"github.com/docker/docker/api/types/checkpoint"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/image"
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
"github.com/docker/docker/api/types/system"
|
||||
)
|
||||
|
||||
// ImageImportOptions holds information to import images from the client host.
|
||||
// CheckpointCreateOptions holds parameters to create a checkpoint from a container.
|
||||
//
|
||||
// Deprecated: use [image.ImportOptions].
|
||||
type ImageImportOptions = image.ImportOptions
|
||||
// Deprecated: use [checkpoint.CreateOptions].
|
||||
type CheckpointCreateOptions = checkpoint.CreateOptions
|
||||
|
||||
// ImageCreateOptions holds information to create images.
|
||||
// CheckpointListOptions holds parameters to list checkpoints for a container
|
||||
//
|
||||
// Deprecated: use [image.CreateOptions].
|
||||
type ImageCreateOptions = image.CreateOptions
|
||||
// Deprecated: use [checkpoint.ListOptions].
|
||||
type CheckpointListOptions = checkpoint.ListOptions
|
||||
|
||||
// ImagePullOptions holds information to pull images.
|
||||
// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container
|
||||
//
|
||||
// Deprecated: use [image.PullOptions].
|
||||
type ImagePullOptions = image.PullOptions
|
||||
// Deprecated: use [checkpoint.DeleteOptions].
|
||||
type CheckpointDeleteOptions = checkpoint.DeleteOptions
|
||||
|
||||
// ImagePushOptions holds information to push images.
|
||||
// Checkpoint represents the details of a checkpoint when listing endpoints.
|
||||
//
|
||||
// Deprecated: use [image.PushOptions].
|
||||
type ImagePushOptions = image.PushOptions
|
||||
// Deprecated: use [checkpoint.Summary].
|
||||
type Checkpoint = checkpoint.Summary
|
||||
|
||||
// ImageListOptions holds parameters to list images with.
|
||||
// Info contains response of Engine API:
|
||||
// GET "/info"
|
||||
//
|
||||
// Deprecated: use [image.ListOptions].
|
||||
type ImageListOptions = image.ListOptions
|
||||
// Deprecated: use [system.Info].
|
||||
type Info = system.Info
|
||||
|
||||
// ImageRemoveOptions holds parameters to remove images.
|
||||
// Commit holds the Git-commit (SHA1) that a binary was built from, as reported
|
||||
// in the version-string of external tools, such as containerd, or runC.
|
||||
//
|
||||
// Deprecated: use [image.RemoveOptions].
|
||||
type ImageRemoveOptions = image.RemoveOptions
|
||||
// Deprecated: use [system.Commit].
|
||||
type Commit = system.Commit
|
||||
|
||||
// PluginsInfo is a temp struct holding Plugins name
|
||||
// registered with docker daemon. It is used by [system.Info] struct
|
||||
//
|
||||
// Deprecated: use [system.PluginsInfo].
|
||||
type PluginsInfo = system.PluginsInfo
|
||||
|
||||
// NetworkAddressPool is a temp struct used by [system.Info] struct.
|
||||
//
|
||||
// Deprecated: use [system.NetworkAddressPool].
|
||||
type NetworkAddressPool = system.NetworkAddressPool
|
||||
|
||||
// Runtime describes an OCI runtime.
|
||||
//
|
||||
// Deprecated: use [system.Runtime].
|
||||
type Runtime = system.Runtime
|
||||
|
||||
// SecurityOpt contains the name and options of a security option.
|
||||
//
|
||||
// Deprecated: use [system.SecurityOpt].
|
||||
type SecurityOpt = system.SecurityOpt
|
||||
|
||||
// KeyValue holds a key/value pair.
|
||||
//
|
||||
// Deprecated: use [system.KeyValue].
|
||||
type KeyValue = system.KeyValue
|
||||
|
||||
// ImageDeleteResponseItem image delete response item.
|
||||
//
|
||||
// Deprecated: use [image.DeleteResponse].
|
||||
type ImageDeleteResponseItem = image.DeleteResponse
|
||||
|
||||
// ImageSummary image summary.
|
||||
//
|
||||
// Deprecated: use [image.Summary].
|
||||
type ImageSummary = image.Summary
|
||||
|
||||
// ImageMetadata contains engine-local data about the image.
|
||||
//
|
||||
// Deprecated: use [image.Metadata].
|
||||
type ImageMetadata = image.Metadata
|
||||
|
||||
// ServiceCreateResponse contains the information returned to a client
|
||||
// on the creation of a new service.
|
||||
//
|
||||
// Deprecated: use [swarm.ServiceCreateResponse].
|
||||
type ServiceCreateResponse = swarm.ServiceCreateResponse
|
||||
|
||||
// ServiceUpdateResponse service update response.
|
||||
//
|
||||
// Deprecated: use [swarm.ServiceUpdateResponse].
|
||||
type ServiceUpdateResponse = swarm.ServiceUpdateResponse
|
||||
|
||||
// ContainerStartOptions holds parameters to start containers.
|
||||
//
|
||||
// Deprecated: use [container.StartOptions].
|
||||
type ContainerStartOptions = container.StartOptions
|
||||
|
||||
// ResizeOptions holds parameters to resize a TTY.
|
||||
// It can be used to resize container TTYs and
|
||||
// exec process TTYs too.
|
||||
//
|
||||
// Deprecated: use [container.ResizeOptions].
|
||||
type ResizeOptions = container.ResizeOptions
|
||||
|
||||
// ContainerAttachOptions holds parameters to attach to a container.
|
||||
//
|
||||
// Deprecated: use [container.AttachOptions].
|
||||
type ContainerAttachOptions = container.AttachOptions
|
||||
|
||||
// ContainerCommitOptions holds parameters to commit changes into a container.
|
||||
//
|
||||
// Deprecated: use [container.CommitOptions].
|
||||
type ContainerCommitOptions = container.CommitOptions
|
||||
|
||||
// ContainerListOptions holds parameters to list containers with.
|
||||
//
|
||||
// Deprecated: use [container.ListOptions].
|
||||
type ContainerListOptions = container.ListOptions
|
||||
|
||||
// ContainerLogsOptions holds parameters to filter logs with.
|
||||
//
|
||||
// Deprecated: use [container.LogsOptions].
|
||||
type ContainerLogsOptions = container.LogsOptions
|
||||
|
||||
// ContainerRemoveOptions holds parameters to remove containers.
|
||||
//
|
||||
// Deprecated: use [container.RemoveOptions].
|
||||
type ContainerRemoveOptions = container.RemoveOptions
|
||||
|
||||
// DecodeSecurityOptions decodes a security options string slice to a type safe
|
||||
// [system.SecurityOpt].
|
||||
//
|
||||
// Deprecated: use [system.DecodeSecurityOptions].
|
||||
func DecodeSecurityOptions(opts []string) ([]system.SecurityOpt, error) {
|
||||
return system.DecodeSecurityOptions(opts)
|
||||
}
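
Because these are Go type aliases rather than copies, the old and new spellings denote the same type and are interchangeable at compile time, so migrating a call site is mostly an import change. A minimal sketch, under the assumption of a client whose `ImageList` already accepts `image.ListOptions` (as one side of this comparison does; the wrapper function is hypothetical):

```go
package example

import (
	"context"

	"github.com/docker/docker/api/types/image"
	"github.com/docker/docker/client"
)

// listAllImages lists every image known to the daemon using the
// non-deprecated option type from api/types/image.
func listAllImages(ctx context.Context, cli *client.Client) ([]image.Summary, error) {
	// Formerly written as types.ImageListOptions{All: true}; because the old
	// name is an alias, both spellings refer to the same underlying type.
	return cli.ImageList(ctx, image.ListOptions{All: true})
}
```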
|
||||
|
||||
api/types/versions/README.md (new file)
@@ -0,0 +1,14 @@
|
||||
# Legacy API type versions

This package includes types for legacy API versions. The stable API types live in `api/types/*.go`.

Consider moving a type here when you need to keep backwards compatibility in the API. These legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal to `1.19`. Types in the `v1p20` package are valid for API version `1.20`, since versions below that use the legacy types in `v1p19`.

## Package name conventions

The package name convention is to use `v` as a prefix for the version number and `p` (patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention:

1. We cannot use `.` because it is interpreted by the language; think of `v1.20.CallFunction`.
2. We cannot use `_` because golint complains about it. The code is valid, but it arguably looks more awkward: `v1_20.CallFunction`.

For instance, if you want to modify a type that was available in version `1.21` of the API but will have different fields in version `1.22`, create a new package under `api/types/versions/v1p21`.
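
A minimal sketch of how a handler might select between these packages based on the negotiated API version (the helper function is hypothetical; `versions.LessThan`/`versions.Equal` come from `api/types/versions`, and the `ContainerJSON` types are the ones added below):

```go
package example

import (
	"github.com/docker/docker/api/types/versions"
	"github.com/docker/docker/api/types/versions/v1p19"
	"github.com/docker/docker/api/types/versions/v1p20"
)

// legacyContainerJSON picks the legacy response struct that matches the
// caller's negotiated API version; newer versions use the stable types.
func legacyContainerJSON(apiVersion string) interface{} {
	switch {
	case versions.LessThan(apiVersion, "1.20"):
		return &v1p19.ContainerJSON{} // valid for API versions <= 1.19
	case versions.Equal(apiVersion, "1.20"):
		return &v1p20.ContainerJSON{} // valid for API version 1.20
	default:
		return nil // API >= 1.21 uses the stable types in api/types
	}
}
```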
|
||||
api/types/versions/v1p19/types.go (new file)
@@ -0,0 +1,35 @@
|
||||
// Package v1p19 provides specific API types for the API version 1, patch 19.
|
||||
package v1p19 // import "github.com/docker/docker/api/types/versions/v1p19"
|
||||
|
||||
import (
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/versions/v1p20"
|
||||
"github.com/docker/go-connections/nat"
|
||||
)
|
||||
|
||||
// ContainerJSON is a backcompatibility struct for APIs prior to 1.20.
|
||||
// Note this is not used by the Windows daemon.
|
||||
type ContainerJSON struct {
|
||||
*types.ContainerJSONBase
|
||||
Volumes map[string]string
|
||||
VolumesRW map[string]bool
|
||||
Config *ContainerConfig
|
||||
NetworkSettings *v1p20.NetworkSettings
|
||||
}
|
||||
|
||||
// ContainerConfig is a backcompatibility struct for APIs prior to 1.20.
|
||||
type ContainerConfig struct {
|
||||
*container.Config
|
||||
|
||||
MacAddress string
|
||||
NetworkDisabled bool
|
||||
ExposedPorts map[nat.Port]struct{}
|
||||
|
||||
// backward compatibility, they now live in HostConfig
|
||||
VolumeDriver string
|
||||
Memory int64
|
||||
MemorySwap int64
|
||||
CPUShares int64 `json:"CpuShares"`
|
||||
CPUSet string `json:"Cpuset"`
|
||||
}
|
||||
api/types/versions/v1p20/types.go (new file)
@@ -0,0 +1,40 @@
|
||||
// Package v1p20 provides specific API types for the API version 1, patch 20.
|
||||
package v1p20 // import "github.com/docker/docker/api/types/versions/v1p20"
|
||||
|
||||
import (
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/go-connections/nat"
|
||||
)
|
||||
|
||||
// ContainerJSON is a backcompatibility struct for the API 1.20
|
||||
type ContainerJSON struct {
|
||||
*types.ContainerJSONBase
|
||||
Mounts []types.MountPoint
|
||||
Config *ContainerConfig
|
||||
NetworkSettings *NetworkSettings
|
||||
}
|
||||
|
||||
// ContainerConfig is a backcompatibility struct used in ContainerJSON for the API 1.20
|
||||
type ContainerConfig struct {
|
||||
*container.Config
|
||||
|
||||
MacAddress string
|
||||
NetworkDisabled bool
|
||||
ExposedPorts map[nat.Port]struct{}
|
||||
|
||||
// backward compatibility, they now live in HostConfig
|
||||
VolumeDriver string
|
||||
}
|
||||
|
||||
// StatsJSON is a backcompatibility struct used in Stats for APIs prior to 1.21
|
||||
type StatsJSON struct {
|
||||
types.Stats
|
||||
Network types.NetworkStats `json:"network,omitempty"`
|
||||
}
|
||||
|
||||
// NetworkSettings is a backward compatible struct for APIs prior to 1.21
|
||||
type NetworkSettings struct {
|
||||
types.NetworkSettingsBase
|
||||
types.DefaultNetworkSettings
|
||||
}
|
||||
@@ -238,13 +238,13 @@ type TopologyRequirement struct {
|
||||
// If requisite is specified, all topologies in preferred list MUST
|
||||
// also be present in the list of requisite topologies.
|
||||
//
|
||||
// If the SP is unable to make the provisioned volume available
|
||||
// If the SP is unable to to make the provisioned volume available
|
||||
// from any of the preferred topologies, the SP MAY choose a topology
|
||||
// from the list of requisite topologies.
|
||||
// If the list of requisite topologies is not specified, then the SP
|
||||
// MAY choose from the list of all possible topologies.
|
||||
// If the list of requisite topologies is specified and the SP is
|
||||
// unable to make the provisioned volume available from any of the
|
||||
// unable to to make the provisioned volume available from any of the
|
||||
// requisite topologies it MUST fail the CreateVolume call.
|
||||
//
|
||||
// Example 1:
|
||||
@@ -254,7 +254,7 @@ type TopologyRequirement struct {
|
||||
// {"region": "R1", "zone": "Z3"}
|
||||
// preferred =
|
||||
// {"region": "R1", "zone": "Z3"}
|
||||
// then the SP SHOULD first attempt to make the provisioned volume
|
||||
// then the the SP SHOULD first attempt to make the provisioned volume
|
||||
// available from "zone" "Z3" in the "region" "R1" and fall back to
|
||||
// "zone" "Z2" in the "region" "R1" if that is not possible.
|
||||
//
|
||||
@@ -268,7 +268,7 @@ type TopologyRequirement struct {
|
||||
// preferred =
|
||||
// {"region": "R1", "zone": "Z4"},
|
||||
// {"region": "R1", "zone": "Z2"}
|
||||
// then the SP SHOULD first attempt to make the provisioned volume
|
||||
// then the the SP SHOULD first attempt to make the provisioned volume
|
||||
// accessible from "zone" "Z4" in the "region" "R1" and fall back to
|
||||
// "zone" "Z2" in the "region" "R1" if that is not possible. If that
|
||||
// is not possible, the SP may choose between either the "zone"
|
||||
@@ -287,7 +287,7 @@ type TopologyRequirement struct {
|
||||
// preferred =
|
||||
// {"region": "R1", "zone": "Z5"},
|
||||
// {"region": "R1", "zone": "Z3"}
|
||||
// then the SP SHOULD first attempt to make the provisioned volume
|
||||
// then the the SP SHOULD first attempt to make the provisioned volume
|
||||
// accessible from the combination of the two "zones" "Z5" and "Z3" in
|
||||
// the "region" "R1". If that's not possible, it should fall back to
|
||||
// a combination of "Z5" and other possibilities from the list of
|
||||
|
||||
@@ -9,12 +9,12 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/containerd/content"
|
||||
cerrdefs "github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/gc"
|
||||
"github.com/containerd/containerd/images"
|
||||
"github.com/containerd/containerd/leases"
|
||||
@@ -24,7 +24,6 @@ import (
|
||||
"github.com/containerd/containerd/remotes"
|
||||
"github.com/containerd/containerd/remotes/docker"
|
||||
"github.com/containerd/containerd/remotes/docker/schema1" //nolint:staticcheck // Ignore SA1019: "github.com/containerd/containerd/remotes/docker/schema1" is deprecated: use images formatted in Docker Image Manifest v2, Schema 2, or OCI Image Spec v1.
|
||||
cerrdefs "github.com/containerd/errdefs"
|
||||
"github.com/containerd/log"
|
||||
distreference "github.com/distribution/reference"
|
||||
dimages "github.com/docker/docker/daemon/images"
|
||||
@@ -35,15 +34,14 @@ import (
|
||||
pkgprogress "github.com/docker/docker/pkg/progress"
|
||||
"github.com/docker/docker/reference"
|
||||
"github.com/moby/buildkit/cache"
|
||||
"github.com/moby/buildkit/client"
|
||||
"github.com/moby/buildkit/client/llb/sourceresolver"
|
||||
"github.com/moby/buildkit/client/llb"
|
||||
"github.com/moby/buildkit/session"
|
||||
"github.com/moby/buildkit/solver"
|
||||
"github.com/moby/buildkit/solver/pb"
|
||||
"github.com/moby/buildkit/source"
|
||||
"github.com/moby/buildkit/source/containerimage"
|
||||
srctypes "github.com/moby/buildkit/source/types"
|
||||
"github.com/moby/buildkit/sourcepolicy"
|
||||
policy "github.com/moby/buildkit/sourcepolicy/pb"
|
||||
spb "github.com/moby/buildkit/sourcepolicy/pb"
|
||||
"github.com/moby/buildkit/util/flightcontrol"
|
||||
"github.com/moby/buildkit/util/imageutil"
|
||||
@@ -82,77 +80,9 @@ func NewSource(opt SourceOpt) (*Source, error) {
|
||||
return &Source{SourceOpt: opt}, nil
|
||||
}
|
||||
|
||||
// Schemes returns a list of SourceOp identifier schemes that this source
|
||||
// should match.
|
||||
func (is *Source) Schemes() []string {
|
||||
return []string{srctypes.DockerImageScheme}
|
||||
}
|
||||
|
||||
// Identifier constructs an Identifier from the given scheme, ref, and attrs,
|
||||
// all of which come from a SourceOp.
|
||||
func (is *Source) Identifier(scheme, ref string, attrs map[string]string, platform *pb.Platform) (source.Identifier, error) {
|
||||
return is.registryIdentifier(ref, attrs, platform)
|
||||
}
|
||||
|
||||
// Copied from github.com/moby/buildkit/source/containerimage/source.go
|
||||
func (is *Source) registryIdentifier(ref string, attrs map[string]string, platform *pb.Platform) (source.Identifier, error) {
|
||||
id, err := containerimage.NewImageIdentifier(ref)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if platform != nil {
|
||||
id.Platform = &ocispec.Platform{
|
||||
OS: platform.OS,
|
||||
Architecture: platform.Architecture,
|
||||
Variant: platform.Variant,
|
||||
OSVersion: platform.OSVersion,
|
||||
}
|
||||
if platform.OSFeatures != nil {
|
||||
id.Platform.OSFeatures = append([]string{}, platform.OSFeatures...)
|
||||
}
|
||||
}
|
||||
|
||||
for k, v := range attrs {
|
||||
switch k {
|
||||
case pb.AttrImageResolveMode:
|
||||
rm, err := resolver.ParseImageResolveMode(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
id.ResolveMode = rm
|
||||
case pb.AttrImageRecordType:
|
||||
rt, err := parseImageRecordType(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
id.RecordType = rt
|
||||
case pb.AttrImageLayerLimit:
|
||||
l, err := strconv.Atoi(v)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "invalid layer limit %s", v)
|
||||
}
|
||||
if l <= 0 {
|
||||
return nil, errors.Errorf("invalid layer limit %s", v)
|
||||
}
|
||||
id.LayerLimit = &l
|
||||
}
|
||||
}
|
||||
|
||||
return id, nil
|
||||
}
|
||||
|
||||
func parseImageRecordType(v string) (client.UsageRecordType, error) {
|
||||
switch client.UsageRecordType(v) {
|
||||
case "", client.UsageRecordTypeRegular:
|
||||
return client.UsageRecordTypeRegular, nil
|
||||
case client.UsageRecordTypeInternal:
|
||||
return client.UsageRecordTypeInternal, nil
|
||||
case client.UsageRecordTypeFrontend:
|
||||
return client.UsageRecordTypeFrontend, nil
|
||||
default:
|
||||
return "", errors.Errorf("invalid record type %s", v)
|
||||
}
|
||||
// ID returns image scheme identifier
|
||||
func (is *Source) ID() string {
|
||||
return srctypes.DockerImageScheme
|
||||
}
|
||||
|
||||
func (is *Source) resolveLocal(refStr string) (*image.Image, error) {
|
||||
@@ -177,7 +107,7 @@ type resolveRemoteResult struct {
|
||||
dt []byte
|
||||
}
|
||||
|
||||
func (is *Source) resolveRemote(ctx context.Context, ref string, platform *ocispec.Platform, sm *session.Manager, g session.Group) (digest.Digest, []byte, error) {
|
||||
func (is *Source) resolveRemote(ctx context.Context, ref string, platform *ocispec.Platform, sm *session.Manager, g session.Group) (string, digest.Digest, []byte, error) {
|
||||
p := platforms.DefaultSpec()
|
||||
if platform != nil {
|
||||
p = *platform
|
||||
@@ -186,36 +116,34 @@ func (is *Source) resolveRemote(ctx context.Context, ref string, platform *ocisp
|
||||
key := "getconfig::" + ref + "::" + platforms.Format(p)
|
||||
res, err := is.g.Do(ctx, key, func(ctx context.Context) (*resolveRemoteResult, error) {
|
||||
res := resolver.DefaultPool.GetResolver(is.RegistryHosts, ref, "pull", sm, g)
|
||||
dgst, dt, err := imageutil.Config(ctx, ref, res, is.ContentStore, is.LeaseManager, platform)
|
||||
ref, dgst, dt, err := imageutil.Config(ctx, ref, res, is.ContentStore, is.LeaseManager, platform, []*policy.Policy{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &resolveRemoteResult{ref: ref, dgst: dgst, dt: dt}, nil
|
||||
})
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
return ref, "", nil, err
|
||||
}
|
||||
return res.dgst, res.dt, nil
|
||||
return res.ref, res.dgst, res.dt, nil
|
||||
}
|
||||
|
||||
// ResolveImageConfig returns image config for an image
|
||||
func (is *Source) ResolveImageConfig(ctx context.Context, ref string, opt sourceresolver.Opt, sm *session.Manager, g session.Group) (digest.Digest, []byte, error) {
|
||||
if opt.ImageOpt == nil {
|
||||
return "", nil, fmt.Errorf("can only resolve an image: %v, opt: %v", ref, opt)
|
||||
}
|
||||
func (is *Source) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager, g session.Group) (string, digest.Digest, []byte, error) {
|
||||
ref, err := applySourcePolicies(ctx, ref, opt.SourcePolicies)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
return "", "", nil, err
|
||||
}
|
||||
resolveMode, err := resolver.ParseImageResolveMode(opt.ImageOpt.ResolveMode)
|
||||
resolveMode, err := source.ParseImageResolveMode(opt.ResolveMode)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
return ref, "", nil, err
|
||||
}
|
||||
switch resolveMode {
|
||||
case resolver.ResolveModeForcePull:
|
||||
return is.resolveRemote(ctx, ref, opt.Platform, sm, g)
|
||||
case source.ResolveModeForcePull:
|
||||
ref, dgst, dt, err := is.resolveRemote(ctx, ref, opt.Platform, sm, g)
|
||||
// TODO: pull should fallback to local in case of failure to allow offline behavior
|
||||
// the fallback doesn't work currently
|
||||
return ref, dgst, dt, err
|
||||
/*
|
||||
if err == nil {
|
||||
return dgst, dt, err
|
||||
@@ -225,10 +153,10 @@ func (is *Source) ResolveImageConfig(ctx context.Context, ref string, opt source
|
||||
return "", dt, err
|
||||
*/
|
||||
|
||||
case resolver.ResolveModeDefault:
|
||||
case source.ResolveModeDefault:
|
||||
// default == prefer local, but in the future could be smarter
|
||||
fallthrough
|
||||
case resolver.ResolveModePreferLocal:
|
||||
case source.ResolveModePreferLocal:
|
||||
img, err := is.resolveLocal(ref)
|
||||
if err == nil {
|
||||
if opt.Platform != nil && !platformMatches(img, opt.Platform) {
|
||||
@@ -237,19 +165,19 @@ func (is *Source) ResolveImageConfig(ctx context.Context, ref string, opt source
|
||||
path.Join(img.OS, img.Architecture, img.Variant),
|
||||
)
|
||||
} else {
|
||||
return "", img.RawJSON(), err
|
||||
return ref, "", img.RawJSON(), err
|
||||
}
|
||||
}
|
||||
// fallback to remote
|
||||
return is.resolveRemote(ctx, ref, opt.Platform, sm, g)
|
||||
}
|
||||
// should never happen
|
||||
return "", nil, fmt.Errorf("builder cannot resolve image %s: invalid mode %q", ref, opt.ImageOpt.ResolveMode)
|
||||
return ref, "", nil, fmt.Errorf("builder cannot resolve image %s: invalid mode %q", ref, opt.ResolveMode)
|
||||
}
|
||||
|
||||
// Resolve returns access to pulling for an identifier
|
||||
func (is *Source) Resolve(ctx context.Context, id source.Identifier, sm *session.Manager, vtx solver.Vertex) (source.SourceInstance, error) {
|
||||
imageIdentifier, ok := id.(*containerimage.ImageIdentifier)
|
||||
imageIdentifier, ok := id.(*source.ImageIdentifier)
|
||||
if !ok {
|
||||
return nil, errors.Errorf("invalid image identifier %v", id)
|
||||
}
|
||||
@@ -273,7 +201,7 @@ type puller struct {
|
||||
is *Source
|
||||
resolveLocalOnce sync.Once
|
||||
g flightcontrol.Group[struct{}]
|
||||
src *containerimage.ImageIdentifier
|
||||
src *source.ImageIdentifier
|
||||
desc ocispec.Descriptor
|
||||
ref string
|
||||
config []byte
|
||||
@@ -325,7 +253,7 @@ func (p *puller) resolveLocal() {
|
||||
}
|
||||
}
|
||||
|
||||
if p.src.ResolveMode == resolver.ResolveModeDefault || p.src.ResolveMode == resolver.ResolveModePreferLocal {
|
||||
if p.src.ResolveMode == source.ResolveModeDefault || p.src.ResolveMode == source.ResolveModePreferLocal {
|
||||
ref := p.src.Reference.String()
|
||||
img, err := p.is.resolveLocal(ref)
|
||||
if err == nil {
|
||||
@@ -374,17 +302,12 @@ func (p *puller) resolve(ctx context.Context, g session.Group) error {
|
||||
if err != nil {
|
||||
return struct{}{}, err
|
||||
}
|
||||
_, dt, err := p.is.ResolveImageConfig(ctx, ref.String(), sourceresolver.Opt{
|
||||
Platform: &p.platform,
|
||||
ImageOpt: &sourceresolver.ResolveImageOpt{
|
||||
ResolveMode: p.src.ResolveMode.String(),
|
||||
},
|
||||
}, p.sm, g)
|
||||
newRef, _, dt, err := p.is.ResolveImageConfig(ctx, ref.String(), llb.ResolveImageConfigOpt{Platform: &p.platform, ResolveMode: p.src.ResolveMode.String()}, p.sm, g)
|
||||
if err != nil {
|
||||
return struct{}{}, err
|
||||
}
|
||||
|
||||
p.ref = ref.String()
|
||||
p.ref = newRef
|
||||
p.config = dt
|
||||
}
|
||||
return struct{}{}, nil
|
||||
@@ -943,8 +866,12 @@ func applySourcePolicies(ctx context.Context, str string, spls []*spb.Policy) (s
|
||||
if err != nil {
|
||||
return "", errors.WithStack(err)
|
||||
}
|
||||
op := &pb.SourceOp{
|
||||
Identifier: srctypes.DockerImageScheme + "://" + ref.String(),
|
||||
op := &pb.Op{
|
||||
Op: &pb.Op_Source{
|
||||
Source: &pb.SourceOp{
|
||||
Identifier: srctypes.DockerImageScheme + "://" + ref.String(),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
mut, err := sourcepolicy.NewEngine(spls).Evaluate(ctx, op)
|
||||
@@ -957,9 +884,9 @@ func applySourcePolicies(ctx context.Context, str string, spls []*spb.Policy) (s
|
||||
t string
|
||||
ok bool
|
||||
)
|
||||
t, newRef, ok := strings.Cut(op.GetIdentifier(), "://")
|
||||
t, newRef, ok := strings.Cut(op.GetSource().GetIdentifier(), "://")
|
||||
if !ok {
|
||||
return "", errors.Errorf("could not parse ref: %s", op.GetIdentifier())
|
||||
return "", errors.Errorf("could not parse ref: %s", op.GetSource().GetIdentifier())
|
||||
}
|
||||
if ok && t != srctypes.DockerImageScheme {
|
||||
return "", &imageutil.ResolveToNonImageError{Ref: str, Updated: newRef}
|
||||
|
||||
@@ -7,10 +7,10 @@ import (
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
cerrdefs "github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/leases"
|
||||
"github.com/containerd/containerd/mount"
|
||||
"github.com/containerd/containerd/snapshots"
|
||||
cerrdefs "github.com/containerd/errdefs"
|
||||
"github.com/docker/docker/daemon/graphdriver"
|
||||
"github.com/docker/docker/layer"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
|
||||
@@ -390,10 +390,9 @@ func (b *Builder) Build(ctx context.Context, opt backend.BuildConfig) (*builder.
|
||||
}
|
||||
|
||||
req := &controlapi.SolveRequest{
|
||||
Ref: id,
|
||||
Exporters: []*controlapi.Exporter{
|
||||
&controlapi.Exporter{Type: exporterName, Attrs: exporterAttrs},
|
||||
},
|
||||
Ref: id,
|
||||
Exporter: exporterName,
|
||||
ExporterAttrs: exporterAttrs,
|
||||
Frontend: "dockerfile.v0",
|
||||
FrontendAttrs: frontendAttrs,
|
||||
Session: opt.Options.SessionID,
|
||||
|
||||
@@ -67,11 +67,11 @@ func newController(ctx context.Context, rt http.RoundTripper, opt Opt) (*control
|
||||
}
|
||||
|
||||
func getTraceExporter(ctx context.Context) trace.SpanExporter {
|
||||
span, _, err := detect.Exporter()
|
||||
exp, err := detect.Exporter()
|
||||
if err != nil {
|
||||
log.G(ctx).WithError(err).Error("Failed to detect trace exporter for buildkit controller")
|
||||
}
|
||||
return span
|
||||
return exp
|
||||
}
|
||||
|
||||
func newSnapshotterController(ctx context.Context, rt http.RoundTripper, opt Opt) (*control.Controller, error) {
|
||||
@@ -105,8 +105,7 @@ func newSnapshotterController(ctx context.Context, rt http.RoundTripper, opt Opt
|
||||
wo, err := containerd.NewWorkerOpt(opt.Root, opt.ContainerdAddress, opt.Snapshotter, opt.ContainerdNamespace,
|
||||
opt.Rootless, map[string]string{
|
||||
label.Snapshotter: opt.Snapshotter,
|
||||
}, dns, nc, opt.ApparmorProfile, false, nil, "", nil, ctd.WithTimeout(60*time.Second),
|
||||
)
|
||||
}, dns, nc, opt.ApparmorProfile, false, nil, "", ctd.WithTimeout(60*time.Second))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -303,11 +302,9 @@ func newGraphDriverController(ctx context.Context, rt http.RoundTripper, opt Opt
|
||||
}
|
||||
|
||||
exp, err := mobyexporter.New(mobyexporter.Opt{
|
||||
ImageStore: dist.ImageStore,
|
||||
ContentStore: store,
|
||||
Differ: differ,
|
||||
ImageTagger: opt.ImageTagger,
|
||||
LeaseManager: lm,
|
||||
ImageStore: dist.ImageStore,
|
||||
Differ: differ,
|
||||
ImageTagger: opt.ImageTagger,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -16,7 +16,6 @@ import (
|
||||
"github.com/moby/buildkit/executor"
|
||||
"github.com/moby/buildkit/executor/oci"
|
||||
"github.com/moby/buildkit/executor/resources"
|
||||
resourcestypes "github.com/moby/buildkit/executor/resources/types"
|
||||
"github.com/moby/buildkit/executor/runcexecutor"
|
||||
"github.com/moby/buildkit/identity"
|
||||
"github.com/moby/buildkit/solver/pb"
|
||||
@@ -57,16 +56,9 @@ func newExecutor(root, cgroupParent string, net *libnetwork.Controller, dnsConfi
|
||||
return nil, err
|
||||
}
|
||||
|
||||
runcCmds := []string{"runc"}
|
||||
|
||||
// TODO: FIXME: testing env var, replace with something better or remove in a major version or two
|
||||
if runcOverride := os.Getenv("DOCKER_BUILDKIT_RUNC_COMMAND"); runcOverride != "" {
|
||||
runcCmds = []string{runcOverride}
|
||||
}
|
||||
|
||||
return runcexecutor.New(runcexecutor.Opt{
|
||||
Root: filepath.Join(root, "executor"),
|
||||
CommandCandidates: runcCmds,
|
||||
CommandCandidates: []string{"runc"},
|
||||
DefaultCgroupParent: cgroupParent,
|
||||
Rootless: rootless,
|
||||
NoPivot: os.Getenv("DOCKER_RAMDISK") != "",
|
||||
@@ -136,8 +128,8 @@ func (iface *lnInterface) init(c *libnetwork.Controller, n *libnetwork.Network)
|
||||
}
|
||||
|
||||
// TODO(neersighted): Unstub Sample(), and collect data from the libnetwork Endpoint.
|
||||
func (iface *lnInterface) Sample() (*resourcestypes.NetworkSample, error) {
|
||||
return &resourcestypes.NetworkSample{}, nil
|
||||
func (iface *lnInterface) Sample() (*network.Sample, error) {
|
||||
return &network.Sample{}, nil
|
||||
}
|
||||
|
||||
func (iface *lnInterface) Set(s *specs.Spec) error {
|
||||
|
||||
@@ -1,27 +1,24 @@
|
||||
package mobyexporter
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/containerd/content"
|
||||
"github.com/containerd/containerd/leases"
|
||||
"github.com/containerd/log"
|
||||
distref "github.com/distribution/reference"
|
||||
"github.com/docker/docker/image"
|
||||
"github.com/docker/docker/internal/compatcontext"
|
||||
"github.com/docker/docker/layer"
|
||||
"github.com/moby/buildkit/exporter"
|
||||
"github.com/moby/buildkit/exporter/containerimage"
|
||||
"github.com/moby/buildkit/exporter/containerimage/exptypes"
|
||||
"github.com/moby/buildkit/util/leaseutil"
|
||||
"github.com/opencontainers/go-digest"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
keyImageName = "name"
|
||||
)
|
||||
|
||||
// Differ can make a moby layer from a snapshot
|
||||
type Differ interface {
|
||||
EnsureLayer(ctx context.Context, key string) ([]layer.DiffID, error)
|
||||
@@ -33,11 +30,9 @@ type ImageTagger interface {
|
||||
|
||||
// Opt defines a struct for creating new exporter
|
||||
type Opt struct {
|
||||
ImageStore image.Store
|
||||
Differ Differ
|
||||
ImageTagger ImageTagger
|
||||
ContentStore content.Store
|
||||
LeaseManager leases.Manager
|
||||
ImageStore image.Store
|
||||
Differ Differ
|
||||
ImageTagger ImageTagger
|
||||
}
|
||||
|
||||
type imageExporter struct {
|
||||
@@ -50,14 +45,13 @@ func New(opt Opt) (exporter.Exporter, error) {
|
||||
return im, nil
|
||||
}
|
||||
|
||||
func (e *imageExporter) Resolve(ctx context.Context, id int, opt map[string]string) (exporter.ExporterInstance, error) {
|
||||
func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) {
|
||||
i := &imageExporterInstance{
|
||||
imageExporter: e,
|
||||
id: id,
|
||||
}
|
||||
for k, v := range opt {
|
||||
switch exptypes.ImageExporterOptKey(k) {
|
||||
case exptypes.OptKeyName:
|
||||
switch k {
|
||||
case keyImageName:
|
||||
for _, v := range strings.Split(v, ",") {
|
||||
ref, err := distref.ParseNormalizedNamed(v)
|
||||
if err != nil {
|
||||
@@ -77,15 +71,10 @@ func (e *imageExporter) Resolve(ctx context.Context, id int, opt map[string]stri
|
||||
|
||||
type imageExporterInstance struct {
|
||||
*imageExporter
|
||||
id int
|
||||
targetNames []distref.Named
|
||||
meta map[string][]byte
|
||||
}
|
||||
|
||||
func (e *imageExporterInstance) ID() int {
|
||||
return e.id
|
||||
}
|
||||
|
||||
func (e *imageExporterInstance) Name() string {
|
||||
return "exporting to image"
|
||||
}
|
||||
@@ -94,7 +83,7 @@ func (e *imageExporterInstance) Config() *exporter.Config {
|
||||
return exporter.NewConfig()
|
||||
}
|
||||
|
||||
func (e *imageExporterInstance) Export(ctx context.Context, inp *exporter.Source, inlineCache exptypes.InlineCache, sessionID string) (map[string]string, exporter.DescriptorReference, error) {
|
||||
func (e *imageExporterInstance) Export(ctx context.Context, inp *exporter.Source, sessionID string) (map[string]string, exporter.DescriptorReference, error) {
|
||||
if len(inp.Refs) > 1 {
|
||||
return nil, nil, fmt.Errorf("exporting multiple references to image store is currently unsupported")
|
||||
}
|
||||
@@ -114,14 +103,18 @@ func (e *imageExporterInstance) Export(ctx context.Context, inp *exporter.Source
|
||||
case 0:
|
||||
config = inp.Metadata[exptypes.ExporterImageConfigKey]
|
||||
case 1:
|
||||
ps, err := exptypes.ParsePlatforms(inp.Metadata)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("cannot export image, failed to parse platforms: %w", err)
|
||||
platformsBytes, ok := inp.Metadata[exptypes.ExporterPlatformsKey]
|
||||
if !ok {
|
||||
return nil, nil, fmt.Errorf("cannot export image, missing platforms mapping")
|
||||
}
|
||||
if len(ps.Platforms) != len(inp.Refs) {
|
||||
return nil, nil, errors.Errorf("number of platforms does not match references %d %d", len(ps.Platforms), len(inp.Refs))
|
||||
var p exptypes.Platforms
|
||||
if err := json.Unmarshal(platformsBytes, &p); err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "failed to parse platforms passed to exporter")
|
||||
}
|
||||
config = inp.Metadata[fmt.Sprintf("%s/%s", exptypes.ExporterImageConfigKey, ps.Platforms[0].ID)]
|
||||
if len(p.Platforms) != len(inp.Refs) {
|
||||
return nil, nil, errors.Errorf("number of platforms does not match references %d %d", len(p.Platforms), len(inp.Refs))
|
||||
}
|
||||
config = inp.Metadata[fmt.Sprintf("%s/%s", exptypes.ExporterImageConfigKey, p.Platforms[0].ID)]
|
||||
}
|
||||
|
||||
var diffs []digest.Digest
|
||||
@@ -164,21 +157,7 @@ func (e *imageExporterInstance) Export(ctx context.Context, inp *exporter.Source
|
||||
|
||||
diffs, history = normalizeLayersAndHistory(diffs, history, ref)
|
||||
|
||||
var inlineCacheEntry *exptypes.InlineCacheEntry
|
||||
if inlineCache != nil {
|
||||
inlineCacheResult, err := inlineCache(ctx)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if inlineCacheResult != nil {
|
||||
if ref != nil {
|
||||
inlineCacheEntry, _ = inlineCacheResult.FindRef(ref.ID())
|
||||
} else {
|
||||
inlineCacheEntry = inlineCacheResult.Ref
|
||||
}
|
||||
}
|
||||
}
|
||||
config, err = patchImageConfig(config, diffs, history, inlineCacheEntry)
|
||||
config, err = patchImageConfig(config, diffs, history, inp.Metadata[exptypes.ExporterInlineCache])
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@@ -192,10 +171,8 @@ func (e *imageExporterInstance) Export(ctx context.Context, inp *exporter.Source
|
||||
}
|
||||
_ = configDone(nil)
|
||||
|
||||
var names []string
|
||||
for _, targetName := range e.targetNames {
|
||||
names = append(names, targetName.String())
|
||||
if e.opt.ImageTagger != nil {
|
||||
if e.opt.ImageTagger != nil {
|
||||
for _, targetName := range e.targetNames {
|
||||
tagDone := oneOffProgress(ctx, "naming to "+targetName.String())
|
||||
if err := e.opt.ImageTagger.TagImage(ctx, image.ID(digest.Digest(id)), targetName); err != nil {
|
||||
return nil, nil, tagDone(err)
|
||||
@@ -204,49 +181,8 @@ func (e *imageExporterInstance) Export(ctx context.Context, inp *exporter.Source
|
||||
}
|
||||
}
|
||||
|
||||
resp := map[string]string{
|
||||
return map[string]string{
|
||||
exptypes.ExporterImageConfigDigestKey: configDigest.String(),
|
||||
exptypes.ExporterImageDigestKey: id.String(),
|
||||
}
|
||||
if len(names) > 0 {
|
||||
resp["image.name"] = strings.Join(names, ",")
|
||||
}
|
||||
|
||||
descRef, err := e.newTempReference(ctx, config)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to create a temporary descriptor reference: %w", err)
|
||||
}
|
||||
|
||||
return resp, descRef, nil
|
||||
}
|
||||
|
||||
func (e *imageExporterInstance) newTempReference(ctx context.Context, config []byte) (exporter.DescriptorReference, error) {
|
||||
lm := e.opt.LeaseManager
|
||||
|
||||
dgst := digest.FromBytes(config)
|
||||
leaseCtx, done, err := leaseutil.WithLease(ctx, lm, leaseutil.MakeTemporary)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
unlease := func(ctx context.Context) error {
|
||||
err := done(compatcontext.WithoutCancel(ctx))
|
||||
if err != nil {
|
||||
log.G(ctx).WithError(err).Error("failed to delete descriptor reference lease")
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
desc := ocispec.Descriptor{
|
||||
Digest: dgst,
|
||||
MediaType: "application/vnd.docker.container.image.v1+json",
|
||||
Size: int64(len(config)),
|
||||
}
|
||||
|
||||
if err := content.WriteBlob(leaseCtx, e.opt.ContentStore, desc.Digest.String(), bytes.NewReader(config), desc); err != nil {
|
||||
unlease(leaseCtx)
|
||||
return nil, fmt.Errorf("failed to save temporary image config: %w", err)
|
||||
}
|
||||
|
||||
return containerimage.NewDescriptorReference(desc, unlease), nil
|
||||
}, nil, nil
|
||||
}
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/containerd/log"
|
||||
"github.com/moby/buildkit/cache"
|
||||
"github.com/moby/buildkit/exporter/containerimage/exptypes"
|
||||
"github.com/moby/buildkit/util/progress"
|
||||
"github.com/moby/buildkit/util/system"
|
||||
"github.com/opencontainers/go-digest"
|
||||
@@ -39,7 +38,7 @@ func parseHistoryFromConfig(dt []byte) ([]ocispec.History, error) {
|
||||
return config.History, nil
|
||||
}
|
||||
|
||||
func patchImageConfig(dt []byte, dps []digest.Digest, history []ocispec.History, cache *exptypes.InlineCacheEntry) ([]byte, error) {
|
||||
func patchImageConfig(dt []byte, dps []digest.Digest, history []ocispec.History, cache []byte) ([]byte, error) {
|
||||
m := map[string]json.RawMessage{}
|
||||
if err := json.Unmarshal(dt, &m); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to parse image config for patch")
|
||||
@@ -80,7 +79,7 @@ func patchImageConfig(dt []byte, dps []digest.Digest, history []ocispec.History,
|
||||
}
|
||||
|
||||
if cache != nil {
|
||||
dt, err := json.Marshal(cache.Data)
|
||||
dt, err := json.Marshal(cache)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -19,7 +19,7 @@ func NewExporterWrapper(exp exporter.Exporter) (exporter.Exporter, error) {
|
||||
}
|
||||
|
||||
// Resolve applies moby specific attributes to the request.
|
||||
func (e *imageExporterMobyWrapper) Resolve(ctx context.Context, id int, exporterAttrs map[string]string) (exporter.ExporterInstance, error) {
|
||||
func (e *imageExporterMobyWrapper) Resolve(ctx context.Context, exporterAttrs map[string]string) (exporter.ExporterInstance, error) {
|
||||
if exporterAttrs == nil {
|
||||
exporterAttrs = make(map[string]string)
|
||||
}
|
||||
@@ -33,5 +33,5 @@ func (e *imageExporterMobyWrapper) Resolve(ctx context.Context, id int, exporter
|
||||
exporterAttrs[string(exptypes.OptKeyDanglingPrefix)] = "moby-dangling"
|
||||
}
|
||||
|
||||
return e.exp.Resolve(ctx, id, exporterAttrs)
|
||||
return e.exp.Resolve(ctx, exporterAttrs)
|
||||
}
|
||||
|
||||
@@ -12,7 +12,7 @@ import (
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/containerd/containerd/rootfs"
|
||||
"github.com/containerd/log"
|
||||
imageadapter "github.com/docker/docker/builder/builder-next/adapters/containerimage"
|
||||
"github.com/docker/docker/builder/builder-next/adapters/containerimage"
|
||||
mobyexporter "github.com/docker/docker/builder/builder-next/exporter"
|
||||
distmetadata "github.com/docker/docker/distribution/metadata"
|
||||
"github.com/docker/docker/distribution/xfer"
|
||||
@@ -23,7 +23,7 @@ import (
|
||||
"github.com/moby/buildkit/cache"
|
||||
cacheconfig "github.com/moby/buildkit/cache/config"
|
||||
"github.com/moby/buildkit/client"
|
||||
"github.com/moby/buildkit/client/llb/sourceresolver"
|
||||
"github.com/moby/buildkit/client/llb"
|
||||
"github.com/moby/buildkit/executor"
|
||||
"github.com/moby/buildkit/exporter"
|
||||
localexporter "github.com/moby/buildkit/exporter/local"
|
||||
@@ -37,7 +37,6 @@ import (
|
||||
"github.com/moby/buildkit/solver/llbsolver/ops"
|
||||
"github.com/moby/buildkit/solver/pb"
|
||||
"github.com/moby/buildkit/source"
|
||||
"github.com/moby/buildkit/source/containerimage"
|
||||
"github.com/moby/buildkit/source/git"
|
||||
"github.com/moby/buildkit/source/http"
|
||||
"github.com/moby/buildkit/source/local"
|
||||
@@ -76,7 +75,7 @@ type Opt struct {
|
||||
ContentStore *containerdsnapshot.Store
|
||||
CacheManager cache.Manager
|
||||
LeaseManager *leaseutil.Manager
|
||||
ImageSource *imageadapter.Source
|
||||
ImageSource *containerimage.Source
|
||||
DownloadManager *xfer.LayerDownloadManager
|
||||
V2MetadataService distmetadata.V2MetadataService
|
||||
Transport nethttp.RoundTripper
|
||||
@@ -213,49 +212,6 @@ func (w *Worker) LoadRef(ctx context.Context, id string, hidden bool) (cache.Imm
|
||||
return w.CacheManager().Get(ctx, id, nil, opts...)
|
||||
}
|
||||
|
||||
func (w *Worker) ResolveSourceMetadata(ctx context.Context, op *pb.SourceOp, opt sourceresolver.Opt, sm *session.Manager, g session.Group) (*sourceresolver.MetaResponse, error) {
|
||||
if opt.SourcePolicies != nil {
|
||||
return nil, errors.New("source policies can not be set for worker")
|
||||
}
|
||||
|
||||
var platform *pb.Platform
|
||||
if p := opt.Platform; p != nil {
|
||||
platform = &pb.Platform{
|
||||
Architecture: p.Architecture,
|
||||
OS: p.OS,
|
||||
Variant: p.Variant,
|
||||
OSVersion: p.OSVersion,
|
||||
}
|
||||
}
|
||||
|
||||
id, err := w.SourceManager.Identifier(&pb.Op_Source{Source: op}, platform)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch idt := id.(type) {
|
||||
case *containerimage.ImageIdentifier:
|
||||
if opt.ImageOpt == nil {
|
||||
opt.ImageOpt = &sourceresolver.ResolveImageOpt{}
|
||||
}
|
||||
dgst, config, err := w.ImageSource.ResolveImageConfig(ctx, idt.Reference.String(), opt, sm, g)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &sourceresolver.MetaResponse{
|
||||
Op: op,
|
||||
Image: &sourceresolver.ResolveImageResponse{
|
||||
Digest: dgst,
|
||||
Config: config,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
return &sourceresolver.MetaResponse{
|
||||
Op: op,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ResolveOp converts a LLB vertex into a LLB operation
|
||||
func (w *Worker) ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge, sm *session.Manager) (solver.Op, error) {
|
||||
if baseOp, ok := v.Sys().(*pb.Op); ok {
|
||||
@@ -280,7 +236,7 @@ func (w *Worker) ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge, sm *se
|
||||
}
|
||||
|
||||
// ResolveImageConfig returns image config for an image
|
||||
func (w *Worker) ResolveImageConfig(ctx context.Context, ref string, opt sourceresolver.Opt, sm *session.Manager, g session.Group) (digest.Digest, []byte, error) {
|
||||
func (w *Worker) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager, g session.Group) (string, digest.Digest, []byte, error) {
|
||||
return w.ImageSource.ResolveImageConfig(ctx, ref, opt, sm, g)
|
||||
}
|
||||
|
||||
@@ -570,7 +526,3 @@ type emptyProvider struct{}
|
||||
func (p *emptyProvider) ReaderAt(ctx context.Context, dec ocispec.Descriptor) (content.ReaderAt, error) {
|
||||
return nil, errors.Errorf("ReaderAt not implemented for empty provider")
|
||||
}
|
||||
|
||||
func (p *emptyProvider) Info(ctx context.Context, d digest.Digest) (content.Info, error) {
|
||||
return content.Info{}, errors.Errorf("Info not implemented for empty provider")
|
||||
}
|
||||
|
||||
@@ -64,7 +64,7 @@ type ExecBackend interface {
|
||||
// ContainerRm removes a container specified by `id`.
|
||||
ContainerRm(name string, config *backend.ContainerRmConfig) error
|
||||
// ContainerStart starts a new container
|
||||
ContainerStart(ctx context.Context, containerID string, checkpoint string, checkpointDir string) error
|
||||
ContainerStart(ctx context.Context, containerID string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error
|
||||
// ContainerWait stops processing until the given container is stopped.
|
||||
ContainerWait(ctx context.Context, name string, condition containerpkg.WaitCondition) (<-chan containerpkg.StateStatus, error)
|
||||
}
|
||||
|
||||
@@ -72,7 +72,7 @@ func (c *containerManager) Run(ctx context.Context, cID string, stdout, stderr i
|
||||
}
|
||||
}()
|
||||
|
||||
if err := c.backend.ContainerStart(ctx, cID, "", ""); err != nil {
|
||||
if err := c.backend.ContainerStart(ctx, cID, nil, "", ""); err != nil {
|
||||
close(finished)
|
||||
logCancellationError(cancelErrCh, "error from ContainerStart: "+err.Error())
|
||||
return err
|
||||
|
||||
@@ -22,7 +22,7 @@ func normalizeWorkdir(_ string, current string, requested string) (string, error
|
||||
if !filepath.IsAbs(requested) {
|
||||
return filepath.Join(string(os.PathSeparator), current, requested), nil
|
||||
}
|
||||
return filepath.Clean(requested), nil
|
||||
return requested, nil
|
||||
}
|
||||
|
||||
// resolveCmdLine takes a command line arg set and optionally prepends a platform-specific
|
||||
|
||||
@@ -15,13 +15,11 @@ import (
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/backend"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/network"
|
||||
"github.com/docker/docker/builder"
|
||||
"github.com/docker/docker/image"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/chrootarchive"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
"github.com/docker/docker/runconfig"
|
||||
"github.com/docker/go-connections/nat"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
@@ -379,21 +377,12 @@ func hostConfigFromOptions(options *types.ImageBuildOptions) *container.HostConf
|
||||
Ulimits: options.Ulimits,
|
||||
}
|
||||
|
||||
// We need to make sure no empty string or "default" NetworkMode is
|
||||
// provided to the daemon as it doesn't support them.
|
||||
//
|
||||
// This is in line with what the ContainerCreate API endpoint does.
|
||||
networkMode := options.NetworkMode
|
||||
if networkMode == "" || networkMode == network.NetworkDefault {
|
||||
networkMode = runconfig.DefaultDaemonNetworkMode().NetworkName()
|
||||
}
|
||||
|
||||
hc := &container.HostConfig{
|
||||
SecurityOpt: options.SecurityOpt,
|
||||
Isolation: options.Isolation,
|
||||
ShmSize: options.ShmSize,
|
||||
Resources: resources,
|
||||
NetworkMode: container.NetworkMode(networkMode),
|
||||
NetworkMode: container.NetworkMode(options.NetworkMode),
|
||||
// Set a log config to override any default value set on the daemon
|
||||
LogConfig: defaultLogConfig,
|
||||
ExtraHosts: options.ExtraHosts,
|
||||
|
||||
@@ -27,25 +27,25 @@ func parseChownFlag(ctx context.Context, builder *Builder, state *dispatchState,
|
||||
|
||||
passwdPath, err := symlink.FollowSymlinkInScope(filepath.Join(ctrRootPath, "etc", "passwd"), ctrRootPath)
|
||||
if err != nil {
|
||||
return idtools.Identity{}, errors.Wrap(err, "can't resolve /etc/passwd path in container rootfs")
|
||||
return idtools.Identity{}, errors.Wrapf(err, "can't resolve /etc/passwd path in container rootfs")
|
||||
}
|
||||
groupPath, err := symlink.FollowSymlinkInScope(filepath.Join(ctrRootPath, "etc", "group"), ctrRootPath)
|
||||
if err != nil {
|
||||
return idtools.Identity{}, errors.Wrap(err, "can't resolve /etc/group path in container rootfs")
|
||||
return idtools.Identity{}, errors.Wrapf(err, "can't resolve /etc/group path in container rootfs")
|
||||
}
|
||||
uid, err := lookupUser(userStr, passwdPath)
|
||||
if err != nil {
|
||||
return idtools.Identity{}, errors.Wrap(err, "can't find uid for user "+userStr)
|
||||
return idtools.Identity{}, errors.Wrapf(err, "can't find uid for user "+userStr)
|
||||
}
|
||||
gid, err := lookupGroup(grpStr, groupPath)
|
||||
if err != nil {
|
||||
return idtools.Identity{}, errors.Wrap(err, "can't find gid for group "+grpStr)
|
||||
return idtools.Identity{}, errors.Wrapf(err, "can't find gid for group "+grpStr)
|
||||
}
|
||||
|
||||
// convert as necessary because of user namespaces
|
||||
chownPair, err := identityMapping.ToHost(idtools.Identity{UID: uid, GID: gid})
|
||||
if err != nil {
|
||||
return idtools.Identity{}, errors.Wrap(err, "unable to convert uid/gid to host mapping")
|
||||
return idtools.Identity{}, errors.Wrapf(err, "unable to convert uid/gid to host mapping")
|
||||
}
|
||||
return chownPair, nil
|
||||
}
|
||||
|
||||
@@ -46,7 +46,7 @@ func (m *MockBackend) CommitBuildStep(ctx context.Context, c backend.CommitConfi
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (m *MockBackend) ContainerStart(ctx context.Context, containerID string, checkpoint string, checkpointDir string) error {
|
||||
func (m *MockBackend) ContainerStart(ctx context.Context, containerID string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -44,8 +44,8 @@ func downloadRemote(remoteURL string) (string, io.ReadCloser, error) {
|
||||
// GetWithStatusError does an http.Get() and returns an error if the
|
||||
// status code is 4xx or 5xx.
|
||||
func GetWithStatusError(address string) (resp *http.Response, err error) {
|
||||
resp, err = http.Get(address) // #nosec G107 -- ignore G107: Potential HTTP request made with variable url
|
||||
if err != nil {
|
||||
// #nosec G107
|
||||
if resp, err = http.Get(address); err != nil {
|
||||
if uerr, ok := err.(*url.Error); ok {
|
||||
if derr, ok := uerr.Err.(*net.DNSError); ok && !derr.IsTimeout {
|
||||
return nil, errdefs.NotFound(err)
|
||||
|
||||
@@ -10,11 +10,11 @@ import (
|
||||
)
|
||||
|
||||
// DistributionInspect returns the image digest with the full manifest.
|
||||
func (cli *Client) DistributionInspect(ctx context.Context, imageRef, encodedRegistryAuth string) (registry.DistributionInspect, error) {
|
||||
func (cli *Client) DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registry.DistributionInspect, error) {
|
||||
// Contact the registry to retrieve digest and platform information
|
||||
var distributionInspect registry.DistributionInspect
|
||||
if imageRef == "" {
|
||||
return distributionInspect, objectNotFoundError{object: "distribution", id: imageRef}
|
||||
if image == "" {
|
||||
return distributionInspect, objectNotFoundError{object: "distribution", id: image}
|
||||
}
|
||||
|
||||
if err := cli.NewVersionError(ctx, "1.30", "distribution inspect"); err != nil {
|
||||
@@ -28,7 +28,7 @@ func (cli *Client) DistributionInspect(ctx context.Context, imageRef, encodedReg
|
||||
}
|
||||
}
|
||||
|
||||
resp, err := cli.get(ctx, "/distribution/"+imageRef+"/json", url.Values{}, headers)
|
||||
resp, err := cli.get(ctx, "/distribution/"+image+"/json", url.Values{}, headers)
|
||||
defer ensureReaderClosed(resp)
|
||||
if err != nil {
|
||||
return distributionInspect, err
|
||||
|
||||
@@ -8,13 +8,13 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/distribution/reference"
|
||||
"github.com/docker/docker/api/types/image"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/registry"
|
||||
)
|
||||
|
||||
// ImageCreate creates a new image based on the parent options.
|
||||
// It returns the JSON content in the response body.
|
||||
func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options image.CreateOptions) (io.ReadCloser, error) {
|
||||
func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) {
|
||||
ref, err := reference.ParseNormalizedNamed(parentReference)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/api/types/image"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/registry"
|
||||
"github.com/docker/docker/errdefs"
|
||||
"gotest.tools/v3/assert"
|
||||
@@ -20,7 +20,7 @@ func TestImageCreateError(t *testing.T) {
|
||||
client := &Client{
|
||||
client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
|
||||
}
|
||||
_, err := client.ImageCreate(context.Background(), "reference", image.CreateOptions{})
|
||||
_, err := client.ImageCreate(context.Background(), "reference", types.ImageCreateOptions{})
|
||||
assert.Check(t, is.ErrorType(err, errdefs.IsSystem))
|
||||
}
|
||||
|
||||
@@ -58,7 +58,7 @@ func TestImageCreate(t *testing.T) {
|
||||
}),
|
||||
}
|
||||
|
||||
createResponse, err := client.ImageCreate(context.Background(), expectedReference, image.CreateOptions{
|
||||
createResponse, err := client.ImageCreate(context.Background(), expectedReference, types.ImageCreateOptions{
|
||||
RegistryAuth: expectedRegistryAuth,
|
||||
})
|
||||
if err != nil {
|
||||
|
||||
@@ -8,12 +8,11 @@ import (

"github.com/distribution/reference"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/image"
)

// ImageImport creates a new image based on the source options.
// It returns the JSON content in the response body.
func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options image.ImportOptions) (io.ReadCloser, error) {
func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) {
if ref != "" {
// Check if the given image name can be resolved
if _, err := reference.ParseNormalizedNamed(ref); err != nil {

@@ -11,7 +11,6 @@ import (
"testing"

"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/image"
"github.com/docker/docker/errdefs"
"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"
@@ -21,7 +20,7 @@ func TestImageImportError(t *testing.T) {
client := &Client{
client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
}
_, err := client.ImageImport(context.Background(), types.ImageImportSource{}, "image:tag", image.ImportOptions{})
_, err := client.ImageImport(context.Background(), types.ImageImportSource{}, "image:tag", types.ImageImportOptions{})
assert.Check(t, is.ErrorType(err, errdefs.IsSystem))
}

@@ -64,7 +63,7 @@ func TestImageImport(t *testing.T) {
importResponse, err := client.ImageImport(context.Background(), types.ImageImportSource{
Source: strings.NewReader("source"),
SourceName: "image_source",
}, "repository_name:imported", image.ImportOptions{
}, "repository_name:imported", types.ImageImportOptions{
Tag: "imported",
Message: "A message",
Changes: []string{"change1", "change2"},
@@ -5,13 +5,14 @@ import (
"encoding/json"
"net/url"

"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/image"
"github.com/docker/docker/api/types/versions"
)

// ImageList returns a list of images in the docker host.
func (cli *Client) ImageList(ctx context.Context, options image.ListOptions) ([]image.Summary, error) {
func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]image.Summary, error) {
var images []image.Summary

// Make sure we negotiated (if the client is configured to do so),

@@ -11,6 +11,7 @@ import (
"strings"
"testing"

"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/image"
"github.com/docker/docker/errdefs"
@@ -23,7 +24,7 @@ func TestImageListError(t *testing.T) {
client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
}

_, err := client.ImageList(context.Background(), image.ListOptions{})
_, err := client.ImageList(context.Background(), types.ImageListOptions{})
assert.Check(t, is.ErrorType(err, errdefs.IsSystem))
}

@@ -35,7 +36,7 @@ func TestImageListConnectionError(t *testing.T) {
client, err := NewClientWithOpts(WithAPIVersionNegotiation(), WithHost("tcp://no-such-host.invalid"))
assert.NilError(t, err)

_, err = client.ImageList(context.Background(), image.ListOptions{})
_, err = client.ImageList(context.Background(), types.ImageListOptions{})
assert.Check(t, is.ErrorType(err, IsErrConnectionFailed))
}

@@ -43,11 +44,11 @@ func TestImageList(t *testing.T) {
const expectedURL = "/images/json"

listCases := []struct {
options image.ListOptions
options types.ImageListOptions
expectedQueryParams map[string]string
}{
{
options: image.ListOptions{},
options: types.ImageListOptions{},
expectedQueryParams: map[string]string{
"all": "",
"filter": "",
@@ -55,7 +56,7 @@ func TestImageList(t *testing.T) {
},
},
{
options: image.ListOptions{
options: types.ImageListOptions{
Filters: filters.NewArgs(
filters.Arg("label", "label1"),
filters.Arg("label", "label2"),
@@ -69,7 +70,7 @@ func TestImageList(t *testing.T) {
},
},
{
options: image.ListOptions{
options: types.ImageListOptions{
Filters: filters.NewArgs(filters.Arg("dangling", "false")),
},
expectedQueryParams: map[string]string{
@@ -152,7 +153,7 @@ func TestImageListApiBefore125(t *testing.T) {
version: "1.24",
}

options := image.ListOptions{
options := types.ImageListOptions{
Filters: filters.NewArgs(filters.Arg("reference", "image:tag")),
}

@@ -173,12 +174,12 @@ func TestImageListWithSharedSize(t *testing.T) {
for _, tc := range []struct {
name string
version string
options image.ListOptions
options types.ImageListOptions
sharedSize string // expected value for the shared-size query param, or empty if it should not be set.
}{
{name: "unset after 1.42, no options set", version: "1.42"},
{name: "set after 1.42, if requested", version: "1.42", options: image.ListOptions{SharedSize: true}, sharedSize: "1"},
{name: "unset before 1.42, even if requested", version: "1.41", options: image.ListOptions{SharedSize: true}},
{name: "set after 1.42, if requested", version: "1.42", options: types.ImageListOptions{SharedSize: true}, sharedSize: "1"},
{name: "unset before 1.42, even if requested", version: "1.41", options: types.ImageListOptions{SharedSize: true}},
} {
tc := tc
t.Run(tc.name, func(t *testing.T) {
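The TestImageListWithSharedSize cases above pin down when the shared-size query parameter is sent: only when it was requested and only on API 1.42 or newer. A minimal, self-contained sketch of that gating pattern; this is an illustration, not the client's actual implementation, and the helper name is hypothetical:

package main

import (
	"fmt"
	"net/url"

	"github.com/docker/docker/api/types/versions"
)

// buildListQuery is a hypothetical helper showing the version gating the test
// table above exercises: the shared-size parameter is only added when it was
// requested and the negotiated API version is 1.42 or newer.
func buildListQuery(apiVersion string, sharedSize bool) url.Values {
	query := url.Values{}
	if sharedSize && versions.GreaterThanOrEqualTo(apiVersion, "1.42") {
		query.Set("shared-size", "1")
	}
	return query
}

func main() {
	fmt.Println(buildListQuery("1.42", true).Encode()) // shared-size=1
	fmt.Println(buildListQuery("1.41", true).Encode()) // empty: API too old
}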
@@ -7,7 +7,7 @@ import (
"strings"

"github.com/distribution/reference"
"github.com/docker/docker/api/types/image"
"github.com/docker/docker/api/types"
"github.com/docker/docker/errdefs"
)

@@ -19,7 +19,7 @@ import (
// FIXME(vdemeester): there is currently used in a few way in docker/docker
// - if not in trusted content, ref is used to pass the whole reference, and tag is empty
// - if in trusted content, ref is used to pass the reference name, and tag for the digest
func (cli *Client) ImagePull(ctx context.Context, refStr string, options image.PullOptions) (io.ReadCloser, error) {
func (cli *Client) ImagePull(ctx context.Context, refStr string, options types.ImagePullOptions) (io.ReadCloser, error) {
ref, err := reference.ParseNormalizedNamed(refStr)
if err != nil {
return nil, err

@@ -9,7 +9,7 @@ import (
"strings"
"testing"

"github.com/docker/docker/api/types/image"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/registry"
"github.com/docker/docker/errdefs"
"gotest.tools/v3/assert"
@@ -23,7 +23,7 @@ func TestImagePullReferenceParseError(t *testing.T) {
}),
}
// An empty reference is an invalid reference
_, err := client.ImagePull(context.Background(), "", image.PullOptions{})
_, err := client.ImagePull(context.Background(), "", types.ImagePullOptions{})
if err == nil || !strings.Contains(err.Error(), "invalid reference format") {
t.Fatalf("expected an error, got %v", err)
}
@@ -33,7 +33,7 @@ func TestImagePullAnyError(t *testing.T) {
client := &Client{
client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
}
_, err := client.ImagePull(context.Background(), "myimage", image.PullOptions{})
_, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{})
assert.Check(t, is.ErrorType(err, errdefs.IsSystem))
}

@@ -41,7 +41,7 @@ func TestImagePullStatusUnauthorizedError(t *testing.T) {
client := &Client{
client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")),
}
_, err := client.ImagePull(context.Background(), "myimage", image.PullOptions{})
_, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{})
assert.Check(t, is.ErrorType(err, errdefs.IsUnauthorized))
}

@@ -52,7 +52,7 @@ func TestImagePullWithUnauthorizedErrorAndPrivilegeFuncError(t *testing.T) {
privilegeFunc := func() (string, error) {
return "", fmt.Errorf("Error requesting privilege")
}
_, err := client.ImagePull(context.Background(), "myimage", image.PullOptions{
_, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{
PrivilegeFunc: privilegeFunc,
})
if err == nil || err.Error() != "Error requesting privilege" {
@@ -67,7 +67,7 @@ func TestImagePullWithUnauthorizedErrorAndAnotherUnauthorizedError(t *testing.T)
privilegeFunc := func() (string, error) {
return "a-auth-header", nil
}
_, err := client.ImagePull(context.Background(), "myimage", image.PullOptions{
_, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{
PrivilegeFunc: privilegeFunc,
})
assert.Check(t, is.ErrorType(err, errdefs.IsUnauthorized))
@@ -108,7 +108,7 @@ func TestImagePullWithPrivilegedFuncNoError(t *testing.T) {
privilegeFunc := func() (string, error) {
return "IAmValid", nil
}
resp, err := client.ImagePull(context.Background(), "myimage", image.PullOptions{
resp, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{
RegistryAuth: "NotValid",
PrivilegeFunc: privilegeFunc,
})
@@ -179,7 +179,7 @@ func TestImagePullWithoutErrors(t *testing.T) {
}, nil
}),
}
resp, err := client.ImagePull(context.Background(), pullCase.reference, image.PullOptions{
resp, err := client.ImagePull(context.Background(), pullCase.reference, types.ImagePullOptions{
All: pullCase.all,
})
if err != nil {
@@ -8,7 +8,7 @@ import (
"net/url"

"github.com/distribution/reference"
"github.com/docker/docker/api/types/image"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/registry"
"github.com/docker/docker/errdefs"
)
@@ -17,7 +17,7 @@ import (
// It executes the privileged function if the operation is unauthorized
// and it tries one more time.
// It's up to the caller to handle the io.ReadCloser and close it properly.
func (cli *Client) ImagePush(ctx context.Context, image string, options image.PushOptions) (io.ReadCloser, error) {
func (cli *Client) ImagePush(ctx context.Context, image string, options types.ImagePushOptions) (io.ReadCloser, error) {
ref, err := reference.ParseNormalizedNamed(image)
if err != nil {
return nil, err

@@ -9,7 +9,7 @@ import (
"strings"
"testing"

"github.com/docker/docker/api/types/image"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/registry"
"github.com/docker/docker/errdefs"
"gotest.tools/v3/assert"
@@ -23,12 +23,12 @@ func TestImagePushReferenceError(t *testing.T) {
}),
}
// An empty reference is an invalid reference
_, err := client.ImagePush(context.Background(), "", image.PushOptions{})
_, err := client.ImagePush(context.Background(), "", types.ImagePushOptions{})
if err == nil || !strings.Contains(err.Error(), "invalid reference format") {
t.Fatalf("expected an error, got %v", err)
}
// An canonical reference cannot be pushed
_, err = client.ImagePush(context.Background(), "repo@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", image.PushOptions{})
_, err = client.ImagePush(context.Background(), "repo@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", types.ImagePushOptions{})
if err == nil || err.Error() != "cannot push a digest reference" {
t.Fatalf("expected an error, got %v", err)
}
@@ -38,7 +38,7 @@ func TestImagePushAnyError(t *testing.T) {
client := &Client{
client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
}
_, err := client.ImagePush(context.Background(), "myimage", image.PushOptions{})
_, err := client.ImagePush(context.Background(), "myimage", types.ImagePushOptions{})
assert.Check(t, is.ErrorType(err, errdefs.IsSystem))
}

@@ -46,7 +46,7 @@ func TestImagePushStatusUnauthorizedError(t *testing.T) {
client := &Client{
client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")),
}
_, err := client.ImagePush(context.Background(), "myimage", image.PushOptions{})
_, err := client.ImagePush(context.Background(), "myimage", types.ImagePushOptions{})
assert.Check(t, is.ErrorType(err, errdefs.IsUnauthorized))
}

@@ -57,7 +57,7 @@ func TestImagePushWithUnauthorizedErrorAndPrivilegeFuncError(t *testing.T) {
privilegeFunc := func() (string, error) {
return "", fmt.Errorf("Error requesting privilege")
}
_, err := client.ImagePush(context.Background(), "myimage", image.PushOptions{
_, err := client.ImagePush(context.Background(), "myimage", types.ImagePushOptions{
PrivilegeFunc: privilegeFunc,
})
if err == nil || err.Error() != "Error requesting privilege" {
@@ -72,7 +72,7 @@ func TestImagePushWithUnauthorizedErrorAndAnotherUnauthorizedError(t *testing.T)
privilegeFunc := func() (string, error) {
return "a-auth-header", nil
}
_, err := client.ImagePush(context.Background(), "myimage", image.PushOptions{
_, err := client.ImagePush(context.Background(), "myimage", types.ImagePushOptions{
PrivilegeFunc: privilegeFunc,
})
assert.Check(t, is.ErrorType(err, errdefs.IsUnauthorized))
@@ -109,7 +109,7 @@ func TestImagePushWithPrivilegedFuncNoError(t *testing.T) {
privilegeFunc := func() (string, error) {
return "IAmValid", nil
}
resp, err := client.ImagePush(context.Background(), "myimage:tag", image.PushOptions{
resp, err := client.ImagePush(context.Background(), "myimage:tag", types.ImagePushOptions{
RegistryAuth: "NotValid",
PrivilegeFunc: privilegeFunc,
})
@@ -179,7 +179,7 @@ func TestImagePushWithoutErrors(t *testing.T) {
}, nil
}),
}
resp, err := client.ImagePush(context.Background(), tc.reference, image.PushOptions{
resp, err := client.ImagePush(context.Background(), tc.reference, types.ImagePushOptions{
All: tc.all,
})
if err != nil {
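The push tests above assert two reference rules: an empty reference fails to parse ("invalid reference format"), and a canonical (digested) reference is rejected with "cannot push a digest reference". A small sketch of how such a check can be expressed with github.com/distribution/reference; the helper is hypothetical, not the client's code:

package main

import (
	"errors"
	"fmt"

	"github.com/distribution/reference"
)

// checkPushable mirrors the behaviour the tests above assert: an unparsable
// reference returns the parse error, and a canonical (digest) reference is
// rejected because a digest cannot be pushed.
func checkPushable(ref string) error {
	named, err := reference.ParseNormalizedNamed(ref)
	if err != nil {
		return err // e.g. "invalid reference format" for an empty string
	}
	if _, isCanonical := named.(reference.Canonical); isCanonical {
		return errors.New("cannot push a digest reference")
	}
	return nil
}

func main() {
	fmt.Println(checkPushable(""))            // parse error
	fmt.Println(checkPushable("repo@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")) // digest rejected
	fmt.Println(checkPushable("myimage:tag")) // <nil>
}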
@@ -5,11 +5,12 @@ import (
"encoding/json"
"net/url"

"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/image"
)

// ImageRemove removes an image from the docker host.
func (cli *Client) ImageRemove(ctx context.Context, imageID string, options image.RemoveOptions) ([]image.DeleteResponse, error) {
func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]image.DeleteResponse, error) {
query := url.Values{}

if options.Force {

@@ -10,6 +10,7 @@ import (
"strings"
"testing"

"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/image"
"github.com/docker/docker/errdefs"
"gotest.tools/v3/assert"
@@ -21,7 +22,7 @@ func TestImageRemoveError(t *testing.T) {
client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
}

_, err := client.ImageRemove(context.Background(), "image_id", image.RemoveOptions{})
_, err := client.ImageRemove(context.Background(), "image_id", types.ImageRemoveOptions{})
assert.Check(t, is.ErrorType(err, errdefs.IsSystem))
}

@@ -30,7 +31,7 @@ func TestImageRemoveImageNotFound(t *testing.T) {
client: newMockClient(errorMock(http.StatusNotFound, "no such image: unknown")),
}

_, err := client.ImageRemove(context.Background(), "unknown", image.RemoveOptions{})
_, err := client.ImageRemove(context.Background(), "unknown", types.ImageRemoveOptions{})
assert.Check(t, is.ErrorContains(err, "no such image: unknown"))
assert.Check(t, is.ErrorType(err, errdefs.IsNotFound))
}
@@ -92,7 +93,7 @@ func TestImageRemove(t *testing.T) {
}, nil
}),
}
imageDeletes, err := client.ImageRemove(context.Background(), "image_id", image.RemoveOptions{
imageDeletes, err := client.ImageRemove(context.Background(), "image_id", types.ImageRemoveOptions{
Force: removeCase.force,
PruneChildren: removeCase.pruneChildren,
})
@@ -90,15 +90,15 @@ type ImageAPIClient interface {
ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error)
BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error)
BuildCancel(ctx context.Context, id string) error
ImageCreate(ctx context.Context, parentReference string, options image.CreateOptions) (io.ReadCloser, error)
ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error)
ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error)
ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options image.ImportOptions) (io.ReadCloser, error)
ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error)
ImageInspectWithRaw(ctx context.Context, image string) (types.ImageInspect, []byte, error)
ImageList(ctx context.Context, options image.ListOptions) ([]image.Summary, error)
ImageList(ctx context.Context, options types.ImageListOptions) ([]image.Summary, error)
ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error)
ImagePull(ctx context.Context, ref string, options image.PullOptions) (io.ReadCloser, error)
ImagePush(ctx context.Context, ref string, options image.PushOptions) (io.ReadCloser, error)
ImageRemove(ctx context.Context, image string, options image.RemoveOptions) ([]image.DeleteResponse, error)
ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error)
ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error)
ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]image.DeleteResponse, error)
ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error)
ImageSave(ctx context.Context, images []string) (io.ReadCloser, error)
ImageTag(ctx context.Context, image, ref string) error
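For orientation, a minimal caller of the ImageAPIClient surface listed above, deliberately restricted to methods whose signatures are identical on both sides of this hunk (ImageTag and ImageInspectWithRaw); the image names are placeholder assumptions, not values from this diff:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/client"
)

func main() {
	// The concrete *client.Client satisfies the ImageAPIClient interface above.
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx := context.Background()
	// Tag an existing local image under a second, hypothetical name.
	if err := cli.ImageTag(ctx, "alpine:latest", "example.com/alpine:copy"); err != nil {
		log.Fatal(err)
	}
	// Inspect the newly tagged reference and print its ID.
	inspect, _, err := cli.ImageInspectWithRaw(ctx, "example.com/alpine:copy")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(inspect.ID)
}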
@@ -15,7 +15,7 @@ import (
)

var (
testBuf = []byte("Buffalo1 buffalo2 Buffalo3 buffalo4 buffalo5 buffalo6 Buffalo7 buffalo8")
testBuf = []byte("Buffalo buffalo Buffalo buffalo buffalo buffalo Buffalo buffalo")
testBufSize = len(testBuf)
)

@@ -45,6 +45,9 @@ func installConfigFlags(conf *config.Config, flags *pflag.FlagSet) error {
flags.StringVar(&conf.CgroupParent, "cgroup-parent", "", "Set parent cgroup for all containers")
flags.StringVar(&conf.RemappedRoot, "userns-remap", "", "User/Group setting for user namespaces")
flags.BoolVar(&conf.LiveRestoreEnabled, "live-restore", false, "Enable live restore of docker when containers are still running")
// TODO(thaJeztah): Used to produce a deprecation error; remove the flag and the OOMScoreAdjust field for the next release after v25.0.0.
flags.IntVar(&conf.OOMScoreAdjust, "oom-score-adjust", 0, "Set the oom_score_adj for the daemon (deprecated)")
_ = flags.MarkDeprecated("oom-score-adjust", "and will be removed in the next release.")
flags.BoolVar(&conf.Init, "init", false, "Run an init in the container to forward signals and reap processes")
flags.StringVar(&conf.InitPath, "init-path", "", "Path to the docker-init binary")
flags.Int64Var(&conf.CPURealtimePeriod, "cpu-rt-period", 0, "Limit the CPU real-time period in microseconds for the parent cgroup for all containers (not supported with cgroups v2)")
@@ -242,7 +242,7 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {

// Override BuildKit's default Resource so that it matches the semconv
// version that is used in our code.
detect.OverrideResource(resource.Default())
detect.Resource = resource.Default()
detect.Recorder = detect.NewTraceRecorder()

tp, err := detect.TracerProvider()
@@ -256,10 +256,7 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
pluginStore := plugin.NewStore()

var apiServer apiserver.Server
cli.authzMiddleware, err = initMiddlewares(&apiServer, cli.Config, pluginStore)
if err != nil {
return errors.Wrap(err, "failed to start API server")
}
cli.authzMiddleware = initMiddlewares(&apiServer, cli.Config, pluginStore)

d, err := daemon.NewDaemon(ctx, cli.Config, pluginStore, cli.authzMiddleware)
if err != nil {
@@ -310,12 +307,14 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
//
// FIXME(thaJeztah): better separate runtime and config data?
daemonCfg := d.Config()
routerOpts, err := newRouterOptions(routerCtx, &daemonCfg, d, c)
routerOptions, err := newRouterOptions(routerCtx, &daemonCfg, d)
if err != nil {
return err
}

httpServer.Handler = apiServer.CreateMux(routerOpts.Build()...)
routerOptions.cluster = c

httpServer.Handler = apiServer.CreateMux(routerOptions.Build()...)

go d.ProcessClusterNotifications(ctx, c.GetWatchStream())

@@ -357,7 +356,7 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
notifyStopping()
shutdownDaemon(ctx, d)

if err := routerOpts.buildkit.Close(); err != nil {
if err := routerOptions.buildkit.Close(); err != nil {
log.G(ctx).WithError(err).Error("Failed to close buildkit")
}

@@ -380,20 +379,12 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
// TODO: This can be removed after buildkit is updated to use http/protobuf as the default.
func setOTLPProtoDefault() {
const (
tracesEnv = "OTEL_EXPORTER_OTLP_TRACES_PROTOCOL"
metricsEnv = "OTEL_EXPORTER_OTLP_METRICS_PROTOCOL"
protoEnv = "OTEL_EXPORTER_OTLP_PROTOCOL"

defaultProto = "http/protobuf"
tracesEnv = "OTEL_EXPORTER_OTLP_TRACES_PROTOCOL"
protoEnv = "OTEL_EXPORTER_OTLP_PROTOCOL"
)

if os.Getenv(protoEnv) == "" {
if os.Getenv(tracesEnv) == "" {
os.Setenv(tracesEnv, defaultProto)
}
if os.Getenv(metricsEnv) == "" {
os.Setenv(metricsEnv, defaultProto)
}
if os.Getenv(tracesEnv) == "" && os.Getenv(protoEnv) == "" {
os.Setenv(tracesEnv, "http/protobuf")
}
}
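The setOTLPProtoDefault hunk above boils down to a "default only when unset" rule for the standard OTLP exporter variables, with the generic OTEL_EXPORTER_OTLP_PROTOCOL setting suppressing any defaulting of the signal-specific ones. A rough, self-contained restatement of one side of that hunk, not the daemon's actual code:

package main

import (
	"fmt"
	"os"
)

// setDefaultEnv applies a value only when the variable is not already set,
// mirroring the pattern in setOTLPProtoDefault above.
func setDefaultEnv(key, value string) {
	if os.Getenv(key) == "" {
		os.Setenv(key, value)
	}
}

func main() {
	const defaultProto = "http/protobuf"
	// Only fall back to defaults when the generic protocol variable is unset;
	// an explicit OTEL_EXPORTER_OTLP_PROTOCOL always wins.
	if os.Getenv("OTEL_EXPORTER_OTLP_PROTOCOL") == "" {
		setDefaultEnv("OTEL_EXPORTER_OTLP_TRACES_PROTOCOL", defaultProto)
		setDefaultEnv("OTEL_EXPORTER_OTLP_METRICS_PROTOCOL", defaultProto)
	}
	fmt.Println(os.Getenv("OTEL_EXPORTER_OTLP_TRACES_PROTOCOL"))
}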
@@ -406,17 +397,23 @@ type routerOptions struct {
cluster *cluster.Cluster
}

func newRouterOptions(ctx context.Context, config *config.Config, d *daemon.Daemon, c *cluster.Cluster) (routerOptions, error) {
func newRouterOptions(ctx context.Context, config *config.Config, d *daemon.Daemon) (routerOptions, error) {
opts := routerOptions{}
sm, err := session.NewManager()
if err != nil {
return routerOptions{}, errors.Wrap(err, "failed to create sessionmanager")
return opts, errors.Wrap(err, "failed to create sessionmanager")
}

manager, err := dockerfile.NewBuildManager(d.BuilderBackend(), d.IdentityMapping())
if err != nil {
return routerOptions{}, err
return opts, err
}
cgroupParent := newCgroupParent(config)
ro := routerOptions{
sessionManager: sm,
features: d.Features,
daemon: d,
}

bk, err := buildkit.New(ctx, buildkit.Opt{
SessionManager: sm,
@@ -438,22 +435,18 @@ func newRouterOptions(ctx context.Context, config *config.Config, d *daemon.Daem
ContainerdNamespace: config.ContainerdNamespace,
})
if err != nil {
return routerOptions{}, err
return opts, err
}

bb, err := buildbackend.NewBackend(d.ImageService(), manager, bk, d.EventsService)
if err != nil {
return routerOptions{}, errors.Wrap(err, "failed to create buildmanager")
return opts, errors.Wrap(err, "failed to create buildmanager")
}

return routerOptions{
sessionManager: sm,
buildBackend: bb,
features: d.Features,
buildkit: bk,
daemon: d,
cluster: c,
}, nil
ro.buildBackend = bb
ro.buildkit = bk

return ro, nil
}

func (cli *DaemonCli) reloadConfig() {
@@ -629,10 +622,6 @@ func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) {
conf.CDISpecDirs = nil
}

if err := loadCLIPlatformConfig(conf); err != nil {
return nil, err
}

return conf, nil
}

@@ -719,15 +708,14 @@ func (opts routerOptions) Build() []router.Router {
return routers
}

func initMiddlewares(s *apiserver.Server, cfg *config.Config, pluginStore plugingetter.PluginGetter) (*authorization.Middleware, error) {
func initMiddlewares(s *apiserver.Server, cfg *config.Config, pluginStore plugingetter.PluginGetter) *authorization.Middleware {
v := dockerversion.Version

exp := middleware.NewExperimentalMiddleware(cfg.Experimental)
s.UseMiddleware(exp)

vm, err := middleware.NewVersionMiddleware(dockerversion.Version, api.DefaultVersion, cfg.MinAPIVersion)
if err != nil {
return nil, err
}
s.UseMiddleware(*vm)
vm := middleware.NewVersionMiddleware(v, api.DefaultVersion, cfg.MinAPIVersion)
s.UseMiddleware(vm)

if cfg.CorsHeaders != "" {
c := middleware.NewCORSMiddleware(cfg.CorsHeaders)
@@ -736,7 +724,7 @@ func initMiddlewares(s *apiserver.Server, cfg *config.Config, pluginStore plugin

authzMiddleware := authorization.NewMiddleware(cfg.AuthorizationPlugins, pluginStore)
s.UseMiddleware(authzMiddleware)
return authzMiddleware, nil
return authzMiddleware
}

func (cli *DaemonCli) getContainerdDaemonOpts() ([]supervisor.DaemonOpt, error) {
@@ -844,7 +832,6 @@ func loadListeners(cfg *config.Config, tlsConfig *tls.Config) ([]net.Listener, [
if proto == "tcp" && !authEnabled {
log.G(ctx).WithField("host", protoAddr).Warn("Binding to IP address without --tlsverify is insecure and gives root access on this machine to everyone who has access to your network.")
log.G(ctx).WithField("host", protoAddr).Warn("Binding to an IP address, even on localhost, can also give access to scripts run in a browser. Be safe out there!")
log.G(ctx).WithField("host", protoAddr).Warn("[DEPRECATION NOTICE] In future versions this will be a hard failure preventing the daemon from starting! Learn more at: https://docs.docker.com/go/api-security/")
time.Sleep(time.Second)

// If TLSVerify is explicitly set to false we'll take that as "Please let me shoot myself in the foot"
@@ -3,28 +3,11 @@ package main
import (
cdcgroups "github.com/containerd/cgroups/v3"
systemdDaemon "github.com/coreos/go-systemd/v22/daemon"
"github.com/docker/docker/daemon"
"github.com/docker/docker/daemon/config"
"github.com/docker/docker/pkg/sysinfo"
"github.com/pkg/errors"
)

// loadCLIPlatformConfig loads the platform specific CLI configuration
func loadCLIPlatformConfig(conf *config.Config) error {
if conf.RemappedRoot == "" {
return nil
}

containerdNamespace, containerdPluginNamespace, err := daemon.RemapContainerdNamespaces(conf)
if err != nil {
return err
}
conf.ContainerdNamespace = containerdNamespace
conf.ContainerdPluginNamespace = containerdPluginNamespace

return nil
}

// preNotifyReady sends a message to the host when the API is active, but before the daemon is
func preNotifyReady() {
}
@@ -1,14 +1,12 @@
package main

import (
"runtime"
"testing"

"github.com/containerd/log"
"github.com/docker/docker/daemon/config"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/spf13/pflag"
"go.opentelemetry.io/otel"
"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"
"gotest.tools/v3/fs"
@@ -286,29 +284,3 @@ func TestCDISpecDirs(t *testing.T) {
})
}
}

// TestOtelMeterLeak tests for a memory leak in the OTEL meter implementation.
// Once the fixed OTEL is vendored, this test will fail - the workaround
// and this test should be removed then.
func TestOtelMeterLeak(t *testing.T) {
meter := otel.Meter("foo")

var before runtime.MemStats
runtime.ReadMemStats(&before)

const counters = 10 * 1000 * 1000
for i := 0; i < counters; i++ {
_, _ = meter.Int64Counter("bar")
}

var after runtime.MemStats
runtime.ReadMemStats(&after)

allocs := after.Mallocs - before.Mallocs
t.Log("Allocations:", allocs)

if allocs < 10 {
// TODO: Remove Workaround OTEL memory leak in cmd/dockerd/daemon.go
t.Fatal("Allocations count decreased. OTEL leak workaround is no longer needed!")
}
}
@@ -16,12 +16,6 @@ func getDefaultDaemonConfigFile() (string, error) {
return "", nil
}

// loadCLIPlatformConfig loads the platform specific CLI configuration
// there is none on windows, so this is a no-op
func loadCLIPlatformConfig(conf *config.Config) error {
return nil
}

// setDefaultUmask doesn't do anything on windows
func setDefaultUmask() error {
return nil
@@ -14,9 +14,6 @@ import (
"github.com/moby/buildkit/util/apicaps"
"github.com/moby/term"
"github.com/spf13/cobra"

"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/metric/noop"
)

var honorXDG bool
@@ -85,12 +82,6 @@ func main() {
// Fixes https://github.com/docker/docker/issues/19728
signal.Ignore(syscall.SIGPIPE)

// Workaround OTEL memory leak
// See: https://github.com/open-telemetry/opentelemetry-go-contrib/issues/5190
// The need for this workaround is checked by the TestOtelMeterLeak test
// TODO: Remove this workaround after upgrading to v1.30.0
otel.SetMeterProvider(noop.MeterProvider{})

// Set terminal emulation based on platform as required.
_, stdout, stderr := term.StdStreams()
onError := func(err error) {
@@ -14,7 +14,7 @@ func NoArgs(cmd *cobra.Command, args []string) error {
}

if cmd.HasSubCommands() {
return errors.New("\n" + strings.TrimRight(cmd.UsageString(), "\n"))
return errors.Errorf("\n" + strings.TrimRight(cmd.UsageString(), "\n"))
}

return errors.Errorf(
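The NoArgs hunk above swaps between errors.New and errors.Errorf for a usage string that is not a compile-time constant. The distinction matters because Errorf interprets its first argument as a format string, so a literal percent sign in the usage text would be mangled, while errors.New keeps it verbatim. A generic illustration only (fmt.Errorf stands in for errors.Errorf from github.com/pkg/errors, which formats the same way):

package main

import (
	"errors"
	"fmt"
)

func main() {
	usage := "Usage: tool --threshold 50%" // a literal % in user-facing text

	// Errorf treats the string as a format: the stray verb is reported in the output.
	e1 := fmt.Errorf(usage)
	// errors.New keeps the text exactly as provided.
	e2 := errors.New(usage)

	fmt.Println(e1) // Usage: tool --threshold 50%!(NOVERB)
	fmt.Println(e2) // Usage: tool --threshold 50%
}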
@@ -5,7 +5,7 @@ import (
"sync"
)

// attachContext is the context used for attach calls.
// attachContext is the context used for for attach calls.
type attachContext struct {
mu sync.Mutex
ctx context.Context
@@ -299,8 +299,8 @@ func (container *Container) SetupWorkingDirectory(rootIdentity idtools.Identity)
return nil
}

workdir := filepath.Clean(container.Config.WorkingDir)
pth, err := container.GetResourcePath(workdir)
container.Config.WorkingDir = filepath.Clean(container.Config.WorkingDir)
pth, err := container.GetResourcePath(container.Config.WorkingDir)
if err != nil {
return err
}
@@ -514,14 +514,14 @@ func (container *Container) AddMountPointWithVolume(destination string, vol volu
}

// UnmountVolumes unmounts all volumes
func (container *Container) UnmountVolumes(ctx context.Context, volumeEventLog func(name string, action events.Action, attributes map[string]string)) error {
func (container *Container) UnmountVolumes(volumeEventLog func(name string, action events.Action, attributes map[string]string)) error {
var errs []string
for _, volumeMount := range container.MountPoints {
if volumeMount.Volume == nil {
continue
}

if err := volumeMount.Cleanup(ctx); err != nil {
if err := volumeMount.Cleanup(); err != nil {
errs = append(errs, err.Error())
continue
}
@@ -15,6 +15,8 @@ import (
"github.com/docker/docker/api/types/events"
mounttypes "github.com/docker/docker/api/types/mount"
swarmtypes "github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/volume"
volumemounts "github.com/docker/docker/volume/mounts"
"github.com/moby/sys/mount"
"github.com/opencontainers/selinux/go-selinux/label"
@@ -127,11 +129,34 @@ func (container *Container) NetworkMounts() []Mount {
}

// CopyImagePathContent copies files in destination to the volume.
func (container *Container) CopyImagePathContent(volumePath, destination string) error {
if err := label.Relabel(volumePath, container.MountLabel, true); err != nil && !errors.Is(err, syscall.ENOTSUP) {
func (container *Container) CopyImagePathContent(v volume.Volume, destination string) error {
rootfs, err := container.GetResourcePath(destination)
if err != nil {
return err
}
return copyExistingContents(destination, volumePath)

if _, err := os.Stat(rootfs); err != nil {
if os.IsNotExist(err) {
return nil
}
return err
}

id := stringid.GenerateRandomID()
path, err := v.Mount(id)
if err != nil {
return err
}

defer func() {
if err := v.Unmount(id); err != nil {
log.G(context.TODO()).Warnf("error while unmounting volume %s: %v", v.Name(), err)
}
}()
if err := label.Relabel(path, container.MountLabel, true); err != nil && !errors.Is(err, syscall.ENOTSUP) {
return err
}
return copyExistingContents(rootfs, path)
}

// ShmResourcePath returns path to shm
@@ -371,7 +396,7 @@ func (container *Container) DetachAndUnmount(volumeEventLog func(name string, ac
Warn("Unable to unmount")
}
}
return container.UnmountVolumes(ctx, volumeEventLog)
return container.UnmountVolumes(volumeEventLog)
}

// ignoreUnsupportedXAttrs ignores errors when extended attributes
@@ -394,13 +419,9 @@ func copyExistingContents(source, destination string) error {
return err
}
if len(dstList) != 0 {
log.G(context.TODO()).WithFields(log.Fields{
"source": source,
"destination": destination,
}).Debug("destination is not empty, do not copy")
// destination is not empty, do not copy
return nil
}

return fs.CopyDir(destination, source, ignoreUnsupportedXAttrs())
}
@@ -1,7 +1,6 @@
package container // import "github.com/docker/docker/container"

import (
"context"
"fmt"
"os"
"path/filepath"
@@ -129,7 +128,7 @@ func (container *Container) ConfigMounts() []Mount {
// On Windows it only delegates to `UnmountVolumes` since there is nothing to
// force unmount.
func (container *Container) DetachAndUnmount(volumeEventLog func(name string, action events.Action, attributes map[string]string)) error {
return container.UnmountVolumes(context.TODO(), volumeEventLog)
return container.UnmountVolumes(volumeEventLog)
}

// TmpfsMounts returns the list of tmpfs mounts
@@ -2,7 +2,6 @@ package stream // import "github.com/docker/docker/container/stream"

import (
"context"
"errors"
"fmt"
"io"
"strings"
@@ -92,24 +91,24 @@ func (c *Config) NewNopInputPipe() {

// CloseStreams ensures that the configured streams are properly closed.
func (c *Config) CloseStreams() error {
var errs []string
var errors []string

if c.stdin != nil {
if err := c.stdin.Close(); err != nil {
errs = append(errs, fmt.Sprintf("error close stdin: %s", err))
errors = append(errors, fmt.Sprintf("error close stdin: %s", err))
}
}

if err := c.stdout.Clean(); err != nil {
errs = append(errs, fmt.Sprintf("error close stdout: %s", err))
errors = append(errors, fmt.Sprintf("error close stdout: %s", err))
}

if err := c.stderr.Clean(); err != nil {
errs = append(errs, fmt.Sprintf("error close stderr: %s", err))
errors = append(errors, fmt.Sprintf("error close stderr: %s", err))
}

if len(errs) > 0 {
return errors.New(strings.Join(errs, "\n"))
if len(errors) > 0 {
return fmt.Errorf(strings.Join(errors, "\n"))
}

return nil
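One side of the CloseStreams hunk above names the local slice errors, which shadows the standard errors package inside the function and is why that variant has to fall back to fmt.Errorf when joining the messages. A minimal illustration of the shadowing concern; this is a generic sketch, not repository code:

package main

import (
	"errors"
	"fmt"
	"strings"
)

func closeAll(problems []string) error {
	// Naming this slice "errors" would shadow the errors package below,
	// making errors.New unusable inside this function.
	var errs []string
	errs = append(errs, problems...)

	if len(errs) > 0 {
		return errors.New(strings.Join(errs, "\n"))
	}
	return nil
}

func main() {
	fmt.Println(closeAll([]string{"error close stdin: EOF", "error close stderr: EOF"}))
}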
@@ -15,7 +15,6 @@
# * DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=(builtin|slirp4netns|implicit): the rootlesskit port driver. Defaults to "builtin".
# * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX=(auto|true|false): whether to protect slirp4netns with a dedicated mount namespace. Defaults to "auto".
# * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP=(auto|true|false): whether to protect slirp4netns with seccomp. Defaults to "auto".
# * DOCKERD_ROOTLESS_ROOTLESSKIT_DISABLE_HOST_LOOPBACK=(true|false): prohibit connections to 127.0.0.1 on the host (including via 10.0.2.2, in the case of slirp4netns). Defaults to "true".

# To apply an environment variable via systemd, create ~/.config/systemd/user/docker.service.d/override.conf as follows,
# and run `systemctl --user daemon-reload && systemctl --user restart docker`:
@@ -54,30 +53,6 @@ if ! [ -d "$HOME" ]; then
exit 1
fi

mount_directory() {
if [ -z "$_DOCKERD_ROOTLESS_CHILD" ]; then
echo "mount_directory should be called from the child context. Otherwise data loss is at risk" >&2
exit 1
fi

DIRECTORY="$1"
if [ ! -d "$DIRECTORY" ]; then
return
fi

# Bind mount directory: this makes this directory visible to
# Dockerd, even if it is originally a symlink, given Dockerd does
# not always follow symlinks. Some directories might also be
# "copied-up", meaning that they will also be writable on the child
# namespace; this will be the case only if they are provided as
# --copy-up to the rootlesskit.
DIRECTORY_REALPATH=$(realpath "$DIRECTORY")
MOUNT_OPTIONS="${2:---bind}"
rm -rf "$DIRECTORY"
mkdir -p "$DIRECTORY"
mount $MOUNT_OPTIONS "$DIRECTORY_REALPATH" "$DIRECTORY"
}

rootlesskit=""
for f in docker-rootlesskit rootlesskit; do
if command -v $f > /dev/null 2>&1; then
@@ -96,7 +71,6 @@ fi
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER:=builtin}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX:=auto}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP:=auto}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_DISABLE_HOST_LOOPBACK:=}"
net=$DOCKERD_ROOTLESS_ROOTLESSKIT_NET
mtu=$DOCKERD_ROOTLESS_ROOTLESSKIT_MTU
if [ -z "$net" ]; then
@@ -124,11 +98,6 @@ if [ -z "$mtu" ]; then
mtu=1500
fi

host_loopback="--disable-host-loopback"
if [ "$DOCKERD_ROOTLESS_ROOTLESSKIT_DISABLE_HOST_LOOPBACK" = "false" ]; then
host_loopback=""
fi

dockerd="${DOCKERD:-dockerd}"

if [ -z "$_DOCKERD_ROOTLESS_CHILD" ]; then
@@ -156,32 +125,13 @@ if [ -z "$_DOCKERD_ROOTLESS_CHILD" ]; then
--net=$net --mtu=$mtu \
--slirp4netns-sandbox=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX \
--slirp4netns-seccomp=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP \
$host_loopback --port-driver=$DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER \
--disable-host-loopback --port-driver=$DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER \
--copy-up=/etc --copy-up=/run \
--propagation=rslave \
$DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS \
"$0" "$@"
else
[ "$_DOCKERD_ROOTLESS_CHILD" = 1 ]

# The Container Device Interface (CDI) specs can be found by default
# under {/etc,/var/run}/cdi. More information at:
# https://github.com/cncf-tags/container-device-interface
#
# In order to use the Container Device Interface (CDI) integration,
# the CDI paths need to exist before the Docker daemon is started in
# order for it to read the CDI specification files. Otherwise, a
# Docker daemon restart will be required for the daemon to discover
# them.
#
# If another set of CDI paths (other than the default /etc/cdi and
# /var/run/cdi) are configured through the Docker configuration file
# (using "cdi-spec-dirs"), they need to be bind mounted in rootless
# mode; otherwise the Docker daemon won't have access to the CDI
# specification files.
mount_directory /etc/cdi
mount_directory /var/run/cdi

# remove the symlinks for the existing files in the parent namespace if any,
# so that we can create our own files in our mount namespace.
rm -f /run/docker /run/containerd /run/xtables.lock
@@ -196,7 +146,10 @@ else
if [ "$(stat -c %T -f /etc)" = "tmpfs" ] && [ -L "/etc/ssl" ]; then
# Workaround for "x509: certificate signed by unknown authority" on openSUSE Tumbleweed.
# https://github.com/rootless-containers/rootlesskit/issues/225
mount_directory /etc/ssl "--rbind"
realpath_etc_ssl=$(realpath /etc/ssl)
rm -f /etc/ssl
mkdir /etc/ssl
mount --rbind ${realpath_etc_ssl} /etc/ssl
fi

exec "$dockerd" "$@"
@@ -8,6 +8,25 @@ import (
"github.com/docker/docker/errdefs"
)

// ContainerCopy performs a deprecated operation of archiving the resource at
// the specified path in the container identified by the given name.
func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, error) {
ctr, err := daemon.GetContainer(name)
if err != nil {
return nil, err
}

data, err := daemon.containerCopy(ctr, res)
if err == nil {
return data, nil
}

if os.IsNotExist(err) {
return nil, containerFileNotFound{res, name}
}
return nil, errdefs.System(err)
}

// ContainerStatPath stats the filesystem resource at the specified path in the
// container identified by the given name.
func (daemon *Daemon) ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error) {