Mirror of https://github.com/moby/moby.git (synced 2026-01-13 03:31:39 +00:00)
Compare commits
134 Commits
f417435e5f, acd023d42b, 7a075cacf9, aff7177ee7, ed7c26339e, 74e3b4fb2e, 4cc0416534,
f9f9e7ff9a, 5fb4eb941d, 67e9aa6d4d, 61b82be580, 0227d95f99, fa9c5c55e1, df96d8d0bd,
1652559be4, ab29279200, 147b5388dd, 60103717bc, 45dede440e, ba4a2dab16, 51133117fb,
341a7978a5, 10e3bfd0ac, 269a0d8feb, 876b1d1dcd, 0bcd64689b, 8d454710cd, 6cf694fe70,
c12bbf549b, 1ae115175c, a7f9907f5f, 9150d0115e, 9af7c8ec0a, 3344c502da, 6c9fafdda7,
f8a8cdaf9e, 7a659049b8, 0ccf1c2a93, 28c1a8bc2b, 5b5a58b2cd, 282891f70c, bbe6f09afc,
5b13a38144, 990e95dcf0, a140d0d95f, 91a8312fb7, cf03e96354, c48b67160d, 225e043196,
78174d2e74, 622e66684a, 85f4e6151a, 3e358447f5, dd4de8f388, f5ef4e76b3, 6c5e5271c1,
693fca6199, 49487e996a, 0358f31dc2, 081cffb3fa, 9de19554c7, 2a80b8a7b2, 61ffecfa3b,
02cd8dec03, 1d7df5ecc0, 4e68a265ed, e437f890ba, 5a0015f72c, 5babfee371, fce6e0ca9b,
d838e68300, fa0d4159c7, 06e22dce46, b73ee94289, fd6a419ad5, 13ce91825f, 4b63c47c1e,
4edb71bb83, 667bc3f803, 1b47bfac02, f2d0d87c46, 6ac38cdbeb, d7bf237e29, f41b342cbe,
f413ba6fdb, c2ef38f790, d5eebf9e19, f3f5327b48, 05a370f52f, be7b60ef05, 6d05b9b65b,
c01bbbddeb, 32635850ed, 2cf1c762f8, 71fa3ab079, 5295e88ceb, 6eef840b8a, e2ab4718c8,
3de920a0b1, a445aa95e5, cb77e48229, e8801fbe26, 613b6a12c1, 1b6738369f, b8cc2e8c66,
fcccfeb811, f8eaa14a18, ac76925ff2, c7a1d928c0, 2672baefd7, ff15b49b47, c0573b133f,
c7466c0b52, dde33d0dfe, 39fedb254b, f0f5fc974a, 7c185a1e40, 2b036fb1da, 1f24da70d8,
358fecb566, f030b25770, e07aed0f77, cdf3611cff, 05267e9e8c, e5edf62bca, e14d121d49,
e0acf1cd70, c2847b2eb2, 0894f7fe69, d25aa32c21, 1e335cfa74, 4d287e9267, 0240f5675b,
13964248f1
@@ -3,19 +3,11 @@
"build": {
"context": "..",
"dockerfile": "../Dockerfile",
"target": "devcontainer"
"target": "dev"
},
"workspaceFolder": "/go/src/github.com/docker/docker",
"workspaceMount": "source=${localWorkspaceFolder},target=/go/src/github.com/docker/docker,type=bind,consistency=cached",

"remoteUser": "root",
"runArgs": ["--privileged"],

"customizations": {
"vscode": {
"extensions": [
"golang.go"
]
}
}
"runArgs": ["--privileged"]
}
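This hunk switches the dev container's build target between the "dev" and "devcontainer" stages of the repository's Dockerfile (a devcontainer stage also appears in the Dockerfile diff further down). As a hedged sketch, assuming that stage exists in the top-level Dockerfile, the same image could be built manually from the repository root; the tag name here is made up:

# Hypothetical manual build of the stage that devcontainer.json selects via "target"
docker build --target devcontainer -f Dockerfile -t moby-devcontainer .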
.github/PULL_REQUEST_TEMPLATE.md (vendored, 5 changed lines)
@@ -22,12 +22,9 @@ Please provide the following information:
**- Description for the changelog**
<!--
Write a short (one line) summary that describes the changes in this
pull request for inclusion in the changelog.
It must be placed inside the below triple backticks section:
pull request for inclusion in the changelog:
-->
```markdown changelog

```

**- A picture of a cute animal (not mandatory but encouraged)**
.github/workflows/.dco.yml (vendored, 9 changed lines)
@@ -3,15 +3,6 @@ name: .dco

# TODO: hide reusable workflow from the UI. Tracked in https://github.com/community/community/discussions/12025

# Default to 'contents: read', which grants actions to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
contents: read

on:
workflow_call:
.github/workflows/.test-prepare.yml (vendored, 9 changed lines)
@@ -3,15 +3,6 @@ name: .test-prepare

# TODO: hide reusable workflow from the UI. Tracked in https://github.com/community/community/discussions/12025

# Default to 'contents: read', which grants actions to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
contents: read

on:
workflow_call:
outputs:
.github/workflows/.test.yml (vendored, 48 changed lines)
@@ -3,15 +3,6 @@ name: .test

# TODO: hide reusable workflow from the UI. Tracked in https://github.com/community/community/discussions/12025

# Default to 'contents: read', which grants actions to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
contents: read

on:
workflow_call:
inputs:

@@ -21,15 +12,13 @@ on:
default: "graphdriver"

env:
GO_VERSION: "1.21.13"
GO_VERSION: "1.21.6"
GOTESTLIST_VERSION: v0.3.1
TESTSTAT_VERSION: v0.1.25
TESTSTAT_VERSION: v0.1.3
ITG_CLI_MATRIX_SIZE: 6
DOCKER_EXPERIMENTAL: 1
DOCKER_GRAPHDRIVER: ${{ inputs.storage == 'snapshotter' && 'overlayfs' || 'overlay2' }}
TEST_INTEGRATION_USE_SNAPSHOTTER: ${{ inputs.storage == 'snapshotter' && '1' || '' }}
SETUP_BUILDX_VERSION: latest
SETUP_BUILDKIT_IMAGE: moby/buildkit:latest

jobs:
unit:

@@ -46,10 +35,6 @@ jobs:
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build dev image
uses: docker/bake-action@v4

@@ -77,7 +62,6 @@ jobs:
directory: ./bundles
env_vars: RUNNER_OS
flags: unit
token: ${{ secrets.CODECOV_TOKEN }} # used to upload coverage reports: https://github.com/moby/buildkit/pull/4660#issue-2142122533
-
name: Upload reports
if: always()

@@ -85,7 +69,6 @@ jobs:
with:
name: test-reports-unit-${{ inputs.storage }}
path: /tmp/reports/*
retention-days: 1

unit-report:
runs-on: ubuntu-20.04

@@ -113,7 +96,7 @@ jobs:
-
name: Create summary
run: |
find /tmp/reports -type f -name '*-go-test-report.json' -exec teststat -markdown {} \+ >> $GITHUB_STEP_SUMMARY
teststat -markdown $(find /tmp/reports -type f -name '*.json' -print0 | xargs -0) >> $GITHUB_STEP_SUMMARY

docker-py:
runs-on: ubuntu-20.04

@@ -132,10 +115,6 @@ jobs:
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build dev image
uses: docker/bake-action@v4

@@ -170,7 +149,6 @@ jobs:
with:
name: test-reports-docker-py-${{ inputs.storage }}
path: /tmp/reports/*
retention-days: 1

integration-flaky:
runs-on: ubuntu-20.04

@@ -186,10 +164,6 @@ jobs:
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build dev image
uses: docker/bake-action@v4

@@ -244,10 +218,6 @@ jobs:
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build dev image
uses: docker/bake-action@v4

@@ -287,7 +257,6 @@ jobs:
directory: ./bundles/test-integration
env_vars: RUNNER_OS
flags: integration,${{ matrix.mode }}
token: ${{ secrets.CODECOV_TOKEN }} # used to upload coverage reports: https://github.com/moby/buildkit/pull/4660#issue-2142122533
-
name: Test daemon logs
if: always()

@@ -300,7 +269,6 @@ jobs:
with:
name: test-reports-integration-${{ inputs.storage }}-${{ env.TESTREPORTS_NAME }}
path: /tmp/reports/*
retention-days: 1

integration-report:
runs-on: ubuntu-20.04

@@ -329,7 +297,7 @@ jobs:
-
name: Create summary
run: |
find /tmp/reports -type f -name '*-go-test-report.json' -exec teststat -markdown {} \+ >> $GITHUB_STEP_SUMMARY
teststat -markdown $(find /tmp/reports -type f -name '*.json' -print0 | xargs -0) >> $GITHUB_STEP_SUMMARY

integration-cli-prepare:
runs-on: ubuntu-20.04

@@ -389,10 +357,6 @@ jobs:
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build dev image
uses: docker/bake-action@v4

@@ -431,7 +395,6 @@ jobs:
directory: ./bundles/test-integration
env_vars: RUNNER_OS
flags: integration-cli
token: ${{ secrets.CODECOV_TOKEN }} # used to upload coverage reports: https://github.com/moby/buildkit/pull/4660#issue-2142122533
-
name: Test daemon logs
if: always()

@@ -444,7 +407,6 @@ jobs:
with:
name: test-reports-integration-cli-${{ inputs.storage }}-${{ env.TESTREPORTS_NAME }}
path: /tmp/reports/*
retention-days: 1

integration-cli-report:
runs-on: ubuntu-20.04

@@ -473,4 +435,4 @@ jobs:
-
name: Create summary
run: |
find /tmp/reports -type f -name '*-go-test-report.json' -exec teststat -markdown {} \+ >> $GITHUB_STEP_SUMMARY
teststat -markdown $(find /tmp/reports -type f -name '*.json' -print0 | xargs -0) >> $GITHUB_STEP_SUMMARY
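The two "Create summary" variants above differ only in how the report files reach teststat. A hedged, standalone comparison follows; the paths and the -markdown flag are taken from the hunks, while the commentary is an illustration rather than a statement of the change's motivation:

# Variant 1: find batches the matching reports and passes them to teststat as
# arguments; file names are handed over intact, with no shell word-splitting.
find /tmp/reports -type f -name '*-go-test-report.json' -exec teststat -markdown {} \+ >> $GITHUB_STEP_SUMMARY

# Variant 2: the file list is produced by find/xargs and then re-split by the
# shell during command substitution, so the -print0/-0 pairing no longer
# protects file names that contain whitespace.
teststat -markdown $(find /tmp/reports -type f -name '*.json' -print0 | xargs -0) >> $GITHUB_STEP_SUMMARY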
.github/workflows/.windows.yml (vendored, 21 changed lines)
@@ -3,15 +3,6 @@ name: .windows

# TODO: hide reusable workflow from the UI. Tracked in https://github.com/community/community/discussions/12025

# Default to 'contents: read', which grants actions to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
contents: read

on:
workflow_call:
inputs:

@@ -28,9 +19,9 @@ on:
default: false

env:
GO_VERSION: "1.21.13"
GO_VERSION: "1.21.6"
GOTESTLIST_VERSION: v0.3.1
TESTSTAT_VERSION: v0.1.25
TESTSTAT_VERSION: v0.1.3
WINDOWS_BASE_IMAGE: mcr.microsoft.com/windows/servercore
WINDOWS_BASE_TAG_2019: ltsc2019
WINDOWS_BASE_TAG_2022: ltsc2022

@@ -191,7 +182,6 @@ jobs:
directory: bundles
env_vars: RUNNER_OS
flags: unit
token: ${{ secrets.CODECOV_TOKEN }} # used to upload coverage reports: https://github.com/moby/buildkit/pull/4660#issue-2142122533
-
name: Upload reports
if: always()

@@ -199,7 +189,6 @@ jobs:
with:
name: ${{ inputs.os }}-${{ inputs.storage }}-unit-reports
path: ${{ env.GOPATH }}\src\github.com\docker\docker\bundles\*
retention-days: 1

unit-test-report:
runs-on: ubuntu-latest

@@ -225,7 +214,7 @@ jobs:
-
name: Create summary
run: |
find /tmp/artifacts -type f -name '*-go-test-report.json' -exec teststat -markdown {} \+ >> $GITHUB_STEP_SUMMARY
teststat -markdown $(find /tmp/artifacts -type f -name '*.json' -print0 | xargs -0) >> $GITHUB_STEP_SUMMARY

integration-test-prepare:
runs-on: ubuntu-latest

@@ -466,7 +455,6 @@ jobs:
directory: bundles
env_vars: RUNNER_OS
flags: integration,${{ matrix.runtime }}
token: ${{ secrets.CODECOV_TOKEN }} # used to upload coverage reports: https://github.com/moby/buildkit/pull/4660#issue-2142122533
-
name: Docker info
run: |

@@ -518,7 +506,6 @@ jobs:
with:
name: ${{ inputs.os }}-${{ inputs.storage }}-integration-reports-${{ matrix.runtime }}-${{ env.TESTREPORTS_NAME }}
path: ${{ env.GOPATH }}\src\github.com\docker\docker\bundles\*
retention-days: 1

integration-test-report:
runs-on: ubuntu-latest

@@ -557,4 +544,4 @@ jobs:
-
name: Create summary
run: |
find /tmp/reports -type f -name '*-go-test-report.json' -exec teststat -markdown {} \+ >> $GITHUB_STEP_SUMMARY
teststat -markdown $(find /tmp/reports -type f -name '*.json' -print0 | xargs -0) >> $GITHUB_STEP_SUMMARY
.github/workflows/bin-image.yml (vendored, 19 changed lines)
@@ -1,14 +1,5 @@
name: bin-image

# Default to 'contents: read', which grants actions to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
contents: read

concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true

@@ -30,8 +21,6 @@ env:
PLATFORM: Moby Engine - Nightly
PRODUCT: moby-bin
PACKAGER_NAME: The Moby Project
SETUP_BUILDX_VERSION: latest
SETUP_BUILDKIT_IMAGE: moby/buildkit:latest

jobs:
validate-dco:

@@ -123,10 +112,6 @@ jobs:
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Login to Docker Hub
if: github.event_name != 'pull_request' && github.repository == 'moby/moby'

@@ -186,10 +171,6 @@ jobs:
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Login to Docker Hub
uses: docker/login-action@v3
.github/workflows/buildkit.yml (vendored, 32 changed lines)
@@ -1,14 +1,5 @@
name: buildkit

# Default to 'contents: read', which grants actions to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
contents: read

concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true

@@ -22,10 +13,8 @@ on:
pull_request:

env:
GO_VERSION: "1.21.13"
GO_VERSION: "1.21.6"
DESTDIR: ./build
SETUP_BUILDX_VERSION: latest
SETUP_BUILDKIT_IMAGE: moby/buildkit:latest

jobs:
validate-dco:

@@ -42,10 +31,6 @@ jobs:
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build
uses: docker/bake-action@v4

@@ -65,9 +50,6 @@ jobs:
timeout-minutes: 120
needs:
- build
env:
TEST_IMAGE_BUILD: "0"
TEST_IMAGE_ID: "buildkit-tests"
strategy:
fail-fast: false
matrix:

@@ -120,10 +102,6 @@ jobs:
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Download binary artifacts
uses: actions/download-artifact@v4

@@ -137,14 +115,6 @@ jobs:
sudo service docker restart
docker version
docker info
-
name: Build test image
uses: docker/bake-action@v4
with:
workdir: ./buildkit
targets: integration-tests
set: |
*.output=type=docker,name=${{ env.TEST_IMAGE_ID }}
-
name: Test
run: |
.github/workflows/ci.yml (vendored, 63 changed lines)
@@ -1,14 +1,5 @@
name: ci

# Default to 'contents: read', which grants actions to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
contents: read

concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true

@@ -23,8 +14,6 @@ on:

env:
DESTDIR: ./build
SETUP_BUILDX_VERSION: latest
SETUP_BUILDKIT_IMAGE: moby/buildkit:latest

jobs:
validate-dco:

@@ -49,10 +38,6 @@ jobs:
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build
uses: docker/bake-action@v4

@@ -66,6 +51,14 @@ jobs:
name: Check artifacts
run: |
find ${{ env.DESTDIR }} -type f -exec file -e ascii -- {} +
-
name: Upload artifacts
uses: actions/upload-artifact@v4
with:
name: ${{ matrix.target }}
path: ${{ env.DESTDIR }}
if-no-files-found: error
retention-days: 7

prepare-cross:
runs-on: ubuntu-latest

@@ -111,10 +104,6 @@ jobs:
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build
uses: docker/bake-action@v4

@@ -130,35 +119,11 @@ jobs:
name: Check artifacts
run: |
find ${{ env.DESTDIR }} -type f -exec file -e ascii -- {} +

govulncheck:
runs-on: ubuntu-24.04
permissions:
# required to write sarif report
security-events: write
steps:
-
name: Checkout
uses: actions/checkout@v4
name: Upload artifacts
uses: actions/upload-artifact@v4
with:
fetch-depth: 0
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Run
uses: docker/bake-action@v5
with:
targets: govulncheck
env:
GOVULNCHECK_FORMAT: sarif
-
name: Upload SARIF report
if: ${{ github.event_name != 'pull_request' && github.repository == 'moby/moby' }}
uses: github/codeql-action/upload-sarif@v3
with:
sarif_file: ${{ env.DESTDIR }}/govulncheck.out
name: cross-${{ env.PLATFORM_PAIR }}
path: ${{ env.DESTDIR }}
if-no-files-found: error
retention-days: 7
.github/workflows/test.yml (vendored, 28 changed lines)
@@ -1,14 +1,5 @@
name: test

# Default to 'contents: read', which grants actions to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
contents: read

concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true

@@ -22,11 +13,7 @@ on:
pull_request:

env:
GO_VERSION: "1.21.13"
GIT_PAGER: "cat"
PAGER: "cat"
SETUP_BUILDX_VERSION: latest
SETUP_BUILDKIT_IMAGE: moby/buildkit:latest
GO_VERSION: "1.21.6"

jobs:
validate-dco:

@@ -55,10 +42,6 @@ jobs:
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build dev image
uses: docker/bake-action@v4

@@ -74,7 +57,6 @@ jobs:
- build-dev
- validate-dco
uses: ./.github/workflows/.test.yml
secrets: inherit
strategy:
fail-fast: false
matrix:

@@ -127,10 +109,6 @@ jobs:
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build dev image
uses: docker/bake-action@v4

@@ -187,10 +165,6 @@ jobs:
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Test
uses: docker/bake-action@v4
.github/workflows/validate-pr.yml (vendored, 77 changed lines)
@@ -1,77 +0,0 @@
name: validate-pr

# Default to 'contents: read', which grants actions to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
contents: read

on:
pull_request:
types: [opened, edited, labeled, unlabeled]

jobs:
check-area-label:
runs-on: ubuntu-20.04
steps:
- name: Missing `area/` label
if: contains(join(github.event.pull_request.labels.*.name, ','), 'impact/') && !contains(join(github.event.pull_request.labels.*.name, ','), 'area/')
run: |
echo "::error::Every PR with an 'impact/*' label should also have an 'area/*' label"
exit 1
- name: OK
run: exit 0

check-changelog:
if: contains(join(github.event.pull_request.labels.*.name, ','), 'impact/')
runs-on: ubuntu-20.04
env:
PR_BODY: |
${{ github.event.pull_request.body }}
steps:
- name: Check changelog description
run: |
# Extract the `markdown changelog` note code block
block=$(echo -n "$PR_BODY" | tr -d '\r' | awk '/^```markdown changelog$/{flag=1;next}/^```$/{flag=0}flag')

# Strip empty lines
desc=$(echo "$block" | awk NF)

if [ -z "$desc" ]; then
echo "::error::Changelog section is empty. Please provide a description for the changelog."
exit 1
fi

len=$(echo -n "$desc" | wc -c)
if [[ $len -le 6 ]]; then
echo "::error::Description looks too short: $desc"
exit 1
fi

echo "This PR will be included in the release notes with the following note:"
echo "$desc"

check-pr-branch:
runs-on: ubuntu-20.04
env:
PR_TITLE: ${{ github.event.pull_request.title }}
steps:
# Backports or PR that target a release branch directly should mention the target branch in the title, for example:
# [X.Y backport] Some change that needs backporting to X.Y
# [X.Y] Change directly targeting the X.Y branch
- name: Check release branch
id: title_branch
run: |
# get the intended major version prefix ("[27.1 backport]" -> "27.") from the PR title.
[[ "$PR_TITLE" =~ ^\[([0-9]*\.)[^]]*\] ]] && branch="${BASH_REMATCH[1]}"

# get major version prefix from the release branch ("27.x -> "27.")
[[ "$GITHUB_BASE_REF" =~ ^([0-9]*\.) ]] && target_branch="${BASH_REMATCH[1]}" || target_branch="$GITHUB_BASE_REF"

if [[ "$target_branch" != "$branch" ]] && ! [[ "$GITHUB_BASE_REF" == "master" && "$branch" == "" ]]; then
echo "::error::PR is opened against the $GITHUB_BASE_REF branch, but its title suggests otherwise."
exit 1
fi
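The check-changelog job in the workflow above pulls the changelog note out of the PR body with a small awk program. Below is a hedged, self-contained sketch of that same pipeline run against a made-up PR body; only the sample PR_BODY is invented, the extraction commands are copied from the job:

#!/usr/bin/env bash
set -euo pipefail

# Hypothetical PR body following .github/PULL_REQUEST_TEMPLATE.md
PR_BODY='Some description of the change.

```markdown changelog
Fix a hypothetical race when restarting containers.
```

**- A picture of a cute animal (not mandatory but encouraged)**'

# Same awk program as the workflow: keep only the lines between the
# ```markdown changelog fence and the next closing ``` fence.
block=$(echo -n "$PR_BODY" | tr -d '\r' | awk '/^```markdown changelog$/{flag=1;next}/^```$/{flag=0}flag')

# Strip empty lines before checking that a note was actually provided.
desc=$(echo "$block" | awk NF)

echo "Extracted changelog note: $desc"
# Expected output: Extracted changelog note: Fix a hypothetical race when restarting containers.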
.github/workflows/windows-2019.yml (vendored, 10 changed lines)
@@ -1,14 +1,5 @@
name: windows-2019

# Default to 'contents: read', which grants actions to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
contents: read

concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true

@@ -31,7 +22,6 @@ jobs:
needs:
- test-prepare
uses: ./.github/workflows/.windows.yml
secrets: inherit
strategy:
fail-fast: false
matrix:
.github/workflows/windows-2022.yml (vendored, 10 changed lines)
@@ -1,14 +1,5 @@
name: windows-2022

# Default to 'contents: read', which grants actions to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
contents: read

concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true

@@ -34,7 +25,6 @@ jobs:
needs:
- test-prepare
uses: ./.github/workflows/.windows.yml
secrets: inherit
strategy:
fail-fast: false
matrix:
@@ -1,7 +1,6 @@
linters:
enable:
- depguard
- dupword # Checks for duplicate words in the source code.
- goimports
- gosec
- gosimple

@@ -26,11 +25,6 @@ linters:
- docs

linters-settings:
dupword:
ignore:
- "true" # some tests use this as expected output
- "false" # some tests use this as expected output
- "root" # for tests using "ls" output with files owned by "root:root"
importas:
# Do not allow unaliased imports of aliased packages.
no-unaliased: true

@@ -38,7 +32,7 @@ linters-settings:
alias:
# Enforce alias to prevent it accidentally being used instead of our
# own errdefs package (or vice-versa).
- pkg: github.com/containerd/errdefs
- pkg: github.com/containerd/containerd/errdefs
alias: cerrdefs
- pkg: github.com/opencontainers/image-spec/specs-go/v1
alias: ocispec

@@ -51,20 +45,6 @@ linters-settings:
deny:
- pkg: io/ioutil
desc: The io/ioutil package has been deprecated, see https://go.dev/doc/go1.16#ioutil
- pkg: "github.com/stretchr/testify/assert"
desc: Use "gotest.tools/v3/assert" instead
- pkg: "github.com/stretchr/testify/require"
desc: Use "gotest.tools/v3/assert" instead
- pkg: "github.com/stretchr/testify/suite"
desc: Do not use
- pkg: "github.com/containerd/containerd/errdefs"
desc: The errdefs package has moved to a separate module, https://github.com/containerd/errdefs
- pkg: "github.com/containerd/containerd/log"
desc: The logs package has moved to a separate module, https://github.com/containerd/log
- pkg: "github.com/containerd/containerd/pkg/userns"
desc: Use github.com/moby/sys/userns instead.
- pkg: "github.com/opencontainers/runc/libcontainer/userns"
desc: Use github.com/moby/sys/userns instead.
revive:
rules:
# FIXME make sure all packages have a description. Currently, there's many packages without.

@@ -138,16 +118,6 @@ issues:
linters:
- staticcheck

- text: "ineffectual assignment to ctx"
source: "ctx[, ].*=.*\\(ctx[,)]"
linters:
- ineffassign

- text: "SA4006: this value of `ctx` is never used"
source: "ctx[, ].*=.*\\(ctx[,)]"
linters:
- staticcheck

# Maximum issues count per one linter. Set to 0 to disable. Default is 50.
max-issues-per-linter: 0
.mailmap (27 changed lines)
@@ -7,7 +7,6 @@
#
# For an explanation of this file format, consult gitmailmap(5).

Aaron Yoshitake <airandfingers@gmail.com>
Aaron L. Xu <liker.xu@foxmail.com>
Aaron L. Xu <liker.xu@foxmail.com> <likexu@harmonycloud.cn>
Aaron Lehmann <alehmann@netflix.com>

@@ -31,11 +30,9 @@ Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp> <suda.akihiro@lab.ntt.co.jp>
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp> <suda.kyoto@gmail.com>
Akshay Moghe <akshay.moghe@gmail.com>
Alano Terblanche <alano.terblanche@docker.com>
Alano Terblanche <alano.terblanche@docker.com> <18033717+Benehiko@users.noreply.github.com>
Albin Kerouanton <albinker@gmail.com>
Albin Kerouanton <albinker@gmail.com> <557933+akerouanton@users.noreply.github.com>
Albin Kerouanton <albinker@gmail.com> <albin@akerouanton.name>
Albin Kerouanton <albinker@gmail.com> <557933+akerouanton@users.noreply.github.com>
Aleksa Sarai <asarai@suse.de>
Aleksa Sarai <asarai@suse.de> <asarai@suse.com>
Aleksa Sarai <asarai@suse.de> <cyphar@cyphar.com>

@@ -62,8 +59,6 @@ Allen Sun <allensun.shl@alibaba-inc.com> <allen.sun@daocloud.io>
Allen Sun <allensun.shl@alibaba-inc.com> <shlallen1990@gmail.com>
Anca Iordache <anca.iordache@docker.com>
Andrea Denisse Gómez <crypto.andrea@protonmail.ch>
Andrew Baxter <423qpsxzhh8k3h@s.rendaw.me>
Andrew Baxter <423qpsxzhh8k3h@s.rendaw.me> andrew <>
Andrew Kim <taeyeonkim90@gmail.com>
Andrew Kim <taeyeonkim90@gmail.com> <akim01@fortinet.com>
Andrew Weiss <andrew.weiss@docker.com> <andrew.weiss@microsoft.com>

@@ -124,7 +119,6 @@ Brian Goff <cpuguy83@gmail.com> <bgoff@cpuguy83-mbp.home>
Brian Goff <cpuguy83@gmail.com> <bgoff@cpuguy83-mbp.local>
Brian Goff <cpuguy83@gmail.com> <brian.goff@microsoft.com>
Brian Goff <cpuguy83@gmail.com> <cpuguy@hey.com>
Calvin Liu <flycalvin@qq.com>
Cameron Sparr <gh@sparr.email>
Carlos de Paula <me@carlosedp.com>
Chander Govindarajan <chandergovind@gmail.com>

@@ -136,7 +130,6 @@ Chen Mingjie <chenmingjie0828@163.com>
Chen Qiu <cheney-90@hotmail.com>
Chen Qiu <cheney-90@hotmail.com> <21321229@zju.edu.cn>
Chengfei Shang <cfshang@alauda.io>
Chentianze <cmoman@126.com>
Chris Dias <cdias@microsoft.com>
Chris McKinnel <chris.mckinnel@tangentlabs.co.uk>
Chris Price <cprice@mirantis.com>

@@ -145,8 +138,6 @@ Chris Telfer <ctelfer@docker.com>
Chris Telfer <ctelfer@docker.com> <ctelfer@users.noreply.github.com>
Christopher Biscardi <biscarch@sketcht.com>
Christopher Latham <sudosurootdev@gmail.com>
Christopher Petito <chrisjpetito@gmail.com>
Christopher Petito <chrisjpetito@gmail.com> <47751006+krissetto@users.noreply.github.com>
Christy Norman <christy@linux.vnet.ibm.com>
Chun Chen <ramichen@tencent.com> <chenchun.feed@gmail.com>
Corbin Coleman <corbin.coleman@docker.com>

@@ -182,8 +173,6 @@ Dattatraya Kumbhar <dattatraya.kumbhar@gslab.com>
Dave Goodchild <buddhamagnet@gmail.com>
Dave Henderson <dhenderson@gmail.com> <Dave.Henderson@ca.ibm.com>
Dave Tucker <dt@docker.com> <dave@dtucker.co.uk>
David Dooling <dooling@gmail.com>
David Dooling <dooling@gmail.com> <david.dooling@docker.com>
David M. Karr <davidmichaelkarr@gmail.com>
David Sheets <dsheets@docker.com> <sheets@alum.mit.edu>
David Sissitka <me@dsissitka.com>

@@ -230,8 +219,6 @@ Felix Hupfeld <felix@quobyte.com> <quofelix@users.noreply.github.com>
Felix Ruess <felix.ruess@gmail.com> <felix.ruess@roboception.de>
Feng Yan <fy2462@gmail.com>
Fengtu Wang <wangfengtu@huawei.com> <wangfengtu@huawei.com>
Filipe Pina <hzlu1ot0@duck.com>
Filipe Pina <hzlu1ot0@duck.com> <636320+fopina@users.noreply.github.com>
Francisco Carriedo <fcarriedo@gmail.com>
Frank Rosquin <frank.rosquin+github@gmail.com> <frank.rosquin@gmail.com>
Frank Yang <yyb196@gmail.com>

@@ -283,7 +270,6 @@ Hollie Teal <hollie@docker.com> <hollie.teal@docker.com>
Hollie Teal <hollie@docker.com> <hollietealok@users.noreply.github.com>
hsinko <21551195@zju.edu.cn> <hsinko@users.noreply.github.com>
Hu Keping <hukeping@huawei.com>
Huajin Tong <fliterdashen@gmail.com>
Hui Kang <hkang.sunysb@gmail.com>
Hui Kang <hkang.sunysb@gmail.com> <kangh@us.ibm.com>
Huu Nguyen <huu@prismskylabs.com> <whoshuu@gmail.com>

@@ -350,8 +336,6 @@ John Howard <github@lowenna.com> <john.howard@microsoft.com>
John Howard <github@lowenna.com> <john@lowenna.com>
John Stephens <johnstep@docker.com> <johnstep@users.noreply.github.com>
Jon Surrell <jon.surrell@gmail.com> <jon.surrell@automattic.com>
Jonathan A. Sternberg <jonathansternberg@gmail.com>
Jonathan A. Sternberg <jonathansternberg@gmail.com> <jonathan.sternberg@docker.com>
Jonathan Choy <jonathan.j.choy@gmail.com>
Jonathan Choy <jonathan.j.choy@gmail.com> <oni@tetsujinlabs.com>
Jordan Arentsen <blissdev@gmail.com>

@@ -494,14 +478,14 @@ Mikael Davranche <mikael.davranche@corp.ovh.com> <mikael.davranche@corp.ovh.net>
Mike Casas <mkcsas0@gmail.com> <mikecasas@users.noreply.github.com>
Mike Goelzer <mike.goelzer@docker.com> <mgoelzer@docker.com>
Milas Bowman <devnull@milas.dev>
Milas Bowman <devnull@milas.dev> <milas.bowman@docker.com>
Milas Bowman <devnull@milas.dev> <milasb@gmail.com>
Milas Bowman <devnull@milas.dev> <milas.bowman@docker.com>
Milind Chawre <milindchawre@gmail.com>
Misty Stanley-Jones <misty@docker.com> <misty@apache.org>
Mohammad Banikazemi <MBanikazemi@gmail.com>
Mohammad Banikazemi <MBanikazemi@gmail.com> <mb@us.ibm.com>
Mohd Sadiq <mohdsadiq058@gmail.com> <42430865+msadiq058@users.noreply.github.com>
Mohd Sadiq <mohdsadiq058@gmail.com> <mohdsadiq058@gmail.com>
Mohd Sadiq <mohdsadiq058@gmail.com> <42430865+msadiq058@users.noreply.github.com>
Mohit Soni <mosoni@ebay.com> <mohitsoni1989@gmail.com>
Moorthy RS <rsmoorthy@gmail.com> <rsmoorthy@users.noreply.github.com>
Moysés Borges <moysesb@gmail.com>

@@ -526,7 +510,6 @@ Olli Janatuinen <olli.janatuinen@gmail.com> <olljanat@users.noreply.github.com>
Onur Filiz <onur.filiz@microsoft.com>
Onur Filiz <onur.filiz@microsoft.com> <ofiliz@users.noreply.github.com>
Ouyang Liduo <oyld0210@163.com>
Patrick St. laurent <patrick@saint-laurent.us>
Patrick Stapleton <github@gdi2290.com>
Paul Liljenberg <liljenberg.paul@gmail.com> <letters@paulnotcom.se>
Pavel Tikhomirov <ptikhomirov@virtuozzo.com> <ptikhomirov@parallels.com>

@@ -550,8 +533,6 @@ Qin TianHuan <tianhuan@bingotree.cn>
Ray Tsang <rayt@google.com> <saturnism@users.noreply.github.com>
Renaud Gaubert <rgaubert@nvidia.com> <renaud.gaubert@gmail.com>
Richard Scothern <richard.scothern@gmail.com>
Rob Murray <rob.murray@docker.com>
Rob Murray <rob.murray@docker.com> <148866618+robmry@users.noreply.github.com>
Robert Terhaar <rterhaar@atlanticdynamic.com> <robbyt@users.noreply.github.com>
Roberto G. Hashioka <roberto.hashioka@docker.com> <roberto_hashioka@hotmail.com>
Roberto Muñoz Fernández <robertomf@gmail.com> <roberto.munoz.fernandez.contractor@bbva.com>

@@ -562,7 +543,6 @@ Rongxiang Song <tinysong1226@gmail.com>
Rony Weng <ronyweng@synology.com>
Ross Boucher <rboucher@gmail.com>
Rui Cao <ruicao@alauda.io>
Rui JingAn <quiterace@gmail.com>
Runshen Zhu <runshen.zhu@gmail.com>
Ryan Stelly <ryan.stelly@live.com>
Ryoga Saito <contact@proelbtn.com>

@@ -583,7 +563,6 @@ Sebastiaan van Stijn <github@gone.nl> <sebastiaan@ws-key-sebas3.dpi1.dpi>
Sebastiaan van Stijn <github@gone.nl> <thaJeztah@users.noreply.github.com>
Sebastian Thomschke <sebthom@users.noreply.github.com>
Seongyeol Lim <seongyeol37@gmail.com>
Serhii Nakon <serhii.n@thescimus.com>
Shaun Kaasten <shaunk@gmail.com>
Shawn Landden <shawn@churchofgit.com> <shawnlandden@gmail.com>
Shengbo Song <thomassong@tencent.com>
AUTHORS (29 changed lines)
@@ -10,7 +10,6 @@ Aaron Huslage <huslage@gmail.com>
Aaron L. Xu <liker.xu@foxmail.com>
Aaron Lehmann <alehmann@netflix.com>
Aaron Welch <welch@packet.net>
Aaron Yoshitake <airandfingers@gmail.com>
Abel Muiño <amuino@gmail.com>
Abhijeet Kasurde <akasurde@redhat.com>
Abhinandan Prativadi <aprativadi@gmail.com>

@@ -63,7 +62,6 @@ alambike <alambike@gmail.com>
Alan Hoyle <alan@alanhoyle.com>
Alan Scherger <flyinprogrammer@gmail.com>
Alan Thompson <cloojure@gmail.com>
Alano Terblanche <alano.terblanche@docker.com>
Albert Callarisa <shark234@gmail.com>
Albert Zhang <zhgwenming@gmail.com>
Albin Kerouanton <albinker@gmail.com>

@@ -143,7 +141,6 @@ Andreas Tiefenthaler <at@an-ti.eu>
Andrei Gherzan <andrei@resin.io>
Andrei Ushakov <aushakov@netflix.com>
Andrei Vagin <avagin@gmail.com>
Andrew Baxter <423qpsxzhh8k3h@s.rendaw.me>
Andrew C. Bodine <acbodine@us.ibm.com>
Andrew Clay Shafer <andrewcshafer@gmail.com>
Andrew Duckworth <grillopress@gmail.com>

@@ -196,7 +193,6 @@ Anton Löfgren <anton.lofgren@gmail.com>
Anton Nikitin <anton.k.nikitin@gmail.com>
Anton Polonskiy <anton.polonskiy@gmail.com>
Anton Tiurin <noxiouz@yandex.ru>
Antonio Aguilar <antonio@zoftko.com>
Antonio Murdaca <antonio.murdaca@gmail.com>
Antonis Kalipetis <akalipetis@gmail.com>
Antony Messerli <amesserl@rackspace.com>

@@ -225,6 +221,7 @@ Avi Das <andas222@gmail.com>
Avi Kivity <avi@scylladb.com>
Avi Miller <avi.miller@oracle.com>
Avi Vaid <avaid1996@gmail.com>
ayoshitake <airandfingers@gmail.com>
Azat Khuyiyakhmetov <shadow_uz@mail.ru>
Bao Yonglei <baoyonglei@huawei.com>
Bardia Keyoumarsi <bkeyouma@ucsc.edu>

@@ -319,7 +316,6 @@ Burke Libbey <burke@libbey.me>
Byung Kang <byung.kang.ctr@amrdec.army.mil>
Caleb Spare <cespare@gmail.com>
Calen Pennington <cale@edx.org>
Calvin Liu <flycalvin@qq.com>
Cameron Boehmer <cameron.boehmer@gmail.com>
Cameron Sparr <gh@sparr.email>
Cameron Spear <cameronspear@gmail.com>

@@ -366,7 +362,6 @@ Chen Qiu <cheney-90@hotmail.com>
Cheng-mean Liu <soccerl@microsoft.com>
Chengfei Shang <cfshang@alauda.io>
Chengguang Xu <cgxu519@gmx.com>
Chentianze <cmoman@126.com>
Chenyang Yan <memory.yancy@gmail.com>
chenyuzhu <chenyuzhi@oschina.cn>
Chetan Birajdar <birajdar.chetan@gmail.com>

@@ -414,7 +409,6 @@ Christopher Crone <christopher.crone@docker.com>
Christopher Currie <codemonkey+github@gmail.com>
Christopher Jones <tophj@linux.vnet.ibm.com>
Christopher Latham <sudosurootdev@gmail.com>
Christopher Petito <chrisjpetito@gmail.com>
Christopher Rigor <crigor@gmail.com>
Christy Norman <christy@linux.vnet.ibm.com>
Chun Chen <ramichen@tencent.com>

@@ -675,7 +669,6 @@ Erik Hollensbe <github@hollensbe.org>
Erik Inge Bolsø <knan@redpill-linpro.com>
Erik Kristensen <erik@erikkristensen.com>
Erik Sipsma <erik@sipsma.dev>
Erik Sjölund <erik.sjolund@gmail.com>
Erik St. Martin <alakriti@gmail.com>
Erik Weathers <erikdw@gmail.com>
Erno Hopearuoho <erno.hopearuoho@gmail.com>

@@ -738,7 +731,6 @@ Feroz Salam <feroz.salam@sourcegraph.com>
Ferran Rodenas <frodenas@gmail.com>
Filipe Brandenburger <filbranden@google.com>
Filipe Oliveira <contato@fmoliveira.com.br>
Filipe Pina <hzlu1ot0@duck.com>
Flavio Castelli <fcastelli@suse.com>
Flavio Crisciani <flavio.crisciani@docker.com>
Florian <FWirtz@users.noreply.github.com>

@@ -783,7 +775,6 @@ Gabriel L. Somlo <gsomlo@gmail.com>
Gabriel Linder <linder.gabriel@gmail.com>
Gabriel Monroy <gabriel@opdemand.com>
Gabriel Nicolas Avellaneda <avellaneda.gabriel@gmail.com>
Gabriel Tomitsuka <gabriel@tomitsuka.com>
Gaetan de Villele <gdevillele@gmail.com>
Galen Sampson <galen.sampson@gmail.com>
Gang Qiao <qiaohai8866@gmail.com>

@@ -799,7 +790,6 @@ Geoff Levand <geoff@infradead.org>
Geoffrey Bachelet <grosfrais@gmail.com>
Geon Kim <geon0250@gmail.com>
George Kontridze <george@bugsnag.com>
George Ma <mayangang@outlook.com>
George MacRorie <gmacr31@gmail.com>
George Xie <georgexsh@gmail.com>
Georgi Hristozov <georgi@forkbomb.nl>

@@ -885,8 +875,6 @@ Hsing-Yu (David) Chen <davidhsingyuchen@gmail.com>
hsinko <21551195@zju.edu.cn>
Hu Keping <hukeping@huawei.com>
Hu Tao <hutao@cn.fujitsu.com>
Huajin Tong <fliterdashen@gmail.com>
huang-jl <1046678590@qq.com>
HuanHuan Ye <logindaveye@gmail.com>
Huanzhong Zhang <zhanghuanzhong90@gmail.com>
Huayi Zhang <irachex@gmail.com>

@@ -921,7 +909,6 @@ Illo Abdulrahim <abdulrahim.illo@nokia.com>
Ilya Dmitrichenko <errordeveloper@gmail.com>
Ilya Gusev <mail@igusev.ru>
Ilya Khlopotov <ilya.khlopotov@gmail.com>
imalasong <2879499479@qq.com>
imre Fitos <imre.fitos+github@gmail.com>
inglesp <peter.inglesby@gmail.com>
Ingo Gottwald <in.gottwald@gmail.com>

@@ -939,7 +926,6 @@ J Bruni <joaohbruni@yahoo.com.br>
J. Nunn <jbnunn@gmail.com>
Jack Danger Canty <jackdanger@squareup.com>
Jack Laxson <jackjrabbit@gmail.com>
Jack Walker <90711509+j2walker@users.noreply.github.com>
Jacob Atzen <jacob@jacobatzen.dk>
Jacob Edelman <edelman.jd@gmail.com>
Jacob Tomlinson <jacob@tom.linson.uk>

@@ -983,7 +969,6 @@ Jannick Fahlbusch <git@jf-projects.de>
Januar Wayong <januar@gmail.com>
Jared Biel <jared.biel@bolderthinking.com>
Jared Hocutt <jaredh@netapp.com>
Jaroslav Jindrak <dzejrou@gmail.com>
Jaroslaw Zabiello <hipertracker@gmail.com>
Jasmine Hegman <jasmine@jhegman.com>
Jason A. Donenfeld <Jason@zx2c4.com>

@@ -999,7 +984,6 @@ Jason Shepherd <jason@jasonshepherd.net>
Jason Smith <jasonrichardsmith@gmail.com>
Jason Sommer <jsdirv@gmail.com>
Jason Stangroome <jason@codeassassin.com>
Jasper Siepkes <siepkes@serviceplanet.nl>
Javier Bassi <javierbassi@gmail.com>
jaxgeller <jacksongeller@gmail.com>
Jay <teguhwpurwanto@gmail.com>

@@ -1028,7 +1012,6 @@ Jeffrey Bolle <jeffreybolle@gmail.com>
Jeffrey Morgan <jmorganca@gmail.com>
Jeffrey van Gogh <jvg@google.com>
Jenny Gebske <jennifer@gebske.de>
Jeongseok Kang <piono623@naver.com>
Jeremy Chambers <jeremy@thehipbot.com>
Jeremy Grosser <jeremy@synack.me>
Jeremy Huntwork <jhuntwork@lightcubesolutions.com>

@@ -1046,7 +1029,6 @@ Jezeniel Zapanta <jpzapanta22@gmail.com>
Jhon Honce <jhonce@redhat.com>
Ji.Zhilong <zhilongji@gmail.com>
Jian Liao <jliao@alauda.io>
Jian Zeng <anonymousknight96@gmail.com>
Jian Zhang <zhangjian.fnst@cn.fujitsu.com>
Jiang Jinyang <jjyruby@gmail.com>
Jianyong Wu <jianyong.wu@arm.com>

@@ -1111,7 +1093,6 @@ Jon Johnson <jonjohnson@google.com>
Jon Surrell <jon.surrell@gmail.com>
Jon Wedaman <jweede@gmail.com>
Jonas Dohse <jonas@dohse.ch>
Jonas Geiler <git@jonasgeiler.com>
Jonas Heinrich <Jonas@JonasHeinrich.com>
Jonas Pfenniger <jonas@pfenniger.name>
Jonathan A. Schweder <jonathanschweder@gmail.com>

@@ -1279,7 +1260,6 @@ Lakshan Perera <lakshan@laktek.com>
Lalatendu Mohanty <lmohanty@redhat.com>
Lance Chen <cyen0312@gmail.com>
Lance Kinley <lkinley@loyaltymethods.com>
Lars Andringa <l.s.andringa@rug.nl>
Lars Butler <Lars.Butler@gmail.com>
Lars Kellogg-Stedman <lars@redhat.com>
Lars R. Damerow <lars@pixar.com>

@@ -1686,7 +1666,6 @@ Patrick Böänziger <patrick.baenziger@bsi-software.com>
Patrick Devine <patrick.devine@docker.com>
Patrick Haas <patrickhaas@google.com>
Patrick Hemmer <patrick.hemmer@gmail.com>
Patrick St. laurent <patrick@saint-laurent.us>
Patrick Stapleton <github@gdi2290.com>
Patrik Cyvoct <patrik@ptrk.io>
pattichen <craftsbear@gmail.com>

@@ -1892,7 +1871,6 @@ Royce Remer <royceremer@gmail.com>
Rozhnov Alexandr <nox73@ya.ru>
Rudolph Gottesheim <r.gottesheim@loot.at>
Rui Cao <ruicao@alauda.io>
Rui JingAn <quiterace@gmail.com>
Rui Lopes <rgl@ruilopes.com>
Ruilin Li <liruilin4@huawei.com>
Runshen Zhu <runshen.zhu@gmail.com>

@@ -1989,7 +1967,6 @@ Sergey Evstifeev <sergey.evstifeev@gmail.com>
Sergii Kabashniuk <skabashnyuk@codenvy.com>
Sergio Lopez <slp@redhat.com>
Serhat Gülçiçek <serhat25@gmail.com>
Serhii Nakon <serhii.n@thescimus.com>
SeungUkLee <lsy931106@gmail.com>
Sevki Hasirci <s@sevki.org>
Shane Canon <scanon@lbl.gov>

@@ -2199,7 +2176,6 @@ Tomek Mańko <tomek.manko@railgun-solutions.com>
Tommaso Visconti <tommaso.visconti@gmail.com>
Tomoya Tabuchi <t@tomoyat1.com>
Tomáš Hrčka <thrcka@redhat.com>
Tomáš Virtus <nechtom@gmail.com>
tonic <tonicbupt@gmail.com>
Tonny Xu <tonny.xu@gmail.com>
Tony Abboud <tdabboud@hotmail.com>

@@ -2244,7 +2220,6 @@ Victor I. Wood <viw@t2am.com>
Victor Lyuboslavsky <victor@victoreda.com>
Victor Marmol <vmarmol@google.com>
Victor Palma <palma.victor@gmail.com>
Victor Toni <victor.toni@gmail.com>
Victor Vieux <victor.vieux@docker.com>
Victoria Bialas <victoria.bialas@docker.com>
Vijaya Kumar K <vijayak@caviumnetworks.com>

@@ -2278,7 +2253,6 @@ VladimirAus <v_roudakov@yahoo.com>
Vladislav Kolesnikov <vkolesnikov@beget.ru>
Vlastimil Zeman <vlastimil.zeman@diffblue.com>
Vojtech Vitek (V-Teq) <vvitek@redhat.com>
voloder <110066198+voloder@users.noreply.github.com>
Walter Leibbrandt <github@wrl.co.za>
Walter Stanish <walter@pratyeka.org>
Wang Chao <chao.wang@ucloud.cn>

@@ -2296,7 +2270,6 @@ Wassim Dhif <wassimdhif@gmail.com>
Wataru Ishida <ishida.wataru@lab.ntt.co.jp>
Wayne Chang <wayne@neverfear.org>
Wayne Song <wsong@docker.com>
weebney <weebney@gmail.com>
Weerasak Chongnguluam <singpor@gmail.com>
Wei Fu <fuweid89@gmail.com>
Wei Wu <wuwei4455@gmail.com>
@@ -101,7 +101,7 @@ the contributors guide.
<td>
<p>
Register for the Docker Community Slack at
<a href="https://dockr.ly/comm-slack" target="_blank">https://dockr.ly/comm-slack</a>.
<a href="https://dockr.ly/slack" target="_blank">https://dockr.ly/slack</a>.
We use the #moby-project channel for general discussion, and there are separate channels for other Moby projects such as #containerd.
</p>
</td>
53
Dockerfile
53
Dockerfile
@@ -1,19 +1,19 @@
|
||||
# syntax=docker/dockerfile:1.7
|
||||
# syntax=docker/dockerfile:1
|
||||
|
||||
ARG GO_VERSION=1.21.13
|
||||
ARG GO_VERSION=1.21.6
|
||||
ARG BASE_DEBIAN_DISTRO="bookworm"
|
||||
ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}"
|
||||
ARG XX_VERSION=1.5.0
|
||||
ARG XX_VERSION=1.2.1
|
||||
|
||||
ARG VPNKIT_VERSION=0.5.0
|
||||
|
||||
ARG DOCKERCLI_REPOSITORY="https://github.com/docker/cli.git"
|
||||
ARG DOCKERCLI_VERSION=v27.0.2
|
||||
ARG DOCKERCLI_VERSION=v25.0.2
|
||||
# cli version used for integration-cli tests
|
||||
ARG DOCKERCLI_INTEGRATION_REPOSITORY="https://github.com/docker/cli.git"
|
||||
ARG DOCKERCLI_INTEGRATION_VERSION=v17.06.2-ce
|
||||
ARG BUILDX_VERSION=0.16.1
|
||||
ARG COMPOSE_VERSION=v2.29.0
|
||||
ARG BUILDX_VERSION=0.12.1
|
||||
ARG COMPOSE_VERSION=v2.24.5
|
||||
|
||||
ARG SYSTEMD="false"
|
||||
ARG DOCKER_STATIC=1
|
||||
@@ -24,12 +24,6 @@ ARG DOCKER_STATIC=1
|
||||
# specified here should match a current release.
|
||||
ARG REGISTRY_VERSION=2.8.3
|
||||
|
||||
# delve is currently only supported on linux/amd64 and linux/arm64;
|
||||
# https://github.com/go-delve/delve/blob/v1.8.1/pkg/proc/native/support_sentinel.go#L1-L6
|
||||
ARG DELVE_SUPPORTED=${TARGETPLATFORM#linux/amd64} DELVE_SUPPORTED=${DELVE_SUPPORTED#linux/arm64}
|
||||
ARG DELVE_SUPPORTED=${DELVE_SUPPORTED:+"unsupported"}
|
||||
ARG DELVE_SUPPORTED=${DELVE_SUPPORTED:-"supported"}
|
||||
|
||||
# cross compilation helper
|
||||
FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx
|
||||
|
||||
@@ -150,7 +144,7 @@ RUN git init . && git remote add origin "https://github.com/go-delve/delve.git"
|
||||
ARG DELVE_VERSION=v1.21.1
|
||||
RUN git fetch -q --depth 1 origin "${DELVE_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD
|
||||
|
||||
FROM base AS delve-supported
|
||||
FROM base AS delve-build
|
||||
WORKDIR /usr/src/delve
|
||||
ARG TARGETPLATFORM
|
||||
RUN --mount=from=delve-src,src=/usr/src/delve,rw \
|
||||
@@ -161,8 +155,16 @@ RUN --mount=from=delve-src,src=/usr/src/delve,rw \
|
||||
xx-verify /build/dlv
|
||||
EOT
|
||||
|
||||
FROM binary-dummy AS delve-unsupported
|
||||
FROM delve-${DELVE_SUPPORTED} AS delve
|
||||
# delve is currently only supported on linux/amd64 and linux/arm64;
|
||||
# https://github.com/go-delve/delve/blob/v1.8.1/pkg/proc/native/support_sentinel.go#L1-L6
|
||||
FROM binary-dummy AS delve-windows
|
||||
FROM binary-dummy AS delve-linux-arm
|
||||
FROM binary-dummy AS delve-linux-ppc64le
|
||||
FROM binary-dummy AS delve-linux-s390x
|
||||
FROM delve-build AS delve-linux-amd64
|
||||
FROM delve-build AS delve-linux-arm64
|
||||
FROM delve-linux-${TARGETARCH} AS delve-linux
|
||||
FROM delve-${TARGETOS} AS delve
|
||||
|
||||
FROM base AS tomll
|
||||
# GOTOML_VERSION specifies the version of the tomll binary to build and install
|
||||
@@ -196,7 +198,7 @@ RUN git init . && git remote add origin "https://github.com/containerd/container
|
||||
# When updating the binary version you may also need to update the vendor
|
||||
# version to pick up bug fixes or new APIs, however, usually the Go packages
|
||||
# are built from a commit from the master branch.
|
||||
ARG CONTAINERD_VERSION=v1.7.21
|
||||
ARG CONTAINERD_VERSION=v1.7.13
|
||||
RUN git fetch -q --depth 1 origin "${CONTAINERD_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD
|
||||
|
||||
FROM base AS containerd-build
|
||||
@@ -229,7 +231,7 @@ FROM binary-dummy AS containerd-windows
|
||||
FROM containerd-${TARGETOS} AS containerd
|
||||
|
||||
FROM base AS golangci_lint
|
||||
ARG GOLANGCI_LINT_VERSION=v1.59.1
|
||||
ARG GOLANGCI_LINT_VERSION=v1.55.2
|
||||
RUN --mount=type=cache,target=/root/.cache/go-build \
|
||||
--mount=type=cache,target=/go/pkg/mod \
|
||||
GOBIN=/build/ GO111MODULE=on go install "github.com/golangci/golangci-lint/cmd/golangci-lint@${GOLANGCI_LINT_VERSION}" \
|
||||
@@ -243,18 +245,12 @@ RUN --mount=type=cache,target=/root/.cache/go-build \
|
||||
&& /build/gotestsum --version
|
||||
|
||||
FROM base AS shfmt
|
||||
ARG SHFMT_VERSION=v3.8.0
|
||||
ARG SHFMT_VERSION=v3.6.0
|
||||
RUN --mount=type=cache,target=/root/.cache/go-build \
|
||||
--mount=type=cache,target=/go/pkg/mod \
|
||||
GOBIN=/build/ GO111MODULE=on go install "mvdan.cc/sh/v3/cmd/shfmt@${SHFMT_VERSION}" \
|
||||
&& /build/shfmt --version
|
||||
|
||||
FROM base AS gopls
|
||||
RUN --mount=type=cache,target=/root/.cache/go-build \
|
||||
--mount=type=cache,target=/go/pkg/mod \
|
||||
GOBIN=/build/ GO111MODULE=on go install "golang.org/x/tools/gopls@latest" \
|
||||
&& /build/gopls version
|
||||
|
||||
FROM base AS dockercli
|
||||
WORKDIR /go/src/github.com/docker/cli
|
||||
ARG DOCKERCLI_REPOSITORY
|
||||
@@ -287,7 +283,7 @@ RUN git init . && git remote add origin "https://github.com/opencontainers/runc.
|
||||
# that is used. If you need to update runc, open a pull request in the containerd
|
||||
# project first, and update both after that is merged. When updating RUNC_VERSION,
|
||||
# consider updating runc in vendor.mod accordingly.
|
||||
ARG RUNC_VERSION=v1.1.13
|
||||
ARG RUNC_VERSION=v1.1.12
|
||||
RUN git fetch -q --depth 1 origin "${RUNC_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD
|
||||
|
||||
FROM base AS runc-build
|
||||
@@ -356,7 +352,7 @@ FROM base AS rootlesskit-src
|
||||
WORKDIR /usr/src/rootlesskit
|
||||
RUN git init . && git remote add origin "https://github.com/rootless-containers/rootlesskit.git"
|
||||
# When updating, also update vendor.mod and hack/dockerfile/install/rootlesskit.installer accordingly.
|
||||
ARG ROOTLESSKIT_VERSION=v2.0.2
|
||||
ARG ROOTLESSKIT_VERSION=v2.0.1
|
||||
RUN git fetch -q --depth 1 origin "${ROOTLESSKIT_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD
|
||||
|
||||
FROM base AS rootlesskit-build
|
||||
@@ -659,11 +655,6 @@ RUN <<EOT
|
||||
docker-proxy --version
|
||||
EOT
|
||||
|
||||
# devcontainer is a stage used by .devcontainer/devcontainer.json
|
||||
FROM dev-base AS devcontainer
|
||||
COPY --link . .
|
||||
COPY --link --from=gopls /build/ /usr/local/bin/
|
||||
|
||||
# usage:
|
||||
# > make shell
|
||||
# > SYSTEMD=true make shell
|
||||
|
||||
@@ -5,7 +5,7 @@

# This represents the bare minimum required to build and test Docker.

ARG GO_VERSION=1.21.13
ARG GO_VERSION=1.21.6

ARG BASE_DEBIAN_DISTRO="bookworm"
ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}"

@@ -161,10 +161,10 @@ FROM ${WINDOWS_BASE_IMAGE}:${WINDOWS_BASE_IMAGE_TAG}
# Use PowerShell as the default shell
SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop'; $ProgressPreference = 'SilentlyContinue';"]

ARG GO_VERSION=1.21.13
ARG GO_VERSION=1.21.6
ARG GOTESTSUM_VERSION=v1.8.2
ARG GOWINRES_VERSION=v0.3.1
ARG CONTAINERD_VERSION=v1.7.21
ARG CONTAINERD_VERSION=v1.7.13

# Environment variable notes:
# - GO_VERSION must be consistent with 'Dockerfile' used by Linux.

@@ -38,7 +38,6 @@
"laurazard",
"mhbauer",
"neersighted",
"robmry",
"rumpl",
"runcom",
"samuelkarp",
@@ -76,6 +75,7 @@
"olljanat",
"programmerq",
"ripcurld",
"robmry",
"sam-thibault",
"samwhited",
"thajeztah"

32
Makefile
@@ -1,3 +1,5 @@
.PHONY: all binary dynbinary build cross help install manpages run shell test test-docker-py test-integration test-unit validate validate-% win

DOCKER ?= docker
BUILDX ?= $(DOCKER) buildx

@@ -14,9 +16,6 @@ export VALIDATE_REPO
export VALIDATE_BRANCH
export VALIDATE_ORIGIN_BRANCH

export PAGER
export GIT_PAGER

# env vars passed through directly to Docker's build scripts
# to allow things like `make KEEPBUNDLE=1 binary` easily
# `project/PACKAGERS.md` have some limited documentation of some of these
@@ -78,8 +77,6 @@ DOCKER_ENVS := \
	-e DEFAULT_PRODUCT_LICENSE \
	-e PRODUCT \
	-e PACKAGER_NAME \
	-e PAGER \
	-e GIT_PAGER \
	-e OTEL_EXPORTER_OTLP_ENDPOINT \
	-e OTEL_EXPORTER_OTLP_PROTOCOL \
	-e OTEL_SERVICE_NAME
@@ -155,19 +152,15 @@ BAKE_CMD := $(BUILDX) bake

default: binary

.PHONY: all
all: build ## validate all checks, build linux binaries, run all tests,\ncross build non-linux binaries, and generate archives
	$(DOCKER_RUN_DOCKER) bash -c 'hack/validate/default && hack/make.sh'

.PHONY: binary
binary: bundles ## build statically linked linux binaries
	$(BAKE_CMD) binary

.PHONY: dynbinary
dynbinary: bundles ## build dynamically linked linux binaries
	$(BAKE_CMD) dynbinary

.PHONY: cross
cross: bundles ## cross build the binaries
	$(BAKE_CMD) binary-cross

@@ -181,15 +174,12 @@ clean: clean-cache
clean-cache: ## remove the docker volumes that are used for caching in the dev-container
	docker volume rm -f docker-dev-cache docker-mod-cache

.PHONY: help
help: ## this help
	@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z0-9_-]+:.*?## / {gsub("\\\\n",sprintf("\n%22c",""), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)

.PHONY: install
install: ## install the linux binaries
	KEEPBUNDLE=1 hack/make.sh install-binary

.PHONY: run
run: build ## run the docker daemon in a container
	$(DOCKER_RUN_DOCKER) sh -c "KEEPBUNDLE=1 hack/make.sh install-binary run"

@@ -202,22 +192,17 @@ endif
build: bundles
	$(BUILD_CMD) $(BUILD_OPTS) $(shell_target) --load -t "$(DOCKER_IMAGE)" .

.PHONY: shell
shell: build ## start a shell inside the build env
	$(DOCKER_RUN_DOCKER) bash

.PHONY: test
test: build test-unit ## run the unit, integration and docker-py tests
	$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-integration test-docker-py

.PHONY: test-docker-py
test-docker-py: build ## run the docker-py tests
	$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-docker-py

.PHONY: test-integration-cli
test-integration-cli: test-integration ## (DEPRECATED) use test-integration

.PHONY: test-integration
ifneq ($(and $(TEST_SKIP_INTEGRATION),$(TEST_SKIP_INTEGRATION_CLI)),)
test-integration:
	@echo Both integrations suites skipped per environment variables
@@ -226,29 +211,23 @@ test-integration: build ## run the integration tests
	$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-integration
endif

.PHONY: test-integration-flaky
test-integration-flaky: build ## run the stress test for all new integration tests
	$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-integration-flaky

.PHONY: test-unit
test-unit: build ## run the unit tests
	$(DOCKER_RUN_DOCKER) hack/test/unit

.PHONY: validate
validate: build ## validate DCO, Seccomp profile generation, gofmt,\n./pkg/ isolation, golint, tests, tomls, go vet and vendor
	$(DOCKER_RUN_DOCKER) hack/validate/all

.PHONY: validate-generate-files
validate-generate-files:
	$(BUILD_CMD) --target "validate" \
		--output "type=cacheonly" \
		--file "./hack/dockerfiles/generate-files.Dockerfile" .

.PHONY: validate-%
validate-%: build ## validate specific check
	$(DOCKER_RUN_DOCKER) hack/validate/$*

.PHONY: win
win: bundles ## cross build the binary for windows
	$(BAKE_CMD) --set *.platform=windows/amd64 binary

@@ -271,10 +250,9 @@ swagger-docs: ## preview the API documentation
.PHONY: generate-files
generate-files:
	$(eval $@_TMP_OUT := $(shell mktemp -d -t moby-output.XXXXXXXXXX))
	@if [ -z "$($@_TMP_OUT)" ]; then \
		echo "Temp dir is not set"; \
		exit 1; \
	fi
ifeq ($($@_TMP_OUT),)
	$(error Could not create temp directory.)
endif
	$(BUILD_CMD) --target "update" \
		--output "type=local,dest=$($@_TMP_OUT)" \
		--file "./hack/dockerfiles/generate-files.Dockerfile" .

@@ -32,7 +32,7 @@ New projects can be added if they fit with the community goals. Docker is commit
However, other projects are also encouraged to use Moby as an upstream, and to reuse the components in diverse ways, and all these uses will be treated in the same way. External maintainers and contributors are welcomed.

The Moby project is not intended as a location for support or feature requests for Docker products, but as a place for contributors to work on open source code, fix bugs, and make the code more useful.
The releases are supported by the maintainers, community and users, on a best efforts basis only. For customers who want enterprise or commercial support, [Docker Desktop](https://www.docker.com/products/docker-desktop/) and [Mirantis Container Runtime](https://www.mirantis.com/software/mirantis-container-runtime/) are the appropriate products for these use cases.
The releases are supported by the maintainers, community and users, on a best efforts basis only, and are not intended for customers who want enterprise or commercial support; Docker EE is the appropriate product for these use cases.

-----

@@ -2,17 +2,8 @@ package api // import "github.com/docker/docker/api"

// Common constants for daemon and client.
const (
	// DefaultVersion of the current REST API.
	DefaultVersion = "1.47"

	// MinSupportedAPIVersion is the minimum API version that can be supported
	// by the API server, specified as "major.minor". Note that the daemon
	// may be configured with a different minimum API version, as returned
	// in [github.com/docker/docker/api/types.Version.MinAPIVersion].
	//
	// API requests for API versions lower than the configured version produce
	// an error.
	MinSupportedAPIVersion = "1.24"
	// DefaultVersion of Current REST API
	DefaultVersion = "1.44"

	// NoBaseImageSpecifier is the symbol used by the FROM
	// command to specify that no base image is to be used.

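Editor's note (not part of the diff): a minimal sketch of how these two constants are meant to be used together with the versions helpers; the requestedVersion value below is purely hypothetical.

package main

import (
	"fmt"

	"github.com/docker/docker/api"
	"github.com/docker/docker/api/types/versions"
)

func main() {
	requestedVersion := "1.30" // hypothetical client-negotiated API version

	// Requests below the minimum or above the default are rejected by the daemon.
	if versions.LessThan(requestedVersion, api.MinSupportedAPIVersion) {
		fmt.Printf("version %s is older than the minimum supported %s\n", requestedVersion, api.MinSupportedAPIVersion)
		return
	}
	if versions.GreaterThan(requestedVersion, api.DefaultVersion) {
		fmt.Printf("version %s is newer than the daemon's default %s\n", requestedVersion, api.DefaultVersion)
		return
	}
	fmt.Printf("version %s is accepted\n", requestedVersion)
}
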
@@ -88,9 +88,11 @@ func (b *Backend) Build(ctx context.Context, config backend.BuildConfig) (string
		}
	}

	if imageID != "" && !useBuildKit {
	if !useBuildKit {
		stdout := config.ProgressWriter.StdoutFormatter
		_, _ = fmt.Fprintf(stdout, "Successfully built %s\n", stringid.TruncateID(imageID))
		fmt.Fprintf(stdout, "Successfully built %s\n", stringid.TruncateID(imageID))
	}
	if imageID != "" && !useBuildKit {
		err = tagImages(ctx, b.imageComponent, config.ProgressWriter.StdoutFormatter, image.ID(imageID), tags)
	}
	return imageID, err

34
api/server/errorhandler.go
Normal file
@@ -0,0 +1,34 @@
package server

import (
	"net/http"

	"github.com/docker/docker/api/server/httpstatus"
	"github.com/docker/docker/api/server/httputils"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/versions"
	"github.com/gorilla/mux"
	"google.golang.org/grpc/status"
)

// makeErrorHandler makes an HTTP handler that decodes a Docker error and
// returns it in the response.
func makeErrorHandler(err error) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		statusCode := httpstatus.FromError(err)
		vars := mux.Vars(r)
		if apiVersionSupportsJSONErrors(vars["version"]) {
			response := &types.ErrorResponse{
				Message: err.Error(),
			}
			_ = httputils.WriteJSON(w, statusCode, response)
		} else {
			http.Error(w, status.Convert(err).Message(), statusCode)
		}
	}
}

func apiVersionSupportsJSONErrors(version string) bool {
	const firstAPIVersionWithJSONErrors = "1.23"
	return version == "" || versions.GreaterThan(version, firstAPIVersionWithJSONErrors)
}
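Editor's note (not part of the diff): a standalone sketch of the version gate used above. jsonErrorsSupported here is an illustrative copy of the unexported helper, not the daemon code itself; it only assumes the public versions package.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/versions"
)

// jsonErrorsSupported mirrors apiVersionSupportsJSONErrors: an empty version
// (no version segment in the URL) or anything newer than 1.23 gets a JSON body,
// older clients get a plain-text error.
func jsonErrorsSupported(version string) bool {
	const firstAPIVersionWithJSONErrors = "1.23"
	return version == "" || versions.GreaterThan(version, firstAPIVersionWithJSONErrors)
}

func main() {
	for _, v := range []string{"", "1.22", "1.23", "1.24"} {
		fmt.Printf("version %q -> JSON errors: %v\n", v, jsonErrorsSupported(v))
	}
}
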
@@ -5,7 +5,7 @@ import (
	"fmt"
	"net/http"

	cerrdefs "github.com/containerd/errdefs"
	cerrdefs "github.com/containerd/containerd/errdefs"
	"github.com/containerd/log"
	"github.com/docker/distribution/registry/api/errcode"
	"github.com/docker/docker/errdefs"
@@ -24,37 +24,42 @@ func FromError(err error) int {
		return http.StatusInternalServerError
	}

	var statusCode int

	// Stop right there
	// Are you sure you should be adding a new error class here? Do one of the existing ones work?

	// Note that the below functions are already checking the error causal chain for matches.
	switch {
	case errdefs.IsNotFound(err):
		return http.StatusNotFound
		statusCode = http.StatusNotFound
	case errdefs.IsInvalidParameter(err):
		return http.StatusBadRequest
		statusCode = http.StatusBadRequest
	case errdefs.IsConflict(err):
		return http.StatusConflict
		statusCode = http.StatusConflict
	case errdefs.IsUnauthorized(err):
		return http.StatusUnauthorized
		statusCode = http.StatusUnauthorized
	case errdefs.IsUnavailable(err):
		return http.StatusServiceUnavailable
		statusCode = http.StatusServiceUnavailable
	case errdefs.IsForbidden(err):
		return http.StatusForbidden
		statusCode = http.StatusForbidden
	case errdefs.IsNotModified(err):
		return http.StatusNotModified
		statusCode = http.StatusNotModified
	case errdefs.IsNotImplemented(err):
		return http.StatusNotImplemented
		statusCode = http.StatusNotImplemented
	case errdefs.IsSystem(err) || errdefs.IsUnknown(err) || errdefs.IsDataLoss(err) || errdefs.IsDeadline(err) || errdefs.IsCancelled(err):
		return http.StatusInternalServerError
		statusCode = http.StatusInternalServerError
	default:
		if statusCode := statusCodeFromGRPCError(err); statusCode != http.StatusInternalServerError {
		statusCode = statusCodeFromGRPCError(err)
		if statusCode != http.StatusInternalServerError {
			return statusCode
		}
		if statusCode := statusCodeFromContainerdError(err); statusCode != http.StatusInternalServerError {
		statusCode = statusCodeFromContainerdError(err)
		if statusCode != http.StatusInternalServerError {
			return statusCode
		}
		if statusCode := statusCodeFromDistributionError(err); statusCode != http.StatusInternalServerError {
		statusCode = statusCodeFromDistributionError(err)
		if statusCode != http.StatusInternalServerError {
			return statusCode
		}
		if e, ok := err.(causer); ok {
@@ -66,9 +71,13 @@ func FromError(err error) int {
			"error": err,
			"error_type": fmt.Sprintf("%T", err),
		}).Debug("FIXME: Got an API for which error does not match any expected type!!!")

	return http.StatusInternalServerError
	}

	if statusCode == 0 {
		statusCode = http.StatusInternalServerError
	}

	return statusCode
}

// statusCodeFromGRPCError returns status code according to gRPC error

@@ -12,4 +12,5 @@ import (
// container configuration.
type ContainerDecoder interface {
	DecodeConfig(src io.Reader) (*container.Config, *container.HostConfig, *network.NetworkingConfig, error)
	DecodeHostConfig(src io.Reader) (*container.HostConfig, error)
}

@@ -1,17 +1,12 @@
package httputils // import "github.com/docker/docker/api/server/httputils"

import (
	"encoding/json"
	"fmt"
	"net/http"
	"strconv"
	"strings"

	"github.com/distribution/reference"
	"github.com/docker/docker/errdefs"
	"github.com/pkg/errors"

	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// BoolValue transforms a form value in different formats into a boolean type.
@@ -114,24 +109,3 @@ func ArchiveFormValues(r *http.Request, vars map[string]string) (ArchiveOptions,
	}
	return ArchiveOptions{name, path}, nil
}

// DecodePlatform decodes the OCI platform JSON string into a Platform struct.
func DecodePlatform(platformJSON string) (*ocispec.Platform, error) {
	var p ocispec.Platform

	if err := json.Unmarshal([]byte(platformJSON), &p); err != nil {
		return nil, errdefs.InvalidParameter(errors.Wrap(err, "failed to parse platform"))
	}

	hasAnyOptional := (p.Variant != "" || p.OSVersion != "" || len(p.OSFeatures) > 0)

	if p.OS == "" && p.Architecture == "" && hasAnyOptional {
		return nil, errdefs.InvalidParameter(errors.New("optional platform fields provided, but OS and Architecture are missing"))
	}

	if p.OS == "" || p.Architecture == "" {
		return nil, errdefs.InvalidParameter(errors.New("both OS and Architecture must be provided"))
	}

	return &p, nil
}
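Editor's note (not part of the diff): a small usage sketch for the DecodePlatform helper shown above, assuming the httputils package is imported from the tree at this commit.

package main

import (
	"fmt"

	"github.com/docker/docker/api/server/httputils"
)

func main() {
	// A well-formed platform: both OS and Architecture are present.
	p, err := httputils.DecodePlatform(`{"os":"linux","architecture":"amd64","variant":"v2"}`)
	fmt.Println(p, err)

	// Architecture missing: rejected with an invalid-parameter error.
	_, err = httputils.DecodePlatform(`{"os":"linux"}`)
	fmt.Println(err)
}
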
@@ -1,16 +1,9 @@
package httputils // import "github.com/docker/docker/api/server/httputils"

import (
	"encoding/json"
	"net/http"
	"net/url"
	"testing"

	"github.com/containerd/platforms"
	"github.com/docker/docker/errdefs"

	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"gotest.tools/v3/assert"
)

func TestBoolValue(t *testing.T) {
@@ -110,23 +103,3 @@ func TestInt64ValueOrDefaultWithError(t *testing.T) {
		t.Fatal("Expected an error.")
	}
}

func TestParsePlatformInvalid(t *testing.T) {
	for _, tc := range []ocispec.Platform{
		{
			OSVersion: "1.2.3",
			OSFeatures: []string{"a", "b"},
		},
		{OSVersion: "12.0"},
		{OS: "linux"},
		{Architecture: "amd64"},
	} {
		t.Run(platforms.Format(tc), func(t *testing.T) {
			js, err := json.Marshal(tc)
			assert.NilError(t, err)

			_, err = DecodePlatform(string(js))
			assert.Check(t, errdefs.IsInvalidParameter(err))
		})
	}
}

@@ -4,7 +4,6 @@ import (
	"context"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"sort"

@@ -17,11 +16,7 @@ import (

// WriteLogStream writes an encoded byte stream of log messages from the
// messages channel, multiplexing them with a stdcopy.Writer if mux is true
func WriteLogStream(_ context.Context, w http.ResponseWriter, msgs <-chan *backend.LogMessage, config *container.LogsOptions, mux bool) {
	// See https://github.com/moby/moby/issues/47448
	// Trigger headers to be written immediately.
	w.WriteHeader(http.StatusOK)

func WriteLogStream(_ context.Context, w io.Writer, msgs <-chan *backend.LogMessage, config *container.LogsOptions, mux bool) {
	wf := ioutils.NewWriteFlusher(w)
	defer wf.Close()

@@ -10,15 +10,11 @@ import (

// CORSMiddleware injects CORS headers to each request
// when it's configured.
//
// Deprecated: CORS headers should not be set on the API. This feature will be removed in the next release.
type CORSMiddleware struct {
	defaultHeaders string
}

// NewCORSMiddleware creates a new CORSMiddleware with default headers.
//
// Deprecated: CORS headers should not be set on the API. This feature will be removed in the next release.
func NewCORSMiddleware(d string) CORSMiddleware {
	return CORSMiddleware{defaultHeaders: d}
}

@@ -6,7 +6,6 @@ import (
	"net/http"
	"runtime"

	"github.com/docker/docker/api"
	"github.com/docker/docker/api/server/httputils"
	"github.com/docker/docker/api/types/versions"
)
@@ -14,40 +13,19 @@ import (
// VersionMiddleware is a middleware that
// validates the client and server versions.
type VersionMiddleware struct {
	serverVersion string

	// defaultAPIVersion is the default API version provided by the API server,
	// specified as "major.minor". It is usually configured to the latest API
	// version [github.com/docker/docker/api.DefaultVersion].
	//
	// API requests for API versions greater than this version are rejected by
	// the server and produce a [versionUnsupportedError].
	defaultAPIVersion string

	// minAPIVersion is the minimum API version provided by the API server,
	// specified as "major.minor".
	//
	// API requests for API versions lower than this version are rejected by
	// the server and produce a [versionUnsupportedError].
	minAPIVersion string
	serverVersion string
	defaultVersion string
	minVersion string
}

// NewVersionMiddleware creates a VersionMiddleware with the given versions.
func NewVersionMiddleware(serverVersion, defaultAPIVersion, minAPIVersion string) (*VersionMiddleware, error) {
	if versions.LessThan(defaultAPIVersion, api.MinSupportedAPIVersion) || versions.GreaterThan(defaultAPIVersion, api.DefaultVersion) {
		return nil, fmt.Errorf("invalid default API version (%s): must be between %s and %s", defaultAPIVersion, api.MinSupportedAPIVersion, api.DefaultVersion)
// NewVersionMiddleware creates a new VersionMiddleware
// with the default versions.
func NewVersionMiddleware(s, d, m string) VersionMiddleware {
	return VersionMiddleware{
		serverVersion: s,
		defaultVersion: d,
		minVersion: m,
	}
	if versions.LessThan(minAPIVersion, api.MinSupportedAPIVersion) || versions.GreaterThan(minAPIVersion, api.DefaultVersion) {
		return nil, fmt.Errorf("invalid minimum API version (%s): must be between %s and %s", minAPIVersion, api.MinSupportedAPIVersion, api.DefaultVersion)
	}
	if versions.GreaterThan(minAPIVersion, defaultAPIVersion) {
		return nil, fmt.Errorf("invalid API version: the minimum API version (%s) is higher than the default version (%s)", minAPIVersion, defaultAPIVersion)
	}
	return &VersionMiddleware{
		serverVersion: serverVersion,
		defaultAPIVersion: defaultAPIVersion,
		minAPIVersion: minAPIVersion,
	}, nil
}

type versionUnsupportedError struct {
@@ -67,18 +45,18 @@ func (e versionUnsupportedError) InvalidParameter() {}
func (v VersionMiddleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
		w.Header().Set("Server", fmt.Sprintf("Docker/%s (%s)", v.serverVersion, runtime.GOOS))
		w.Header().Set("API-Version", v.defaultAPIVersion)
		w.Header().Set("API-Version", v.defaultVersion)
		w.Header().Set("OSType", runtime.GOOS)

		apiVersion := vars["version"]
		if apiVersion == "" {
			apiVersion = v.defaultAPIVersion
			apiVersion = v.defaultVersion
		}
		if versions.LessThan(apiVersion, v.minAPIVersion) {
			return versionUnsupportedError{version: apiVersion, minVersion: v.minAPIVersion}
		if versions.LessThan(apiVersion, v.minVersion) {
			return versionUnsupportedError{version: apiVersion, minVersion: v.minVersion}
		}
		if versions.GreaterThan(apiVersion, v.defaultAPIVersion) {
			return versionUnsupportedError{version: apiVersion, maxVersion: v.defaultAPIVersion}
		if versions.GreaterThan(apiVersion, v.defaultVersion) {
			return versionUnsupportedError{version: apiVersion, maxVersion: v.defaultVersion}
		}
		ctx = context.WithValue(ctx, httputils.APIVersionKey{}, apiVersion)
		return handler(ctx, w, r, vars)
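Editor's note (not part of the diff): with the changed signature the constructor now returns an error, so callers have to handle it. A minimal sketch, where the "27.0.0-dev" server version string is purely hypothetical.

package main

import (
	"log"

	"github.com/docker/docker/api"
	"github.com/docker/docker/api/server/middleware"
)

func main() {
	// The constructor validates that min <= default and that both fall inside
	// the supported range, instead of silently accepting bad versions.
	vm, err := middleware.NewVersionMiddleware("27.0.0-dev", api.DefaultVersion, api.MinSupportedAPIVersion)
	if err != nil {
		log.Fatal(err)
	}
	_ = vm // vm.WrapHandler(...) would then be installed on the API server's handler chain.
}
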
@@ -2,82 +2,27 @@ package middleware // import "github.com/docker/docker/api/server/middleware"

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
	"runtime"
	"testing"

	"github.com/docker/docker/api"
	"github.com/docker/docker/api/server/httputils"
	"gotest.tools/v3/assert"
	is "gotest.tools/v3/assert/cmp"
)

func TestNewVersionMiddlewareValidation(t *testing.T) {
	tests := []struct {
		doc, defaultVersion, minVersion, expectedErr string
	}{
		{
			doc: "defaults",
			defaultVersion: api.DefaultVersion,
			minVersion: api.MinSupportedAPIVersion,
		},
		{
			doc: "invalid default lower than min",
			defaultVersion: api.MinSupportedAPIVersion,
			minVersion: api.DefaultVersion,
			expectedErr: fmt.Sprintf("invalid API version: the minimum API version (%s) is higher than the default version (%s)", api.DefaultVersion, api.MinSupportedAPIVersion),
		},
		{
			doc: "invalid default too low",
			defaultVersion: "0.1",
			minVersion: api.MinSupportedAPIVersion,
			expectedErr: fmt.Sprintf("invalid default API version (0.1): must be between %s and %s", api.MinSupportedAPIVersion, api.DefaultVersion),
		},
		{
			doc: "invalid default too high",
			defaultVersion: "9999.9999",
			minVersion: api.DefaultVersion,
			expectedErr: fmt.Sprintf("invalid default API version (9999.9999): must be between %s and %s", api.MinSupportedAPIVersion, api.DefaultVersion),
		},
		{
			doc: "invalid minimum too low",
			defaultVersion: api.MinSupportedAPIVersion,
			minVersion: "0.1",
			expectedErr: fmt.Sprintf("invalid minimum API version (0.1): must be between %s and %s", api.MinSupportedAPIVersion, api.DefaultVersion),
		},
		{
			doc: "invalid minimum too high",
			defaultVersion: api.DefaultVersion,
			minVersion: "9999.9999",
			expectedErr: fmt.Sprintf("invalid minimum API version (9999.9999): must be between %s and %s", api.MinSupportedAPIVersion, api.DefaultVersion),
		},
	}

	for _, tc := range tests {
		tc := tc
		t.Run(tc.doc, func(t *testing.T) {
			_, err := NewVersionMiddleware("1.2.3", tc.defaultVersion, tc.minVersion)
			if tc.expectedErr == "" {
				assert.Check(t, err)
			} else {
				assert.Check(t, is.Error(err, tc.expectedErr))
			}
		})
	}
}

func TestVersionMiddlewareVersion(t *testing.T) {
	expectedVersion := "<not set>"
	defaultVersion := "1.10.0"
	minVersion := "1.2.0"
	expectedVersion := defaultVersion
	handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
		v := httputils.VersionFromContext(ctx)
		assert.Check(t, is.Equal(expectedVersion, v))
		return nil
	}

	m, err := NewVersionMiddleware("1.2.3", api.DefaultVersion, api.MinSupportedAPIVersion)
	assert.NilError(t, err)
	m := NewVersionMiddleware(defaultVersion, defaultVersion, minVersion)
	h := m.WrapHandler(handler)

	req, _ := http.NewRequest(http.MethodGet, "/containers/json", nil)
@@ -90,19 +35,19 @@ func TestVersionMiddlewareVersion(t *testing.T) {
		errString string
	}{
		{
			expectedVersion: api.DefaultVersion,
			expectedVersion: "1.10.0",
		},
		{
			reqVersion: api.MinSupportedAPIVersion,
			expectedVersion: api.MinSupportedAPIVersion,
			reqVersion: "1.9.0",
			expectedVersion: "1.9.0",
		},
		{
			reqVersion: "0.1",
			errString: fmt.Sprintf("client version 0.1 is too old. Minimum supported API version is %s, please upgrade your client to a newer version", api.MinSupportedAPIVersion),
			errString: "client version 0.1 is too old. Minimum supported API version is 1.2.0, please upgrade your client to a newer version",
		},
		{
			reqVersion: "9999.9999",
			errString: fmt.Sprintf("client version 9999.9999 is too new. Maximum supported API version is %s", api.DefaultVersion),
			errString: "client version 9999.9999 is too new. Maximum supported API version is 1.10.0",
		},
	}

@@ -126,8 +71,9 @@ func TestVersionMiddlewareWithErrorsReturnsHeaders(t *testing.T) {
		return nil
	}

	m, err := NewVersionMiddleware("1.2.3", api.DefaultVersion, api.MinSupportedAPIVersion)
	assert.NilError(t, err)
	defaultVersion := "1.10.0"
	minVersion := "1.2.0"
	m := NewVersionMiddleware(defaultVersion, defaultVersion, minVersion)
	h := m.WrapHandler(handler)

	req, _ := http.NewRequest(http.MethodGet, "/containers/json", nil)
@@ -135,12 +81,12 @@ func TestVersionMiddlewareWithErrorsReturnsHeaders(t *testing.T) {
	ctx := context.Background()

	vars := map[string]string{"version": "0.1"}
	err = h(ctx, resp, req, vars)
	err := h(ctx, resp, req, vars)
	assert.Check(t, is.ErrorContains(err, ""))

	hdr := resp.Result().Header
	assert.Check(t, is.Contains(hdr.Get("Server"), "Docker/1.2.3"))
	assert.Check(t, is.Contains(hdr.Get("Server"), "Docker/"+defaultVersion))
	assert.Check(t, is.Contains(hdr.Get("Server"), runtime.GOOS))
	assert.Check(t, is.Equal(hdr.Get("API-Version"), api.DefaultVersion))
	assert.Check(t, is.Equal(hdr.Get("API-Version"), defaultVersion))
	assert.Check(t, is.Equal(hdr.Get("OSType"), runtime.GOOS))
}

@@ -25,6 +25,7 @@ import (
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/streamformatter"
	units "github.com/docker/go-units"
	"github.com/pkg/errors"
)

@@ -41,7 +42,6 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBui
		SuppressOutput: httputils.BoolValue(r, "q"),
		NoCache: httputils.BoolValue(r, "nocache"),
		ForceRemove: httputils.BoolValue(r, "forcerm"),
		PullParent: httputils.BoolValue(r, "pull"),
		MemorySwap: httputils.Int64ValueOrZero(r, "memswap"),
		Memory: httputils.Int64ValueOrZero(r, "memory"),
		CPUShares: httputils.Int64ValueOrZero(r, "cpushares"),
@@ -66,14 +66,17 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBui
		return nil, invalidParam{errors.New("security options are not supported on " + runtime.GOOS)}
	}

	if httputils.BoolValue(r, "forcerm") {
	version := httputils.VersionFromContext(ctx)
	if httputils.BoolValue(r, "forcerm") && versions.GreaterThanOrEqualTo(version, "1.12") {
		options.Remove = true
	} else if r.FormValue("rm") == "" {
	} else if r.FormValue("rm") == "" && versions.GreaterThanOrEqualTo(version, "1.12") {
		options.Remove = true
	} else {
		options.Remove = httputils.BoolValue(r, "rm")
	}
	version := httputils.VersionFromContext(ctx)
	if httputils.BoolValue(r, "pull") && versions.GreaterThanOrEqualTo(version, "1.16") {
		options.PullParent = true
	}
	if versions.GreaterThanOrEqualTo(version, "1.32") {
		options.Platform = r.FormValue("platform")
	}
@@ -104,7 +107,7 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBui
	}

	if ulimitsJSON := r.FormValue("ulimits"); ulimitsJSON != "" {
		buildUlimits := []*container.Ulimit{}
		buildUlimits := []*units.Ulimit{}
		if err := json.Unmarshal([]byte(ulimitsJSON), &buildUlimits); err != nil {
			return nil, invalidParam{errors.Wrap(err, "error reading ulimit settings")}
		}

@@ -14,19 +14,20 @@ import (

// execBackend includes functions to implement to provide exec functionality.
type execBackend interface {
	ContainerExecCreate(name string, options *container.ExecOptions) (string, error)
	ContainerExecCreate(name string, config *types.ExecConfig) (string, error)
	ContainerExecInspect(id string) (*backend.ExecInspect, error)
	ContainerExecResize(name string, height, width int) error
	ContainerExecStart(ctx context.Context, name string, options backend.ExecStartConfig) error
	ContainerExecStart(ctx context.Context, name string, options container.ExecStartOptions) error
	ExecExists(name string) (bool, error)
}

// copyBackend includes functions to implement to provide container copy functionality.
type copyBackend interface {
	ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *container.PathStat, err error)
	ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error)
	ContainerCopy(name string, res string) (io.ReadCloser, error)
	ContainerExport(ctx context.Context, name string, out io.Writer) error
	ContainerExtractToDir(name, path string, copyUIDGID, noOverwriteDirNonDir bool, content io.Reader) error
	ContainerStatPath(name string, path string) (stat *container.PathStat, err error)
	ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error)
}

// stateBackend includes functions to implement to provide container state lifecycle functionality.
@@ -38,7 +39,7 @@ type stateBackend interface {
	ContainerResize(name string, height, width int) error
	ContainerRestart(ctx context.Context, name string, options container.StopOptions) error
	ContainerRm(name string, config *backend.ContainerRmConfig) error
	ContainerStart(ctx context.Context, name string, checkpoint string, checkpointDir string) error
	ContainerStart(ctx context.Context, name string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error
	ContainerStop(ctx context.Context, name string, options container.StopOptions) error
	ContainerUnpause(name string) error
	ContainerUpdate(name string, hostConfig *container.HostConfig) (container.ContainerUpdateOKBody, error)
@@ -62,7 +63,7 @@ type attachBackend interface {

// systemBackend includes functions to implement to provide system wide containers functionality
type systemBackend interface {
	ContainersPrune(ctx context.Context, pruneFilters filters.Args) (*container.PruneReport, error)
	ContainersPrune(ctx context.Context, pruneFilters filters.Args) (*types.ContainersPruneReport, error)
}

type commitBackend interface {

@@ -56,6 +56,7 @@ func (r *containerRouter) initRoutes() {
		router.NewPostRoute("/containers/{name:.*}/wait", r.postContainersWait),
		router.NewPostRoute("/containers/{name:.*}/resize", r.postContainersResize),
		router.NewPostRoute("/containers/{name:.*}/attach", r.postContainersAttach),
		router.NewPostRoute("/containers/{name:.*}/copy", r.postContainersCopy), // Deprecated since 1.8 (API v1.20), errors out since 1.12 (API v1.24)
		router.NewPostRoute("/containers/{name:.*}/exec", r.postContainerExecCreate),
		router.NewPostRoute("/exec/{name:.*}/start", r.postContainerExecStart),
		router.NewPostRoute("/exec/{name:.*}/resize", r.postContainerExecResize),

@@ -10,8 +10,8 @@ import (
	"strconv"
	"strings"

	"github.com/containerd/containerd/platforms"
	"github.com/containerd/log"
	"github.com/containerd/platforms"
	"github.com/docker/docker/api/server/httpstatus"
	"github.com/docker/docker/api/server/httputils"
	"github.com/docker/docker/api/types"
@@ -22,14 +22,11 @@ import (
	"github.com/docker/docker/api/types/network"
	"github.com/docker/docker/api/types/versions"
	containerpkg "github.com/docker/docker/container"
	networkSettings "github.com/docker/docker/daemon/network"
	"github.com/docker/docker/errdefs"
	"github.com/docker/docker/libnetwork/netlabel"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/runconfig"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	"go.opentelemetry.io/otel"
	"golang.org/x/net/websocket"
)

@@ -42,13 +39,13 @@ func (s *containerRouter) postCommit(ctx context.Context, w http.ResponseWriter,
		return err
	}

	// FIXME(thaJeztah): change this to unmarshal just [container.Config]:
	// The commit endpoint accepts a [container.Config], but the decoder uses a
	// [container.CreateRequest], which is a superset, and also contains
	// [container.HostConfig] and [network.NetworkConfig]. Those structs
	// are discarded here, but decoder.DecodeConfig also performs validation,
	// so a request containing those additional fields would result in a
	// validation error.
	// TODO: remove pause arg, and always pause in backend
	pause := httputils.BoolValue(r, "pause")
	version := httputils.VersionFromContext(ctx)
	if r.FormValue("pause") == "" && versions.GreaterThanOrEqualTo(version, "1.13") {
		pause = true
	}

	config, _, _, err := s.decoder.DecodeConfig(r.Body)
	if err != nil && !errors.Is(err, io.EOF) { // Do not fail if body is empty.
		return err
@@ -60,7 +57,7 @@ func (s *containerRouter) postCommit(ctx context.Context, w http.ResponseWriter,
	}

	imgID, err := s.backend.CreateImageFromContainer(ctx, r.Form.Get("container"), &backend.CreateImageConfig{
		Pause: httputils.BoolValueOrDefault(r, "pause", true), // TODO(dnephin): remove pause arg, and always pause in backend
		Pause: pause,
		Tag: ref,
		Author: r.Form.Get("author"),
		Comment: r.Form.Get("comment"),
@@ -104,15 +101,6 @@ func (s *containerRouter) getContainersJSON(ctx context.Context, w http.Response
		return err
	}

	version := httputils.VersionFromContext(ctx)

	if versions.LessThan(version, "1.46") {
		for _, c := range containers {
			// Ignore HostConfig.Annotations because it was added in API v1.46.
			c.HostConfig.Annotations = nil
		}
	}

	return httputils.WriteJSON(w, http.StatusOK, containers)
}

@@ -130,20 +118,14 @@ func (s *containerRouter) getContainersStats(ctx context.Context, w http.Respons
		oneShot = httputils.BoolValueOrDefault(r, "one-shot", false)
	}

	return s.backend.ContainerStats(ctx, vars["name"], &backend.ContainerStatsConfig{
		Stream: stream,
		OneShot: oneShot,
		OutStream: func() io.Writer {
			// Assume that when this is called the request is OK.
			w.WriteHeader(http.StatusOK)
			if !stream {
				return w
			}
			wf := ioutils.NewWriteFlusher(w)
			wf.Flush()
			return wf
		},
	})
	config := &backend.ContainerStatsConfig{
		Stream: stream,
		OneShot: oneShot,
		OutStream: w,
		Version: httputils.VersionFromContext(ctx),
	}

	return s.backend.ContainerStats(ctx, vars["name"], config)
}

func (s *containerRouter) getContainersLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
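Editor's note (not part of the diff): the stats change above turns OutStream into a function so the 200 status line is only committed once the backend actually starts writing, leaving earlier errors free to pick their own status code. A self-contained illustration of that pattern using only the standard library; the handler and URL here are hypothetical, not daemon code.

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	h := func(w http.ResponseWriter, r *http.Request) {
		// Lazily commit the 200 header only when output is really produced.
		outStream := func() http.ResponseWriter {
			w.WriteHeader(http.StatusOK)
			return w
		}
		if r.URL.Query().Get("fail") == "1" {
			http.Error(w, "boom", http.StatusBadRequest) // header not yet committed, 400 still possible
			return
		}
		fmt.Fprintln(outStream(), "stats...")
	}

	rec := httptest.NewRecorder()
	h(rec, httptest.NewRequest(http.MethodGet, "/stats?fail=1", nil))
	fmt.Println(rec.Code) // 400, because the 200 header was never written
}
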
@@ -196,27 +178,48 @@ func (s *containerRouter) getContainersExport(ctx context.Context, w http.Respon
	return s.backend.ContainerExport(ctx, vars["name"], w)
}

func (s *containerRouter) postContainersStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	ctx, span := otel.Tracer("").Start(ctx, "containerRouter.postContainersStart")
	defer span.End()
type bodyOnStartError struct{}

func (bodyOnStartError) Error() string {
	return "starting container with non-empty request body was deprecated since API v1.22 and removed in v1.24"
}

func (bodyOnStartError) InvalidParameter() {}

func (s *containerRouter) postContainersStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	// If contentLength is -1, we can assumed chunked encoding
	// or more technically that the length is unknown
	// https://golang.org/src/pkg/net/http/request.go#L139
	// net/http otherwise seems to swallow any headers related to chunked encoding
	// including r.TransferEncoding
	// allow a nil body for backwards compatibility
	//

	version := httputils.VersionFromContext(ctx)
	var hostConfig *container.HostConfig
	// A non-nil json object is at least 7 characters.
	if r.ContentLength > 7 || r.ContentLength == -1 {
		return errdefs.InvalidParameter(errors.New("starting container with non-empty request body was deprecated since API v1.22 and removed in v1.24"))
		if versions.GreaterThanOrEqualTo(version, "1.24") {
			return bodyOnStartError{}
		}

		if err := httputils.CheckForJSON(r); err != nil {
			return err
		}

		c, err := s.decoder.DecodeHostConfig(r.Body)
		if err != nil {
			return err
		}
		hostConfig = c
	}

	if err := httputils.ParseForm(r); err != nil {
		return err
	}

	if err := s.backend.ContainerStart(ctx, vars["name"], r.Form.Get("checkpoint"), r.Form.Get("checkpoint-dir")); err != nil {
	checkpoint := r.Form.Get("checkpoint")
	checkpointDir := r.Form.Get("checkpoint-dir")
	if err := s.backend.ContainerStart(ctx, vars["name"], hostConfig, checkpoint, checkpointDir); err != nil {
		return err
	}

@@ -252,14 +255,25 @@ func (s *containerRouter) postContainersStop(ctx context.Context, w http.Respons
	return nil
}

func (s *containerRouter) postContainersKill(_ context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
func (s *containerRouter) postContainersKill(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if err := httputils.ParseForm(r); err != nil {
		return err
	}

	name := vars["name"]
	if err := s.backend.ContainerKill(name, r.Form.Get("signal")); err != nil {
		return errors.Wrapf(err, "cannot kill container: %s", name)
		var isStopped bool
		if errdefs.IsConflict(err) {
			isStopped = true
		}

		// Return error that's not caused because the container is stopped.
		// Return error if the container is not running and the api is >= 1.20
		// to keep backwards compatibility.
		version := httputils.VersionFromContext(ctx)
		if versions.GreaterThanOrEqualTo(version, "1.20") || !isStopped {
			return errors.Wrapf(err, "Cannot kill container: %s", name)
		}
	}

	w.WriteHeader(http.StatusNoContent)
@@ -487,29 +501,18 @@ func (s *containerRouter) postContainersCreate(ctx context.Context, w http.Respo
	if hostConfig == nil {
		hostConfig = &container.HostConfig{}
	}
	if hostConfig.NetworkMode == "" {
		hostConfig.NetworkMode = "default"
	}
	if networkingConfig == nil {
		networkingConfig = &network.NetworkingConfig{}
	}
	if networkingConfig.EndpointsConfig == nil {
		networkingConfig.EndpointsConfig = make(map[string]*network.EndpointSettings)
	}
	// The NetworkMode "default" is used as a way to express a container should
	// be attached to the OS-dependant default network, in an OS-independent
	// way. Doing this conversion as soon as possible ensures we have less
	// NetworkMode to handle down the path (including in the
	// backward-compatibility layer we have just below).
	//
	// Note that this is not the only place where this conversion has to be
	// done (as there are various other places where containers get created).
	if hostConfig.NetworkMode == "" || hostConfig.NetworkMode.IsDefault() {
		hostConfig.NetworkMode = networkSettings.DefaultNetwork
		if nw, ok := networkingConfig.EndpointsConfig[network.NetworkDefault]; ok {
			networkingConfig.EndpointsConfig[hostConfig.NetworkMode.NetworkName()] = nw
			delete(networkingConfig.EndpointsConfig, network.NetworkDefault)
		}
	}

	version := httputils.VersionFromContext(ctx)
	adjustCPUShares := versions.LessThan(version, "1.19")

	// When using API 1.24 and under, the client is responsible for removing the container
	if versions.LessThan(version, "1.25") {
@@ -599,27 +602,17 @@ func (s *containerRouter) postContainersCreate(ctx context.Context, w http.Respo
		hostConfig.Annotations = nil
	}

	defaultReadOnlyNonRecursive := false
	if versions.LessThan(version, "1.44") {
		if config.Healthcheck != nil {
			// StartInterval was added in API 1.44
			config.Healthcheck.StartInterval = 0
		}

		// Set ReadOnlyNonRecursive to true because it was added in API 1.44
		// Before that all read-only mounts were non-recursive.
		// Keep that behavior for clients on older APIs.
		defaultReadOnlyNonRecursive = true

		for _, m := range hostConfig.Mounts {
			if m.Type == mount.TypeBind {
				if m.BindOptions != nil && m.BindOptions.ReadOnlyForceRecursive {
					// NOTE: that technically this is a breaking change for older
					// API versions, and we should ignore the new field.
					// However, this option may be incorrectly set by a client with
					// the expectation that the failing to apply recursive read-only
					// is enforced, so we decided to produce an error instead,
					// instead of silently ignoring.
				if m.BindOptions != nil {
					// Ignore ReadOnlyNonRecursive because it was added in API 1.44.
					m.BindOptions.ReadOnlyNonRecursive = false
					if m.BindOptions.ReadOnlyForceRecursive {
						return errdefs.InvalidParameter(errors.New("BindOptions.ReadOnlyForceRecursive needs API v1.44 or newer"))
					}
				}
@@ -635,14 +628,6 @@ func (s *containerRouter) postContainersCreate(ctx context.Context, w http.Respo
		}
	}

	if versions.LessThan(version, "1.45") {
		for _, m := range hostConfig.Mounts {
			if m.VolumeOptions != nil && m.VolumeOptions.Subpath != "" {
				return errdefs.InvalidParameter(errors.New("VolumeOptions.Subpath needs API v1.45 or newer"))
			}
		}
	}

	var warnings []string
	if warn, err := handleMACAddressBC(config, hostConfig, networkingConfig, version); err != nil {
		return err
@@ -650,12 +635,6 @@ func (s *containerRouter) postContainersCreate(ctx context.Context, w http.Respo
		warnings = append(warnings, warn)
	}

	if warn, err := handleSysctlBC(hostConfig, networkingConfig, version); err != nil {
		return err
	} else if warn != "" {
		warnings = append(warnings, warn)
	}

	if hostConfig.PidsLimit != nil && *hostConfig.PidsLimit <= 0 {
		// Don't set a limit if either no limit was specified, or "unlimited" was
		// explicitly set.
@@ -665,12 +644,12 @@ func (s *containerRouter) postContainersCreate(ctx context.Context, w http.Respo
	}

	ccr, err := s.backend.ContainerCreate(ctx, backend.ContainerCreateConfig{
		Name: name,
		Config: config,
		HostConfig: hostConfig,
		NetworkingConfig: networkingConfig,
		Platform: platform,
		DefaultReadOnlyNonRecursive: defaultReadOnlyNonRecursive,
		Name: name,
		Config: config,
		HostConfig: hostConfig,
		NetworkingConfig: networkingConfig,
		AdjustCPUShares: adjustCPUShares,
		Platform: platform,
	})
	if err != nil {
		return err
@@ -683,46 +662,42 @@ func (s *containerRouter) postContainersCreate(ctx context.Context, w http.Respo
// networkingConfig to set the endpoint-specific MACAddress field introduced in API v1.44. It returns a warning message
// or an error if the container-wide field was specified for API >= v1.44.
func handleMACAddressBC(config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, version string) (string, error) {
	if config.MacAddress == "" { //nolint:staticcheck // ignore SA1019: field is deprecated, but still used on API < v1.44.
		return "", nil
	}

	deprecatedMacAddress := config.MacAddress //nolint:staticcheck // ignore SA1019: field is deprecated, but still used on API < v1.44.

	// For older versions of the API, migrate the container-wide MAC address to EndpointsConfig.
	if versions.LessThan(version, "1.44") {
		if deprecatedMacAddress == "" {
			// If a MAC address is supplied in EndpointsConfig, discard it because the old API
			// would have ignored it.
			for _, ep := range networkingConfig.EndpointsConfig {
				ep.MacAddress = ""
		// The container-wide MacAddress parameter is deprecated and should now be specified in EndpointsConfig.
		if hostConfig.NetworkMode.IsDefault() || hostConfig.NetworkMode.IsBridge() || hostConfig.NetworkMode.IsUserDefined() {
			nwName := hostConfig.NetworkMode.NetworkName()
			if _, ok := networkingConfig.EndpointsConfig[nwName]; !ok {
				networkingConfig.EndpointsConfig[nwName] = &network.EndpointSettings{}
			}
			return "", nil
			// Overwrite the config: either the endpoint's MacAddress was set by the user on API < v1.44, which
			// must be ignored, or migrate the top-level MacAddress to the endpoint's config.
			networkingConfig.EndpointsConfig[nwName].MacAddress = deprecatedMacAddress
		}
		if !hostConfig.NetworkMode.IsBridge() && !hostConfig.NetworkMode.IsUserDefined() {
		if !hostConfig.NetworkMode.IsDefault() && !hostConfig.NetworkMode.IsBridge() && !hostConfig.NetworkMode.IsUserDefined() {
			return "", runconfig.ErrConflictContainerNetworkAndMac
		}

		epConfig, err := epConfigForNetMode(version, hostConfig.NetworkMode, networkingConfig)
		if err != nil {
			return "", err
		}
		epConfig.MacAddress = deprecatedMacAddress
		return "", nil
	}

	// The container-wide MacAddress parameter is deprecated and should now be specified in EndpointsConfig.
	if deprecatedMacAddress == "" {
		return "", nil
	}
	var warning string
	if hostConfig.NetworkMode.IsBridge() || hostConfig.NetworkMode.IsUserDefined() {
		ep, err := epConfigForNetMode(version, hostConfig.NetworkMode, networkingConfig)
		if err != nil {
			return "", errors.Wrap(err, "unable to migrate container-wide MAC address to a specific network")
	if hostConfig.NetworkMode.IsDefault() || hostConfig.NetworkMode.IsBridge() || hostConfig.NetworkMode.IsUserDefined() {
		nwName := hostConfig.NetworkMode.NetworkName()
		if _, ok := networkingConfig.EndpointsConfig[nwName]; !ok {
			networkingConfig.EndpointsConfig[nwName] = &network.EndpointSettings{}
		}
		// ep is the endpoint that needs the container-wide MAC address; migrate the address
		// to it, or bail out if there's a mismatch.

		ep := networkingConfig.EndpointsConfig[nwName]
		if ep.MacAddress == "" {
			ep.MacAddress = deprecatedMacAddress
		} else if ep.MacAddress != deprecatedMacAddress {
			return "", errdefs.InvalidParameter(errors.New("the container-wide MAC address must match the endpoint-specific MAC address for the main network, or be left empty"))
			return "", errdefs.InvalidParameter(errors.New("the container-wide MAC address should match the endpoint-specific MAC address for the main network or should be left empty"))
		}
	}
	warning = "The container-wide MacAddress field is now deprecated. It should be specified in EndpointsConfig instead."
@@ -731,148 +706,6 @@ func handleMACAddressBC(config *container.Config, hostConfig *container.HostConf
	return warning, nil
}

// handleSysctlBC migrates top level network endpoint-specific '--sysctl'
// settings to an DriverOpts for an endpoint. This is necessary because sysctls
// are applied during container task creation, but sysctls that name an interface
// (for example 'net.ipv6.conf.eth0.forwarding') cannot be applied until the
// interface has been created. So, these settings are removed from hostConfig.Sysctls
// and added to DriverOpts[netlabel.EndpointSysctls].
//
// Because interface names ('ethN') are allocated sequentially, and the order of
// network connections is not deterministic on container restart, only 'eth0'
// would work reliably in a top-level '--sysctl' option, and then only when
// there's a single initial network connection. So, settings for 'eth0' are
// migrated to the primary interface, identified by 'hostConfig.NetworkMode'.
// Settings for other interfaces are treated as errors.
//
// In the DriverOpts, because the interface name cannot be determined in advance, the
// interface name is replaced by "IFNAME". For example, 'net.ipv6.conf.eth0.forwarding'
// becomes 'net.ipv6.conf.IFNAME.forwarding'. The value in DriverOpts is a
// comma-separated list.
//
// A warning is generated when settings are migrated.
func handleSysctlBC(
	hostConfig *container.HostConfig,
	netConfig *network.NetworkingConfig,
	version string,
) (string, error) {
	if !hostConfig.NetworkMode.IsPrivate() {
		return "", nil
	}

	var ep *network.EndpointSettings
	var toDelete []string
	var netIfSysctls []string
	for k, v := range hostConfig.Sysctls {
		// If the sysctl name matches "net.*.*.eth0.*" ...
		if spl := strings.SplitN(k, ".", 5); len(spl) == 5 && spl[0] == "net" && strings.HasPrefix(spl[3], "eth") {
			netIfSysctl := fmt.Sprintf("net.%s.%s.IFNAME.%s=%s", spl[1], spl[2], spl[4], v)
			// Find the EndpointConfig to migrate settings to, if not already found.
			if ep == nil {
				/* TODO(robmry) - apply this to the API version used in 28.0.0
				// Per-endpoint sysctls were introduced in API version 1.46. Migration is
				// needed, but refuse to do it automatically for newer versions of the API.
				if versions.GreaterThan(version, "1.??") {
					return "", fmt.Errorf("interface specific sysctl setting %q must be supplied using driver option '%s'",
						k, netlabel.EndpointSysctls)
				}
				*/
				var err error
				ep, err = epConfigForNetMode(version, hostConfig.NetworkMode, netConfig)
				if err != nil {
					return "", fmt.Errorf("unable to find a network for sysctl %s: %w", k, err)
				}
			}
			// Only try to migrate settings for "eth0", anything else would always
			// have behaved unpredictably.
			if spl[3] != "eth0" {
				return "", fmt.Errorf(`unable to determine network endpoint for sysctl %s, use driver option '%s' to set per-interface sysctls`,
					k, netlabel.EndpointSysctls)
			}
			// Prepare the migration.
			toDelete = append(toDelete, k)
			netIfSysctls = append(netIfSysctls, netIfSysctl)
		}
	}
	if ep == nil {
		return "", nil
	}

	newDriverOpt := strings.Join(netIfSysctls, ",")
	warning := fmt.Sprintf(`Migrated sysctl %q to DriverOpts{%q:%q}.`,
		strings.Join(toDelete, ","),
		netlabel.EndpointSysctls, newDriverOpt)

	// Append existing per-endpoint sysctls to the migrated sysctls (give priority
	// to per-endpoint settings).
	if ep.DriverOpts == nil {
		ep.DriverOpts = map[string]string{}
	}
	if oldDriverOpt, ok := ep.DriverOpts[netlabel.EndpointSysctls]; ok {
		newDriverOpt += "," + oldDriverOpt
	}
	ep.DriverOpts[netlabel.EndpointSysctls] = newDriverOpt

	// Delete migrated settings from the top-level sysctls.
	for _, k := range toDelete {
		delete(hostConfig.Sysctls, k)
	}

	return warning, nil
}

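Editor's note (not part of the diff): a standalone sketch of the key rewrite described in the comment above. rewriteSysctl is an illustrative stand-in for the SplitN logic inside handleSysctlBC, not the daemon code; it also simplifies by simply rejecting anything other than eth0.

package main

import (
	"fmt"
	"strings"
)

// rewriteSysctl turns an interface-specific sysctl such as
// "net.ipv6.conf.eth0.forwarding" into the IFNAME-based driver-option value
// used by DriverOpts[netlabel.EndpointSysctls].
func rewriteSysctl(key, value string) (string, bool) {
	spl := strings.SplitN(key, ".", 5)
	if len(spl) != 5 || spl[0] != "net" || spl[3] != "eth0" {
		return "", false // not a net.*.*.eth0.* sysctl; nothing to migrate
	}
	return fmt.Sprintf("net.%s.%s.IFNAME.%s=%s", spl[1], spl[2], spl[4], value), true
}

func main() {
	opt, ok := rewriteSysctl("net.ipv6.conf.eth0.forwarding", "1")
	fmt.Println(opt, ok) // net.ipv6.conf.IFNAME.forwarding=1 true
}
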
// epConfigForNetMode finds, or creates, an entry in netConfig.EndpointsConfig
|
||||
// corresponding to nwMode.
|
||||
//
|
||||
// nwMode.NetworkName() may be the network's name, its id, or its short-id.
|
||||
//
|
||||
// The corresponding endpoint in netConfig.EndpointsConfig may be keyed on a
|
||||
// different one of name/id/short-id. If there's any ambiguity (there are
|
||||
// endpoints but the names don't match), return an error and do not create a new
|
||||
// endpoint, because it might be a duplicate.
|
||||
func epConfigForNetMode(
|
||||
version string,
|
||||
nwMode container.NetworkMode,
|
||||
netConfig *network.NetworkingConfig,
|
||||
) (*network.EndpointSettings, error) {
|
||||
nwName := nwMode.NetworkName()
|
||||
|
||||
// It's always safe to create an EndpointsConfig entry under nwName if there are
|
||||
// no entries already (because there can't be an entry for this network nwName
|
||||
// refers to under any other name/short-id/id).
|
||||
if len(netConfig.EndpointsConfig) == 0 {
|
||||
es := &network.EndpointSettings{}
|
||||
netConfig.EndpointsConfig = map[string]*network.EndpointSettings{
|
||||
nwName: es,
|
||||
}
|
||||
return es, nil
|
||||
}
|
||||
|
||||
// There cannot be more than one entry in EndpointsConfig with API < 1.44.
|
||||
if versions.LessThan(version, "1.44") {
|
||||
// No need to check for a match between NetworkMode and the names/ids in EndpointsConfig,
|
||||
// the old version of the API would pick this network anyway.
|
||||
for _, ep := range netConfig.EndpointsConfig {
|
||||
return ep, nil
|
||||
}
|
||||
}
|
||||
|
||||
// There is existing endpoint config - if it's not indexed by NetworkMode.Name(), we
|
||||
// can't tell which network the container-wide settings are intended for. NetworkMode,
|
||||
// the keys in EndpointsConfig and the NetworkID in EndpointsConfig may mix network
|
||||
// name/id/short-id. It's not safe to create EndpointsConfig under the NetworkMode
|
||||
// name to store the container-wide setting, because that may result in two sets
|
||||
// of EndpointsConfig for the same network and one set will be discarded later. So,
|
||||
// reject the request ...
|
||||
ep, ok := netConfig.EndpointsConfig[nwName]
|
||||
if !ok {
|
||||
return nil, errdefs.InvalidParameter(
|
||||
errors.New("HostConfig.NetworkMode must match the identity of a network in NetworkSettings.Networks"))
|
||||
}
|
||||
|
||||
return ep, nil
|
||||
}
|
||||
|
||||
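// Minimal usage sketch for epConfigForNetMode (not part of the diff; the
// network name "mynet" is a hypothetical value), mirroring the cases in
// TestEpConfigForNetMode below:
//
//	netCfg := &network.NetworkingConfig{
//		EndpointsConfig: map[string]*network.EndpointSettings{"mynet": {}},
//	}
//	ep, err := epConfigForNetMode("1.44", container.NetworkMode("mynet"), netCfg)
//	// On API >= 1.44, ep is the "mynet" entry; if the map were keyed on a
//	// short-id instead, an InvalidParameter error would be returned.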
func (s *containerRouter) deleteContainers(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
if err := httputils.ParseForm(r); err != nil {
|
||||
return err
|
||||
@@ -927,7 +760,7 @@ func (s *containerRouter) postContainersAttach(ctx context.Context, w http.Respo
|
||||
}
|
||||
|
||||
contentType := types.MediaTypeRawStream
|
||||
setupStreams := func(multiplexed bool, cancel func()) (io.ReadCloser, io.Writer, io.Writer, error) {
|
||||
setupStreams := func(multiplexed bool) (io.ReadCloser, io.Writer, io.Writer, error) {
|
||||
conn, _, err := hijacker.Hijack()
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
@@ -945,8 +778,6 @@ func (s *containerRouter) postContainersAttach(ctx context.Context, w http.Respo
|
||||
fmt.Fprintf(conn, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n")
|
||||
}
|
||||
|
||||
go notifyClosed(ctx, conn, cancel)
|
||||
|
||||
closer := func() error {
|
||||
httputils.CloseStreams(conn)
|
||||
return nil
|
||||
@@ -995,7 +826,7 @@ func (s *containerRouter) wsContainersAttach(ctx context.Context, w http.Respons
|
||||
|
||||
version := httputils.VersionFromContext(ctx)
|
||||
|
||||
setupStreams := func(multiplexed bool, cancel func()) (io.ReadCloser, io.Writer, io.Writer, error) {
|
||||
setupStreams := func(multiplexed bool) (io.ReadCloser, io.Writer, io.Writer, error) {
|
||||
wsChan := make(chan *websocket.Conn)
|
||||
h := func(conn *websocket.Conn) {
|
||||
wsChan <- conn
|
||||
@@ -1014,8 +845,6 @@ func (s *containerRouter) wsContainersAttach(ctx context.Context, w http.Respons
|
||||
if versions.GreaterThanOrEqualTo(version, "1.28") {
|
||||
conn.PayloadType = websocket.BinaryFrame
|
||||
}
|
||||
|
||||
// TODO: Close notifications
|
||||
return conn, conn, conn, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -1,352 +0,0 @@
|
||||
package container
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/network"
|
||||
"github.com/docker/docker/libnetwork/netlabel"
|
||||
"gotest.tools/v3/assert"
|
||||
is "gotest.tools/v3/assert/cmp"
|
||||
)
|
||||
|
||||
func TestHandleMACAddressBC(t *testing.T) {
|
||||
testcases := []struct {
|
||||
name string
|
||||
apiVersion string
|
||||
ctrWideMAC string
|
||||
networkMode container.NetworkMode
|
||||
epConfig map[string]*network.EndpointSettings
|
||||
expEpWithCtrWideMAC string
|
||||
expEpWithNoMAC string
|
||||
expCtrWideMAC string
|
||||
expWarning string
|
||||
expError string
|
||||
}{
|
||||
{
|
||||
name: "old api ctr-wide mac mix id and name",
|
||||
apiVersion: "1.43",
|
||||
ctrWideMAC: "11:22:33:44:55:66",
|
||||
networkMode: "aNetId",
|
||||
epConfig: map[string]*network.EndpointSettings{"aNetName": {}},
|
||||
expEpWithCtrWideMAC: "aNetName",
|
||||
expCtrWideMAC: "11:22:33:44:55:66",
|
||||
},
|
||||
{
|
||||
name: "old api clear ep mac",
|
||||
apiVersion: "1.43",
|
||||
networkMode: "aNetId",
|
||||
epConfig: map[string]*network.EndpointSettings{"aNetName": {MacAddress: "11:22:33:44:55:66"}},
|
||||
expEpWithNoMAC: "aNetName",
|
||||
},
|
||||
{
|
||||
name: "old api no-network ctr-wide mac",
|
||||
apiVersion: "1.43",
|
||||
networkMode: "none",
|
||||
ctrWideMAC: "11:22:33:44:55:66",
|
||||
expError: "conflicting options: mac-address and the network mode",
|
||||
expCtrWideMAC: "11:22:33:44:55:66",
|
||||
},
|
||||
{
|
||||
name: "old api create ep",
|
||||
apiVersion: "1.43",
|
||||
networkMode: "aNetId",
|
||||
ctrWideMAC: "11:22:33:44:55:66",
|
||||
epConfig: map[string]*network.EndpointSettings{},
|
||||
expEpWithCtrWideMAC: "aNetId",
|
||||
expCtrWideMAC: "11:22:33:44:55:66",
|
||||
},
|
||||
{
|
||||
name: "old api migrate ctr-wide mac",
|
||||
apiVersion: "1.43",
|
||||
ctrWideMAC: "11:22:33:44:55:66",
|
||||
networkMode: "aNetName",
|
||||
epConfig: map[string]*network.EndpointSettings{"aNetName": {}},
|
||||
expEpWithCtrWideMAC: "aNetName",
|
||||
expCtrWideMAC: "11:22:33:44:55:66",
|
||||
},
|
||||
{
|
||||
name: "new api no macs",
|
||||
apiVersion: "1.44",
|
||||
networkMode: "aNetId",
|
||||
epConfig: map[string]*network.EndpointSettings{"aNetName": {}},
|
||||
},
|
||||
{
|
||||
name: "new api ep specific mac",
|
||||
apiVersion: "1.44",
|
||||
networkMode: "aNetName",
|
||||
epConfig: map[string]*network.EndpointSettings{"aNetName": {MacAddress: "11:22:33:44:55:66"}},
|
||||
},
|
||||
{
|
||||
name: "new api migrate ctr-wide mac to new ep",
|
||||
apiVersion: "1.44",
|
||||
ctrWideMAC: "11:22:33:44:55:66",
|
||||
networkMode: "aNetName",
|
||||
epConfig: map[string]*network.EndpointSettings{},
|
||||
expEpWithCtrWideMAC: "aNetName",
|
||||
expWarning: "The container-wide MacAddress field is now deprecated",
|
||||
expCtrWideMAC: "",
|
||||
},
|
||||
{
|
||||
name: "new api migrate ctr-wide mac to existing ep",
|
||||
apiVersion: "1.44",
|
||||
ctrWideMAC: "11:22:33:44:55:66",
|
||||
networkMode: "aNetName",
|
||||
epConfig: map[string]*network.EndpointSettings{"aNetName": {}},
|
||||
expEpWithCtrWideMAC: "aNetName",
|
||||
expWarning: "The container-wide MacAddress field is now deprecated",
|
||||
expCtrWideMAC: "",
|
||||
},
|
||||
{
|
||||
name: "new api mode vs name mismatch",
|
||||
apiVersion: "1.44",
|
||||
ctrWideMAC: "11:22:33:44:55:66",
|
||||
networkMode: "aNetId",
|
||||
epConfig: map[string]*network.EndpointSettings{"aNetName": {}},
|
||||
expError: "unable to migrate container-wide MAC address to a specific network: HostConfig.NetworkMode must match the identity of a network in NetworkSettings.Networks",
|
||||
expCtrWideMAC: "11:22:33:44:55:66",
|
||||
},
|
||||
{
|
||||
name: "new api mac mismatch",
|
||||
apiVersion: "1.44",
|
||||
ctrWideMAC: "11:22:33:44:55:66",
|
||||
networkMode: "aNetName",
|
||||
epConfig: map[string]*network.EndpointSettings{"aNetName": {MacAddress: "00:11:22:33:44:55"}},
|
||||
expError: "the container-wide MAC address must match the endpoint-specific MAC address",
|
||||
expCtrWideMAC: "11:22:33:44:55:66",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testcases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
cfg := &container.Config{
|
||||
MacAddress: tc.ctrWideMAC, //nolint:staticcheck // ignore SA1019: field is deprecated, but still used on API < v1.44.
|
||||
}
|
||||
hostCfg := &container.HostConfig{
|
||||
NetworkMode: tc.networkMode,
|
||||
}
|
||||
epConfig := make(map[string]*network.EndpointSettings, len(tc.epConfig))
|
||||
for k, v := range tc.epConfig {
|
||||
v := *v
|
||||
epConfig[k] = &v
|
||||
}
|
||||
netCfg := &network.NetworkingConfig{
|
||||
EndpointsConfig: epConfig,
|
||||
}
|
||||
|
||||
warning, err := handleMACAddressBC(cfg, hostCfg, netCfg, tc.apiVersion)
|
||||
|
||||
if tc.expError == "" {
|
||||
assert.Check(t, err)
|
||||
} else {
|
||||
assert.Check(t, is.ErrorContains(err, tc.expError))
|
||||
}
|
||||
if tc.expWarning == "" {
|
||||
assert.Check(t, is.Equal(warning, ""))
|
||||
} else {
|
||||
assert.Check(t, is.Contains(warning, tc.expWarning))
|
||||
}
|
||||
if tc.expEpWithCtrWideMAC != "" {
|
||||
got := netCfg.EndpointsConfig[tc.expEpWithCtrWideMAC].MacAddress
|
||||
assert.Check(t, is.Equal(got, tc.ctrWideMAC))
|
||||
}
|
||||
if tc.expEpWithNoMAC != "" {
|
||||
got := netCfg.EndpointsConfig[tc.expEpWithNoMAC].MacAddress
|
||||
assert.Check(t, is.Equal(got, ""))
|
||||
}
|
||||
gotCtrWideMAC := cfg.MacAddress //nolint:staticcheck // ignore SA1019: field is deprecated, but still used on API < v1.44.
|
||||
assert.Check(t, is.Equal(gotCtrWideMAC, tc.expCtrWideMAC))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestEpConfigForNetMode(t *testing.T) {
|
||||
testcases := []struct {
|
||||
name string
|
||||
apiVersion string
|
||||
networkMode string
|
||||
epConfig map[string]*network.EndpointSettings
|
||||
expEpId string
|
||||
expNumEps int
|
||||
expError bool
|
||||
}{
|
||||
{
|
||||
name: "old api no eps",
|
||||
apiVersion: "1.43",
|
||||
networkMode: "mynet",
|
||||
expNumEps: 1,
|
||||
},
|
||||
{
|
||||
name: "new api no eps",
|
||||
apiVersion: "1.44",
|
||||
networkMode: "mynet",
|
||||
expNumEps: 1,
|
||||
},
|
||||
{
|
||||
name: "old api with ep",
|
||||
apiVersion: "1.43",
|
||||
networkMode: "mynet",
|
||||
epConfig: map[string]*network.EndpointSettings{
|
||||
"anything": {EndpointID: "epone"},
|
||||
},
|
||||
expEpId: "epone",
|
||||
expNumEps: 1,
|
||||
},
|
||||
{
|
||||
name: "new api with matching ep",
|
||||
apiVersion: "1.44",
|
||||
networkMode: "mynet",
|
||||
epConfig: map[string]*network.EndpointSettings{
|
||||
"mynet": {EndpointID: "epone"},
|
||||
},
|
||||
expEpId: "epone",
|
||||
expNumEps: 1,
|
||||
},
|
||||
{
|
||||
name: "new api with mismatched ep",
|
||||
apiVersion: "1.44",
|
||||
networkMode: "mynet",
|
||||
epConfig: map[string]*network.EndpointSettings{
|
||||
"shortid": {EndpointID: "epone"},
|
||||
},
|
||||
expError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testcases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
netConfig := &network.NetworkingConfig{
|
||||
EndpointsConfig: tc.epConfig,
|
||||
}
|
||||
ep, err := epConfigForNetMode(tc.apiVersion, container.NetworkMode(tc.networkMode), netConfig)
|
||||
if tc.expError {
|
||||
assert.Check(t, is.ErrorContains(err, "HostConfig.NetworkMode must match the identity of a network in NetworkSettings.Networks"))
|
||||
} else {
|
||||
assert.Assert(t, err)
|
||||
assert.Check(t, is.Equal(ep.EndpointID, tc.expEpId))
|
||||
assert.Check(t, is.Len(netConfig.EndpointsConfig, tc.expNumEps))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleSysctlBC(t *testing.T) {
|
||||
testcases := []struct {
|
||||
name string
|
||||
apiVersion string
|
||||
networkMode string
|
||||
sysctls map[string]string
|
||||
epConfig map[string]*network.EndpointSettings
|
||||
expEpSysctls []string
|
||||
expSysctls map[string]string
|
||||
expWarningContains []string
|
||||
expError string
|
||||
}{
|
||||
{
|
||||
name: "migrate to new ep",
|
||||
apiVersion: "1.46",
|
||||
networkMode: "mynet",
|
||||
sysctls: map[string]string{
|
||||
"net.ipv6.conf.all.disable_ipv6": "0",
|
||||
"net.ipv6.conf.eth0.accept_ra": "2",
|
||||
"net.ipv6.conf.eth0.forwarding": "1",
|
||||
},
|
||||
expSysctls: map[string]string{
|
||||
"net.ipv6.conf.all.disable_ipv6": "0",
|
||||
},
|
||||
expEpSysctls: []string{"net.ipv6.conf.IFNAME.forwarding=1", "net.ipv6.conf.IFNAME.accept_ra=2"},
|
||||
expWarningContains: []string{
|
||||
"Migrated",
|
||||
"net.ipv6.conf.eth0.accept_ra", "net.ipv6.conf.IFNAME.accept_ra=2",
|
||||
"net.ipv6.conf.eth0.forwarding", "net.ipv6.conf.IFNAME.forwarding=1",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "migrate nothing",
|
||||
apiVersion: "1.46",
|
||||
networkMode: "mynet",
|
||||
sysctls: map[string]string{
|
||||
"net.ipv6.conf.all.disable_ipv6": "0",
|
||||
},
|
||||
expSysctls: map[string]string{
|
||||
"net.ipv6.conf.all.disable_ipv6": "0",
|
||||
},
|
||||
},
|
||||
/* TODO(robmry) - enable this test for the API version used in 28.0.0
|
||||
{
|
||||
name: "migration disabled for newer api",
|
||||
apiVersion: "1.??",
|
||||
networkMode: "mynet",
|
||||
sysctls: map[string]string{
|
||||
"net.ipv6.conf.eth0.accept_ra": "2",
|
||||
},
|
||||
expError: "must be supplied using driver option 'com.docker.network.endpoint.sysctls'",
|
||||
},
|
||||
*/
|
||||
{
|
||||
name: "only migrate eth0",
|
||||
apiVersion: "1.46",
|
||||
networkMode: "mynet",
|
||||
sysctls: map[string]string{
|
||||
"net.ipv6.conf.eth1.accept_ra": "2",
|
||||
},
|
||||
expError: "unable to determine network endpoint",
|
||||
},
|
||||
{
|
||||
name: "net name mismatch",
|
||||
apiVersion: "1.46",
|
||||
networkMode: "mynet",
|
||||
epConfig: map[string]*network.EndpointSettings{
|
||||
"shortid": {EndpointID: "epone"},
|
||||
},
|
||||
sysctls: map[string]string{
|
||||
"net.ipv6.conf.eth1.accept_ra": "2",
|
||||
},
|
||||
expError: "unable to find a network for sysctl",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testcases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
hostCfg := &container.HostConfig{
|
||||
NetworkMode: container.NetworkMode(tc.networkMode),
|
||||
Sysctls: map[string]string{},
|
||||
}
|
||||
for k, v := range tc.sysctls {
|
||||
hostCfg.Sysctls[k] = v
|
||||
}
|
||||
netCfg := &network.NetworkingConfig{
|
||||
EndpointsConfig: tc.epConfig,
|
||||
}
|
||||
|
||||
warnings, err := handleSysctlBC(hostCfg, netCfg, tc.apiVersion)
|
||||
|
||||
for _, s := range tc.expWarningContains {
|
||||
assert.Check(t, is.Contains(warnings, s))
|
||||
}
|
||||
|
||||
if tc.expError != "" {
|
||||
assert.Check(t, is.ErrorContains(err, tc.expError))
|
||||
} else {
|
||||
assert.Check(t, err)
|
||||
|
||||
assert.Check(t, is.DeepEqual(hostCfg.Sysctls, tc.expSysctls))
|
||||
|
||||
ep := netCfg.EndpointsConfig[tc.networkMode]
|
||||
if ep == nil {
|
||||
assert.Check(t, is.Nil(tc.expEpSysctls))
|
||||
} else {
|
||||
got, ok := ep.DriverOpts[netlabel.EndpointSysctls]
|
||||
assert.Check(t, ok)
|
||||
// Check for expected ep-sysctls.
|
||||
for _, want := range tc.expEpSysctls {
|
||||
assert.Check(t, is.Contains(got, want))
|
||||
}
|
||||
// Check for unexpected ep-sysctls.
|
||||
assert.Check(t, is.Len(got, len(strings.Join(tc.expEpSysctls, ","))))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -10,12 +10,51 @@ import (
|
||||
"net/http"
|
||||
|
||||
"github.com/docker/docker/api/server/httputils"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/versions"
|
||||
gddohttputil "github.com/golang/gddo/httputil"
|
||||
)
|
||||
|
||||
// setContainerPathStatHeader encodes the stat to JSON, base64-encodes it, and places it in a header.
|
||||
func setContainerPathStatHeader(stat *container.PathStat, header http.Header) error {
|
||||
type pathError struct{}
|
||||
|
||||
func (pathError) Error() string {
|
||||
return "Path cannot be empty"
|
||||
}
|
||||
|
||||
func (pathError) InvalidParameter() {}
|
||||
|
||||
// postContainersCopy is deprecated in favor of getContainersArchive.
|
||||
//
|
||||
// Deprecated since 1.8 (API v1.20), errors out since 1.12 (API v1.24)
|
||||
func (s *containerRouter) postContainersCopy(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
version := httputils.VersionFromContext(ctx)
|
||||
if versions.GreaterThanOrEqualTo(version, "1.24") {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
return nil
|
||||
}
|
||||
|
||||
cfg := types.CopyConfig{}
|
||||
if err := httputils.ReadJSON(r, &cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if cfg.Resource == "" {
|
||||
return pathError{}
|
||||
}
|
||||
|
||||
data, err := s.backend.ContainerCopy(vars["name"], cfg.Resource)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer data.Close()
|
||||
|
||||
w.Header().Set("Content-Type", "application/x-tar")
|
||||
_, err = io.Copy(w, data)
|
||||
return err
|
||||
}
|
||||
|
||||
// // Encode the stat to JSON, base64 encode, and place in a header.
|
||||
func setContainerPathStatHeader(stat *types.ContainerPathStat, header http.Header) error {
|
||||
statJSON, err := json.Marshal(stat)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
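// Sketch of the remainder of setContainerPathStatHeader (an assumption, since
// the hunk is truncated here): the marshalled stat is base64-encoded and
// stored in the X-Docker-Container-Path-Stat response header, roughly:
//
//	header.Set("X-Docker-Container-Path-Stat",
//		base64.StdEncoding.EncodeToString(statJSON))
//	return nil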
@@ -10,7 +10,6 @@ import (
|
||||
"github.com/containerd/log"
|
||||
"github.com/docker/docker/api/server/httputils"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/backend"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/versions"
|
||||
"github.com/docker/docker/errdefs"
|
||||
@@ -39,7 +38,7 @@ func (s *containerRouter) postContainerExecCreate(ctx context.Context, w http.Re
|
||||
return err
|
||||
}
|
||||
|
||||
execConfig := &container.ExecOptions{}
|
||||
execConfig := &types.ExecConfig{}
|
||||
if err := httputils.ReadJSON(r, execConfig); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -72,14 +71,23 @@ func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.Res
|
||||
return err
|
||||
}
|
||||
|
||||
version := httputils.VersionFromContext(ctx)
|
||||
if versions.LessThan(version, "1.22") {
|
||||
// API versions before 1.22 did not enforce application/json content-type.
|
||||
// Allow older clients to work by patching the content-type.
|
||||
if r.Header.Get("Content-Type") != "application/json" {
|
||||
r.Header.Set("Content-Type", "application/json")
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
execName = vars["name"]
|
||||
stdin, inStream io.ReadCloser
|
||||
stdout, stderr, outStream io.Writer
|
||||
)
|
||||
|
||||
options := &container.ExecStartOptions{}
|
||||
if err := httputils.ReadJSON(r, options); err != nil {
|
||||
execStartCheck := &types.ExecStartCheck{}
|
||||
if err := httputils.ReadJSON(r, execStartCheck); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -87,21 +95,19 @@ func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.Res
|
||||
return err
|
||||
}
|
||||
|
||||
if options.ConsoleSize != nil {
|
||||
version := httputils.VersionFromContext(ctx)
|
||||
|
||||
if execStartCheck.ConsoleSize != nil {
|
||||
// Not supported before 1.42
|
||||
if versions.LessThan(version, "1.42") {
|
||||
options.ConsoleSize = nil
|
||||
execStartCheck.ConsoleSize = nil
|
||||
}
|
||||
|
||||
// No console without tty
|
||||
if !options.Tty {
|
||||
options.ConsoleSize = nil
|
||||
if !execStartCheck.Tty {
|
||||
execStartCheck.ConsoleSize = nil
|
||||
}
|
||||
}
|
||||
|
||||
if !options.Detach {
|
||||
if !execStartCheck.Detach {
|
||||
var err error
|
||||
// Setting up the streaming http interface.
|
||||
inStream, outStream, err = httputils.HijackConnection(w)
|
||||
@@ -112,43 +118,42 @@ func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.Res
|
||||
|
||||
if _, ok := r.Header["Upgrade"]; ok {
|
||||
contentType := types.MediaTypeRawStream
|
||||
if !options.Tty && versions.GreaterThanOrEqualTo(httputils.VersionFromContext(ctx), "1.42") {
|
||||
if !execStartCheck.Tty && versions.GreaterThanOrEqualTo(httputils.VersionFromContext(ctx), "1.42") {
|
||||
contentType = types.MediaTypeMultiplexedStream
|
||||
}
|
||||
_, _ = fmt.Fprint(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: "+contentType+"\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n")
|
||||
fmt.Fprint(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: "+contentType+"\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n")
|
||||
} else {
|
||||
_, _ = fmt.Fprint(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n")
|
||||
fmt.Fprint(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n")
|
||||
}
|
||||
|
||||
// copy headers that were removed as part of hijack
|
||||
if err := w.Header().WriteSubset(outStream, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
_, _ = fmt.Fprint(outStream, "\r\n")
|
||||
fmt.Fprint(outStream, "\r\n")
|
||||
|
||||
stdin = inStream
|
||||
if options.Tty {
|
||||
stdout = outStream
|
||||
} else {
|
||||
stdout = outStream
|
||||
if !execStartCheck.Tty {
|
||||
stderr = stdcopy.NewStdWriter(outStream, stdcopy.Stderr)
|
||||
stdout = stdcopy.NewStdWriter(outStream, stdcopy.Stdout)
|
||||
}
|
||||
}
|
||||
|
||||
// Now run the user process in container.
|
||||
//
|
||||
// TODO: Maybe we should pass ctx here if we're not detaching?
|
||||
err := s.backend.ContainerExecStart(context.Background(), execName, backend.ExecStartConfig{
|
||||
options := container.ExecStartOptions{
|
||||
Stdin: stdin,
|
||||
Stdout: stdout,
|
||||
Stderr: stderr,
|
||||
ConsoleSize: options.ConsoleSize,
|
||||
})
|
||||
if err != nil {
|
||||
if options.Detach {
|
||||
ConsoleSize: execStartCheck.ConsoleSize,
|
||||
}
|
||||
|
||||
// Now run the user process in container.
|
||||
// Maybe we should pass ctx here if we're not detaching?
|
||||
if err := s.backend.ContainerExecStart(context.Background(), execName, options); err != nil {
|
||||
if execStartCheck.Detach {
|
||||
return err
|
||||
}
|
||||
_, _ = fmt.Fprintf(stdout, "%v\r\n", err)
|
||||
stdout.Write([]byte(err.Error() + "\r\n"))
|
||||
log.G(ctx).Errorf("Error running exec %s in container: %v", execName, err)
|
||||
}
|
||||
return nil
|
||||
|
||||
@@ -1,54 +0,0 @@
|
||||
package container
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"syscall"
|
||||
|
||||
"github.com/containerd/log"
|
||||
"github.com/docker/docker/internal/unix_noeintr"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func notifyClosed(ctx context.Context, conn net.Conn, notify func()) {
|
||||
sc, ok := conn.(syscall.Conn)
|
||||
if !ok {
|
||||
log.G(ctx).Debug("notifyClosed: conn does not support close notifications")
|
||||
return
|
||||
}
|
||||
|
||||
rc, err := sc.SyscallConn()
|
||||
if err != nil {
|
||||
log.G(ctx).WithError(err).Warn("notifyClosed: failed get raw conn for close notifications")
|
||||
return
|
||||
}
|
||||
|
||||
epFd, err := unix_noeintr.EpollCreate()
|
||||
if err != nil {
|
||||
log.G(ctx).WithError(err).Warn("notifyClosed: failed to create epoll fd")
|
||||
return
|
||||
}
|
||||
defer unix.Close(epFd)
|
||||
|
||||
err = rc.Control(func(fd uintptr) {
|
||||
err := unix_noeintr.EpollCtl(epFd, unix.EPOLL_CTL_ADD, int(fd), &unix.EpollEvent{
|
||||
Events: unix.EPOLLHUP,
|
||||
Fd: int32(fd),
|
||||
})
|
||||
if err != nil {
|
||||
log.G(ctx).WithError(err).Warn("notifyClosed: failed to register fd for close notifications")
|
||||
return
|
||||
}
|
||||
|
||||
events := make([]unix.EpollEvent, 1)
|
||||
if _, err := unix_noeintr.EpollWait(epFd, events, -1); err != nil {
|
||||
log.G(ctx).WithError(err).Warn("notifyClosed: failed to wait for close notifications")
|
||||
return
|
||||
}
|
||||
notify()
|
||||
})
|
||||
if err != nil {
|
||||
log.G(ctx).WithError(err).Warn("notifyClosed: failed to register for close notifications")
|
||||
return
|
||||
}
|
||||
}
|
||||
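// Usage note (not part of the diff): notifyClosed is started as a goroutine by
// the attach handler above, so the attach stream can be torn down as soon as
// the hijacked client connection hangs up (EPOLLHUP):
//
//	go notifyClosed(ctx, conn, cancel)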
@@ -1,10 +0,0 @@
|
||||
//go:build !linux
|
||||
|
||||
package container
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
)
|
||||
|
||||
func notifyClosed(ctx context.Context, conn net.Conn, notify func()) {}
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"os"
|
||||
|
||||
"github.com/distribution/reference"
|
||||
"github.com/docker/distribution"
|
||||
@@ -13,7 +12,6 @@ import (
|
||||
"github.com/docker/distribution/manifest/schema2"
|
||||
"github.com/docker/docker/api/server/httputils"
|
||||
"github.com/docker/docker/api/types/registry"
|
||||
distributionpkg "github.com/docker/docker/distribution"
|
||||
"github.com/docker/docker/errdefs"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
@@ -26,10 +24,10 @@ func (s *distributionRouter) getDistributionInfo(ctx context.Context, w http.Res
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
imgName := vars["name"]
|
||||
image := vars["name"]
|
||||
|
||||
// TODO why is reference.ParseAnyReference() / reference.ParseNormalizedNamed() not using the reference.ErrTagInvalidFormat (and so on) errors?
|
||||
ref, err := reference.ParseAnyReference(imgName)
|
||||
ref, err := reference.ParseAnyReference(image)
|
||||
if err != nil {
|
||||
return errdefs.InvalidParameter(err)
|
||||
}
|
||||
@@ -39,7 +37,7 @@ func (s *distributionRouter) getDistributionInfo(ctx context.Context, w http.Res
|
||||
// full image ID
|
||||
return errors.Errorf("no manifest found for full image ID")
|
||||
}
|
||||
return errdefs.InvalidParameter(errors.Errorf("unknown image reference format: %s", imgName))
|
||||
return errdefs.InvalidParameter(errors.Errorf("unknown image reference format: %s", image))
|
||||
}
|
||||
|
||||
// For a search it is not an error if no auth was given. Ignore invalid
|
||||
@@ -155,9 +153,6 @@ func (s *distributionRouter) fetchManifest(ctx context.Context, distrepo distrib
|
||||
}
|
||||
}
|
||||
case *schema1.SignedManifest:
|
||||
if os.Getenv("DOCKER_ENABLE_DEPRECATED_PULL_SCHEMA_1_IMAGE") == "" {
|
||||
return registry.DistributionInspect{}, distributionpkg.DeprecatedSchema1ImageError(namedRef)
|
||||
}
|
||||
platform := ocispec.Platform{
|
||||
Architecture: mnfstObj.Architecture,
|
||||
OS: "linux",
|
||||
|
||||
@@ -1,22 +1,13 @@
|
||||
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
|
||||
//go:build go1.21
|
||||
|
||||
package grpc // import "github.com/docker/docker/api/server/router/grpc"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/containerd/defaults"
|
||||
"github.com/containerd/log"
|
||||
"github.com/docker/docker/api/server/router"
|
||||
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
|
||||
"github.com/moby/buildkit/util/grpcerrors"
|
||||
"github.com/moby/buildkit/util/stack"
|
||||
"github.com/moby/buildkit/util/tracing"
|
||||
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
|
||||
"go.opentelemetry.io/otel"
|
||||
"golang.org/x/net/http2"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
@@ -29,17 +20,12 @@ type grpcRouter struct {
|
||||
|
||||
// NewRouter initializes a new grpc http router
|
||||
func NewRouter(backends ...Backend) router.Router {
|
||||
opts := []grpc.ServerOption{
|
||||
grpc.StatsHandler(tracing.ServerStatsHandler(otelgrpc.WithTracerProvider(otel.GetTracerProvider()))),
|
||||
grpc.ChainUnaryInterceptor(unaryInterceptor, grpcerrors.UnaryServerInterceptor),
|
||||
grpc.StreamInterceptor(grpcerrors.StreamServerInterceptor),
|
||||
grpc.MaxRecvMsgSize(defaults.DefaultMaxRecvMsgSize),
|
||||
grpc.MaxSendMsgSize(defaults.DefaultMaxSendMsgSize),
|
||||
}
|
||||
unary := grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(unaryInterceptor(), grpcerrors.UnaryServerInterceptor))
|
||||
stream := grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(otelgrpc.StreamServerInterceptor(), grpcerrors.StreamServerInterceptor))
|
||||
|
||||
r := &grpcRouter{
|
||||
h2Server: &http2.Server{},
|
||||
grpcServer: grpc.NewServer(opts...),
|
||||
grpcServer: grpc.NewServer(unary, stream),
|
||||
}
|
||||
for _, b := range backends {
|
||||
b.RegisterGRPC(r.grpcServer)
|
||||
@@ -59,20 +45,16 @@ func (gr *grpcRouter) initRoutes() {
|
||||
}
|
||||
}
|
||||
|
||||
func unaryInterceptor(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) {
|
||||
// This method is used by the clients to send their traces to buildkit so they can be included
|
||||
// in the daemon trace and stored in the build history record. This method can not be traced because
|
||||
// it would cause an infinite loop.
|
||||
if strings.HasSuffix(info.FullMethod, "opentelemetry.proto.collector.trace.v1.TraceService/Export") {
|
||||
return handler(ctx, req)
|
||||
}
|
||||
func unaryInterceptor() grpc.UnaryServerInterceptor {
|
||||
withTrace := otelgrpc.UnaryServerInterceptor()
|
||||
|
||||
resp, err = handler(ctx, req)
|
||||
if err != nil {
|
||||
log.G(ctx).WithError(err).Error(info.FullMethod)
|
||||
if log.GetLevel() >= log.DebugLevel {
|
||||
fmt.Fprintf(os.Stderr, "%+v", stack.Formatter(grpcerrors.FromGRPC(err)))
|
||||
return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
|
||||
// This method is used by the clients to send their traces to buildkit so they can be included
|
||||
// in the daemon trace and stored in the build history record. This method can not be traced because
|
||||
// it would cause an infinite loop.
|
||||
if strings.HasSuffix(info.FullMethod, "opentelemetry.proto.collector.trace.v1.TraceService/Export") {
|
||||
return handler(ctx, req)
|
||||
}
|
||||
return withTrace(ctx, req, info, handler)
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"io"
|
||||
|
||||
"github.com/distribution/reference"
|
||||
"github.com/docker/docker/api/types/backend"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/api/types/image"
|
||||
"github.com/docker/docker/api/types/registry"
|
||||
@@ -24,10 +24,10 @@ type Backend interface {
|
||||
type imageBackend interface {
|
||||
ImageDelete(ctx context.Context, imageRef string, force, prune bool) ([]image.DeleteResponse, error)
|
||||
ImageHistory(ctx context.Context, imageName string) ([]*image.HistoryResponseItem, error)
|
||||
Images(ctx context.Context, opts image.ListOptions) ([]*image.Summary, error)
|
||||
GetImage(ctx context.Context, refOrID string, options backend.GetImageOpts) (*dockerimage.Image, error)
|
||||
Images(ctx context.Context, opts types.ImageListOptions) ([]*image.Summary, error)
|
||||
GetImage(ctx context.Context, refOrID string, options image.GetImageOpts) (*dockerimage.Image, error)
|
||||
TagImage(ctx context.Context, id dockerimage.ID, newRef reference.Named) error
|
||||
ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*image.PruneReport, error)
|
||||
ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*types.ImagesPruneReport, error)
|
||||
}
|
||||
|
||||
type importExportBackend interface {
|
||||
@@ -38,7 +38,7 @@ type importExportBackend interface {
|
||||
|
||||
type registryBackend interface {
|
||||
PullImage(ctx context.Context, ref reference.Named, platform *ocispec.Platform, metaHeaders map[string][]string, authConfig *registry.AuthConfig, outStream io.Writer) error
|
||||
PushImage(ctx context.Context, ref reference.Named, platform *ocispec.Platform, metaHeaders map[string][]string, authConfig *registry.AuthConfig, outStream io.Writer) error
|
||||
PushImage(ctx context.Context, ref reference.Named, metaHeaders map[string][]string, authConfig *registry.AuthConfig, outStream io.Writer) error
|
||||
}
|
||||
|
||||
type Searcher interface {
|
||||
|
||||
@@ -10,14 +10,13 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/platforms"
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/distribution/reference"
|
||||
"github.com/docker/docker/api"
|
||||
"github.com/docker/docker/api/server/httputils"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/backend"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
imagetypes "github.com/docker/docker/api/types/image"
|
||||
opts "github.com/docker/docker/api/types/image"
|
||||
"github.com/docker/docker/api/types/registry"
|
||||
"github.com/docker/docker/api/types/versions"
|
||||
"github.com/docker/docker/builder/remotecontext"
|
||||
@@ -56,7 +55,7 @@ func (ir *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWrit
|
||||
if p := r.FormValue("platform"); p != "" {
|
||||
sp, err := platforms.Parse(p)
|
||||
if err != nil {
|
||||
return errdefs.InvalidParameter(err)
|
||||
return err
|
||||
}
|
||||
platform = &sp
|
||||
}
|
||||
@@ -73,9 +72,9 @@ func (ir *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWrit
|
||||
// Special case: "pull -a" may send an image name with a
|
||||
// trailing :. This is ugly, but let's not break API
|
||||
// compatibility.
|
||||
imgName := strings.TrimSuffix(img, ":")
|
||||
image := strings.TrimSuffix(img, ":")
|
||||
|
||||
ref, err := reference.ParseNormalizedNamed(imgName)
|
||||
ref, err := reference.ParseNormalizedNamed(image)
|
||||
if err != nil {
|
||||
return errdefs.InvalidParameter(err)
|
||||
}
|
||||
@@ -190,7 +189,7 @@ func (ir *imageRouter) postImagesPush(ctx context.Context, w http.ResponseWriter
|
||||
|
||||
var ref reference.Named
|
||||
|
||||
// Tag is empty only in case PushOptions.All is true.
|
||||
// Tag is empty only in case ImagePushOptions.All is true.
|
||||
if tag != "" {
|
||||
r, err := httputils.RepoTagReference(img, tag)
|
||||
if err != nil {
|
||||
@@ -205,24 +204,7 @@ func (ir *imageRouter) postImagesPush(ctx context.Context, w http.ResponseWriter
|
||||
ref = r
|
||||
}
|
||||
|
||||
var platform *ocispec.Platform
|
||||
// Platform is optional, and only supported in API version 1.46 and later.
|
||||
// However, the PushOptions struct was previously an alias for the PullOptions struct,
|
||||
// which also contained a Platform field.
|
||||
// This means that older clients may be sending a platform field, even
|
||||
// though it wasn't really supported by the server.
|
||||
// Don't break these clients and just ignore the platform field on older APIs.
|
||||
if versions.GreaterThanOrEqualTo(httputils.VersionFromContext(ctx), "1.46") {
|
||||
if formPlatform := r.Form.Get("platform"); formPlatform != "" {
|
||||
p, err := httputils.DecodePlatform(formPlatform)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
platform = p
|
||||
}
|
||||
}
|
||||
|
||||
if err := ir.backend.PushImage(ctx, ref, platform, metaHeaders, authConfig, output); err != nil {
|
||||
if err := ir.backend.PushImage(ctx, ref, metaHeaders, authConfig, output); err != nil {
|
||||
if !output.Flushed() {
|
||||
return err
|
||||
}
|
||||
@@ -303,7 +285,7 @@ func (ir *imageRouter) deleteImages(ctx context.Context, w http.ResponseWriter,
|
||||
}
|
||||
|
||||
func (ir *imageRouter) getImagesByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
img, err := ir.backend.GetImage(ctx, vars["name"], backend.GetImageOpts{Details: true})
|
||||
img, err := ir.backend.GetImage(ctx, vars["name"], opts.GetImageOpts{Details: true})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -316,16 +298,6 @@ func (ir *imageRouter) getImagesByName(ctx context.Context, w http.ResponseWrite
|
||||
version := httputils.VersionFromContext(ctx)
|
||||
if versions.LessThan(version, "1.44") {
|
||||
imageInspect.VirtualSize = imageInspect.Size //nolint:staticcheck // ignore SA1019: field is deprecated, but still set on API < v1.44.
|
||||
|
||||
if imageInspect.Created == "" {
|
||||
// backwards compatibility for Created not existing returning "0001-01-01T00:00:00Z"
|
||||
// https://github.com/moby/moby/issues/47368
|
||||
imageInspect.Created = time.Time{}.Format(time.RFC3339Nano)
|
||||
}
|
||||
}
|
||||
if versions.GreaterThanOrEqualTo(version, "1.45") {
|
||||
imageInspect.Container = "" //nolint:staticcheck // ignore SA1019: field is deprecated, but still set on API < v1.45.
|
||||
imageInspect.ContainerConfig = nil //nolint:staticcheck // ignore SA1019: field is deprecated, but still set on API < v1.45.
|
||||
}
|
||||
return httputils.WriteJSON(w, http.StatusOK, imageInspect)
|
||||
}
|
||||
@@ -381,7 +353,7 @@ func (ir *imageRouter) toImageInspect(img *image.Image) (*types.ImageInspect, er
|
||||
Data: img.Details.Metadata,
|
||||
},
|
||||
RootFS: rootFSToAPIType(img.RootFS),
|
||||
Metadata: imagetypes.Metadata{
|
||||
Metadata: opts.Metadata{
|
||||
LastTagTime: img.Details.LastUpdated,
|
||||
},
|
||||
}, nil
|
||||
@@ -423,16 +395,10 @@ func (ir *imageRouter) getImagesJSON(ctx context.Context, w http.ResponseWriter,
|
||||
sharedSize = httputils.BoolValue(r, "shared-size")
|
||||
}
|
||||
|
||||
var manifests bool
|
||||
if versions.GreaterThanOrEqualTo(version, "1.47") {
|
||||
manifests = httputils.BoolValue(r, "manifests")
|
||||
}
|
||||
|
||||
images, err := ir.backend.Images(ctx, imagetypes.ListOptions{
|
||||
images, err := ir.backend.Images(ctx, types.ImageListOptions{
|
||||
All: httputils.BoolValue(r, "all"),
|
||||
Filters: imageFilters,
|
||||
SharedSize: sharedSize,
|
||||
Manifests: manifests,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -486,7 +452,7 @@ func (ir *imageRouter) postImagesTag(ctx context.Context, w http.ResponseWriter,
|
||||
return errdefs.InvalidParameter(errors.New("refusing to create an ambiguous tag using digest algorithm as name"))
|
||||
}
|
||||
|
||||
img, err := ir.backend.GetImage(ctx, vars["name"], backend.GetImageOpts{})
|
||||
img, err := ir.backend.GetImage(ctx, vars["name"], opts.GetImageOpts{})
|
||||
if err != nil {
|
||||
return errdefs.NotFound(err)
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ package network // import "github.com/docker/docker/api/server/router/network"
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/backend"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/api/types/network"
|
||||
@@ -11,20 +12,20 @@ import (
|
||||
// Backend is all the methods that need to be implemented
|
||||
// to provide network specific functionality.
|
||||
type Backend interface {
|
||||
GetNetworks(filters.Args, backend.NetworkListConfig) ([]network.Inspect, error)
|
||||
CreateNetwork(nc network.CreateRequest) (*network.CreateResponse, error)
|
||||
ConnectContainerToNetwork(ctx context.Context, containerName, networkName string, endpointConfig *network.EndpointSettings) error
|
||||
GetNetworks(filters.Args, backend.NetworkListConfig) ([]types.NetworkResource, error)
|
||||
CreateNetwork(nc types.NetworkCreateRequest) (*types.NetworkCreateResponse, error)
|
||||
ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error
|
||||
DisconnectContainerFromNetwork(containerName string, networkName string, force bool) error
|
||||
DeleteNetwork(networkID string) error
|
||||
NetworksPrune(ctx context.Context, pruneFilters filters.Args) (*network.PruneReport, error)
|
||||
NetworksPrune(ctx context.Context, pruneFilters filters.Args) (*types.NetworksPruneReport, error)
|
||||
}
|
||||
|
||||
// ClusterBackend is all the methods that need to be implemented
|
||||
// to provide cluster network specific functionality.
|
||||
type ClusterBackend interface {
|
||||
GetNetworks(filters.Args) ([]network.Inspect, error)
|
||||
GetNetwork(name string) (network.Inspect, error)
|
||||
GetNetworksByName(name string) ([]network.Inspect, error)
|
||||
CreateNetwork(nc network.CreateRequest) (string, error)
|
||||
GetNetworks(filters.Args) ([]types.NetworkResource, error)
|
||||
GetNetwork(name string) (types.NetworkResource, error)
|
||||
GetNetworksByName(name string) ([]types.NetworkResource, error)
|
||||
CreateNetwork(nc types.NetworkCreateRequest) (string, error)
|
||||
RemoveNetwork(name string) error
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/api/server/httputils"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/backend"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/api/types/network"
|
||||
@@ -31,7 +32,7 @@ func (n *networkRouter) getNetworksList(ctx context.Context, w http.ResponseWrit
|
||||
return err
|
||||
}
|
||||
|
||||
var list []network.Summary
|
||||
var list []types.NetworkResource
|
||||
nr, err := n.cluster.GetNetworks(filter)
|
||||
if err == nil {
|
||||
list = nr
|
||||
@@ -59,7 +60,7 @@ func (n *networkRouter) getNetworksList(ctx context.Context, w http.ResponseWrit
|
||||
}
|
||||
|
||||
if list == nil {
|
||||
list = []network.Summary{}
|
||||
list = []types.NetworkResource{}
|
||||
}
|
||||
|
||||
return httputils.WriteJSON(w, http.StatusOK, list)
|
||||
@@ -108,8 +109,8 @@ func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r
|
||||
|
||||
// For full name and partial ID, save the result first, and process later
|
||||
// in case multiple records were found based on the same term
|
||||
listByFullName := map[string]network.Inspect{}
|
||||
listByPartialID := map[string]network.Inspect{}
|
||||
listByFullName := map[string]types.NetworkResource{}
|
||||
listByPartialID := map[string]types.NetworkResource{}
|
||||
|
||||
// TODO(@cpuguy83): All this logic for figuring out which network to return does not belong here
|
||||
// Instead there should be a backend function to just get one network.
|
||||
@@ -203,7 +204,7 @@ func (n *networkRouter) postNetworkCreate(ctx context.Context, w http.ResponseWr
|
||||
return err
|
||||
}
|
||||
|
||||
var create network.CreateRequest
|
||||
var create types.NetworkCreateRequest
|
||||
if err := httputils.ReadJSON(r, &create); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -212,10 +213,6 @@ func (n *networkRouter) postNetworkCreate(ctx context.Context, w http.ResponseWr
|
||||
return libnetwork.NetworkNameError(create.Name)
|
||||
}
|
||||
|
||||
// For a Swarm-scoped network, this call to backend.CreateNetwork is used to
|
||||
// validate the configuration. The network will not be created but, if the
|
||||
// configuration is valid, ManagerRedirectError will be returned and handled
|
||||
// below.
|
||||
nw, err := n.backend.CreateNetwork(create)
|
||||
if err != nil {
|
||||
if _, ok := err.(libnetwork.ManagerRedirectError); !ok {
|
||||
@@ -225,7 +222,7 @@ func (n *networkRouter) postNetworkCreate(ctx context.Context, w http.ResponseWr
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nw = &network.CreateResponse{
|
||||
nw = &types.NetworkCreateResponse{
|
||||
ID: id,
|
||||
}
|
||||
}
|
||||
@@ -238,7 +235,7 @@ func (n *networkRouter) postNetworkConnect(ctx context.Context, w http.ResponseW
|
||||
return err
|
||||
}
|
||||
|
||||
var connect network.ConnectOptions
|
||||
var connect types.NetworkConnect
|
||||
if err := httputils.ReadJSON(r, &connect); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -247,7 +244,7 @@ func (n *networkRouter) postNetworkConnect(ctx context.Context, w http.ResponseW
|
||||
// The reason is that, in the case of an attachable network in swarm scope, the actual local network
// may not be available at the time. At the same time, the daemon's `ConnectContainerToNetwork`
// does the ambiguity check anyway. Therefore, passing the name to the daemon is enough.
|
||||
return n.backend.ConnectContainerToNetwork(ctx, connect.Container, vars["id"], connect.EndpointConfig)
|
||||
return n.backend.ConnectContainerToNetwork(connect.Container, vars["id"], connect.EndpointConfig)
|
||||
}
|
||||
|
||||
func (n *networkRouter) postNetworkDisconnect(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
@@ -255,7 +252,7 @@ func (n *networkRouter) postNetworkDisconnect(ctx context.Context, w http.Respon
|
||||
return err
|
||||
}
|
||||
|
||||
var disconnect network.DisconnectOptions
|
||||
var disconnect types.NetworkDisconnect
|
||||
if err := httputils.ReadJSON(r, &disconnect); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -310,9 +307,9 @@ func (n *networkRouter) postNetworksPrune(ctx context.Context, w http.ResponseWr
|
||||
// For full name and partial ID, save the result first, and process later
|
||||
// in case multiple records were found based on the same term
|
||||
// TODO (yongtang): should we wrap with version here for backward compatibility?
|
||||
func (n *networkRouter) findUniqueNetwork(term string) (network.Inspect, error) {
|
||||
listByFullName := map[string]network.Inspect{}
|
||||
listByPartialID := map[string]network.Inspect{}
|
||||
func (n *networkRouter) findUniqueNetwork(term string) (types.NetworkResource, error) {
|
||||
listByFullName := map[string]types.NetworkResource{}
|
||||
listByPartialID := map[string]types.NetworkResource{}
|
||||
|
||||
filter := filters.NewArgs(filters.Arg("idOrName", term))
|
||||
networks, _ := n.backend.GetNetworks(filter, backend.NetworkListConfig{Detailed: true})
|
||||
@@ -362,7 +359,7 @@ func (n *networkRouter) findUniqueNetwork(term string) (network.Inspect, error)
|
||||
}
|
||||
}
|
||||
if len(listByFullName) > 1 {
|
||||
return network.Inspect{}, errdefs.InvalidParameter(errors.Errorf("network %s is ambiguous (%d matches found based on name)", term, len(listByFullName)))
|
||||
return types.NetworkResource{}, errdefs.InvalidParameter(errors.Errorf("network %s is ambiguous (%d matches found based on name)", term, len(listByFullName)))
|
||||
}
|
||||
|
||||
// Find based on partial ID, returns true only if no duplicates
|
||||
@@ -372,8 +369,8 @@ func (n *networkRouter) findUniqueNetwork(term string) (network.Inspect, error)
|
||||
}
|
||||
}
|
||||
if len(listByPartialID) > 1 {
|
||||
return network.Inspect{}, errdefs.InvalidParameter(errors.Errorf("network %s is ambiguous (%d matches found based on ID prefix)", term, len(listByPartialID)))
|
||||
return types.NetworkResource{}, errdefs.InvalidParameter(errors.Errorf("network %s is ambiguous (%d matches found based on ID prefix)", term, len(listByPartialID)))
|
||||
}
|
||||
|
||||
return network.Inspect{}, errdefs.NotFound(libnetwork.ErrNoSuchNetwork(term))
|
||||
return types.NetworkResource{}, errdefs.NotFound(libnetwork.ErrNoSuchNetwork(term))
|
||||
}
|
||||
|
||||
@@ -224,6 +224,14 @@ func (sr *swarmRouter) createService(ctx context.Context, w http.ResponseWriter,
|
||||
adjustForAPIVersion(v, &service)
|
||||
}
|
||||
|
||||
version := httputils.VersionFromContext(ctx)
|
||||
if versions.LessThan(version, "1.44") {
|
||||
if service.TaskTemplate.ContainerSpec != nil && service.TaskTemplate.ContainerSpec.Healthcheck != nil {
|
||||
// StartInterval was added in API 1.44
|
||||
service.TaskTemplate.ContainerSpec.Healthcheck.StartInterval = 0
|
||||
}
|
||||
}
|
||||
|
||||
resp, err := sr.backend.CreateService(service, encodedAuth, queryRegistry)
|
||||
if err != nil {
|
||||
log.G(ctx).WithFields(log.Fields{
|
||||
|
||||
@@ -78,16 +78,6 @@ func adjustForAPIVersion(cliVersion string, service *swarm.ServiceSpec) {
|
||||
if cliVersion == "" {
|
||||
return
|
||||
}
|
||||
if versions.LessThan(cliVersion, "1.46") {
|
||||
if service.TaskTemplate.ContainerSpec != nil {
|
||||
for i, mount := range service.TaskTemplate.ContainerSpec.Mounts {
|
||||
if mount.TmpfsOptions != nil {
|
||||
mount.TmpfsOptions.Options = nil
|
||||
service.TaskTemplate.ContainerSpec.Mounts[i] = mount
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if versions.LessThan(cliVersion, "1.40") {
|
||||
if service.TaskTemplate.ContainerSpec != nil {
|
||||
// Sysctls for docker swarm services weren't supported before
|
||||
@@ -131,24 +121,11 @@ func adjustForAPIVersion(cliVersion string, service *swarm.ServiceSpec) {
|
||||
}
|
||||
|
||||
if versions.LessThan(cliVersion, "1.44") {
|
||||
if service.TaskTemplate.ContainerSpec != nil {
|
||||
// seccomp, apparmor, and no_new_privs were added in 1.44.
|
||||
if service.TaskTemplate.ContainerSpec.Privileges != nil {
|
||||
service.TaskTemplate.ContainerSpec.Privileges.Seccomp = nil
|
||||
service.TaskTemplate.ContainerSpec.Privileges.AppArmor = nil
|
||||
service.TaskTemplate.ContainerSpec.Privileges.NoNewPrivileges = false
|
||||
}
|
||||
if service.TaskTemplate.ContainerSpec.Healthcheck != nil {
|
||||
// StartInterval was added in API 1.44
|
||||
service.TaskTemplate.ContainerSpec.Healthcheck.StartInterval = 0
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if versions.LessThan(cliVersion, "1.46") {
|
||||
if service.TaskTemplate.ContainerSpec != nil && service.TaskTemplate.ContainerSpec.OomScoreAdj != 0 {
|
||||
// OomScoreAdj was added in API 1.46
|
||||
service.TaskTemplate.ContainerSpec.OomScoreAdj = 0
|
||||
// seccomp, apparmor, and no_new_privs were added in 1.44.
|
||||
if service.TaskTemplate.ContainerSpec != nil && service.TaskTemplate.ContainerSpec.Privileges != nil {
|
||||
service.TaskTemplate.ContainerSpec.Privileges.Seccomp = nil
|
||||
service.TaskTemplate.ContainerSpec.Privileges.AppArmor = nil
|
||||
service.TaskTemplate.ContainerSpec.Privileges.NoNewPrivileges = false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
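// Illustrative call (not part of the diff; the version string is hypothetical):
// adjustForAPIVersion clears fields the requesting client cannot know about,
// so a pre-1.44 client keeps its old semantics:
//
//	adjustForAPIVersion("1.43", &service)
//	// Privileges.Seccomp, Privileges.AppArmor and Privileges.NoNewPrivileges
//	// are reset, since those fields were only added in API 1.44.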
@@ -4,9 +4,8 @@ import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/mount"
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
"github.com/docker/go-units"
|
||||
)
|
||||
|
||||
func TestAdjustForAPIVersion(t *testing.T) {
|
||||
@@ -39,25 +38,13 @@ func TestAdjustForAPIVersion(t *testing.T) {
|
||||
ConfigName: "configRuntime",
|
||||
},
|
||||
},
|
||||
Ulimits: []*container.Ulimit{
|
||||
Ulimits: []*units.Ulimit{
|
||||
{
|
||||
Name: "nofile",
|
||||
Soft: 100,
|
||||
Hard: 200,
|
||||
},
|
||||
},
|
||||
Mounts: []mount.Mount{
|
||||
{
|
||||
Type: mount.TypeTmpfs,
|
||||
Source: "/foo",
|
||||
Target: "/bar",
|
||||
TmpfsOptions: &mount.TmpfsOptions{
|
||||
Options: [][]string{
|
||||
{"exec"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Placement: &swarm.Placement{
|
||||
MaxReplicas: 222,
|
||||
@@ -70,19 +57,6 @@ func TestAdjustForAPIVersion(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
adjustForAPIVersion("1.46", spec)
|
||||
if !reflect.DeepEqual(
|
||||
spec.TaskTemplate.ContainerSpec.Mounts[0].TmpfsOptions.Options,
|
||||
[][]string{{"exec"}},
|
||||
) {
|
||||
t.Error("TmpfsOptions.Options was stripped from spec")
|
||||
}
|
||||
|
||||
adjustForAPIVersion("1.45", spec)
|
||||
if len(spec.TaskTemplate.ContainerSpec.Mounts[0].TmpfsOptions.Options) != 0 {
|
||||
t.Error("TmpfsOptions.Options not stripped from spec")
|
||||
}
|
||||
|
||||
// first, does calling this with a later version correctly NOT strip
|
||||
// fields? do the later version first, so we can reuse this spec in the
|
||||
// next test.
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
|
||||
//go:build go1.21
|
||||
//go:build go1.19
|
||||
|
||||
package system // import "github.com/docker/docker/api/server/router/system"
|
||||
|
||||
|
||||
@@ -81,6 +81,7 @@ func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *ht
|
||||
nameOnly = append(nameOnly, so.Name)
|
||||
}
|
||||
info.SecurityOptions = nameOnly
|
||||
info.ExecutionDriver = "<not supported>" //nolint:staticcheck // ignore SA1019 (ExecutionDriver is deprecated)
|
||||
}
|
||||
if versions.LessThan(version, "1.39") {
|
||||
if info.KernelVersion == "" {
|
||||
@@ -96,10 +97,6 @@ func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *ht
|
||||
info.Runtimes[k] = system.RuntimeWithStatus{Runtime: rt.Runtime}
|
||||
}
|
||||
}
|
||||
if versions.LessThan(version, "1.46") {
|
||||
// Containerd field introduced in API v1.46.
|
||||
info.Containerd = nil
|
||||
}
|
||||
if versions.GreaterThanOrEqualTo(version, "1.42") {
|
||||
info.KernelMemory = false
|
||||
}
|
||||
@@ -266,7 +263,6 @@ func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
output := ioutils.NewWriteFlusher(w)
|
||||
defer output.Close()
|
||||
output.Flush()
|
||||
@@ -276,18 +272,7 @@ func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *
|
||||
buffered, l := s.backend.SubscribeToEvents(since, until, ef)
|
||||
defer s.backend.UnsubscribeFromEvents(l)
|
||||
|
||||
shouldSkip := func(ev events.Message) bool { return false }
|
||||
if versions.LessThan(httputils.VersionFromContext(ctx), "1.46") {
|
||||
// Image create events were added in API 1.46
|
||||
shouldSkip = func(ev events.Message) bool {
|
||||
return ev.Type == "image" && ev.Action == "create"
|
||||
}
|
||||
}
|
||||
|
||||
for _, ev := range buffered {
|
||||
if shouldSkip(ev) {
|
||||
continue
|
||||
}
|
||||
if err := enc.Encode(ev); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -305,9 +290,6 @@ func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *
|
||||
log.G(ctx).Warnf("unexpected event message: %q", ev)
|
||||
continue
|
||||
}
|
||||
if shouldSkip(jev) {
|
||||
continue
|
||||
}
|
||||
if err := enc.Encode(jev); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -3,9 +3,11 @@ package volume // import "github.com/docker/docker/api/server/router/volume"
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/docker/docker/volume/service/opts"
|
||||
// TODO return types need to be refactored into pkg
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/api/types/volume"
|
||||
"github.com/docker/docker/volume/service/opts"
|
||||
)
|
||||
|
||||
// Backend is the methods that need to be implemented to provide
|
||||
@@ -15,7 +17,7 @@ type Backend interface {
|
||||
Get(ctx context.Context, name string, opts ...opts.GetOption) (*volume.Volume, error)
|
||||
Create(ctx context.Context, name, driverName string, opts ...opts.CreateOption) (*volume.Volume, error)
|
||||
Remove(ctx context.Context, name string, opts ...opts.RemoveOption) error
|
||||
Prune(ctx context.Context, pruneFilters filters.Args) (*volume.PruneReport, error)
|
||||
Prune(ctx context.Context, pruneFilters filters.Args) (*types.VolumesPruneReport, error)
|
||||
}
|
||||
|
||||
// ClusterBackend is the backend used for Swarm Cluster Volumes. Regular
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"gotest.tools/v3/assert"
|
||||
|
||||
"github.com/docker/docker/api/server/httputils"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/api/types/volume"
|
||||
"github.com/docker/docker/errdefs"
|
||||
@@ -635,7 +636,7 @@ func (b *fakeVolumeBackend) Remove(_ context.Context, name string, o ...opts.Rem
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *fakeVolumeBackend) Prune(_ context.Context, _ filters.Args) (*volume.PruneReport, error) {
|
||||
func (b *fakeVolumeBackend) Prune(_ context.Context, _ filters.Args) (*types.VolumesPruneReport, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -10,7 +10,6 @@ import (
|
||||
"github.com/docker/docker/api/server/middleware"
|
||||
"github.com/docker/docker/api/server/router"
|
||||
"github.com/docker/docker/api/server/router/debug"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/dockerversion"
|
||||
"github.com/gorilla/mux"
|
||||
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
|
||||
@@ -58,13 +57,19 @@ func (s *Server) makeHTTPHandler(handler httputils.APIFunc, operation string) ht
|
||||
if statusCode >= 500 {
|
||||
log.G(ctx).Errorf("Handler for %s %s returned error: %v", r.Method, r.URL.Path, err)
|
||||
}
|
||||
_ = httputils.WriteJSON(w, statusCode, &types.ErrorResponse{
|
||||
Message: err.Error(),
|
||||
})
|
||||
makeErrorHandler(err)(w, r)
|
||||
}
|
||||
}), operation).ServeHTTP
|
||||
}
|
||||
|
||||
type pageNotFoundError struct{}
|
||||
|
||||
func (pageNotFoundError) Error() string {
|
||||
return "page not found"
|
||||
}
|
||||
|
||||
func (pageNotFoundError) NotFound() {}
|
||||
|
||||
// CreateMux returns a new mux with all the routers registered.
|
||||
func (s *Server) CreateMux(routers ...router.Router) *mux.Router {
|
||||
m := mux.NewRouter()
|
||||
@@ -86,12 +91,7 @@ func (s *Server) CreateMux(routers ...router.Router) *mux.Router {
|
||||
m.Path("/debug" + r.Path()).Handler(f)
|
||||
}
|
||||
|
||||
notFoundHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
_ = httputils.WriteJSON(w, http.StatusNotFound, &types.ErrorResponse{
|
||||
Message: "page not found",
|
||||
})
|
||||
})
|
||||
|
||||
notFoundHandler := makeErrorHandler(pageNotFoundError{})
|
||||
m.HandleFunc(versionMatcher+"/{path:.*}", notFoundHandler)
|
||||
m.NotFoundHandler = notFoundHandler
|
||||
m.MethodNotAllowedHandler = notFoundHandler
|
||||
|
||||
@@ -15,11 +15,8 @@ import (
|
||||
func TestMiddlewares(t *testing.T) {
|
||||
srv := &Server{}
|
||||
|
||||
m, err := middleware.NewVersionMiddleware("0.1omega2", api.DefaultVersion, api.MinSupportedAPIVersion)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
srv.UseMiddleware(*m)
|
||||
const apiMinVersion = "1.12"
|
||||
srv.UseMiddleware(middleware.NewVersionMiddleware("0.1omega2", api.DefaultVersion, apiMinVersion))
|
||||
|
||||
req, _ := http.NewRequest(http.MethodGet, "/containers/json", nil)
|
||||
resp := httptest.NewRecorder()
|
||||
|
||||
api/swagger.yaml: 870 changes (diff suppressed because it is too large)
@@ -1,26 +0,0 @@
package auxprogress

import (
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// ManifestPushedInsteadOfIndex is a note that is sent when a manifest is pushed
// instead of an index. It is sent when the pushed image is a multi-platform
// index, but the whole index couldn't be pushed.
type ManifestPushedInsteadOfIndex struct {
ManifestPushedInsteadOfIndex bool `json:"manifestPushedInsteadOfIndex"` // Always true

// OriginalIndex is the descriptor of the original image index.
OriginalIndex ocispec.Descriptor `json:"originalIndex"`

// SelectedManifest is the descriptor of the manifest that was pushed instead.
SelectedManifest ocispec.Descriptor `json:"selectedManifest"`
}

// ContentMissing is a note that is sent when push fails because the content is missing.
type ContentMissing struct {
ContentMissing bool `json:"contentMissing"` // Always true

// Desc is the descriptor of the root object that was attempted to be pushed.
Desc ocispec.Descriptor `json:"desc"`
}
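The auxprogress types shown in the deleted file above are plain JSON payloads attached as aux data to progress messages. As a rough, self-contained sketch (the note struct below is illustrative, not the moby type), emitting such a note is just a JSON marshal:

package main

import (
    "encoding/json"
    "fmt"
)

// note mimics the shape of ManifestPushedInsteadOfIndex: a boolean marker plus
// fields identifying what was actually pushed (simplified to digest strings).
type note struct {
    ManifestPushedInsteadOfIndex bool   `json:"manifestPushedInsteadOfIndex"`
    OriginalIndexDigest          string `json:"originalIndexDigest"`
    SelectedManifestDigest       string `json:"selectedManifestDigest"`
}

func main() {
    n := note{
        ManifestPushedInsteadOfIndex: true,
        OriginalIndexDigest:          "sha256:aaaa",
        SelectedManifestDigest:       "sha256:bbbb",
    }
    // The aux payload travels as raw JSON inside a progress message.
    raw, _ := json.Marshal(n)
    fmt.Println(string(raw))
}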
@@ -13,12 +13,12 @@ import (
|
||||
|
||||
// ContainerCreateConfig is the parameter set to ContainerCreate()
|
||||
type ContainerCreateConfig struct {
|
||||
Name string
|
||||
Config *container.Config
|
||||
HostConfig *container.HostConfig
|
||||
NetworkingConfig *network.NetworkingConfig
|
||||
Platform *ocispec.Platform
|
||||
DefaultReadOnlyNonRecursive bool
|
||||
Name string
|
||||
Config *container.Config
|
||||
HostConfig *container.HostConfig
|
||||
NetworkingConfig *network.NetworkingConfig
|
||||
Platform *ocispec.Platform
|
||||
AdjustCPUShares bool
|
||||
}
|
||||
|
||||
// ContainerRmConfig holds arguments for the container remove
|
||||
@@ -30,7 +30,7 @@ type ContainerRmConfig struct {
|
||||
|
||||
// ContainerAttachConfig holds the streams to use when connecting to a container to view logs.
|
||||
type ContainerAttachConfig struct {
|
||||
GetStreams func(multiplexed bool, cancel func()) (io.ReadCloser, io.Writer, io.Writer, error)
|
||||
GetStreams func(multiplexed bool) (io.ReadCloser, io.Writer, io.Writer, error)
|
||||
UseStdin bool
|
||||
UseStdout bool
|
||||
UseStderr bool
|
||||
@@ -89,15 +89,8 @@ type LogSelector struct {
|
||||
type ContainerStatsConfig struct {
|
||||
Stream bool
|
||||
OneShot bool
|
||||
OutStream func() io.Writer
|
||||
}
|
||||
|
||||
// ExecStartConfig holds the options to start container's exec.
|
||||
type ExecStartConfig struct {
|
||||
Stdin io.Reader
|
||||
Stdout io.Writer
|
||||
Stderr io.Writer
|
||||
ConsoleSize *[2]uint `json:",omitempty"`
|
||||
OutStream io.Writer
|
||||
Version string
|
||||
}
|
||||
|
||||
// ExecInspect holds information about a running process started
|
||||
@@ -137,13 +130,6 @@ type CreateImageConfig struct {
|
||||
Changes []string
|
||||
}
|
||||
|
||||
// GetImageOpts holds parameters to retrieve image information
|
||||
// from the backend.
|
||||
type GetImageOpts struct {
|
||||
Platform *ocispec.Platform
|
||||
Details bool
|
||||
}
|
||||
|
||||
// CommitConfig is the configuration for creating an image as part of a build.
|
||||
type CommitConfig struct {
|
||||
Author string
|
||||
|
||||
@@ -2,15 +2,43 @@ package types // import "github.com/docker/docker/api/types"
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"io"
|
||||
"net"
|
||||
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/api/types/registry"
|
||||
units "github.com/docker/go-units"
|
||||
)
|
||||
|
||||
// ContainerExecInspect holds information returned by exec inspect.
|
||||
type ContainerExecInspect struct {
|
||||
ExecID string `json:"ID"`
|
||||
ContainerID string
|
||||
Running bool
|
||||
ExitCode int
|
||||
Pid int
|
||||
}
|
||||
|
||||
// CopyToContainerOptions holds information
|
||||
// about files to copy into a container
|
||||
type CopyToContainerOptions struct {
|
||||
AllowOverwriteDirWithFile bool
|
||||
CopyUIDGID bool
|
||||
}
|
||||
|
||||
// EventsOptions holds parameters to filter events with.
|
||||
type EventsOptions struct {
|
||||
Since string
|
||||
Until string
|
||||
Filters filters.Args
|
||||
}
|
||||
|
||||
// NetworkListOptions holds parameters to filter the list of networks with.
|
||||
type NetworkListOptions struct {
|
||||
Filters filters.Args
|
||||
}
|
||||
|
||||
// NewHijackedResponse initializes a HijackedResponse type
|
||||
func NewHijackedResponse(conn net.Conn, mediaType string) HijackedResponse {
|
||||
return HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn), mediaType: mediaType}
|
||||
@@ -73,7 +101,7 @@ type ImageBuildOptions struct {
|
||||
NetworkMode string
|
||||
ShmSize int64
|
||||
Dockerfile string
|
||||
Ulimits []*container.Ulimit
|
||||
Ulimits []*units.Ulimit
|
||||
// BuildArgs needs to be a *string instead of just a string so that
|
||||
// we can tell the difference between "" (empty string) and no value
|
||||
// at all (nil). See the parsing of buildArgs in
|
||||
@@ -94,7 +122,7 @@ type ImageBuildOptions struct {
|
||||
Target string
|
||||
SessionID string
|
||||
Platform string
|
||||
// Version specifies the version of the underlying builder to use
|
||||
// Version specifies the version of the unerlying builder to use
|
||||
Version BuilderVersion
|
||||
// BuildID is an optional identifier that can be passed together with the
|
||||
// build request. The same identifier can be used to gracefully cancel the
|
||||
@@ -129,13 +157,81 @@ type ImageBuildResponse struct {
|
||||
OSType string
|
||||
}
|
||||
|
||||
// ImageCreateOptions holds information to create images.
|
||||
type ImageCreateOptions struct {
|
||||
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry.
|
||||
Platform string // Platform is the target platform of the image if it needs to be pulled from the registry.
|
||||
}
|
||||
|
||||
// ImageImportSource holds source information for ImageImport
|
||||
type ImageImportSource struct {
|
||||
Source io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this.
|
||||
SourceName string // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute.
|
||||
}
|
||||
|
||||
// ImageImportOptions holds information to import images from the client host.
|
||||
type ImageImportOptions struct {
|
||||
Tag string // Tag is the name to tag this image with. This attribute is deprecated.
|
||||
Message string // Message is the message to tag the image with
|
||||
Changes []string // Changes are the raw changes to apply to this image
|
||||
Platform string // Platform is the target platform of the image
|
||||
}
|
||||
|
||||
// ImageListOptions holds parameters to list images with.
|
||||
type ImageListOptions struct {
|
||||
// All controls whether all images in the graph are filtered, or just
|
||||
// the heads.
|
||||
All bool
|
||||
|
||||
// Filters is a JSON-encoded set of filter arguments.
|
||||
Filters filters.Args
|
||||
|
||||
// SharedSize indicates whether the shared size of images should be computed.
|
||||
SharedSize bool
|
||||
|
||||
// ContainerCount indicates whether container count should be computed.
|
||||
ContainerCount bool
|
||||
}
|
||||
|
||||
// ImageLoadResponse returns information to the client about a load process.
|
||||
type ImageLoadResponse struct {
|
||||
// Body must be closed to avoid a resource leak
|
||||
Body io.ReadCloser
|
||||
JSON bool
|
||||
}
|
||||
|
||||
// ImagePullOptions holds information to pull images.
|
||||
type ImagePullOptions struct {
|
||||
All bool
|
||||
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
|
||||
PrivilegeFunc RequestPrivilegeFunc
|
||||
Platform string
|
||||
}
|
||||
|
||||
// RequestPrivilegeFunc is a function interface that
|
||||
// clients can supply to retry operations after
|
||||
// getting an authorization error.
|
||||
// This function returns the registry authentication
|
||||
// header value in base 64 format, or an error
|
||||
// if the privilege request fails.
|
||||
type RequestPrivilegeFunc func(context.Context) (string, error)
|
||||
type RequestPrivilegeFunc func() (string, error)
|
||||
|
||||
// ImagePushOptions holds information to push images.
|
||||
type ImagePushOptions ImagePullOptions
|
||||
|
||||
// ImageRemoveOptions holds parameters to remove images.
|
||||
type ImageRemoveOptions struct {
|
||||
Force bool
|
||||
PruneChildren bool
|
||||
}
|
||||
|
||||
// ImageSearchOptions holds parameters to search images with.
|
||||
type ImageSearchOptions struct {
|
||||
RegistryAuth string
|
||||
PrivilegeFunc RequestPrivilegeFunc
|
||||
Filters filters.Args
|
||||
Limit int
|
||||
}
|
||||
|
||||
// NodeListOptions holds parameters to list nodes with.
|
||||
type NodeListOptions struct {
|
||||
@@ -240,7 +336,7 @@ type PluginInstallOptions struct {
|
||||
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
|
||||
RemoteRef string // RemoteRef is the plugin name on the registry
|
||||
PrivilegeFunc RequestPrivilegeFunc
|
||||
AcceptPermissionsFunc func(context.Context, PluginPrivileges) (bool, error)
|
||||
AcceptPermissionsFunc func(PluginPrivileges) (bool, error)
|
||||
Args []string
|
||||
}
|
||||
|
||||
|
||||
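The hunk above changes RequestPrivilegeFunc between a context-aware and a plain func() (string, error) signature; in both cases the function supplies a fresh base64 registry auth header so an operation can be retried after an authorization error. Below is a minimal sketch of that retry pattern, assuming a hypothetical withPrivilegeRetry helper; it is not the actual client code.

package main

import (
    "errors"
    "fmt"
)

// errUnauthorized stands in for the authorization error a registry operation
// can return.
var errUnauthorized = errors.New("unauthorized: authentication required")

// privilegeFunc matches the shape of the older RequestPrivilegeFunc shown
// above: it returns a fresh registry auth header value in base64 format.
type privilegeFunc func() (string, error)

// withPrivilegeRetry runs op once and, if it fails with an authorization
// error, asks the privilege func for new credentials and retries once. This
// mirrors how PrivilegeFunc is used by pull/push/search options.
func withPrivilegeRetry(op func(auth string) error, auth string, pf privilegeFunc) error {
    err := op(auth)
    if err == nil || !errors.Is(err, errUnauthorized) || pf == nil {
        return err
    }
    newAuth, perr := pf()
    if perr != nil {
        return perr
    }
    return op(newAuth)
}

func main() {
    calls := 0
    op := func(auth string) error {
        calls++
        if auth == "" {
            return errUnauthorized
        }
        return nil
    }
    err := withPrivilegeRetry(op, "", func() (string, error) { return "bmV3LWF1dGg=", nil })
    fmt.Println(calls, err) // 2 <nil>
}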
api/types/configs.go (new file, 18 lines)
@@ -0,0 +1,18 @@
package types // import "github.com/docker/docker/api/types"

// ExecConfig is a small subset of the Config struct that holds the configuration
// for the exec feature of docker.
type ExecConfig struct {
User string // User that will run the command
Privileged bool // Is the container in privileged mode
Tty bool // Attach standard streams to a tty.
ConsoleSize *[2]uint `json:",omitempty"` // Initial console size [height, width]
AttachStdin bool // Attach the standard input, makes possible user interaction
AttachStderr bool // Attach the standard error
AttachStdout bool // Attach the standard output
Detach bool // Execute in detach mode
DetachKeys string // Escape keys for detach
Env []string // Environment variables
WorkingDir string // Working directory
Cmd []string // Execution commands and args
}
@@ -1,11 +1,12 @@
|
||||
package container // import "github.com/docker/docker/api/types/container"
|
||||
|
||||
import (
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types/strslice"
|
||||
dockerspec "github.com/docker/docker/image/spec/specs-go/v1"
|
||||
"github.com/docker/go-connections/nat"
|
||||
dockerspec "github.com/moby/docker-image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
// MinimumDuration puts a minimum on user configured duration.
|
||||
@@ -35,6 +36,14 @@ type StopOptions struct {
|
||||
// HealthConfig holds configuration settings for the HEALTHCHECK feature.
|
||||
type HealthConfig = dockerspec.HealthcheckConfig
|
||||
|
||||
// ExecStartOptions holds the options to start container's exec.
|
||||
type ExecStartOptions struct {
|
||||
Stdin io.Reader
|
||||
Stdout io.Writer
|
||||
Stderr io.Writer
|
||||
ConsoleSize *[2]uint `json:",omitempty"`
|
||||
}
|
||||
|
||||
// Config contains the configuration data about a container.
|
||||
// It should hold only portable information about the container.
|
||||
// Here, "portable" means "independent from the host we are running on".
|
||||
|
||||
@@ -1,44 +0,0 @@
|
||||
package container
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
// PruneReport contains the response for Engine API:
|
||||
// POST "/containers/prune"
|
||||
type PruneReport struct {
|
||||
ContainersDeleted []string
|
||||
SpaceReclaimed uint64
|
||||
}
|
||||
|
||||
// PathStat is used to encode the header from
|
||||
// GET "/containers/{name:.*}/archive"
|
||||
// "Name" is the file or directory name.
|
||||
type PathStat struct {
|
||||
Name string `json:"name"`
|
||||
Size int64 `json:"size"`
|
||||
Mode os.FileMode `json:"mode"`
|
||||
Mtime time.Time `json:"mtime"`
|
||||
LinkTarget string `json:"linkTarget"`
|
||||
}
|
||||
|
||||
// CopyToContainerOptions holds information
|
||||
// about files to copy into a container
|
||||
type CopyToContainerOptions struct {
|
||||
AllowOverwriteDirWithFile bool
|
||||
CopyUIDGID bool
|
||||
}
|
||||
|
||||
// StatsResponseReader wraps an io.ReadCloser to read (a stream of) stats
|
||||
// for a container, as produced by the GET "/stats" endpoint.
|
||||
//
|
||||
// The OSType field is set to the server's platform to allow
|
||||
// platform-specific handling of the response.
|
||||
//
|
||||
// TODO(thaJeztah): remove this wrapper, and make OSType part of [StatsResponse].
|
||||
type StatsResponseReader struct {
|
||||
Body io.ReadCloser `json:"body"`
|
||||
OSType string `json:"ostype"`
|
||||
}
|
||||
@@ -1,13 +0,0 @@
package container

import "github.com/docker/docker/api/types/network"

// CreateRequest is the request message sent to the server for container
// create calls. It is a config wrapper that holds the container [Config]
// (portable) and the corresponding [HostConfig] (non-portable) and
// [network.NetworkingConfig].
type CreateRequest struct {
*Config
HostConfig *HostConfig `json:"HostConfig,omitempty"`
NetworkingConfig *network.NetworkingConfig `json:"NetworkingConfig,omitempty"`
}
@@ -1,43 +0,0 @@
|
||||
package container
|
||||
|
||||
// ExecOptions is a small subset of the Config struct that holds the configuration
|
||||
// for the exec feature of docker.
|
||||
type ExecOptions struct {
|
||||
User string // User that will run the command
|
||||
Privileged bool // Is the container in privileged mode
|
||||
Tty bool // Attach standard streams to a tty.
|
||||
ConsoleSize *[2]uint `json:",omitempty"` // Initial console size [height, width]
|
||||
AttachStdin bool // Attach the standard input, makes possible user interaction
|
||||
AttachStderr bool // Attach the standard error
|
||||
AttachStdout bool // Attach the standard output
|
||||
Detach bool // Execute in detach mode
|
||||
DetachKeys string // Escape keys for detach
|
||||
Env []string // Environment variables
|
||||
WorkingDir string // Working directory
|
||||
Cmd []string // Execution commands and args
|
||||
}
|
||||
|
||||
// ExecStartOptions is a temp struct used by execStart
|
||||
// Config fields is part of ExecConfig in runconfig package
|
||||
type ExecStartOptions struct {
|
||||
// ExecStart will first check if it's detached
|
||||
Detach bool
|
||||
// Check if there's a tty
|
||||
Tty bool
|
||||
// Terminal size [height, width], unused if Tty == false
|
||||
ConsoleSize *[2]uint `json:",omitempty"`
|
||||
}
|
||||
|
||||
// ExecAttachOptions is a temp struct used by execAttach.
|
||||
//
|
||||
// TODO(thaJeztah): make this a separate type; ContainerExecAttach does not use the Detach option, and cannot run detached.
|
||||
type ExecAttachOptions = ExecStartOptions
|
||||
|
||||
// ExecInspect holds information returned by exec inspect.
|
||||
type ExecInspect struct {
|
||||
ExecID string `json:"ID"`
|
||||
ContainerID string
|
||||
Running bool
|
||||
ExitCode int
|
||||
Pid int
|
||||
}
|
||||
@@ -360,12 +360,6 @@ type LogConfig struct {
|
||||
Config map[string]string
|
||||
}
|
||||
|
||||
// Ulimit is an alias for [units.Ulimit], which may be moving to a different
|
||||
// location or become a local type. This alias is to help transitioning.
|
||||
//
|
||||
// Users are recommended to use this alias instead of using [units.Ulimit] directly.
|
||||
type Ulimit = units.Ulimit
|
||||
|
||||
// Resources contains container's resources (cgroups config, ulimits...)
|
||||
type Resources struct {
|
||||
// Applicable to all platforms
|
||||
@@ -393,14 +387,14 @@ type Resources struct {
|
||||
|
||||
// KernelMemory specifies the kernel memory limit (in bytes) for the container.
|
||||
// Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes.
|
||||
KernelMemory int64 `json:",omitempty"`
|
||||
KernelMemoryTCP int64 `json:",omitempty"` // Hard limit for kernel TCP buffer memory (in bytes)
|
||||
MemoryReservation int64 // Memory soft limit (in bytes)
|
||||
MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap
|
||||
MemorySwappiness *int64 // Tuning container memory swappiness behaviour
|
||||
OomKillDisable *bool // Whether to disable OOM Killer or not
|
||||
PidsLimit *int64 // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change.
|
||||
Ulimits []*Ulimit // List of ulimits to be set in the container
|
||||
KernelMemory int64 `json:",omitempty"`
|
||||
KernelMemoryTCP int64 `json:",omitempty"` // Hard limit for kernel TCP buffer memory (in bytes)
|
||||
MemoryReservation int64 // Memory soft limit (in bytes)
|
||||
MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap
|
||||
MemorySwappiness *int64 // Tuning container memory swappiness behaviour
|
||||
OomKillDisable *bool // Whether to disable OOM Killer or not
|
||||
PidsLimit *int64 // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change.
|
||||
Ulimits []*units.Ulimit // List of ulimits to be set in the container
|
||||
|
||||
// Applicable to Windows
|
||||
CPUCount int64 `json:"CpuCount"` // CPU count
|
||||
|
||||
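For context on the Ulimits change above: container.Ulimit is an alias for units.Ulimit, so both sides of the hunk describe the same struct (Name, Soft, Hard). A small self-contained sketch of setting a ulimit on a container's resources, with local stand-in types instead of the docker/go-units and container packages:

package main

import "fmt"

// ulimit mirrors the fields of units.Ulimit (Name, Hard, Soft); defined
// locally so the sketch does not depend on the docker/go-units module.
type ulimit struct {
    Name string
    Hard int64
    Soft int64
}

// resources is a trimmed stand-in for container.Resources; only the field
// relevant to this hunk is kept.
type resources struct {
    Ulimits []*ulimit
}

func main() {
    r := resources{
        Ulimits: []*ulimit{
            // Equivalent of `--ulimit nofile=1024:2048` on the CLI:
            // soft limit of 1024 open files, hard limit of 2048.
            {Name: "nofile", Soft: 1024, Hard: 2048},
        },
    }
    for _, u := range r.Ulimits {
        fmt.Printf("%s=%d:%d\n", u.Name, u.Soft, u.Hard)
    }
}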
@@ -9,6 +9,24 @@ func (i Isolation) IsValid() bool {
|
||||
return i.IsDefault()
|
||||
}
|
||||
|
||||
// NetworkName returns the name of the network stack.
|
||||
func (n NetworkMode) NetworkName() string {
|
||||
if n.IsBridge() {
|
||||
return network.NetworkBridge
|
||||
} else if n.IsHost() {
|
||||
return network.NetworkHost
|
||||
} else if n.IsContainer() {
|
||||
return "container"
|
||||
} else if n.IsNone() {
|
||||
return network.NetworkNone
|
||||
} else if n.IsDefault() {
|
||||
return network.NetworkDefault
|
||||
} else if n.IsUserDefined() {
|
||||
return n.UserDefined()
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// IsBridge indicates whether container uses the bridge network stack
|
||||
func (n NetworkMode) IsBridge() bool {
|
||||
return n == network.NetworkBridge
|
||||
@@ -23,23 +41,3 @@ func (n NetworkMode) IsHost() bool {
|
||||
func (n NetworkMode) IsUserDefined() bool {
|
||||
return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer()
|
||||
}
|
||||
|
||||
// NetworkName returns the name of the network stack.
|
||||
func (n NetworkMode) NetworkName() string {
|
||||
switch {
|
||||
case n.IsDefault():
|
||||
return network.NetworkDefault
|
||||
case n.IsBridge():
|
||||
return network.NetworkBridge
|
||||
case n.IsHost():
|
||||
return network.NetworkHost
|
||||
case n.IsNone():
|
||||
return network.NetworkNone
|
||||
case n.IsContainer():
|
||||
return "container"
|
||||
case n.IsUserDefined():
|
||||
return n.UserDefined()
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,11 +2,6 @@ package container // import "github.com/docker/docker/api/types/container"
|
||||
|
||||
import "github.com/docker/docker/api/types/network"
|
||||
|
||||
// IsValid indicates if an isolation technology is valid
|
||||
func (i Isolation) IsValid() bool {
|
||||
return i.IsDefault() || i.IsHyperV() || i.IsProcess()
|
||||
}
|
||||
|
||||
// IsBridge indicates whether container uses the bridge network stack
|
||||
// in windows it is given the name NAT
|
||||
func (n NetworkMode) IsBridge() bool {
|
||||
@@ -24,24 +19,24 @@ func (n NetworkMode) IsUserDefined() bool {
|
||||
return !n.IsDefault() && !n.IsNone() && !n.IsBridge() && !n.IsContainer()
|
||||
}
|
||||
|
||||
// IsValid indicates if an isolation technology is valid
|
||||
func (i Isolation) IsValid() bool {
|
||||
return i.IsDefault() || i.IsHyperV() || i.IsProcess()
|
||||
}
|
||||
|
||||
// NetworkName returns the name of the network stack.
|
||||
func (n NetworkMode) NetworkName() string {
|
||||
switch {
|
||||
case n.IsDefault():
|
||||
if n.IsDefault() {
|
||||
return network.NetworkDefault
|
||||
case n.IsBridge():
|
||||
} else if n.IsBridge() {
|
||||
return network.NetworkNat
|
||||
case n.IsHost():
|
||||
// Windows currently doesn't support host network-mode, so
|
||||
// this would currently never happen..
|
||||
return network.NetworkHost
|
||||
case n.IsNone():
|
||||
} else if n.IsNone() {
|
||||
return network.NetworkNone
|
||||
case n.IsContainer():
|
||||
} else if n.IsContainer() {
|
||||
return "container"
|
||||
case n.IsUserDefined():
|
||||
} else if n.IsUserDefined() {
|
||||
return n.UserDefined()
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
@@ -1,5 +1,4 @@
package events // import "github.com/docker/docker/api/types/events"
import "github.com/docker/docker/api/types/filters"

// Type is used for event-types.
type Type string
@@ -126,10 +125,3 @@ type Message struct {
Time int64 `json:"time,omitempty"`
TimeNano int64 `json:"timeNano,omitempty"`
}

// ListOptions holds parameters to filter events with.
type ListOptions struct {
Since string
Until string
Filters filters.Args
}
@@ -1,47 +1,9 @@
|
||||
package image
|
||||
|
||||
import (
|
||||
"io"
|
||||
"time"
|
||||
)
|
||||
import "time"
|
||||
|
||||
// Metadata contains engine-local data about the image.
|
||||
type Metadata struct {
|
||||
// LastTagTime is the date and time at which the image was last tagged.
|
||||
LastTagTime time.Time `json:",omitempty"`
|
||||
}
|
||||
|
||||
// PruneReport contains the response for Engine API:
|
||||
// POST "/images/prune"
|
||||
type PruneReport struct {
|
||||
ImagesDeleted []DeleteResponse
|
||||
SpaceReclaimed uint64
|
||||
}
|
||||
|
||||
// LoadResponse returns information to the client about a load process.
|
||||
//
|
||||
// TODO(thaJeztah): remove this type, and just use an io.ReadCloser
|
||||
//
|
||||
// This type was added in https://github.com/moby/moby/pull/18878, related
|
||||
// to https://github.com/moby/moby/issues/19177;
|
||||
//
|
||||
// Make docker load to output json when the response content type is json
|
||||
// Swarm hijacks the response from docker load and returns JSON rather
|
||||
// than plain text like the Engine does. This makes the API library to return
|
||||
// information to figure that out.
|
||||
//
|
||||
// However the "load" endpoint unconditionally returns JSON;
|
||||
// https://github.com/moby/moby/blob/7b9d2ef6e5518a3d3f3cc418459f8df786cfbbd1/api/server/router/image/image_routes.go#L248-L255
|
||||
//
|
||||
// PR https://github.com/moby/moby/pull/21959 made the response-type depend
|
||||
// on whether "quiet" was set, but this logic got changed in a follow-up
|
||||
// https://github.com/moby/moby/pull/25557, which made the JSON response-type
|
||||
// unconditional, but the output produced depends on whether "quiet" was set.
|
||||
//
|
||||
// We should deprecate the "quiet" option, as it's really a client
|
||||
// responsibility.
|
||||
type LoadResponse struct {
|
||||
// Body must be closed to avoid a resource leak
|
||||
Body io.ReadCloser
|
||||
JSON bool
|
||||
}
|
||||
|
||||
@@ -1,99 +0,0 @@
|
||||
package image
|
||||
|
||||
import (
|
||||
"github.com/opencontainers/go-digest"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
type ManifestKind string
|
||||
|
||||
const (
|
||||
ManifestKindImage ManifestKind = "image"
|
||||
ManifestKindAttestation ManifestKind = "attestation"
|
||||
ManifestKindUnknown ManifestKind = "unknown"
|
||||
)
|
||||
|
||||
type ManifestSummary struct {
|
||||
// ID is the content-addressable ID of an image and is the same as the
|
||||
// digest of the image manifest.
|
||||
//
|
||||
// Required: true
|
||||
ID string `json:"ID"`
|
||||
|
||||
// Descriptor is the OCI descriptor of the image.
|
||||
//
|
||||
// Required: true
|
||||
Descriptor ocispec.Descriptor `json:"Descriptor"`
|
||||
|
||||
// Indicates whether all the child content (image config, layers) is
|
||||
// fully available locally
|
||||
//
|
||||
// Required: true
|
||||
Available bool `json:"Available"`
|
||||
|
||||
// Size is the size information of the content related to this manifest.
|
||||
// Note: These sizes only take the locally available content into account.
|
||||
//
|
||||
// Required: true
|
||||
Size struct {
|
||||
// Content is the size (in bytes) of all the locally present
|
||||
// content in the content store (e.g. image config, layers)
|
||||
// referenced by this manifest and its children.
|
||||
// This only includes blobs in the content store.
|
||||
Content int64 `json:"Content"`
|
||||
|
||||
// Total is the total size (in bytes) of all the locally present
|
||||
// data (both distributable and non-distributable) that's related to
|
||||
// this manifest and its children.
|
||||
// This is equal to the sum of [Content] size AND all the sizes in the
|
||||
// [Size] struct present in the Kind-specific data struct.
|
||||
// For example, for an image kind (Kind == ManifestKindImage),
|
||||
// this would include the size of the image content and unpacked
|
||||
// image snapshots ([Size.Content] + [ImageData.Size.Unpacked]).
|
||||
Total int64 `json:"Total"`
|
||||
} `json:"Size"`
|
||||
|
||||
// Kind is the kind of the image manifest.
|
||||
//
|
||||
// Required: true
|
||||
Kind ManifestKind `json:"Kind"`
|
||||
|
||||
// Fields below are specific to the kind of the image manifest.
|
||||
|
||||
// Present only if Kind == ManifestKindImage.
|
||||
ImageData *ImageProperties `json:"ImageData,omitempty"`
|
||||
|
||||
// Present only if Kind == ManifestKindAttestation.
|
||||
AttestationData *AttestationProperties `json:"AttestationData,omitempty"`
|
||||
}
|
||||
|
||||
type ImageProperties struct {
|
||||
// Platform is the OCI platform object describing the platform of the image.
|
||||
//
|
||||
// Required: true
|
||||
Platform ocispec.Platform `json:"Platform"`
|
||||
|
||||
Size struct {
|
||||
// Unpacked is the size (in bytes) of the locally unpacked
|
||||
// (uncompressed) image content that's directly usable by the containers
|
||||
// running this image.
|
||||
// It's independent of the distributable content - e.g.
|
||||
// the image might still have an unpacked data that's still used by
|
||||
// some container even when the distributable/compressed content is
|
||||
// already gone.
|
||||
//
|
||||
// Required: true
|
||||
Unpacked int64 `json:"Unpacked"`
|
||||
}
|
||||
|
||||
// Containers is an array containing the IDs of the containers that are
|
||||
// using this image.
|
||||
//
|
||||
// Required: true
|
||||
Containers []string `json:"Containers"`
|
||||
}
|
||||
|
||||
type AttestationProperties struct {
|
||||
// For is the digest of the image manifest that this attestation is for.
|
||||
For digest.Digest `json:"For"`
|
||||
}
|
||||
@@ -1,88 +1,9 @@
|
||||
package image
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
import ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
// ImportSource holds source information for ImageImport
|
||||
type ImportSource struct {
|
||||
Source io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this.
|
||||
SourceName string // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute.
|
||||
}
|
||||
|
||||
// ImportOptions holds information to import images from the client host.
|
||||
type ImportOptions struct {
|
||||
Tag string // Tag is the name to tag this image with. This attribute is deprecated.
|
||||
Message string // Message is the message to tag the image with
|
||||
Changes []string // Changes are the raw changes to apply to this image
|
||||
Platform string // Platform is the target platform of the image
|
||||
}
|
||||
|
||||
// CreateOptions holds information to create images.
|
||||
type CreateOptions struct {
|
||||
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry.
|
||||
Platform string // Platform is the target platform of the image if it needs to be pulled from the registry.
|
||||
}
|
||||
|
||||
// PullOptions holds information to pull images.
|
||||
type PullOptions struct {
|
||||
All bool
|
||||
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
|
||||
|
||||
// PrivilegeFunc is a function that clients can supply to retry operations
|
||||
// after getting an authorization error. This function returns the registry
|
||||
// authentication header value in base64 encoded format, or an error if the
|
||||
// privilege request fails.
|
||||
//
|
||||
// Also see [github.com/docker/docker/api/types.RequestPrivilegeFunc].
|
||||
PrivilegeFunc func(context.Context) (string, error)
|
||||
Platform string
|
||||
}
|
||||
|
||||
// PushOptions holds information to push images.
|
||||
type PushOptions struct {
|
||||
All bool
|
||||
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
|
||||
|
||||
// PrivilegeFunc is a function that clients can supply to retry operations
|
||||
// after getting an authorization error. This function returns the registry
|
||||
// authentication header value in base64 encoded format, or an error if the
|
||||
// privilege request fails.
|
||||
//
|
||||
// Also see [github.com/docker/docker/api/types.RequestPrivilegeFunc].
|
||||
PrivilegeFunc func(context.Context) (string, error)
|
||||
|
||||
// Platform is an optional field that selects a specific platform to push
|
||||
// when the image is a multi-platform image.
|
||||
// Using this will only push a single platform-specific manifest.
|
||||
Platform *ocispec.Platform `json:",omitempty"`
|
||||
}
|
||||
|
||||
// ListOptions holds parameters to list images with.
|
||||
type ListOptions struct {
|
||||
// All controls whether all images in the graph are filtered, or just
|
||||
// the heads.
|
||||
All bool
|
||||
|
||||
// Filters is a JSON-encoded set of filter arguments.
|
||||
Filters filters.Args
|
||||
|
||||
// SharedSize indicates whether the shared size of images should be computed.
|
||||
SharedSize bool
|
||||
|
||||
// ContainerCount indicates whether container count should be computed.
|
||||
ContainerCount bool
|
||||
|
||||
// Manifests indicates whether the image manifests should be returned.
|
||||
Manifests bool
|
||||
}
|
||||
|
||||
// RemoveOptions holds parameters to remove images.
|
||||
type RemoveOptions struct {
|
||||
Force bool
|
||||
PruneChildren bool
|
||||
// GetImageOpts holds parameters to inspect an image.
|
||||
type GetImageOpts struct {
|
||||
Platform *ocispec.Platform
|
||||
Details bool
|
||||
}
|
||||
|
||||
@@ -1,5 +1,10 @@
|
||||
package image
|
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
// Summary summary
|
||||
// swagger:model Summary
|
||||
type Summary struct {
|
||||
|
||||
// Number of containers using this image. Includes both stopped and running
|
||||
@@ -42,14 +47,6 @@ type Summary struct {
|
||||
// Required: true
|
||||
ParentID string `json:"ParentId"`
|
||||
|
||||
// Manifests is a list of image manifests available in this image. It
|
||||
// provides a more detailed view of the platform-specific image manifests or
|
||||
// other image-attached data like build attestations.
|
||||
//
|
||||
// WARNING: This is experimental and may change at any time without any backward
|
||||
// compatibility.
|
||||
Manifests []ManifestSummary `json:"Manifests,omitempty"`
|
||||
|
||||
// List of content-addressable digests of locally available image manifests
|
||||
// that the image is referenced from. Multiple manifests can refer to the
|
||||
// same image.
|
||||
|
||||
@@ -96,7 +96,6 @@ type BindOptions struct {
|
||||
type VolumeOptions struct {
|
||||
NoCopy bool `json:",omitempty"`
|
||||
Labels map[string]string `json:",omitempty"`
|
||||
Subpath string `json:",omitempty"`
|
||||
DriverConfig *Driver `json:",omitempty"`
|
||||
}
|
||||
|
||||
@@ -119,11 +118,7 @@ type TmpfsOptions struct {
|
||||
SizeBytes int64 `json:",omitempty"`
|
||||
// Mode of the tmpfs upon creation
|
||||
Mode os.FileMode `json:",omitempty"`
|
||||
// Options to be passed to the tmpfs mount. An array of arrays. Flag
|
||||
// options should be provided as 1-length arrays. Other types should be
|
||||
// provided as 2-length arrays, where the first item is the key and the
|
||||
// second the value.
|
||||
Options [][]string `json:",omitempty"`
|
||||
|
||||
// TODO(stevvooe): There are several more tmpfs flags, specified in the
|
||||
// daemon, that are accepted. Only the most basic are added for now.
|
||||
//
|
||||
|
||||
@@ -1,19 +0,0 @@
package network

// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command

// CreateResponse NetworkCreateResponse
//
// OK response to NetworkCreate operation
// swagger:model CreateResponse
type CreateResponse struct {

// The ID of the created network.
// Required: true
ID string `json:"Id"`

// Warnings encountered when creating the container
// Required: true
Warning string `json:"Warning"`
}
@@ -18,7 +18,6 @@ type EndpointSettings struct {
|
||||
// Once the container is running, it becomes operational data (it may contain a
|
||||
// generated address).
|
||||
MacAddress string
|
||||
DriverOpts map[string]string
|
||||
// Operational data
|
||||
NetworkID string
|
||||
EndpointID string
|
||||
@@ -28,6 +27,7 @@ type EndpointSettings struct {
|
||||
IPv6Gateway string
|
||||
GlobalIPv6Address string
|
||||
GlobalIPv6PrefixLen int
|
||||
DriverOpts map[string]string
|
||||
// DNSNames holds all the (non fully qualified) DNS names associated to this endpoint. First entry is used to
|
||||
// generate PTR records.
|
||||
DNSNames []string
|
||||
|
||||
@@ -102,6 +102,7 @@ func TestEndpointIPAMConfigWithOutOfRangeAddrs(t *testing.T) {
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestEndpointIPAMConfigWithInvalidConfig(t *testing.T) {
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
package network // import "github.com/docker/docker/api/types/network"
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
)
|
||||
|
||||
@@ -19,82 +17,6 @@ const (
|
||||
NetworkNat = "nat"
|
||||
)
|
||||
|
||||
// CreateRequest is the request message sent to the server for network create call.
|
||||
type CreateRequest struct {
|
||||
CreateOptions
|
||||
Name string // Name is the requested name of the network.
|
||||
|
||||
// Deprecated: CheckDuplicate is deprecated since API v1.44, but it defaults to true when sent by the client
|
||||
// package to older daemons.
|
||||
CheckDuplicate *bool `json:",omitempty"`
|
||||
}
|
||||
|
||||
// CreateOptions holds options to create a network.
|
||||
type CreateOptions struct {
|
||||
Driver string // Driver is the driver-name used to create the network (e.g. `bridge`, `overlay`)
|
||||
Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level).
|
||||
EnableIPv6 *bool `json:",omitempty"` // EnableIPv6 represents whether to enable IPv6.
|
||||
IPAM *IPAM // IPAM is the network's IP Address Management.
|
||||
Internal bool // Internal represents if the network is used internal only.
|
||||
Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode.
|
||||
Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster.
|
||||
ConfigOnly bool // ConfigOnly creates a config-only network. Config-only networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services.
|
||||
ConfigFrom *ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network. The specified network must be a config-only network; see [CreateOptions.ConfigOnly].
|
||||
Options map[string]string // Options specifies the network-specific options to use for when creating the network.
|
||||
Labels map[string]string // Labels holds metadata specific to the network being created.
|
||||
}
|
||||
|
||||
// ListOptions holds parameters to filter the list of networks with.
|
||||
type ListOptions struct {
|
||||
Filters filters.Args
|
||||
}
|
||||
|
||||
// InspectOptions holds parameters to inspect network.
|
||||
type InspectOptions struct {
|
||||
Scope string
|
||||
Verbose bool
|
||||
}
|
||||
|
||||
// ConnectOptions represents the data to be used to connect a container to the
|
||||
// network.
|
||||
type ConnectOptions struct {
|
||||
Container string
|
||||
EndpointConfig *EndpointSettings `json:",omitempty"`
|
||||
}
|
||||
|
||||
// DisconnectOptions represents the data to be used to disconnect a container
|
||||
// from the network.
|
||||
type DisconnectOptions struct {
|
||||
Container string
|
||||
Force bool
|
||||
}
|
||||
|
||||
// Inspect is the body of the "get network" http response message.
|
||||
type Inspect struct {
|
||||
Name string // Name is the name of the network
|
||||
ID string `json:"Id"` // ID uniquely identifies a network on a single machine
|
||||
Created time.Time // Created is the time the network was created
|
||||
Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level)
|
||||
Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`)
|
||||
EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6
|
||||
IPAM IPAM // IPAM is the network's IP Address Management
|
||||
Internal bool // Internal represents if the network is used internal only
|
||||
Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode.
|
||||
Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster.
|
||||
ConfigFrom ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network.
|
||||
ConfigOnly bool // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services.
|
||||
Containers map[string]EndpointResource // Containers contains endpoints belonging to the network
|
||||
Options map[string]string // Options holds the network specific options to use for when creating the network
|
||||
Labels map[string]string // Labels holds metadata specific to the network being created
|
||||
Peers []PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network
|
||||
Services map[string]ServiceInfo `json:",omitempty"`
|
||||
}
|
||||
|
||||
// Summary is used as response when listing networks. It currently is an alias
|
||||
// for [Inspect], but may diverge in the future, as not all information may
|
||||
// be included when listing networks.
|
||||
type Summary = Inspect
|
||||
|
||||
// Address represents an IP address
|
||||
type Address struct {
|
||||
Addr string
|
||||
@@ -123,16 +45,6 @@ type ServiceInfo struct {
|
||||
Tasks []Task
|
||||
}
|
||||
|
||||
// EndpointResource contains network resources allocated and used for a
|
||||
// container in a network.
|
||||
type EndpointResource struct {
|
||||
Name string
|
||||
EndpointID string
|
||||
MacAddress string
|
||||
IPv4Address string
|
||||
IPv6Address string
|
||||
}
|
||||
|
||||
// NetworkingConfig represents the container's networking configuration for each of its interfaces
|
||||
// Carries the networking configs specified in the `docker run` and `docker network connect` commands
|
||||
type NetworkingConfig struct {
|
||||
@@ -158,9 +70,3 @@ var acceptedFilters = map[string]bool{
|
||||
func ValidateFilters(filter filters.Args) error {
|
||||
return filter.Validate(acceptedFilters)
|
||||
}
|
||||
|
||||
// PruneReport contains the response for Engine API:
|
||||
// POST "/networks/prune"
|
||||
type PruneReport struct {
|
||||
NetworksDeleted []string
|
||||
}
|
||||
|
||||
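ValidateFilters above checks a filters.Args value against the acceptedFilters whitelist. The following is a simplified, self-contained sketch of that validation idea; the args map and validate function are stand-ins, not the moby filters package:

package main

import (
    "fmt"
    "sort"
)

// args is a simplified stand-in for filters.Args: filter name -> values.
type args map[string][]string

// validate rejects any filter key that is not in the accepted set, mirroring
// what ValidateFilters does for the network list endpoint.
func validate(a args, accepted map[string]bool) error {
    var bad []string
    for name := range a {
        if !accepted[name] {
            bad = append(bad, name)
        }
    }
    if len(bad) > 0 {
        sort.Strings(bad)
        return fmt.Errorf("invalid filter(s): %v", bad)
    }
    return nil
}

func main() {
    accepted := map[string]bool{
        "driver": true, "type": true, "name": true, "id": true,
        "label": true, "scope": true, "dangling": true,
    }
    fmt.Println(validate(args{"driver": {"bridge"}}, accepted)) // <nil>
    fmt.Println(validate(args{"color": {"blue"}}, accepted))    // invalid filter(s): [color]
}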
@@ -34,9 +34,10 @@ type AuthConfig struct {
|
||||
}
|
||||
|
||||
// EncodeAuthConfig serializes the auth configuration as a base64url encoded
|
||||
// ([RFC4648, section 5]) JSON string for sending through the X-Registry-Auth header.
|
||||
// RFC4648, section 5) JSON string for sending through the X-Registry-Auth header.
|
||||
//
|
||||
// [RFC4648, section 5]: https://tools.ietf.org/html/rfc4648#section-5
|
||||
// For details on base64url encoding, see:
|
||||
// - RFC4648, section 5: https://tools.ietf.org/html/rfc4648#section-5
|
||||
func EncodeAuthConfig(authConfig AuthConfig) (string, error) {
|
||||
buf, err := json.Marshal(authConfig)
|
||||
if err != nil {
|
||||
@@ -45,14 +46,15 @@ func EncodeAuthConfig(authConfig AuthConfig) (string, error) {
|
||||
return base64.URLEncoding.EncodeToString(buf), nil
|
||||
}
|
||||
|
||||
// DecodeAuthConfig decodes base64url encoded ([RFC4648, section 5]) JSON
|
||||
// DecodeAuthConfig decodes base64url encoded (RFC4648, section 5) JSON
|
||||
// authentication information as sent through the X-Registry-Auth header.
|
||||
//
|
||||
// This function always returns an [AuthConfig], even if an error occurs. It is up
|
||||
// This function always returns an AuthConfig, even if an error occurs. It is up
|
||||
// to the caller to decide if authentication is required, and if the error can
|
||||
// be ignored.
|
||||
//
|
||||
// [RFC4648, section 5]: https://tools.ietf.org/html/rfc4648#section-5
|
||||
// For details on base64url encoding, see:
|
||||
// - RFC4648, section 5: https://tools.ietf.org/html/rfc4648#section-5
|
||||
func DecodeAuthConfig(authEncoded string) (*AuthConfig, error) {
|
||||
if authEncoded == "" {
|
||||
return &AuthConfig{}, nil
|
||||
@@ -67,7 +69,7 @@ func DecodeAuthConfig(authEncoded string) (*AuthConfig, error) {
|
||||
// clients and API versions. Current clients and API versions expect authentication
|
||||
// to be provided through the X-Registry-Auth header.
|
||||
//
|
||||
// Like [DecodeAuthConfig], this function always returns an [AuthConfig], even if an
|
||||
// Like DecodeAuthConfig, this function always returns an AuthConfig, even if an
|
||||
// error occurs. It is up to the caller to decide if authentication is required,
|
||||
// and if the error can be ignored.
|
||||
func DecodeAuthConfigBody(rdr io.ReadCloser) (*AuthConfig, error) {
|
||||
|
||||
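As the EncodeAuthConfig and DecodeAuthConfig comments above describe, the X-Registry-Auth header carries a base64url (RFC 4648, section 5) encoding of the JSON-serialized auth configuration. Below is a minimal round-trip sketch using only the standard library; authConfig is a trimmed stand-in for registry.AuthConfig.

package main

import (
    "encoding/base64"
    "encoding/json"
    "fmt"
)

// authConfig is a trimmed stand-in for registry.AuthConfig.
type authConfig struct {
    Username      string `json:"username,omitempty"`
    Password      string `json:"password,omitempty"`
    ServerAddress string `json:"serveraddress,omitempty"`
}

// encodeAuthConfig serializes the config to JSON and base64url-encodes it,
// producing the value sent in the X-Registry-Auth header.
func encodeAuthConfig(ac authConfig) (string, error) {
    buf, err := json.Marshal(ac)
    if err != nil {
        return "", err
    }
    return base64.URLEncoding.EncodeToString(buf), nil
}

// decodeAuthConfig reverses the encoding done by encodeAuthConfig.
func decodeAuthConfig(s string) (authConfig, error) {
    var ac authConfig
    buf, err := base64.URLEncoding.DecodeString(s)
    if err != nil {
        return ac, err
    }
    err = json.Unmarshal(buf, &ac)
    return ac, err
}

func main() {
    header, _ := encodeAuthConfig(authConfig{Username: "u", Password: "p", ServerAddress: "registry.example.com"})
    back, _ := decodeAuthConfig(header)
    fmt.Println(header)
    fmt.Println(back.Username, back.ServerAddress)
}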
@@ -84,6 +84,32 @@ type IndexInfo struct {
|
||||
Official bool
|
||||
}
|
||||
|
||||
// SearchResult describes a search result returned from a registry
|
||||
type SearchResult struct {
|
||||
// StarCount indicates the number of stars this repository has
|
||||
StarCount int `json:"star_count"`
|
||||
// IsOfficial is true if the result is from an official repository.
|
||||
IsOfficial bool `json:"is_official"`
|
||||
// Name is the name of the repository
|
||||
Name string `json:"name"`
|
||||
// IsAutomated indicates whether the result is automated.
|
||||
//
|
||||
// Deprecated: the "is_automated" field is deprecated and will always be "false" in the future.
|
||||
IsAutomated bool `json:"is_automated"`
|
||||
// Description is a textual description of the repository
|
||||
Description string `json:"description"`
|
||||
}
|
||||
|
||||
// SearchResults lists a collection search results returned from a registry
|
||||
type SearchResults struct {
|
||||
// Query contains the query string that generated the search results
|
||||
Query string `json:"query"`
|
||||
// NumResults indicates the number of results the query returned
|
||||
NumResults int `json:"num_results"`
|
||||
// Results is a slice containing the actual results for the search
|
||||
Results []SearchResult `json:"results"`
|
||||
}
|
||||
|
||||
// DistributionInspect describes the result obtained from contacting the
|
||||
// registry to retrieve image metadata
|
||||
type DistributionInspect struct {
|
||||
|
||||
@@ -1,47 +0,0 @@
|
||||
package registry
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
)
|
||||
|
||||
// SearchOptions holds parameters to search images with.
|
||||
type SearchOptions struct {
|
||||
RegistryAuth string
|
||||
|
||||
// PrivilegeFunc is a [types.RequestPrivilegeFunc] the client can
|
||||
// supply to retry operations after getting an authorization error.
|
||||
//
|
||||
// It must return the registry authentication header value in base64
|
||||
// format, or an error if the privilege request fails.
|
||||
PrivilegeFunc func(context.Context) (string, error)
|
||||
Filters filters.Args
|
||||
Limit int
|
||||
}
|
||||
|
||||
// SearchResult describes a search result returned from a registry
|
||||
type SearchResult struct {
|
||||
// StarCount indicates the number of stars this repository has
|
||||
StarCount int `json:"star_count"`
|
||||
// IsOfficial is true if the result is from an official repository.
|
||||
IsOfficial bool `json:"is_official"`
|
||||
// Name is the name of the repository
|
||||
Name string `json:"name"`
|
||||
// IsAutomated indicates whether the result is automated.
|
||||
//
|
||||
// Deprecated: the "is_automated" field is deprecated and will always be "false".
|
||||
IsAutomated bool `json:"is_automated"`
|
||||
// Description is a textual description of the repository
|
||||
Description string `json:"description"`
|
||||
}
|
||||
|
||||
// SearchResults lists a collection search results returned from a registry
|
||||
type SearchResults struct {
|
||||
// Query contains the query string that generated the search results
|
||||
Query string `json:"query"`
|
||||
// NumResults indicates the number of results the query returned
|
||||
NumResults int `json:"num_results"`
|
||||
// Results is a slice containing the actual results for the search
|
||||
Results []SearchResult `json:"results"`
|
||||
}
|
||||
@@ -1,4 +1,6 @@
|
||||
package container
|
||||
// Package types is used for API stability in the types and response to the
|
||||
// consumers of the API stats endpoint.
|
||||
package types // import "github.com/docker/docker/api/types"
|
||||
|
||||
import "time"
|
||||
|
||||
@@ -167,10 +169,8 @@ type Stats struct {
|
||||
MemoryStats MemoryStats `json:"memory_stats,omitempty"`
|
||||
}
|
||||
|
||||
// StatsResponse is newly used Networks.
|
||||
//
|
||||
// TODO(thaJeztah): unify with [Stats]. This wrapper was to account for pre-api v1.21 changes, see https://github.com/moby/moby/commit/d3379946ec96fb6163cb8c4517d7d5a067045801
|
||||
type StatsResponse struct {
|
||||
// StatsJSON is newly used Networks
|
||||
type StatsJSON struct {
|
||||
Stats
|
||||
|
||||
Name string `json:"name,omitempty"`
|
||||
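The stats types above wrap the GET /stats endpoint, which streams one JSON document per sample. A small sketch of decoding such a stream with json.Decoder; statsSample keeps only a couple of fields and is not the full Stats/StatsJSON type:

package main

import (
    "encoding/json"
    "fmt"
    "strings"
)

// statsSample keeps just a couple of fields from the stats payload; the real
// type carries CPU, memory, network and block I/O statistics.
type statsSample struct {
    Name     string `json:"name,omitempty"`
    ID       string `json:"id,omitempty"`
    NumProcs uint32 `json:"num_procs"`
}

func main() {
    // Stand-in for the response body of GET /containers/{id}/stats?stream=1:
    // a stream of newline-separated JSON documents.
    body := strings.NewReader(
        `{"name":"/web","id":"abc123","num_procs":4}` + "\n" +
            `{"name":"/web","id":"abc123","num_procs":5}` + "\n")

    dec := json.NewDecoder(body)
    for dec.More() {
        var s statsSample
        if err := dec.Decode(&s); err != nil {
            fmt.Println("decode error:", err)
            return
        }
        fmt.Printf("%s: %d procs\n", s.Name, s.NumProcs)
    }
}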
@@ -5,6 +5,7 @@ import (
|
||||
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/mount"
|
||||
"github.com/docker/go-units"
|
||||
)
|
||||
|
||||
// DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf)
|
||||
@@ -114,6 +115,5 @@ type ContainerSpec struct {
|
||||
Sysctls map[string]string `json:",omitempty"`
|
||||
CapabilityAdd []string `json:",omitempty"`
|
||||
CapabilityDrop []string `json:",omitempty"`
|
||||
Ulimits []*container.Ulimit `json:",omitempty"`
|
||||
OomScoreAdj int64 `json:",omitempty"`
|
||||
Ulimits []*units.Ulimit `json:",omitempty"`
|
||||
}
|
||||
|
||||
@@ -75,7 +75,8 @@ type Info struct {
|
||||
DefaultAddressPools []NetworkAddressPool `json:",omitempty"`
|
||||
CDISpecDirs []string
|
||||
|
||||
Containerd *ContainerdInfo `json:",omitempty"`
|
||||
// Legacy API fields for older API versions.
|
||||
legacyFields
|
||||
|
||||
// Warnings contains a slice of warnings that occurred while collecting
|
||||
// system information. These warnings are intended to be informational
|
||||
@@ -84,41 +85,8 @@ type Info struct {
|
||||
Warnings []string
|
||||
}
|
||||
|
||||
// ContainerdInfo holds information about the containerd instance used by the daemon.
|
||||
type ContainerdInfo struct {
|
||||
// Address is the path to the containerd socket.
|
||||
Address string `json:",omitempty"`
|
||||
// Namespaces is the containerd namespaces used by the daemon.
|
||||
Namespaces ContainerdNamespaces
|
||||
}
|
||||
|
||||
// ContainerdNamespaces reflects the containerd namespaces used by the daemon.
|
||||
//
|
||||
// These namespaces can be configured in the daemon configuration, and are
|
||||
// considered to be used exclusively by the daemon,
|
||||
//
|
||||
// As these namespaces are considered to be exclusively accessed
|
||||
// by the daemon, it is not recommended to change these values,
|
||||
// or to change them to a value that is used by other systems,
|
||||
// such as cri-containerd.
|
||||
type ContainerdNamespaces struct {
|
||||
// Containers holds the default containerd namespace used for
|
||||
// containers managed by the daemon.
|
||||
//
|
||||
// The default namespace for containers is "moby", but will be
|
||||
// suffixed with the `<uid>.<gid>` of the remapped `root` if
|
||||
// user-namespaces are enabled and the containerd image-store
|
||||
// is used.
|
||||
Containers string
|
||||
|
||||
// Plugins holds the default containerd namespace used for
|
||||
// plugins managed by the daemon.
|
||||
//
|
||||
// The default namespace for plugins is "moby", but will be
|
||||
// suffixed with the `<uid>.<gid>` of the remapped `root` if
|
||||
// user-namespaces are enabled and the containerd image-store
|
||||
// is used.
|
||||
Plugins string
|
||||
type legacyFields struct {
|
||||
ExecutionDriver string `json:",omitempty"` // Deprecated: deprecated since API v1.25, but returned for older versions.
|
||||
}
|
||||
|
||||
// PluginsInfo is a temp struct holding Plugins name
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
package types // import "github.com/docker/docker/api/types"
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types/container"
|
||||
@@ -70,17 +72,14 @@ type ImageInspect struct {
|
||||
|
||||
// Created is the date and time at which the image was created, formatted in
|
||||
// RFC 3339 nano-seconds (time.RFC3339Nano).
|
||||
//
|
||||
// This information is only available if present in the image,
|
||||
// and omitted otherwise.
|
||||
Created string `json:",omitempty"`
|
||||
Created string
|
||||
|
||||
// Container is the ID of the container that was used to create the image.
|
||||
//
|
||||
// Depending on how the image was created, this field may be empty.
|
||||
//
|
||||
// Deprecated: this field is omitted in API v1.45, but kept for backward compatibility.
|
||||
Container string `json:",omitempty"`
|
||||
Container string
|
||||
|
||||
// ContainerConfig is an optional field containing the configuration of the
|
||||
// container that was last committed when creating the image.
|
||||
@@ -89,7 +88,7 @@ type ImageInspect struct {
|
||||
// and it is not in active use anymore.
|
||||
//
|
||||
// Deprecated: this field is omitted in API v1.45, but kept for backward compatibility.
|
||||
ContainerConfig *container.Config `json:",omitempty"`
|
||||
ContainerConfig *container.Config
|
||||
|
||||
// DockerVersion is the version of Docker that was used to build the image.
|
||||
//
|
||||
@@ -153,13 +152,36 @@ type Container struct {
|
||||
State string
|
||||
Status string
|
||||
HostConfig struct {
|
||||
NetworkMode string `json:",omitempty"`
|
||||
Annotations map[string]string `json:",omitempty"`
|
||||
NetworkMode string `json:",omitempty"`
|
||||
}
|
||||
NetworkSettings *SummaryNetworkSettings
|
||||
Mounts []MountPoint
|
||||
}
|
||||
|
||||
// CopyConfig contains request body of Engine API:
|
||||
// POST "/containers/"+containerID+"/copy"
|
||||
type CopyConfig struct {
|
||||
Resource string
|
||||
}
|
||||
|
||||
// ContainerPathStat is used to encode the header from
|
||||
// GET "/containers/{name:.*}/archive"
|
||||
// "Name" is the file or directory name.
|
||||
type ContainerPathStat struct {
|
||||
Name string `json:"name"`
|
||||
Size int64 `json:"size"`
|
||||
Mode os.FileMode `json:"mode"`
|
||||
Mtime time.Time `json:"mtime"`
|
||||
LinkTarget string `json:"linkTarget"`
|
||||
}
|
||||
|
||||
// ContainerStats contains response of Engine API:
|
||||
// GET "/stats"
|
||||
type ContainerStats struct {
|
||||
Body io.ReadCloser `json:"body"`
|
||||
OSType string `json:"ostype"`
|
||||
}
|
||||
|
||||
// Ping contains response of Engine API:
|
||||
// GET "/_ping"
|
||||
type Ping struct {
|
||||
@@ -205,6 +227,17 @@ type Version struct {
|
||||
BuildTime string `json:",omitempty"`
|
||||
}
|
||||
|
||||
// ExecStartCheck is a temp struct used by execStart
|
||||
// Config fields is part of ExecConfig in runconfig package
|
||||
type ExecStartCheck struct {
|
||||
// ExecStart will first check if it's detached
|
||||
Detach bool
|
||||
// Check if there's a tty
|
||||
Tty bool
|
||||
// Terminal size [height, width], unused if Tty == false
|
||||
ConsoleSize *[2]uint `json:",omitempty"`
|
||||
}
|
||||
|
||||
// HealthcheckResult stores information about a single run of a healthcheck probe
|
||||
type HealthcheckResult struct {
|
||||
Start time.Time // Start is the time this check started
|
||||
@@ -245,6 +278,18 @@ type ContainerState struct {
|
||||
Health *Health `json:",omitempty"`
|
||||
}
|
||||
|
||||
// ContainerNode stores information about the node that a container
|
||||
// is running on. It's only used by the Docker Swarm standalone API
|
||||
type ContainerNode struct {
|
||||
ID string
|
||||
IPAddress string `json:"IP"`
|
||||
Addr string
|
||||
Name string
|
||||
Cpus int
|
||||
Memory int64
|
||||
Labels map[string]string
|
||||
}
|
||||
|
||||
// ContainerJSONBase contains response of Engine API:
|
||||
// GET "/containers/{name:.*}/json"
|
||||
type ContainerJSONBase struct {
|
||||
@@ -258,7 +303,7 @@ type ContainerJSONBase struct {
|
||||
HostnamePath string
|
||||
HostsPath string
|
||||
LogPath string
|
||||
Node *ContainerNode `json:",omitempty"` // Deprecated: Node was only propagated by Docker Swarm standalone API. It will be removed in the next release.
|
||||
Node *ContainerNode `json:",omitempty"` // Node is only propagated by Docker Swarm standalone API
|
||||
Name string
|
||||
RestartCount int
|
||||
Driver string
|
||||
@@ -375,6 +420,84 @@ type MountPoint struct {
|
||||
Propagation mount.Propagation
|
||||
}
|
||||
|
||||
// NetworkResource is the body of the "get network" http response message
|
||||
type NetworkResource struct {
|
||||
Name string // Name is the requested name of the network
|
||||
ID string `json:"Id"` // ID uniquely identifies a network on a single machine
|
||||
Created time.Time // Created is the time the network was created
|
||||
Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level)
|
||||
Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`)
|
||||
EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6
|
||||
IPAM network.IPAM // IPAM is the network's IP Address Management
|
||||
Internal bool // Internal represents if the network is used internal only
|
||||
Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode.
|
||||
Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster.
|
||||
ConfigFrom network.ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network.
|
||||
ConfigOnly bool // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services.
|
||||
Containers map[string]EndpointResource // Containers contains endpoints belonging to the network
|
||||
Options map[string]string // Options holds the network specific options to use for when creating the network
|
||||
Labels map[string]string // Labels holds metadata specific to the network being created
|
||||
Peers []network.PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network
|
||||
Services map[string]network.ServiceInfo `json:",omitempty"`
|
||||
}
|
||||
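A minimal sketch of decoding a "get network" response into a subset of these fields. The `networkSummary` type below is a hypothetical trimmed-down stand-in, not the real NetworkResource, so the snippet has no dependency on the full api/types tree; the payload is illustrative only.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// networkSummary is a hypothetical subset of NetworkResource used only for
// this example.
type networkSummary struct {
	Name   string
	ID     string `json:"Id"`
	Scope  string
	Driver string
}

func main() {
	// Trimmed-down payload in the shape of the "get network" response body.
	raw := []byte(`{"Name":"bridge","Id":"f2de39df4185","Scope":"local","Driver":"bridge"}`)

	var n networkSummary
	if err := json.Unmarshal(raw, &n); err != nil {
		panic(err)
	}
	fmt.Printf("%s (%s): driver=%s scope=%s\n", n.Name, n.ID, n.Driver, n.Scope)
}
```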
|
||||
// EndpointResource contains network resources allocated and used for a container in a network
|
||||
type EndpointResource struct {
|
||||
Name string
|
||||
EndpointID string
|
||||
MacAddress string
|
||||
IPv4Address string
|
||||
IPv6Address string
|
||||
}
|
||||
|
||||
// NetworkCreate is the expected body of the "create network" http request message
|
||||
type NetworkCreate struct {
|
||||
// Deprecated: CheckDuplicate is deprecated since API v1.44, but it defaults to true when sent by the client
|
||||
// package to older daemons.
|
||||
CheckDuplicate bool `json:",omitempty"`
|
||||
Driver string
|
||||
Scope string
|
||||
EnableIPv6 bool
|
||||
IPAM *network.IPAM
|
||||
Internal bool
|
||||
Attachable bool
|
||||
Ingress bool
|
||||
ConfigOnly bool
|
||||
ConfigFrom *network.ConfigReference
|
||||
Options map[string]string
|
||||
Labels map[string]string
|
||||
}
|
||||
|
||||
// NetworkCreateRequest is the request message sent to the server for network create call.
|
||||
type NetworkCreateRequest struct {
|
||||
NetworkCreate
|
||||
Name string
|
||||
}
|
||||
|
||||
// NetworkCreateResponse is the response message sent by the server for network create call
|
||||
type NetworkCreateResponse struct {
|
||||
ID string `json:"Id"`
|
||||
Warning string
|
||||
}
|
||||
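A sketch of how NetworkCreateRequest composes the request body: the create options are embedded, so their fields are flattened next to `Name` in the JSON object. The two types below are local stand-ins (the real ones carry IPAM and more fields), so the snippet runs without importing api/types.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Local stand-ins for NetworkCreate and NetworkCreateRequest.
type NetworkCreateBody struct {
	Driver     string
	EnableIPv6 bool
	Internal   bool
	Labels     map[string]string
}

type NetworkCreateRequestBody struct {
	NetworkCreateBody
	Name string
}

func main() {
	req := NetworkCreateRequestBody{
		NetworkCreateBody: NetworkCreateBody{
			Driver: "bridge",
			Labels: map[string]string{"com.example.stack": "demo"},
		},
		Name: "demo-net",
	}
	body, err := json.Marshal(req)
	if err != nil {
		panic(err)
	}
	// The embedded struct's fields appear at the top level, next to Name.
	fmt.Println(string(body))
}
```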
|
||||
// NetworkConnect represents the data to be used to connect a container to the network
|
||||
type NetworkConnect struct {
|
||||
Container string
|
||||
EndpointConfig *network.EndpointSettings `json:",omitempty"`
|
||||
}
|
||||
|
||||
// NetworkDisconnect represents the data to be used to disconnect a container from the network
|
||||
type NetworkDisconnect struct {
|
||||
Container string
|
||||
Force bool
|
||||
}
|
||||
|
||||
// NetworkInspectOptions holds parameters to inspect network
|
||||
type NetworkInspectOptions struct {
|
||||
Scope string
|
||||
Verbose bool
|
||||
}
|
||||
|
||||
// DiskUsageObject represents an object type used for disk usage query filtering.
|
||||
type DiskUsageObject string
|
||||
|
||||
@@ -407,6 +530,27 @@ type DiskUsage struct {
|
||||
BuilderSize int64 `json:",omitempty"` // Deprecated: deprecated in API 1.38, and no longer used since API 1.40.
|
||||
}
|
||||
|
||||
// ContainersPruneReport contains the response for Engine API:
|
||||
// POST "/containers/prune"
|
||||
type ContainersPruneReport struct {
|
||||
ContainersDeleted []string
|
||||
SpaceReclaimed uint64
|
||||
}
|
||||
|
||||
// VolumesPruneReport contains the response for Engine API:
|
||||
// POST "/volumes/prune"
|
||||
type VolumesPruneReport struct {
|
||||
VolumesDeleted []string
|
||||
SpaceReclaimed uint64
|
||||
}
|
||||
|
||||
// ImagesPruneReport contains the response for Engine API:
|
||||
// POST "/images/prune"
|
||||
type ImagesPruneReport struct {
|
||||
ImagesDeleted []image.DeleteResponse
|
||||
SpaceReclaimed uint64
|
||||
}
|
||||
|
||||
// BuildCachePruneReport contains the response for Engine API:
|
||||
// POST "/build/prune"
|
||||
type BuildCachePruneReport struct {
|
||||
@@ -414,6 +558,12 @@ type BuildCachePruneReport struct {
|
||||
SpaceReclaimed uint64
|
||||
}
|
||||
|
||||
// NetworksPruneReport contains the response for Engine API:
|
||||
// POST "/networks/prune"
|
||||
type NetworksPruneReport struct {
|
||||
NetworksDeleted []string
|
||||
}
|
||||
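The prune reports above all expose `SpaceReclaimed`; a small sketch of aggregating reclaimed space after several prune calls, with the reports redefined locally so the example stands alone.

```go
package main

import "fmt"

// Local stand-ins for the prune reports; only the field used here is kept.
type containersPruneReport struct{ SpaceReclaimed uint64 }
type imagesPruneReport struct{ SpaceReclaimed uint64 }
type volumesPruneReport struct{ SpaceReclaimed uint64 }

func main() {
	c := containersPruneReport{SpaceReclaimed: 10 << 20} // 10 MiB
	i := imagesPruneReport{SpaceReclaimed: 200 << 20}    // 200 MiB
	v := volumesPruneReport{SpaceReclaimed: 5 << 20}     // 5 MiB

	total := c.SpaceReclaimed + i.SpaceReclaimed + v.SpaceReclaimed
	fmt.Printf("reclaimed %d MiB in total\n", total>>20) // reclaimed 215 MiB in total
}
```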
|
||||
// SecretCreateResponse contains the information returned to a client
|
||||
// on the creation of a new secret.
|
||||
type SecretCreateResponse struct {
|
||||
|
||||
@@ -1,210 +1,138 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"github.com/docker/docker/api/types/checkpoint"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/events"
|
||||
"github.com/docker/docker/api/types/image"
|
||||
"github.com/docker/docker/api/types/network"
|
||||
"github.com/docker/docker/api/types/registry"
|
||||
"github.com/docker/docker/api/types/volume"
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
"github.com/docker/docker/api/types/system"
|
||||
)
|
||||
|
||||
// ImagesPruneReport contains the response for Engine API:
|
||||
// POST "/images/prune"
|
||||
// CheckpointCreateOptions holds parameters to create a checkpoint from a container.
|
||||
//
|
||||
// Deprecated: use [image.PruneReport].
|
||||
type ImagesPruneReport = image.PruneReport
|
||||
// Deprecated: use [checkpoint.CreateOptions].
|
||||
type CheckpointCreateOptions = checkpoint.CreateOptions
|
||||
|
||||
// VolumesPruneReport contains the response for Engine API:
|
||||
// POST "/volumes/prune".
|
||||
// CheckpointListOptions holds parameters to list checkpoints for a container
|
||||
//
|
||||
// Deprecated: use [volume.PruneReport].
|
||||
type VolumesPruneReport = volume.PruneReport
|
||||
// Deprecated: use [checkpoint.ListOptions].
|
||||
type CheckpointListOptions = checkpoint.ListOptions
|
||||
|
||||
// NetworkCreateRequest is the request message sent to the server for network create call.
|
||||
// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container
|
||||
//
|
||||
// Deprecated: use [network.CreateRequest].
|
||||
type NetworkCreateRequest = network.CreateRequest
|
||||
// Deprecated: use [checkpoint.DeleteOptions].
|
||||
type CheckpointDeleteOptions = checkpoint.DeleteOptions
|
||||
|
||||
// NetworkCreate is the expected body of the "create network" http request message
|
||||
// Checkpoint represents the details of a checkpoint when listing endpoints.
|
||||
//
|
||||
// Deprecated: use [network.CreateOptions].
|
||||
type NetworkCreate = network.CreateOptions
|
||||
// Deprecated: use [checkpoint.Summary].
|
||||
type Checkpoint = checkpoint.Summary
|
||||
|
||||
// NetworkListOptions holds parameters to filter the list of networks with.
|
||||
// Info contains response of Engine API:
|
||||
// GET "/info"
|
||||
//
|
||||
// Deprecated: use [network.ListOptions].
|
||||
type NetworkListOptions = network.ListOptions
|
||||
// Deprecated: use [system.Info].
|
||||
type Info = system.Info
|
||||
|
||||
// NetworkCreateResponse is the response message sent by the server for network create call.
|
||||
// Commit holds the Git-commit (SHA1) that a binary was built from, as reported
|
||||
// in the version-string of external tools, such as containerd, or runC.
|
||||
//
|
||||
// Deprecated: use [network.CreateResponse].
|
||||
type NetworkCreateResponse = network.CreateResponse
|
||||
// Deprecated: use [system.Commit].
|
||||
type Commit = system.Commit
|
||||
|
||||
// NetworkInspectOptions holds parameters to inspect network.
|
||||
// PluginsInfo is a temp struct holding the names of plugins
|
||||
// registered with the docker daemon. It is used by the [system.Info] struct
|
||||
//
|
||||
// Deprecated: use [network.InspectOptions].
|
||||
type NetworkInspectOptions = network.InspectOptions
|
||||
// Deprecated: use [system.PluginsInfo].
|
||||
type PluginsInfo = system.PluginsInfo
|
||||
|
||||
// NetworkConnect represents the data to be used to connect a container to the network
|
||||
// NetworkAddressPool is a temp struct used by [system.Info] struct.
|
||||
//
|
||||
// Deprecated: use [network.ConnectOptions].
|
||||
type NetworkConnect = network.ConnectOptions
|
||||
// Deprecated: use [system.NetworkAddressPool].
|
||||
type NetworkAddressPool = system.NetworkAddressPool
|
||||
|
||||
// NetworkDisconnect represents the data to be used to disconnect a container from the network
|
||||
// Runtime describes an OCI runtime.
|
||||
//
|
||||
// Deprecated: use [network.DisconnectOptions].
|
||||
type NetworkDisconnect = network.DisconnectOptions
|
||||
// Deprecated: use [system.Runtime].
|
||||
type Runtime = system.Runtime
|
||||
|
||||
// EndpointResource contains network resources allocated and used for a container in a network.
|
||||
// SecurityOpt contains the name and options of a security option.
|
||||
//
|
||||
// Deprecated: use [network.EndpointResource].
|
||||
type EndpointResource = network.EndpointResource
|
||||
// Deprecated: use [system.SecurityOpt].
|
||||
type SecurityOpt = system.SecurityOpt
|
||||
|
||||
// NetworkResource is the body of the "get network" http response message.
|
||||
// KeyValue holds a key/value pair.
|
||||
//
|
||||
// Deprecated: use [network.Inspect] or [network.Summary] (for list operations).
|
||||
type NetworkResource = network.Inspect
|
||||
// Deprecated: use [system.KeyValue].
|
||||
type KeyValue = system.KeyValue
|
||||
|
||||
// NetworksPruneReport contains the response for Engine API:
|
||||
// POST "/networks/prune"
|
||||
// ImageDeleteResponseItem image delete response item.
|
||||
//
|
||||
// Deprecated: use [network.PruneReport].
|
||||
type NetworksPruneReport = network.PruneReport
|
||||
// Deprecated: use [image.DeleteResponse].
|
||||
type ImageDeleteResponseItem = image.DeleteResponse
|
||||
|
||||
// ExecConfig is a small subset of the Config struct that holds the configuration
|
||||
// for the exec feature of docker.
|
||||
// ImageSummary image summary.
|
||||
//
|
||||
// Deprecated: use [container.ExecOptions].
|
||||
type ExecConfig = container.ExecOptions
|
||||
// Deprecated: use [image.Summary].
|
||||
type ImageSummary = image.Summary
|
||||
|
||||
// ExecStartCheck is a temp struct used by execStart
|
||||
// Config fields is part of ExecConfig in runconfig package
|
||||
// ImageMetadata contains engine-local data about the image.
|
||||
//
|
||||
// Deprecated: use [container.ExecStartOptions] or [container.ExecAttachOptions].
|
||||
type ExecStartCheck = container.ExecStartOptions
|
||||
// Deprecated: use [image.Metadata].
|
||||
type ImageMetadata = image.Metadata
|
||||
|
||||
// ContainerExecInspect holds information returned by exec inspect.
|
||||
// ServiceCreateResponse contains the information returned to a client
|
||||
// on the creation of a new service.
|
||||
//
|
||||
// Deprecated: use [container.ExecInspect].
|
||||
type ContainerExecInspect = container.ExecInspect
|
||||
// Deprecated: use [swarm.ServiceCreateResponse].
|
||||
type ServiceCreateResponse = swarm.ServiceCreateResponse
|
||||
|
||||
// ContainersPruneReport contains the response for Engine API:
|
||||
// POST "/containers/prune"
|
||||
// ServiceUpdateResponse service update response.
|
||||
//
|
||||
// Deprecated: use [container.PruneReport].
|
||||
type ContainersPruneReport = container.PruneReport
|
||||
// Deprecated: use [swarm.ServiceUpdateResponse].
|
||||
type ServiceUpdateResponse = swarm.ServiceUpdateResponse
|
||||
|
||||
// ContainerPathStat is used to encode the header from
|
||||
// GET "/containers/{name:.*}/archive"
|
||||
// "Name" is the file or directory name.
|
||||
// ContainerStartOptions holds parameters to start containers.
|
||||
//
|
||||
// Deprecated: use [container.PathStat].
|
||||
type ContainerPathStat = container.PathStat
|
||||
// Deprecated: use [container.StartOptions].
|
||||
type ContainerStartOptions = container.StartOptions
|
||||
|
||||
// CopyToContainerOptions holds information
|
||||
// about files to copy into a container.
|
||||
// ResizeOptions holds parameters to resize a TTY.
|
||||
// It can be used to resize container TTYs and
|
||||
// exec process TTYs too.
|
||||
//
|
||||
// Deprecated: use [container.CopyToContainerOptions],
|
||||
type CopyToContainerOptions = container.CopyToContainerOptions
|
||||
// Deprecated: use [container.ResizeOptions].
|
||||
type ResizeOptions = container.ResizeOptions
|
||||
|
||||
// ContainerStats contains response of Engine API:
|
||||
// GET "/stats"
|
||||
// ContainerAttachOptions holds parameters to attach to a container.
|
||||
//
|
||||
// Deprecated: use [container.StatsResponseReader].
|
||||
type ContainerStats = container.StatsResponseReader
|
||||
// Deprecated: use [container.AttachOptions].
|
||||
type ContainerAttachOptions = container.AttachOptions
|
||||
|
||||
// ThrottlingData stores CPU throttling stats of one running container.
|
||||
// Not used on Windows.
|
||||
// ContainerCommitOptions holds parameters to commit changes into a container.
|
||||
//
|
||||
// Deprecated: use [container.ThrottlingData].
|
||||
type ThrottlingData = container.ThrottlingData
|
||||
// Deprecated: use [container.CommitOptions].
|
||||
type ContainerCommitOptions = container.CommitOptions
|
||||
|
||||
// CPUUsage stores All CPU stats aggregated since container inception.
|
||||
// ContainerListOptions holds parameters to list containers with.
|
||||
//
|
||||
// Deprecated: use [container.CPUUsage].
|
||||
type CPUUsage = container.CPUUsage
|
||||
// Deprecated: use [container.ListOptions].
|
||||
type ContainerListOptions = container.ListOptions
|
||||
|
||||
// CPUStats aggregates and wraps all CPU related info of container
|
||||
// ContainerLogsOptions holds parameters to filter logs with.
|
||||
//
|
||||
// Deprecated: use [container.CPUStats].
|
||||
type CPUStats = container.CPUStats
|
||||
// Deprecated: use [container.LogsOptions].
|
||||
type ContainerLogsOptions = container.LogsOptions
|
||||
|
||||
// MemoryStats aggregates all memory stats since container inception on Linux.
|
||||
// Windows returns stats for commit and private working set only.
|
||||
// ContainerRemoveOptions holds parameters to remove containers.
|
||||
//
|
||||
// Deprecated: use [container.MemoryStats].
|
||||
type MemoryStats = container.MemoryStats
|
||||
// Deprecated: use [container.RemoveOptions].
|
||||
type ContainerRemoveOptions = container.RemoveOptions
|
||||
|
||||
// BlkioStatEntry is one small entity to store a piece of Blkio stats
|
||||
// Not used on Windows.
|
||||
// DecodeSecurityOptions decodes a security options string slice to a type safe
|
||||
// [system.SecurityOpt].
|
||||
//
|
||||
// Deprecated: use [container.BlkioStatEntry].
|
||||
type BlkioStatEntry = container.BlkioStatEntry
|
||||
|
||||
// BlkioStats stores All IO service stats for data read and write.
|
||||
// This is a Linux specific structure as the differences between expressing
|
||||
// block I/O on Windows and Linux are sufficiently significant to make
|
||||
// little sense attempting to morph into a combined structure.
|
||||
//
|
||||
// Deprecated: use [container.BlkioStats].
|
||||
type BlkioStats = container.BlkioStats
|
||||
|
||||
// StorageStats is the disk I/O stats for read/write on Windows.
|
||||
//
|
||||
// Deprecated: use [container.StorageStats].
|
||||
type StorageStats = container.StorageStats
|
||||
|
||||
// NetworkStats aggregates the network stats of one container
|
||||
//
|
||||
// Deprecated: use [container.NetworkStats].
|
||||
type NetworkStats = container.NetworkStats
|
||||
|
||||
// PidsStats contains the stats of a container's pids
|
||||
//
|
||||
// Deprecated: use [container.PidsStats].
|
||||
type PidsStats = container.PidsStats
|
||||
|
||||
// Stats is Ultimate struct aggregating all types of stats of one container
|
||||
//
|
||||
// Deprecated: use [container.Stats].
|
||||
type Stats = container.Stats
|
||||
|
||||
// StatsJSON is a container stats response that also includes per-interface network stats
|
||||
//
|
||||
// Deprecated: use [container.StatsResponse].
|
||||
type StatsJSON = container.StatsResponse
|
||||
|
||||
// EventsOptions holds parameters to filter events with.
|
||||
//
|
||||
// Deprecated: use [events.ListOptions].
|
||||
type EventsOptions = events.ListOptions
|
||||
|
||||
// ImageSearchOptions holds parameters to search images with.
|
||||
//
|
||||
// Deprecated: use [registry.SearchOptions].
|
||||
type ImageSearchOptions = registry.SearchOptions
|
||||
|
||||
// ImageImportSource holds source information for ImageImport
|
||||
//
|
||||
// Deprecated: use [image.ImportSource].
|
||||
type ImageImportSource = image.ImportSource
|
||||
|
||||
// ImageLoadResponse returns information to the client about a load process.
|
||||
//
|
||||
// Deprecated: use [image.LoadResponse].
|
||||
type ImageLoadResponse = image.LoadResponse
|
||||
|
||||
// ContainerNode stores information about the node that a container
|
||||
// is running on. It's only used by the Docker Swarm standalone API.
|
||||
//
|
||||
// Deprecated: ContainerNode was used for the classic Docker Swarm standalone API. It will be removed in the next release.
|
||||
type ContainerNode struct {
|
||||
ID string
|
||||
IPAddress string `json:"IP"`
|
||||
Addr string
|
||||
Name string
|
||||
Cpus int
|
||||
Memory int64
|
||||
Labels map[string]string
|
||||
// Deprecated: use [system.DecodeSecurityOptions].
|
||||
func DecodeSecurityOptions(opts []string) ([]system.SecurityOpt, error) {
|
||||
return system.DecodeSecurityOptions(opts)
|
||||
}
|
||||
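Because the names above are type aliases rather than new types, migrating a caller is a rename: the old and new identifiers denote the same type. A minimal sketch, assuming a docker/docker module version in which both the alias and the new package are present:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/image"
)

func main() {
	// The deprecated alias and the new name refer to the same type, so no
	// conversion is required in either direction.
	var legacy types.ImagesPruneReport = image.PruneReport{SpaceReclaimed: 1024}
	report := image.PruneReport(legacy) // a no-op conversion for aliases
	fmt.Println(report.SpaceReclaimed)
}
```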
|
||||
14
api/types/versions/README.md
Normal file
@@ -0,0 +1,14 @@
|
||||
# Legacy API type versions
|
||||
|
||||
This package includes types for legacy API versions. The stable version of the API types live in `api/types/*.go`.
|
||||
|
||||
Consider moving a type here when you need to keep backwards compatibility in the API. These legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal to `1.19`. Types in the `v1p20` package are valid for API version `1.20`, since versions below that use the legacy types in `v1p19`.
|
||||
|
||||
## Package name conventions
|
||||
|
||||
The package name convention is to use `v` as a prefix for the version number and `p` (patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention:
|
||||
|
||||
1. We cannot use `.` because it's interpreted by the language; think of `v1.20.CallFunction`.
|
||||
2. We cannot use `_` because golint complains about it. The code is actually valid, but it arguably looks more awkward: `v1_20.CallFunction`.
|
||||
|
||||
For instance, if you want to modify a type that was available in version `1.21` of the API but will have different fields in version `1.22`, create a new package under `api/types/versions/v1p21`.
|
||||
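To illustrate the convention, a handler that picks the legacy response shape by API version might select between the `v1pNN` packages as below. This is a sketch: the version-comparison helper from `api/types/versions` is assumed, and the structs are returned empty rather than populated.

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/versions"
	"github.com/docker/docker/api/types/versions/v1p19"
	"github.com/docker/docker/api/types/versions/v1p20"
)

// pickContainerJSON chooses the response shape for a negotiated API version.
func pickContainerJSON(apiVersion string) interface{} {
	switch {
	case versions.LessThan(apiVersion, "1.20"):
		return &v1p19.ContainerJSON{} // API <= 1.19
	case versions.LessThan(apiVersion, "1.21"):
		return &v1p20.ContainerJSON{} // API == 1.20
	default:
		return nil // current api/types would be used here
	}
}

func main() {
	fmt.Printf("%T\n", pickContainerJSON("1.19"))
	fmt.Printf("%T\n", pickContainerJSON("1.20"))
}
```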
35
api/types/versions/v1p19/types.go
Normal file
@@ -0,0 +1,35 @@
|
||||
// Package v1p19 provides specific API types for the API version 1, patch 19.
|
||||
package v1p19 // import "github.com/docker/docker/api/types/versions/v1p19"
|
||||
|
||||
import (
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/versions/v1p20"
|
||||
"github.com/docker/go-connections/nat"
|
||||
)
|
||||
|
||||
// ContainerJSON is a backcompatibility struct for APIs prior to 1.20.
|
||||
// Note this is not used by the Windows daemon.
|
||||
type ContainerJSON struct {
|
||||
*types.ContainerJSONBase
|
||||
Volumes map[string]string
|
||||
VolumesRW map[string]bool
|
||||
Config *ContainerConfig
|
||||
NetworkSettings *v1p20.NetworkSettings
|
||||
}
|
||||
|
||||
// ContainerConfig is a backcompatibility struct for APIs prior to 1.20.
|
||||
type ContainerConfig struct {
|
||||
*container.Config
|
||||
|
||||
MacAddress string
|
||||
NetworkDisabled bool
|
||||
ExposedPorts map[nat.Port]struct{}
|
||||
|
||||
// backward compatibility, they now live in HostConfig
|
||||
VolumeDriver string
|
||||
Memory int64
|
||||
MemorySwap int64
|
||||
CPUShares int64 `json:"CpuShares"`
|
||||
CPUSet string `json:"Cpuset"`
|
||||
}
|
||||
40
api/types/versions/v1p20/types.go
Normal file
@@ -0,0 +1,40 @@
|
||||
// Package v1p20 provides specific API types for the API version 1, patch 20.
|
||||
package v1p20 // import "github.com/docker/docker/api/types/versions/v1p20"
|
||||
|
||||
import (
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/go-connections/nat"
|
||||
)
|
||||
|
||||
// ContainerJSON is a backcompatibility struct for the API 1.20
|
||||
type ContainerJSON struct {
|
||||
*types.ContainerJSONBase
|
||||
Mounts []types.MountPoint
|
||||
Config *ContainerConfig
|
||||
NetworkSettings *NetworkSettings
|
||||
}
|
||||
|
||||
// ContainerConfig is a backcompatibility struct used in ContainerJSON for the API 1.20
|
||||
type ContainerConfig struct {
|
||||
*container.Config
|
||||
|
||||
MacAddress string
|
||||
NetworkDisabled bool
|
||||
ExposedPorts map[nat.Port]struct{}
|
||||
|
||||
// backward compatibility, they now live in HostConfig
|
||||
VolumeDriver string
|
||||
}
|
||||
|
||||
// StatsJSON is a backcompatibility struct used in Stats for APIs prior to 1.21
|
||||
type StatsJSON struct {
|
||||
types.Stats
|
||||
Network types.NetworkStats `json:"network,omitempty"`
|
||||
}
|
||||
|
||||
// NetworkSettings is a backward compatible struct for APIs prior to 1.21
|
||||
type NetworkSettings struct {
|
||||
types.NetworkSettingsBase
|
||||
types.DefaultNetworkSettings
|
||||
}
|
||||
@@ -238,13 +238,13 @@ type TopologyRequirement struct {
|
||||
// If requisite is specified, all topologies in preferred list MUST
|
||||
// also be present in the list of requisite topologies.
|
||||
//
|
||||
// If the SP is unable to make the provisioned volume available
|
||||
// If the SP is unable to to make the provisioned volume available
|
||||
// from any of the preferred topologies, the SP MAY choose a topology
|
||||
// from the list of requisite topologies.
|
||||
// If the list of requisite topologies is not specified, then the SP
|
||||
// MAY choose from the list of all possible topologies.
|
||||
// If the list of requisite topologies is specified and the SP is
|
||||
// unable to make the provisioned volume available from any of the
|
||||
// unable to to make the provisioned volume available from any of the
|
||||
// requisite topologies it MUST fail the CreateVolume call.
|
||||
//
|
||||
// Example 1:
|
||||
@@ -254,7 +254,7 @@ type TopologyRequirement struct {
|
||||
// {"region": "R1", "zone": "Z3"}
|
||||
// preferred =
|
||||
// {"region": "R1", "zone": "Z3"}
|
||||
// then the SP SHOULD first attempt to make the provisioned volume
|
||||
// then the the SP SHOULD first attempt to make the provisioned volume
|
||||
// available from "zone" "Z3" in the "region" "R1" and fall back to
|
||||
// "zone" "Z2" in the "region" "R1" if that is not possible.
|
||||
//
|
||||
@@ -268,7 +268,7 @@ type TopologyRequirement struct {
|
||||
// preferred =
|
||||
// {"region": "R1", "zone": "Z4"},
|
||||
// {"region": "R1", "zone": "Z2"}
|
||||
// then the SP SHOULD first attempt to make the provisioned volume
|
||||
// then the the SP SHOULD first attempt to make the provisioned volume
|
||||
// accessible from "zone" "Z4" in the "region" "R1" and fall back to
|
||||
// "zone" "Z2" in the "region" "R1" if that is not possible. If that
|
||||
// is not possible, the SP may choose between either the "zone"
|
||||
@@ -287,7 +287,7 @@ type TopologyRequirement struct {
|
||||
// preferred =
|
||||
// {"region": "R1", "zone": "Z5"},
|
||||
// {"region": "R1", "zone": "Z3"}
|
||||
// then the SP SHOULD first attempt to make the provisioned volume
|
||||
// then the the SP SHOULD first attempt to make the provisioned volume
|
||||
// accessible from the combination of the two "zones" "Z5" and "Z3" in
|
||||
// the "region" "R1". If that's not possible, it should fall back to
|
||||
// a combination of "Z5" and other possibilities from the list of
|
||||
|
||||
@@ -6,10 +6,3 @@ import "github.com/docker/docker/api/types/filters"
|
||||
type ListOptions struct {
|
||||
Filters filters.Args
|
||||
}
|
||||
|
||||
// PruneReport contains the response for Engine API:
|
||||
// POST "/volumes/prune"
|
||||
type PruneReport struct {
|
||||
VolumesDeleted []string
|
||||
SpaceReclaimed uint64
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
|
||||
//go:build go1.21
|
||||
//go:build go1.19
|
||||
|
||||
package containerimage
|
||||
|
||||
@@ -9,23 +9,22 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/containerd/content"
|
||||
cerrdefs "github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/gc"
|
||||
"github.com/containerd/containerd/images"
|
||||
"github.com/containerd/containerd/leases"
|
||||
"github.com/containerd/containerd/platforms"
|
||||
cdreference "github.com/containerd/containerd/reference"
|
||||
ctdreference "github.com/containerd/containerd/reference"
|
||||
"github.com/containerd/containerd/remotes"
|
||||
"github.com/containerd/containerd/remotes/docker"
|
||||
"github.com/containerd/containerd/remotes/docker/schema1" //nolint:staticcheck // Ignore SA1019: "github.com/containerd/containerd/remotes/docker/schema1" is deprecated: use images formatted in Docker Image Manifest v2, Schema 2, or OCI Image Spec v1.
|
||||
cerrdefs "github.com/containerd/errdefs"
|
||||
"github.com/containerd/log"
|
||||
"github.com/containerd/platforms"
|
||||
distreference "github.com/distribution/reference"
|
||||
dimages "github.com/docker/docker/daemon/images"
|
||||
"github.com/docker/docker/distribution/metadata"
|
||||
@@ -35,15 +34,14 @@ import (
|
||||
pkgprogress "github.com/docker/docker/pkg/progress"
|
||||
"github.com/docker/docker/reference"
|
||||
"github.com/moby/buildkit/cache"
|
||||
"github.com/moby/buildkit/client"
|
||||
"github.com/moby/buildkit/client/llb/sourceresolver"
|
||||
"github.com/moby/buildkit/client/llb"
|
||||
"github.com/moby/buildkit/session"
|
||||
"github.com/moby/buildkit/solver"
|
||||
"github.com/moby/buildkit/solver/pb"
|
||||
"github.com/moby/buildkit/source"
|
||||
"github.com/moby/buildkit/source/containerimage"
|
||||
srctypes "github.com/moby/buildkit/source/types"
|
||||
"github.com/moby/buildkit/sourcepolicy"
|
||||
policy "github.com/moby/buildkit/sourcepolicy/pb"
|
||||
spb "github.com/moby/buildkit/sourcepolicy/pb"
|
||||
"github.com/moby/buildkit/util/flightcontrol"
|
||||
"github.com/moby/buildkit/util/imageutil"
|
||||
@@ -82,77 +80,9 @@ func NewSource(opt SourceOpt) (*Source, error) {
|
||||
return &Source{SourceOpt: opt}, nil
|
||||
}
|
||||
|
||||
// Schemes returns a list of SourceOp identifier schemes that this source
|
||||
// should match.
|
||||
func (is *Source) Schemes() []string {
|
||||
return []string{srctypes.DockerImageScheme}
|
||||
}
|
||||
|
||||
// Identifier constructs an Identifier from the given scheme, ref, and attrs,
|
||||
// all of which come from a SourceOp.
|
||||
func (is *Source) Identifier(scheme, ref string, attrs map[string]string, platform *pb.Platform) (source.Identifier, error) {
|
||||
return is.registryIdentifier(ref, attrs, platform)
|
||||
}
|
||||
|
||||
// Copied from github.com/moby/buildkit/source/containerimage/source.go
|
||||
func (is *Source) registryIdentifier(ref string, attrs map[string]string, platform *pb.Platform) (source.Identifier, error) {
|
||||
id, err := containerimage.NewImageIdentifier(ref)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if platform != nil {
|
||||
id.Platform = &ocispec.Platform{
|
||||
OS: platform.OS,
|
||||
Architecture: platform.Architecture,
|
||||
Variant: platform.Variant,
|
||||
OSVersion: platform.OSVersion,
|
||||
}
|
||||
if platform.OSFeatures != nil {
|
||||
id.Platform.OSFeatures = append([]string{}, platform.OSFeatures...)
|
||||
}
|
||||
}
|
||||
|
||||
for k, v := range attrs {
|
||||
switch k {
|
||||
case pb.AttrImageResolveMode:
|
||||
rm, err := resolver.ParseImageResolveMode(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
id.ResolveMode = rm
|
||||
case pb.AttrImageRecordType:
|
||||
rt, err := parseImageRecordType(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
id.RecordType = rt
|
||||
case pb.AttrImageLayerLimit:
|
||||
l, err := strconv.Atoi(v)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "invalid layer limit %s", v)
|
||||
}
|
||||
if l <= 0 {
|
||||
return nil, errors.Errorf("invalid layer limit %s", v)
|
||||
}
|
||||
id.LayerLimit = &l
|
||||
}
|
||||
}
|
||||
|
||||
return id, nil
|
||||
}
|
||||
|
||||
func parseImageRecordType(v string) (client.UsageRecordType, error) {
|
||||
switch client.UsageRecordType(v) {
|
||||
case "", client.UsageRecordTypeRegular:
|
||||
return client.UsageRecordTypeRegular, nil
|
||||
case client.UsageRecordTypeInternal:
|
||||
return client.UsageRecordTypeInternal, nil
|
||||
case client.UsageRecordTypeFrontend:
|
||||
return client.UsageRecordTypeFrontend, nil
|
||||
default:
|
||||
return "", errors.Errorf("invalid record type %s", v)
|
||||
}
|
||||
// ID returns image scheme identifier
|
||||
func (is *Source) ID() string {
|
||||
return srctypes.DockerImageScheme
|
||||
}
|
||||
|
||||
func (is *Source) resolveLocal(refStr string) (*image.Image, error) {
|
||||
@@ -177,7 +107,7 @@ type resolveRemoteResult struct {
|
||||
dt []byte
|
||||
}
|
||||
|
||||
func (is *Source) resolveRemote(ctx context.Context, ref string, platform *ocispec.Platform, sm *session.Manager, g session.Group) (digest.Digest, []byte, error) {
|
||||
func (is *Source) resolveRemote(ctx context.Context, ref string, platform *ocispec.Platform, sm *session.Manager, g session.Group) (string, digest.Digest, []byte, error) {
|
||||
p := platforms.DefaultSpec()
|
||||
if platform != nil {
|
||||
p = *platform
|
||||
@@ -186,36 +116,34 @@ func (is *Source) resolveRemote(ctx context.Context, ref string, platform *ocisp
|
||||
key := "getconfig::" + ref + "::" + platforms.Format(p)
|
||||
res, err := is.g.Do(ctx, key, func(ctx context.Context) (*resolveRemoteResult, error) {
|
||||
res := resolver.DefaultPool.GetResolver(is.RegistryHosts, ref, "pull", sm, g)
|
||||
dgst, dt, err := imageutil.Config(ctx, ref, res, is.ContentStore, is.LeaseManager, platform)
|
||||
ref, dgst, dt, err := imageutil.Config(ctx, ref, res, is.ContentStore, is.LeaseManager, platform, []*policy.Policy{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &resolveRemoteResult{ref: ref, dgst: dgst, dt: dt}, nil
|
||||
})
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
return ref, "", nil, err
|
||||
}
|
||||
return res.dgst, res.dt, nil
|
||||
return res.ref, res.dgst, res.dt, nil
|
||||
}
|
||||
|
||||
// ResolveImageConfig returns image config for an image
|
||||
func (is *Source) ResolveImageConfig(ctx context.Context, ref string, opt sourceresolver.Opt, sm *session.Manager, g session.Group) (digest.Digest, []byte, error) {
|
||||
if opt.ImageOpt == nil {
|
||||
return "", nil, fmt.Errorf("can only resolve an image: %v, opt: %v", ref, opt)
|
||||
}
|
||||
func (is *Source) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager, g session.Group) (string, digest.Digest, []byte, error) {
|
||||
ref, err := applySourcePolicies(ctx, ref, opt.SourcePolicies)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
return "", "", nil, err
|
||||
}
|
||||
resolveMode, err := resolver.ParseImageResolveMode(opt.ImageOpt.ResolveMode)
|
||||
resolveMode, err := source.ParseImageResolveMode(opt.ResolveMode)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
return ref, "", nil, err
|
||||
}
|
||||
switch resolveMode {
|
||||
case resolver.ResolveModeForcePull:
|
||||
return is.resolveRemote(ctx, ref, opt.Platform, sm, g)
|
||||
case source.ResolveModeForcePull:
|
||||
ref, dgst, dt, err := is.resolveRemote(ctx, ref, opt.Platform, sm, g)
|
||||
// TODO: pull should fallback to local in case of failure to allow offline behavior
|
||||
// the fallback doesn't work currently
|
||||
return ref, dgst, dt, err
|
||||
/*
|
||||
if err == nil {
|
||||
return dgst, dt, err
|
||||
@@ -225,10 +153,10 @@ func (is *Source) ResolveImageConfig(ctx context.Context, ref string, opt source
|
||||
return "", dt, err
|
||||
*/
|
||||
|
||||
case resolver.ResolveModeDefault:
|
||||
case source.ResolveModeDefault:
|
||||
// default == prefer local, but in the future could be smarter
|
||||
fallthrough
|
||||
case resolver.ResolveModePreferLocal:
|
||||
case source.ResolveModePreferLocal:
|
||||
img, err := is.resolveLocal(ref)
|
||||
if err == nil {
|
||||
if opt.Platform != nil && !platformMatches(img, opt.Platform) {
|
||||
@@ -237,19 +165,19 @@ func (is *Source) ResolveImageConfig(ctx context.Context, ref string, opt source
|
||||
path.Join(img.OS, img.Architecture, img.Variant),
|
||||
)
|
||||
} else {
|
||||
return "", img.RawJSON(), err
|
||||
return ref, "", img.RawJSON(), err
|
||||
}
|
||||
}
|
||||
// fallback to remote
|
||||
return is.resolveRemote(ctx, ref, opt.Platform, sm, g)
|
||||
}
|
||||
// should never happen
|
||||
return "", nil, fmt.Errorf("builder cannot resolve image %s: invalid mode %q", ref, opt.ImageOpt.ResolveMode)
|
||||
return ref, "", nil, fmt.Errorf("builder cannot resolve image %s: invalid mode %q", ref, opt.ResolveMode)
|
||||
}
|
||||
|
||||
// Resolve returns access to pulling for an identifier
|
||||
func (is *Source) Resolve(ctx context.Context, id source.Identifier, sm *session.Manager, vtx solver.Vertex) (source.SourceInstance, error) {
|
||||
imageIdentifier, ok := id.(*containerimage.ImageIdentifier)
|
||||
imageIdentifier, ok := id.(*source.ImageIdentifier)
|
||||
if !ok {
|
||||
return nil, errors.Errorf("invalid image identifier %v", id)
|
||||
}
|
||||
@@ -273,7 +201,7 @@ type puller struct {
|
||||
is *Source
|
||||
resolveLocalOnce sync.Once
|
||||
g flightcontrol.Group[struct{}]
|
||||
src *containerimage.ImageIdentifier
|
||||
src *source.ImageIdentifier
|
||||
desc ocispec.Descriptor
|
||||
ref string
|
||||
config []byte
|
||||
@@ -325,7 +253,7 @@ func (p *puller) resolveLocal() {
|
||||
}
|
||||
}
|
||||
|
||||
if p.src.ResolveMode == resolver.ResolveModeDefault || p.src.ResolveMode == resolver.ResolveModePreferLocal {
|
||||
if p.src.ResolveMode == source.ResolveModeDefault || p.src.ResolveMode == source.ResolveModePreferLocal {
|
||||
ref := p.src.Reference.String()
|
||||
img, err := p.is.resolveLocal(ref)
|
||||
if err == nil {
|
||||
@@ -374,17 +302,12 @@ func (p *puller) resolve(ctx context.Context, g session.Group) error {
|
||||
if err != nil {
|
||||
return struct{}{}, err
|
||||
}
|
||||
_, dt, err := p.is.ResolveImageConfig(ctx, ref.String(), sourceresolver.Opt{
|
||||
Platform: &p.platform,
|
||||
ImageOpt: &sourceresolver.ResolveImageOpt{
|
||||
ResolveMode: p.src.ResolveMode.String(),
|
||||
},
|
||||
}, p.sm, g)
|
||||
newRef, _, dt, err := p.is.ResolveImageConfig(ctx, ref.String(), llb.ResolveImageConfigOpt{Platform: &p.platform, ResolveMode: p.src.ResolveMode.String()}, p.sm, g)
|
||||
if err != nil {
|
||||
return struct{}{}, err
|
||||
}
|
||||
|
||||
p.ref = ref.String()
|
||||
p.ref = newRef
|
||||
p.config = dt
|
||||
}
|
||||
return struct{}{}, nil
|
||||
@@ -943,8 +866,12 @@ func applySourcePolicies(ctx context.Context, str string, spls []*spb.Policy) (s
|
||||
if err != nil {
|
||||
return "", errors.WithStack(err)
|
||||
}
|
||||
op := &pb.SourceOp{
|
||||
Identifier: srctypes.DockerImageScheme + "://" + ref.String(),
|
||||
op := &pb.Op{
|
||||
Op: &pb.Op_Source{
|
||||
Source: &pb.SourceOp{
|
||||
Identifier: srctypes.DockerImageScheme + "://" + ref.String(),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
mut, err := sourcepolicy.NewEngine(spls).Evaluate(ctx, op)
|
||||
@@ -957,9 +884,9 @@ func applySourcePolicies(ctx context.Context, str string, spls []*spb.Policy) (s
|
||||
t string
|
||||
ok bool
|
||||
)
|
||||
t, newRef, ok := strings.Cut(op.GetIdentifier(), "://")
|
||||
t, newRef, ok := strings.Cut(op.GetSource().GetIdentifier(), "://")
|
||||
if !ok {
|
||||
return "", errors.Errorf("could not parse ref: %s", op.GetIdentifier())
|
||||
return "", errors.Errorf("could not parse ref: %s", op.GetSource().GetIdentifier())
|
||||
}
|
||||
if ok && t != srctypes.DockerImageScheme {
|
||||
return "", &imageutil.ResolveToNonImageError{Ref: str, Updated: newRef}
|
||||
|
||||
@@ -22,9 +22,6 @@ func (s *snapshotter) GetDiffIDs(ctx context.Context, key string) ([]layer.DiffI
|
||||
}
|
||||
|
||||
func (s *snapshotter) EnsureLayer(ctx context.Context, key string) ([]layer.DiffID, error) {
|
||||
s.layerCreateLocker.Lock(key)
|
||||
defer s.layerCreateLocker.Unlock(key)
|
||||
|
||||
diffIDs, err := s.GetDiffIDs(ctx, key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
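The hunk above concerns per-key serialization of layer creation via `layerCreateLocker`; a minimal sketch of the moby/locker pattern it relies on, with the actual layer work replaced by a stand-in callback:

```go
package main

import (
	"fmt"

	"github.com/moby/locker"
)

var layerCreateLocker = locker.New()

// ensureLayer serializes concurrent callers per key: the same key is handled
// one caller at a time, while different keys proceed in parallel.
func ensureLayer(key string, create func() error) error {
	layerCreateLocker.Lock(key)
	defer layerCreateLocker.Unlock(key)
	return create()
}

func main() {
	err := ensureLayer("sha256:abc123", func() error {
		fmt.Println("materializing layer for this key")
		return nil
	})
	fmt.Println("done:", err)
}
```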
|
||||
@@ -7,17 +7,16 @@ import (
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
cerrdefs "github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/leases"
|
||||
"github.com/containerd/containerd/mount"
|
||||
"github.com/containerd/containerd/snapshots"
|
||||
cerrdefs "github.com/containerd/errdefs"
|
||||
"github.com/docker/docker/daemon/graphdriver"
|
||||
"github.com/docker/docker/layer"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/moby/buildkit/identity"
|
||||
"github.com/moby/buildkit/snapshot"
|
||||
"github.com/moby/buildkit/util/leaseutil"
|
||||
"github.com/moby/locker"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"github.com/pkg/errors"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
@@ -52,11 +51,10 @@ type checksumCalculator interface {
|
||||
type snapshotter struct {
|
||||
opt Opt
|
||||
|
||||
refs map[string]layer.Layer
|
||||
db *bolt.DB
|
||||
mu sync.Mutex
|
||||
reg graphIDRegistrar
|
||||
layerCreateLocker *locker.Locker
|
||||
refs map[string]layer.Layer
|
||||
db *bolt.DB
|
||||
mu sync.Mutex
|
||||
reg graphIDRegistrar
|
||||
}
|
||||
|
||||
// NewSnapshotter creates a new snapshotter
|
||||
@@ -73,11 +71,10 @@ func NewSnapshotter(opt Opt, prevLM leases.Manager, ns string) (snapshot.Snapsho
|
||||
}
|
||||
|
||||
s := &snapshotter{
|
||||
opt: opt,
|
||||
db: db,
|
||||
refs: map[string]layer.Layer{},
|
||||
reg: reg,
|
||||
layerCreateLocker: locker.New(),
|
||||
opt: opt,
|
||||
db: db,
|
||||
refs: map[string]layer.Layer{},
|
||||
reg: reg,
|
||||
}
|
||||
|
||||
slm := newLeaseManager(s, prevLM)
|
||||
|
||||
@@ -10,11 +10,10 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/containerd/containerd/remotes/docker"
|
||||
"github.com/containerd/platforms"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/backend"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
timetypes "github.com/docker/docker/api/types/time"
|
||||
"github.com/docker/docker/builder"
|
||||
"github.com/docker/docker/builder/builder-next/exporter"
|
||||
@@ -22,11 +21,11 @@ import (
|
||||
"github.com/docker/docker/builder/builder-next/exporter/overrides"
|
||||
"github.com/docker/docker/daemon/config"
|
||||
"github.com/docker/docker/daemon/images"
|
||||
"github.com/docker/docker/errdefs"
|
||||
"github.com/docker/docker/libnetwork"
|
||||
"github.com/docker/docker/opts"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/pkg/streamformatter"
|
||||
"github.com/docker/go-units"
|
||||
controlapi "github.com/moby/buildkit/api/services/control"
|
||||
"github.com/moby/buildkit/client"
|
||||
"github.com/moby/buildkit/control"
|
||||
@@ -94,7 +93,6 @@ type Opt struct {
|
||||
Snapshotter string
|
||||
ContainerdAddress string
|
||||
ContainerdNamespace string
|
||||
Callbacks exporter.BuildkitCallbacks
|
||||
}
|
||||
|
||||
// Builder can build using BuildKit backend
|
||||
@@ -328,7 +326,7 @@ func (b *Builder) Build(ctx context.Context, opt backend.BuildConfig) (*builder.
|
||||
// TODO: remove once opt.Options.Platform is of type specs.Platform
|
||||
_, err := platforms.Parse(opt.Options.Platform)
|
||||
if err != nil {
|
||||
return nil, errdefs.InvalidParameter(err)
|
||||
return nil, err
|
||||
}
|
||||
frontendAttrs["platform"] = opt.Options.Platform
|
||||
}
|
||||
@@ -391,10 +389,9 @@ func (b *Builder) Build(ctx context.Context, opt backend.BuildConfig) (*builder.
|
||||
}
|
||||
|
||||
req := &controlapi.SolveRequest{
|
||||
Ref: id,
|
||||
Exporters: []*controlapi.Exporter{
|
||||
{Type: exporterName, Attrs: exporterAttrs},
|
||||
},
|
||||
Ref: id,
|
||||
Exporter: exporterName,
|
||||
ExporterAttrs: exporterAttrs,
|
||||
Frontend: "dockerfile.v0",
|
||||
FrontendAttrs: frontendAttrs,
|
||||
Session: opt.Options.SessionID,
|
||||
@@ -613,7 +610,7 @@ func toBuildkitExtraHosts(inp []string, hostGatewayIP net.IP) (string, error) {
|
||||
}
|
||||
|
||||
// toBuildkitUlimits converts ulimits from docker type=soft:hard format to buildkit's csv format
|
||||
func toBuildkitUlimits(inp []*container.Ulimit) (string, error) {
|
||||
func toBuildkitUlimits(inp []*units.Ulimit) (string, error) {
|
||||
if len(inp) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
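The remainder of the conversion is not shown in this hunk; a sketch of the idea, assuming BuildKit accepts a comma-separated list of `name=soft:hard` entries (the exact output format and the helper name are assumptions, not the verbatim implementation):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/docker/go-units"
)

// toBuildkitUlimitsSketch renders docker-style ulimits as a single CSV string.
func toBuildkitUlimitsSketch(inp []*units.Ulimit) string {
	if len(inp) == 0 {
		return ""
	}
	entries := make([]string, 0, len(inp))
	for _, u := range inp {
		entries = append(entries, fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard))
	}
	return strings.Join(entries, ",")
}

func main() {
	fmt.Println(toBuildkitUlimitsSketch([]*units.Ulimit{
		{Name: "nofile", Soft: 1024, Hard: 2048},
	})) // nofile=1024:2048
}
```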
|
||||
@@ -11,9 +11,9 @@ import (
|
||||
ctd "github.com/containerd/containerd"
|
||||
"github.com/containerd/containerd/content/local"
|
||||
ctdmetadata "github.com/containerd/containerd/metadata"
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/containerd/containerd/snapshots"
|
||||
"github.com/containerd/log"
|
||||
"github.com/containerd/platforms"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/builder/builder-next/adapters/containerimage"
|
||||
@@ -46,7 +46,6 @@ import (
|
||||
"github.com/moby/buildkit/util/archutil"
|
||||
"github.com/moby/buildkit/util/entitlements"
|
||||
"github.com/moby/buildkit/util/network/netproviders"
|
||||
"github.com/moby/buildkit/util/tracing"
|
||||
"github.com/moby/buildkit/util/tracing/detect"
|
||||
"github.com/moby/buildkit/worker"
|
||||
"github.com/moby/buildkit/worker/containerd"
|
||||
@@ -68,17 +67,11 @@ func newController(ctx context.Context, rt http.RoundTripper, opt Opt) (*control
|
||||
}
|
||||
|
||||
func getTraceExporter(ctx context.Context) trace.SpanExporter {
|
||||
tc := make(tracing.MultiSpanExporter, 0, 2)
|
||||
if detect.Recorder != nil {
|
||||
tc = append(tc, detect.Recorder)
|
||||
}
|
||||
|
||||
if exp, err := detect.NewSpanExporter(ctx); err != nil {
|
||||
exp, err := detect.Exporter()
|
||||
if err != nil {
|
||||
log.G(ctx).WithError(err).Error("Failed to detect trace exporter for buildkit controller")
|
||||
} else if !detect.IsNoneSpanExporter(exp) {
|
||||
tc = append(tc, exp)
|
||||
}
|
||||
return tc
|
||||
return exp
|
||||
}
|
||||
|
||||
func newSnapshotterController(ctx context.Context, rt http.RoundTripper, opt Opt) (*control.Controller, error) {
|
||||
@@ -109,22 +102,10 @@ func newSnapshotterController(ctx context.Context, rt http.RoundTripper, opt Opt
|
||||
|
||||
dns := getDNSConfig(opt.DNSConfig)
|
||||
|
||||
workerOpts := containerd.WorkerOptions{
|
||||
Root: opt.Root,
|
||||
Address: opt.ContainerdAddress,
|
||||
SnapshotterName: opt.Snapshotter,
|
||||
Namespace: opt.ContainerdNamespace,
|
||||
Rootless: opt.Rootless,
|
||||
Labels: map[string]string{
|
||||
wo, err := containerd.NewWorkerOpt(opt.Root, opt.ContainerdAddress, opt.Snapshotter, opt.ContainerdNamespace,
|
||||
opt.Rootless, map[string]string{
|
||||
label.Snapshotter: opt.Snapshotter,
|
||||
},
|
||||
DNS: dns,
|
||||
NetworkOpt: nc,
|
||||
ApparmorProfile: opt.ApparmorProfile,
|
||||
Selinux: false,
|
||||
}
|
||||
|
||||
wo, err := containerd.NewWorkerOpt(workerOpts, ctd.WithTimeout(60*time.Second))
|
||||
}, dns, nc, opt.ApparmorProfile, false, nil, "", ctd.WithTimeout(60*time.Second))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -149,7 +130,7 @@ func newSnapshotterController(ctx context.Context, rt http.RoundTripper, opt Opt
|
||||
}
|
||||
wo.Executor = exec
|
||||
|
||||
w, err := mobyworker.NewContainerdWorker(ctx, wo, opt.Callbacks)
|
||||
w, err := mobyworker.NewContainerdWorker(ctx, wo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -160,15 +141,9 @@ func newSnapshotterController(ctx context.Context, rt http.RoundTripper, opt Opt
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
gwf, err := gateway.NewGatewayFrontend(wc.Infos(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
frontends := map[string]frontend.Frontend{
|
||||
"dockerfile.v0": forwarder.NewGatewayForwarder(wc.Infos(), dockerfile.Build),
|
||||
"gateway.v0": gwf,
|
||||
"gateway.v0": gateway.NewGatewayFrontend(wc.Infos()),
|
||||
}
|
||||
|
||||
return control.NewController(control.Opt{
|
||||
@@ -327,13 +302,9 @@ func newGraphDriverController(ctx context.Context, rt http.RoundTripper, opt Opt
|
||||
}
|
||||
|
||||
exp, err := mobyexporter.New(mobyexporter.Opt{
|
||||
ImageStore: dist.ImageStore,
|
||||
ContentStore: store,
|
||||
Differ: differ,
|
||||
ImageTagger: opt.ImageTagger,
|
||||
LeaseManager: lm,
|
||||
ImageExportedCallback: opt.Callbacks.Exported,
|
||||
// Callbacks.Named is not used here because the tag operation is handled directly by the image service.
|
||||
ImageStore: dist.ImageStore,
|
||||
Differ: differ,
|
||||
ImageTagger: opt.ImageTagger,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -392,14 +363,9 @@ func newGraphDriverController(ctx context.Context, rt http.RoundTripper, opt Opt
|
||||
}
|
||||
wc.Add(w)
|
||||
|
||||
gwf, err := gateway.NewGatewayFrontend(wc.Infos(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
frontends := map[string]frontend.Frontend{
|
||||
"dockerfile.v0": forwarder.NewGatewayForwarder(wc.Infos(), dockerfile.Build),
|
||||
"gateway.v0": gwf,
|
||||
"gateway.v0": gateway.NewGatewayFrontend(wc.Infos()),
|
||||
}
|
||||
|
||||
return control.NewController(control.Opt{
|
||||
|
||||
@@ -16,7 +16,6 @@ import (
|
||||
"github.com/moby/buildkit/executor"
|
||||
"github.com/moby/buildkit/executor/oci"
|
||||
"github.com/moby/buildkit/executor/resources"
|
||||
resourcestypes "github.com/moby/buildkit/executor/resources/types"
|
||||
"github.com/moby/buildkit/executor/runcexecutor"
|
||||
"github.com/moby/buildkit/identity"
|
||||
"github.com/moby/buildkit/solver/pb"
|
||||
@@ -57,16 +56,9 @@ func newExecutor(root, cgroupParent string, net *libnetwork.Controller, dnsConfi
|
||||
return nil, err
|
||||
}
|
||||
|
||||
runcCmds := []string{"runc"}
|
||||
|
||||
// TODO: FIXME: testing env var, replace with something better or remove in a major version or two
|
||||
if runcOverride := os.Getenv("DOCKER_BUILDKIT_RUNC_COMMAND"); runcOverride != "" {
|
||||
runcCmds = []string{runcOverride}
|
||||
}
|
||||
|
||||
return runcexecutor.New(runcexecutor.Opt{
|
||||
Root: filepath.Join(root, "executor"),
|
||||
CommandCandidates: runcCmds,
|
||||
CommandCandidates: []string{"runc"},
|
||||
DefaultCgroupParent: cgroupParent,
|
||||
Rootless: rootless,
|
||||
NoPivot: os.Getenv("DOCKER_RAMDISK") != "",
|
||||
@@ -113,20 +105,20 @@ func (iface *lnInterface) init(c *libnetwork.Controller, n *libnetwork.Network)
|
||||
defer close(iface.ready)
|
||||
id := identity.NewID()
|
||||
|
||||
ep, err := n.CreateEndpoint(context.TODO(), id, libnetwork.CreateOptionDisableResolution())
|
||||
ep, err := n.CreateEndpoint(id, libnetwork.CreateOptionDisableResolution())
|
||||
if err != nil {
|
||||
iface.err = err
|
||||
return
|
||||
}
|
||||
|
||||
sbx, err := c.NewSandbox(context.TODO(), id, libnetwork.OptionUseExternalKey(), libnetwork.OptionHostsPath(filepath.Join(iface.provider.Root, id, "hosts")),
|
||||
sbx, err := c.NewSandbox(id, libnetwork.OptionUseExternalKey(), libnetwork.OptionHostsPath(filepath.Join(iface.provider.Root, id, "hosts")),
|
||||
libnetwork.OptionResolvConfPath(filepath.Join(iface.provider.Root, id, "resolv.conf")))
|
||||
if err != nil {
|
||||
iface.err = err
|
||||
return
|
||||
}
|
||||
|
||||
if err := ep.Join(context.TODO(), sbx); err != nil {
|
||||
if err := ep.Join(sbx); err != nil {
|
||||
iface.err = err
|
||||
return
|
||||
}
|
||||
@@ -136,8 +128,8 @@ func (iface *lnInterface) init(c *libnetwork.Controller, n *libnetwork.Network)
|
||||
}
|
||||
|
||||
// TODO(neersighted): Unstub Sample(), and collect data from the libnetwork Endpoint.
|
||||
func (iface *lnInterface) Sample() (*resourcestypes.NetworkSample, error) {
|
||||
return &resourcestypes.NetworkSample{}, nil
|
||||
func (iface *lnInterface) Sample() (*network.Sample, error) {
|
||||
return &network.Sample{}, nil
|
||||
}
|
||||
|
||||
func (iface *lnInterface) Set(s *specs.Spec) error {
|
||||
@@ -161,7 +153,7 @@ func (iface *lnInterface) Close() error {
|
||||
<-iface.ready
|
||||
if iface.sbx != nil {
|
||||
go func() {
|
||||
if err := iface.sbx.Delete(context.TODO()); err != nil {
|
||||
if err := iface.sbx.Delete(); err != nil {
|
||||
log.G(context.TODO()).WithError(err).Errorf("failed to delete builder network sandbox")
|
||||
}
|
||||
if err := os.RemoveAll(filepath.Join(iface.provider.Root, iface.sbx.ContainerID())); err != nil {
|
||||
|
||||
@@ -22,11 +22,11 @@ func newExecutor(_, _ string, _ *libnetwork.Controller, _ *oci.DNSConfig, _ bool
|
||||
type stubExecutor struct{}
|
||||
|
||||
func (w *stubExecutor) Run(ctx context.Context, id string, root executor.Mount, mounts []executor.Mount, process executor.ProcessInfo, started chan<- struct{}) (resourcetypes.Recorder, error) {
|
||||
return nil, errors.New("buildkit executor not implemented for " + runtime.GOOS)
|
||||
return nil, errors.New("buildkit executor not implemented for "+runtime.GOOS)
|
||||
}
|
||||
|
||||
func (w *stubExecutor) Exec(ctx context.Context, id string, process executor.ProcessInfo) error {
|
||||
return errors.New("buildkit executor not implemented for " + runtime.GOOS)
|
||||
return errors.New("buildkit executor not implemented for "+runtime.GOOS)
|
||||
}
|
||||
|
||||
func getDNSConfig(config.DNSConfig) *oci.DNSConfig {
|
||||
|
||||